path: root/rust/vendor/num
author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-19 17:39:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-19 17:39:49 +0000
commit a0aa2307322cd47bbf416810ac0292925e03be87 (patch)
tree 37076262a026c4b48c8a0e84f44ff9187556ca35 /rust/vendor/num
parent Initial commit. (diff)
download suricata-a0aa2307322cd47bbf416810ac0292925e03be87.tar.xz
suricata-a0aa2307322cd47bbf416810ac0292925e03be87.zip
Adding upstream version 1:7.0.3. (upstream/1%7.0.3)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--rust/vendor/num-bigint-0.2.6/.cargo-checksum.json1
-rw-r--r--rust/vendor/num-bigint-0.2.6/Cargo.toml81
-rw-r--r--rust/vendor/num-bigint-0.2.6/LICENSE-APACHE201
-rw-r--r--rust/vendor/num-bigint-0.2.6/LICENSE-MIT25
-rw-r--r--rust/vendor/num-bigint-0.2.6/README.md76
-rw-r--r--rust/vendor/num-bigint-0.2.6/RELEASES.md166
-rw-r--r--rust/vendor/num-bigint-0.2.6/benches/bigint.rs368
-rw-r--r--rust/vendor/num-bigint-0.2.6/benches/factorial.rs44
-rw-r--r--rust/vendor/num-bigint-0.2.6/benches/gcd.rs86
-rw-r--r--rust/vendor/num-bigint-0.2.6/benches/roots.rs176
-rw-r--r--rust/vendor/num-bigint-0.2.6/benches/shootout-pidigits.rs142
-rw-r--r--rust/vendor/num-bigint-0.2.6/bors.toml3
-rw-r--r--rust/vendor/num-bigint-0.2.6/build.rs14
-rwxr-xr-xrust/vendor/num-bigint-0.2.6/ci/rustup.sh12
-rwxr-xr-xrust/vendor/num-bigint-0.2.6/ci/test_full.sh39
-rw-r--r--rust/vendor/num-bigint-0.2.6/src/algorithms.rs789
-rw-r--r--rust/vendor/num-bigint-0.2.6/src/bigint.rs3110
-rw-r--r--rust/vendor/num-bigint-0.2.6/src/bigrand.rs223
-rw-r--r--rust/vendor/num-bigint-0.2.6/src/biguint.rs3106
-rw-r--r--rust/vendor/num-bigint-0.2.6/src/lib.rs233
-rw-r--r--rust/vendor/num-bigint-0.2.6/src/macros.rs445
-rw-r--r--rust/vendor/num-bigint-0.2.6/src/monty.rs129
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/bigint.rs1193
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/bigint_bitwise.rs181
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/bigint_scalar.rs151
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/biguint.rs1713
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/biguint_scalar.rs116
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/consts/mod.rs56
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/macros/mod.rs116
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/modpow.rs185
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/quickcheck.rs361
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/rand.rs324
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/roots.rs186
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/serde.rs103
-rw-r--r--rust/vendor/num-bigint-0.2.6/tests/torture.rs43
-rw-r--r--rust/vendor/num-bigint/.cargo-checksum.json1
-rw-r--r--rust/vendor/num-bigint/Cargo.toml103
-rw-r--r--rust/vendor/num-bigint/LICENSE-APACHE201
-rw-r--r--rust/vendor/num-bigint/LICENSE-MIT25
-rw-r--r--rust/vendor/num-bigint/README.md84
-rw-r--r--rust/vendor/num-bigint/RELEASES.md319
-rw-r--r--rust/vendor/num-bigint/benches/bigint.rs440
-rw-r--r--rust/vendor/num-bigint/benches/factorial.rs42
-rw-r--r--rust/vendor/num-bigint/benches/gcd.rs76
-rw-r--r--rust/vendor/num-bigint/benches/rng/mod.rs38
-rw-r--r--rust/vendor/num-bigint/benches/roots.rs166
-rw-r--r--rust/vendor/num-bigint/benches/shootout-pidigits.rs138
-rw-r--r--rust/vendor/num-bigint/build.rs94
-rw-r--r--rust/vendor/num-bigint/src/bigint.rs1171
-rw-r--r--rust/vendor/num-bigint/src/bigint/addition.rs239
-rw-r--r--rust/vendor/num-bigint/src/bigint/arbitrary.rs39
-rw-r--r--rust/vendor/num-bigint/src/bigint/bits.rs531
-rw-r--r--rust/vendor/num-bigint/src/bigint/convert.rs479
-rw-r--r--rust/vendor/num-bigint/src/bigint/division.rs496
-rw-r--r--rust/vendor/num-bigint/src/bigint/multiplication.rs217
-rw-r--r--rust/vendor/num-bigint/src/bigint/power.rs94
-rw-r--r--rust/vendor/num-bigint/src/bigint/serde.rs58
-rw-r--r--rust/vendor/num-bigint/src/bigint/shift.rs107
-rw-r--r--rust/vendor/num-bigint/src/bigint/subtraction.rs300
-rw-r--r--rust/vendor/num-bigint/src/bigrand.rs283
-rw-r--r--rust/vendor/num-bigint/src/biguint.rs1130
-rw-r--r--rust/vendor/num-bigint/src/biguint/addition.rs254
-rw-r--r--rust/vendor/num-bigint/src/biguint/arbitrary.rs34
-rw-r--r--rust/vendor/num-bigint/src/biguint/bits.rs93
-rw-r--r--rust/vendor/num-bigint/src/biguint/convert.rs820
-rw-r--r--rust/vendor/num-bigint/src/biguint/division.rs652
-rw-r--r--rust/vendor/num-bigint/src/biguint/iter.rs358
-rw-r--r--rust/vendor/num-bigint/src/biguint/monty.rs225
-rw-r--r--rust/vendor/num-bigint/src/biguint/multiplication.rs568
-rw-r--r--rust/vendor/num-bigint/src/biguint/power.rs258
-rw-r--r--rust/vendor/num-bigint/src/biguint/serde.rs119
-rw-r--r--rust/vendor/num-bigint/src/biguint/shift.rs172
-rw-r--r--rust/vendor/num-bigint/src/biguint/subtraction.rs312
-rw-r--r--rust/vendor/num-bigint/src/lib.rs290
-rw-r--r--rust/vendor/num-bigint/src/macros.rs441
-rw-r--r--rust/vendor/num-bigint/tests/bigint.rs1478
-rw-r--r--rust/vendor/num-bigint/tests/bigint_bitwise.rs178
-rw-r--r--rust/vendor/num-bigint/tests/bigint_scalar.rs157
-rw-r--r--rust/vendor/num-bigint/tests/biguint.rs1924
-rw-r--r--rust/vendor/num-bigint/tests/biguint_scalar.rs123
-rw-r--r--rust/vendor/num-bigint/tests/consts/mod.rs51
-rw-r--r--rust/vendor/num-bigint/tests/fuzzed.rs185
-rw-r--r--rust/vendor/num-bigint/tests/macros/mod.rs78
-rw-r--r--rust/vendor/num-bigint/tests/modpow.rs181
-rw-r--r--rust/vendor/num-bigint/tests/roots.rs160
-rw-r--r--rust/vendor/num-complex/.cargo-checksum.json1
-rw-r--r--rust/vendor/num-complex/Cargo.toml48
-rw-r--r--rust/vendor/num-complex/LICENSE-APACHE201
-rw-r--r--rust/vendor/num-complex/LICENSE-MIT25
-rw-r--r--rust/vendor/num-complex/README.md50
-rw-r--r--rust/vendor/num-complex/RELEASES.md103
-rw-r--r--rust/vendor/num-complex/build.rs20
-rw-r--r--rust/vendor/num-complex/src/cast.rs119
-rw-r--r--rust/vendor/num-complex/src/crand.rs115
-rw-r--r--rust/vendor/num-complex/src/lib.rs2663
-rw-r--r--rust/vendor/num-complex/src/pow.rs187
-rw-r--r--rust/vendor/num-derive/.cargo-checksum.json1
-rw-r--r--rust/vendor/num-derive/Cargo.toml47
-rw-r--r--rust/vendor/num-derive/LICENSE-APACHE201
-rw-r--r--rust/vendor/num-derive/LICENSE-MIT25
-rw-r--r--rust/vendor/num-derive/README.md53
-rw-r--r--rust/vendor/num-derive/RELEASES.md78
-rw-r--r--rust/vendor/num-derive/build.rs35
-rw-r--r--rust/vendor/num-derive/src/lib.rs797
-rw-r--r--rust/vendor/num-derive/tests/empty_enum.rs23
-rw-r--r--rust/vendor/num-derive/tests/issue-6.rs17
-rw-r--r--rust/vendor/num-derive/tests/issue-9.rs18
-rw-r--r--rust/vendor/num-derive/tests/newtype.rs91
-rw-r--r--rust/vendor/num-derive/tests/num_derive_without_num.rs20
-rw-r--r--rust/vendor/num-derive/tests/trivial.rs64
-rw-r--r--rust/vendor/num-derive/tests/with_custom_values.rs70
-rw-r--r--rust/vendor/num-integer/.cargo-checksum.json1
-rw-r--r--rust/vendor/num-integer/Cargo.toml51
-rw-r--r--rust/vendor/num-integer/LICENSE-APACHE201
-rw-r--r--rust/vendor/num-integer/LICENSE-MIT25
-rw-r--r--rust/vendor/num-integer/README.md64
-rw-r--r--rust/vendor/num-integer/RELEASES.md112
-rw-r--r--rust/vendor/num-integer/benches/average.rs414
-rw-r--r--rust/vendor/num-integer/benches/gcd.rs176
-rw-r--r--rust/vendor/num-integer/benches/roots.rs170
-rw-r--r--rust/vendor/num-integer/build.rs13
-rw-r--r--rust/vendor/num-integer/src/average.rs78
-rw-r--r--rust/vendor/num-integer/src/lib.rs1386
-rw-r--r--rust/vendor/num-integer/src/roots.rs391
-rw-r--r--rust/vendor/num-integer/tests/average.rs100
-rw-r--r--rust/vendor/num-integer/tests/roots.rs272
-rw-r--r--rust/vendor/num-iter/.cargo-checksum.json1
-rw-r--r--rust/vendor/num-iter/Cargo.toml61
-rw-r--r--rust/vendor/num-iter/LICENSE-APACHE201
-rw-r--r--rust/vendor/num-iter/LICENSE-MIT25
-rw-r--r--rust/vendor/num-iter/README.md64
-rw-r--r--rust/vendor/num-iter/RELEASES.md88
-rw-r--r--rust/vendor/num-iter/build.rs19
-rw-r--r--rust/vendor/num-iter/src/lib.rs734
-rw-r--r--rust/vendor/num-rational/.cargo-checksum.json1
-rw-r--r--rust/vendor/num-rational/Cargo.toml54
-rw-r--r--rust/vendor/num-rational/LICENSE-APACHE201
-rw-r--r--rust/vendor/num-rational/LICENSE-MIT25
-rw-r--r--rust/vendor/num-rational/README.md46
-rw-r--r--rust/vendor/num-rational/RELEASES.md91
-rw-r--r--rust/vendor/num-rational/build.rs20
-rw-r--r--rust/vendor/num-rational/src/lib.rs2516
-rw-r--r--rust/vendor/num-traits-0.1.43/.cargo-checksum.json1
-rw-r--r--rust/vendor/num-traits-0.1.43/Cargo.toml29
-rw-r--r--rust/vendor/num-traits-0.1.43/LICENSE-APACHE201
-rw-r--r--rust/vendor/num-traits-0.1.43/LICENSE-MIT25
-rw-r--r--rust/vendor/num-traits-0.1.43/README.md33
-rw-r--r--rust/vendor/num-traits-0.1.43/RELEASES.md41
-rw-r--r--rust/vendor/num-traits-0.1.43/bors.toml3
-rwxr-xr-xrust/vendor/num-traits-0.1.43/ci/rustup.sh12
-rwxr-xr-xrust/vendor/num-traits-0.1.43/ci/test_full.sh11
-rw-r--r--rust/vendor/num-traits-0.1.43/src/lib.rs88
-rw-r--r--rust/vendor/num-traits/.cargo-checksum.json1
-rw-r--r--rust/vendor/num-traits/Cargo.toml54
-rw-r--r--rust/vendor/num-traits/LICENSE-APACHE201
-rw-r--r--rust/vendor/num-traits/LICENSE-MIT25
-rw-r--r--rust/vendor/num-traits/README.md58
-rw-r--r--rust/vendor/num-traits/RELEASES.md283
-rw-r--r--rust/vendor/num-traits/build.rs24
-rw-r--r--rust/vendor/num-traits/src/bounds.rs148
-rw-r--r--rust/vendor/num-traits/src/cast.rs778
-rw-r--r--rust/vendor/num-traits/src/float.rs2344
-rw-r--r--rust/vendor/num-traits/src/identities.rs202
-rw-r--r--rust/vendor/num-traits/src/int.rs565
-rw-r--r--rust/vendor/num-traits/src/lib.rs635
-rw-r--r--rust/vendor/num-traits/src/macros.rs44
-rw-r--r--rust/vendor/num-traits/src/ops/bytes.rs403
-rw-r--r--rust/vendor/num-traits/src/ops/checked.rs261
-rw-r--r--rust/vendor/num-traits/src/ops/euclid.rs339
-rw-r--r--rust/vendor/num-traits/src/ops/inv.rs47
-rw-r--r--rust/vendor/num-traits/src/ops/mod.rs8
-rw-r--r--rust/vendor/num-traits/src/ops/mul_add.rs149
-rw-r--r--rust/vendor/num-traits/src/ops/overflowing.rs96
-rw-r--r--rust/vendor/num-traits/src/ops/saturating.rs130
-rw-r--r--rust/vendor/num-traits/src/ops/wrapping.rs327
-rw-r--r--rust/vendor/num-traits/src/pow.rs242
-rw-r--r--rust/vendor/num-traits/src/real.rs834
-rw-r--r--rust/vendor/num-traits/src/sign.rs216
-rw-r--r--rust/vendor/num-traits/tests/cast.rs387
-rw-r--r--rust/vendor/num/.cargo-checksum.json1
-rw-r--r--rust/vendor/num/Cargo.toml62
-rw-r--r--rust/vendor/num/LICENSE-APACHE201
-rw-r--r--rust/vendor/num/LICENSE-MIT25
-rw-r--r--rust/vendor/num/README.md124
-rw-r--r--rust/vendor/num/RELEASES.md62
-rw-r--r--rust/vendor/num/src/lib.rs115
-rw-r--r--rust/vendor/num_enum/.cargo-checksum.json1
-rw-r--r--rust/vendor/num_enum/Cargo.toml64
-rw-r--r--rust/vendor/num_enum/LICENSE-APACHE176
-rw-r--r--rust/vendor/num_enum/LICENSE-BSD27
-rw-r--r--rust/vendor/num_enum/LICENSE-MIT23
-rw-r--r--rust/vendor/num_enum/README.md277
-rw-r--r--rust/vendor/num_enum/src/lib.rs67
-rw-r--r--rust/vendor/num_enum/tests/default.rs33
-rw-r--r--rust/vendor/num_enum/tests/from_primitive.rs144
-rw-r--r--rust/vendor/num_enum/tests/into_primitive.rs47
-rw-r--r--rust/vendor/num_enum/tests/renamed_num_enum.rs33
-rw-r--r--rust/vendor/num_enum/tests/try_build.rs123
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.rs10
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant.rs12
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.rs10
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/alternative_exprs.rs13
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/alternative_exprs.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_multiple_fields.rs9
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_multiple_fields.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_non_tuple.rs9
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_non_tuple.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_type_mismatch.rs9
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_type_mismatch.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_default.rs9
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_default.stderr9
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_derive.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_derive.stderr20
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all.rs10
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_alt.rs10
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_alt.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant.rs10
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.rs10
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.rs13
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.rs13
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/garbage_attribute.rs12
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/garbage_attribute.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/missing_default.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/missing_default.stderr7
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/missing_repr.rs7
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/missing_repr.stderr7
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all.rs10
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all_same_variant.rs10
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all_same_variant.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults.rs13
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults_different_kinds.rs13
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults_different_kinds.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_num_enum_defaults.rs13
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/multiple_num_enum_defaults.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/repr_c.rs8
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/repr_c.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_alternatives.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_alternatives.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_default.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_default.stderr5
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/variants_with_fields.rs22
-rw-r--r--rust/vendor/num_enum/tests/try_build/compile_fail/variants_with_fields.stderr17
-rw-r--r--rust/vendor/num_enum/tests/try_build/pass/default_and_alternatives.rs66
-rw-r--r--rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_try_from.rs40
-rw-r--r--rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_via_alternatives.rs40
-rw-r--r--rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_via_default.rs39
-rw-r--r--rust/vendor/num_enum/tests/try_build/pass/features/complex-expressions/alternate_exprs_exhaustive_with_range.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_build/pass/features/complex-expressions/alternate_exprs_non_exhaustive_with_range.rs11
-rw-r--r--rust/vendor/num_enum/tests/try_from_primitive.rs504
-rw-r--r--rust/vendor/num_enum/tests/unsafe_from_primitive.rs22
-rw-r--r--rust/vendor/num_enum_derive/.cargo-checksum.json1
-rw-r--r--rust/vendor/num_enum_derive/Cargo.toml52
-rw-r--r--rust/vendor/num_enum_derive/LICENSE-APACHE176
-rw-r--r--rust/vendor/num_enum_derive/LICENSE-BSD27
-rw-r--r--rust/vendor/num_enum_derive/LICENSE-MIT23
-rw-r--r--rust/vendor/num_enum_derive/README.md277
-rw-r--r--rust/vendor/num_enum_derive/src/lib.rs1066
-rw-r--r--rust/vendor/num_threads/.cargo-checksum.json1
-rw-r--r--rust/vendor/num_threads/Cargo.toml36
-rw-r--r--rust/vendor/num_threads/LICENSE-Apache202
-rw-r--r--rust/vendor/num_threads/LICENSE-MIT19
-rw-r--r--rust/vendor/num_threads/src/apple.rs45
-rw-r--r--rust/vendor/num_threads/src/freebsd.rs36
-rw-r--r--rust/vendor/num_threads/src/imp.rs7
-rw-r--r--rust/vendor/num_threads/src/lib.rs64
-rw-r--r--rust/vendor/num_threads/src/linux.rs14
284 files changed, 57014 insertions, 0 deletions
diff --git a/rust/vendor/num-bigint-0.2.6/.cargo-checksum.json b/rust/vendor/num-bigint-0.2.6/.cargo-checksum.json
new file mode 100644
index 0000000..390f95e
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"e10df83bce937fc2d2ee258af173fee42891dc2d5bc3217955935669bdcc6dfb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"00ff4891b409b25220a0f863c772a786c81cf291fab894963a2c8d7a251f67fd","RELEASES.md":"e712d5bdd58cc82a34f15f7926b4a7a65022ededc3069375fc54ab63eba2d133","benches/bigint.rs":"252c0dc1f220a6fbdc151e729069260c2f5909516467ceb873e412e5691d7042","benches/factorial.rs":"d536f5584987847f10321b94175a0d8fd2beb14b7c814ec28eef1f96ca081fbe","benches/gcd.rs":"7ec5ce7174e1d31bd08ccc5670f5a32a5c084f258d7980cd6d02e0a8bb5562c4","benches/roots.rs":"3f87db894c379122aee5cd8520c7c759c26d8a9649ac47f45d1bf4d560e1cb07","benches/shootout-pidigits.rs":"985b76d6dba05c396efe4da136c6a0bb2c02bcf5b05cbb346f0f802a891629bb","bors.toml":"1c81ede536a37edd30fe4e622ff0531b25372403ac9475a5d6c50f14156565a2","build.rs":"b4b2d0df90ca7570a339ca4d84a72e4ef00d9dced8927350424e666790c752d7","ci/rustup.sh":"c976bb2756da3876363b01fdbf06c13de20df421e5add45e4017c4df42ed06a6","ci/test_full.sh":"a0ac26b85809eb43edd813c9dc88f34a1a8227b7618f4bede89c8f2ac9a4c05a","src/algorithms.rs":"96220e32bd2496fbcba07ba35ef21e08e12a38d85d9724b33877c078040caf99","src/bigint.rs":"5d4888db1d8487a5c73bd4ee23967f4ba04f6f6edb4c3212902297e61f628141","src/bigrand.rs":"025b795928efa69592da2a7f54a60b0b72105ec6bae652f1f1b6402d6ca7bf3a","src/biguint.rs":"068abe461e9ba0582a2a18aede236ffdfe0cffc39fae9688f6a0873af6935b9b","src/lib.rs":"0f4280a9ffd3b8465ecaf9a7c99087bf18e5242b31d0311ac15bec4e995e5a41","src/macros.rs":"327691e39e31ac2d708e2bb02b50338d57cb0d3ca7107a30117df391bf781714","src/monty.rs":"ecdacb02e7d0a64b249a3395bf33c26f62b1ca05908f2eab0edd7f8f7a2ad7ae","tests/bigint.rs":"f7df454f085a862ad5a98e3a802303a3fdf06275a7a1b92074b40b76a715bed2","tests/bigint_bitwise.rs":"dc9436c8f200f2b0ac08cefb23bb8e39c4e688e9026a506a678416c3d573128b","tests/bigint_scalar.rs":"fddaa72911cd22cd34df130fee65dc1a1447ddbd9dfe5491b15e7a5868ee49e7","tests/biguint.rs":"9ae79f96d1a3beca5be95dffe9d79dc3436f886edc6cae51faf4203c3e0c4681","tests/biguint_scalar.rs":"4601e36b78bb177893f3abbd0a4050030eb8a65d3e57cdf005aea0a5b811adde","tests/consts/mod.rs":"f9ea5f40733e2f5f432803d830be9db929d91e5e5efd8510b07c6ced2fe554be","tests/macros/mod.rs":"5e73339e8baa9fc2c3f5b9097f9909091b5e4b47f905ffdf7da81d4647c6141c","tests/modpow.rs":"e21e46a97fc5da7eed1db5d6a52ac7ba50b78532f1a97aa46d41867d4848282c","tests/quickcheck.rs":"a3c68279b50cc35ec2856b74ac3f3265457dd788ed6138a14dd16a32868f22ba","tests/rand.rs":"08370135bd78432660cfcd708a9ea852022d555bc92c1f3c482fabd17faa64a0","tests/roots.rs":"9ec1bdb0cd1c72402a41e5470325a5276af75979b7fc0f0b63e7bbbb9f3505b2","tests/serde.rs":"79d7a0347207b3a3666af67d2ed97fa34f2922732121a3cb8f5b9f990846acfa","tests/torture.rs":"9fe4897580c0ebe2b7062f5b0b890b4b03510daa45c9236f0edba7144f9eb6f8"},"package":"090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304"} \ No newline at end of file
diff --git a/rust/vendor/num-bigint-0.2.6/Cargo.toml b/rust/vendor/num-bigint-0.2.6/Cargo.toml
new file mode 100644
index 0000000..9b99bae
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/Cargo.toml
@@ -0,0 +1,81 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "num-bigint"
+version = "0.2.6"
+authors = ["The Rust Project Developers"]
+build = "build.rs"
+description = "Big integer implementation for Rust"
+homepage = "https://github.com/rust-num/num-bigint"
+documentation = "https://docs.rs/num-bigint"
+readme = "README.md"
+keywords = ["mathematics", "numerics", "bignum"]
+categories = ["algorithms", "data-structures", "science"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/rust-num/num-bigint"
+[package.metadata.docs.rs]
+features = ["std", "serde", "rand", "quickcheck"]
+
+[[bench]]
+name = "bigint"
+
+[[bench]]
+name = "factorial"
+
+[[bench]]
+name = "gcd"
+
+[[bench]]
+name = "roots"
+
+[[bench]]
+name = "shootout-pidigits"
+harness = false
+[dependencies.num-integer]
+version = "0.1.42"
+default-features = false
+
+[dependencies.num-traits]
+version = "0.2.11"
+default-features = false
+
+[dependencies.quickcheck]
+version = "0.8"
+optional = true
+default-features = false
+
+[dependencies.quickcheck_macros]
+version = "0.8"
+optional = true
+default-features = false
+
+[dependencies.rand]
+version = "0.5"
+features = ["std"]
+optional = true
+default-features = false
+
+[dependencies.serde]
+version = "1.0"
+features = ["std"]
+optional = true
+default-features = false
+[dev-dependencies.serde_test]
+version = "1.0"
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+default = ["std"]
+i128 = ["num-integer/i128", "num-traits/i128"]
+std = ["num-integer/std", "num-traits/std"]
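The `[features]` table and optional dependencies above are what a consumer toggles from its own manifest. As an illustrative sketch (editor's note, not part of the vendored file), a downstream `Cargo.toml` forcing the optional `i128` and `serde` features could look like:

```toml
# Hypothetical downstream manifest (illustration only): enables the optional
# `i128` and `serde` features declared in the vendored Cargo.toml above.
[dependencies]
num-bigint = { version = "0.2", features = ["i128", "serde"] }
```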
diff --git a/rust/vendor/num-bigint-0.2.6/LICENSE-APACHE b/rust/vendor/num-bigint-0.2.6/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num-bigint-0.2.6/LICENSE-MIT b/rust/vendor/num-bigint-0.2.6/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num-bigint-0.2.6/README.md b/rust/vendor/num-bigint-0.2.6/README.md
new file mode 100644
index 0000000..4691c9a
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/README.md
@@ -0,0 +1,76 @@
+# num-bigint
+
+[![crate](https://img.shields.io/crates/v/num-bigint.svg)](https://crates.io/crates/num-bigint)
+[![documentation](https://docs.rs/num-bigint/badge.svg)](https://docs.rs/num-bigint)
+![minimum rustc 1.15](https://img.shields.io/badge/rustc-1.15+-red.svg)
+[![Travis status](https://travis-ci.org/rust-num/num-bigint.svg?branch=master)](https://travis-ci.org/rust-num/num-bigint)
+
+Big integer types for Rust, `BigInt` and `BigUint`.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-bigint = "0.2"
+```
+
+and this to your crate root:
+
+```rust
+extern crate num_bigint;
+```
+
+## Features
+
+The `std` crate feature is mandatory and enabled by default. If you depend on
+`num-bigint` with `default-features = false`, you must manually enable the
+`std` feature yourself. In the future, we hope to support `#![no_std]` with
+the `alloc` crate when `std` is not enabled.
+
+Implementations for `i128` and `u128` are only available with Rust 1.26 and
+later. The build script automatically detects this, but you can make it
+mandatory by enabling the `i128` crate feature.
+
+### Random Generation
+
+`num-bigint` supports the generation of random big integers when the `rand`
+feature is enabled. To enable it include rand as
+
+```toml
+rand = "0.5"
+num-bigint = { version = "0.2", features = ["rand"] }
+```
+
+Note that you must use the version of `rand` that `num-bigint` is compatible
+with: `0.5`.
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-bigint` crate is tested for rustc 1.15 and greater.
+
+## Alternatives
+
+While `num-bigint` strives for good performance in pure Rust code, other
+crates may offer better performance with different trade-offs. The following
+table offers a brief comparison to a few alternatives.
+
+| Crate | License | Min rustc | Implementation |
+| :--------------- | :------------- | :-------- | :------------- |
+| **`num-bigint`** | MIT/Apache-2.0 | 1.15 | pure rust |
+| [`ramp`] | Apache-2.0 | nightly | rust and inline assembly |
+| [`rug`] | LGPL-3.0+ | 1.31 | bundles [GMP] via [`gmp-mpfr-sys`] |
+| [`rust-gmp`] | MIT | stable? | links to [GMP] |
+| [`apint`] | MIT/Apache-2.0 | 1.26 | pure rust (unfinished) |
+
+[GMP]: https://gmplib.org/
+[`gmp-mpfr-sys`]: https://crates.io/crates/gmp-mpfr-sys
+[`rug`]: https://crates.io/crates/rug
+[`rust-gmp`]: https://crates.io/crates/rust-gmp
+[`ramp`]: https://crates.io/crates/ramp
+[`apint`]: https://crates.io/crates/apint
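As a minimal usage sketch of the 0.2-era API this README describes (an editor-supplied illustration, not part of the upstream file; it assumes only the default `std` feature):

```rust
// Illustrative only: compute 100! with BigUint, per the README's usage notes above.
extern crate num_bigint;
extern crate num_traits;

use num_bigint::BigUint;
use num_traits::One;

fn main() {
    // Fold multiplication over 1..=100; BigUint grows as needed.
    let f: BigUint = (1u32..101).map(BigUint::from).fold(BigUint::one(), |a, b| a * b);
    println!("100! has {} decimal digits", f.to_string().len());
}
```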
diff --git a/rust/vendor/num-bigint-0.2.6/RELEASES.md b/rust/vendor/num-bigint-0.2.6/RELEASES.md
new file mode 100644
index 0000000..358534e
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/RELEASES.md
@@ -0,0 +1,166 @@
+# Release 0.2.6 (2020-01-27)
+
+- [Fix the promotion of negative `isize` in `BigInt` assign-ops][133].
+
+**Contributors**: @cuviper, @HactarCE
+
+[133]: https://github.com/rust-num/num-bigint/pull/133
+
+# Release 0.2.5 (2020-01-09)
+
+- [Updated the `autocfg` build dependency to 1.0][126].
+
+**Contributors**: @cuviper, @tspiteri
+
+[126]: https://github.com/rust-num/num-bigint/pull/126
+
+# Release 0.2.4 (2020-01-01)
+
+- [The new `BigUint::to_u32_digits` method][104] returns the number as a
+ little-endian vector of base-2<sup>32</sup> digits. The same method on
+ `BigInt` also returns the sign.
+- [`BigUint::modpow` now applies a modulus even for exponent 1][113], which
+ also affects `BigInt::modpow`.
+- [`BigInt::modpow` now returns the correct sign for negative bases with even
+ exponents][114].
+
+[104]: https://github.com/rust-num/num-bigint/pull/104
+[113]: https://github.com/rust-num/num-bigint/pull/113
+[114]: https://github.com/rust-num/num-bigint/pull/114
+
+**Contributors**: @alex-ozdemir, @cuviper, @dingelish, @Speedy37, @youknowone
+
+# Release 0.2.3 (2019-09-03)
+
+- [`Pow` is now implemented for `BigUint` exponents][77].
+- [The optional `quickcheck` feature enables implementations of `Arbitrary`][99].
+- See the [full comparison][compare-0.2.3] for performance enhancements and more!
+
+[77]: https://github.com/rust-num/num-bigint/pull/77
+[99]: https://github.com/rust-num/num-bigint/pull/99
+[compare-0.2.3]: https://github.com/rust-num/num-bigint/compare/num-bigint-0.2.2...num-bigint-0.2.3
+
+**Contributors**: @cuviper, @lcnr, @maxbla, @mikelodder7, @mikong,
+@TheLetterTheta, @tspiteri, @XAMPPRocky, @youknowone
+
+# Release 0.2.2 (2018-12-14)
+
+- [The `Roots` implementations now use better initial guesses][71].
+- [Fixed `to_signed_bytes_*` for some positive numbers][72], where the
+ most-significant byte is `0x80` and the rest are `0`.
+
+[71]: https://github.com/rust-num/num-bigint/pull/71
+[72]: https://github.com/rust-num/num-bigint/pull/72
+
+**Contributors**: @cuviper, @leodasvacas
+
+# Release 0.2.1 (2018-11-02)
+
+- [`RandBigInt` now uses `Rng::fill_bytes`][53] to improve performance, instead
+  of repeated `gen::<u32>` calls. This also affects the implementations of the
+ other `rand` traits. This may potentially change the values produced by some
+ seeded RNGs on previous versions, but the values were tested to be stable
+ with `ChaChaRng`, `IsaacRng`, and `XorShiftRng`.
+- [`BigInt` and `BigUint` now implement `num_integer::Roots`][56].
+- [`BigInt` and `BigUint` now implement `num_traits::Pow`][54].
+- [`BigInt` and `BigUint` now implement operators with 128-bit integers][64].
+
+**Contributors**: @cuviper, @dignifiedquire, @mancabizjak, @Robbepop,
+@TheIronBorn, @thomwiggers
+
+[53]: https://github.com/rust-num/num-bigint/pull/53
+[54]: https://github.com/rust-num/num-bigint/pull/54
+[56]: https://github.com/rust-num/num-bigint/pull/56
+[64]: https://github.com/rust-num/num-bigint/pull/64
+
+# Release 0.2.0 (2018-05-25)
+
+### Enhancements
+
+- [`BigInt` and `BigUint` now implement `Product` and `Sum`][22] for iterators
+ of any item that we can `Mul` and `Add`, respectively. For example, a
+ factorial can now be simply: `let f: BigUint = (1u32..1000).product();`
+- [`BigInt` now supports two's-complement logic operations][26], namely
+ `BitAnd`, `BitOr`, `BitXor`, and `Not`. These act conceptually as if each
+ number had an infinite prefix of `0` or `1` bits for positive or negative.
+- [`BigInt` now supports assignment operators][41] like `AddAssign`.
+- [`BigInt` and `BigUint` now support conversions with `i128` and `u128`][44],
+ if sufficient compiler support is detected.
+- [`BigInt` and `BigUint` now implement rand's `SampleUniform` trait][48], and
+ [a custom `RandomBits` distribution samples by bit size][49].
+- The release also includes other miscellaneous improvements to performance.
+
+### Breaking Changes
+
+- [`num-bigint` now requires rustc 1.15 or greater][23].
+- [The crate now has a `std` feature, and won't build without it][46]. This is
+ in preparation for someday supporting `#![no_std]` with `alloc`.
+- [The `serde` dependency has been updated to 1.0][24], still disabled by
+ default. The `rustc-serialize` crate is no longer supported by `num-bigint`.
+- [The `rand` dependency has been updated to 0.5][48], now disabled by default.
+ This requires rustc 1.22 or greater for `rand`'s own requirement.
+- [`Shr for BigInt` now rounds down][8] rather than toward zero, matching the
+ behavior of the primitive integers for negative values.
+- [`ParseBigIntError` is now an opaque type][37].
+- [The `big_digit` module is no longer public][38], nor are the `BigDigit` and
+ `DoubleBigDigit` types and `ZERO_BIG_DIGIT` constant that were re-exported in
+ the crate root. Public APIs which deal in digits, like `BigUint::from_slice`,
+ will now always be base-`u32`.
+
+**Contributors**: @clarcharr, @cuviper, @dodomorandi, @tiehuis, @tspiteri
+
+[8]: https://github.com/rust-num/num-bigint/pull/8
+[22]: https://github.com/rust-num/num-bigint/pull/22
+[23]: https://github.com/rust-num/num-bigint/pull/23
+[24]: https://github.com/rust-num/num-bigint/pull/24
+[26]: https://github.com/rust-num/num-bigint/pull/26
+[37]: https://github.com/rust-num/num-bigint/pull/37
+[38]: https://github.com/rust-num/num-bigint/pull/38
+[41]: https://github.com/rust-num/num-bigint/pull/41
+[44]: https://github.com/rust-num/num-bigint/pull/44
+[46]: https://github.com/rust-num/num-bigint/pull/46
+[48]: https://github.com/rust-num/num-bigint/pull/48
+[49]: https://github.com/rust-num/num-bigint/pull/49
+
+# Release 0.1.44 (2018-05-14)
+
+- [Division with single-digit divisors is now much faster.][42]
+- The README now compares [`ramp`, `rug`, `rust-gmp`][20], and [`apint`][21].
+
+**Contributors**: @cuviper, @Robbepop
+
+[20]: https://github.com/rust-num/num-bigint/pull/20
+[21]: https://github.com/rust-num/num-bigint/pull/21
+[42]: https://github.com/rust-num/num-bigint/pull/42
+
+# Release 0.1.43 (2018-02-08)
+
+- [The new `BigInt::modpow`][18] performs signed modular exponentiation, using
+ the existing `BigUint::modpow` and rounding negatives similar to `mod_floor`.
+
+**Contributors**: @cuviper
+
+[18]: https://github.com/rust-num/num-bigint/pull/18
+
+
+# Release 0.1.42 (2018-02-07)
+
+- [num-bigint now has its own source repository][num-356] at [rust-num/num-bigint][home].
+- [`lcm` now avoids creating a large intermediate product][num-350].
+- [`gcd` now uses Stein's algorithm][15] with faster shifts instead of division.
+- [`rand` support is now extended to 0.4][11] (while still allowing 0.3).
+
+**Contributors**: @cuviper, @Emerentius, @ignatenkobrain, @mhogrefe
+
+[home]: https://github.com/rust-num/num-bigint
+[num-350]: https://github.com/rust-num/num/pull/350
+[num-356]: https://github.com/rust-num/num/pull/356
+[11]: https://github.com/rust-num/num-bigint/pull/11
+[15]: https://github.com/rust-num/num-bigint/pull/15
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
+
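Several of the notes above concern modular exponentiation (`BigInt::modpow` in 0.1.43, the exponent-1 fix in 0.2.4). A small editor-supplied sketch of the `BigUint` form of that API, not taken from the upstream changelog:

```rust
// Illustrative only: BigUint::modpow as referenced in the release notes above.
extern crate num_bigint;

use num_bigint::BigUint;

fn main() {
    let base = BigUint::from(4u32);
    let exponent = BigUint::from(13u32);
    let modulus = BigUint::from(497u32);
    // 4^13 = 67_108_864, and 67_108_864 mod 497 = 445.
    assert_eq!(base.modpow(&exponent, &modulus), BigUint::from(445u32));
}
```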
diff --git a/rust/vendor/num-bigint-0.2.6/benches/bigint.rs b/rust/vendor/num-bigint-0.2.6/benches/bigint.rs
new file mode 100644
index 0000000..bc0875d
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/benches/bigint.rs
@@ -0,0 +1,368 @@
+#![feature(test)]
+#![cfg(feature = "rand")]
+
+extern crate num_bigint;
+extern crate num_integer;
+extern crate num_traits;
+extern crate rand;
+extern crate test;
+
+use num_bigint::{BigInt, BigUint, RandBigInt};
+use num_traits::{FromPrimitive, Num, One, Pow, Zero};
+use rand::{SeedableRng, StdRng};
+use std::mem::replace;
+use test::Bencher;
+
+fn get_rng() -> StdRng {
+ let mut seed = [0; 32];
+ for i in 1..32 {
+ seed[usize::from(i)] = i;
+ }
+ SeedableRng::from_seed(seed)
+}
+
+fn multiply_bench(b: &mut Bencher, xbits: usize, ybits: usize) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(xbits);
+ let y = rng.gen_bigint(ybits);
+
+ b.iter(|| &x * &y);
+}
+
+fn divide_bench(b: &mut Bencher, xbits: usize, ybits: usize) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(xbits);
+ let y = rng.gen_bigint(ybits);
+
+ b.iter(|| &x / &y);
+}
+
+fn remainder_bench(b: &mut Bencher, xbits: usize, ybits: usize) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(xbits);
+ let y = rng.gen_bigint(ybits);
+
+ b.iter(|| &x % &y);
+}
+
+fn factorial(n: usize) -> BigUint {
+ let mut f: BigUint = One::one();
+ for i in 1..(n + 1) {
+ let bu: BigUint = FromPrimitive::from_usize(i).unwrap();
+ f = f * bu;
+ }
+ f
+}
+
+/// Compute Fibonacci numbers
+fn fib(n: usize) -> BigUint {
+ let mut f0: BigUint = Zero::zero();
+ let mut f1: BigUint = One::one();
+ for _ in 0..n {
+ let f2 = f0 + &f1;
+ f0 = replace(&mut f1, f2);
+ }
+ f0
+}
+
+/// Compute Fibonacci numbers with two ops per iteration
+/// (add and subtract, like issue #200)
+fn fib2(n: usize) -> BigUint {
+ let mut f0: BigUint = Zero::zero();
+ let mut f1: BigUint = One::one();
+ for _ in 0..n {
+ f1 = f1 + &f0;
+ f0 = &f1 - f0;
+ }
+ f0
+}
+
+#[bench]
+fn multiply_0(b: &mut Bencher) {
+ multiply_bench(b, 1 << 8, 1 << 8);
+}
+
+#[bench]
+fn multiply_1(b: &mut Bencher) {
+ multiply_bench(b, 1 << 8, 1 << 16);
+}
+
+#[bench]
+fn multiply_2(b: &mut Bencher) {
+ multiply_bench(b, 1 << 16, 1 << 16);
+}
+
+#[bench]
+fn multiply_3(b: &mut Bencher) {
+ multiply_bench(b, 1 << 16, 1 << 17);
+}
+
+#[bench]
+fn divide_0(b: &mut Bencher) {
+ divide_bench(b, 1 << 8, 1 << 6);
+}
+
+#[bench]
+fn divide_1(b: &mut Bencher) {
+ divide_bench(b, 1 << 12, 1 << 8);
+}
+
+#[bench]
+fn divide_2(b: &mut Bencher) {
+ divide_bench(b, 1 << 16, 1 << 12);
+}
+
+#[bench]
+fn remainder_0(b: &mut Bencher) {
+ remainder_bench(b, 1 << 8, 1 << 6);
+}
+
+#[bench]
+fn remainder_1(b: &mut Bencher) {
+ remainder_bench(b, 1 << 12, 1 << 8);
+}
+
+#[bench]
+fn remainder_2(b: &mut Bencher) {
+ remainder_bench(b, 1 << 16, 1 << 12);
+}
+
+#[bench]
+fn factorial_100(b: &mut Bencher) {
+ b.iter(|| factorial(100));
+}
+
+#[bench]
+fn fib_100(b: &mut Bencher) {
+ b.iter(|| fib(100));
+}
+
+#[bench]
+fn fib_1000(b: &mut Bencher) {
+ b.iter(|| fib(1000));
+}
+
+#[bench]
+fn fib_10000(b: &mut Bencher) {
+ b.iter(|| fib(10000));
+}
+
+#[bench]
+fn fib2_100(b: &mut Bencher) {
+ b.iter(|| fib2(100));
+}
+
+#[bench]
+fn fib2_1000(b: &mut Bencher) {
+ b.iter(|| fib2(1000));
+}
+
+#[bench]
+fn fib2_10000(b: &mut Bencher) {
+ b.iter(|| fib2(10000));
+}
+
+#[bench]
+fn fac_to_string(b: &mut Bencher) {
+ let fac = factorial(100);
+ b.iter(|| fac.to_string());
+}
+
+#[bench]
+fn fib_to_string(b: &mut Bencher) {
+ let fib = fib(100);
+ b.iter(|| fib.to_string());
+}
+
+fn to_str_radix_bench(b: &mut Bencher, radix: u32) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(1009);
+ b.iter(|| x.to_str_radix(radix));
+}
+
+#[bench]
+fn to_str_radix_02(b: &mut Bencher) {
+ to_str_radix_bench(b, 2);
+}
+
+#[bench]
+fn to_str_radix_08(b: &mut Bencher) {
+ to_str_radix_bench(b, 8);
+}
+
+#[bench]
+fn to_str_radix_10(b: &mut Bencher) {
+ to_str_radix_bench(b, 10);
+}
+
+#[bench]
+fn to_str_radix_16(b: &mut Bencher) {
+ to_str_radix_bench(b, 16);
+}
+
+#[bench]
+fn to_str_radix_36(b: &mut Bencher) {
+ to_str_radix_bench(b, 36);
+}
+
+fn from_str_radix_bench(b: &mut Bencher, radix: u32) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(1009);
+ let s = x.to_str_radix(radix);
+ assert_eq!(x, BigInt::from_str_radix(&s, radix).unwrap());
+ b.iter(|| BigInt::from_str_radix(&s, radix));
+}
+
+#[bench]
+fn from_str_radix_02(b: &mut Bencher) {
+ from_str_radix_bench(b, 2);
+}
+
+#[bench]
+fn from_str_radix_08(b: &mut Bencher) {
+ from_str_radix_bench(b, 8);
+}
+
+#[bench]
+fn from_str_radix_10(b: &mut Bencher) {
+ from_str_radix_bench(b, 10);
+}
+
+#[bench]
+fn from_str_radix_16(b: &mut Bencher) {
+ from_str_radix_bench(b, 16);
+}
+
+#[bench]
+fn from_str_radix_36(b: &mut Bencher) {
+ from_str_radix_bench(b, 36);
+}
+
+fn rand_bench(b: &mut Bencher, bits: usize) {
+ let mut rng = get_rng();
+
+ b.iter(|| rng.gen_bigint(bits));
+}
+
+#[bench]
+fn rand_64(b: &mut Bencher) {
+ rand_bench(b, 1 << 6);
+}
+
+#[bench]
+fn rand_256(b: &mut Bencher) {
+ rand_bench(b, 1 << 8);
+}
+
+#[bench]
+fn rand_1009(b: &mut Bencher) {
+ rand_bench(b, 1009);
+}
+
+#[bench]
+fn rand_2048(b: &mut Bencher) {
+ rand_bench(b, 1 << 11);
+}
+
+#[bench]
+fn rand_4096(b: &mut Bencher) {
+ rand_bench(b, 1 << 12);
+}
+
+#[bench]
+fn rand_8192(b: &mut Bencher) {
+ rand_bench(b, 1 << 13);
+}
+
+#[bench]
+fn rand_65536(b: &mut Bencher) {
+ rand_bench(b, 1 << 16);
+}
+
+#[bench]
+fn rand_131072(b: &mut Bencher) {
+ rand_bench(b, 1 << 17);
+}
+
+#[bench]
+fn shl(b: &mut Bencher) {
+ let n = BigUint::one() << 1000;
+ b.iter(|| {
+ let mut m = n.clone();
+ for i in 0..50 {
+ m = m << i;
+ }
+ })
+}
+
+#[bench]
+fn shr(b: &mut Bencher) {
+ let n = BigUint::one() << 2000;
+ b.iter(|| {
+ let mut m = n.clone();
+ for i in 0..50 {
+ m = m >> i;
+ }
+ })
+}
+
+#[bench]
+fn hash(b: &mut Bencher) {
+ use std::collections::HashSet;
+ let mut rng = get_rng();
+ let v: Vec<BigInt> = (1000..2000).map(|bits| rng.gen_bigint(bits)).collect();
+ b.iter(|| {
+ let h: HashSet<&BigInt> = v.iter().collect();
+ assert_eq!(h.len(), v.len());
+ });
+}
+
+#[bench]
+fn pow_bench(b: &mut Bencher) {
+ b.iter(|| {
+ let upper = 100_usize;
+ for i in 2..upper + 1 {
+ for j in 2..upper + 1 {
+ let i_big = BigUint::from_usize(i).unwrap();
+ i_big.pow(j);
+ }
+ }
+ });
+}
+
+/// This modulus is the prime from the 2048-bit MODP DH group:
+/// https://tools.ietf.org/html/rfc3526#section-3
+const RFC3526_2048BIT_MODP_GROUP: &'static str =
+ "\
+ FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\
+ 29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\
+ EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\
+ E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\
+ EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\
+ C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\
+ 83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\
+ 670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\
+ E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\
+ DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\
+ 15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF";
+
+#[bench]
+fn modpow(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let base = rng.gen_biguint(2048);
+ let e = rng.gen_biguint(2048);
+ let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap();
+
+ b.iter(|| base.modpow(&e, &m));
+}
+
+#[bench]
+fn modpow_even(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let base = rng.gen_biguint(2048);
+ let e = rng.gen_biguint(2048);
+ // Make the modulus even, so monty (base-2^32) doesn't apply.
+ let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap() - 1u32;
+
+ b.iter(|| base.modpow(&e, &m));
+}
diff --git a/rust/vendor/num-bigint-0.2.6/benches/factorial.rs b/rust/vendor/num-bigint-0.2.6/benches/factorial.rs
new file mode 100644
index 0000000..4392df8
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/benches/factorial.rs
@@ -0,0 +1,44 @@
+#![feature(test)]
+
+extern crate num_bigint;
+extern crate num_traits;
+extern crate test;
+
+use num_bigint::BigUint;
+use num_traits::One;
+use std::ops::{Div, Mul};
+use test::Bencher;
+
+#[bench]
+fn factorial_mul_biguint(b: &mut Bencher) {
+ b.iter(|| {
+ (1u32..1000)
+ .map(BigUint::from)
+ .fold(BigUint::one(), Mul::mul)
+ });
+}
+
+#[bench]
+fn factorial_mul_u32(b: &mut Bencher) {
+ b.iter(|| (1u32..1000).fold(BigUint::one(), Mul::mul));
+}
+
+// The division test is inspired by this blog comparison:
+// <https://tiehuis.github.io/big-integers-in-zig#division-test-single-limb>
+
+#[bench]
+fn factorial_div_biguint(b: &mut Bencher) {
+ let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul);
+ b.iter(|| {
+ (1u32..1000)
+ .rev()
+ .map(BigUint::from)
+ .fold(n.clone(), Div::div)
+ });
+}
+
+#[bench]
+fn factorial_div_u32(b: &mut Bencher) {
+ let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul);
+ b.iter(|| (1u32..1000).rev().fold(n.clone(), Div::div));
+}
diff --git a/rust/vendor/num-bigint-0.2.6/benches/gcd.rs b/rust/vendor/num-bigint-0.2.6/benches/gcd.rs
new file mode 100644
index 0000000..5fe5260
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/benches/gcd.rs
@@ -0,0 +1,86 @@
+#![feature(test)]
+#![cfg(feature = "rand")]
+
+extern crate num_bigint;
+extern crate num_integer;
+extern crate num_traits;
+extern crate rand;
+extern crate test;
+
+use num_bigint::{BigUint, RandBigInt};
+use num_integer::Integer;
+use num_traits::Zero;
+use rand::{SeedableRng, StdRng};
+use test::Bencher;
+
+fn get_rng() -> StdRng {
+ let mut seed = [0; 32];
+ for i in 1..32 {
+ seed[usize::from(i)] = i;
+ }
+ SeedableRng::from_seed(seed)
+}
+
+fn bench(b: &mut Bencher, bits: usize, gcd: fn(&BigUint, &BigUint) -> BigUint) {
+ let mut rng = get_rng();
+ let x = rng.gen_biguint(bits);
+ let y = rng.gen_biguint(bits);
+
+ assert_eq!(euclid(&x, &y), x.gcd(&y));
+
+ b.iter(|| gcd(&x, &y));
+}
+
+fn euclid(x: &BigUint, y: &BigUint) -> BigUint {
+ // Use Euclid's algorithm
+ let mut m = x.clone();
+ let mut n = y.clone();
+ while !m.is_zero() {
+ let temp = m;
+ m = n % &temp;
+ n = temp;
+ }
+ return n;
+}
+
+#[bench]
+fn gcd_euclid_0064(b: &mut Bencher) {
+ bench(b, 64, euclid);
+}
+
+#[bench]
+fn gcd_euclid_0256(b: &mut Bencher) {
+ bench(b, 256, euclid);
+}
+
+#[bench]
+fn gcd_euclid_1024(b: &mut Bencher) {
+ bench(b, 1024, euclid);
+}
+
+#[bench]
+fn gcd_euclid_4096(b: &mut Bencher) {
+ bench(b, 4096, euclid);
+}
+
+// Integer for BigUint now uses Stein for gcd
+
+#[bench]
+fn gcd_stein_0064(b: &mut Bencher) {
+ bench(b, 64, BigUint::gcd);
+}
+
+#[bench]
+fn gcd_stein_0256(b: &mut Bencher) {
+ bench(b, 256, BigUint::gcd);
+}
+
+#[bench]
+fn gcd_stein_1024(b: &mut Bencher) {
+ bench(b, 1024, BigUint::gcd);
+}
+
+#[bench]
+fn gcd_stein_4096(b: &mut Bencher) {
+ bench(b, 4096, BigUint::gcd);
+}
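
As a point of reference for the comment above that `Integer` for `BigUint` now uses Stein's algorithm, here is a minimal binary-GCD sketch on plain u64 (editor's illustration only; the helper name is made up and this is not num-integer's actual implementation):

    fn stein_gcd(mut m: u64, mut n: u64) -> u64 {
        if m == 0 { return n; }
        if n == 0 { return m; }
        let shift = (m | n).trailing_zeros(); // common factors of two
        m >>= m.trailing_zeros();             // make m odd
        loop {
            n >>= n.trailing_zeros();         // make n odd
            if m > n { std::mem::swap(&mut m, &mut n); }
            n -= m;                           // n - m is even, m stays odd
            if n == 0 { return m << shift; }
        }
    }

Unlike Euclid's algorithm above, this uses only shifts and subtractions, which is why the `gcd_stein_*` benches scale differently from the `gcd_euclid_*` ones.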
diff --git a/rust/vendor/num-bigint-0.2.6/benches/roots.rs b/rust/vendor/num-bigint-0.2.6/benches/roots.rs
new file mode 100644
index 0000000..51e67d9
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/benches/roots.rs
@@ -0,0 +1,176 @@
+#![feature(test)]
+#![cfg(feature = "rand")]
+
+extern crate num_bigint;
+extern crate num_traits;
+extern crate rand;
+extern crate test;
+
+use num_bigint::{BigUint, RandBigInt};
+use num_traits::Pow;
+use rand::{SeedableRng, StdRng};
+use test::Bencher;
+
+// The `big64` cases demonstrate the speed of cases where the value
+// can be converted to a `u64` primitive for faster calculation.
+//
+// The `big1k` cases demonstrate those that can convert to `f64` for
+// a better initial guess of the actual value.
+//
+// The `big2k` and `big4k` cases are too big for `f64`, and use a simpler guess.
+
+fn get_rng() -> StdRng {
+ let mut seed = [0; 32];
+ for i in 1..32 {
+ seed[usize::from(i)] = i;
+ }
+ SeedableRng::from_seed(seed)
+}
+
+fn check(x: &BigUint, n: u32) {
+ let root = x.nth_root(n);
+ if n == 2 {
+ assert_eq!(root, x.sqrt())
+ } else if n == 3 {
+ assert_eq!(root, x.cbrt())
+ }
+
+ let lo = root.pow(n);
+ assert!(lo <= *x);
+ assert_eq!(lo.nth_root(n), root);
+ assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32);
+
+ let hi = (&root + 1u32).pow(n);
+ assert!(hi > *x);
+ assert_eq!(hi.nth_root(n), &root + 1u32);
+ assert_eq!((&hi - 1u32).nth_root(n), root);
+}
+
+fn bench_sqrt(b: &mut Bencher, bits: usize) {
+ let x = get_rng().gen_biguint(bits);
+ eprintln!("bench_sqrt({})", x);
+
+ check(&x, 2);
+ b.iter(|| x.sqrt());
+}
+
+#[bench]
+fn big64_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 64);
+}
+
+#[bench]
+fn big1k_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 1024);
+}
+
+#[bench]
+fn big2k_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 2048);
+}
+
+#[bench]
+fn big4k_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 4096);
+}
+
+fn bench_cbrt(b: &mut Bencher, bits: usize) {
+ let x = get_rng().gen_biguint(bits);
+ eprintln!("bench_cbrt({})", x);
+
+ check(&x, 3);
+ b.iter(|| x.cbrt());
+}
+
+#[bench]
+fn big64_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 64);
+}
+
+#[bench]
+fn big1k_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 1024);
+}
+
+#[bench]
+fn big2k_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 2048);
+}
+
+#[bench]
+fn big4k_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 4096);
+}
+
+fn bench_nth_root(b: &mut Bencher, bits: usize, n: u32) {
+ let x = get_rng().gen_biguint(bits);
+ eprintln!("bench_{}th_root({})", n, x);
+
+ check(&x, n);
+ b.iter(|| x.nth_root(n));
+}
+
+#[bench]
+fn big64_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 64, 10);
+}
+
+#[bench]
+fn big1k_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 10);
+}
+
+#[bench]
+fn big1k_nth_100(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 100);
+}
+
+#[bench]
+fn big1k_nth_1000(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 1000);
+}
+
+#[bench]
+fn big1k_nth_10000(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 10000);
+}
+
+#[bench]
+fn big2k_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 10);
+}
+
+#[bench]
+fn big2k_nth_100(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 100);
+}
+
+#[bench]
+fn big2k_nth_1000(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 1000);
+}
+
+#[bench]
+fn big2k_nth_10000(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 10000);
+}
+
+#[bench]
+fn big4k_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 10);
+}
+
+#[bench]
+fn big4k_nth_100(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 100);
+}
+
+#[bench]
+fn big4k_nth_1000(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 1000);
+}
+
+#[bench]
+fn big4k_nth_10000(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 10000);
+}
diff --git a/rust/vendor/num-bigint-0.2.6/benches/shootout-pidigits.rs b/rust/vendor/num-bigint-0.2.6/benches/shootout-pidigits.rs
new file mode 100644
index 0000000..f90a697
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/benches/shootout-pidigits.rs
@@ -0,0 +1,142 @@
+// The Computer Language Benchmarks Game
+// http://benchmarksgame.alioth.debian.org/
+//
+// contributed by the Rust Project Developers
+
+// Copyright (c) 2013-2014 The Rust Project Developers
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// - Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of "The Computer Language Benchmarks Game" nor
+// the name of "The Computer Language Shootout Benchmarks" nor the
+// names of its contributors may be used to endorse or promote
+// products derived from this software without specific prior
+// written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+extern crate num_bigint;
+extern crate num_integer;
+extern crate num_traits;
+
+use std::io;
+use std::str::FromStr;
+
+use num_bigint::BigInt;
+use num_integer::Integer;
+use num_traits::{FromPrimitive, One, ToPrimitive, Zero};
+
+struct Context {
+ numer: BigInt,
+ accum: BigInt,
+ denom: BigInt,
+}
+
+impl Context {
+ fn new() -> Context {
+ Context {
+ numer: One::one(),
+ accum: Zero::zero(),
+ denom: One::one(),
+ }
+ }
+
+ fn from_i32(i: i32) -> BigInt {
+ FromPrimitive::from_i32(i).unwrap()
+ }
+
+ fn extract_digit(&self) -> i32 {
+ if self.numer > self.accum {
+ return -1;
+ }
+ let (q, r) = (&self.numer * Context::from_i32(3) + &self.accum).div_rem(&self.denom);
+ if r + &self.numer >= self.denom {
+ return -1;
+ }
+ q.to_i32().unwrap()
+ }
+
+ fn next_term(&mut self, k: i32) {
+ let y2 = Context::from_i32(k * 2 + 1);
+ self.accum = (&self.accum + (&self.numer << 1)) * &y2;
+ self.numer = &self.numer * Context::from_i32(k);
+ self.denom = &self.denom * y2;
+ }
+
+ fn eliminate_digit(&mut self, d: i32) {
+ let d = Context::from_i32(d);
+ let ten = Context::from_i32(10);
+ self.accum = (&self.accum - &self.denom * d) * &ten;
+ self.numer = &self.numer * ten;
+ }
+}
+
+fn pidigits(n: isize, out: &mut dyn io::Write) -> io::Result<()> {
+ let mut k = 0;
+ let mut context = Context::new();
+
+ for i in 1..(n + 1) {
+ let mut d;
+ loop {
+ k += 1;
+ context.next_term(k);
+ d = context.extract_digit();
+ if d != -1 {
+ break;
+ }
+ }
+
+ write!(out, "{}", d)?;
+ if i % 10 == 0 {
+ write!(out, "\t:{}\n", i)?;
+ }
+
+ context.eliminate_digit(d);
+ }
+
+ let m = n % 10;
+ if m != 0 {
+ for _ in m..10 {
+ write!(out, " ")?;
+ }
+ write!(out, "\t:{}\n", n)?;
+ }
+ Ok(())
+}
+
+const DEFAULT_DIGITS: isize = 512;
+
+fn main() {
+ let args = std::env::args().collect::<Vec<_>>();
+ let n = if args.len() < 2 {
+ DEFAULT_DIGITS
+ } else if args[1] == "--bench" {
+ return pidigits(DEFAULT_DIGITS, &mut std::io::sink()).unwrap();
+ } else {
+ FromStr::from_str(&args[1]).unwrap()
+ };
+ pidigits(n, &mut std::io::stdout()).unwrap();
+}
diff --git a/rust/vendor/num-bigint-0.2.6/bors.toml b/rust/vendor/num-bigint-0.2.6/bors.toml
new file mode 100644
index 0000000..ca08e81
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/bors.toml
@@ -0,0 +1,3 @@
+status = [
+ "continuous-integration/travis-ci/push",
+]
diff --git a/rust/vendor/num-bigint-0.2.6/build.rs b/rust/vendor/num-bigint-0.2.6/build.rs
new file mode 100644
index 0000000..e483c15
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/build.rs
@@ -0,0 +1,14 @@
+extern crate autocfg;
+
+use std::env;
+
+fn main() {
+ let ac = autocfg::new();
+ if ac.probe_type("i128") {
+ println!("cargo:rustc-cfg=has_i128");
+ } else if env::var_os("CARGO_FEATURE_I128").is_some() {
+ panic!("i128 support was not detected!");
+ }
+
+ autocfg::rerun_path("build.rs");
+}
diff --git a/rust/vendor/num-bigint-0.2.6/ci/rustup.sh b/rust/vendor/num-bigint-0.2.6/ci/rustup.sh
new file mode 100755
index 0000000..c5aea79
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/ci/rustup.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# Use rustup to locally run the same suite of tests as .travis.yml.
+# (You should first install/update all versions listed below.)
+
+set -ex
+
+export TRAVIS_RUST_VERSION
+for TRAVIS_RUST_VERSION in 1.15.0 1.22.0 1.26.0 stable beta nightly; do
+ run="rustup run $TRAVIS_RUST_VERSION"
+ $run cargo build --verbose
+ $run $PWD/ci/test_full.sh
+done
diff --git a/rust/vendor/num-bigint-0.2.6/ci/test_full.sh b/rust/vendor/num-bigint-0.2.6/ci/test_full.sh
new file mode 100755
index 0000000..4e1b60e
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/ci/test_full.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+set -ex
+
+echo Testing num-bigint on rustc ${TRAVIS_RUST_VERSION}
+
+FEATURES="serde"
+if [[ "$TRAVIS_RUST_VERSION" =~ ^(nightly|beta|stable|1.31.0|1.26.0|1.22.0)$ ]]; then
+ FEATURES="$FEATURES rand"
+fi
+if [[ "$TRAVIS_RUST_VERSION" =~ ^(nightly|beta|stable|1.31.0|1.26.0)$ ]]; then
+ FEATURES="$FEATURES i128"
+fi
+if [[ "$TRAVIS_RUST_VERSION" =~ ^(nightly|beta|stable|1.31.0)$ ]]; then
+ FEATURES="$FEATURES quickcheck quickcheck_macros"
+fi
+
+# num-bigint should build and test everywhere.
+cargo build --verbose
+cargo test --verbose
+
+# It should build with minimal features too.
+cargo build --no-default-features --features="std"
+cargo test --no-default-features --features="std"
+
+# Each isolated feature should also work everywhere.
+for feature in $FEATURES; do
+ cargo build --verbose --no-default-features --features="std $feature"
+ cargo test --verbose --no-default-features --features="std $feature"
+done
+
+# test all supported features together
+cargo build --features="std $FEATURES"
+cargo test --features="std $FEATURES"
+
+# make sure benchmarks can be built
+if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
+ cargo bench --all-features --no-run
+fi
diff --git a/rust/vendor/num-bigint-0.2.6/src/algorithms.rs b/rust/vendor/num-bigint-0.2.6/src/algorithms.rs
new file mode 100644
index 0000000..223f051
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/src/algorithms.rs
@@ -0,0 +1,789 @@
+use std::borrow::Cow;
+use std::cmp;
+use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::iter::repeat;
+use std::mem;
+use traits;
+use traits::{One, Zero};
+
+use biguint::BigUint;
+
+use bigint::BigInt;
+use bigint::Sign;
+use bigint::Sign::{Minus, NoSign, Plus};
+
+use big_digit::{self, BigDigit, DoubleBigDigit, SignedDoubleBigDigit};
+
+// Generic functions for add/subtract/multiply with carry/borrow:
+
+// Add with carry:
+#[inline]
+fn adc(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(a);
+ *acc += DoubleBigDigit::from(b);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+// Subtract with borrow:
+#[inline]
+fn sbb(a: BigDigit, b: BigDigit, acc: &mut SignedDoubleBigDigit) -> BigDigit {
+ *acc += SignedDoubleBigDigit::from(a);
+ *acc -= SignedDoubleBigDigit::from(b);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+#[inline]
+pub fn mac_with_carry(a: BigDigit, b: BigDigit, c: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(a);
+ *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+#[inline]
+pub fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+/// Divide a two-digit numerator by a one-digit divisor, returning the quotient and remainder.
+///
+/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit.
+/// This is _not_ true for an arbitrary numerator/denominator.
+///
+/// (This function also matches what the x86 divide instruction does).
+#[inline]
+fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
+ debug_assert!(hi < divisor);
+
+ let lhs = big_digit::to_doublebigdigit(hi, lo);
+ let rhs = DoubleBigDigit::from(divisor);
+ ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit)
+}
+
+pub fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) {
+ let mut rem = 0;
+
+ for d in a.data.iter_mut().rev() {
+ let (q, r) = div_wide(rem, *d, b);
+ *d = q;
+ rem = r;
+ }
+
+ (a.normalized(), rem)
+}
+
+pub fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit {
+ let mut rem: DoubleBigDigit = 0;
+ for &digit in a.data.iter().rev() {
+ rem = (rem << big_digit::BITS) + DoubleBigDigit::from(digit);
+ rem %= DoubleBigDigit::from(b);
+ }
+
+ rem as BigDigit
+}
+
+// Only for the Add impl:
+#[inline]
+pub fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit {
+ debug_assert!(a.len() >= b.len());
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = a.split_at_mut(b.len());
+
+ for (a, b) in a_lo.iter_mut().zip(b) {
+ *a = adc(*a, *b, &mut carry);
+ }
+
+ if carry != 0 {
+ for a in a_hi {
+ *a = adc(*a, 0, &mut carry);
+ if carry == 0 {
+ break;
+ }
+ }
+ }
+
+ carry as BigDigit
+}
+
+/// Two argument addition of raw slices:
+/// a += b
+///
+/// The caller _must_ ensure that a is big enough to store the result - typically this means
+/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry.
+pub fn add2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let carry = __add2(a, b);
+
+ debug_assert!(carry == 0);
+}
+
+pub fn sub2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let mut borrow = 0;
+
+ let len = cmp::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at_mut(len);
+ let (b_lo, b_hi) = b.split_at(len);
+
+ for (a, b) in a_lo.iter_mut().zip(b_lo) {
+ *a = sbb(*a, *b, &mut borrow);
+ }
+
+ if borrow != 0 {
+ for a in a_hi {
+ *a = sbb(*a, 0, &mut borrow);
+ if borrow == 0 {
+ break;
+ }
+ }
+ }
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+// Only for the Sub impl. `a` and `b` must have same length.
+#[inline]
+pub fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> BigDigit {
+ debug_assert!(b.len() == a.len());
+
+ let mut borrow = 0;
+
+ for (ai, bi) in a.iter().zip(b) {
+ *bi = sbb(*ai, *bi, &mut borrow);
+ }
+
+ borrow as BigDigit
+}
+
+pub fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) {
+ debug_assert!(b.len() >= a.len());
+
+ let len = cmp::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at(len);
+ let (b_lo, b_hi) = b.split_at_mut(len);
+
+ let borrow = __sub2rev(a_lo, b_lo);
+
+ assert!(a_hi.is_empty());
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+pub fn sub_sign(a: &[BigDigit], b: &[BigDigit]) -> (Sign, BigUint) {
+ // Normalize:
+ let a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+ let b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+
+ match cmp_slice(a, b) {
+ Greater => {
+ let mut a = a.to_vec();
+ sub2(&mut a, b);
+ (Plus, BigUint::new(a))
+ }
+ Less => {
+ let mut b = b.to_vec();
+ sub2(&mut b, a);
+ (Minus, BigUint::new(b))
+ }
+ _ => (NoSign, Zero::zero()),
+ }
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+pub fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) {
+ if c == 0 {
+ return;
+ }
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = acc.split_at_mut(b.len());
+
+ for (a, &b) in a_lo.iter_mut().zip(b) {
+ *a = mac_with_carry(*a, b, c, &mut carry);
+ }
+
+ let mut a = a_hi.iter_mut();
+ while carry != 0 {
+ let a = a.next().expect("carry overflow during multiplication!");
+ *a = adc(*a, 0, &mut carry);
+ }
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+fn mac3(acc: &mut [BigDigit], b: &[BigDigit], c: &[BigDigit]) {
+ let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) };
+
+ // We use three algorithms for different input sizes.
+ //
+ // - For small inputs, long multiplication is fastest.
+ // - Next we use Karatsuba multiplication (Toom-2), which we have optimized
+ // to avoid unnecessary allocations for intermediate values.
+ // - For the largest inputs we use Toom-3, which better optimizes the
+ // number of operations, but uses more temporary allocations.
+ //
+ // The thresholds are somewhat arbitrary, chosen by evaluating the results
+ // of `cargo bench --bench bigint multiply`.
+
+ if x.len() <= 32 {
+ // Long multiplication:
+ for (i, xi) in x.iter().enumerate() {
+ mac_digit(&mut acc[i..], y, *xi);
+ }
+ } else if x.len() <= 256 {
+ /*
+ * Karatsuba multiplication:
+ *
+ * The idea is that we break x and y up into two smaller numbers that each have about half
+ * as many digits, like so (note that multiplying by b is just a shift):
+ *
+ * x = x0 + x1 * b
+ * y = y0 + y1 * b
+ *
+ * With some algebra, we can compute x * y with three smaller products, where the inputs to
+ * each of the smaller products have only about half as many digits as x and y:
+ *
+ * x * y = (x0 + x1 * b) * (y0 + y1 * b)
+ *
+ * x * y = x0 * y0
+ * + x0 * y1 * b
+ * + x1 * y0 * b
+ * + x1 * y1 * b^2
+ *
+ * Let p0 = x0 * y0 and p2 = x1 * y1:
+ *
+ * x * y = p0
+ * + (x0 * y1 + x1 * y0) * b
+ * + p2 * b^2
+ *
+ * The real trick is that middle term:
+ *
+ * x0 * y1 + x1 * y0
+ *
+ * = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2
+ *
+ * = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2
+ *
+ * Now we factor the middle terms:
+ *
+ * = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2
+ *
+ * = -((x1 - x0) * (y1 - y0)) + p0 + p2
+ *
+ * Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula:
+ *
+ * x * y = p0
+ * + (p0 + p2 - p1) * b
+ * + p2 * b^2
+ *
+ * Where the three intermediate products are:
+ *
+ * p0 = x0 * y0
+ * p1 = (x1 - x0) * (y1 - y0)
+ * p2 = x1 * y1
+ *
+ * In doing the computation, we take great care to avoid unnecessary temporary variables
+ * (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a
+ * bit so we can use the same temporary variable for all the intermediate products:
+ *
+ * x * y = p2 * b^2 + p2 * b
+ * + p0 * b + p0
+ * - p1 * b
+ *
+ * The other trick we use is instead of doing explicit shifts, we slice acc at the
+ * appropriate offset when doing the add.
+ */
+
+ /*
+ * When x is smaller than y, it's significantly faster to pick b such that x is split in
+ * half, not y:
+ */
+ let b = x.len() / 2;
+ let (x0, x1) = x.split_at(b);
+ let (y0, y1) = y.split_at(b);
+
+ /*
+ * We reuse the same BigUint for all the intermediate multiplies and have to size p
+ * appropriately here: x1.len() >= x0.len() and y1.len() >= y0.len():
+ */
+ let len = x1.len() + y1.len() + 1;
+ let mut p = BigUint { data: vec![0; len] };
+
+ // p2 = x1 * y1
+ mac3(&mut p.data[..], x1, y1);
+
+ // Not required, but the adds go faster if we drop any unneeded 0s from the end:
+ p.normalize();
+
+ add2(&mut acc[b..], &p.data[..]);
+ add2(&mut acc[b * 2..], &p.data[..]);
+
+ // Zero out p before the next multiply:
+ p.data.truncate(0);
+ p.data.extend(repeat(0).take(len));
+
+ // p0 = x0 * y0
+ mac3(&mut p.data[..], x0, y0);
+ p.normalize();
+
+ add2(&mut acc[..], &p.data[..]);
+ add2(&mut acc[b..], &p.data[..]);
+
+ // p1 = (x1 - x0) * (y1 - y0)
+ // We do this one last, since it may be negative and acc can't ever be negative:
+ let (j0_sign, j0) = sub_sign(x1, x0);
+ let (j1_sign, j1) = sub_sign(y1, y0);
+
+ match j0_sign * j1_sign {
+ Plus => {
+ p.data.truncate(0);
+ p.data.extend(repeat(0).take(len));
+
+ mac3(&mut p.data[..], &j0.data[..], &j1.data[..]);
+ p.normalize();
+
+ sub2(&mut acc[b..], &p.data[..]);
+ }
+ Minus => {
+ mac3(&mut acc[b..], &j0.data[..], &j1.data[..]);
+ }
+ NoSign => (),
+ }
+ } else {
+ // Toom-3 multiplication:
+ //
+ // Toom-3 is like Karatsuba above, but dividing the inputs into three parts.
+ // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively.
+ //
+ // The general idea is to treat the large integers' digits as
+ // polynomials of a certain degree and determine the coefficients/digits
+ // of the product of the two via interpolation of the polynomial product.
+ let i = y.len() / 3 + 1;
+
+ let x0_len = cmp::min(x.len(), i);
+ let x1_len = cmp::min(x.len() - x0_len, i);
+
+ let y0_len = i;
+ let y1_len = cmp::min(y.len() - y0_len, i);
+
+ // Break x and y into three parts, representing an order-two polynomial.
+ // t is chosen to be the size of a digit so we can use faster shifts
+ // in place of multiplications.
+ //
+ // x(t) = x2*t^2 + x1*t + x0
+ let x0 = BigInt::from_slice(Plus, &x[..x0_len]);
+ let x1 = BigInt::from_slice(Plus, &x[x0_len..x0_len + x1_len]);
+ let x2 = BigInt::from_slice(Plus, &x[x0_len + x1_len..]);
+
+ // y(t) = y2*t^2 + y1*t + y0
+ let y0 = BigInt::from_slice(Plus, &y[..y0_len]);
+ let y1 = BigInt::from_slice(Plus, &y[y0_len..y0_len + y1_len]);
+ let y2 = BigInt::from_slice(Plus, &y[y0_len + y1_len..]);
+
+ // Let w(t) = x(t) * y(t)
+ //
+ // This gives us the following order-4 polynomial.
+ //
+ // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0
+ //
+ // We need to find the coefficients w4, w3, w2, w1 and w0. Instead
+ // of simply multiplying the x and y in total, we can evaluate w
+ // at 5 points. An n-degree polynomial is uniquely identified by (n + 1)
+ // points.
+ //
+ // The choice of points at which to evaluate w is arbitrary, but we use
+ // the following.
+ //
+ // w(t) at t = 0, 1, -1, -2 and inf
+ //
+ // The values for w(t) in terms of x(t)*y(t) at these points are:
+ //
+ // let a = w(0) = x0 * y0
+ // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0)
+ // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0)
+ // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0)
+ // let e = w(inf) = x2 * y2 as t -> inf
+
+ // x0 + x2, avoiding temporaries
+ let p = &x0 + &x2;
+
+ // y0 + y2, avoiding temporaries
+ let q = &y0 + &y2;
+
+ // x2 - x1 + x0, avoiding temporaries
+ let p2 = &p - &x1;
+
+ // y2 - y1 + y0, avoiding temporaries
+ let q2 = &q - &y1;
+
+ // w(0)
+ let r0 = &x0 * &y0;
+
+ // w(inf)
+ let r4 = &x2 * &y2;
+
+ // w(1)
+ let r1 = (p + x1) * (q + y1);
+
+ // w(-1)
+ let r2 = &p2 * &q2;
+
+ // w(-2)
+ let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0);
+
+ // Evaluating these points gives us the following system of linear equations.
+ //
+ // 0 0 0 0 1 | a
+ // 1 1 1 1 1 | b
+ // 1 -1 1 -1 1 | c
+ // 16 -8 4 -2 1 | d
+ // 1 0 0 0 0 | e
+ //
+ // The solved equation (after gaussian elimination or similar)
+ // in terms of its coefficients:
+ //
+ // w0 = w(0)
+ // w1 = w(0)/2 + w(1)/3 - w(-1) + w(-2)/6 - 2*w(inf)
+ // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf)
+ // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(-2)/6 + 2*w(inf)
+ // w4 = w(inf)
+ //
+ // This particular sequence is given by Bodrato and is an interpolation
+ // of the above equations.
+ let mut comp3: BigInt = (r3 - &r1) / 3;
+ let mut comp1: BigInt = (r1 - &r2) / 2;
+ let mut comp2: BigInt = r2 - &r0;
+ comp3 = (&comp2 - comp3) / 2 + &r4 * 2;
+ comp2 += &comp1 - &r4;
+ comp1 -= &comp3;
+
+ // Recomposition. The coefficients of the polynomial are now known.
+ //
+ // Evaluate at w(t) where t is our given base to get the result.
+ let result = r0
+ + (comp1 << (32 * i))
+ + (comp2 << (2 * 32 * i))
+ + (comp3 << (3 * 32 * i))
+ + (r4 << (4 * 32 * i));
+ let result_pos = result.to_biguint().unwrap();
+ add2(&mut acc[..], &result_pos.data);
+ }
+}
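
The Karatsuba identity that mac3 relies on can be checked on ordinary integers. A minimal sketch (editor's illustration, not part of the patch; the function name is invented), using base 10_000 in place of a big-digit shift:

    fn karatsuba_identity_demo() {
        let b: u64 = 10_000;                    // split base, analogue of slicing at `b` digits
        let (x0, x1) = (1_234u64, 5_678u64);    // x = x0 + x1*b
        let (y0, y1) = (8_765u64, 4_321u64);    // y = y0 + y1*b

        let p0 = x0 * y0;
        let p2 = x1 * y1;
        // p1 may be negative, hence the signed type (mac3 tracks this via sub_sign).
        let p1 = (x1 as i64 - x0 as i64) * (y1 as i64 - y0 as i64);

        let middle = (p0 + p2) as i64 - p1;     // equals x0*y1 + x1*y0
        let product = p0 + middle as u64 * b + p2 * b * b;
        assert_eq!(product, (x0 + x1 * b) * (y0 + y1 * b));
    }

Only three multiplications of half-size numbers (p0, p1, p2) are needed, which is the whole point of the derivation in the comment above.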
+
+pub fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint {
+ let len = x.len() + y.len() + 1;
+ let mut prod = BigUint { data: vec![0; len] };
+
+ mac3(&mut prod.data[..], x, y);
+ prod.normalized()
+}
+
+pub fn scalar_mul(a: &mut [BigDigit], b: BigDigit) -> BigDigit {
+ let mut carry = 0;
+ for a in a.iter_mut() {
+ *a = mul_with_carry(*a, b, &mut carry);
+ }
+ carry as BigDigit
+}
+
+pub fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!()
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u, Zero::zero());
+ }
+ let (div, rem) = div_rem_digit(u, d.data[0]);
+ // reuse d
+ d.data.clear();
+ d += rem;
+ return (div, d);
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(&d) {
+ Less => return (Zero::zero(), u),
+ Equal => {
+ u.set_one();
+ return (u, Zero::zero());
+ }
+ Greater => {} // Do nothing
+ }
+
+ // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+ let (q, r) = if shift == 0 {
+ // no need to clone d
+ div_rem_core(u, &d)
+ } else {
+ div_rem_core(u << shift, &(d << shift))
+ };
+ // renormalize the remainder
+ (q, r >> shift)
+}
+
+pub fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!()
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u.clone(), Zero::zero());
+ }
+
+ let (div, rem) = div_rem_digit(u.clone(), d.data[0]);
+ return (div, rem.into());
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(d) {
+ Less => return (Zero::zero(), u.clone()),
+ Equal => return (One::one(), Zero::zero()),
+ Greater => {} // Do nothing
+ }
+
+ // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+
+ let (q, r) = if shift == 0 {
+ // no need to clone d
+ div_rem_core(u.clone(), d)
+ } else {
+ div_rem_core(u << shift, &(d << shift))
+ };
+ // renormalize the remainder
+ (q, r >> shift)
+}
+
+/// an implementation of Knuth, TAOCP vol 2 section 4.3, algorithm D
+///
+/// # Correctness
+///
+/// This function requires the following conditions to run correctly and/or effectively
+///
+/// - `a > b`
+/// - `d.data.len() > 1`
+/// - `d.data.last().unwrap().leading_zeros() == 0`
+fn div_rem_core(mut a: BigUint, b: &BigUint) -> (BigUint, BigUint) {
+ // The algorithm works by incrementally calculating "guesses", q0, for part of the
+ // remainder. Once we have any number q0 such that q0 * b <= a, we can set
+ //
+ // q += q0
+ // a -= q0 * b
+ //
+ // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder.
+ //
+ // q0, our guess, is calculated by dividing the last few digits of a by the last digit of b
+ // - this should give us a guess that is "close" to the actual quotient, but is possibly
+ // greater than the actual quotient. If q0 * b > a, we simply use iterated subtraction
+ // until we have a guess such that q0 * b <= a.
+ //
+
+ let bn = *b.data.last().unwrap();
+ let q_len = a.data.len() - b.data.len() + 1;
+ let mut q = BigUint {
+ data: vec![0; q_len],
+ };
+
+ // We reuse the same temporary to avoid hitting the allocator in our inner loop - this is
+ // sized to hold a0 (in the common case; if a particular digit of the quotient is zero a0
+ // can be bigger).
+ //
+ let mut tmp = BigUint {
+ data: Vec::with_capacity(2),
+ };
+
+ for j in (0..q_len).rev() {
+ /*
+ * When calculating our next guess q0, we don't need to consider the digits below j
+ * + b.data.len() - 1: we're guessing digit j of the quotient (i.e. q0 << j) from
+ * digit bn of the divisor (i.e. bn << (b.data.len() - 1)) - so the product of those
+ * two numbers will be zero in all digits up to (j + b.data.len() - 1).
+ */
+ let offset = j + b.data.len() - 1;
+ if offset >= a.data.len() {
+ continue;
+ }
+
+ /* just avoiding a heap allocation: */
+ let mut a0 = tmp;
+ a0.data.truncate(0);
+ a0.data.extend(a.data[offset..].iter().cloned());
+
+ /*
+ * q0 << j * big_digit::BITS is our actual quotient estimate - we do the shifts
+ * implicitly at the end, when adding and subtracting to a and q. Not only do we
+ * save the cost of the shifts, the rest of the arithmetic gets to work with
+ * smaller numbers.
+ */
+ let (mut q0, _) = div_rem_digit(a0, bn);
+ let mut prod = b * &q0;
+
+ while cmp_slice(&prod.data[..], &a.data[j..]) == Greater {
+ let one: BigUint = One::one();
+ q0 -= one;
+ prod -= b;
+ }
+
+ add2(&mut q.data[j..], &q0.data[..]);
+ sub2(&mut a.data[j..], &prod.data[..]);
+ a.normalize();
+
+ tmp = q0;
+ }
+
+ debug_assert!(a < *b);
+
+ (q.normalized(), a)
+}
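
The guess-and-correct step described in the comments of div_rem_core can be seen on a small base-10 example (editor's sketch, not part of the patch; the function name is invented, and the divisor is deliberately left unnormalized so the first guess overshoots):

    fn algorithm_d_guess_demo() {
        let (a, b) = (450u32, 59u32);
        let mut q0 = 45 / 5;          // guess from the leading digits: 9
        while q0 * b > a {            // 9*59 and 8*59 both exceed 450
            q0 -= 1;                  // iterated subtraction, as in the loop above
        }
        assert_eq!(q0, 7);            // the true quotient digit
        assert_eq!(a - q0 * b, 37);   // remainder carried into the next step
    }

Normalizing so the divisor's top digit has its high bit set (as div_rem does before calling div_rem_core) keeps this overshoot small, so the correction loop runs at most a couple of times.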
+
+/// Find last set bit
+/// fls(0) == 0, fls(u32::MAX) == 32
+pub fn fls<T: traits::PrimInt>(v: T) -> usize {
+ mem::size_of::<T>() * 8 - v.leading_zeros() as usize
+}
+
+pub fn ilog2<T: traits::PrimInt>(v: T) -> usize {
+ fls(v) - 1
+}
+
+#[inline]
+pub fn biguint_shl(n: Cow<BigUint>, bits: usize) -> BigUint {
+ let n_unit = bits / big_digit::BITS;
+ let mut data = match n_unit {
+ 0 => n.into_owned().data,
+ _ => {
+ let len = n_unit + n.data.len() + 1;
+ let mut data = Vec::with_capacity(len);
+ data.extend(repeat(0).take(n_unit));
+ data.extend(n.data.iter().cloned());
+ data
+ }
+ };
+
+ let n_bits = bits % big_digit::BITS;
+ if n_bits > 0 {
+ let mut carry = 0;
+ for elem in data[n_unit..].iter_mut() {
+ let new_carry = *elem >> (big_digit::BITS - n_bits);
+ *elem = (*elem << n_bits) | carry;
+ carry = new_carry;
+ }
+ if carry != 0 {
+ data.push(carry);
+ }
+ }
+
+ BigUint::new(data)
+}
+
+#[inline]
+pub fn biguint_shr(n: Cow<BigUint>, bits: usize) -> BigUint {
+ let n_unit = bits / big_digit::BITS;
+ if n_unit >= n.data.len() {
+ return Zero::zero();
+ }
+ let mut data = match n {
+ Cow::Borrowed(n) => n.data[n_unit..].to_vec(),
+ Cow::Owned(mut n) => {
+ n.data.drain(..n_unit);
+ n.data
+ }
+ };
+
+ let n_bits = bits % big_digit::BITS;
+ if n_bits > 0 {
+ let mut borrow = 0;
+ for elem in data.iter_mut().rev() {
+ let new_borrow = *elem << (big_digit::BITS - n_bits);
+ *elem = (*elem >> n_bits) | borrow;
+ borrow = new_borrow;
+ }
+ }
+
+ BigUint::new(data)
+}
+
+pub fn cmp_slice(a: &[BigDigit], b: &[BigDigit]) -> Ordering {
+ debug_assert!(a.last() != Some(&0));
+ debug_assert!(b.last() != Some(&0));
+
+ let (a_len, b_len) = (a.len(), b.len());
+ if a_len < b_len {
+ return Less;
+ }
+ if a_len > b_len {
+ return Greater;
+ }
+
+ for (&ai, &bi) in a.iter().rev().zip(b.iter().rev()) {
+ if ai < bi {
+ return Less;
+ }
+ if ai > bi {
+ return Greater;
+ }
+ }
+ Equal
+}
+
+#[cfg(test)]
+mod algorithm_tests {
+ use big_digit::BigDigit;
+ use traits::Num;
+ use Sign::Plus;
+ use {BigInt, BigUint};
+
+ #[test]
+ fn test_sub_sign() {
+ use super::sub_sign;
+
+ fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt {
+ let (sign, val) = sub_sign(a, b);
+ BigInt::from_biguint(sign, val)
+ }
+
+ let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap();
+ let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap();
+ let a_i = BigInt::from_biguint(Plus, a.clone());
+ let b_i = BigInt::from_biguint(Plus, b.clone());
+
+ assert_eq!(sub_sign_i(&a.data[..], &b.data[..]), &a_i - &b_i);
+ assert_eq!(sub_sign_i(&b.data[..], &a.data[..]), &b_i - &a_i);
+ }
+}
diff --git a/rust/vendor/num-bigint-0.2.6/src/bigint.rs b/rust/vendor/num-bigint-0.2.6/src/bigint.rs
new file mode 100644
index 0000000..bd74e7d
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/src/bigint.rs
@@ -0,0 +1,3110 @@
+#[allow(deprecated, unused_imports)]
+use std::ascii::AsciiExt;
+use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::default::Default;
+use std::fmt;
+use std::iter::{Product, Sum};
+use std::mem;
+use std::ops::{
+ Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
+ Mul, MulAssign, Neg, Not, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign,
+};
+use std::str::{self, FromStr};
+#[cfg(has_i128)]
+use std::{i128, u128};
+use std::{i64, u64};
+
+#[cfg(feature = "serde")]
+use serde;
+
+use integer::{Integer, Roots};
+use traits::{
+ CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, Signed,
+ ToPrimitive, Zero,
+};
+
+use self::Sign::{Minus, NoSign, Plus};
+
+use super::ParseBigIntError;
+use big_digit::{self, BigDigit, DoubleBigDigit};
+use biguint;
+use biguint::to_str_radix_reversed;
+use biguint::{BigUint, IntDigits};
+
+use IsizePromotion;
+use UsizePromotion;
+
+#[cfg(feature = "quickcheck")]
+use quickcheck::{Arbitrary, Gen};
+
+/// A Sign is a `BigInt`'s composing element.
+#[derive(PartialEq, PartialOrd, Eq, Ord, Copy, Clone, Debug, Hash)]
+pub enum Sign {
+ Minus,
+ NoSign,
+ Plus,
+}
+
+impl Neg for Sign {
+ type Output = Sign;
+
+ /// Negate Sign value.
+ #[inline]
+ fn neg(self) -> Sign {
+ match self {
+ Minus => Plus,
+ NoSign => NoSign,
+ Plus => Minus,
+ }
+ }
+}
+
+impl Mul<Sign> for Sign {
+ type Output = Sign;
+
+ #[inline]
+ fn mul(self, other: Sign) -> Sign {
+ match (self, other) {
+ (NoSign, _) | (_, NoSign) => NoSign,
+ (Plus, Plus) | (Minus, Minus) => Plus,
+ (Plus, Minus) | (Minus, Plus) => Minus,
+ }
+ }
+}
+
+#[cfg(feature = "serde")]
+impl serde::Serialize for Sign {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ // Note: do not change the serialization format, or it may break
+ // forward and backward compatibility of serialized data!
+ match *self {
+ Sign::Minus => (-1i8).serialize(serializer),
+ Sign::NoSign => 0i8.serialize(serializer),
+ Sign::Plus => 1i8.serialize(serializer),
+ }
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> serde::Deserialize<'de> for Sign {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ use serde::de::Error;
+ use serde::de::Unexpected;
+
+ let sign: i8 = serde::Deserialize::deserialize(deserializer)?;
+ match sign {
+ -1 => Ok(Sign::Minus),
+ 0 => Ok(Sign::NoSign),
+ 1 => Ok(Sign::Plus),
+ _ => Err(D::Error::invalid_value(
+ Unexpected::Signed(sign.into()),
+ &"a sign of -1, 0, or 1",
+ )),
+ }
+ }
+}
+
+/// A big signed integer type.
+#[derive(Clone, Debug, Hash)]
+pub struct BigInt {
+ sign: Sign,
+ data: BigUint,
+}
+
+#[cfg(feature = "quickcheck")]
+impl Arbitrary for BigInt {
+ fn arbitrary<G: Gen>(g: &mut G) -> Self {
+ let positive = bool::arbitrary(g);
+ let sign = if positive { Sign::Plus } else { Sign::Minus };
+ Self::from_biguint(sign, BigUint::arbitrary(g))
+ }
+
+ #[allow(bare_trait_objects)] // `dyn` needs Rust 1.27 to parse, even when cfg-disabled
+ fn shrink(&self) -> Box<Iterator<Item = Self>> {
+ let sign = self.sign();
+ let unsigned_shrink = self.data.shrink();
+ Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x)))
+ }
+}
+
+/// Return the magnitude of a `BigInt`.
+///
+/// This is in a private module, pseudo pub(crate)
+#[cfg(feature = "rand")]
+pub fn magnitude(i: &BigInt) -> &BigUint {
+ &i.data
+}
+
+/// Return the owned magnitude of a `BigInt`.
+///
+/// This is in a private module, pseudo pub(crate)
+#[cfg(feature = "rand")]
+pub fn into_magnitude(i: BigInt) -> BigUint {
+ i.data
+}
+
+impl PartialEq for BigInt {
+ #[inline]
+ fn eq(&self, other: &BigInt) -> bool {
+ self.cmp(other) == Equal
+ }
+}
+
+impl Eq for BigInt {}
+
+impl PartialOrd for BigInt {
+ #[inline]
+ fn partial_cmp(&self, other: &BigInt) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for BigInt {
+ #[inline]
+ fn cmp(&self, other: &BigInt) -> Ordering {
+ let scmp = self.sign.cmp(&other.sign);
+ if scmp != Equal {
+ return scmp;
+ }
+
+ match self.sign {
+ NoSign => Equal,
+ Plus => self.data.cmp(&other.data),
+ Minus => other.data.cmp(&self.data),
+ }
+ }
+}
+
+impl Default for BigInt {
+ #[inline]
+ fn default() -> BigInt {
+ Zero::zero()
+ }
+}
+
+impl fmt::Display for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "", &self.data.to_str_radix(10))
+ }
+}
+
+impl fmt::Binary for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0b", &self.data.to_str_radix(2))
+ }
+}
+
+impl fmt::Octal for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0o", &self.data.to_str_radix(8))
+ }
+}
+
+impl fmt::LowerHex for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0x", &self.data.to_str_radix(16))
+ }
+}
+
+impl fmt::UpperHex for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let mut s = self.data.to_str_radix(16);
+ s.make_ascii_uppercase();
+ f.pad_integral(!self.is_negative(), "0x", &s)
+ }
+}
+
+// Negation in two's complement.
+// acc must be initialized as 1 for least-significant digit.
+//
+// When negating, a carry (acc == 1) means that all the digits
+// considered to this point were zero. This means that if all the
+// digits of a negative BigInt have been considered, carry must be
+// zero as we cannot have negative zero.
+//
+// 01 -> ...f ff
+// ff -> ...f 01
+// 01 00 -> ...f ff 00
+// 01 01 -> ...f fe ff
+// 01 ff -> ...f fe 01
+// ff 00 -> ...f 01 00
+// ff 01 -> ...f 00 ff
+// ff ff -> ...f 00 01
+#[inline]
+fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(!a);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+// !-2 = !...f fe = ...0 01 = +1
+// !-1 = !...f ff = ...0 00 = 0
+// ! 0 = !...0 00 = ...f ff = -1
+// !+1 = !...0 01 = ...f fe = -2
+impl Not for BigInt {
+ type Output = BigInt;
+
+ fn not(mut self) -> BigInt {
+ match self.sign {
+ NoSign | Plus => {
+ self.data += 1u32;
+ self.sign = Minus;
+ }
+ Minus => {
+ self.data -= 1u32;
+ self.sign = if self.data.is_zero() { NoSign } else { Plus };
+ }
+ }
+ self
+ }
+}
+
+impl<'a> Not for &'a BigInt {
+ type Output = BigInt;
+
+ fn not(self) -> BigInt {
+ match self.sign {
+ NoSign | Plus => BigInt::from_biguint(Minus, &self.data + 1u32),
+ Minus => BigInt::from_biguint(Plus, &self.data - 1u32),
+ }
+ }
+}
+
+// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1
+// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff
+// answer is pos, has length of a
+fn bitand_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai &= twos_b;
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+}
+
+// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff
+// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1
+// answer is pos, has length of b
+fn bitand_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = twos_a & bi;
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ if a.len() > b.len() {
+ a.truncate(b.len());
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().cloned());
+ }
+}
+
+// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff
+// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff
+// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitand_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ let mut carry_and = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(twos_a & twos_b, &mut carry_and);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_and);
+ }
+ debug_assert!(carry_a == 0);
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_and)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ if carry_and != 0 {
+ a.push(1);
+ }
+}
+
+forward_val_val_binop!(impl BitAnd for BigInt, bitand);
+forward_ref_val_binop!(impl BitAnd for BigInt, bitand);
+
+// do not use forward_ref_ref_binop_commutative! for bitand so that we can
+// clone as needed, avoiding over-allocation
+impl<'a, 'b> BitAnd<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitand(self, other: &BigInt) -> BigInt {
+ match (self.sign, other.sign) {
+ (NoSign, _) | (_, NoSign) => BigInt::from_slice(NoSign, &[]),
+ (Plus, Plus) => BigInt::from_biguint(Plus, &self.data & &other.data),
+ (Plus, Minus) => self.clone() & other,
+ (Minus, Plus) => other.clone() & self,
+ (Minus, Minus) => {
+ // forward to val-ref, choosing the larger to clone
+ if self.len() >= other.len() {
+ self.clone() & other
+ } else {
+ other.clone() & self
+ }
+ }
+ }
+ }
+}
+
+impl<'a> BitAnd<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitand(mut self, other: &BigInt) -> BigInt {
+ self &= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitAndAssign for BigInt, bitand_assign);
+
+impl<'a> BitAndAssign<&'a BigInt> for BigInt {
+ fn bitand_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (NoSign, _) => {}
+ (_, NoSign) => self.assign_from_slice(NoSign, &[]),
+ (Plus, Plus) => {
+ self.data &= &other.data;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ (Plus, Minus) => {
+ bitand_pos_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitand_neg_pos(self.digits_mut(), other.digits());
+ self.sign = Plus;
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitand_neg_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ }
+ }
+}
+
+// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff
+// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1
+// answer is neg, has length of b
+fn bitor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(*ai | twos_b, &mut carry_or);
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ a.truncate(b.len());
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_or)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ // for carry_or to be non-zero, we would need twos_b == 0
+ debug_assert!(carry_or == 0);
+}
+
+// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1
+// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff
+// answer is neg, has length of a
+fn bitor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a | bi, &mut carry_or);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_or);
+ }
+ debug_assert!(carry_a == 0);
+ }
+ // for carry_or to be non-zero, we would need twos_a == 0
+ debug_assert!(carry_or == 0);
+}
+
+// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1
+// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1
+// answer is neg, has length of shortest
+fn bitor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(twos_a | twos_b, &mut carry_or);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ a.truncate(b.len());
+ }
+ // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0
+ debug_assert!(carry_or == 0);
+}
+
+forward_val_val_binop!(impl BitOr for BigInt, bitor);
+forward_ref_val_binop!(impl BitOr for BigInt, bitor);
+
+// do not use forward_ref_ref_binop_commutative! for bitor so that we can
+// clone as needed, avoiding over-allocation
+impl<'a, 'b> BitOr<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitor(self, other: &BigInt) -> BigInt {
+ match (self.sign, other.sign) {
+ (NoSign, _) => other.clone(),
+ (_, NoSign) => self.clone(),
+ (Plus, Plus) => BigInt::from_biguint(Plus, &self.data | &other.data),
+ (Plus, Minus) => other.clone() | self,
+ (Minus, Plus) => self.clone() | other,
+ (Minus, Minus) => {
+ // forward to val-ref, choosing the smaller to clone
+ if self.len() <= other.len() {
+ self.clone() | other
+ } else {
+ other.clone() | self
+ }
+ }
+ }
+ }
+}
+
+impl<'a> BitOr<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitor(mut self, other: &BigInt) -> BigInt {
+ self |= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign);
+
+impl<'a> BitOrAssign<&'a BigInt> for BigInt {
+ fn bitor_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (_, NoSign) => {}
+ (NoSign, _) => self.assign_from_slice(other.sign, other.digits()),
+ (Plus, Plus) => self.data |= &other.data,
+ (Plus, Minus) => {
+ bitor_pos_neg(self.digits_mut(), other.digits());
+ self.sign = Minus;
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitor_neg_pos(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitor_neg_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ }
+ }
+}
+
+// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100
+// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitxor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ let mut carry_xor = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_b = !0;
+ *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
+ }
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_xor)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ if carry_xor != 0 {
+ a.push(1);
+ }
+}
+
+// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100
+// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitxor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_xor = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a ^ bi, &mut carry_xor);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_xor);
+ }
+ debug_assert!(carry_a == 0);
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_a = !0;
+ negate_carry(twos_a ^ bi, &mut carry_xor)
+ }));
+ }
+ if carry_xor != 0 {
+ a.push(1);
+ }
+}
+
+// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe
+// -ff ^ - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe
+// answer is pos, has length of longest
+fn bitxor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = twos_a ^ twos_b;
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = !0;
+ *ai = twos_a ^ twos_b;
+ }
+ debug_assert!(carry_a == 0);
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_a = !0;
+ let twos_b = negate_carry(bi, &mut carry_b);
+ twos_a ^ twos_b
+ }));
+ debug_assert!(carry_b == 0);
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor);
+
+impl<'a> BitXor<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitxor(mut self, other: &BigInt) -> BigInt {
+ self ^= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign);
+
+impl<'a> BitXorAssign<&'a BigInt> for BigInt {
+ fn bitxor_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (_, NoSign) => {}
+ (NoSign, _) => self.assign_from_slice(other.sign, other.digits()),
+ (Plus, Plus) => {
+ self.data ^= &other.data;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ (Plus, Minus) => {
+ bitxor_pos_neg(self.digits_mut(), other.digits());
+ self.sign = Minus;
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitxor_neg_pos(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitxor_neg_neg(self.digits_mut(), other.digits());
+ self.sign = Plus;
+ self.normalize();
+ }
+ }
+ }
+}
+
+impl FromStr for BigInt {
+ type Err = ParseBigIntError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<BigInt, ParseBigIntError> {
+ BigInt::from_str_radix(s, 10)
+ }
+}
+
+impl Num for BigInt {
+ type FromStrRadixErr = ParseBigIntError;
+
+ /// Creates and initializes a BigInt.
+ #[inline]
+ fn from_str_radix(mut s: &str, radix: u32) -> Result<BigInt, ParseBigIntError> {
+ let sign = if s.starts_with('-') {
+ let tail = &s[1..];
+ if !tail.starts_with('+') {
+ s = tail
+ }
+ Minus
+ } else {
+ Plus
+ };
+ let bu = BigUint::from_str_radix(s, radix)?;
+ Ok(BigInt::from_biguint(sign, bu))
+ }
+}
+
+impl Shl<usize> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shl(mut self, rhs: usize) -> BigInt {
+ self <<= rhs;
+ self
+ }
+}
+
+impl<'a> Shl<usize> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shl(self, rhs: usize) -> BigInt {
+ BigInt::from_biguint(self.sign, &self.data << rhs)
+ }
+}
+
+impl ShlAssign<usize> for BigInt {
+ #[inline]
+ fn shl_assign(&mut self, rhs: usize) {
+ self.data <<= rhs;
+ }
+}
+
+// Negative values need a rounding adjustment if there are any ones in the
+// bits that are getting shifted out.
+fn shr_round_down(i: &BigInt, rhs: usize) -> bool {
+ i.is_negative()
+ && biguint::trailing_zeros(&i.data)
+ .map(|n| n < rhs)
+ .unwrap_or(false)
+}
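
A small illustration (editor's note, not part of the patch; the function name is invented) of the rounding this helper implements, matching the floor semantics of a primitive arithmetic shift:

    fn shr_rounding_demo() {
        // Primitive arithmetic shift already floors toward negative infinity:
        assert_eq!(-5i32 >> 1, -3);
        // BigInt mimics this: the magnitude 5 >> 1 gives 2, and because a one
        // bit was shifted out of a negative value, one is added back, giving -3.
    }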
+
+impl Shr<usize> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shr(mut self, rhs: usize) -> BigInt {
+ self >>= rhs;
+ self
+ }
+}
+
+impl<'a> Shr<usize> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shr(self, rhs: usize) -> BigInt {
+ let round_down = shr_round_down(self, rhs);
+ let data = &self.data >> rhs;
+ BigInt::from_biguint(self.sign, if round_down { data + 1u8 } else { data })
+ }
+}
+
+impl ShrAssign<usize> for BigInt {
+ #[inline]
+ fn shr_assign(&mut self, rhs: usize) {
+ let round_down = shr_round_down(self, rhs);
+ self.data >>= rhs;
+ if round_down {
+ self.data += 1u8;
+ } else if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Zero for BigInt {
+ #[inline]
+ fn zero() -> BigInt {
+ BigInt::from_biguint(NoSign, Zero::zero())
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.data.set_zero();
+ self.sign = NoSign;
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.sign == NoSign
+ }
+}
+
+impl One for BigInt {
+ #[inline]
+ fn one() -> BigInt {
+ BigInt::from_biguint(Plus, One::one())
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.data.set_one();
+ self.sign = Plus;
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.sign == Plus && self.data.is_one()
+ }
+}
+
+impl Signed for BigInt {
+ #[inline]
+ fn abs(&self) -> BigInt {
+ match self.sign {
+ Plus | NoSign => self.clone(),
+ Minus => BigInt::from_biguint(Plus, self.data.clone()),
+ }
+ }
+
+ #[inline]
+ fn abs_sub(&self, other: &BigInt) -> BigInt {
+ if *self <= *other {
+ Zero::zero()
+ } else {
+ self - other
+ }
+ }
+
+ #[inline]
+ fn signum(&self) -> BigInt {
+ match self.sign {
+ Plus => BigInt::from_biguint(Plus, One::one()),
+ Minus => BigInt::from_biguint(Minus, One::one()),
+ NoSign => Zero::zero(),
+ }
+ }
+
+ #[inline]
+ fn is_positive(&self) -> bool {
+ self.sign == Plus
+ }
+
+ #[inline]
+ fn is_negative(&self) -> bool {
+ self.sign == Minus
+ }
+}
+
+/// Helper function for pow
+///
+/// Computes the effect of the exponent on the sign.
+#[inline]
+fn powsign<T: Integer>(sign: Sign, other: &T) -> Sign {
+ if other.is_zero() {
+ Plus
+ } else if sign != Minus {
+ sign
+ } else if other.is_odd() {
+ sign
+ } else {
+ -sign
+ }
+}
+
+macro_rules! pow_impl {
+ ($T:ty) => {
+ impl<'a> Pow<$T> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: $T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, &rhs), (&self.data).pow(rhs))
+ }
+ }
+
+ impl<'a, 'b> Pow<&'b $T> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: &$T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, rhs), (&self.data).pow(rhs))
+ }
+ }
+ };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+#[cfg(has_i128)]
+pow_impl!(u128);
+pow_impl!(BigUint);
+
+// A convenience method for getting the absolute value of an i32 in a u32.
+#[inline]
+fn i32_abs_as_u32(a: i32) -> u32 {
+ if a == i32::min_value() {
+ a as u32
+ } else {
+ a.abs() as u32
+ }
+}
+
+// A convenience method for getting the absolute value of an i64 in a u64.
+#[inline]
+fn i64_abs_as_u64(a: i64) -> u64 {
+ if a == i64::min_value() {
+ a as u64
+ } else {
+ a.abs() as u64
+ }
+}
+
+// A convenience method for getting the absolute value of an i128 in a u128.
+#[cfg(has_i128)]
+#[inline]
+fn i128_abs_as_u128(a: i128) -> u128 {
+ if a == i128::min_value() {
+ a as u128
+ } else {
+ a.abs() as u128
+ }
+}
+
+// We want to forward to BigUint::add, but it's not clear how that will go until
+// we compare both sign and magnitude. So we duplicate this body for every
+// val/ref combination, deferring that decision to BigUint's own forwarding.
+macro_rules! bigint_add {
+ ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
+ match ($a.sign, $b.sign) {
+ (_, NoSign) => $a_owned,
+ (NoSign, _) => $b_owned,
+ // same sign => keep the sign with the sum of magnitudes
+ (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
+ // opposite signs => keep the sign of the larger with the difference of magnitudes
+ (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) {
+ Less => BigInt::from_biguint($b.sign, $b_data - $a_data),
+ Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
+ Equal => Zero::zero(),
+ },
+ }
+ };
+}
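
A minimal sketch of the opposite-sign case the macro handles (editor's illustration with plain integers standing in for sign and magnitude; not part of the patch, and the Equal arm that collapses to zero is omitted):

    fn opposite_sign_add_demo() {
        let (a_sign, a_mag) = (1i8, 7u32);   // +7
        let (b_sign, b_mag) = (-1i8, 9u32);  // -9
        // Keep the sign of the larger magnitude; the magnitude is the difference.
        let (sign, mag) = if a_mag >= b_mag {
            (a_sign, a_mag - b_mag)
        } else {
            (b_sign, b_mag - a_mag)
        };
        assert_eq!((sign, mag), (-1, 2));    // +7 + (-9) == -2
    }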
+
+impl<'a, 'b> Add<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: &BigInt) -> BigInt {
+ bigint_add!(
+ self,
+ self.clone(),
+ &self.data,
+ other,
+ other.clone(),
+ &other.data
+ )
+ }
+}
+
+impl<'a> Add<BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: BigInt) -> BigInt {
+ bigint_add!(self, self.clone(), &self.data, other, other, other.data)
+ }
+}
+
+impl<'a> Add<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: &BigInt) -> BigInt {
+ bigint_add!(self, self, self.data, other, other.clone(), &other.data)
+ }
+}
+
+impl Add<BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: BigInt) -> BigInt {
+ bigint_add!(self, self, self.data, other, other, other.data)
+ }
+}
+
+impl<'a> AddAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: &BigInt) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+forward_val_assign!(impl AddAssign for BigInt, add_assign);
+
+promote_all_scalars!(impl Add for BigInt, add);
+promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigInt, add);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigInt, add);
+
+impl Add<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u32) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from_biguint(Plus, self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from_biguint(Plus, other - self.data),
+ Greater => BigInt::from_biguint(Minus, self.data - other),
+ },
+ }
+ }
+}
+impl AddAssign<u32> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u32) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+impl Add<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u64) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from_biguint(Plus, self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from_biguint(Plus, other - self.data),
+ Greater => BigInt::from_biguint(Minus, self.data - other),
+ },
+ }
+ }
+}
+impl AddAssign<u64> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+#[cfg(has_i128)]
+impl Add<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u128) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from_biguint(Plus, self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from_biguint(Plus, other - self.data),
+ Greater => BigInt::from_biguint(Minus, self.data - other),
+ },
+ }
+ }
+}
+#[cfg(has_i128)]
+impl AddAssign<u128> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i32> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i64> for BigInt, add);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i128> for BigInt, add);
+
+impl Add<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self + other as u32
+ } else {
+ self - i32_abs_as_u32(other)
+ }
+ }
+}
+impl AddAssign<i32> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self += other as u32;
+ } else {
+ *self -= i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Add<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self + other as u64
+ } else {
+ self - i64_abs_as_u64(other)
+ }
+ }
+}
+impl AddAssign<i64> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self += other as u64;
+ } else {
+ *self -= i64_abs_as_u64(other);
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Add<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self + other as u128
+ } else {
+ self - i128_abs_as_u128(other)
+ }
+ }
+}
+#[cfg(has_i128)]
+impl AddAssign<i128> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self += other as u128;
+ } else {
+ *self -= i128_abs_as_u128(other);
+ }
+ }
+}
+
+// We want to forward to BigUint::sub, but it's not clear how that will go until
+// we compare both sign and magnitude. So we duplicate this body for every
+// val/ref combination, deferring that decision to BigUint's own forwarding.
+macro_rules! bigint_sub {
+ ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
+ match ($a.sign, $b.sign) {
+ (_, NoSign) => $a_owned,
+ (NoSign, _) => -$b_owned,
+ // opposite signs => keep the sign of the left with the sum of magnitudes
+ (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
+ // same sign => keep or toggle the sign of the left with the difference of magnitudes
+ (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) {
+ Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data),
+ Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
+ Equal => Zero::zero(),
+ },
+ }
+ };
+}
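+
+// Worked example (illustrative, not in the upstream source): 3 - 5 takes the
+// same-sign arm; |3| < |5|, so the sign of the left operand is toggled and the
+// result is -(5 - 3) = -2, while (-3) - (-5) toggles Minus to Plus and yields 2.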
+
+impl<'a, 'b> Sub<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: &BigInt) -> BigInt {
+ bigint_sub!(
+ self,
+ self.clone(),
+ &self.data,
+ other,
+ other.clone(),
+ &other.data
+ )
+ }
+}
+
+impl<'a> Sub<BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ bigint_sub!(self, self.clone(), &self.data, other, other, other.data)
+ }
+}
+
+impl<'a> Sub<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: &BigInt) -> BigInt {
+ bigint_sub!(self, self, self.data, other, other.clone(), &other.data)
+ }
+}
+
+impl Sub<BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ bigint_sub!(self, self, self.data, other, other, other.data)
+ }
+}
+
+impl<'a> SubAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: &BigInt) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+forward_val_assign!(impl SubAssign for BigInt, sub_assign);
+
+promote_all_scalars!(impl Sub for BigInt, sub);
+promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigInt, sub);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigInt, sub);
+
+impl Sub<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u32) -> BigInt {
+ match self.sign {
+ NoSign => BigInt::from_biguint(Minus, From::from(other)),
+ Minus => BigInt::from_biguint(Minus, self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from_biguint(Plus, self.data - other),
+ Less => BigInt::from_biguint(Minus, other - self.data),
+ },
+ }
+ }
+}
+impl SubAssign<u32> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u32) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+impl Sub<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+#[cfg(has_i128)]
+impl Sub<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u64) -> BigInt {
+ match self.sign {
+ NoSign => BigInt::from_biguint(Minus, From::from(other)),
+ Minus => BigInt::from_biguint(Minus, self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from_biguint(Plus, self.data - other),
+ Less => BigInt::from_biguint(Minus, other - self.data),
+ },
+ }
+ }
+}
+impl SubAssign<u64> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+#[cfg(has_i128)]
+impl Sub<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u128) -> BigInt {
+ match self.sign {
+ NoSign => BigInt::from_biguint(Minus, From::from(other)),
+ Minus => BigInt::from_biguint(Minus, self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from_biguint(Plus, self.data - other),
+ Less => BigInt::from_biguint(Minus, other - self.data),
+ },
+ }
+ }
+}
+#[cfg(has_i128)]
+impl SubAssign<u128> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Sub<i32> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<i64> for BigInt, sub);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Sub<i128> for BigInt, sub);
+
+impl Sub<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self - other as u32
+ } else {
+ self + i32_abs_as_u32(other)
+ }
+ }
+}
+impl SubAssign<i32> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self -= other as u32;
+ } else {
+ *self += i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Sub<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u32 - other
+ } else {
+ -other - i32_abs_as_u32(self)
+ }
+ }
+}
+
+impl Sub<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self - other as u64
+ } else {
+ self + i64_abs_as_u64(other)
+ }
+ }
+}
+impl SubAssign<i64> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self -= other as u64;
+ } else {
+ *self += i64_abs_as_u64(other);
+ }
+ }
+}
+
+impl Sub<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u64 - other
+ } else {
+ -other - i64_abs_as_u64(self)
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Sub<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self - other as u128
+ } else {
+ self + i128_abs_as_u128(other)
+ }
+ }
+}
+#[cfg(has_i128)]
+impl SubAssign<i128> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self -= other as u128;
+ } else {
+ *self += i128_abs_as_u128(other);
+ }
+ }
+}
+#[cfg(has_i128)]
+impl Sub<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u128 - other
+ } else {
+ -other - i128_abs_as_u128(self)
+ }
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Mul for BigInt, mul);
+
+impl<'a, 'b> Mul<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: &BigInt) -> BigInt {
+ BigInt::from_biguint(self.sign * other.sign, &self.data * &other.data)
+ }
+}
+
+impl<'a> MulAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: &BigInt) {
+ *self = &*self * other;
+ }
+}
+forward_val_assign!(impl MulAssign for BigInt, mul_assign);
+
+promote_all_scalars!(impl Mul for BigInt, mul);
+promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigInt, mul);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigInt, mul);
+
+impl Mul<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u32> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u32) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Mul<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u64> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+#[cfg(has_i128)]
+impl Mul<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+#[cfg(has_i128)]
+impl MulAssign<u128> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i32> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i64> for BigInt, mul);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i128> for BigInt, mul);
+
+impl Mul<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self * other as u32
+ } else {
+ -(self * i32_abs_as_u32(other))
+ }
+ }
+}
+
+impl MulAssign<i32> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self *= other as u32;
+ } else {
+ self.sign = -self.sign;
+ *self *= i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Mul<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self * other as u64
+ } else {
+ -(self * i64_abs_as_u64(other))
+ }
+ }
+}
+
+impl MulAssign<i64> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self *= other as u64;
+ } else {
+ self.sign = -self.sign;
+ *self *= i64_abs_as_u64(other);
+ }
+ }
+}
+#[cfg(has_i128)]
+impl Mul<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self * other as u128
+ } else {
+ -(self * i128_abs_as_u128(other))
+ }
+ }
+}
+#[cfg(has_i128)]
+impl MulAssign<i128> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self *= other as u128;
+ } else {
+ self.sign = -self.sign;
+ *self *= i128_abs_as_u128(other);
+ }
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Div for BigInt, div);
+
+impl<'a, 'b> Div<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: &BigInt) -> BigInt {
+ let (q, _) = self.div_rem(other);
+ q
+ }
+}
+
+impl<'a> DivAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: &BigInt) {
+ *self = &*self / other;
+ }
+}
+forward_val_assign!(impl DivAssign for BigInt, div_assign);
+
+promote_all_scalars!(impl Div for BigInt, div);
+promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign);
+forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigInt, div);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigInt, div);
+
+impl Div<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u32> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u32) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+impl Div<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u64> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u64) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+#[cfg(has_i128)]
+impl DivAssign<u128> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u128) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Div<i32> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<i64> for BigInt, div);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Div<i128> for BigInt, div);
+
+impl Div<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self / other as u32
+ } else {
+ -(self / i32_abs_as_u32(other))
+ }
+ }
+}
+
+impl DivAssign<i32> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self /= other as u32;
+ } else {
+ self.sign = -self.sign;
+ *self /= i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Div<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u32 / other
+ } else {
+ -(i32_abs_as_u32(self) / other)
+ }
+ }
+}
+
+impl Div<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self / other as u64
+ } else {
+ -(self / i64_abs_as_u64(other))
+ }
+ }
+}
+
+impl DivAssign<i64> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self /= other as u64;
+ } else {
+ self.sign = -self.sign;
+ *self /= i64_abs_as_u64(other);
+ }
+ }
+}
+
+impl Div<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u64 / other
+ } else {
+ -(i64_abs_as_u64(self) / other)
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self / other as u128
+ } else {
+ -(self / i128_abs_as_u128(other))
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl DivAssign<i128> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self /= other as u128;
+ } else {
+ self.sign = -self.sign;
+ *self /= i128_abs_as_u128(other);
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u128 / other
+ } else {
+ -(i128_abs_as_u128(self) / other)
+ }
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem);
+
+impl<'a, 'b> Rem<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: &BigInt) -> BigInt {
+ let (_, r) = self.div_rem(other);
+ r
+ }
+}
+
+impl<'a> RemAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigInt) {
+ *self = &*self % other;
+ }
+}
+forward_val_assign!(impl RemAssign for BigInt, rem_assign);
+
+promote_all_scalars!(impl Rem for BigInt, rem);
+promote_all_scalars_assign!(impl RemAssign for BigInt, rem_assign);
+forward_all_scalar_binop_to_val_val!(impl Rem<u32> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigInt, rem);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigInt, rem);
+
+impl Rem<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u32> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u32) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self % other.data)
+ }
+}
+
+impl Rem<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u64> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u64) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self % other.data)
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+#[cfg(has_i128)]
+impl RemAssign<u128> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u128) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self % other.data)
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Rem<i32> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<i64> for BigInt, rem);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Rem<i128> for BigInt, rem);
+
+impl Rem<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self % other as u32
+ } else {
+ self % i32_abs_as_u32(other)
+ }
+ }
+}
+
+impl RemAssign<i32> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self %= other as u32;
+ } else {
+ *self %= i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Rem<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u32 % other
+ } else {
+ -(i32_abs_as_u32(self) % other)
+ }
+ }
+}
+
+impl Rem<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self % other as u64
+ } else {
+ self % i64_abs_as_u64(other)
+ }
+ }
+}
+
+impl RemAssign<i64> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self %= other as u64;
+ } else {
+ *self %= i64_abs_as_u64(other);
+ }
+ }
+}
+
+impl Rem<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u64 % other
+ } else {
+ -(i64_abs_as_u64(self) % other)
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self % other as u128
+ } else {
+ self % i128_abs_as_u128(other)
+ }
+ }
+}
+#[cfg(has_i128)]
+impl RemAssign<i128> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self %= other as u128;
+ } else {
+ *self %= i128_abs_as_u128(other);
+ }
+ }
+}
+#[cfg(has_i128)]
+impl Rem<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u128 % other
+ } else {
+ -(i128_abs_as_u128(self) % other)
+ }
+ }
+}
+
+impl Neg for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn neg(mut self) -> BigInt {
+ self.sign = -self.sign;
+ self
+ }
+}
+
+impl<'a> Neg for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn neg(self) -> BigInt {
+ -self.clone()
+ }
+}
+
+impl CheckedAdd for BigInt {
+ #[inline]
+ fn checked_add(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.add(v))
+ }
+}
+
+impl CheckedSub for BigInt {
+ #[inline]
+ fn checked_sub(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.sub(v))
+ }
+}
+
+impl CheckedMul for BigInt {
+ #[inline]
+ fn checked_mul(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.mul(v))
+ }
+}
+
+impl CheckedDiv for BigInt {
+ #[inline]
+ fn checked_div(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+}
+
+impl Integer for BigInt {
+ #[inline]
+ fn div_rem(&self, other: &BigInt) -> (BigInt, BigInt) {
+ // r.sign == self.sign
+ let (d_ui, r_ui) = self.data.div_mod_floor(&other.data);
+ let d = BigInt::from_biguint(self.sign, d_ui);
+ let r = BigInt::from_biguint(self.sign, r_ui);
+ if other.is_negative() {
+ (-d, r)
+ } else {
+ (d, r)
+ }
+ }
+
+ #[inline]
+ fn div_floor(&self, other: &BigInt) -> BigInt {
+ let (d, _) = self.div_mod_floor(other);
+ d
+ }
+
+ #[inline]
+ fn mod_floor(&self, other: &BigInt) -> BigInt {
+ let (_, m) = self.div_mod_floor(other);
+ m
+ }
+
+ fn div_mod_floor(&self, other: &BigInt) -> (BigInt, BigInt) {
+ // m.sign == other.sign
+ let (d_ui, m_ui) = self.data.div_rem(&other.data);
+ let d = BigInt::from_biguint(Plus, d_ui);
+ let m = BigInt::from_biguint(Plus, m_ui);
+ let one: BigInt = One::one();
+ match (self.sign, other.sign) {
+ (_, NoSign) => panic!(),
+ (Plus, Plus) | (NoSign, Plus) => (d, m),
+ (Plus, Minus) | (NoSign, Minus) => {
+ if m.is_zero() {
+ (-d, Zero::zero())
+ } else {
+ (-d - one, m + other)
+ }
+ }
+ (Minus, Plus) => {
+ if m.is_zero() {
+ (-d, Zero::zero())
+ } else {
+ (-d - one, other - m)
+ }
+ }
+ (Minus, Minus) => (d, -m),
+ }
+ }
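+
+    // Worked example (illustrative, not in the upstream source): for self = -7
+    // and other = 2, `div_rem` truncates toward zero and returns (-3, -1),
+    // while `div_mod_floor` rounds toward negative infinity and returns (-4, 1);
+    // the floored remainder always takes the sign of `other`.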
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and `other`.
+ ///
+ /// The result is always positive.
+ #[inline]
+ fn gcd(&self, other: &BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self.data.gcd(&other.data))
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn lcm(&self, other: &BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self.data.lcm(&other.data))
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &BigInt) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &BigInt) -> bool {
+ self.data.is_multiple_of(&other.data)
+ }
+
+ /// Returns `true` if the number is divisible by `2`.
+ #[inline]
+ fn is_even(&self) -> bool {
+ self.data.is_even()
+ }
+
+ /// Returns `true` if the number is not divisible by `2`.
+ #[inline]
+ fn is_odd(&self) -> bool {
+ self.data.is_odd()
+ }
+}
+
+impl Roots for BigInt {
+ fn nth_root(&self, n: u32) -> Self {
+ assert!(
+ !(self.is_negative() && n.is_even()),
+ "root of degree {} is imaginary",
+ n
+ );
+
+ BigInt::from_biguint(self.sign, self.data.nth_root(n))
+ }
+
+ fn sqrt(&self) -> Self {
+ assert!(!self.is_negative(), "square root is imaginary");
+
+ BigInt::from_biguint(self.sign, self.data.sqrt())
+ }
+
+ fn cbrt(&self) -> Self {
+ BigInt::from_biguint(self.sign, self.data.cbrt())
+ }
+}
+
+impl ToPrimitive for BigInt {
+ #[inline]
+ fn to_i64(&self) -> Option<i64> {
+ match self.sign {
+ Plus => self.data.to_i64(),
+ NoSign => Some(0),
+ Minus => self.data.to_u64().and_then(|n| {
+ let m: u64 = 1 << 63;
+ if n < m {
+ Some(-(n as i64))
+ } else if n == m {
+ Some(i64::MIN)
+ } else {
+ None
+ }
+ }),
+ }
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn to_i128(&self) -> Option<i128> {
+ match self.sign {
+ Plus => self.data.to_i128(),
+ NoSign => Some(0),
+ Minus => self.data.to_u128().and_then(|n| {
+ let m: u128 = 1 << 127;
+ if n < m {
+ Some(-(n as i128))
+ } else if n == m {
+ Some(i128::MIN)
+ } else {
+ None
+ }
+ }),
+ }
+ }
+
+ #[inline]
+ fn to_u64(&self) -> Option<u64> {
+ match self.sign {
+ Plus => self.data.to_u64(),
+ NoSign => Some(0),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn to_u128(&self) -> Option<u128> {
+ match self.sign {
+ Plus => self.data.to_u128(),
+ NoSign => Some(0),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ self.data
+ .to_f32()
+ .map(|n| if self.sign == Minus { -n } else { n })
+ }
+
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ self.data
+ .to_f64()
+ .map(|n| if self.sign == Minus { -n } else { n })
+ }
+}
+
+impl FromPrimitive for BigInt {
+ #[inline]
+ fn from_i64(n: i64) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn from_i128(n: i128) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_u64(n: u64) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn from_u128(n: u128) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_f64(n: f64) -> Option<BigInt> {
+ if n >= 0.0 {
+ BigUint::from_f64(n).map(|x| BigInt::from_biguint(Plus, x))
+ } else {
+ BigUint::from_f64(-n).map(|x| BigInt::from_biguint(Minus, x))
+ }
+ }
+}
+
+impl From<i64> for BigInt {
+ #[inline]
+ fn from(n: i64) -> Self {
+ if n >= 0 {
+ BigInt::from(n as u64)
+ } else {
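+            // Illustrative note (not in the upstream source): this computes the
+            // two's-complement magnitude |n| = 2^64 - (n as u64); e.g. for n == -1
+            // it gives 1, and for n == i64::MIN it gives 2^63.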
+ let u = u64::MAX - (n as u64) + 1;
+ BigInt {
+ sign: Minus,
+ data: BigUint::from(u),
+ }
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl From<i128> for BigInt {
+ #[inline]
+ fn from(n: i128) -> Self {
+ if n >= 0 {
+ BigInt::from(n as u128)
+ } else {
+ let u = u128::MAX - (n as u128) + 1;
+ BigInt {
+ sign: Minus,
+ data: BigUint::from(u),
+ }
+ }
+ }
+}
+
+macro_rules! impl_bigint_from_int {
+ ($T:ty) => {
+ impl From<$T> for BigInt {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigInt::from(n as i64)
+ }
+ }
+ };
+}
+
+impl_bigint_from_int!(i8);
+impl_bigint_from_int!(i16);
+impl_bigint_from_int!(i32);
+impl_bigint_from_int!(isize);
+
+impl From<u64> for BigInt {
+ #[inline]
+ fn from(n: u64) -> Self {
+ if n > 0 {
+ BigInt {
+ sign: Plus,
+ data: BigUint::from(n),
+ }
+ } else {
+ BigInt::zero()
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl From<u128> for BigInt {
+ #[inline]
+ fn from(n: u128) -> Self {
+ if n > 0 {
+ BigInt {
+ sign: Plus,
+ data: BigUint::from(n),
+ }
+ } else {
+ BigInt::zero()
+ }
+ }
+}
+
+macro_rules! impl_bigint_from_uint {
+ ($T:ty) => {
+ impl From<$T> for BigInt {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigInt::from(n as u64)
+ }
+ }
+ };
+}
+
+impl_bigint_from_uint!(u8);
+impl_bigint_from_uint!(u16);
+impl_bigint_from_uint!(u32);
+impl_bigint_from_uint!(usize);
+
+impl From<BigUint> for BigInt {
+ #[inline]
+ fn from(n: BigUint) -> Self {
+ if n.is_zero() {
+ BigInt::zero()
+ } else {
+ BigInt {
+ sign: Plus,
+ data: n,
+ }
+ }
+ }
+}
+
+impl IntDigits for BigInt {
+ #[inline]
+ fn digits(&self) -> &[BigDigit] {
+ self.data.digits()
+ }
+ #[inline]
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
+ self.data.digits_mut()
+ }
+ #[inline]
+ fn normalize(&mut self) {
+ self.data.normalize();
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len()
+ }
+}
+
+#[cfg(feature = "serde")]
+impl serde::Serialize for BigInt {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ // Note: do not change the serialization format, or it may break
+ // forward and backward compatibility of serialized data!
+ (self.sign, &self.data).serialize(serializer)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> serde::Deserialize<'de> for BigInt {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let (sign, data) = serde::Deserialize::deserialize(deserializer)?;
+ Ok(BigInt::from_biguint(sign, data))
+ }
+}
+
+/// A generic trait for converting a value to a `BigInt`. This may return
+/// `None` when converting from `f32` or `f64`, and will always succeed
+/// when converting from any signed or unsigned integer primitive, or from `BigUint`.
+pub trait ToBigInt {
+ /// Converts the value of `self` to a `BigInt`.
+ fn to_bigint(&self) -> Option<BigInt>;
+}
+
+impl ToBigInt for BigInt {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ Some(self.clone())
+ }
+}
+
+impl ToBigInt for BigUint {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ if self.is_zero() {
+ Some(Zero::zero())
+ } else {
+ Some(BigInt {
+ sign: Plus,
+ data: self.clone(),
+ })
+ }
+ }
+}
+
+impl biguint::ToBigUint for BigInt {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ match self.sign() {
+ Plus => Some(self.data.clone()),
+ NoSign => Some(Zero::zero()),
+ Minus => None,
+ }
+ }
+}
+
+macro_rules! impl_to_bigint {
+ ($T:ty, $from_ty:path) => {
+ impl ToBigInt for $T {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ $from_ty(*self)
+ }
+ }
+ };
+}
+
+impl_to_bigint!(isize, FromPrimitive::from_isize);
+impl_to_bigint!(i8, FromPrimitive::from_i8);
+impl_to_bigint!(i16, FromPrimitive::from_i16);
+impl_to_bigint!(i32, FromPrimitive::from_i32);
+impl_to_bigint!(i64, FromPrimitive::from_i64);
+#[cfg(has_i128)]
+impl_to_bigint!(i128, FromPrimitive::from_i128);
+
+impl_to_bigint!(usize, FromPrimitive::from_usize);
+impl_to_bigint!(u8, FromPrimitive::from_u8);
+impl_to_bigint!(u16, FromPrimitive::from_u16);
+impl_to_bigint!(u32, FromPrimitive::from_u32);
+impl_to_bigint!(u64, FromPrimitive::from_u64);
+#[cfg(has_i128)]
+impl_to_bigint!(u128, FromPrimitive::from_u128);
+
+impl_to_bigint!(f32, FromPrimitive::from_f32);
+impl_to_bigint!(f64, FromPrimitive::from_f64);
+
+impl BigInt {
+    /// Creates and initializes a `BigInt`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn new(sign: Sign, digits: Vec<u32>) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::new(digits))
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn from_biguint(mut sign: Sign, mut data: BigUint) -> BigInt {
+ if sign == NoSign {
+ data.assign_from_slice(&[]);
+ } else if data.is_zero() {
+ sign = NoSign;
+ }
+
+ BigInt {
+ sign: sign,
+ data: data,
+ }
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn from_slice(sign: Sign, slice: &[u32]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_slice(slice))
+ }
+
+ /// Reinitializes a `BigInt`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn assign_from_slice(&mut self, sign: Sign, slice: &[u32]) {
+ if sign == NoSign {
+ self.data.assign_from_slice(&[]);
+ self.sign = NoSign;
+ } else {
+ self.data.assign_from_slice(slice);
+ self.sign = match self.data.is_zero() {
+ true => NoSign,
+ false => sign,
+ }
+ }
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The bytes are in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"A"),
+ /// BigInt::parse_bytes(b"65", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AA"),
+ /// BigInt::parse_bytes(b"16705", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AB"),
+ /// BigInt::parse_bytes(b"16706", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"Hello world!"),
+ /// BigInt::parse_bytes(b"22405534230753963835153736737", 10).unwrap());
+ /// ```
+ #[inline]
+ pub fn from_bytes_be(sign: Sign, bytes: &[u8]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(bytes))
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The bytes are in little-endian byte order.
+ #[inline]
+ pub fn from_bytes_le(sign: Sign, bytes: &[u8]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(bytes))
+ }
+
+ /// Creates and initializes a `BigInt` from an array of bytes in
+ /// two's complement binary representation.
+ ///
+ /// The digits are in big-endian base 2<sup>8</sup>.
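+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// // Added illustrative example (not in the upstream source):
+    /// use num_bigint::BigInt;
+    ///
+    /// assert_eq!(BigInt::from_signed_bytes_be(&[251, 155]), BigInt::from(-1125));
+    /// ```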
+ #[inline]
+ pub fn from_signed_bytes_be(digits: &[u8]) -> BigInt {
+ let sign = match digits.first() {
+ Some(v) if *v > 0x7f => Sign::Minus,
+ Some(_) => Sign::Plus,
+ None => return BigInt::zero(),
+ };
+
+ if sign == Sign::Minus {
+ // two's-complement the content to retrieve the magnitude
+ let mut digits = Vec::from(digits);
+ twos_complement_be(&mut digits);
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(&*digits))
+ } else {
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(digits))
+ }
+ }
+
+ /// Creates and initializes a `BigInt` from an array of bytes in two's complement.
+ ///
+ /// The digits are in little-endian base 2<sup>8</sup>.
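+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// // Added illustrative example (not in the upstream source):
+    /// use num_bigint::BigInt;
+    ///
+    /// assert_eq!(BigInt::from_signed_bytes_le(&[155, 251]), BigInt::from(-1125));
+    /// ```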
+ #[inline]
+ pub fn from_signed_bytes_le(digits: &[u8]) -> BigInt {
+ let sign = match digits.last() {
+ Some(v) if *v > 0x7f => Sign::Minus,
+ Some(_) => Sign::Plus,
+ None => return BigInt::zero(),
+ };
+
+ if sign == Sign::Minus {
+ // two's-complement the content to retrieve the magnitude
+ let mut digits = Vec::from(digits);
+ twos_complement_le(&mut digits);
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(&*digits))
+ } else {
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(digits))
+ }
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, ToBigInt};
+ ///
+ /// assert_eq!(BigInt::parse_bytes(b"1234", 10), ToBigInt::to_bigint(&1234));
+ /// assert_eq!(BigInt::parse_bytes(b"ABCD", 16), ToBigInt::to_bigint(&0xABCD));
+ /// assert_eq!(BigInt::parse_bytes(b"G", 16), None);
+ /// ```
+ #[inline]
+ pub fn parse_bytes(buf: &[u8], radix: u32) -> Option<BigInt> {
+ str::from_utf8(buf)
+ .ok()
+ .and_then(|s| BigInt::from_str_radix(s, radix).ok())
+ }
+
+ /// Creates and initializes a `BigInt`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in big-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// let inbase190 = vec![15, 33, 125, 12, 14];
+ /// let a = BigInt::from_radix_be(Sign::Minus, &inbase190, 190).unwrap();
+    /// assert_eq!(a.to_radix_be(190), (Sign::Minus, inbase190));
+ /// ```
+ pub fn from_radix_be(sign: Sign, buf: &[u8], radix: u32) -> Option<BigInt> {
+ BigUint::from_radix_be(buf, radix).map(|u| BigInt::from_biguint(sign, u))
+ }
+
+ /// Creates and initializes a `BigInt`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in little-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// let inbase190 = vec![14, 12, 125, 33, 15];
+    /// let a = BigInt::from_radix_le(Sign::Minus, &inbase190, 190).unwrap();
+    /// assert_eq!(a.to_radix_le(190), (Sign::Minus, inbase190));
+ /// ```
+ pub fn from_radix_le(sign: Sign, buf: &[u8], radix: u32) -> Option<BigInt> {
+ BigUint::from_radix_le(buf, radix).map(|u| BigInt::from_biguint(sign, u))
+ }
+
+ /// Returns the sign and the byte representation of the `BigInt` in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_bytes_be(), (Sign::Minus, vec![4, 101]));
+ /// ```
+ #[inline]
+ pub fn to_bytes_be(&self) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_bytes_be())
+ }
+
+ /// Returns the sign and the byte representation of the `BigInt` in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_bytes_le(), (Sign::Minus, vec![101, 4]));
+ /// ```
+ #[inline]
+ pub fn to_bytes_le(&self) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_bytes_le())
+ }
+
+ /// Returns the sign and the `u32` digits representation of the `BigInt` ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-1125).to_u32_digits(), (Sign::Minus, vec![1125]));
+ /// assert_eq!(BigInt::from(4294967295u32).to_u32_digits(), (Sign::Plus, vec![4294967295]));
+ /// assert_eq!(BigInt::from(4294967296u64).to_u32_digits(), (Sign::Plus, vec![0, 1]));
+ /// assert_eq!(BigInt::from(-112500000000i64).to_u32_digits(), (Sign::Minus, vec![830850304, 26]));
+ /// assert_eq!(BigInt::from(112500000000i64).to_u32_digits(), (Sign::Plus, vec![830850304, 26]));
+ /// ```
+ #[inline]
+ pub fn to_u32_digits(&self) -> (Sign, Vec<u32>) {
+ (self.sign, self.data.to_u32_digits())
+ }
+
+ /// Returns the two's-complement byte representation of the `BigInt` in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::ToBigInt;
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_signed_bytes_be(), vec![251, 155]);
+ /// ```
+ #[inline]
+ pub fn to_signed_bytes_be(&self) -> Vec<u8> {
+ let mut bytes = self.data.to_bytes_be();
+ let first_byte = bytes.first().cloned().unwrap_or(0);
+ if first_byte > 0x7f
+ && !(first_byte == 0x80
+ && bytes.iter().skip(1).all(Zero::is_zero)
+ && self.sign == Sign::Minus)
+ {
+ // msb used by magnitude, extend by 1 byte
+ bytes.insert(0, 0);
+ }
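+        // Illustrative note (not in the upstream source): the `0x80` exception
+        // above keeps e.g. -128 as the single byte [0x80] instead of extending
+        // it to [0x00, 0x80] and producing the two-byte encoding [0xff, 0x80].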
+ if self.sign == Sign::Minus {
+ twos_complement_be(&mut bytes);
+ }
+ bytes
+ }
+
+ /// Returns the two's-complement byte representation of the `BigInt` in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::ToBigInt;
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_signed_bytes_le(), vec![155, 251]);
+ /// ```
+ #[inline]
+ pub fn to_signed_bytes_le(&self) -> Vec<u8> {
+ let mut bytes = self.data.to_bytes_le();
+ let last_byte = bytes.last().cloned().unwrap_or(0);
+ if last_byte > 0x7f
+ && !(last_byte == 0x80
+ && bytes.iter().rev().skip(1).all(Zero::is_zero)
+ && self.sign == Sign::Minus)
+ {
+ // msb used by magnitude, extend by 1 byte
+ bytes.push(0);
+ }
+ if self.sign == Sign::Minus {
+ twos_complement_le(&mut bytes);
+ }
+ bytes
+ }
+
+ /// Returns the integer formatted as a string in the given radix.
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigInt;
+ ///
+ /// let i = BigInt::parse_bytes(b"ff", 16).unwrap();
+ /// assert_eq!(i.to_str_radix(16), "ff");
+ /// ```
+ #[inline]
+ pub fn to_str_radix(&self, radix: u32) -> String {
+ let mut v = to_str_radix_reversed(&self.data, radix);
+
+ if self.is_negative() {
+ v.push(b'-');
+ }
+
+ v.reverse();
+ unsafe { String::from_utf8_unchecked(v) }
+ }
+
+ /// Returns the integer in the requested base in big-endian digit order.
+    /// The output is not given in a human-readable alphabet but as a
+    /// zero-based u8 number.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_be(159),
+ /// (Sign::Minus, vec![2, 94, 27]));
+ /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27
+ /// ```
+ #[inline]
+ pub fn to_radix_be(&self, radix: u32) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_radix_be(radix))
+ }
+
+ /// Returns the integer in the requested base in little-endian digit order.
+    /// The output is not given in a human-readable alphabet but as a
+    /// zero-based u8 number.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_le(159),
+ /// (Sign::Minus, vec![27, 94, 2]));
+ /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2)
+ /// ```
+ #[inline]
+ pub fn to_radix_le(&self, radix: u32) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_radix_le(radix))
+ }
+
+ /// Returns the sign of the `BigInt` as a `Sign`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// assert_eq!(ToBigInt::to_bigint(&1234).unwrap().sign(), Sign::Plus);
+ /// assert_eq!(ToBigInt::to_bigint(&-4321).unwrap().sign(), Sign::Minus);
+ /// assert_eq!(ToBigInt::to_bigint(&0).unwrap().sign(), Sign::NoSign);
+ /// ```
+ #[inline]
+ pub fn sign(&self) -> Sign {
+ self.sign
+ }
+
+ /// Determines the fewest bits necessary to express the `BigInt`,
+ /// not including the sign.
+ #[inline]
+ pub fn bits(&self) -> usize {
+ self.data.bits()
+ }
+
+ /// Converts this `BigInt` into a `BigUint`, if it's not negative.
+ #[inline]
+ pub fn to_biguint(&self) -> Option<BigUint> {
+ match self.sign {
+ Plus => Some(self.data.clone()),
+ NoSign => Some(Zero::zero()),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ pub fn checked_add(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.add(v))
+ }
+
+ #[inline]
+ pub fn checked_sub(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.sub(v))
+ }
+
+ #[inline]
+ pub fn checked_mul(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.mul(v))
+ }
+
+ #[inline]
+ pub fn checked_div(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+
+ /// Returns `(self ^ exponent) mod modulus`
+ ///
+ /// Note that this rounds like `mod_floor`, not like the `%` operator,
+ /// which makes a difference when given a negative `self` or `modulus`.
+ /// The result will be in the interval `[0, modulus)` for `modulus > 0`,
+    /// or in the interval `(modulus, 0]` for `modulus < 0`.
+ ///
+ /// Panics if the exponent is negative or the modulus is zero.
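+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// // Added illustrative example (not in the upstream source):
+    /// use num_bigint::BigInt;
+    ///
+    /// let base = BigInt::from(4);
+    /// let exponent = BigInt::from(13);
+    /// let modulus = BigInt::from(497);
+    /// assert_eq!(base.modpow(&exponent, &modulus), BigInt::from(445));
+    /// ```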
+ pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
+ assert!(
+ !exponent.is_negative(),
+ "negative exponentiation is not supported!"
+ );
+ assert!(!modulus.is_zero(), "divide by zero!");
+
+ let result = self.data.modpow(&exponent.data, &modulus.data);
+ if result.is_zero() {
+ return BigInt::zero();
+ }
+
+ // The sign of the result follows the modulus, like `mod_floor`.
+ let (sign, mag) = match (
+ self.is_negative() && exponent.is_odd(),
+ modulus.is_negative(),
+ ) {
+ (false, false) => (Plus, result),
+ (true, false) => (Plus, &modulus.data - result),
+ (false, true) => (Minus, &modulus.data - result),
+ (true, true) => (Minus, result),
+ };
+ BigInt::from_biguint(sign, mag)
+ }
+
+ /// Returns the truncated principal square root of `self` --
+ /// see [Roots::sqrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.sqrt).
+ pub fn sqrt(&self) -> Self {
+ Roots::sqrt(self)
+ }
+
+ /// Returns the truncated principal cube root of `self` --
+ /// see [Roots::cbrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.cbrt).
+ pub fn cbrt(&self) -> Self {
+ Roots::cbrt(self)
+ }
+
+ /// Returns the truncated principal `n`th root of `self` --
+ /// See [Roots::nth_root](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#tymethod.nth_root).
+ pub fn nth_root(&self, n: u32) -> Self {
+ Roots::nth_root(self, n)
+ }
+}
+
+impl_sum_iter_type!(BigInt);
+impl_product_iter_type!(BigInt);
+
+/// Perform in-place two's complement of the given binary representation,
+/// in little-endian byte order.
+#[inline]
+fn twos_complement_le(digits: &mut [u8]) {
+ twos_complement(digits)
+}
+
+/// Perform in-place two's complement of the given binary representation
+/// in big-endian byte order.
+#[inline]
+fn twos_complement_be(digits: &mut [u8]) {
+ twos_complement(digits.iter_mut().rev())
+}
+
+/// Perform in-place two's complement of the given digit iterator
+/// starting from the least significant byte.
+#[inline]
+fn twos_complement<'a, I>(digits: I)
+where
+ I: IntoIterator<Item = &'a mut u8>,
+{
+ let mut carry = true;
+ for d in digits {
+ *d = d.not();
+ if carry {
+ *d = d.wrapping_add(1);
+ carry = d.is_zero();
+ }
+ }
+}
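+
+// Worked example (illustrative, not in the upstream source): the little-endian
+// bytes [155, 251] encode -1125 in two's complement; inverting each byte and
+// propagating the +1 carry from the least significant byte yields [101, 4],
+// i.e. the magnitude 1125.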
+
+#[test]
+fn test_from_biguint() {
+ fn check(inp_s: Sign, inp_n: usize, ans_s: Sign, ans_n: usize) {
+ let inp = BigInt::from_biguint(inp_s, FromPrimitive::from_usize(inp_n).unwrap());
+ let ans = BigInt {
+ sign: ans_s,
+ data: FromPrimitive::from_usize(ans_n).unwrap(),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
+
+#[test]
+fn test_from_slice() {
+ fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) {
+ let inp = BigInt::from_slice(inp_s, &[inp_n]);
+ let ans = BigInt {
+ sign: ans_s,
+ data: FromPrimitive::from_u32(ans_n).unwrap(),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
+
+#[test]
+fn test_assign_from_slice() {
+ fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) {
+ let mut inp = BigInt::from_slice(Minus, &[2627_u32, 0_u32, 9182_u32, 42_u32]);
+ inp.assign_from_slice(inp_s, &[inp_n]);
+ let ans = BigInt {
+ sign: ans_s,
+ data: FromPrimitive::from_u32(ans_n).unwrap(),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
diff --git a/rust/vendor/num-bigint-0.2.6/src/bigrand.rs b/rust/vendor/num-bigint-0.2.6/src/bigrand.rs
new file mode 100644
index 0000000..69564dd
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/src/bigrand.rs
@@ -0,0 +1,223 @@
+//! Randomization of big integers
+
+use rand::distributions::uniform::{SampleUniform, UniformSampler};
+use rand::prelude::*;
+use rand::AsByteSliceMut;
+
+use BigInt;
+use BigUint;
+use Sign::*;
+
+use big_digit::BigDigit;
+use bigint::{into_magnitude, magnitude};
+
+use integer::Integer;
+use traits::Zero;
+
+/// A trait for sampling random big integers.
+///
+/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
+pub trait RandBigInt {
+ /// Generate a random `BigUint` of the given bit size.
+ fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
+
+ /// Generate a random BigInt of the given bit size.
+ fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
+
+ /// Generate a random `BigUint` less than the given bound. Fails
+ /// when the bound is zero.
+ fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
+
+ /// Generate a random `BigUint` within the given range. The lower
+ /// bound is inclusive; the upper bound is exclusive. Fails when
+ /// the upper bound is not greater than the lower bound.
+ fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
+
+ /// Generate a random `BigInt` within the given range. The lower
+ /// bound is inclusive; the upper bound is exclusive. Fails when
+ /// the upper bound is not greater than the lower bound.
+ fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
+}
+
+impl<R: Rng + ?Sized> RandBigInt for R {
+ fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
+ use super::big_digit::BITS;
+ let (digits, rem) = bit_size.div_rem(&BITS);
+ let mut data = vec![BigDigit::default(); digits + (rem > 0) as usize];
+ // `fill_bytes` is faster than many `gen::<u32>` calls
+ self.fill_bytes(data[..].as_byte_slice_mut());
+ // Swap bytes per the `Rng::fill` source. This might be
+ // unnecessary if reproducibility across architectures is not
+ // desired.
+ data.to_le();
+ if rem > 0 {
+ data[digits] >>= BITS - rem;
+ }
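+        // Illustrative note (not in the upstream source): the shift keeps only
+        // `rem` random bits in the top digit, so the result never exceeds
+        // `bit_size` bits.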
+ BigUint::new(data)
+ }
+
+ fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
+ loop {
+ // Generate a random BigUint...
+ let biguint = self.gen_biguint(bit_size);
+ // ...and then randomly assign it a Sign...
+ let sign = if biguint.is_zero() {
+ // ...except that if the BigUint is zero, we need to try
+ // again with probability 0.5. This is because otherwise,
+ // the probability of generating a zero BigInt would be
+ // double that of any other number.
+ if self.gen() {
+ continue;
+ } else {
+ NoSign
+ }
+ } else if self.gen() {
+ Plus
+ } else {
+ Minus
+ };
+ return BigInt::from_biguint(sign, biguint);
+ }
+ }
+
+ fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
+ assert!(!bound.is_zero());
+ let bits = bound.bits();
+ loop {
+ let n = self.gen_biguint(bits);
+ if n < *bound {
+ return n;
+ }
+ }
+ }
+
+ fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
+ assert!(*lbound < *ubound);
+ if lbound.is_zero() {
+ self.gen_biguint_below(ubound)
+ } else {
+ lbound + self.gen_biguint_below(&(ubound - lbound))
+ }
+ }
+
+ fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
+ assert!(*lbound < *ubound);
+ if lbound.is_zero() {
+ BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
+ } else if ubound.is_zero() {
+ lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
+ } else {
+ let delta = ubound - lbound;
+ lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
+ }
+ }
+}
+
+/// The back-end implementing rand's `UniformSampler` for `BigUint`.
+#[derive(Clone, Debug)]
+pub struct UniformBigUint {
+ base: BigUint,
+ len: BigUint,
+}
+
+impl UniformSampler for UniformBigUint {
+ type X = BigUint;
+
+ #[inline]
+ fn new(low: Self::X, high: Self::X) -> Self {
+ assert!(low < high);
+ UniformBigUint {
+ len: high - &low,
+ base: low,
+ }
+ }
+
+ #[inline]
+ fn new_inclusive(low: Self::X, high: Self::X) -> Self {
+ assert!(low <= high);
+ Self::new(low, high + 1u32)
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ &self.base + rng.gen_biguint_below(&self.len)
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized>(low: Self::X, high: Self::X, rng: &mut R) -> Self::X {
+ rng.gen_biguint_range(&low, &high)
+ }
+}
+
+impl SampleUniform for BigUint {
+ type Sampler = UniformBigUint;
+}
+
+/// The back-end implementing rand's `UniformSampler` for `BigInt`.
+#[derive(Clone, Debug)]
+pub struct UniformBigInt {
+ base: BigInt,
+ len: BigUint,
+}
+
+impl UniformSampler for UniformBigInt {
+ type X = BigInt;
+
+ #[inline]
+ fn new(low: Self::X, high: Self::X) -> Self {
+ assert!(low < high);
+ UniformBigInt {
+ len: into_magnitude(high - &low),
+ base: low,
+ }
+ }
+
+ #[inline]
+ fn new_inclusive(low: Self::X, high: Self::X) -> Self {
+ assert!(low <= high);
+ Self::new(low, high + 1u32)
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ &self.base + BigInt::from(rng.gen_biguint_below(&self.len))
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized>(low: Self::X, high: Self::X, rng: &mut R) -> Self::X {
+ rng.gen_bigint_range(&low, &high)
+ }
+}
+
+impl SampleUniform for BigInt {
+ type Sampler = UniformBigInt;
+}
+
+/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
+///
+/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
+#[derive(Clone, Copy, Debug)]
+pub struct RandomBits {
+ bits: usize,
+}
+
+impl RandomBits {
+ #[inline]
+ pub fn new(bits: usize) -> RandomBits {
+ RandomBits { bits }
+ }
+}
+
+impl Distribution<BigUint> for RandomBits {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
+ rng.gen_biguint(self.bits)
+ }
+}
+
+impl Distribution<BigInt> for RandomBits {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
+ rng.gen_bigint(self.bits)
+ }
+}
diff --git a/rust/vendor/num-bigint-0.2.6/src/biguint.rs b/rust/vendor/num-bigint-0.2.6/src/biguint.rs
new file mode 100644
index 0000000..6836342
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/src/biguint.rs
@@ -0,0 +1,3106 @@
+#[allow(deprecated, unused_imports)]
+use std::ascii::AsciiExt;
+use std::borrow::Cow;
+use std::cmp;
+use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::default::Default;
+use std::fmt;
+use std::iter::{Product, Sum};
+use std::mem;
+use std::ops::{
+ Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
+ Mul, MulAssign, Neg, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign,
+};
+use std::str::{self, FromStr};
+use std::{f32, f64};
+use std::{u64, u8};
+
+#[cfg(feature = "serde")]
+use serde;
+
+use integer::{Integer, Roots};
+use traits::{
+ CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Float, FromPrimitive, Num, One, Pow,
+ ToPrimitive, Unsigned, Zero,
+};
+
+use big_digit::{self, BigDigit};
+
+#[path = "algorithms.rs"]
+mod algorithms;
+#[path = "monty.rs"]
+mod monty;
+
+use self::algorithms::{__add2, __sub2rev, add2, sub2, sub2rev};
+use self::algorithms::{biguint_shl, biguint_shr};
+use self::algorithms::{cmp_slice, fls, ilog2};
+use self::algorithms::{div_rem, div_rem_digit, div_rem_ref, rem_digit};
+use self::algorithms::{mac_with_carry, mul3, scalar_mul};
+use self::monty::monty_modpow;
+
+use UsizePromotion;
+
+use ParseBigIntError;
+
+#[cfg(feature = "quickcheck")]
+use quickcheck::{Arbitrary, Gen};
+
+/// A big unsigned integer type.
+#[derive(Clone, Debug, Hash)]
+pub struct BigUint {
+ data: Vec<BigDigit>,
+}
+
+#[cfg(feature = "quickcheck")]
+impl Arbitrary for BigUint {
+ fn arbitrary<G: Gen>(g: &mut G) -> Self {
+ // Use arbitrary from Vec
+ Self::new(Vec::<u32>::arbitrary(g))
+ }
+
+ #[allow(bare_trait_objects)] // `dyn` needs Rust 1.27 to parse, even when cfg-disabled
+ fn shrink(&self) -> Box<Iterator<Item = Self>> {
+ // Use shrinker from Vec
+ Box::new(self.data.shrink().map(BigUint::new))
+ }
+}
+
+impl PartialEq for BigUint {
+ #[inline]
+ fn eq(&self, other: &BigUint) -> bool {
+ match self.cmp(other) {
+ Equal => true,
+ _ => false,
+ }
+ }
+}
+impl Eq for BigUint {}
+
+impl PartialOrd for BigUint {
+ #[inline]
+ fn partial_cmp(&self, other: &BigUint) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for BigUint {
+ #[inline]
+ fn cmp(&self, other: &BigUint) -> Ordering {
+ cmp_slice(&self.data[..], &other.data[..])
+ }
+}
+
+impl Default for BigUint {
+ #[inline]
+ fn default() -> BigUint {
+ Zero::zero()
+ }
+}
+
+impl fmt::Display for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(true, "", &self.to_str_radix(10))
+ }
+}
+
+impl fmt::LowerHex for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(true, "0x", &self.to_str_radix(16))
+ }
+}
+
+impl fmt::UpperHex for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let mut s = self.to_str_radix(16);
+ s.make_ascii_uppercase();
+ f.pad_integral(true, "0x", &s)
+ }
+}
+
+impl fmt::Binary for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(true, "0b", &self.to_str_radix(2))
+ }
+}
+
+impl fmt::Octal for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(true, "0o", &self.to_str_radix(8))
+ }
+}
+
+impl FromStr for BigUint {
+ type Err = ParseBigIntError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<BigUint, ParseBigIntError> {
+ BigUint::from_str_radix(s, 10)
+ }
+}
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides
+// BigDigit::BITS
+fn from_bitwise_digits_le(v: &[u8], bits: usize) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let digits_per_big_digit = big_digit::BITS / bits;
+
+ let data = v
+ .chunks(digits_per_big_digit)
+ .map(|chunk| {
+ chunk
+ .iter()
+ .rev()
+ .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c))
+ })
+ .collect();
+
+ BigUint::new(data)
+}
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide
+// BigDigit::BITS
+fn from_inexact_bitwise_digits_le(v: &[u8], bits: usize) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let big_digits = (v.len() * bits + big_digit::BITS - 1) / big_digit::BITS;
+ let mut data = Vec::with_capacity(big_digits);
+
+ let mut d = 0;
+ let mut dbits = 0; // number of bits we currently have in d
+
+ // walk v accumulating bits in d; whenever we accumulate big_digit::BITS in d, spit out a
+ // big_digit:
+ for &c in v {
+ d |= BigDigit::from(c) << dbits;
+ dbits += bits;
+
+ if dbits >= big_digit::BITS {
+ data.push(d);
+ dbits -= big_digit::BITS;
+ // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit
+ // in d) - grab the bits we lost here:
+ d = BigDigit::from(c) >> (bits - dbits);
+ }
+ }
+
+ if dbits > 0 {
+ debug_assert!(dbits < big_digit::BITS);
+ data.push(d as BigDigit);
+ }
+
+ BigUint::new(data)
+}
+
+// Read big-endian radix digits (most significant digit first)
+fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint {
+ debug_assert!(!v.is_empty() && !radix.is_power_of_two());
+ debug_assert!(v.iter().all(|&c| u32::from(c) < radix));
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ let bits = f64::from(radix).log2() * v.len() as f64;
+ let big_digits = (bits / big_digit::BITS as f64).ceil();
+ let mut data = Vec::with_capacity(big_digits as usize);
+
+ let (base, power) = get_radix_base(radix);
+ let radix = radix as BigDigit;
+
+ let r = v.len() % power;
+ let i = if r == 0 { power } else { r };
+ let (head, tail) = v.split_at(i);
+
+ let first = head
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ data.push(first);
+
+ debug_assert!(tail.len() % power == 0);
+ for chunk in tail.chunks(power) {
+ if data.last() != Some(&0) {
+ data.push(0);
+ }
+
+ let mut carry = 0;
+ for d in data.iter_mut() {
+ *d = mac_with_carry(0, *d, base, &mut carry);
+ }
+ debug_assert!(carry == 0);
+
+ let n = chunk
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ add2(&mut data, &[n]);
+ }
+
+ BigUint::new(data)
+}
+
+impl Num for BigUint {
+ type FromStrRadixErr = ParseBigIntError;
+
+ /// Creates and initializes a `BigUint`.
+ fn from_str_radix(s: &str, radix: u32) -> Result<BigUint, ParseBigIntError> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+ let mut s = s;
+ if s.starts_with('+') {
+ let tail = &s[1..];
+ if !tail.starts_with('+') {
+ s = tail
+ }
+ }
+
+ if s.is_empty() {
+ return Err(ParseBigIntError::empty());
+ }
+
+ if s.starts_with('_') {
+ // Must lead with a real digit!
+ return Err(ParseBigIntError::invalid());
+ }
+
+ // First normalize all characters to plain digit values
+ let mut v = Vec::with_capacity(s.len());
+ for b in s.bytes() {
+ #[allow(unknown_lints, ellipsis_inclusive_range_patterns)]
+ let d = match b {
+ b'0'...b'9' => b - b'0',
+ b'a'...b'z' => b - b'a' + 10,
+ b'A'...b'Z' => b - b'A' + 10,
+ b'_' => continue,
+ _ => u8::MAX,
+ };
+ if d < radix as u8 {
+ v.push(d);
+ } else {
+ return Err(ParseBigIntError::invalid());
+ }
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(&v, radix)
+ };
+ Ok(res)
+ }
+}
+
+forward_val_val_binop!(impl BitAnd for BigUint, bitand);
+forward_ref_val_binop!(impl BitAnd for BigUint, bitand);
+
+// do not use forward_ref_ref_binop_commutative! for bitand so that we can
+// clone the smaller value rather than the larger, avoiding over-allocation
+impl<'a, 'b> BitAnd<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(self, other: &BigUint) -> BigUint {
+ // forward to val-ref, choosing the smaller to clone
+ if self.data.len() <= other.data.len() {
+ self.clone() & other
+ } else {
+ other.clone() & self
+ }
+ }
+}
+
+forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign);
+
+impl<'a> BitAnd<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(mut self, other: &BigUint) -> BigUint {
+ self &= other;
+ self
+ }
+}
+impl<'a> BitAndAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn bitand_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai &= bi;
+ }
+ self.data.truncate(other.data.len());
+ self.normalize();
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor);
+forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign);
+
+impl<'a> BitOr<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitor(mut self, other: &BigUint) -> BigUint {
+ self |= other;
+ self
+ }
+}
+impl<'a> BitOrAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn bitor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai |= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor);
+forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign);
+
+impl<'a> BitXor<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitxor(mut self, other: &BigUint) -> BigUint {
+ self ^= other;
+ self
+ }
+}
+impl<'a> BitXorAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn bitxor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai ^= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ self.normalize();
+ }
+}
+
+impl Shl<usize> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: usize) -> BigUint {
+ biguint_shl(Cow::Owned(self), rhs)
+ }
+}
+impl<'a> Shl<usize> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: usize) -> BigUint {
+ biguint_shl(Cow::Borrowed(self), rhs)
+ }
+}
+
+impl ShlAssign<usize> for BigUint {
+ #[inline]
+ fn shl_assign(&mut self, rhs: usize) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n << rhs;
+ }
+}
+
+impl Shr<usize> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: usize) -> BigUint {
+ biguint_shr(Cow::Owned(self), rhs)
+ }
+}
+impl<'a> Shr<usize> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: usize) -> BigUint {
+ biguint_shr(Cow::Borrowed(self), rhs)
+ }
+}
+
+impl ShrAssign<usize> for BigUint {
+ #[inline]
+ fn shr_assign(&mut self, rhs: usize) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n >> rhs;
+ }
+}
+
+impl Zero for BigUint {
+ #[inline]
+ fn zero() -> BigUint {
+ BigUint::new(Vec::new())
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.data.clear();
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.data.is_empty()
+ }
+}
+
+impl One for BigUint {
+ #[inline]
+ fn one() -> BigUint {
+ BigUint::new(vec![1])
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.data.clear();
+ self.data.push(1);
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.data[..] == [1]
+ }
+}
+
+impl Unsigned for BigUint {}
+
+impl<'a> Pow<BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: BigUint) -> Self::Output {
+ self.pow(&exp)
+ }
+}
+
+impl<'a, 'b> Pow<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &BigUint) -> Self::Output {
+ if self.is_one() || exp.is_zero() {
+ BigUint::one()
+ } else if self.is_zero() {
+ BigUint::zero()
+ } else if let Some(exp) = exp.to_u64() {
+ self.pow(exp)
+ } else {
+ // At this point, `self >= 2` and `exp >= 2⁶⁴`. The smallest possible result
+ // given `2.pow(2⁶⁴)` would take 2.3 exabytes of memory!
+ panic!("memory overflow")
+ }
+ }
+}
+
+macro_rules! pow_impl {
+ ($T:ty) => {
+ impl<'a> Pow<$T> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, mut exp: $T) -> Self::Output {
+ if exp == 0 {
+ return BigUint::one();
+ }
+ let mut base = self.clone();
+
+ while exp & 1 == 0 {
+ base = &base * &base;
+ exp >>= 1;
+ }
+
+ if exp == 1 {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ while exp > 1 {
+ exp >>= 1;
+ base = &base * &base;
+ if exp & 1 == 1 {
+ acc = &acc * &base;
+ }
+ }
+ acc
+ }
+ }
+
+ impl<'a, 'b> Pow<&'b $T> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &$T) -> Self::Output {
+ self.pow(*exp)
+ }
+ }
+ };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+#[cfg(has_i128)]
+pow_impl!(u128);
+
+forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add);
+forward_val_assign!(impl AddAssign for BigUint, add_assign);
+
+impl<'a> Add<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn add(mut self, other: &BigUint) -> BigUint {
+ self += other;
+ self
+ }
+}
+impl<'a> AddAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: &BigUint) {
+ let self_len = self.data.len();
+ let carry = if self_len < other.data.len() {
+ let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]);
+ self.data.extend_from_slice(&other.data[self_len..]);
+ __add2(&mut self.data[self_len..], &[lo_carry])
+ } else {
+ __add2(&mut self.data[..], &other.data[..])
+ };
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+}
+
+promote_unsigned_scalars!(impl Add for BigUint, add);
+promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigUint, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigUint, add);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigUint, add);
+
+impl Add<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u32) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u32> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: u32) {
+ if other != 0 {
+ if self.data.is_empty() {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[other as BigDigit]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Add<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u64) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u64> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ if hi == 0 {
+ *self += lo;
+ } else {
+ while self.data.len() < 2 {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[lo, hi]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Add<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u128) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+#[cfg(has_i128)]
+impl AddAssign<u128> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ if other <= u128::from(u64::max_value()) {
+ *self += other as u64
+ } else {
+ let (a, b, c, d) = u32_from_u128(other);
+ let carry = if a > 0 {
+ while self.data.len() < 4 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b, a])
+ } else {
+ debug_assert!(b > 0);
+ while self.data.len() < 3 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b])
+ };
+
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+forward_val_val_binop!(impl Sub for BigUint, sub);
+forward_ref_ref_binop!(impl Sub for BigUint, sub);
+forward_val_assign!(impl SubAssign for BigUint, sub_assign);
+
+impl<'a> Sub<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn sub(mut self, other: &BigUint) -> BigUint {
+ self -= other;
+ self
+ }
+}
+impl<'a> SubAssign<&'a BigUint> for BigUint {
+ fn sub_assign(&mut self, other: &'a BigUint) {
+ sub2(&mut self.data[..], &other.data[..]);
+ self.normalize();
+ }
+}
+
+impl<'a> Sub<BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ fn sub(self, mut other: BigUint) -> BigUint {
+ let other_len = other.data.len();
+ if other_len < self.data.len() {
+ let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data);
+ other.data.extend_from_slice(&self.data[other_len..]);
+ if lo_borrow != 0 {
+ sub2(&mut other.data[other_len..], &[1])
+ }
+ } else {
+ sub2rev(&self.data[..], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+promote_unsigned_scalars!(impl Sub for BigUint, sub);
+promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigUint, sub);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigUint, sub);
+
+impl Sub<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u32) -> BigUint {
+ self -= other;
+ self
+ }
+}
+impl SubAssign<u32> for BigUint {
+ fn sub_assign(&mut self, other: u32) {
+ sub2(&mut self.data[..], &[other as BigDigit]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self as BigDigit);
+ } else {
+ sub2rev(&[self as BigDigit], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+impl Sub<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u64) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u64> for BigUint {
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ sub2(&mut self.data[..], &[lo, hi]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 2 {
+ other.data.push(0);
+ }
+
+ let (hi, lo) = big_digit::from_doublebigdigit(self);
+ sub2rev(&[lo, hi], &mut other.data[..]);
+ other.normalized()
+ }
+}
+
+#[cfg(has_i128)]
+impl Sub<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u128) -> BigUint {
+ self -= other;
+ self
+ }
+}
+#[cfg(has_i128)]
+impl SubAssign<u128> for BigUint {
+ fn sub_assign(&mut self, other: u128) {
+ let (a, b, c, d) = u32_from_u128(other);
+ sub2(&mut self.data[..], &[d, c, b, a]);
+ self.normalize();
+ }
+}
+
+#[cfg(has_i128)]
+impl Sub<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 4 {
+ other.data.push(0);
+ }
+
+ let (a, b, c, d) = u32_from_u128(self);
+ sub2rev(&[d, c, b, a], &mut other.data[..]);
+ other.normalized()
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Mul for BigUint, mul);
+forward_val_assign!(impl MulAssign for BigUint, mul_assign);
+
+impl<'a, 'b> Mul<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(self, other: &BigUint) -> BigUint {
+ mul3(&self.data[..], &other.data[..])
+ }
+}
+impl<'a> MulAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: &'a BigUint) {
+ *self = &*self * other
+ }
+}
+
+promote_unsigned_scalars!(impl Mul for BigUint, mul);
+promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigUint, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigUint, mul);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigUint, mul);
+
+impl Mul<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u32) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u32> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: u32) {
+ if other == 0 {
+ self.data.clear();
+ } else {
+ let carry = scalar_mul(&mut self.data[..], other as BigDigit);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Mul<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u64) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u64> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ if other == 0 {
+ self.data.clear();
+ } else if other <= u64::from(BigDigit::max_value()) {
+ *self *= other as BigDigit
+ } else {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ *self = mul3(&self.data[..], &[lo, hi])
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Mul<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u128) -> BigUint {
+ self *= other;
+ self
+ }
+}
+#[cfg(has_i128)]
+impl MulAssign<u128> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ if other == 0 {
+ self.data.clear();
+ } else if other <= u128::from(BigDigit::max_value()) {
+ *self *= other as BigDigit
+ } else {
+ let (a, b, c, d) = u32_from_u128(other);
+ *self = mul3(&self.data[..], &[d, c, b, a])
+ }
+ }
+}
+
+forward_val_ref_binop!(impl Div for BigUint, div);
+forward_ref_val_binop!(impl Div for BigUint, div);
+forward_val_assign!(impl DivAssign for BigUint, div_assign);
+
+impl Div<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ let (q, _) = div_rem(self, other);
+ q
+ }
+}
+
+impl<'a, 'b> Div<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: &BigUint) -> BigUint {
+ let (q, _) = self.div_rem(other);
+ q
+ }
+}
+impl<'a> DivAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: &'a BigUint) {
+ *self = &*self / other;
+ }
+}
+
+promote_unsigned_scalars!(impl Div for BigUint, div);
+promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign);
+forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigUint, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigUint, div);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigUint, div);
+
+impl Div<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u32) -> BigUint {
+ let (q, _) = div_rem_digit(self, other as BigDigit);
+ q
+ }
+}
+impl DivAssign<u32> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u32) {
+ *self = &*self / other;
+ }
+}
+
+impl Div<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!(),
+ 1 => From::from(self as BigDigit / other.data[0]),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+impl Div<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u64) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+impl DivAssign<u64> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u64) {
+ // a vec of size 0 does not allocate, so this is fairly cheap
+ let temp = mem::replace(self, Zero::zero());
+ *self = temp / other;
+ }
+}
+
+impl Div<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!(),
+ 1 => From::from(self / u64::from(other.data[0])),
+ 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u128) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+#[cfg(has_i128)]
+impl DivAssign<u128> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u128) {
+ *self = &*self / other;
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!(),
+ 1 => From::from(self / u128::from(other.data[0])),
+ 2 => From::from(
+ self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ ),
+ 3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])),
+ 4 => From::from(
+ self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]),
+ ),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+forward_val_ref_binop!(impl Rem for BigUint, rem);
+forward_ref_val_binop!(impl Rem for BigUint, rem);
+forward_val_assign!(impl RemAssign for BigUint, rem_assign);
+
+impl Rem<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: BigUint) -> BigUint {
+ let (_, r) = div_rem(self, other);
+ r
+ }
+}
+
+impl<'a, 'b> Rem<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: &BigUint) -> BigUint {
+ let (_, r) = self.div_rem(other);
+ r
+ }
+}
+impl<'a> RemAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = &*self % other;
+ }
+}
+
+promote_unsigned_scalars!(impl Rem for BigUint, rem);
+promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign);
+forward_all_scalar_binop_to_ref_val!(impl Rem<u32> for BigUint, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigUint, rem);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigUint, rem);
+
+impl<'a> Rem<u32> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u32) -> BigUint {
+ From::from(rem_digit(self, other as BigDigit))
+ }
+}
+impl RemAssign<u32> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u32) {
+ *self = &*self % other;
+ }
+}
+
+impl<'a> Rem<&'a BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: &'a BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+macro_rules! impl_rem_assign_scalar {
+ ($scalar:ty, $to_scalar:ident) => {
+ forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign);
+ impl<'a> RemAssign<&'a BigUint> for $scalar {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = match other.$to_scalar() {
+ None => *self,
+ Some(0) => panic!(),
+ Some(v) => *self % v
+ };
+ }
+ }
+ }
+}
+// we can scalar %= BigUint for any scalar, including signed types
+#[cfg(has_i128)]
+impl_rem_assign_scalar!(u128, to_u128);
+impl_rem_assign_scalar!(usize, to_usize);
+impl_rem_assign_scalar!(u64, to_u64);
+impl_rem_assign_scalar!(u32, to_u32);
+impl_rem_assign_scalar!(u16, to_u16);
+impl_rem_assign_scalar!(u8, to_u8);
+#[cfg(has_i128)]
+impl_rem_assign_scalar!(i128, to_i128);
+impl_rem_assign_scalar!(isize, to_isize);
+impl_rem_assign_scalar!(i64, to_i64);
+impl_rem_assign_scalar!(i32, to_i32);
+impl_rem_assign_scalar!(i16, to_i16);
+impl_rem_assign_scalar!(i8, to_i8);
+
+impl Rem<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u64) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+impl RemAssign<u64> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u64) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u128) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+#[cfg(has_i128)]
+impl RemAssign<u128> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u128) {
+ *self = &*self % other;
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+impl Neg for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn neg(self) -> BigUint {
+ panic!()
+ }
+}
+
+impl<'a> Neg for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn neg(self) -> BigUint {
+ panic!()
+ }
+}
+
+impl CheckedAdd for BigUint {
+ #[inline]
+ fn checked_add(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.add(v))
+ }
+}
+
+impl CheckedSub for BigUint {
+ #[inline]
+ fn checked_sub(&self, v: &BigUint) -> Option<BigUint> {
+ match self.cmp(v) {
+ Less => None,
+ Equal => Some(Zero::zero()),
+ Greater => Some(self.sub(v)),
+ }
+ }
+}
+
+impl CheckedMul for BigUint {
+ #[inline]
+ fn checked_mul(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.mul(v))
+ }
+}
+
+impl CheckedDiv for BigUint {
+ #[inline]
+ fn checked_div(&self, v: &BigUint) -> Option<BigUint> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+}
+
+impl Integer for BigUint {
+ #[inline]
+ fn div_rem(&self, other: &BigUint) -> (BigUint, BigUint) {
+ div_rem_ref(self, other)
+ }
+
+ #[inline]
+ fn div_floor(&self, other: &BigUint) -> BigUint {
+ let (d, _) = div_rem_ref(self, other);
+ d
+ }
+
+ #[inline]
+ fn mod_floor(&self, other: &BigUint) -> BigUint {
+ let (_, m) = div_rem_ref(self, other);
+ m
+ }
+
+ #[inline]
+ fn div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint) {
+ div_rem_ref(self, other)
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and `other`.
+ ///
+ /// The result is always positive.
+ #[inline]
+ fn gcd(&self, other: &Self) -> Self {
+ #[inline]
+ fn twos(x: &BigUint) -> usize {
+ trailing_zeros(x).unwrap_or(0)
+ }
+
+ // Stein's algorithm
+ if self.is_zero() {
+ return other.clone();
+ }
+ if other.is_zero() {
+ return self.clone();
+ }
+ let mut m = self.clone();
+ let mut n = other.clone();
+
+ // find common factors of 2
+ let shift = cmp::min(twos(&n), twos(&m));
+
+ // divide m and n by 2 until odd
+ // m inside loop
+ n >>= twos(&n);
+
+ while !m.is_zero() {
+ m >>= twos(&m);
+ if n > m {
+ mem::swap(&mut n, &mut m)
+ }
+ m -= &n;
+ }
+
+ n << shift
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn lcm(&self, other: &BigUint) -> BigUint {
+ if self.is_zero() && other.is_zero() {
+ Self::zero()
+ } else {
+ self / self.gcd(other) * other
+ }
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &BigUint) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &BigUint) -> bool {
+ (self % other).is_zero()
+ }
+
+ /// Returns `true` if the number is divisible by `2`.
+ #[inline]
+ fn is_even(&self) -> bool {
+ // Considering only the least-significant digit.
+ match self.data.first() {
+ Some(x) => x.is_even(),
+ None => true,
+ }
+ }
+
+ /// Returns `true` if the number is not divisible by `2`.
+ #[inline]
+ fn is_odd(&self) -> bool {
+ !self.is_even()
+ }
+}
+
+#[inline]
+fn fixpoint<F>(mut x: BigUint, max_bits: usize, f: F) -> BigUint
+where
+ F: Fn(&BigUint) -> BigUint,
+{
+ let mut xn = f(&x);
+
+ // If the value increased, then the initial guess must have been low.
+ // Repeat until we reverse course.
+ while x < xn {
+ // Sometimes an increase will go way too far, especially with large
+ // powers, and then take a long time to walk back. We know an upper
+ // bound based on bit size, so saturate on that.
+ x = if xn.bits() > max_bits {
+ BigUint::one() << max_bits
+ } else {
+ xn
+ };
+ xn = f(&x);
+ }
+
+ // Now keep repeating while the estimate is decreasing.
+ while x > xn {
+ x = xn;
+ xn = f(&x);
+ }
+ x
+}
+
+impl Roots for BigUint {
+ // nth_root, sqrt and cbrt use Newton's method to compute
+ // principal root of a given degree for a given integer.
+
+ // Reference:
+ // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.14
+ fn nth_root(&self, n: u32) -> Self {
+ assert!(n > 0, "root degree n must be at least 1");
+
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ match n {
+ // Optimize for small n
+ 1 => return self.clone(),
+ 2 => return self.sqrt(),
+ 3 => return self.cbrt(),
+ _ => (),
+ }
+
+ // The root of non-zero values less than 2ⁿ can only be 1.
+ let bits = self.bits();
+ if bits <= n as usize {
+ return BigUint::one();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.nth_root(n).into();
+ }
+
+ let max_bits = bits / n as usize + 1;
+
+ let guess = if let Some(f) = self.to_f64() {
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64((f.ln() / f64::from(n)).exp()).unwrap()
+ } else {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ)
+ let nsz = n as usize;
+ let extra_bits = bits - (f64::MAX_EXP as usize - 1);
+ let root_scale = (extra_bits + (nsz - 1)) / nsz;
+ let scale = root_scale * nsz;
+ if scale < bits && bits - scale > nsz {
+ (self >> scale).nth_root(n) << root_scale
+ } else {
+ BigUint::one() << max_bits
+ }
+ };
+
+ let n_min_1 = n - 1;
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / s.pow(n_min_1);
+ let t = n_min_1 * s + q;
+ t / n
+ })
+ }
+
+ // Reference:
+ // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13
+ fn sqrt(&self) -> Self {
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.sqrt().into();
+ }
+
+ let bits = self.bits();
+ let max_bits = bits / 2 as usize + 1;
+
+ let guess = if let Some(f) = self.to_f64() {
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64(f.sqrt()).unwrap()
+ } else {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ)
+ let extra_bits = bits - (f64::MAX_EXP as usize - 1);
+ let root_scale = (extra_bits + 1) / 2;
+ let scale = root_scale * 2;
+ (self >> scale).sqrt() << root_scale
+ };
+
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / s;
+ let t = s + q;
+ t >> 1
+ })
+ }
+
+ fn cbrt(&self) -> Self {
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.cbrt().into();
+ }
+
+ let bits = self.bits();
+ let max_bits = bits / 3 as usize + 1;
+
+ let guess = if let Some(f) = self.to_f64() {
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64(f.cbrt()).unwrap()
+ } else {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ)
+ let extra_bits = bits - (f64::MAX_EXP as usize - 1);
+ let root_scale = (extra_bits + 2) / 3;
+ let scale = root_scale * 3;
+ (self >> scale).cbrt() << root_scale
+ };
+
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / (s * s);
+ let t = (s << 1) + q;
+ t / 3u32
+ })
+ }
+}
+
+fn high_bits_to_u64(v: &BigUint) -> u64 {
+ match v.data.len() {
+ 0 => 0,
+ 1 => u64::from(v.data[0]),
+ _ => {
+ let mut bits = v.bits();
+ let mut ret = 0u64;
+ let mut ret_bits = 0;
+
+ for d in v.data.iter().rev() {
+ let digit_bits = (bits - 1) % big_digit::BITS + 1;
+ let bits_want = cmp::min(64 - ret_bits, digit_bits);
+
+ if bits_want != 64 {
+ ret <<= bits_want;
+ }
+ ret |= u64::from(*d) >> (digit_bits - bits_want);
+ ret_bits += bits_want;
+ bits -= bits_want;
+
+ if ret_bits == 64 {
+ break;
+ }
+ }
+
+ ret
+ }
+ }
+}
+
+impl ToPrimitive for BigUint {
+ #[inline]
+ fn to_i64(&self) -> Option<i64> {
+ self.to_u64().as_ref().and_then(u64::to_i64)
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn to_i128(&self) -> Option<i128> {
+ self.to_u128().as_ref().and_then(u128::to_i128)
+ }
+
+ #[inline]
+ fn to_u64(&self) -> Option<u64> {
+ let mut ret: u64 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 64 {
+ return None;
+ }
+
+ ret += u64::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn to_u128(&self) -> Option<u128> {
+ let mut ret: u128 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 128 {
+ return None;
+ }
+
+ ret |= u128::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - fls(mantissa);
+
+ if exponent > f32::MAX_EXP as usize {
+ None
+ } else {
+ let ret = (mantissa as f32) * 2.0f32.powi(exponent as i32);
+ if ret.is_infinite() {
+ None
+ } else {
+ Some(ret)
+ }
+ }
+ }
+
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - fls(mantissa);
+
+ if exponent > f64::MAX_EXP as usize {
+ None
+ } else {
+ let ret = (mantissa as f64) * 2.0f64.powi(exponent as i32);
+ if ret.is_infinite() {
+ None
+ } else {
+ Some(ret)
+ }
+ }
+ }
+}
+
+impl FromPrimitive for BigUint {
+ #[inline]
+ fn from_i64(n: i64) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u64))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn from_i128(n: i128) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u128))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn from_u64(n: u64) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn from_u128(n: u128) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ fn from_f64(mut n: f64) -> Option<BigUint> {
+ // handle NAN, INFINITY, NEG_INFINITY
+ if !n.is_finite() {
+ return None;
+ }
+
+ // match the rounding of casting from float to int
+ n = n.trunc();
+
+ // handle 0.x, -0.x
+ if n.is_zero() {
+ return Some(BigUint::zero());
+ }
+
+ let (mantissa, exponent, sign) = Float::integer_decode(n);
+
+ if sign == -1 {
+ return None;
+ }
+
+ let mut ret = BigUint::from(mantissa);
+ if exponent > 0 {
+ ret <<= exponent as usize;
+ } else if exponent < 0 {
+ ret >>= (-exponent) as usize;
+ }
+ Some(ret)
+ }
+}
+
+impl From<u64> for BigUint {
+ #[inline]
+ fn from(mut n: u64) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ // don't overflow if BITS is 64:
+ n = (n >> 1) >> (big_digit::BITS - 1);
+ }
+
+ ret
+ }
+}
+
+#[cfg(has_i128)]
+impl From<u128> for BigUint {
+ #[inline]
+ fn from(mut n: u128) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ n >>= big_digit::BITS;
+ }
+
+ ret
+ }
+}
+
+macro_rules! impl_biguint_from_uint {
+ ($T:ty) => {
+ impl From<$T> for BigUint {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigUint::from(n as u64)
+ }
+ }
+ };
+}
+
+impl_biguint_from_uint!(u8);
+impl_biguint_from_uint!(u16);
+impl_biguint_from_uint!(u32);
+impl_biguint_from_uint!(usize);
+
+/// A generic trait for converting a value to a `BigUint`.
+pub trait ToBigUint {
+ /// Converts the value of `self` to a `BigUint`.
+ fn to_biguint(&self) -> Option<BigUint>;
+}
+
+impl ToBigUint for BigUint {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ Some(self.clone())
+ }
+}
+
+macro_rules! impl_to_biguint {
+ ($T:ty, $from_ty:path) => {
+ impl ToBigUint for $T {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ $from_ty(*self)
+ }
+ }
+ };
+}
+
+impl_to_biguint!(isize, FromPrimitive::from_isize);
+impl_to_biguint!(i8, FromPrimitive::from_i8);
+impl_to_biguint!(i16, FromPrimitive::from_i16);
+impl_to_biguint!(i32, FromPrimitive::from_i32);
+impl_to_biguint!(i64, FromPrimitive::from_i64);
+#[cfg(has_i128)]
+impl_to_biguint!(i128, FromPrimitive::from_i128);
+
+impl_to_biguint!(usize, FromPrimitive::from_usize);
+impl_to_biguint!(u8, FromPrimitive::from_u8);
+impl_to_biguint!(u16, FromPrimitive::from_u16);
+impl_to_biguint!(u32, FromPrimitive::from_u32);
+impl_to_biguint!(u64, FromPrimitive::from_u64);
+#[cfg(has_i128)]
+impl_to_biguint!(u128, FromPrimitive::from_u128);
+
+impl_to_biguint!(f32, FromPrimitive::from_f32);
+impl_to_biguint!(f64, FromPrimitive::from_f64);
+
+// Extract bitwise digits that evenly divide BigDigit
+fn to_bitwise_digits_le(u: &BigUint, bits: usize) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0);
+
+ let last_i = u.data.len() - 1;
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits_per_big_digit = big_digit::BITS / bits;
+ let digits = (u.bits() + bits - 1) / bits;
+ let mut res = Vec::with_capacity(digits);
+
+ for mut r in u.data[..last_i].iter().cloned() {
+ for _ in 0..digits_per_big_digit {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+ }
+
+ let mut r = u.data[last_i];
+ while r != 0 {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+
+ res
+}
+
+// Extract bitwise digits that don't evenly divide BigDigit
+fn to_inexact_bitwise_digits_le(u: &BigUint, bits: usize) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0);
+
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits = (u.bits() + bits - 1) / bits;
+ let mut res = Vec::with_capacity(digits);
+
+ let mut r = 0;
+ let mut rbits = 0;
+
+ for c in &u.data {
+ r |= *c << rbits;
+ rbits += big_digit::BITS;
+
+ while rbits >= bits {
+ res.push((r & mask) as u8);
+ r >>= bits;
+
+ // r had more bits than it could fit - grab the bits we lost
+ if rbits > big_digit::BITS {
+ r = *c >> (big_digit::BITS - (rbits - bits));
+ }
+
+ rbits -= bits;
+ }
+ }
+
+ if rbits != 0 {
+ res.push(r as u8);
+ }
+
+ while let Some(&0) = res.last() {
+ res.pop();
+ }
+
+ res
+}
+
+// Extract little-endian radix digits
+#[inline(always)] // forced inline to get const-prop for radix=10
+fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && !radix.is_power_of_two());
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ let radix_digits = ((u.bits() as f64) / f64::from(radix).log2()).ceil();
+ let mut res = Vec::with_capacity(radix_digits as usize);
+ let mut digits = u.clone();
+
+ let (base, power) = get_radix_base(radix);
+ let radix = radix as BigDigit;
+
+ while digits.data.len() > 1 {
+ let (q, mut r) = div_rem_digit(digits, base);
+ for _ in 0..power {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+ digits = q;
+ }
+
+ let mut r = digits.data[0];
+ while r != 0 {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+
+ res
+}
+
+pub fn to_radix_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ if u.is_zero() {
+ vec![0]
+ } else if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of division
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ to_bitwise_digits_le(u, bits)
+ } else {
+ to_inexact_bitwise_digits_le(u, bits)
+ }
+ } else if radix == 10 {
+ // 10 is so common that it's worth separating out for const-propagation.
+ // Optimizers can often turn constant division into a faster multiplication.
+ to_radix_digits_le(u, 10)
+ } else {
+ to_radix_digits_le(u, radix)
+ }
+}
+
+pub fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec<u8> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+
+ if u.is_zero() {
+ return vec![b'0'];
+ }
+
+ let mut res = to_radix_le(u, radix);
+
+ // Now convert everything to ASCII digits.
+ for r in &mut res {
+ debug_assert!(u32::from(*r) < radix);
+ if *r < 10 {
+ *r += b'0';
+ } else {
+ *r += b'a' - 10;
+ }
+ }
+ res
+}
+
+impl BigUint {
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn new(digits: Vec<u32>) -> BigUint {
+ BigUint { data: digits }.normalized()
+ }
+
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn from_slice(slice: &[u32]) -> BigUint {
+ BigUint::new(slice.to_vec())
+ }
+
+ /// Assign a value to a `BigUint`.
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
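+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// // Illustrative digits: [3, 2] means 3 + 2 * 2^32.
+ /// let mut a = BigUint::from(0u32);
+ /// a.assign_from_slice(&[3, 2]);
+ /// assert_eq!(a, BigUint::from(0x2_0000_0003u64));
+ /// ```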
+ #[inline]
+ pub fn assign_from_slice(&mut self, slice: &[u32]) {
+ self.data.resize(slice.len(), 0);
+ self.data.clone_from_slice(slice);
+ self.normalize();
+ }
+
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The bytes are in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from_bytes_be(b"A"),
+ /// BigUint::parse_bytes(b"65", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"AA"),
+ /// BigUint::parse_bytes(b"16705", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"AB"),
+ /// BigUint::parse_bytes(b"16706", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"Hello world!"),
+ /// BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap());
+ /// ```
+ #[inline]
+ pub fn from_bytes_be(bytes: &[u8]) -> BigUint {
+ if bytes.is_empty() {
+ Zero::zero()
+ } else {
+ let mut v = bytes.to_vec();
+ v.reverse();
+ BigUint::from_bytes_le(&*v)
+ }
+ }
+
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The bytes are in little-endian byte order.
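+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// // Illustrative values mirroring the big-endian example above:
+ /// // b"AB" is [0x41, 0x42], read least-significant byte first.
+ /// assert_eq!(BigUint::from_bytes_le(b"A"),
+ ///            BigUint::parse_bytes(b"65", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_le(b"AB"),
+ ///            BigUint::parse_bytes(b"16961", 10).unwrap());
+ /// ```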
+ #[inline]
+ pub fn from_bytes_le(bytes: &[u8]) -> BigUint {
+ if bytes.is_empty() {
+ Zero::zero()
+ } else {
+ from_bitwise_digits_le(bytes, 8)
+ }
+ }
+
+ /// Creates and initializes a `BigUint`. The input slice must contain
+ /// ASCII/UTF-8 characters in [0-9a-zA-Z].
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// The function `from_str_radix` from the `Num` trait provides the same logic
+ /// for `&str` buffers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint, ToBigUint};
+ ///
+ /// assert_eq!(BigUint::parse_bytes(b"1234", 10), ToBigUint::to_biguint(&1234));
+ /// assert_eq!(BigUint::parse_bytes(b"ABCD", 16), ToBigUint::to_biguint(&0xABCD));
+ /// assert_eq!(BigUint::parse_bytes(b"G", 16), None);
+ /// ```
+ #[inline]
+ pub fn parse_bytes(buf: &[u8], radix: u32) -> Option<BigUint> {
+ str::from_utf8(buf)
+ .ok()
+ .and_then(|s| BigUint::from_str_radix(s, radix).ok())
+ }
+
+ /// Creates and initializes a `BigUint`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in big-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint};
+ ///
+ /// let inbase190 = &[15, 33, 125, 12, 14];
+ /// let a = BigUint::from_radix_be(inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_be(190), inbase190);
+ /// ```
+ pub fn from_radix_be(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ let mut v = Vec::from(buf);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(buf, radix)
+ };
+
+ Some(res)
+ }
+
+ /// Creates and initializes a `BigUint`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in little-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint};
+ ///
+ /// let inbase190 = &[14, 12, 125, 33, 15];
+ /// let a = BigUint::from_radix_le(inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_le(190), inbase190);
+ /// ```
+ pub fn from_radix_le(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(buf, bits)
+ } else {
+ from_inexact_bitwise_digits_le(buf, bits)
+ }
+ } else {
+ let mut v = Vec::from(buf);
+ v.reverse();
+ from_radix_digits_be(&v, radix)
+ };
+
+ Some(res)
+ }
+
+ /// Returns the byte representation of the `BigUint` in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
+ /// assert_eq!(i.to_bytes_be(), vec![4, 101]);
+ /// ```
+ #[inline]
+ pub fn to_bytes_be(&self) -> Vec<u8> {
+ let mut v = self.to_bytes_le();
+ v.reverse();
+ v
+ }
+
+ /// Returns the byte representation of the `BigUint` in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
+ /// assert_eq!(i.to_bytes_le(), vec![101, 4]);
+ /// ```
+ #[inline]
+ pub fn to_bytes_le(&self) -> Vec<u8> {
+ if self.is_zero() {
+ vec![0]
+ } else {
+ to_bitwise_digits_le(self, 8)
+ }
+ }
+
+ /// Returns the `u32` digits representation of the `BigUint` ordered least significant digit
+ /// first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(1125u32).to_u32_digits(), vec![1125]);
+ /// assert_eq!(BigUint::from(4294967295u32).to_u32_digits(), vec![4294967295]);
+ /// assert_eq!(BigUint::from(4294967296u64).to_u32_digits(), vec![0, 1]);
+ /// assert_eq!(BigUint::from(112500000000u64).to_u32_digits(), vec![830850304, 26]);
+ /// ```
+ #[inline]
+ pub fn to_u32_digits(&self) -> Vec<u32> {
+ self.data.clone()
+ }
+
+ /// Returns the integer formatted as a string in the given radix.
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"ff", 16).unwrap();
+ /// assert_eq!(i.to_str_radix(16), "ff");
+ /// ```
+ #[inline]
+ pub fn to_str_radix(&self, radix: u32) -> String {
+ let mut v = to_str_radix_reversed(self, radix);
+ v.reverse();
+ unsafe { String::from_utf8_unchecked(v) }
+ }
+
+ /// Returns the integer in the requested base in big-endian digit order.
+ /// The output is not given in a human-readable alphabet but as
+ /// zero-based `u8` digit values.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_be(159),
+ /// vec![2, 94, 27]);
+ /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27
+ /// ```
+ #[inline]
+ pub fn to_radix_be(&self, radix: u32) -> Vec<u8> {
+ let mut v = to_radix_le(self, radix);
+ v.reverse();
+ v
+ }
+
+ /// Returns the integer in the requested base in little-endian digit order.
+ /// The output is not given in a human-readable alphabet but as
+ /// zero-based `u8` digit values.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_le(159),
+ /// vec![27, 94, 2]);
+ /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2)
+ /// ```
+ #[inline]
+ pub fn to_radix_le(&self, radix: u32) -> Vec<u8> {
+ to_radix_le(self, radix)
+ }
+
+ /// Determines the fewest bits necessary to express the `BigUint`.
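+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// // Illustrative values: 0b1101 needs 4 bits; zero needs none.
+ /// assert_eq!(BigUint::from(0b1101u32).bits(), 4);
+ /// assert_eq!(BigUint::from(0u32).bits(), 0);
+ /// ```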
+ #[inline]
+ pub fn bits(&self) -> usize {
+ if self.is_zero() {
+ return 0;
+ }
+ let zeros = self.data.last().unwrap().leading_zeros();
+ self.data.len() * big_digit::BITS - zeros as usize
+ }
+
+ /// Strips off trailing zero bigdigits - comparisons require the last element in the vector to
+ /// be nonzero.
+ #[inline]
+ fn normalize(&mut self) {
+ while let Some(&0) = self.data.last() {
+ self.data.pop();
+ }
+ }
+
+ /// Returns a normalized `BigUint`.
+ #[inline]
+ fn normalized(mut self) -> BigUint {
+ self.normalize();
+ self
+ }
+
+ /// Returns `(self ^ exponent) % modulus`.
+ ///
+ /// Panics if the modulus is zero.
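+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// // Small illustrative values: 4^13 = 67108864, and 67108864 mod 497 = 445.
+ /// let base = BigUint::from(4u32);
+ /// let exponent = BigUint::from(13u32);
+ /// let modulus = BigUint::from(497u32);
+ /// assert_eq!(base.modpow(&exponent, &modulus), BigUint::from(445u32));
+ /// ```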
+ pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
+ assert!(!modulus.is_zero(), "divide by zero!");
+
+ if modulus.is_odd() {
+ // For an odd modulus, we can use Montgomery multiplication in base 2^32.
+ monty_modpow(self, exponent, modulus)
+ } else {
+ // Otherwise do basically the same as `num::pow`, but with a modulus.
+ plain_modpow(self, &exponent.data, modulus)
+ }
+ }
+
+ /// Returns the truncated principal square root of `self` --
+ /// see [Roots::sqrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.sqrt)
+ pub fn sqrt(&self) -> Self {
+ Roots::sqrt(self)
+ }
+
+ /// Returns the truncated principal cube root of `self` --
+ /// see [Roots::cbrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.cbrt).
+ pub fn cbrt(&self) -> Self {
+ Roots::cbrt(self)
+ }
+
+ /// Returns the truncated principal `n`th root of `self` --
+ /// see [Roots::nth_root](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#tymethod.nth_root).
+ pub fn nth_root(&self, n: u32) -> Self {
+ Roots::nth_root(self, n)
+ }
+}
+
+fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint {
+ assert!(!modulus.is_zero(), "divide by zero!");
+
+ let i = match exp_data.iter().position(|&r| r != 0) {
+ None => return BigUint::one(),
+ Some(i) => i,
+ };
+
+ let mut base = base % modulus;
+ for _ in 0..i {
+ for _ in 0..big_digit::BITS {
+ base = &base * &base % modulus;
+ }
+ }
+
+ let mut r = exp_data[i];
+ let mut b = 0usize;
+ while r.is_even() {
+ base = &base * &base % modulus;
+ r >>= 1;
+ b += 1;
+ }
+
+ let mut exp_iter = exp_data[i + 1..].iter();
+ if exp_iter.len() == 0 && r.is_one() {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ r >>= 1;
+ b += 1;
+
+ {
+ let mut unit = |exp_is_odd| {
+ base = &base * &base % modulus;
+ if exp_is_odd {
+ acc = &acc * &base % modulus;
+ }
+ };
+
+ if let Some(&last) = exp_iter.next_back() {
+ // consume exp_data[i]
+ for _ in b..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+
+ // consume all other digits before the last
+ for &r in exp_iter {
+ let mut r = r;
+ for _ in 0..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ r = last;
+ }
+
+ debug_assert_ne!(r, 0);
+ while !r.is_zero() {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ acc
+}
+
+#[test]
+fn test_plain_modpow() {
+ let two = BigUint::from(2u32);
+ let modulus = BigUint::from(0x1100u32);
+
+ let exp = vec![0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b10];
+ assert_eq!(
+ two.pow(0b10_00000000_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b110010];
+ assert_eq!(
+ two.pow(0b110010_00000000_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0b1, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000001_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0b1100, 0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_00001100_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+}
+
+/// Returns the number of least-significant bits that are zero,
+/// or `None` if the entire number is zero.
+pub fn trailing_zeros(u: &BigUint) -> Option<usize> {
+ u.data
+ .iter()
+ .enumerate()
+ .find(|&(_, &digit)| digit != 0)
+ .map(|(i, digit)| i * big_digit::BITS + digit.trailing_zeros() as usize)
+}
+
+impl_sum_iter_type!(BigUint);
+impl_product_iter_type!(BigUint);
+
+pub trait IntDigits {
+ fn digits(&self) -> &[BigDigit];
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit>;
+ fn normalize(&mut self);
+ fn capacity(&self) -> usize;
+ fn len(&self) -> usize;
+}
+
+impl IntDigits for BigUint {
+ #[inline]
+ fn digits(&self) -> &[BigDigit] {
+ &self.data
+ }
+ #[inline]
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
+ &mut self.data
+ }
+ #[inline]
+ fn normalize(&mut self) {
+ self.normalize();
+ }
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len()
+ }
+}
+
+/// Combine four `u32`s into a single `u128`.
+#[cfg(has_i128)]
+#[inline]
+fn u32_to_u128(a: u32, b: u32, c: u32, d: u32) -> u128 {
+ u128::from(d) | (u128::from(c) << 32) | (u128::from(b) << 64) | (u128::from(a) << 96)
+}
+
+/// Split a single `u128` into four `u32`.
+#[cfg(has_i128)]
+#[inline]
+fn u32_from_u128(n: u128) -> (u32, u32, u32, u32) {
+ (
+ (n >> 96) as u32,
+ (n >> 64) as u32,
+ (n >> 32) as u32,
+ n as u32,
+ )
+}
+
+#[cfg(feature = "serde")]
+impl serde::Serialize for BigUint {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ // Note: do not change the serialization format, or it may break forward
+ // and backward compatibility of serialized data! If we ever change the
+ // internal representation, we should still serialize in base-`u32`.
+ let data: &Vec<u32> = &self.data;
+ data.serialize(serializer)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> serde::Deserialize<'de> for BigUint {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let data: Vec<u32> = Vec::deserialize(deserializer)?;
+ Ok(BigUint::new(data))
+ }
+}
+
+/// Returns the greatest power of the radix <= big_digit::BASE
+#[inline]
+fn get_radix_base(radix: u32) -> (BigDigit, usize) {
+ debug_assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+ debug_assert!(!radix.is_power_of_two());
+
+ // To generate this table:
+ // for radix in 2u64..257 {
+ // let mut power = big_digit::BITS / fls(radix as u64);
+ // let mut base = radix.pow(power as u32);
+ //
+ // while let Some(b) = base.checked_mul(radix) {
+ // if b > big_digit::MAX {
+ // break;
+ // }
+ // base = b;
+ // power += 1;
+ // }
+ //
+ // println!("({:10}, {:2}), // {:2}", base, power, radix);
+ // }
+ // and
+ // for radix in 2u64..257 {
+ // let mut power = 64 / fls(radix as u64);
+ // let mut base = radix.pow(power as u32);
+ //
+ // while let Some(b) = base.checked_mul(radix) {
+ // base = b;
+ // power += 1;
+ // }
+ //
+ // println!("({:20}, {:2}), // {:2}", base, power, radix);
+ // }
+ match big_digit::BITS {
+ 32 => {
+ const BASES: [(u32, usize); 257] = [
+ (0, 0),
+ (0, 0),
+ (0, 0), // 2
+ (3486784401, 20), // 3
+ (0, 0), // 4
+ (1220703125, 13), // 5
+ (2176782336, 12), // 6
+ (1977326743, 11), // 7
+ (0, 0), // 8
+ (3486784401, 10), // 9
+ (1000000000, 9), // 10
+ (2357947691, 9), // 11
+ (429981696, 8), // 12
+ (815730721, 8), // 13
+ (1475789056, 8), // 14
+ (2562890625, 8), // 15
+ (0, 0), // 16
+ (410338673, 7), // 17
+ (612220032, 7), // 18
+ (893871739, 7), // 19
+ (1280000000, 7), // 20
+ (1801088541, 7), // 21
+ (2494357888, 7), // 22
+ (3404825447, 7), // 23
+ (191102976, 6), // 24
+ (244140625, 6), // 25
+ (308915776, 6), // 26
+ (387420489, 6), // 27
+ (481890304, 6), // 28
+ (594823321, 6), // 29
+ (729000000, 6), // 30
+ (887503681, 6), // 31
+ (0, 0), // 32
+ (1291467969, 6), // 33
+ (1544804416, 6), // 34
+ (1838265625, 6), // 35
+ (2176782336, 6), // 36
+ (2565726409, 6), // 37
+ (3010936384, 6), // 38
+ (3518743761, 6), // 39
+ (4096000000, 6), // 40
+ (115856201, 5), // 41
+ (130691232, 5), // 42
+ (147008443, 5), // 43
+ (164916224, 5), // 44
+ (184528125, 5), // 45
+ (205962976, 5), // 46
+ (229345007, 5), // 47
+ (254803968, 5), // 48
+ (282475249, 5), // 49
+ (312500000, 5), // 50
+ (345025251, 5), // 51
+ (380204032, 5), // 52
+ (418195493, 5), // 53
+ (459165024, 5), // 54
+ (503284375, 5), // 55
+ (550731776, 5), // 56
+ (601692057, 5), // 57
+ (656356768, 5), // 58
+ (714924299, 5), // 59
+ (777600000, 5), // 60
+ (844596301, 5), // 61
+ (916132832, 5), // 62
+ (992436543, 5), // 63
+ (0, 0), // 64
+ (1160290625, 5), // 65
+ (1252332576, 5), // 66
+ (1350125107, 5), // 67
+ (1453933568, 5), // 68
+ (1564031349, 5), // 69
+ (1680700000, 5), // 70
+ (1804229351, 5), // 71
+ (1934917632, 5), // 72
+ (2073071593, 5), // 73
+ (2219006624, 5), // 74
+ (2373046875, 5), // 75
+ (2535525376, 5), // 76
+ (2706784157, 5), // 77
+ (2887174368, 5), // 78
+ (3077056399, 5), // 79
+ (3276800000, 5), // 80
+ (3486784401, 5), // 81
+ (3707398432, 5), // 82
+ (3939040643, 5), // 83
+ (4182119424, 5), // 84
+ (52200625, 4), // 85
+ (54700816, 4), // 86
+ (57289761, 4), // 87
+ (59969536, 4), // 88
+ (62742241, 4), // 89
+ (65610000, 4), // 90
+ (68574961, 4), // 91
+ (71639296, 4), // 92
+ (74805201, 4), // 93
+ (78074896, 4), // 94
+ (81450625, 4), // 95
+ (84934656, 4), // 96
+ (88529281, 4), // 97
+ (92236816, 4), // 98
+ (96059601, 4), // 99
+ (100000000, 4), // 100
+ (104060401, 4), // 101
+ (108243216, 4), // 102
+ (112550881, 4), // 103
+ (116985856, 4), // 104
+ (121550625, 4), // 105
+ (126247696, 4), // 106
+ (131079601, 4), // 107
+ (136048896, 4), // 108
+ (141158161, 4), // 109
+ (146410000, 4), // 110
+ (151807041, 4), // 111
+ (157351936, 4), // 112
+ (163047361, 4), // 113
+ (168896016, 4), // 114
+ (174900625, 4), // 115
+ (181063936, 4), // 116
+ (187388721, 4), // 117
+ (193877776, 4), // 118
+ (200533921, 4), // 119
+ (207360000, 4), // 120
+ (214358881, 4), // 121
+ (221533456, 4), // 122
+ (228886641, 4), // 123
+ (236421376, 4), // 124
+ (244140625, 4), // 125
+ (252047376, 4), // 126
+ (260144641, 4), // 127
+ (0, 0), // 128
+ (276922881, 4), // 129
+ (285610000, 4), // 130
+ (294499921, 4), // 131
+ (303595776, 4), // 132
+ (312900721, 4), // 133
+ (322417936, 4), // 134
+ (332150625, 4), // 135
+ (342102016, 4), // 136
+ (352275361, 4), // 137
+ (362673936, 4), // 138
+ (373301041, 4), // 139
+ (384160000, 4), // 140
+ (395254161, 4), // 141
+ (406586896, 4), // 142
+ (418161601, 4), // 143
+ (429981696, 4), // 144
+ (442050625, 4), // 145
+ (454371856, 4), // 146
+ (466948881, 4), // 147
+ (479785216, 4), // 148
+ (492884401, 4), // 149
+ (506250000, 4), // 150
+ (519885601, 4), // 151
+ (533794816, 4), // 152
+ (547981281, 4), // 153
+ (562448656, 4), // 154
+ (577200625, 4), // 155
+ (592240896, 4), // 156
+ (607573201, 4), // 157
+ (623201296, 4), // 158
+ (639128961, 4), // 159
+ (655360000, 4), // 160
+ (671898241, 4), // 161
+ (688747536, 4), // 162
+ (705911761, 4), // 163
+ (723394816, 4), // 164
+ (741200625, 4), // 165
+ (759333136, 4), // 166
+ (777796321, 4), // 167
+ (796594176, 4), // 168
+ (815730721, 4), // 169
+ (835210000, 4), // 170
+ (855036081, 4), // 171
+ (875213056, 4), // 172
+ (895745041, 4), // 173
+ (916636176, 4), // 174
+ (937890625, 4), // 175
+ (959512576, 4), // 176
+ (981506241, 4), // 177
+ (1003875856, 4), // 178
+ (1026625681, 4), // 179
+ (1049760000, 4), // 180
+ (1073283121, 4), // 181
+ (1097199376, 4), // 182
+ (1121513121, 4), // 183
+ (1146228736, 4), // 184
+ (1171350625, 4), // 185
+ (1196883216, 4), // 186
+ (1222830961, 4), // 187
+ (1249198336, 4), // 188
+ (1275989841, 4), // 189
+ (1303210000, 4), // 190
+ (1330863361, 4), // 191
+ (1358954496, 4), // 192
+ (1387488001, 4), // 193
+ (1416468496, 4), // 194
+ (1445900625, 4), // 195
+ (1475789056, 4), // 196
+ (1506138481, 4), // 197
+ (1536953616, 4), // 198
+ (1568239201, 4), // 199
+ (1600000000, 4), // 200
+ (1632240801, 4), // 201
+ (1664966416, 4), // 202
+ (1698181681, 4), // 203
+ (1731891456, 4), // 204
+ (1766100625, 4), // 205
+ (1800814096, 4), // 206
+ (1836036801, 4), // 207
+ (1871773696, 4), // 208
+ (1908029761, 4), // 209
+ (1944810000, 4), // 210
+ (1982119441, 4), // 211
+ (2019963136, 4), // 212
+ (2058346161, 4), // 213
+ (2097273616, 4), // 214
+ (2136750625, 4), // 215
+ (2176782336, 4), // 216
+ (2217373921, 4), // 217
+ (2258530576, 4), // 218
+ (2300257521, 4), // 219
+ (2342560000, 4), // 220
+ (2385443281, 4), // 221
+ (2428912656, 4), // 222
+ (2472973441, 4), // 223
+ (2517630976, 4), // 224
+ (2562890625, 4), // 225
+ (2608757776, 4), // 226
+ (2655237841, 4), // 227
+ (2702336256, 4), // 228
+ (2750058481, 4), // 229
+ (2798410000, 4), // 230
+ (2847396321, 4), // 231
+ (2897022976, 4), // 232
+ (2947295521, 4), // 233
+ (2998219536, 4), // 234
+ (3049800625, 4), // 235
+ (3102044416, 4), // 236
+ (3154956561, 4), // 237
+ (3208542736, 4), // 238
+ (3262808641, 4), // 239
+ (3317760000, 4), // 240
+ (3373402561, 4), // 241
+ (3429742096, 4), // 242
+ (3486784401, 4), // 243
+ (3544535296, 4), // 244
+ (3603000625, 4), // 245
+ (3662186256, 4), // 246
+ (3722098081, 4), // 247
+ (3782742016, 4), // 248
+ (3844124001, 4), // 249
+ (3906250000, 4), // 250
+ (3969126001, 4), // 251
+ (4032758016, 4), // 252
+ (4097152081, 4), // 253
+ (4162314256, 4), // 254
+ (4228250625, 4), // 255
+ (0, 0), // 256
+ ];
+
+ let (base, power) = BASES[radix as usize];
+ (base as BigDigit, power)
+ }
+ 64 => {
+ const BASES: [(u64, usize); 257] = [
+ (0, 0),
+ (0, 0),
+ (9223372036854775808, 63), // 2
+ (12157665459056928801, 40), // 3
+ (4611686018427387904, 31), // 4
+ (7450580596923828125, 27), // 5
+ (4738381338321616896, 24), // 6
+ (3909821048582988049, 22), // 7
+ (9223372036854775808, 21), // 8
+ (12157665459056928801, 20), // 9
+ (10000000000000000000, 19), // 10
+ (5559917313492231481, 18), // 11
+ (2218611106740436992, 17), // 12
+ (8650415919381337933, 17), // 13
+ (2177953337809371136, 16), // 14
+ (6568408355712890625, 16), // 15
+ (1152921504606846976, 15), // 16
+ (2862423051509815793, 15), // 17
+ (6746640616477458432, 15), // 18
+ (15181127029874798299, 15), // 19
+ (1638400000000000000, 14), // 20
+ (3243919932521508681, 14), // 21
+ (6221821273427820544, 14), // 22
+ (11592836324538749809, 14), // 23
+ (876488338465357824, 13), // 24
+ (1490116119384765625, 13), // 25
+ (2481152873203736576, 13), // 26
+ (4052555153018976267, 13), // 27
+ (6502111422497947648, 13), // 28
+ (10260628712958602189, 13), // 29
+ (15943230000000000000, 13), // 30
+ (787662783788549761, 12), // 31
+ (1152921504606846976, 12), // 32
+ (1667889514952984961, 12), // 33
+ (2386420683693101056, 12), // 34
+ (3379220508056640625, 12), // 35
+ (4738381338321616896, 12), // 36
+ (6582952005840035281, 12), // 37
+ (9065737908494995456, 12), // 38
+ (12381557655576425121, 12), // 39
+ (16777216000000000000, 12), // 40
+ (550329031716248441, 11), // 41
+ (717368321110468608, 11), // 42
+ (929293739471222707, 11), // 43
+ (1196683881290399744, 11), // 44
+ (1532278301220703125, 11), // 45
+ (1951354384207722496, 11), // 46
+ (2472159215084012303, 11), // 47
+ (3116402981210161152, 11), // 48
+ (3909821048582988049, 11), // 49
+ (4882812500000000000, 11), // 50
+ (6071163615208263051, 11), // 51
+ (7516865509350965248, 11), // 52
+ (9269035929372191597, 11), // 53
+ (11384956040305711104, 11), // 54
+ (13931233916552734375, 11), // 55
+ (16985107389382393856, 11), // 56
+ (362033331456891249, 10), // 57
+ (430804206899405824, 10), // 58
+ (511116753300641401, 10), // 59
+ (604661760000000000, 10), // 60
+ (713342911662882601, 10), // 61
+ (839299365868340224, 10), // 62
+ (984930291881790849, 10), // 63
+ (1152921504606846976, 10), // 64
+ (1346274334462890625, 10), // 65
+ (1568336880910795776, 10), // 66
+ (1822837804551761449, 10), // 67
+ (2113922820157210624, 10), // 68
+ (2446194060654759801, 10), // 69
+ (2824752490000000000, 10), // 70
+ (3255243551009881201, 10), // 71
+ (3743906242624487424, 10), // 72
+ (4297625829703557649, 10), // 73
+ (4923990397355877376, 10), // 74
+ (5631351470947265625, 10), // 75
+ (6428888932339941376, 10), // 76
+ (7326680472586200649, 10), // 77
+ (8335775831236199424, 10), // 78
+ (9468276082626847201, 10), // 79
+ (10737418240000000000, 10), // 80
+ (12157665459056928801, 10), // 81
+ (13744803133596058624, 10), // 82
+ (15516041187205853449, 10), // 83
+ (17490122876598091776, 10), // 84
+ (231616946283203125, 9), // 85
+ (257327417311663616, 9), // 86
+ (285544154243029527, 9), // 87
+ (316478381828866048, 9), // 88
+ (350356403707485209, 9), // 89
+ (387420489000000000, 9), // 90
+ (427929800129788411, 9), // 91
+ (472161363286556672, 9), // 92
+ (520411082988487293, 9), // 93
+ (572994802228616704, 9), // 94
+ (630249409724609375, 9), // 95
+ (692533995824480256, 9), // 96
+ (760231058654565217, 9), // 97
+ (833747762130149888, 9), // 98
+ (913517247483640899, 9), // 99
+ (1000000000000000000, 9), // 100
+ (1093685272684360901, 9), // 101
+ (1195092568622310912, 9), // 102
+ (1304773183829244583, 9), // 103
+ (1423311812421484544, 9), // 104
+ (1551328215978515625, 9), // 105
+ (1689478959002692096, 9), // 106
+ (1838459212420154507, 9), // 107
+ (1999004627104432128, 9), // 108
+ (2171893279442309389, 9), // 109
+ (2357947691000000000, 9), // 110
+ (2558036924386500591, 9), // 111
+ (2773078757450186752, 9), // 112
+ (3004041937984268273, 9), // 113
+ (3251948521156637184, 9), // 114
+ (3517876291919921875, 9), // 115
+ (3802961274698203136, 9), // 116
+ (4108400332687853397, 9), // 117
+ (4435453859151328768, 9), // 118
+ (4785448563124474679, 9), // 119
+ (5159780352000000000, 9), // 120
+ (5559917313492231481, 9), // 121
+ (5987402799531080192, 9), // 122
+ (6443858614676334363, 9), // 123
+ (6930988311686938624, 9), // 124
+ (7450580596923828125, 9), // 125
+ (8004512848309157376, 9), // 126
+ (8594754748609397887, 9), // 127
+ (9223372036854775808, 9), // 128
+ (9892530380752880769, 9), // 129
+ (10604499373000000000, 9), // 130
+ (11361656654439817571, 9), // 131
+ (12166492167065567232, 9), // 132
+ (13021612539908538853, 9), // 133
+ (13929745610903012864, 9), // 134
+ (14893745087865234375, 9), // 135
+ (15916595351771938816, 9), // 136
+ (17001416405572203977, 9), // 137
+ (18151468971815029248, 9), // 138
+ (139353667211683681, 8), // 139
+ (147578905600000000, 8), // 140
+ (156225851787813921, 8), // 141
+ (165312903998914816, 8), // 142
+ (174859124550883201, 8), // 143
+ (184884258895036416, 8), // 144
+ (195408755062890625, 8), // 145
+ (206453783524884736, 8), // 146
+ (218041257467152161, 8), // 147
+ (230193853492166656, 8), // 148
+ (242935032749128801, 8), // 149
+ (256289062500000000, 8), // 150
+ (270281038127131201, 8), // 151
+ (284936905588473856, 8), // 152
+ (300283484326400961, 8), // 153
+ (316348490636206336, 8), // 154
+ (333160561500390625, 8), // 155
+ (350749278894882816, 8), // 156
+ (369145194573386401, 8), // 157
+ (388379855336079616, 8), // 158
+ (408485828788939521, 8), // 159
+ (429496729600000000, 8), // 160
+ (451447246258894081, 8), // 161
+ (474373168346071296, 8), // 162
+ (498311414318121121, 8), // 163
+ (523300059815673856, 8), // 164
+ (549378366500390625, 8), // 165
+ (576586811427594496, 8), // 166
+ (604967116961135041, 8), // 167
+ (634562281237118976, 8), // 168
+ (665416609183179841, 8), // 169
+ (697575744100000000, 8), // 170
+ (731086699811838561, 8), // 171
+ (765997893392859136, 8), // 172
+ (802359178476091681, 8), // 173
+ (840221879151902976, 8), // 174
+ (879638824462890625, 8), // 175
+ (920664383502155776, 8), // 176
+ (963354501121950081, 8), // 177
+ (1007766734259732736, 8), // 178
+ (1053960288888713761, 8), // 179
+ (1101996057600000000, 8), // 180
+ (1151936657823500641, 8), // 181
+ (1203846470694789376, 8), // 182
+ (1257791680575160641, 8), // 183
+ (1313840315232157696, 8), // 184
+ (1372062286687890625, 8), // 185
+ (1432529432742502656, 8), // 186
+ (1495315559180183521, 8), // 187
+ (1560496482665168896, 8), // 188
+ (1628150074335205281, 8), // 189
+ (1698356304100000000, 8), // 190
+ (1771197285652216321, 8), // 191
+ (1846757322198614016, 8), // 192
+ (1925122952918976001, 8), // 193
+ (2006383000160502016, 8), // 194
+ (2090628617375390625, 8), // 195
+ (2177953337809371136, 8), // 196
+ (2268453123948987361, 8), // 197
+ (2362226417735475456, 8), // 198
+ (2459374191553118401, 8), // 199
+ (2560000000000000000, 8), // 200
+ (2664210032449121601, 8), // 201
+ (2772113166407885056, 8), // 202
+ (2883821021683985761, 8), // 203
+ (2999448015365799936, 8), // 204
+ (3119111417625390625, 8), // 205
+ (3242931408352297216, 8), // 206
+ (3371031134626313601, 8), // 207
+ (3503536769037500416, 8), // 208
+ (3640577568861717121, 8), // 209
+ (3782285936100000000, 8), // 210
+ (3928797478390152481, 8), // 211
+ (4080251070798954496, 8), // 212
+ (4236788918503437921, 8), // 213
+ (4398556620369715456, 8), // 214
+ (4565703233437890625, 8), // 215
+ (4738381338321616896, 8), // 216
+ (4916747105530914241, 8), // 217
+ (5100960362726891776, 8), // 218
+ (5291184662917065441, 8), // 219
+ (5487587353600000000, 8), // 220
+ (5690339646868044961, 8), // 221
+ (5899616690476974336, 8), // 222
+ (6115597639891380481, 8), // 223
+ (6338465731314712576, 8), // 224
+ (6568408355712890625, 8), // 225
+ (6805617133840466176, 8), // 226
+ (7050287992278341281, 8), // 227
+ (7302621240492097536, 8), // 228
+ (7562821648920027361, 8), // 229
+ (7831098528100000000, 8), // 230
+ (8107665808844335041, 8), // 231
+ (8392742123471896576, 8), // 232
+ (8686550888106661441, 8), // 233
+ (8989320386052055296, 8), // 234
+ (9301283852250390625, 8), // 235
+ (9622679558836781056, 8), // 236
+ (9953750901796946721, 8), // 237
+ (10294746488738365696, 8), // 238
+ (10645920227784266881, 8), // 239
+ (11007531417600000000, 8), // 240
+ (11379844838561358721, 8), // 241
+ (11763130845074473216, 8), // 242
+ (12157665459056928801, 8), // 243
+ (12563730464589807616, 8), // 244
+ (12981613503750390625, 8), // 245
+ (13411608173635297536, 8), // 246
+ (13854014124583882561, 8), // 247
+ (14309137159611744256, 8), // 248
+ (14777289335064248001, 8), // 249
+ (15258789062500000000, 8), // 250
+ (15753961211814252001, 8), // 251
+ (16263137215612256256, 8), // 252
+ (16786655174842630561, 8), // 253
+ (17324859965700833536, 8), // 254
+ (17878103347812890625, 8), // 255
+ (72057594037927936, 7), // 256
+ ];
+
+ let (base, power) = BASES[radix as usize];
+ (base as BigDigit, power)
+ }
+ _ => panic!("Invalid bigdigit size"),
+ }
+}
+
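+ // Editorial sketch, not part of the vendored source: the generator loop from
+ // the comment above, made self-contained. It assumes a 32-bit `BigDigit` and
+ // supplies the `fls` ("find last set") helper that the comment takes for granted.
+ #[cfg(test)]
+ mod radix_base_sketch {
+     fn fls(v: u64) -> usize {
+         64 - v.leading_zeros() as usize
+     }
+
+     // Largest power of `radix` that still fits in a u32, plus its exponent.
+     fn radix_base_u32(radix: u64) -> (u64, usize) {
+         let mut power = 32 / fls(radix);
+         let mut base = radix.pow(power as u32);
+         while let Some(b) = base.checked_mul(radix) {
+             if b > u64::from(u32::max_value()) {
+                 break;
+             }
+             base = b;
+             power += 1;
+         }
+         (base, power)
+     }
+
+     #[test]
+     fn base_ten_packs_nine_digits() {
+         // 10^9 is the largest power of ten that fits in a u32, matching the
+         // (1000000000, 9) entry for radix 10 in the table above.
+         assert_eq!(radix_base_u32(10), (1_000_000_000, 9));
+     }
+ }
+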
+#[test]
+fn test_from_slice() {
+ fn check(slice: &[BigDigit], data: &[BigDigit]) {
+ assert!(BigUint::from_slice(slice).data == data);
+ }
+ check(&[1], &[1]);
+ check(&[0, 0, 0], &[]);
+ check(&[1, 2, 0, 0], &[1, 2]);
+ check(&[0, 0, 1, 2], &[0, 0, 1, 2]);
+ check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]);
+ check(&[-1i32 as BigDigit], &[-1i32 as BigDigit]);
+}
+
+#[test]
+fn test_assign_from_slice() {
+ fn check(slice: &[BigDigit], data: &[BigDigit]) {
+ let mut p = BigUint::from_slice(&[2627_u32, 0_u32, 9182_u32, 42_u32]);
+ p.assign_from_slice(slice);
+ assert!(p.data == data);
+ }
+ check(&[1], &[1]);
+ check(&[0, 0, 0], &[]);
+ check(&[1, 2, 0, 0], &[1, 2]);
+ check(&[0, 0, 1, 2], &[0, 0, 1, 2]);
+ check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]);
+ check(&[-1i32 as BigDigit], &[-1i32 as BigDigit]);
+}
+
+#[cfg(has_i128)]
+#[test]
+fn test_u32_u128() {
+ assert_eq!(u32_from_u128(0u128), (0, 0, 0, 0));
+ assert_eq!(
+ u32_from_u128(u128::max_value()),
+ (
+ u32::max_value(),
+ u32::max_value(),
+ u32::max_value(),
+ u32::max_value()
+ )
+ );
+
+ assert_eq!(
+ u32_from_u128(u32::max_value() as u128),
+ (0, 0, 0, u32::max_value())
+ );
+
+ assert_eq!(
+ u32_from_u128(u64::max_value() as u128),
+ (0, 0, u32::max_value(), u32::max_value())
+ );
+
+ assert_eq!(
+ u32_from_u128((u64::max_value() as u128) + u32::max_value() as u128),
+ (0, 1, 0, u32::max_value() - 1)
+ );
+
+ assert_eq!(u32_from_u128(36_893_488_151_714_070_528), (0, 2, 1, 0));
+}
+
+#[cfg(has_i128)]
+#[test]
+fn test_u128_u32_roundtrip() {
+ // roundtrips
+ let values = vec![
+ 0u128,
+ 1u128,
+ u64::max_value() as u128 * 3,
+ u32::max_value() as u128,
+ u64::max_value() as u128,
+ (u64::max_value() as u128) + u32::max_value() as u128,
+ u128::max_value(),
+ ];
+
+ for val in &values {
+ let (a, b, c, d) = u32_from_u128(*val);
+ assert_eq!(u32_to_u128(a, b, c, d), *val);
+ }
+}
+
+#[test]
+fn test_pow_biguint() {
+ let base = BigUint::from(5u8);
+ let exponent = BigUint::from(3u8);
+
+ assert_eq!(BigUint::from(125u8), base.pow(exponent));
+}
diff --git a/rust/vendor/num-bigint-0.2.6/src/lib.rs b/rust/vendor/num-bigint-0.2.6/src/lib.rs
new file mode 100644
index 0000000..837870a
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/src/lib.rs
@@ -0,0 +1,233 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+ //! A big integer (signed version: `BigInt`, unsigned version: `BigUint`).
+//!
+//! A `BigUint` is represented as a vector of `BigDigit`s.
+//! A `BigInt` is a combination of `BigUint` and `Sign`.
+//!
+//! Common numerical operations are overloaded, so we can treat them
+//! the same way we treat other numbers.
+//!
+//! ## Example
+//!
+//! ```rust
+//! extern crate num_bigint;
+//! extern crate num_traits;
+//!
+//! # fn main() {
+//! use num_bigint::BigUint;
+//! use num_traits::{Zero, One};
+//! use std::mem::replace;
+//!
+//! // Calculate large fibonacci numbers.
+//! fn fib(n: usize) -> BigUint {
+//! let mut f0: BigUint = Zero::zero();
+//! let mut f1: BigUint = One::one();
+//! for _ in 0..n {
+//! let f2 = f0 + &f1;
+//! // This is a low cost way of swapping f0 with f1 and f1 with f2.
+//! f0 = replace(&mut f1, f2);
+//! }
+//! f0
+//! }
+//!
+//! // This is a very large number.
+//! println!("fib(1000) = {}", fib(1000));
+//! # }
+//! ```
+//!
+//! It's easy to generate large random numbers:
+//!
+//! ```rust
+//! # #[cfg(feature = "rand")]
+//! extern crate rand;
+//! extern crate num_bigint as bigint;
+//!
+//! # #[cfg(feature = "rand")]
+//! # fn main() {
+//! use bigint::{ToBigInt, RandBigInt};
+//!
+//! let mut rng = rand::thread_rng();
+//! let a = rng.gen_bigint(1000);
+//!
+//! let low = -10000.to_bigint().unwrap();
+//! let high = 10000.to_bigint().unwrap();
+//! let b = rng.gen_bigint_range(&low, &high);
+//!
+//! // Probably an even larger number.
+//! println!("{}", a * b);
+//! # }
+//!
+//! # #[cfg(not(feature = "rand"))]
+//! # fn main() {
+//! # }
+//! ```
+//!
+//! See the "Features" section for instructions for enabling random number generation.
+//!
+//! ## Features
+//!
+//! The `std` crate feature is mandatory and enabled by default. If you depend on
+//! `num-bigint` with `default-features = false`, you must manually enable the
+//! `std` feature yourself. In the future, we hope to support `#![no_std]` with
+//! the `alloc` crate when `std` is not enabled.
+//!
+//! Implementations for `i128` and `u128` are only available with Rust 1.26 and
+//! later. The build script automatically detects this, but you can make it
+//! mandatory by enabling the `i128` crate feature.
+//!
+//! ### Random Generation
+//!
+//! `num-bigint` supports the generation of random big integers when the `rand`
+ //! feature is enabled. To enable it, add the following to `Cargo.toml`:
+//!
+//! ```toml
+//! rand = "0.5"
+//! num-bigint = { version = "0.2", features = ["rand"] }
+//! ```
+//!
+//! Note that you must use the version of `rand` that `num-bigint` is compatible
+//! with: `0.5`.
+//!
+//!
+//! ## Compatibility
+//!
+//! The `num-bigint` crate is tested for rustc 1.15 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num-bigint/0.2")]
+// We don't actually support `no_std` yet, and probably won't until `alloc` is stable. We're just
+// reserving this ability with the "std" feature now, and compilation will fail without.
+#![cfg_attr(not(feature = "std"), no_std)]
+
+#[cfg(feature = "rand")]
+extern crate rand;
+#[cfg(feature = "serde")]
+extern crate serde;
+
+extern crate num_integer as integer;
+extern crate num_traits as traits;
+#[cfg(feature = "quickcheck")]
+extern crate quickcheck;
+
+use std::error::Error;
+use std::fmt;
+
+#[macro_use]
+mod macros;
+
+mod bigint;
+mod biguint;
+
+#[cfg(feature = "rand")]
+mod bigrand;
+
+#[cfg(target_pointer_width = "32")]
+type UsizePromotion = u32;
+#[cfg(target_pointer_width = "64")]
+type UsizePromotion = u64;
+
+#[cfg(target_pointer_width = "32")]
+type IsizePromotion = i32;
+#[cfg(target_pointer_width = "64")]
+type IsizePromotion = i64;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct ParseBigIntError {
+ kind: BigIntErrorKind,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+enum BigIntErrorKind {
+ Empty,
+ InvalidDigit,
+}
+
+impl ParseBigIntError {
+ fn __description(&self) -> &str {
+ use BigIntErrorKind::*;
+ match self.kind {
+ Empty => "cannot parse integer from empty string",
+ InvalidDigit => "invalid digit found in string",
+ }
+ }
+
+ fn empty() -> Self {
+ ParseBigIntError {
+ kind: BigIntErrorKind::Empty,
+ }
+ }
+
+ fn invalid() -> Self {
+ ParseBigIntError {
+ kind: BigIntErrorKind::InvalidDigit,
+ }
+ }
+}
+
+impl fmt::Display for ParseBigIntError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+impl Error for ParseBigIntError {
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
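+
+ // Editorial sketch, not part of the vendored source: how these error kinds
+ // surface through the public API. `Num::from_str_radix` (from `num-traits`)
+ // is the parsing entry point that returns a `ParseBigIntError`.
+ #[cfg(test)]
+ mod parse_error_sketch {
+     use super::BigInt;
+     use traits::Num;
+
+     #[test]
+     fn empty_and_invalid_inputs_fail() {
+         // Empty input maps to the `Empty` kind, a bad digit to `InvalidDigit`.
+         assert!(BigInt::from_str_radix("", 10).is_err());
+         assert!(BigInt::from_str_radix("12x", 10).is_err());
+         assert!(BigInt::from_str_radix("-123", 10).is_ok());
+     }
+ }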
+
+pub use biguint::BigUint;
+pub use biguint::ToBigUint;
+
+pub use bigint::BigInt;
+pub use bigint::Sign;
+pub use bigint::ToBigInt;
+
+#[cfg(feature = "rand")]
+pub use bigrand::{RandBigInt, RandomBits, UniformBigInt, UniformBigUint};
+
+mod big_digit {
+ /// A `BigDigit` is a `BigUint`'s composing element.
+ pub type BigDigit = u32;
+
+ /// A `DoubleBigDigit` is the internal type used to do the computations. Its
+ /// size is the double of the size of `BigDigit`.
+ pub type DoubleBigDigit = u64;
+
+ /// A `SignedDoubleBigDigit` is the signed version of `DoubleBigDigit`.
+ pub type SignedDoubleBigDigit = i64;
+
+ // `DoubleBigDigit` size dependent
+ pub const BITS: usize = 32;
+
+ const LO_MASK: DoubleBigDigit = (-1i32 as DoubleBigDigit) >> BITS;
+
+ #[inline]
+ fn get_hi(n: DoubleBigDigit) -> BigDigit {
+ (n >> BITS) as BigDigit
+ }
+ #[inline]
+ fn get_lo(n: DoubleBigDigit) -> BigDigit {
+ (n & LO_MASK) as BigDigit
+ }
+
+ /// Split one `DoubleBigDigit` into two `BigDigit`s.
+ #[inline]
+ pub fn from_doublebigdigit(n: DoubleBigDigit) -> (BigDigit, BigDigit) {
+ (get_hi(n), get_lo(n))
+ }
+
+ /// Join two `BigDigit`s into one `DoubleBigDigit`
+ #[inline]
+ pub fn to_doublebigdigit(hi: BigDigit, lo: BigDigit) -> DoubleBigDigit {
+ DoubleBigDigit::from(lo) | (DoubleBigDigit::from(hi) << BITS)
+ }
+}
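+
+ // Editorial sketch, not part of the vendored source: how the `big_digit`
+ // helpers compose. A 64-bit value splits into a (hi, lo) pair of 32-bit
+ // limbs and joins back losslessly.
+ #[cfg(test)]
+ mod big_digit_sketch {
+     use super::big_digit::{from_doublebigdigit, to_doublebigdigit};
+
+     #[test]
+     fn split_and_join_round_trip() {
+         let n: u64 = 0x0000_0001_0000_0002;
+         let (hi, lo) = from_doublebigdigit(n);
+         assert_eq!((hi, lo), (1, 2));
+         assert_eq!(to_doublebigdigit(hi, lo), n);
+     }
+ }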
diff --git a/rust/vendor/num-bigint-0.2.6/src/macros.rs b/rust/vendor/num-bigint-0.2.6/src/macros.rs
new file mode 100644
index 0000000..735cfcb
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/src/macros.rs
@@ -0,0 +1,445 @@
+#![allow(unknown_lints)] // older rustc doesn't know `unused_macros`
+#![allow(unused_macros)]
+
+macro_rules! forward_val_val_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to val-ref
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_val_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to val-ref, with the larger capacity as val
+ if self.capacity() >= other.capacity() {
+ $imp::$method(self, &other)
+ } else {
+ $imp::$method(other, &self)
+ }
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a> $imp<$res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to ref-ref
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a> $imp<$res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // reverse, forward to val-ref
+ $imp::$method(other, self)
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_ref_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a> $imp<&'a $res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to ref-ref
+ $imp::$method(&self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_ref_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to val-ref
+ $imp::$method(self.clone(), other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_ref_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to val-ref, choosing the larger to clone
+ if self.len() >= other.len() {
+ $imp::$method(self.clone(), other)
+ } else {
+ $imp::$method(other.clone(), self)
+ }
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ #[inline]
+ fn $method(&mut self, other: $res) {
+ self.$method(&other);
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_assign_scalar {
+ (impl $imp:ident for $res:ty, $scalar:ty, $method:ident) => {
+ impl $imp<$res> for $scalar {
+ #[inline]
+ fn $method(&mut self, other: $res) {
+ self.$method(&other);
+ }
+ }
+ };
+}
+
+ /// Use this if val_val_binop is already implemented and the reversed order is required.
+macro_rules! forward_scalar_val_val_binop_commutative {
+ (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(other, self)
+ }
+ }
+ };
+}
+
+// Forward scalar to ref-val, when reusing storage is not helpful
+macro_rules! forward_scalar_val_val_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl $imp<$scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(&self, other)
+ }
+ }
+
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_ref_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $scalar> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self, *other)
+ }
+ }
+
+ impl<'a, 'b> $imp<&'a $res> for &'b $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(*self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_val_ref_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a> $imp<&'a $scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(&self, *other)
+ }
+ }
+
+ impl<'a> $imp<$res> for &'a $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(*self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_val_ref_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a> $imp<&'a $scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self, *other)
+ }
+ }
+
+ impl<'a> $imp<$res> for &'a $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(*self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_val_binop_to_val_val {
+ (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
+ impl<'a> $imp<$scalar> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(self.clone(), other)
+ }
+ }
+
+ impl<'a> $imp<&'a $res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(self, other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_ref_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $scalar> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self.clone(), *other)
+ }
+ }
+
+ impl<'a, 'b> $imp<&'a $res> for &'b $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(*self, other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! promote_scalars {
+ (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
+ $(
+ forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+
+ impl $imp<$scalar> for $res {
+ type Output = $res;
+
+ #[cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))]
+ #[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(self, other as $promo)
+ }
+ }
+
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))]
+ #[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(self as $promo, other)
+ }
+ }
+ )*
+ }
+}
+macro_rules! promote_scalars_assign {
+ (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
+ $(
+ impl $imp<$scalar> for $res {
+ #[cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))]
+ #[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
+ #[inline]
+ fn $method(&mut self, other: $scalar) {
+ self.$method(other as $promo);
+ }
+ }
+ )*
+ }
+}
+
+macro_rules! promote_unsigned_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars!(impl $imp<u32> for $res, $method, u8, u16);
+ promote_scalars!(impl $imp<UsizePromotion> for $res, $method, usize);
+ }
+}
+
+macro_rules! promote_unsigned_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars_assign!(impl $imp<u32> for $res, $method, u8, u16);
+ promote_scalars_assign!(impl $imp<UsizePromotion> for $res, $method, usize);
+ }
+}
+
+macro_rules! promote_signed_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars!(impl $imp<i32> for $res, $method, i8, i16);
+ promote_scalars!(impl $imp<IsizePromotion> for $res, $method, isize);
+ }
+}
+
+macro_rules! promote_signed_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars_assign!(impl $imp<i32> for $res, $method, i8, i16);
+ promote_scalars_assign!(impl $imp<IsizePromotion> for $res, $method, isize);
+ }
+}
+
+// Forward everything to ref-ref, when reusing storage is not helpful
+macro_rules! forward_all_binop_to_ref_ref {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop!(impl $imp for $res, $method);
+ forward_val_ref_binop!(impl $imp for $res, $method);
+ forward_ref_val_binop!(impl $imp for $res, $method);
+ };
+}
+
+// Forward everything to val-ref, so LHS storage can be reused
+macro_rules! forward_all_binop_to_val_ref {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop!(impl $imp for $res, $method);
+ forward_ref_val_binop!(impl $imp for $res, $method);
+ forward_ref_ref_binop!(impl $imp for $res, $method);
+ };
+}
+
+// Forward everything to val-ref, commutatively, so either LHS or RHS storage can be reused
+macro_rules! forward_all_binop_to_val_ref_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop_commutative!(impl $imp for $res, $method);
+ forward_ref_val_binop_commutative!(impl $imp for $res, $method);
+ forward_ref_ref_binop_commutative!(impl $imp for $res, $method);
+ };
+}
+
+macro_rules! forward_all_scalar_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_val_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_val_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
+macro_rules! forward_all_scalar_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_val_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
+macro_rules! forward_all_scalar_binop_to_val_val_commutative {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_val_binop_commutative!(impl $imp<$scalar> for $res, $method);
+ forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
+macro_rules! promote_all_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_unsigned_scalars!(impl $imp for $res, $method);
+ promote_signed_scalars!(impl $imp for $res, $method);
+ }
+}
+
+macro_rules! promote_all_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_unsigned_scalars_assign!(impl $imp for $res, $method);
+ promote_signed_scalars_assign!(impl $imp for $res, $method);
+ }
+}
+
+macro_rules! impl_sum_iter_type {
+ ($res:ty) => {
+ impl<T> Sum<T> for $res
+ where
+ $res: Add<T, Output = $res>,
+ {
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = T>,
+ {
+ iter.fold(Zero::zero(), <$res>::add)
+ }
+ }
+ };
+}
+
+macro_rules! impl_product_iter_type {
+ ($res:ty) => {
+ impl<T> Product<T> for $res
+ where
+ $res: Mul<T, Output = $res>,
+ {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = T>,
+ {
+ iter.fold(One::one(), <$res>::mul)
+ }
+ }
+ };
+}
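+
+ // Editorial sketch, not part of the vendored source: the shape the forwarding
+ // macros above generate, written out by hand for a toy wrapper type. The only
+ // "real" implementation is val-ref (so the left-hand buffer could be reused);
+ // the other receiver combinations delegate to it, cloning only when no owned
+ // value is available.
+ #[cfg(test)]
+ mod forwarding_sketch {
+     use std::ops::Add;
+
+     #[derive(Clone, Debug, PartialEq)]
+     struct Wrap(u32);
+
+     // The one real implementation: val-ref.
+     impl<'a> Add<&'a Wrap> for Wrap {
+         type Output = Wrap;
+         fn add(self, other: &Wrap) -> Wrap {
+             Wrap(self.0 + other.0)
+         }
+     }
+
+     // What `forward_val_val_binop!` would expand to: forward to val-ref.
+     impl Add<Wrap> for Wrap {
+         type Output = Wrap;
+         fn add(self, other: Wrap) -> Wrap {
+             Add::add(self, &other)
+         }
+     }
+
+     // What `forward_ref_ref_binop!` would expand to: clone the LHS, then val-ref.
+     impl<'a, 'b> Add<&'b Wrap> for &'a Wrap {
+         type Output = Wrap;
+         fn add(self, other: &Wrap) -> Wrap {
+             Add::add(self.clone(), other)
+         }
+     }
+
+     #[test]
+     fn all_receiver_shapes_agree() {
+         let (a, b) = (Wrap(2), Wrap(3));
+         assert_eq!(&a + &b, Wrap(5));
+         assert_eq!(a + b, Wrap(5));
+     }
+ }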
diff --git a/rust/vendor/num-bigint-0.2.6/src/monty.rs b/rust/vendor/num-bigint-0.2.6/src/monty.rs
new file mode 100644
index 0000000..62f59b3
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/src/monty.rs
@@ -0,0 +1,129 @@
+use integer::Integer;
+use traits::Zero;
+
+use biguint::BigUint;
+
+struct MontyReducer<'a> {
+ n: &'a BigUint,
+ n0inv: u32,
+}
+
+// Calculate the modular inverse of `num`, using Extended GCD.
+//
+// Reference:
+// Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.20
+fn inv_mod_u32(num: u32) -> u32 {
+ // num needs to be relatively prime to 2**32 -- i.e. it must be odd.
+ assert!(num % 2 != 0);
+
+ let mut a: i64 = i64::from(num);
+ let mut b: i64 = i64::from(u32::max_value()) + 1;
+
+ // ExtendedGcd
+ // Input: positive integers a and b
+ // Output: integers (g, u, v) such that g = gcd(a, b) = ua + vb
+ // As we don't need v for modular inverse, we don't calculate it.
+
+ // 1: (u, w) <- (1, 0)
+ let mut u = 1;
+ let mut w = 0;
+ // 3: while b != 0
+ while b != 0 {
+ // 4: (q, r) <- DivRem(a, b)
+ let q = a / b;
+ let r = a % b;
+ // 5: (a, b) <- (b, r)
+ a = b;
+ b = r;
+ // 6: (u, w) <- (w, u - qw)
+ let m = u - w * q;
+ u = w;
+ w = m;
+ }
+
+ assert!(a == 1);
+ // Downcasting acts like a mod 2^32 too.
+ u as u32
+}
+
+impl<'a> MontyReducer<'a> {
+ fn new(n: &'a BigUint) -> Self {
+ let n0inv = inv_mod_u32(n.data[0]);
+ MontyReducer { n: n, n0inv: n0inv }
+ }
+}
+
+// Montgomery Reduction
+//
+// Reference:
+// Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 2.6
+fn monty_redc(a: BigUint, mr: &MontyReducer) -> BigUint {
+ let mut c = a.data;
+ let n = &mr.n.data;
+ let n_size = n.len();
+
+ // Allocate sufficient work space
+ c.resize(2 * n_size + 2, 0);
+
+ // β is the size of a word, in this case 32 bits. So "a mod β" is
+ // equivalent to masking a to 32 bits.
+ // mu <- -N^(-1) mod β
+ let mu = 0u32.wrapping_sub(mr.n0inv);
+
+ // 1: for i = 0 to (n-1)
+ for i in 0..n_size {
+ // 2: q_i <- mu*c_i mod β
+ let q_i = c[i].wrapping_mul(mu);
+
+ // 3: C <- C + q_i * N * β^i
+ super::algorithms::mac_digit(&mut c[i..], n, q_i);
+ }
+
+ // 4: R <- C * β^(-n)
+ // This is an n-word bitshift, equivalent to skipping n words.
+ let ret = BigUint::new(c[n_size..].to_vec());
+
+ // 5: if R >= β^n then return R-N else return R.
+ if ret < *mr.n {
+ ret
+ } else {
+ ret - mr.n
+ }
+}
+
+// Montgomery Multiplication
+fn monty_mult(a: BigUint, b: &BigUint, mr: &MontyReducer) -> BigUint {
+ monty_redc(a * b, mr)
+}
+
+// Montgomery Squaring
+fn monty_sqr(a: BigUint, mr: &MontyReducer) -> BigUint {
+ // TODO: Replace with an optimised squaring function
+ monty_redc(&a * &a, mr)
+}
+
+pub fn monty_modpow(a: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
+ let mr = MontyReducer::new(modulus);
+
+ // Calculate the Montgomery parameter
+ let mut v = vec![0; modulus.data.len()];
+ v.push(1);
+ let r = BigUint::new(v);
+
+ // Map the base to the Montgomery domain
+ let mut apri = a * &r % modulus;
+
+ // Binary exponentiation
+ let mut ans = &r % modulus;
+ let mut e = exp.clone();
+ while !e.is_zero() {
+ if e.is_odd() {
+ ans = monty_mult(ans, &apri, &mr);
+ }
+ apri = monty_sqr(apri, &mr);
+ e >>= 1;
+ }
+
+ // Map the result back to the residues domain
+ monty_redc(ans, &mr)
+}
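+
+ // Editorial sketch, not part of the vendored source: the defining property of
+ // `inv_mod_u32` above. For any odd `num`, the product with its inverse is
+ // congruent to 1 modulo 2^32 (wrapping u32 arithmetic is arithmetic mod 2^32).
+ #[cfg(test)]
+ mod monty_sketch {
+     use super::inv_mod_u32;
+
+     #[test]
+     fn inverse_property_holds_for_odd_inputs() {
+         for &num in &[1u32, 3, 12_345, 0x8000_0001, 0xFFFF_FFFF] {
+             assert_eq!(num.wrapping_mul(inv_mod_u32(num)), 1);
+         }
+     }
+ }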
diff --git a/rust/vendor/num-bigint-0.2.6/tests/bigint.rs b/rust/vendor/num-bigint-0.2.6/tests/bigint.rs
new file mode 100644
index 0000000..911bff0
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/bigint.rs
@@ -0,0 +1,1193 @@
+extern crate num_bigint;
+extern crate num_integer;
+extern crate num_traits;
+#[cfg(feature = "rand")]
+extern crate rand;
+
+use num_bigint::BigUint;
+use num_bigint::Sign::{Minus, NoSign, Plus};
+use num_bigint::{BigInt, ToBigInt};
+
+use std::cmp::Ordering::{Equal, Greater, Less};
+use std::collections::hash_map::RandomState;
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::iter::repeat;
+use std::ops::Neg;
+use std::{f32, f64};
+#[cfg(has_i128)]
+use std::{i128, u128};
+use std::{i16, i32, i64, i8, isize};
+use std::{u16, u32, u64, u8, usize};
+
+use num_integer::Integer;
+use num_traits::{Float, FromPrimitive, Num, One, Pow, Signed, ToPrimitive, Zero};
+
+mod consts;
+use consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_from_bytes_be() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigInt::from_bytes_be(Plus, s.as_bytes()),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ assert_eq!(BigInt::from_bytes_be(Plus, &[]), Zero::zero());
+ assert_eq!(BigInt::from_bytes_be(Minus, &[]), Zero::zero());
+}
+
+#[test]
+fn test_to_bytes_be() {
+ fn check(s: &str, result: &str) {
+ let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap();
+ let (sign, v) = b.to_bytes_be();
+ assert_eq!((Plus, s.as_bytes()), (sign, &*v));
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ let b: BigInt = Zero::zero();
+ assert_eq!(b.to_bytes_be(), (NoSign, vec![0]));
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigInt::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_be(), (Plus, vec![1, 0, 0, 0, 0, 0, 0, 2, 0]));
+}
+
+#[test]
+fn test_from_bytes_le() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigInt::from_bytes_le(Plus, s.as_bytes()),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ assert_eq!(BigInt::from_bytes_le(Plus, &[]), Zero::zero());
+ assert_eq!(BigInt::from_bytes_le(Minus, &[]), Zero::zero());
+}
+
+#[test]
+fn test_to_bytes_le() {
+ fn check(s: &str, result: &str) {
+ let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap();
+ let (sign, v) = b.to_bytes_le();
+ assert_eq!((Plus, s.as_bytes()), (sign, &*v));
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ let b: BigInt = Zero::zero();
+ assert_eq!(b.to_bytes_le(), (NoSign, vec![0]));
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigInt::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_le(), (Plus, vec![0, 2, 0, 0, 0, 0, 0, 0, 1]));
+}
+
+#[test]
+fn test_to_signed_bytes_le() {
+ fn check(s: &str, result: Vec<u8>) {
+ assert_eq!(
+ BigInt::parse_bytes(s.as_bytes(), 10)
+ .unwrap()
+ .to_signed_bytes_le(),
+ result
+ );
+ }
+
+ check("0", vec![0]);
+ check("32767", vec![0xff, 0x7f]);
+ check("-1", vec![0xff]);
+ check("16777216", vec![0, 0, 0, 1]);
+ check("-100", vec![156]);
+ check("-8388608", vec![0, 0, 0x80]);
+ check("-192", vec![0x40, 0xff]);
+ check("128", vec![0x80, 0])
+}
+
+#[test]
+fn test_from_signed_bytes_le() {
+ fn check(s: &[u8], result: &str) {
+ assert_eq!(
+ BigInt::from_signed_bytes_le(s),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+
+ check(&[], "0");
+ check(&[0], "0");
+ check(&[0; 10], "0");
+ check(&[0xff, 0x7f], "32767");
+ check(&[0xff], "-1");
+ check(&[0, 0, 0, 1], "16777216");
+ check(&[156], "-100");
+ check(&[0, 0, 0x80], "-8388608");
+ check(&[0xff; 10], "-1");
+ check(&[0x40, 0xff], "-192");
+}
+
+#[test]
+fn test_to_signed_bytes_be() {
+ fn check(s: &str, result: Vec<u8>) {
+ assert_eq!(
+ BigInt::parse_bytes(s.as_bytes(), 10)
+ .unwrap()
+ .to_signed_bytes_be(),
+ result
+ );
+ }
+
+ check("0", vec![0]);
+ check("32767", vec![0x7f, 0xff]);
+ check("-1", vec![255]);
+ check("16777216", vec![1, 0, 0, 0]);
+ check("-100", vec![156]);
+ check("-8388608", vec![128, 0, 0]);
+ check("-192", vec![0xff, 0x40]);
+ check("128", vec![0, 0x80]);
+}
+
+#[test]
+fn test_from_signed_bytes_be() {
+ fn check(s: &[u8], result: &str) {
+ assert_eq!(
+ BigInt::from_signed_bytes_be(s),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+
+ check(&[], "0");
+ check(&[0], "0");
+ check(&[0; 10], "0");
+ check(&[127, 255], "32767");
+ check(&[255], "-1");
+ check(&[1, 0, 0, 0], "16777216");
+ check(&[156], "-100");
+ check(&[128, 0, 0], "-8388608");
+ check(&[255; 10], "-1");
+ check(&[0xff, 0x40], "-192");
+}
+
+#[test]
+fn test_signed_bytes_be_round_trip() {
+ for i in -0x1FFFF..0x20000 {
+ let n = BigInt::from(i);
+ assert_eq!(n, BigInt::from_signed_bytes_be(&n.to_signed_bytes_be()));
+ }
+}
+
+#[test]
+fn test_signed_bytes_le_round_trip() {
+ for i in -0x1FFFF..0x20000 {
+ let n = BigInt::from(i);
+ assert_eq!(n, BigInt::from_signed_bytes_le(&n.to_signed_bytes_le()));
+ }
+}
+
+#[test]
+fn test_cmp() {
+ let vs: [&[u32]; 4] = [&[2 as u32], &[1, 1], &[2, 1], &[1, 1, 1]];
+ let mut nums = Vec::new();
+ for s in vs.iter().rev() {
+ nums.push(BigInt::from_slice(Minus, *s));
+ }
+ nums.push(Zero::zero());
+ nums.extend(vs.iter().map(|s| BigInt::from_slice(Plus, *s)));
+
+ for (i, ni) in nums.iter().enumerate() {
+ for (j0, nj) in nums[i..].iter().enumerate() {
+ let j = i + j0;
+ if i == j {
+ assert_eq!(ni.cmp(nj), Equal);
+ assert_eq!(nj.cmp(ni), Equal);
+ assert_eq!(ni, nj);
+ assert!(!(ni != nj));
+ assert!(ni <= nj);
+ assert!(ni >= nj);
+ assert!(!(ni < nj));
+ assert!(!(ni > nj));
+ } else {
+ assert_eq!(ni.cmp(nj), Less);
+ assert_eq!(nj.cmp(ni), Greater);
+
+ assert!(!(ni == nj));
+ assert!(ni != nj);
+
+ assert!(ni <= nj);
+ assert!(!(ni >= nj));
+ assert!(ni < nj);
+ assert!(!(ni > nj));
+
+ assert!(!(nj <= ni));
+ assert!(nj >= ni);
+ assert!(!(nj < ni));
+ assert!(nj > ni);
+ }
+ }
+ }
+}
+
+fn hash<T: Hash>(x: &T) -> u64 {
+ let mut hasher = <RandomState as BuildHasher>::Hasher::new();
+ x.hash(&mut hasher);
+ hasher.finish()
+}
+
+#[test]
+fn test_hash() {
+ let a = BigInt::new(NoSign, vec![]);
+ let b = BigInt::new(NoSign, vec![0]);
+ let c = BigInt::new(Plus, vec![1]);
+ let d = BigInt::new(Plus, vec![1, 0, 0, 0, 0, 0]);
+ let e = BigInt::new(Plus, vec![0, 0, 0, 0, 0, 1]);
+ let f = BigInt::new(Minus, vec![1]);
+ assert!(hash(&a) == hash(&b));
+ assert!(hash(&b) != hash(&c));
+ assert!(hash(&c) == hash(&d));
+ assert!(hash(&d) != hash(&e));
+ assert!(hash(&c) != hash(&f));
+}
+
+#[test]
+fn test_convert_i64() {
+ fn check(b1: BigInt, i: i64) {
+ let b2: BigInt = FromPrimitive::from_i64(i).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_i64().unwrap() == i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i64::MIN.to_bigint().unwrap(), i64::MIN);
+ check(i64::MAX.to_bigint().unwrap(), i64::MAX);
+
+ assert_eq!((i64::MAX as u64 + 1).to_bigint().unwrap().to_i64(), None);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i64(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 0, 0, 1 << 31])).to_i64(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i64(),
+ None
+ );
+}
+
+#[test]
+#[cfg(has_i128)]
+fn test_convert_i128() {
+ fn check(b1: BigInt, i: i128) {
+ let b2: BigInt = FromPrimitive::from_i128(i).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_i128().unwrap() == i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i128::MIN.to_bigint().unwrap(), i128::MIN);
+ check(i128::MAX.to_bigint().unwrap(), i128::MAX);
+
+ assert_eq!((i128::MAX as u128 + 1).to_bigint().unwrap().to_i128(), None);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i128(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 0, 0, 1 << 31])).to_i128(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i128(),
+ None
+ );
+}
+
+#[test]
+fn test_convert_u64() {
+ fn check(b1: BigInt, u: u64) {
+ let b2: BigInt = FromPrimitive::from_u64(u).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_u64().unwrap() == u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u64::MIN.to_bigint().unwrap(), u64::MIN);
+ check(u64::MAX.to_bigint().unwrap(), u64::MAX);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u64(),
+ None
+ );
+
+ let max_value: BigUint = FromPrimitive::from_u64(u64::MAX).unwrap();
+ assert_eq!(BigInt::from_biguint(Minus, max_value).to_u64(), None);
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u64(),
+ None
+ );
+}
+
+#[test]
+#[cfg(has_i128)]
+fn test_convert_u128() {
+ fn check(b1: BigInt, u: u128) {
+ let b2: BigInt = FromPrimitive::from_u128(u).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_u128().unwrap() == u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u128::MIN.to_bigint().unwrap(), u128::MIN);
+ check(u128::MAX.to_bigint().unwrap(), u128::MAX);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u128(),
+ None
+ );
+
+ let max_value: BigUint = FromPrimitive::from_u128(u128::MAX).unwrap();
+ assert_eq!(BigInt::from_biguint(Minus, max_value).to_u128(), None);
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u128(),
+ None
+ );
+}
+
+#[test]
+fn test_convert_f32() {
+ fn check(b1: &BigInt, f: f32) {
+ let b2 = BigInt::from_f32(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f32().unwrap(), f);
+ let neg_b1 = -b1;
+ let neg_b2 = BigInt::from_f32(-f).unwrap();
+ assert_eq!(neg_b1, neg_b2);
+ assert_eq!(neg_b1.to_f32().unwrap(), -f);
+ }
+
+ check(&BigInt::zero(), 0.0);
+ check(&BigInt::one(), 1.0);
+ check(&BigInt::from(u16::MAX), 2.0.powi(16) - 1.0);
+ check(&BigInt::from(1u64 << 32), 2.0.powi(32));
+ check(&BigInt::from_slice(Plus, &[0, 0, 1]), 2.0.powi(64));
+ check(
+ &((BigInt::one() << 100) + (BigInt::one() << 123)),
+ 2.0.powi(100) + 2.0.powi(123),
+ );
+ check(&(BigInt::one() << 127), 2.0.powi(127));
+ check(&(BigInt::from((1u64 << 24) - 1) << (128 - 24)), f32::MAX);
+
+ // keeping all 24 digits with the bits at different offsets to the BigDigits
+ let x: u32 = 0b00000000101111011111011011011101;
+ let mut f = x as f32;
+ let mut b = BigInt::from(x);
+ for _ in 0..64 {
+ check(&b, f);
+ f *= 2.0;
+ b = b << 1;
+ }
+
+ // this number when rounded to f64 then f32 isn't the same as when rounded straight to f32
+ let mut n: i64 = 0b0000000000111111111111111111111111011111111111111111111111111111;
+ assert!((n as f64) as f32 != n as f32);
+ assert_eq!(BigInt::from(n).to_f32(), Some(n as f32));
+ n = -n;
+ assert!((n as f64) as f32 != n as f32);
+ assert_eq!(BigInt::from(n).to_f32(), Some(n as f32));
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 25) - 1) as f32;
+ let mut b = BigInt::from(1u64 << 25);
+ for _ in 0..64 {
+ assert_eq!(b.to_f32(), Some(f));
+ f *= 2.0;
+ b = b << 1;
+ }
+
+ // rounding
+ assert_eq!(
+ BigInt::from_f32(-f32::consts::PI),
+ Some(BigInt::from(-3i32))
+ );
+ assert_eq!(BigInt::from_f32(-f32::consts::E), Some(BigInt::from(-2i32)));
+ assert_eq!(BigInt::from_f32(-0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(-0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(-0.0), Some(BigInt::zero()));
+ assert_eq!(
+ BigInt::from_f32(f32::MIN_POSITIVE / 2.0),
+ Some(BigInt::zero())
+ );
+ assert_eq!(BigInt::from_f32(f32::MIN_POSITIVE), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(f32::consts::E), Some(BigInt::from(2u32)));
+ assert_eq!(BigInt::from_f32(f32::consts::PI), Some(BigInt::from(3u32)));
+
+ // special float values
+ assert_eq!(BigInt::from_f32(f32::NAN), None);
+ assert_eq!(BigInt::from_f32(f32::INFINITY), None);
+ assert_eq!(BigInt::from_f32(f32::NEG_INFINITY), None);
+
+ // largest BigInt that will round to a finite f32 value
+ let big_num = (BigInt::one() << 128) - BigInt::one() - (BigInt::one() << (128 - 25));
+ assert_eq!(big_num.to_f32(), Some(f32::MAX));
+ assert_eq!((&big_num + BigInt::one()).to_f32(), None);
+ assert_eq!((-&big_num).to_f32(), Some(f32::MIN));
+ assert_eq!(((-&big_num) - BigInt::one()).to_f32(), None);
+
+ assert_eq!(((BigInt::one() << 128) - BigInt::one()).to_f32(), None);
+ assert_eq!((BigInt::one() << 128).to_f32(), None);
+ assert_eq!((-((BigInt::one() << 128) - BigInt::one())).to_f32(), None);
+ assert_eq!((-(BigInt::one() << 128)).to_f32(), None);
+}
+
+#[test]
+fn test_convert_f64() {
+ fn check(b1: &BigInt, f: f64) {
+ let b2 = BigInt::from_f64(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f64().unwrap(), f);
+ let neg_b1 = -b1;
+ let neg_b2 = BigInt::from_f64(-f).unwrap();
+ assert_eq!(neg_b1, neg_b2);
+ assert_eq!(neg_b1.to_f64().unwrap(), -f);
+ }
+
+ check(&BigInt::zero(), 0.0);
+ check(&BigInt::one(), 1.0);
+ check(&BigInt::from(u32::MAX), 2.0.powi(32) - 1.0);
+ check(&BigInt::from(1u64 << 32), 2.0.powi(32));
+ check(&BigInt::from_slice(Plus, &[0, 0, 1]), 2.0.powi(64));
+ check(
+ &((BigInt::one() << 100) + (BigInt::one() << 152)),
+ 2.0.powi(100) + 2.0.powi(152),
+ );
+ check(&(BigInt::one() << 1023), 2.0.powi(1023));
+ check(&(BigInt::from((1u64 << 53) - 1) << (1024 - 53)), f64::MAX);
+
+ // keeping all 53 digits with the bits at different offsets to the BigDigits
+ let x: u64 = 0b0000000000011110111110110111111101110111101111011111011011011101;
+ let mut f = x as f64;
+ let mut b = BigInt::from(x);
+ for _ in 0..128 {
+ check(&b, f);
+ f *= 2.0;
+ b = b << 1;
+ }
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 54) - 1) as f64;
+ let mut b = BigInt::from(1u64 << 54);
+ for _ in 0..128 {
+ assert_eq!(b.to_f64(), Some(f));
+ f *= 2.0;
+ b = b << 1;
+ }
+
+ // rounding
+ assert_eq!(
+ BigInt::from_f64(-f64::consts::PI),
+ Some(BigInt::from(-3i32))
+ );
+ assert_eq!(BigInt::from_f64(-f64::consts::E), Some(BigInt::from(-2i32)));
+ assert_eq!(BigInt::from_f64(-0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(-0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(-0.0), Some(BigInt::zero()));
+ assert_eq!(
+ BigInt::from_f64(f64::MIN_POSITIVE / 2.0),
+ Some(BigInt::zero())
+ );
+ assert_eq!(BigInt::from_f64(f64::MIN_POSITIVE), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(f64::consts::E), Some(BigInt::from(2u32)));
+ assert_eq!(BigInt::from_f64(f64::consts::PI), Some(BigInt::from(3u32)));
+
+ // special float values
+ assert_eq!(BigInt::from_f64(f64::NAN), None);
+ assert_eq!(BigInt::from_f64(f64::INFINITY), None);
+ assert_eq!(BigInt::from_f64(f64::NEG_INFINITY), None);
+
+ // largest BigInt that will round to a finite f64 value
+ let big_num = (BigInt::one() << 1024) - BigInt::one() - (BigInt::one() << (1024 - 54));
+ assert_eq!(big_num.to_f64(), Some(f64::MAX));
+ assert_eq!((&big_num + BigInt::one()).to_f64(), None);
+ assert_eq!((-&big_num).to_f64(), Some(f64::MIN));
+ assert_eq!(((-&big_num) - BigInt::one()).to_f64(), None);
+
+ assert_eq!(((BigInt::one() << 1024) - BigInt::one()).to_f64(), None);
+ assert_eq!((BigInt::one() << 1024).to_f64(), None);
+ assert_eq!((-((BigInt::one() << 1024) - BigInt::one())).to_f64(), None);
+ assert_eq!((-(BigInt::one() << 1024)).to_f64(), None);
+}
+
+#[test]
+fn test_convert_to_biguint() {
+ fn check(n: BigInt, ans_1: BigUint) {
+ assert_eq!(n.to_biguint().unwrap(), ans_1);
+ assert_eq!(n.to_biguint().unwrap().to_bigint().unwrap(), n);
+ }
+ let zero: BigInt = Zero::zero();
+ let unsigned_zero: BigUint = Zero::zero();
+ let positive = BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3]));
+ let negative = -&positive;
+
+ check(zero, unsigned_zero);
+ check(positive, BigUint::new(vec![1, 2, 3]));
+
+ assert_eq!(negative.to_biguint(), None);
+}
+
+#[test]
+fn test_convert_from_uint() {
+ macro_rules! check {
+ ($ty:ident, $max:expr) => {
+ assert_eq!(BigInt::from($ty::zero()), BigInt::zero());
+ assert_eq!(BigInt::from($ty::one()), BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX - $ty::one()), $max - BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX), $max);
+ };
+ }
+
+ check!(u8, BigInt::from_slice(Plus, &[u8::MAX as u32]));
+ check!(u16, BigInt::from_slice(Plus, &[u16::MAX as u32]));
+ check!(u32, BigInt::from_slice(Plus, &[u32::MAX]));
+ check!(u64, BigInt::from_slice(Plus, &[u32::MAX, u32::MAX]));
+ #[cfg(has_i128)]
+ check!(
+ u128,
+ BigInt::from_slice(Plus, &[u32::MAX, u32::MAX, u32::MAX, u32::MAX])
+ );
+ check!(usize, BigInt::from(usize::MAX as u64));
+}
+
+#[test]
+fn test_convert_from_int() {
+ macro_rules! check {
+ ($ty:ident, $min:expr, $max:expr) => {
+ assert_eq!(BigInt::from($ty::MIN), $min);
+ assert_eq!(BigInt::from($ty::MIN + $ty::one()), $min + BigInt::one());
+ assert_eq!(BigInt::from(-$ty::one()), -BigInt::one());
+ assert_eq!(BigInt::from($ty::zero()), BigInt::zero());
+ assert_eq!(BigInt::from($ty::one()), BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX - $ty::one()), $max - BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX), $max);
+ };
+ }
+
+ check!(
+ i8,
+ BigInt::from_slice(Minus, &[1 << 7]),
+ BigInt::from_slice(Plus, &[i8::MAX as u32])
+ );
+ check!(
+ i16,
+ BigInt::from_slice(Minus, &[1 << 15]),
+ BigInt::from_slice(Plus, &[i16::MAX as u32])
+ );
+ check!(
+ i32,
+ BigInt::from_slice(Minus, &[1 << 31]),
+ BigInt::from_slice(Plus, &[i32::MAX as u32])
+ );
+ check!(
+ i64,
+ BigInt::from_slice(Minus, &[0, 1 << 31]),
+ BigInt::from_slice(Plus, &[u32::MAX, i32::MAX as u32])
+ );
+ #[cfg(has_i128)]
+ check!(
+ i128,
+ BigInt::from_slice(Minus, &[0, 0, 0, 1 << 31]),
+ BigInt::from_slice(Plus, &[u32::MAX, u32::MAX, u32::MAX, i32::MAX as u32])
+ );
+ check!(
+ isize,
+ BigInt::from(isize::MIN as i64),
+ BigInt::from(isize::MAX as i64)
+ );
+}
+
+#[test]
+fn test_convert_from_biguint() {
+ assert_eq!(BigInt::from(BigUint::zero()), BigInt::zero());
+ assert_eq!(BigInt::from(BigUint::one()), BigInt::one());
+ assert_eq!(
+ BigInt::from(BigUint::from_slice(&[1, 2, 3])),
+ BigInt::from_slice(Plus, &[1, 2, 3])
+ );
+}
+
+#[test]
+fn test_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ assert_op!(a + b == c);
+ assert_op!(b + a == c);
+ assert_op!(c + na == b);
+ assert_op!(c + nb == a);
+ assert_op!(a + nc == nb);
+ assert_op!(b + nc == na);
+ assert_op!(na + nb == nc);
+ assert_op!(a + na == Zero::zero());
+
+ assert_assign_op!(a += b == c);
+ assert_assign_op!(b += a == c);
+ assert_assign_op!(c += na == b);
+ assert_assign_op!(c += nb == a);
+ assert_assign_op!(a += nc == nb);
+ assert_assign_op!(b += nc == na);
+ assert_assign_op!(na += nb == nc);
+ assert_assign_op!(a += na == Zero::zero());
+ }
+}
+
+#[test]
+fn test_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ assert_op!(c - a == b);
+ assert_op!(c - b == a);
+ assert_op!(nb - a == nc);
+ assert_op!(na - b == nc);
+ assert_op!(b - na == c);
+ assert_op!(a - nb == c);
+ assert_op!(nc - na == nb);
+ assert_op!(a - a == Zero::zero());
+
+ assert_assign_op!(c -= a == b);
+ assert_assign_op!(c -= b == a);
+ assert_assign_op!(nb -= a == nc);
+ assert_assign_op!(na -= b == nc);
+ assert_assign_op!(b -= na == c);
+ assert_assign_op!(a -= nb == c);
+ assert_assign_op!(nc -= na == nb);
+ assert_assign_op!(a -= a == Zero::zero());
+ }
+}
+
+#[test]
+fn test_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ assert_op!(a * b == c);
+ assert_op!(b * a == c);
+ assert_op!(na * nb == c);
+
+ assert_op!(na * b == nc);
+ assert_op!(nb * a == nc);
+
+ assert_assign_op!(a *= b == c);
+ assert_assign_op!(b *= a == c);
+ assert_assign_op!(na *= nb == c);
+
+ assert_assign_op!(na *= b == nc);
+ assert_assign_op!(nb *= a == nc);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ assert!(a == &b * &c + &d);
+ assert!(a == &c * &b + &d);
+ }
+}
+
+#[test]
+fn test_div_mod_floor() {
+ fn check_sub(a: &BigInt, b: &BigInt, ans_d: &BigInt, ans_m: &BigInt) {
+ let (d, m) = a.div_mod_floor(b);
+ if !m.is_zero() {
+ assert_eq!(m.sign(), b.sign());
+ }
+ assert!(m.abs() <= b.abs());
+ assert!(*a == b * &d + &m);
+ assert!(d == *ans_d);
+ assert!(m == *ans_m);
+ }
+
+ fn check(a: &BigInt, b: &BigInt, d: &BigInt, m: &BigInt) {
+ if m.is_zero() {
+ check_sub(a, b, d, m);
+ check_sub(a, &b.neg(), &d.neg(), m);
+ check_sub(&a.neg(), b, &d.neg(), m);
+ check_sub(&a.neg(), &b.neg(), d, m);
+ } else {
+ let one: BigInt = One::one();
+ check_sub(a, b, d, m);
+ check_sub(a, &b.neg(), &(d.neg() - &one), &(m - b));
+ check_sub(&a.neg(), b, &(d.neg() - &one), &(b - m));
+ check_sub(&a.neg(), &b.neg(), d, &m.neg());
+ }
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_div_rem() {
+ fn check_sub(a: &BigInt, b: &BigInt, ans_q: &BigInt, ans_r: &BigInt) {
+ let (q, r) = a.div_rem(b);
+ if !r.is_zero() {
+ assert_eq!(r.sign(), a.sign());
+ }
+ assert!(r.abs() <= b.abs());
+ assert!(*a == b * &q + &r);
+ assert!(q == *ans_q);
+ assert!(r == *ans_r);
+
+ let (a, b, ans_q, ans_r) = (a.clone(), b.clone(), ans_q.clone(), ans_r.clone());
+ assert_op!(a / b == ans_q);
+ assert_op!(a % b == ans_r);
+ assert_assign_op!(a /= b == ans_q);
+ assert_assign_op!(a %= b == ans_r);
+ }
+
+ fn check(a: &BigInt, b: &BigInt, q: &BigInt, r: &BigInt) {
+ check_sub(a, b, q, r);
+ check_sub(a, &b.neg(), &q.neg(), r);
+ check_sub(&a.neg(), b, &q.neg(), &r.neg());
+ check_sub(&a.neg(), &b.neg(), q, &r.neg());
+ }
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_checked_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ assert!(a.checked_add(&b).unwrap() == c);
+ assert!(b.checked_add(&a).unwrap() == c);
+ assert!(c.checked_add(&(-&a)).unwrap() == b);
+ assert!(c.checked_add(&(-&b)).unwrap() == a);
+ assert!(a.checked_add(&(-&c)).unwrap() == (-&b));
+ assert!(b.checked_add(&(-&c)).unwrap() == (-&a));
+ assert!((-&a).checked_add(&(-&b)).unwrap() == (-&c));
+ assert!(a.checked_add(&(-&a)).unwrap() == Zero::zero());
+ }
+}
+
+#[test]
+fn test_checked_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ assert!(c.checked_sub(&a).unwrap() == b);
+ assert!(c.checked_sub(&b).unwrap() == a);
+ assert!((-&b).checked_sub(&a).unwrap() == (-&c));
+ assert!((-&a).checked_sub(&b).unwrap() == (-&c));
+ assert!(b.checked_sub(&(-&a)).unwrap() == c);
+ assert!(a.checked_sub(&(-&b)).unwrap() == c);
+ assert!((-&c).checked_sub(&(-&a)).unwrap() == (-&b));
+ assert!(a.checked_sub(&a).unwrap() == Zero::zero());
+ }
+}
+
+#[test]
+fn test_checked_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ assert!(a.checked_mul(&b).unwrap() == c);
+ assert!(b.checked_mul(&a).unwrap() == c);
+
+ assert!((-&a).checked_mul(&b).unwrap() == -&c);
+ assert!((-&b).checked_mul(&a).unwrap() == -&c);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ assert!(a == b.checked_mul(&c).unwrap() + &d);
+ assert!(a == c.checked_mul(&b).unwrap() + &d);
+ }
+}
+
+#[test]
+fn test_checked_div() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ assert!(c.checked_div(&a).unwrap() == b);
+ assert!((-&c).checked_div(&(-&a)).unwrap() == b);
+ assert!((-&c).checked_div(&a).unwrap() == -&b);
+ }
+ if !b.is_zero() {
+ assert!(c.checked_div(&b).unwrap() == a);
+ assert!((-&c).checked_div(&(-&b)).unwrap() == a);
+ assert!((-&c).checked_div(&b).unwrap() == -&a);
+ }
+
+ assert!(c.checked_div(&Zero::zero()).is_none());
+ assert!((-&c).checked_div(&Zero::zero()).is_none());
+ }
+}
+
+#[test]
+fn test_gcd() {
+ fn check(a: isize, b: isize, c: isize) {
+ let big_a: BigInt = FromPrimitive::from_isize(a).unwrap();
+ let big_b: BigInt = FromPrimitive::from_isize(b).unwrap();
+ let big_c: BigInt = FromPrimitive::from_isize(c).unwrap();
+
+ assert_eq!(big_a.gcd(&big_b), big_c);
+ }
+
+ check(10, 2, 2);
+ check(10, 3, 1);
+ check(0, 3, 3);
+ check(3, 3, 3);
+ check(56, 42, 14);
+ check(3, -3, 3);
+ check(-6, 3, 3);
+ check(-4, -2, 2);
+}
+
+#[test]
+fn test_lcm() {
+ fn check(a: isize, b: isize, c: isize) {
+ let big_a: BigInt = FromPrimitive::from_isize(a).unwrap();
+ let big_b: BigInt = FromPrimitive::from_isize(b).unwrap();
+ let big_c: BigInt = FromPrimitive::from_isize(c).unwrap();
+
+ assert_eq!(big_a.lcm(&big_b), big_c);
+ }
+
+ check(0, 0, 0);
+ check(1, 0, 0);
+ check(0, 1, 0);
+ check(1, 1, 1);
+ check(-1, 1, 1);
+ check(1, -1, 1);
+ check(-1, -1, 1);
+ check(8, 9, 72);
+ check(11, 5, 55);
+}
+
+#[test]
+fn test_abs_sub() {
+ let zero: BigInt = Zero::zero();
+ let one: BigInt = One::one();
+ assert_eq!((-&one).abs_sub(&one), zero);
+ let one: BigInt = One::one();
+ let zero: BigInt = Zero::zero();
+ assert_eq!(one.abs_sub(&one), zero);
+ let one: BigInt = One::one();
+ let zero: BigInt = Zero::zero();
+ assert_eq!(one.abs_sub(&zero), one);
+ let one: BigInt = One::one();
+ let two: BigInt = FromPrimitive::from_isize(2).unwrap();
+ assert_eq!(one.abs_sub(&-&one), two);
+}
+
+#[test]
+fn test_from_str_radix() {
+ fn check(s: &str, ans: Option<isize>) {
+ let ans = ans.map(|n| {
+ let x: BigInt = FromPrimitive::from_isize(n).unwrap();
+ x
+ });
+ assert_eq!(BigInt::from_str_radix(s, 10).ok(), ans);
+ }
+ check("10", Some(10));
+ check("1", Some(1));
+ check("0", Some(0));
+ check("-1", Some(-1));
+ check("-10", Some(-10));
+ check("+10", Some(10));
+ check("--7", None);
+ check("++5", None);
+ check("+-9", None);
+ check("-+3", None);
+ check("Z", None);
+ check("_", None);
+
+ // issue 10522, this hit an edge case that caused it to
+ // attempt to allocate a vector of size (-1u) == huge.
+ let x: BigInt = format!("1{}", repeat("0").take(36).collect::<String>())
+ .parse()
+ .unwrap();
+ let _y = x.to_string();
+}
+
+#[test]
+fn test_lower_hex() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes("-22405534230753963835153736737".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{:x}", a), "a");
+ assert_eq!(format!("{:x}", hello), "-48656c6c6f20776f726c6421");
+ assert_eq!(format!("{:♥>+#8x}", a), "♥♥♥♥+0xa");
+}
+
+#[test]
+fn test_upper_hex() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes("-22405534230753963835153736737".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{:X}", a), "A");
+ assert_eq!(format!("{:X}", hello), "-48656C6C6F20776F726C6421");
+ assert_eq!(format!("{:♥>+#8X}", a), "♥♥♥♥+0xA");
+}
+
+#[test]
+fn test_binary() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes("-224055342307539".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{:b}", a), "1010");
+ assert_eq!(
+ format!("{:b}", hello),
+ "-110010111100011011110011000101101001100011010011"
+ );
+ assert_eq!(format!("{:♥>+#8b}", a), "♥+0b1010");
+}
+
+#[test]
+fn test_octal() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes("-22405534230753963835153736737".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{:o}", a), "12");
+ assert_eq!(format!("{:o}", hello), "-22062554330674403566756233062041");
+ assert_eq!(format!("{:♥>+#8o}", a), "♥♥♥+0o12");
+}
+
+#[test]
+fn test_display() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes("-22405534230753963835153736737".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{}", a), "10");
+ assert_eq!(format!("{}", hello), "-22405534230753963835153736737");
+ assert_eq!(format!("{:♥>+#8}", a), "♥♥♥♥♥+10");
+}
+
+#[test]
+fn test_neg() {
+ assert!(-BigInt::new(Plus, vec![1, 1, 1]) == BigInt::new(Minus, vec![1, 1, 1]));
+ assert!(-BigInt::new(Minus, vec![1, 1, 1]) == BigInt::new(Plus, vec![1, 1, 1]));
+ let zero: BigInt = Zero::zero();
+ assert_eq!(-&zero, zero);
+}
+
+#[test]
+fn test_negative_shr() {
+ assert_eq!(BigInt::from(-1) >> 1, BigInt::from(-1));
+ assert_eq!(BigInt::from(-2) >> 1, BigInt::from(-1));
+ assert_eq!(BigInt::from(-3) >> 1, BigInt::from(-2));
+ assert_eq!(BigInt::from(-3) >> 2, BigInt::from(-1));
+}
+
+#[test]
+#[cfg(feature = "rand")]
+fn test_random_shr() {
+ use rand::distributions::Standard;
+ use rand::Rng;
+ let mut rng = rand::thread_rng();
+
+ for p in rng.sample_iter::<i64, _>(&Standard).take(1000) {
+ let big = BigInt::from(p);
+ let bigger = &big << 1000;
+ assert_eq!(&bigger >> 1000, big);
+ for i in 0..64 {
+ let answer = BigInt::from(p >> i);
+ assert_eq!(&big >> i, answer);
+ assert_eq!(&bigger >> (1000 + i), answer);
+ }
+ }
+}
+
+#[test]
+fn test_iter_sum() {
+ let result: BigInt = FromPrimitive::from_isize(-1234567).unwrap();
+ let data: Vec<BigInt> = vec![
+ FromPrimitive::from_i32(-1000000).unwrap(),
+ FromPrimitive::from_i32(-200000).unwrap(),
+ FromPrimitive::from_i32(-30000).unwrap(),
+ FromPrimitive::from_i32(-4000).unwrap(),
+ FromPrimitive::from_i32(-500).unwrap(),
+ FromPrimitive::from_i32(-60).unwrap(),
+ FromPrimitive::from_i32(-7).unwrap(),
+ ];
+
+ assert_eq!(result, data.iter().sum());
+ assert_eq!(result, data.into_iter().sum());
+}
+
+#[test]
+fn test_iter_product() {
+ let data: Vec<BigInt> = vec![
+ FromPrimitive::from_i32(1001).unwrap(),
+ FromPrimitive::from_i32(-1002).unwrap(),
+ FromPrimitive::from_i32(1003).unwrap(),
+ FromPrimitive::from_i32(-1004).unwrap(),
+ FromPrimitive::from_i32(1005).unwrap(),
+ ];
+ let result = data.get(0).unwrap()
+ * data.get(1).unwrap()
+ * data.get(2).unwrap()
+ * data.get(3).unwrap()
+ * data.get(4).unwrap();
+
+ assert_eq!(result, data.iter().product());
+ assert_eq!(result, data.into_iter().product());
+}
+
+#[test]
+fn test_iter_sum_generic() {
+ let result: BigInt = FromPrimitive::from_isize(-1234567).unwrap();
+ let data = vec![-1000000, -200000, -30000, -4000, -500, -60, -7];
+
+ assert_eq!(result, data.iter().sum());
+ assert_eq!(result, data.into_iter().sum());
+}
+
+#[test]
+fn test_iter_product_generic() {
+ let data = vec![1001, -1002, 1003, -1004, 1005];
+ let result = data[0].to_bigint().unwrap()
+ * data[1].to_bigint().unwrap()
+ * data[2].to_bigint().unwrap()
+ * data[3].to_bigint().unwrap()
+ * data[4].to_bigint().unwrap();
+
+ assert_eq!(result, data.iter().product());
+ assert_eq!(result, data.into_iter().product());
+}
+
+#[test]
+fn test_pow() {
+ let one = BigInt::from(1i32);
+ let two = BigInt::from(2i32);
+ let four = BigInt::from(4i32);
+ let eight = BigInt::from(8i32);
+ let minus_two = BigInt::from(-2i32);
+ macro_rules! check {
+ ($t:ty) => {
+ assert_eq!(two.pow(0 as $t), one);
+ assert_eq!(two.pow(1 as $t), two);
+ assert_eq!(two.pow(2 as $t), four);
+ assert_eq!(two.pow(3 as $t), eight);
+ assert_eq!(two.pow(&(3 as $t)), eight);
+ assert_eq!(minus_two.pow(0 as $t), one, "-2^0");
+ assert_eq!(minus_two.pow(1 as $t), minus_two, "-2^1");
+ assert_eq!(minus_two.pow(2 as $t), four, "-2^2");
+ assert_eq!(minus_two.pow(3 as $t), -&eight, "-2^3");
+ };
+ }
+ check!(u8);
+ check!(u16);
+ check!(u32);
+ check!(u64);
+ check!(usize);
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/bigint_bitwise.rs b/rust/vendor/num-bigint-0.2.6/tests/bigint_bitwise.rs
new file mode 100644
index 0000000..cc0c493
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/bigint_bitwise.rs
@@ -0,0 +1,181 @@
+extern crate num_bigint;
+extern crate num_traits;
+
+use num_bigint::{BigInt, Sign, ToBigInt};
+use num_traits::ToPrimitive;
+use std::{i32, i64, u32};
+
+enum ValueVec {
+ N,
+ P(&'static [u32]),
+ M(&'static [u32]),
+}
+
+use ValueVec::*;
+
+impl ToBigInt for ValueVec {
+ fn to_bigint(&self) -> Option<BigInt> {
+ match self {
+ &N => Some(BigInt::from_slice(Sign::NoSign, &[])),
+ &P(s) => Some(BigInt::from_slice(Sign::Plus, s)),
+ &M(s) => Some(BigInt::from_slice(Sign::Minus, s)),
+ }
+ }
+}
+
+// a, !a
+const NOT_VALUES: &'static [(ValueVec, ValueVec)] = &[
+ (N, M(&[1])),
+ (P(&[1]), M(&[2])),
+ (P(&[2]), M(&[3])),
+ (P(&[!0 - 2]), M(&[!0 - 1])),
+ (P(&[!0 - 1]), M(&[!0])),
+ (P(&[!0]), M(&[0, 1])),
+ (P(&[0, 1]), M(&[1, 1])),
+ (P(&[1, 1]), M(&[2, 1])),
+];
+
+// a, b, a & b, a | b, a ^ b
+const BITWISE_VALUES: &'static [(ValueVec, ValueVec, ValueVec, ValueVec, ValueVec)] = &[
+ (N, N, N, N, N),
+ (N, P(&[1]), N, P(&[1]), P(&[1])),
+ (N, P(&[!0]), N, P(&[!0]), P(&[!0])),
+ (N, P(&[0, 1]), N, P(&[0, 1]), P(&[0, 1])),
+ (N, M(&[1]), N, M(&[1]), M(&[1])),
+ (N, M(&[!0]), N, M(&[!0]), M(&[!0])),
+ (N, M(&[0, 1]), N, M(&[0, 1]), M(&[0, 1])),
+ (P(&[1]), P(&[!0]), P(&[1]), P(&[!0]), P(&[!0 - 1])),
+ (P(&[!0]), P(&[!0]), P(&[!0]), P(&[!0]), N),
+ (P(&[!0]), P(&[1, 1]), P(&[1]), P(&[!0, 1]), P(&[!0 - 1, 1])),
+ (P(&[1]), M(&[!0]), P(&[1]), M(&[!0]), M(&[0, 1])),
+ (P(&[!0]), M(&[1]), P(&[!0]), M(&[1]), M(&[0, 1])),
+ (P(&[!0]), M(&[!0]), P(&[1]), M(&[1]), M(&[2])),
+ (P(&[!0]), M(&[1, 1]), P(&[!0]), M(&[1, 1]), M(&[0, 2])),
+ (P(&[1, 1]), M(&[!0]), P(&[1, 1]), M(&[!0]), M(&[0, 2])),
+ (M(&[1]), M(&[!0]), M(&[!0]), M(&[1]), P(&[!0 - 1])),
+ (M(&[!0]), M(&[!0]), M(&[!0]), M(&[!0]), N),
+ (M(&[!0]), M(&[1, 1]), M(&[!0, 1]), M(&[1]), P(&[!0 - 1, 1])),
+];
+
+const I32_MIN: i64 = i32::MIN as i64;
+const I32_MAX: i64 = i32::MAX as i64;
+const U32_MAX: i64 = u32::MAX as i64;
+
+// some corner cases
+const I64_VALUES: &'static [i64] = &[
+ i64::MIN,
+ i64::MIN + 1,
+ i64::MIN + 2,
+ i64::MIN + 3,
+ -U32_MAX - 3,
+ -U32_MAX - 2,
+ -U32_MAX - 1,
+ -U32_MAX,
+ -U32_MAX + 1,
+ -U32_MAX + 2,
+ -U32_MAX + 3,
+ I32_MIN - 3,
+ I32_MIN - 2,
+ I32_MIN - 1,
+ I32_MIN,
+ I32_MIN + 1,
+ I32_MIN + 2,
+ I32_MIN + 3,
+ -3,
+ -2,
+ -1,
+ 0,
+ 1,
+ 2,
+ 3,
+ I32_MAX - 3,
+ I32_MAX - 2,
+ I32_MAX - 1,
+ I32_MAX,
+ I32_MAX + 1,
+ I32_MAX + 2,
+ I32_MAX + 3,
+ U32_MAX - 3,
+ U32_MAX - 2,
+ U32_MAX - 1,
+ U32_MAX,
+ U32_MAX + 1,
+ U32_MAX + 2,
+ U32_MAX + 3,
+ i64::MAX - 3,
+ i64::MAX - 2,
+ i64::MAX - 1,
+ i64::MAX,
+];
+
+#[test]
+fn test_not() {
+ for &(ref a, ref not) in NOT_VALUES.iter() {
+ let a = a.to_bigint().unwrap();
+ let not = not.to_bigint().unwrap();
+
+ // sanity check for tests that fit in i64
+ if let (Some(prim_a), Some(prim_not)) = (a.to_i64(), not.to_i64()) {
+ assert_eq!(!prim_a, prim_not);
+ }
+
+ assert_eq!(!a.clone(), not, "!{:x}", a);
+ assert_eq!(!not.clone(), a, "!{:x}", not);
+ }
+}
+
+#[test]
+fn test_not_i64() {
+ for &prim_a in I64_VALUES.iter() {
+ let a = prim_a.to_bigint().unwrap();
+ let not = (!prim_a).to_bigint().unwrap();
+ assert_eq!(!a.clone(), not, "!{:x}", a);
+ }
+}
+
+#[test]
+fn test_bitwise() {
+ for &(ref a, ref b, ref and, ref or, ref xor) in BITWISE_VALUES.iter() {
+ let a = a.to_bigint().unwrap();
+ let b = b.to_bigint().unwrap();
+ let and = and.to_bigint().unwrap();
+ let or = or.to_bigint().unwrap();
+ let xor = xor.to_bigint().unwrap();
+
+ // sanity check for tests that fit in i64
+ if let (Some(prim_a), Some(prim_b)) = (a.to_i64(), b.to_i64()) {
+ if let Some(prim_and) = and.to_i64() {
+ assert_eq!(prim_a & prim_b, prim_and);
+ }
+ if let Some(prim_or) = or.to_i64() {
+ assert_eq!(prim_a | prim_b, prim_or);
+ }
+ if let Some(prim_xor) = xor.to_i64() {
+ assert_eq!(prim_a ^ prim_b, prim_xor);
+ }
+ }
+
+ assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b);
+ assert_eq!(b.clone() & &a, and, "{:x} & {:x}", b, a);
+ assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b);
+ assert_eq!(b.clone() | &a, or, "{:x} | {:x}", b, a);
+ assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b);
+ assert_eq!(b.clone() ^ &a, xor, "{:x} ^ {:x}", b, a);
+ }
+}
+
+#[test]
+fn test_bitwise_i64() {
+ for &prim_a in I64_VALUES.iter() {
+ let a = prim_a.to_bigint().unwrap();
+ for &prim_b in I64_VALUES.iter() {
+ let b = prim_b.to_bigint().unwrap();
+ let and = (prim_a & prim_b).to_bigint().unwrap();
+ let or = (prim_a | prim_b).to_bigint().unwrap();
+ let xor = (prim_a ^ prim_b).to_bigint().unwrap();
+ assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b);
+ assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b);
+ assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b);
+ }
+ }
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/bigint_scalar.rs b/rust/vendor/num-bigint-0.2.6/tests/bigint_scalar.rs
new file mode 100644
index 0000000..6a1601e
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/bigint_scalar.rs
@@ -0,0 +1,151 @@
+extern crate num_bigint;
+extern crate num_traits;
+
+use num_bigint::BigInt;
+use num_bigint::Sign::Plus;
+use num_traits::{Signed, ToPrimitive, Zero};
+
+use std::ops::Neg;
+
+mod consts;
+use consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_scalar_add() {
+ fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_signed_scalar_op!(x + y == z);
+ assert_signed_scalar_assign_op!(x += y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ check(&c, &na, &b);
+ check(&c, &nb, &a);
+ check(&a, &nc, &nb);
+ check(&b, &nc, &na);
+ check(&na, &nb, &nc);
+ check(&a, &na, &Zero::zero());
+ }
+}
+
+#[test]
+fn test_scalar_sub() {
+ fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_signed_scalar_op!(x - y == z);
+ assert_signed_scalar_assign_op!(x -= y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ check(&c, &a, &b);
+ check(&c, &b, &a);
+ check(&nb, &a, &nc);
+ check(&na, &b, &nc);
+ check(&b, &na, &c);
+ check(&a, &nb, &c);
+ check(&nc, &na, &nb);
+ check(&a, &a, &Zero::zero());
+ }
+}
+
+#[test]
+fn test_scalar_mul() {
+ fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_signed_scalar_op!(x * y == z);
+ assert_signed_scalar_assign_op!(x *= y == z);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ check(&na, &nb, &c);
+
+ check(&na, &b, &nc);
+ check(&nb, &a, &nc);
+ }
+}
+
+#[test]
+fn test_scalar_div_rem() {
+ fn check_sub(a: &BigInt, b: u32, ans_q: &BigInt, ans_r: &BigInt) {
+ let (q, r) = (a / b, a % b);
+ if !r.is_zero() {
+ assert_eq!(r.sign(), a.sign());
+ }
+ assert!(r.abs() <= From::from(b));
+ assert!(*a == b * &q + &r);
+ assert!(q == *ans_q);
+ assert!(r == *ans_r);
+
+ let b = BigInt::from(b);
+ let (a, b, ans_q, ans_r) = (a.clone(), b.clone(), ans_q.clone(), ans_r.clone());
+ assert_signed_scalar_op!(a / b == ans_q);
+ assert_signed_scalar_op!(a % b == ans_r);
+ assert_signed_scalar_assign_op!(a /= b == ans_q);
+ assert_signed_scalar_assign_op!(a %= b == ans_r);
+
+ let nb = -b;
+ assert_signed_scalar_op!(a / nb == -ans_q.clone());
+ assert_signed_scalar_op!(a % nb == ans_r);
+ assert_signed_scalar_assign_op!(a /= nb == -ans_q.clone());
+ assert_signed_scalar_assign_op!(a %= nb == ans_r);
+ }
+
+ fn check(a: &BigInt, b: u32, q: &BigInt, r: &BigInt) {
+ check_sub(a, b, q, r);
+ check_sub(&a.neg(), b, &q.neg(), &r.neg());
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if a_vec.len() == 1 && a_vec[0] != 0 {
+ let a = a_vec[0];
+ check(&c, a, &b, &Zero::zero());
+ }
+
+ if b_vec.len() == 1 && b_vec[0] != 0 {
+ let b = b_vec[0];
+ check(&c, b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if b_vec.len() == 1 && b_vec[0] != 0 {
+ let b = b_vec[0];
+ check(&a, b, &c, &d);
+ }
+ }
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/biguint.rs b/rust/vendor/num-bigint-0.2.6/tests/biguint.rs
new file mode 100644
index 0000000..1e23aa1
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/biguint.rs
@@ -0,0 +1,1713 @@
+extern crate num_bigint;
+extern crate num_integer;
+extern crate num_traits;
+
+use num_bigint::Sign::Plus;
+use num_bigint::{BigInt, ToBigInt};
+use num_bigint::{BigUint, ToBigUint};
+use num_integer::Integer;
+
+use std::cmp::Ordering::{Equal, Greater, Less};
+use std::collections::hash_map::RandomState;
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::i64;
+use std::iter::repeat;
+use std::str::FromStr;
+use std::{f32, f64};
+#[cfg(has_i128)]
+use std::{i128, u128};
+use std::{u16, u32, u64, u8, usize};
+
+use num_traits::{
+ CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Float, FromPrimitive, Num, One, Pow,
+ ToPrimitive, Zero,
+};
+
+mod consts;
+use consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_from_bytes_be() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigUint::from_bytes_be(s.as_bytes()),
+ BigUint::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ assert_eq!(BigUint::from_bytes_be(&[]), Zero::zero());
+}
+
+#[test]
+fn test_to_bytes_be() {
+ fn check(s: &str, result: &str) {
+ let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(b.to_bytes_be(), s.as_bytes());
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ let b: BigUint = Zero::zero();
+ assert_eq!(b.to_bytes_be(), [0]);
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigUint::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_be(), [1, 0, 0, 0, 0, 0, 0, 2, 0]);
+}
+
+#[test]
+fn test_from_bytes_le() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigUint::from_bytes_le(s.as_bytes()),
+ BigUint::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ assert_eq!(BigUint::from_bytes_le(&[]), Zero::zero());
+}
+
+#[test]
+fn test_to_bytes_le() {
+ fn check(s: &str, result: &str) {
+ let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(b.to_bytes_le(), s.as_bytes());
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ let b: BigUint = Zero::zero();
+ assert_eq!(b.to_bytes_le(), [0]);
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigUint::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_le(), [0, 2, 0, 0, 0, 0, 0, 0, 1]);
+}
+
+#[test]
+fn test_cmp() {
+ let data: [&[_]; 7] = [&[], &[1], &[2], &[!0], &[0, 1], &[2, 1], &[1, 1, 1]];
+ let data: Vec<BigUint> = data.iter().map(|v| BigUint::from_slice(*v)).collect();
+ for (i, ni) in data.iter().enumerate() {
+ for (j0, nj) in data[i..].iter().enumerate() {
+ let j = j0 + i;
+ if i == j {
+ assert_eq!(ni.cmp(nj), Equal);
+ assert_eq!(nj.cmp(ni), Equal);
+ assert_eq!(ni, nj);
+ assert!(!(ni != nj));
+ assert!(ni <= nj);
+ assert!(ni >= nj);
+ assert!(!(ni < nj));
+ assert!(!(ni > nj));
+ } else {
+ assert_eq!(ni.cmp(nj), Less);
+ assert_eq!(nj.cmp(ni), Greater);
+
+ assert!(!(ni == nj));
+ assert!(ni != nj);
+
+ assert!(ni <= nj);
+ assert!(!(ni >= nj));
+ assert!(ni < nj);
+ assert!(!(ni > nj));
+
+ assert!(!(nj <= ni));
+ assert!(nj >= ni);
+ assert!(!(nj < ni));
+ assert!(nj > ni);
+ }
+ }
+ }
+}
+
+fn hash<T: Hash>(x: &T) -> u64 {
+ let mut hasher = <RandomState as BuildHasher>::Hasher::new();
+ x.hash(&mut hasher);
+ hasher.finish()
+}
+
+#[test]
+fn test_hash() {
+ use hash;
+
+ let a = BigUint::new(vec![]);
+ let b = BigUint::new(vec![0]);
+ let c = BigUint::new(vec![1]);
+ let d = BigUint::new(vec![1, 0, 0, 0, 0, 0]);
+ let e = BigUint::new(vec![0, 0, 0, 0, 0, 1]);
+ assert!(hash(&a) == hash(&b));
+ assert!(hash(&b) != hash(&c));
+ assert!(hash(&c) == hash(&d));
+ assert!(hash(&d) != hash(&e));
+}
+
+// LEFT, RIGHT, AND, OR, XOR
+const BIT_TESTS: &'static [(
+ &'static [u32],
+ &'static [u32],
+ &'static [u32],
+ &'static [u32],
+ &'static [u32],
+)] = &[
+ (&[], &[], &[], &[], &[]),
+ (&[1, 0, 1], &[1, 1], &[1], &[1, 1, 1], &[0, 1, 1]),
+ (&[1, 0, 1], &[0, 1, 1], &[0, 0, 1], &[1, 1, 1], &[1, 1]),
+ (
+ &[268, 482, 17],
+ &[964, 54],
+ &[260, 34],
+ &[972, 502, 17],
+ &[712, 468, 17],
+ ),
+];
+
+#[test]
+fn test_bitand() {
+ for elm in BIT_TESTS {
+ let (a_vec, b_vec, c_vec, _, _) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a & b == c);
+ assert_op!(b & a == c);
+ assert_assign_op!(a &= b == c);
+ assert_assign_op!(b &= a == c);
+ }
+}
+
+#[test]
+fn test_bitor() {
+ for elm in BIT_TESTS {
+ let (a_vec, b_vec, _, c_vec, _) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a | b == c);
+ assert_op!(b | a == c);
+ assert_assign_op!(a |= b == c);
+ assert_assign_op!(b |= a == c);
+ }
+}
+
+#[test]
+fn test_bitxor() {
+ for elm in BIT_TESTS {
+ let (a_vec, b_vec, _, _, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a ^ b == c);
+ assert_op!(b ^ a == c);
+ assert_op!(a ^ c == b);
+ assert_op!(c ^ a == b);
+ assert_op!(b ^ c == a);
+ assert_op!(c ^ b == a);
+ assert_assign_op!(a ^= b == c);
+ assert_assign_op!(b ^= a == c);
+ assert_assign_op!(a ^= c == b);
+ assert_assign_op!(c ^= a == b);
+ assert_assign_op!(b ^= c == a);
+ assert_assign_op!(c ^= b == a);
+ }
+}
+
+#[test]
+fn test_shl() {
+ fn check(s: &str, shift: usize, ans: &str) {
+ let opt_biguint = BigUint::from_str_radix(s, 16).ok();
+ let mut bu_assign = opt_biguint.unwrap();
+ let bu = (bu_assign.clone() << shift).to_str_radix(16);
+ assert_eq!(bu, ans);
+ bu_assign <<= shift;
+ assert_eq!(bu_assign.to_str_radix(16), ans);
+ }
+
+ check("0", 3, "0");
+ check("1", 3, "8");
+
+ check(
+ "1\
+ 0000\
+ 0000\
+ 0000\
+ 0001\
+ 0000\
+ 0000\
+ 0000\
+ 0001",
+ 3,
+ "8\
+ 0000\
+ 0000\
+ 0000\
+ 0008\
+ 0000\
+ 0000\
+ 0000\
+ 0008",
+ );
+ check(
+ "1\
+ 0000\
+ 0001\
+ 0000\
+ 0001",
+ 2,
+ "4\
+ 0000\
+ 0004\
+ 0000\
+ 0004",
+ );
+ check(
+ "1\
+ 0001\
+ 0001",
+ 1,
+ "2\
+ 0002\
+ 0002",
+ );
+
+ check(
+ "\
+ 4000\
+ 0000\
+ 0000\
+ 0000",
+ 3,
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000\
+ 0000",
+ 2,
+ "1\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000",
+ 2,
+ "1\
+ 0000",
+ );
+
+ check(
+ "4000\
+ 0000\
+ 0000\
+ 0000",
+ 67,
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000\
+ 0000",
+ 35,
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000",
+ 19,
+ "2\
+ 0000\
+ 0000",
+ );
+
+ check(
+ "fedc\
+ ba98\
+ 7654\
+ 3210\
+ fedc\
+ ba98\
+ 7654\
+ 3210",
+ 4,
+ "f\
+ edcb\
+ a987\
+ 6543\
+ 210f\
+ edcb\
+ a987\
+ 6543\
+ 2100",
+ );
+ check(
+ "88887777666655554444333322221111",
+ 16,
+ "888877776666555544443333222211110000",
+ );
+}
+
+#[test]
+fn test_shr() {
+ fn check(s: &str, shift: usize, ans: &str) {
+ let opt_biguint = BigUint::from_str_radix(s, 16).ok();
+ let mut bu_assign = opt_biguint.unwrap();
+ let bu = (bu_assign.clone() >> shift).to_str_radix(16);
+ assert_eq!(bu, ans);
+ bu_assign >>= shift;
+ assert_eq!(bu_assign.to_str_radix(16), ans);
+ }
+
+ check("0", 3, "0");
+ check("f", 3, "1");
+
+ check(
+ "1\
+ 0000\
+ 0000\
+ 0000\
+ 0001\
+ 0000\
+ 0000\
+ 0000\
+ 0001",
+ 3,
+ "2000\
+ 0000\
+ 0000\
+ 0000\
+ 2000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "1\
+ 0000\
+ 0001\
+ 0000\
+ 0001",
+ 2,
+ "4000\
+ 0000\
+ 4000\
+ 0000",
+ );
+ check(
+ "1\
+ 0001\
+ 0001",
+ 1,
+ "8000\
+ 8000",
+ );
+
+ check(
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0001\
+ 0000\
+ 0000\
+ 0000\
+ 0001",
+ 67,
+ "4000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "2\
+ 0000\
+ 0001\
+ 0000\
+ 0001",
+ 35,
+ "4000\
+ 0000",
+ );
+ check(
+ "2\
+ 0001\
+ 0001",
+ 19,
+ "4000",
+ );
+
+ check(
+ "1\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ 1,
+ "8000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "1\
+ 0000\
+ 0000",
+ 1,
+ "8000\
+ 0000",
+ );
+ check(
+ "1\
+ 0000",
+ 1,
+ "8000",
+ );
+ check(
+ "f\
+ edcb\
+ a987\
+ 6543\
+ 210f\
+ edcb\
+ a987\
+ 6543\
+ 2100",
+ 4,
+ "fedc\
+ ba98\
+ 7654\
+ 3210\
+ fedc\
+ ba98\
+ 7654\
+ 3210",
+ );
+
+ check(
+ "888877776666555544443333222211110000",
+ 16,
+ "88887777666655554444333322221111",
+ );
+}
+
+// `DoubleBigDigit` size dependent
+#[test]
+fn test_convert_i64() {
+ fn check(b1: BigUint, i: i64) {
+ let b2: BigUint = FromPrimitive::from_i64(i).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_i64().unwrap(), i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i64::MAX.to_biguint().unwrap(), i64::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1 >> 1]), i64::MAX);
+
+ assert_eq!(i64::MIN.to_biguint(), None);
+ assert_eq!(BigUint::new(vec![N1, N1]).to_i64(), None);
+ assert_eq!(BigUint::new(vec![0, 0, 1]).to_i64(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1]).to_i64(), None);
+}
+
+#[test]
+#[cfg(has_i128)]
+fn test_convert_i128() {
+ fn check(b1: BigUint, i: i128) {
+ let b2: BigUint = FromPrimitive::from_i128(i).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_i128().unwrap(), i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i128::MAX.to_biguint().unwrap(), i128::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1, N1, N1 >> 1]), i128::MAX);
+
+ assert_eq!(i128::MIN.to_biguint(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1, N1]).to_i128(), None);
+ assert_eq!(BigUint::new(vec![0, 0, 0, 0, 1]).to_i128(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1, N1, N1]).to_i128(), None);
+}
+
+// `DoubleBigDigit` size dependent
+#[test]
+fn test_convert_u64() {
+ fn check(b1: BigUint, u: u64) {
+ let b2: BigUint = FromPrimitive::from_u64(u).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_u64().unwrap(), u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u64::MIN.to_biguint().unwrap(), u64::MIN);
+ check(u64::MAX.to_biguint().unwrap(), u64::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1]), u64::MAX);
+
+ assert_eq!(BigUint::new(vec![0, 0, 1]).to_u64(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1]).to_u64(), None);
+}
+
+#[test]
+#[cfg(has_i128)]
+fn test_convert_u128() {
+ fn check(b1: BigUint, u: u128) {
+ let b2: BigUint = FromPrimitive::from_u128(u).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_u128().unwrap(), u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u128::MIN.to_biguint().unwrap(), u128::MIN);
+ check(u128::MAX.to_biguint().unwrap(), u128::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1, N1, N1]), u128::MAX);
+
+ assert_eq!(BigUint::new(vec![0, 0, 0, 0, 1]).to_u128(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1, N1, N1]).to_u128(), None);
+}
+
+#[test]
+fn test_convert_f32() {
+ fn check(b1: &BigUint, f: f32) {
+ let b2 = BigUint::from_f32(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f32().unwrap(), f);
+ }
+
+ check(&BigUint::zero(), 0.0);
+ check(&BigUint::one(), 1.0);
+ check(&BigUint::from(u16::MAX), 2.0.powi(16) - 1.0);
+ check(&BigUint::from(1u64 << 32), 2.0.powi(32));
+ check(&BigUint::from_slice(&[0, 0, 1]), 2.0.powi(64));
+ check(
+ &((BigUint::one() << 100) + (BigUint::one() << 123)),
+ 2.0.powi(100) + 2.0.powi(123),
+ );
+ check(&(BigUint::one() << 127), 2.0.powi(127));
+ check(&(BigUint::from((1u64 << 24) - 1) << (128 - 24)), f32::MAX);
+
+ // keeping all 24 digits with the bits at different offsets to the BigDigits
+ let x: u32 = 0b00000000101111011111011011011101;
+ let mut f = x as f32;
+ let mut b = BigUint::from(x);
+ for _ in 0..64 {
+ check(&b, f);
+ f *= 2.0;
+ b = b << 1;
+ }
+
+ // this number when rounded to f64 then f32 isn't the same as when rounded straight to f32
+ let n: u64 = 0b0000000000111111111111111111111111011111111111111111111111111111;
+ assert!((n as f64) as f32 != n as f32);
+ assert_eq!(BigUint::from(n).to_f32(), Some(n as f32));
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 25) - 1) as f32;
+ let mut b = BigUint::from(1u64 << 25);
+ for _ in 0..64 {
+ assert_eq!(b.to_f32(), Some(f));
+ f *= 2.0;
+ b = b << 1;
+ }
+
+ // rounding
+ assert_eq!(BigUint::from_f32(-1.0), None);
+ assert_eq!(BigUint::from_f32(-0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(-0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(-0.0), Some(BigUint::zero()));
+ assert_eq!(
+ BigUint::from_f32(f32::MIN_POSITIVE / 2.0),
+ Some(BigUint::zero())
+ );
+ assert_eq!(BigUint::from_f32(f32::MIN_POSITIVE), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(f32::consts::E), Some(BigUint::from(2u32)));
+ assert_eq!(
+ BigUint::from_f32(f32::consts::PI),
+ Some(BigUint::from(3u32))
+ );
+
+ // special float values
+ assert_eq!(BigUint::from_f32(f32::NAN), None);
+ assert_eq!(BigUint::from_f32(f32::INFINITY), None);
+ assert_eq!(BigUint::from_f32(f32::NEG_INFINITY), None);
+ assert_eq!(BigUint::from_f32(f32::MIN), None);
+
+ // largest BigUint that will round to a finite f32 value
+ let big_num = (BigUint::one() << 128) - BigUint::one() - (BigUint::one() << (128 - 25));
+ assert_eq!(big_num.to_f32(), Some(f32::MAX));
+ assert_eq!((big_num + BigUint::one()).to_f32(), None);
+
+ assert_eq!(((BigUint::one() << 128) - BigUint::one()).to_f32(), None);
+ assert_eq!((BigUint::one() << 128).to_f32(), None);
+}
+
+#[test]
+fn test_convert_f64() {
+ fn check(b1: &BigUint, f: f64) {
+ let b2 = BigUint::from_f64(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f64().unwrap(), f);
+ }
+
+ check(&BigUint::zero(), 0.0);
+ check(&BigUint::one(), 1.0);
+ check(&BigUint::from(u32::MAX), 2.0.powi(32) - 1.0);
+ check(&BigUint::from(1u64 << 32), 2.0.powi(32));
+ check(&BigUint::from_slice(&[0, 0, 1]), 2.0.powi(64));
+ check(
+ &((BigUint::one() << 100) + (BigUint::one() << 152)),
+ 2.0.powi(100) + 2.0.powi(152),
+ );
+ check(&(BigUint::one() << 1023), 2.0.powi(1023));
+ check(&(BigUint::from((1u64 << 53) - 1) << (1024 - 53)), f64::MAX);
+
+ // keeping all 53 digits with the bits at different offsets to the BigDigits
+ let x: u64 = 0b0000000000011110111110110111111101110111101111011111011011011101;
+ let mut f = x as f64;
+ let mut b = BigUint::from(x);
+ for _ in 0..128 {
+ check(&b, f);
+ f *= 2.0;
+ b = b << 1;
+ }
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 54) - 1) as f64;
+ let mut b = BigUint::from(1u64 << 54);
+ for _ in 0..128 {
+ assert_eq!(b.to_f64(), Some(f));
+ f *= 2.0;
+ b = b << 1;
+ }
+
+ // rounding
+ assert_eq!(BigUint::from_f64(-1.0), None);
+ assert_eq!(BigUint::from_f64(-0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(-0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(-0.0), Some(BigUint::zero()));
+ assert_eq!(
+ BigUint::from_f64(f64::MIN_POSITIVE / 2.0),
+ Some(BigUint::zero())
+ );
+ assert_eq!(BigUint::from_f64(f64::MIN_POSITIVE), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(f64::consts::E), Some(BigUint::from(2u32)));
+ assert_eq!(
+ BigUint::from_f64(f64::consts::PI),
+ Some(BigUint::from(3u32))
+ );
+
+ // special float values
+ assert_eq!(BigUint::from_f64(f64::NAN), None);
+ assert_eq!(BigUint::from_f64(f64::INFINITY), None);
+ assert_eq!(BigUint::from_f64(f64::NEG_INFINITY), None);
+ assert_eq!(BigUint::from_f64(f64::MIN), None);
+
+ // largest BigUint that will round to a finite f64 value
+ let big_num = (BigUint::one() << 1024) - BigUint::one() - (BigUint::one() << (1024 - 54));
+ assert_eq!(big_num.to_f64(), Some(f64::MAX));
+ assert_eq!((big_num + BigUint::one()).to_f64(), None);
+
+    assert_eq!(((BigUint::one() << 1024) - BigUint::one()).to_f64(), None);
+ assert_eq!((BigUint::one() << 1024).to_f64(), None);
+}
+
+#[test]
+fn test_convert_to_bigint() {
+ fn check(n: BigUint, ans: BigInt) {
+ assert_eq!(n.to_bigint().unwrap(), ans);
+ assert_eq!(n.to_bigint().unwrap().to_biguint().unwrap(), n);
+ }
+ check(Zero::zero(), Zero::zero());
+ check(
+ BigUint::new(vec![1, 2, 3]),
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3])),
+ );
+}
+
+#[test]
+fn test_convert_from_uint() {
+ macro_rules! check {
+ ($ty:ident, $max:expr) => {
+ assert_eq!(BigUint::from($ty::zero()), BigUint::zero());
+ assert_eq!(BigUint::from($ty::one()), BigUint::one());
+ assert_eq!(BigUint::from($ty::MAX - $ty::one()), $max - BigUint::one());
+ assert_eq!(BigUint::from($ty::MAX), $max);
+ };
+ }
+
+ check!(u8, BigUint::from_slice(&[u8::MAX as u32]));
+ check!(u16, BigUint::from_slice(&[u16::MAX as u32]));
+ check!(u32, BigUint::from_slice(&[u32::MAX]));
+ check!(u64, BigUint::from_slice(&[u32::MAX, u32::MAX]));
+ #[cfg(has_i128)]
+ check!(
+ u128,
+ BigUint::from_slice(&[u32::MAX, u32::MAX, u32::MAX, u32::MAX])
+ );
+ check!(usize, BigUint::from(usize::MAX as u64));
+}
+
+#[test]
+fn test_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a + b == c);
+ assert_op!(b + a == c);
+ assert_assign_op!(a += b == c);
+ assert_assign_op!(b += a == c);
+ }
+}
+
+#[test]
+fn test_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(c - a == b);
+ assert_op!(c - b == a);
+ assert_assign_op!(c -= a == b);
+ assert_assign_op!(c -= b == a);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_sub_fail_on_underflow() {
+ let (a, b): (BigUint, BigUint) = (Zero::zero(), One::one());
+ let _ = a - b;
+}
+
+#[test]
+fn test_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a * b == c);
+ assert_op!(b * a == c);
+ assert_assign_op!(a *= b == c);
+ assert_assign_op!(b *= a == c);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ assert!(a == &b * &c + &d);
+ assert!(a == &c * &b + &d);
+ }
+}
+
+#[test]
+fn test_div_rem() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ assert_op!(c / a == b);
+ assert_op!(c % a == Zero::zero());
+ assert_assign_op!(c /= a == b);
+ assert_assign_op!(c %= a == Zero::zero());
+ assert_eq!(c.div_rem(&a), (b.clone(), Zero::zero()));
+ }
+ if !b.is_zero() {
+ assert_op!(c / b == a);
+ assert_op!(c % b == Zero::zero());
+ assert_assign_op!(c /= b == a);
+ assert_assign_op!(c %= b == Zero::zero());
+ assert_eq!(c.div_rem(&b), (a.clone(), Zero::zero()));
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ if !b.is_zero() {
+ assert_op!(a / b == c);
+ assert_op!(a % b == d);
+ assert_assign_op!(a /= b == c);
+ assert_assign_op!(a %= b == d);
+ assert!(a.div_rem(&b) == (c, d));
+ }
+ }
+}
+
+#[test]
+fn test_checked_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert!(a.checked_add(&b).unwrap() == c);
+ assert!(b.checked_add(&a).unwrap() == c);
+ }
+}
+
+#[test]
+fn test_checked_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert!(c.checked_sub(&a).unwrap() == b);
+ assert!(c.checked_sub(&b).unwrap() == a);
+
+ if a > c {
+ assert!(a.checked_sub(&c).is_none());
+ }
+ if b > c {
+ assert!(b.checked_sub(&c).is_none());
+ }
+ }
+}
+
+#[test]
+fn test_checked_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert!(a.checked_mul(&b).unwrap() == c);
+ assert!(b.checked_mul(&a).unwrap() == c);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ assert!(a == b.checked_mul(&c).unwrap() + &d);
+ assert!(a == c.checked_mul(&b).unwrap() + &d);
+ }
+}
+
+#[test]
+fn test_mul_overflow() {
+ /* Test for issue #187 - overflow due to mac3 incorrectly sizing temporary */
+ let s = "531137992816767098689588206552468627329593117727031923199444138200403559860852242739162502232636710047537552105951370000796528760829212940754539968588340162273730474622005920097370111";
+ let a: BigUint = s.parse().unwrap();
+ let b = a.clone();
+ let _ = a.checked_mul(&b);
+}
+
+#[test]
+fn test_checked_div() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ assert!(c.checked_div(&a).unwrap() == b);
+ }
+ if !b.is_zero() {
+ assert!(c.checked_div(&b).unwrap() == a);
+ }
+
+ assert!(c.checked_div(&Zero::zero()).is_none());
+ }
+}
+
+#[test]
+fn test_gcd() {
+ fn check(a: usize, b: usize, c: usize) {
+ let big_a: BigUint = FromPrimitive::from_usize(a).unwrap();
+ let big_b: BigUint = FromPrimitive::from_usize(b).unwrap();
+ let big_c: BigUint = FromPrimitive::from_usize(c).unwrap();
+
+ assert_eq!(big_a.gcd(&big_b), big_c);
+ }
+
+ check(10, 2, 2);
+ check(10, 3, 1);
+ check(0, 3, 3);
+ check(3, 3, 3);
+ check(56, 42, 14);
+}
+
+#[test]
+fn test_lcm() {
+ fn check(a: usize, b: usize, c: usize) {
+ let big_a: BigUint = FromPrimitive::from_usize(a).unwrap();
+ let big_b: BigUint = FromPrimitive::from_usize(b).unwrap();
+ let big_c: BigUint = FromPrimitive::from_usize(c).unwrap();
+
+ assert_eq!(big_a.lcm(&big_b), big_c);
+ }
+
+ check(0, 0, 0);
+ check(1, 0, 0);
+ check(0, 1, 0);
+ check(1, 1, 1);
+ check(8, 9, 72);
+ check(11, 5, 55);
+ check(99, 17, 1683);
+}
+
+#[test]
+fn test_is_even() {
+ let one: BigUint = FromStr::from_str("1").unwrap();
+ let two: BigUint = FromStr::from_str("2").unwrap();
+ let thousand: BigUint = FromStr::from_str("1000").unwrap();
+ let big: BigUint = FromStr::from_str("1000000000000000000000").unwrap();
+ let bigger: BigUint = FromStr::from_str("1000000000000000000001").unwrap();
+ assert!(one.is_odd());
+ assert!(two.is_even());
+ assert!(thousand.is_even());
+ assert!(big.is_even());
+ assert!(bigger.is_odd());
+ assert!((&one << 64).is_even());
+ assert!(((&one << 64) + one).is_odd());
+}
+
+fn to_str_pairs() -> Vec<(BigUint, Vec<(u32, String)>)> {
+ let bits = 32;
+ vec![
+ (
+ Zero::zero(),
+ vec![(2, "0".to_string()), (3, "0".to_string())],
+ ),
+ (
+ BigUint::from_slice(&[0xff]),
+ vec![
+ (2, "11111111".to_string()),
+ (3, "100110".to_string()),
+ (4, "3333".to_string()),
+ (5, "2010".to_string()),
+ (6, "1103".to_string()),
+ (7, "513".to_string()),
+ (8, "377".to_string()),
+ (9, "313".to_string()),
+ (10, "255".to_string()),
+ (11, "212".to_string()),
+ (12, "193".to_string()),
+ (13, "168".to_string()),
+ (14, "143".to_string()),
+ (15, "120".to_string()),
+ (16, "ff".to_string()),
+ ],
+ ),
+ (
+ BigUint::from_slice(&[0xfff]),
+ vec![
+ (2, "111111111111".to_string()),
+ (4, "333333".to_string()),
+ (16, "fff".to_string()),
+ ],
+ ),
+ (
+ BigUint::from_slice(&[1, 2]),
+ vec![
+ (
+ 2,
+ format!("10{}1", repeat("0").take(bits - 1).collect::<String>()),
+ ),
+ (
+ 4,
+ format!("2{}1", repeat("0").take(bits / 2 - 1).collect::<String>()),
+ ),
+ (
+ 10,
+ match bits {
+ 64 => "36893488147419103233".to_string(),
+ 32 => "8589934593".to_string(),
+ 16 => "131073".to_string(),
+ _ => panic!(),
+ },
+ ),
+ (
+ 16,
+ format!("2{}1", repeat("0").take(bits / 4 - 1).collect::<String>()),
+ ),
+ ],
+ ),
+ (
+ BigUint::from_slice(&[1, 2, 3]),
+ vec![
+ (
+ 2,
+ format!(
+ "11{}10{}1",
+ repeat("0").take(bits - 2).collect::<String>(),
+ repeat("0").take(bits - 1).collect::<String>()
+ ),
+ ),
+ (
+ 4,
+ format!(
+ "3{}2{}1",
+ repeat("0").take(bits / 2 - 1).collect::<String>(),
+ repeat("0").take(bits / 2 - 1).collect::<String>()
+ ),
+ ),
+ (
+ 8,
+ match bits {
+ 64 => "14000000000000000000004000000000000000000001".to_string(),
+ 32 => "6000000000100000000001".to_string(),
+ 16 => "140000400001".to_string(),
+ _ => panic!(),
+ },
+ ),
+ (
+ 10,
+ match bits {
+ 64 => "1020847100762815390427017310442723737601".to_string(),
+ 32 => "55340232229718589441".to_string(),
+ 16 => "12885032961".to_string(),
+ _ => panic!(),
+ },
+ ),
+ (
+ 16,
+ format!(
+ "3{}2{}1",
+ repeat("0").take(bits / 4 - 1).collect::<String>(),
+ repeat("0").take(bits / 4 - 1).collect::<String>()
+ ),
+ ),
+ ],
+ ),
+ ]
+}
+
+#[test]
+fn test_to_str_radix() {
+ let r = to_str_pairs();
+ for num_pair in r.iter() {
+ let &(ref n, ref rs) = num_pair;
+ for str_pair in rs.iter() {
+ let &(ref radix, ref str) = str_pair;
+ assert_eq!(n.to_str_radix(*radix), *str);
+ }
+ }
+}
+
+#[test]
+fn test_from_and_to_radix() {
+ const GROUND_TRUTH: &'static [(&'static [u8], u32, &'static [u8])] = &[
+ (b"0", 42, &[0]),
+ (
+ b"ffffeeffbb",
+ 2,
+ &[
+ 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ ],
+ ),
+ (
+ b"ffffeeffbb",
+ 3,
+ &[
+ 2, 2, 1, 1, 2, 1, 1, 2, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 1, 0, 0, 2, 2, 0, 1,
+ ],
+ ),
+ (
+ b"ffffeeffbb",
+ 4,
+ &[3, 2, 3, 2, 3, 3, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3],
+ ),
+ (
+ b"ffffeeffbb",
+ 5,
+ &[0, 4, 3, 3, 1, 4, 2, 4, 1, 4, 4, 2, 3, 0, 0, 1, 2, 1],
+ ),
+ (
+ b"ffffeeffbb",
+ 6,
+ &[5, 5, 4, 5, 5, 0, 0, 1, 2, 5, 3, 0, 1, 0, 2, 2],
+ ),
+ (
+ b"ffffeeffbb",
+ 7,
+ &[4, 2, 3, 6, 0, 1, 6, 1, 6, 2, 0, 3, 2, 4, 1],
+ ),
+ (
+ b"ffffeeffbb",
+ 8,
+ &[3, 7, 6, 7, 7, 5, 3, 7, 7, 7, 7, 7, 7, 1],
+ ),
+ (b"ffffeeffbb", 9, &[8, 4, 5, 7, 0, 0, 3, 2, 0, 3, 0, 8, 3]),
+ (b"ffffeeffbb", 10, &[5, 9, 5, 3, 1, 5, 0, 1, 5, 9, 9, 0, 1]),
+ (b"ffffeeffbb", 11, &[10, 7, 6, 5, 2, 0, 3, 3, 3, 4, 9, 3]),
+ (b"ffffeeffbb", 12, &[11, 8, 5, 10, 1, 10, 3, 1, 1, 9, 5, 1]),
+ (b"ffffeeffbb", 13, &[0, 5, 7, 4, 6, 5, 6, 11, 8, 12, 7]),
+ (b"ffffeeffbb", 14, &[11, 4, 4, 11, 8, 4, 6, 0, 3, 11, 3]),
+ (b"ffffeeffbb", 15, &[5, 11, 13, 2, 1, 10, 2, 0, 9, 13, 1]),
+ (b"ffffeeffbb", 16, &[11, 11, 15, 15, 14, 14, 15, 15, 15, 15]),
+ (b"ffffeeffbb", 17, &[0, 2, 14, 12, 2, 14, 8, 10, 4, 9]),
+ (b"ffffeeffbb", 18, &[17, 15, 5, 13, 10, 16, 16, 13, 9, 5]),
+ (b"ffffeeffbb", 19, &[14, 13, 2, 8, 9, 0, 1, 14, 7, 3]),
+ (b"ffffeeffbb", 20, &[15, 19, 3, 14, 0, 17, 19, 18, 2, 2]),
+ (b"ffffeeffbb", 21, &[11, 5, 4, 13, 5, 18, 9, 1, 8, 1]),
+ (b"ffffeeffbb", 22, &[21, 3, 7, 21, 15, 12, 17, 0, 20]),
+ (b"ffffeeffbb", 23, &[21, 21, 6, 9, 10, 7, 21, 0, 14]),
+ (b"ffffeeffbb", 24, &[11, 10, 19, 14, 22, 11, 17, 23, 9]),
+ (b"ffffeeffbb", 25, &[20, 18, 21, 22, 21, 14, 3, 5, 7]),
+ (b"ffffeeffbb", 26, &[13, 15, 24, 11, 17, 6, 23, 6, 5]),
+ (b"ffffeeffbb", 27, &[17, 16, 7, 0, 21, 0, 3, 24, 3]),
+ (b"ffffeeffbb", 28, &[11, 16, 11, 15, 14, 18, 13, 25, 2]),
+ (b"ffffeeffbb", 29, &[6, 8, 7, 19, 14, 13, 21, 5, 2]),
+ (b"ffffeeffbb", 30, &[5, 13, 18, 11, 10, 7, 8, 20, 1]),
+ (b"ffffeeffbb", 31, &[22, 26, 15, 19, 8, 27, 29, 8, 1]),
+ (b"ffffeeffbb", 32, &[27, 29, 31, 29, 30, 31, 31, 31]),
+ (b"ffffeeffbb", 33, &[32, 20, 27, 12, 1, 12, 26, 25]),
+ (b"ffffeeffbb", 34, &[17, 9, 16, 33, 13, 25, 31, 20]),
+ (b"ffffeeffbb", 35, &[25, 32, 2, 25, 11, 4, 3, 17]),
+ (b"ffffeeffbb", 36, &[35, 34, 5, 6, 32, 3, 1, 14]),
+ (b"ffffeeffbb", 37, &[16, 21, 18, 4, 33, 19, 21, 11]),
+ (b"ffffeeffbb", 38, &[33, 25, 19, 29, 20, 6, 23, 9]),
+ (b"ffffeeffbb", 39, &[26, 27, 29, 23, 16, 18, 0, 8]),
+ (b"ffffeeffbb", 40, &[35, 39, 30, 11, 16, 17, 28, 6]),
+ (b"ffffeeffbb", 41, &[36, 30, 9, 18, 12, 19, 26, 5]),
+ (b"ffffeeffbb", 42, &[11, 34, 37, 27, 1, 13, 32, 4]),
+ (b"ffffeeffbb", 43, &[3, 24, 11, 2, 10, 40, 1, 4]),
+ (b"ffffeeffbb", 44, &[43, 12, 40, 32, 3, 23, 19, 3]),
+ (b"ffffeeffbb", 45, &[35, 38, 44, 18, 22, 18, 42, 2]),
+ (b"ffffeeffbb", 46, &[21, 45, 18, 41, 17, 2, 24, 2]),
+ (b"ffffeeffbb", 47, &[37, 37, 11, 12, 6, 0, 8, 2]),
+ (b"ffffeeffbb", 48, &[11, 41, 40, 43, 5, 43, 41, 1]),
+ (b"ffffeeffbb", 49, &[18, 45, 7, 13, 20, 21, 30, 1]),
+ (b"ffffeeffbb", 50, &[45, 21, 5, 34, 21, 18, 20, 1]),
+ (b"ffffeeffbb", 51, &[17, 6, 26, 22, 38, 24, 11, 1]),
+ (b"ffffeeffbb", 52, &[39, 33, 38, 30, 46, 31, 3, 1]),
+ (b"ffffeeffbb", 53, &[31, 7, 44, 23, 9, 32, 49]),
+ (b"ffffeeffbb", 54, &[17, 35, 8, 37, 31, 18, 44]),
+ (b"ffffeeffbb", 55, &[10, 52, 9, 48, 36, 39, 39]),
+ (b"ffffeeffbb", 56, &[11, 50, 51, 22, 25, 36, 35]),
+ (b"ffffeeffbb", 57, &[14, 55, 12, 43, 20, 3, 32]),
+ (b"ffffeeffbb", 58, &[35, 18, 45, 56, 9, 51, 28]),
+ (b"ffffeeffbb", 59, &[51, 28, 20, 26, 55, 3, 26]),
+ (b"ffffeeffbb", 60, &[35, 6, 27, 46, 58, 33, 23]),
+ (b"ffffeeffbb", 61, &[58, 7, 6, 54, 49, 20, 21]),
+ (b"ffffeeffbb", 62, &[53, 59, 3, 14, 10, 22, 19]),
+ (b"ffffeeffbb", 63, &[53, 50, 23, 4, 56, 36, 17]),
+ (b"ffffeeffbb", 64, &[59, 62, 47, 59, 63, 63, 15]),
+ (b"ffffeeffbb", 65, &[0, 53, 39, 4, 40, 37, 14]),
+ (b"ffffeeffbb", 66, &[65, 59, 39, 1, 64, 19, 13]),
+ (b"ffffeeffbb", 67, &[35, 14, 19, 16, 25, 10, 12]),
+ (b"ffffeeffbb", 68, &[51, 38, 63, 50, 15, 8, 11]),
+ (b"ffffeeffbb", 69, &[44, 45, 18, 58, 68, 12, 10]),
+ (b"ffffeeffbb", 70, &[25, 51, 0, 60, 13, 24, 9]),
+ (b"ffffeeffbb", 71, &[54, 30, 9, 65, 28, 41, 8]),
+ (b"ffffeeffbb", 72, &[35, 35, 55, 54, 17, 64, 7]),
+ (b"ffffeeffbb", 73, &[34, 4, 48, 40, 27, 19, 7]),
+ (b"ffffeeffbb", 74, &[53, 47, 4, 56, 36, 51, 6]),
+ (b"ffffeeffbb", 75, &[20, 56, 10, 72, 24, 13, 6]),
+ (b"ffffeeffbb", 76, &[71, 31, 52, 60, 48, 53, 5]),
+ (b"ffffeeffbb", 77, &[32, 73, 14, 63, 15, 21, 5]),
+ (b"ffffeeffbb", 78, &[65, 13, 17, 32, 64, 68, 4]),
+ (b"ffffeeffbb", 79, &[37, 56, 2, 56, 25, 41, 4]),
+ (b"ffffeeffbb", 80, &[75, 59, 37, 41, 43, 15, 4]),
+ (b"ffffeeffbb", 81, &[44, 68, 0, 21, 27, 72, 3]),
+ (b"ffffeeffbb", 82, &[77, 35, 2, 74, 46, 50, 3]),
+ (b"ffffeeffbb", 83, &[52, 51, 19, 76, 10, 30, 3]),
+ (b"ffffeeffbb", 84, &[11, 80, 19, 19, 76, 10, 3]),
+ (b"ffffeeffbb", 85, &[0, 82, 20, 14, 68, 77, 2]),
+ (b"ffffeeffbb", 86, &[3, 12, 78, 37, 62, 61, 2]),
+ (b"ffffeeffbb", 87, &[35, 12, 20, 8, 52, 46, 2]),
+ (b"ffffeeffbb", 88, &[43, 6, 54, 42, 30, 32, 2]),
+ (b"ffffeeffbb", 89, &[49, 52, 85, 21, 80, 18, 2]),
+ (b"ffffeeffbb", 90, &[35, 64, 78, 24, 18, 6, 2]),
+ (b"ffffeeffbb", 91, &[39, 17, 83, 63, 17, 85, 1]),
+ (b"ffffeeffbb", 92, &[67, 22, 85, 79, 75, 74, 1]),
+ (b"ffffeeffbb", 93, &[53, 60, 39, 29, 4, 65, 1]),
+ (b"ffffeeffbb", 94, &[37, 89, 2, 72, 76, 55, 1]),
+ (b"ffffeeffbb", 95, &[90, 74, 89, 9, 9, 47, 1]),
+ (b"ffffeeffbb", 96, &[59, 20, 46, 35, 81, 38, 1]),
+ (b"ffffeeffbb", 97, &[94, 87, 60, 71, 3, 31, 1]),
+ (b"ffffeeffbb", 98, &[67, 22, 63, 50, 62, 23, 1]),
+ (b"ffffeeffbb", 99, &[98, 6, 69, 12, 61, 16, 1]),
+ (b"ffffeeffbb", 100, &[95, 35, 51, 10, 95, 9, 1]),
+ (b"ffffeeffbb", 101, &[87, 27, 7, 8, 62, 3, 1]),
+ (b"ffffeeffbb", 102, &[17, 3, 32, 79, 59, 99]),
+ (b"ffffeeffbb", 103, &[30, 22, 90, 0, 87, 94]),
+ (b"ffffeeffbb", 104, &[91, 68, 87, 68, 38, 90]),
+ (b"ffffeeffbb", 105, &[95, 80, 54, 73, 15, 86]),
+ (b"ffffeeffbb", 106, &[31, 30, 24, 16, 17, 82]),
+ (b"ffffeeffbb", 107, &[51, 50, 10, 12, 42, 78]),
+ (b"ffffeeffbb", 108, &[71, 71, 96, 78, 89, 74]),
+ (b"ffffeeffbb", 109, &[33, 18, 93, 22, 50, 71]),
+ (b"ffffeeffbb", 110, &[65, 53, 57, 88, 29, 68]),
+ (b"ffffeeffbb", 111, &[53, 93, 67, 90, 27, 65]),
+ (b"ffffeeffbb", 112, &[11, 109, 96, 65, 43, 62]),
+ (b"ffffeeffbb", 113, &[27, 23, 106, 56, 76, 59]),
+ (b"ffffeeffbb", 114, &[71, 84, 31, 112, 11, 57]),
+ (b"ffffeeffbb", 115, &[90, 22, 1, 56, 76, 54]),
+ (b"ffffeeffbb", 116, &[35, 38, 98, 57, 40, 52]),
+ (b"ffffeeffbb", 117, &[26, 113, 115, 62, 17, 50]),
+ (b"ffffeeffbb", 118, &[51, 14, 5, 18, 7, 48]),
+ (b"ffffeeffbb", 119, &[102, 31, 110, 108, 8, 46]),
+ (b"ffffeeffbb", 120, &[35, 93, 96, 50, 22, 44]),
+ (b"ffffeeffbb", 121, &[87, 61, 2, 36, 47, 42]),
+ (b"ffffeeffbb", 122, &[119, 64, 1, 22, 83, 40]),
+ (b"ffffeeffbb", 123, &[77, 119, 32, 90, 6, 39]),
+ (b"ffffeeffbb", 124, &[115, 122, 31, 79, 62, 37]),
+ (b"ffffeeffbb", 125, &[95, 108, 47, 74, 3, 36]),
+ (b"ffffeeffbb", 126, &[53, 25, 116, 39, 78, 34]),
+ (b"ffffeeffbb", 127, &[22, 23, 125, 67, 35, 33]),
+ (b"ffffeeffbb", 128, &[59, 127, 59, 127, 127, 31]),
+ (b"ffffeeffbb", 129, &[89, 36, 1, 59, 100, 30]),
+ (b"ffffeeffbb", 130, &[65, 91, 123, 89, 79, 29]),
+ (b"ffffeeffbb", 131, &[58, 72, 39, 63, 65, 28]),
+ (b"ffffeeffbb", 132, &[131, 62, 92, 82, 57, 27]),
+ (b"ffffeeffbb", 133, &[109, 31, 51, 123, 55, 26]),
+ (b"ffffeeffbb", 134, &[35, 74, 21, 27, 60, 25]),
+ (b"ffffeeffbb", 135, &[125, 132, 49, 37, 70, 24]),
+ (b"ffffeeffbb", 136, &[51, 121, 117, 133, 85, 23]),
+ (b"ffffeeffbb", 137, &[113, 60, 135, 22, 107, 22]),
+ (b"ffffeeffbb", 138, &[113, 91, 73, 93, 133, 21]),
+ (b"ffffeeffbb", 139, &[114, 75, 102, 51, 26, 21]),
+ (b"ffffeeffbb", 140, &[95, 25, 35, 16, 62, 20]),
+ (b"ffffeeffbb", 141, &[131, 137, 16, 110, 102, 19]),
+ (b"ffffeeffbb", 142, &[125, 121, 108, 34, 6, 19]),
+ (b"ffffeeffbb", 143, &[65, 78, 138, 55, 55, 18]),
+ (b"ffffeeffbb", 144, &[107, 125, 121, 15, 109, 17]),
+ (b"ffffeeffbb", 145, &[35, 13, 122, 42, 22, 17]),
+ (b"ffffeeffbb", 146, &[107, 38, 103, 123, 83, 16]),
+ (b"ffffeeffbb", 147, &[116, 96, 71, 98, 2, 16]),
+ (b"ffffeeffbb", 148, &[127, 23, 75, 99, 71, 15]),
+ (b"ffffeeffbb", 149, &[136, 110, 53, 114, 144, 14]),
+ (b"ffffeeffbb", 150, &[95, 140, 133, 130, 71, 14]),
+ (b"ffffeeffbb", 151, &[15, 50, 29, 137, 0, 14]),
+ (b"ffffeeffbb", 152, &[147, 15, 89, 121, 83, 13]),
+ (b"ffffeeffbb", 153, &[17, 87, 93, 72, 17, 13]),
+ (b"ffffeeffbb", 154, &[109, 113, 3, 133, 106, 12]),
+ (b"ffffeeffbb", 155, &[115, 141, 120, 139, 44, 12]),
+ (b"ffffeeffbb", 156, &[143, 45, 4, 82, 140, 11]),
+ (b"ffffeeffbb", 157, &[149, 92, 15, 106, 82, 11]),
+ (b"ffffeeffbb", 158, &[37, 107, 79, 46, 26, 11]),
+ (b"ffffeeffbb", 159, &[137, 37, 146, 51, 130, 10]),
+ (b"ffffeeffbb", 160, &[155, 69, 29, 115, 77, 10]),
+ (b"ffffeeffbb", 161, &[67, 98, 46, 68, 26, 10]),
+ (b"ffffeeffbb", 162, &[125, 155, 60, 63, 138, 9]),
+ (b"ffffeeffbb", 163, &[96, 43, 118, 93, 90, 9]),
+ (b"ffffeeffbb", 164, &[159, 99, 123, 152, 43, 9]),
+ (b"ffffeeffbb", 165, &[65, 17, 1, 69, 163, 8]),
+ (b"ffffeeffbb", 166, &[135, 108, 25, 165, 119, 8]),
+ (b"ffffeeffbb", 167, &[165, 116, 164, 103, 77, 8]),
+ (b"ffffeeffbb", 168, &[11, 166, 67, 44, 36, 8]),
+ (b"ffffeeffbb", 169, &[65, 59, 71, 149, 164, 7]),
+ (b"ffffeeffbb", 170, &[85, 83, 26, 76, 126, 7]),
+ (b"ffffeeffbb", 171, &[71, 132, 140, 157, 88, 7]),
+ (b"ffffeeffbb", 172, &[3, 6, 127, 47, 52, 7]),
+ (b"ffffeeffbb", 173, &[122, 66, 53, 83, 16, 7]),
+ (b"ffffeeffbb", 174, &[35, 6, 5, 88, 155, 6]),
+ (b"ffffeeffbb", 175, &[95, 20, 84, 56, 122, 6]),
+ (b"ffffeeffbb", 176, &[43, 91, 57, 159, 89, 6]),
+ (b"ffffeeffbb", 177, &[110, 127, 54, 40, 58, 6]),
+ (b"ffffeeffbb", 178, &[49, 115, 43, 47, 27, 6]),
+ (b"ffffeeffbb", 179, &[130, 91, 4, 178, 175, 5]),
+ (b"ffffeeffbb", 180, &[35, 122, 109, 70, 147, 5]),
+ (b"ffffeeffbb", 181, &[94, 94, 4, 79, 119, 5]),
+ (b"ffffeeffbb", 182, &[39, 54, 66, 19, 92, 5]),
+ (b"ffffeeffbb", 183, &[119, 2, 143, 69, 65, 5]),
+ (b"ffffeeffbb", 184, &[67, 57, 90, 44, 39, 5]),
+ (b"ffffeeffbb", 185, &[90, 63, 141, 123, 13, 5]),
+ (b"ffffeeffbb", 186, &[53, 123, 172, 119, 174, 4]),
+ (b"ffffeeffbb", 187, &[153, 21, 68, 28, 151, 4]),
+ (b"ffffeeffbb", 188, &[131, 138, 94, 32, 128, 4]),
+ (b"ffffeeffbb", 189, &[179, 121, 156, 130, 105, 4]),
+ (b"ffffeeffbb", 190, &[185, 179, 164, 131, 83, 4]),
+ (b"ffffeeffbb", 191, &[118, 123, 37, 31, 62, 4]),
+ (b"ffffeeffbb", 192, &[59, 106, 83, 16, 41, 4]),
+ (b"ffffeeffbb", 193, &[57, 37, 47, 86, 20, 4]),
+ (b"ffffeeffbb", 194, &[191, 140, 63, 45, 0, 4]),
+ (b"ffffeeffbb", 195, &[65, 169, 83, 84, 175, 3]),
+ (b"ffffeeffbb", 196, &[67, 158, 64, 6, 157, 3]),
+ (b"ffffeeffbb", 197, &[121, 26, 167, 3, 139, 3]),
+ (b"ffffeeffbb", 198, &[197, 151, 165, 75, 121, 3]),
+ (b"ffffeeffbb", 199, &[55, 175, 36, 22, 104, 3]),
+ (b"ffffeeffbb", 200, &[195, 167, 162, 38, 87, 3]),
+ (b"ffffeeffbb", 201, &[35, 27, 136, 124, 70, 3]),
+ (b"ffffeeffbb", 202, &[87, 64, 153, 76, 54, 3]),
+ (b"ffffeeffbb", 203, &[151, 191, 14, 94, 38, 3]),
+ (b"ffffeeffbb", 204, &[119, 103, 135, 175, 22, 3]),
+ (b"ffffeeffbb", 205, &[200, 79, 123, 115, 7, 3]),
+ (b"ffffeeffbb", 206, &[133, 165, 202, 115, 198, 2]),
+ (b"ffffeeffbb", 207, &[44, 153, 193, 175, 184, 2]),
+ (b"ffffeeffbb", 208, &[91, 190, 125, 86, 171, 2]),
+ (b"ffffeeffbb", 209, &[109, 151, 34, 53, 158, 2]),
+ (b"ffffeeffbb", 210, &[95, 40, 171, 74, 145, 2]),
+ (b"ffffeeffbb", 211, &[84, 195, 162, 150, 132, 2]),
+ (b"ffffeeffbb", 212, &[31, 15, 59, 68, 120, 2]),
+ (b"ffffeeffbb", 213, &[125, 57, 127, 36, 108, 2]),
+ (b"ffffeeffbb", 214, &[51, 132, 2, 55, 96, 2]),
+ (b"ffffeeffbb", 215, &[175, 133, 177, 122, 84, 2]),
+ (b"ffffeeffbb", 216, &[179, 35, 78, 23, 73, 2]),
+ (b"ffffeeffbb", 217, &[53, 101, 208, 186, 61, 2]),
+ (b"ffffeeffbb", 218, &[33, 9, 214, 179, 50, 2]),
+ (b"ffffeeffbb", 219, &[107, 147, 175, 217, 39, 2]),
+ (b"ffffeeffbb", 220, &[175, 81, 179, 79, 29, 2]),
+ (b"ffffeeffbb", 221, &[0, 76, 95, 204, 18, 2]),
+ (b"ffffeeffbb", 222, &[53, 213, 16, 150, 8, 2]),
+ (b"ffffeeffbb", 223, &[158, 161, 42, 136, 221, 1]),
+ (b"ffffeeffbb", 224, &[123, 54, 52, 162, 212, 1]),
+ (b"ffffeeffbb", 225, &[170, 43, 151, 2, 204, 1]),
+ (b"ffffeeffbb", 226, &[27, 68, 224, 105, 195, 1]),
+ (b"ffffeeffbb", 227, &[45, 69, 157, 20, 187, 1]),
+ (b"ffffeeffbb", 228, &[71, 213, 64, 199, 178, 1]),
+ (b"ffffeeffbb", 229, &[129, 203, 66, 186, 170, 1]),
+ (b"ffffeeffbb", 230, &[205, 183, 57, 208, 162, 1]),
+ (b"ffffeeffbb", 231, &[32, 50, 164, 33, 155, 1]),
+ (b"ffffeeffbb", 232, &[35, 135, 53, 123, 147, 1]),
+ (b"ffffeeffbb", 233, &[209, 47, 89, 13, 140, 1]),
+ (b"ffffeeffbb", 234, &[143, 56, 175, 168, 132, 1]),
+ (b"ffffeeffbb", 235, &[225, 157, 216, 121, 125, 1]),
+ (b"ffffeeffbb", 236, &[51, 66, 119, 105, 118, 1]),
+ (b"ffffeeffbb", 237, &[116, 150, 26, 119, 111, 1]),
+ (b"ffffeeffbb", 238, &[221, 15, 87, 162, 104, 1]),
+ (b"ffffeeffbb", 239, &[234, 155, 214, 234, 97, 1]),
+ (b"ffffeeffbb", 240, &[155, 46, 84, 96, 91, 1]),
+ (b"ffffeeffbb", 241, &[187, 48, 90, 225, 84, 1]),
+ (b"ffffeeffbb", 242, &[87, 212, 151, 140, 78, 1]),
+ (b"ffffeeffbb", 243, &[206, 22, 189, 81, 72, 1]),
+ (b"ffffeeffbb", 244, &[119, 93, 122, 48, 66, 1]),
+ (b"ffffeeffbb", 245, &[165, 224, 117, 40, 60, 1]),
+ (b"ffffeeffbb", 246, &[77, 121, 100, 57, 54, 1]),
+ (b"ffffeeffbb", 247, &[52, 128, 242, 98, 48, 1]),
+ (b"ffffeeffbb", 248, &[115, 247, 224, 164, 42, 1]),
+ (b"ffffeeffbb", 249, &[218, 127, 223, 5, 37, 1]),
+ (b"ffffeeffbb", 250, &[95, 54, 168, 118, 31, 1]),
+ (b"ffffeeffbb", 251, &[121, 204, 240, 3, 26, 1]),
+ (b"ffffeeffbb", 252, &[179, 138, 123, 162, 20, 1]),
+ (b"ffffeeffbb", 253, &[21, 50, 1, 91, 15, 1]),
+ (b"ffffeeffbb", 254, &[149, 11, 63, 40, 10, 1]),
+ (b"ffffeeffbb", 255, &[170, 225, 247, 9, 5, 1]),
+ (b"ffffeeffbb", 256, &[187, 255, 238, 255, 255]),
+ ];
+
+ for &(bigint, radix, inbaseradix_le) in GROUND_TRUTH.iter() {
+ let bigint = BigUint::parse_bytes(bigint, 16).unwrap();
+ // to_radix_le
+ assert_eq!(bigint.to_radix_le(radix), inbaseradix_le);
+ // to_radix_be
+ let mut inbase_be = bigint.to_radix_be(radix);
+ inbase_be.reverse(); // now le
+ assert_eq!(inbase_be, inbaseradix_le);
+ // from_radix_le
+ assert_eq!(
+ BigUint::from_radix_le(inbaseradix_le, radix).unwrap(),
+ bigint
+ );
+ // from_radix_be
+ let mut inbaseradix_be = Vec::from(inbaseradix_le);
+ inbaseradix_be.reverse();
+ assert_eq!(
+ BigUint::from_radix_be(&inbaseradix_be, radix).unwrap(),
+ bigint
+ );
+ }
+
+ assert!(BigUint::from_radix_le(&[10, 100, 10], 50).is_none());
+}
+
+#[test]
+fn test_from_str_radix() {
+ let r = to_str_pairs();
+ for num_pair in r.iter() {
+ let &(ref n, ref rs) = num_pair;
+ for str_pair in rs.iter() {
+ let &(ref radix, ref str) = str_pair;
+ assert_eq!(n, &BigUint::from_str_radix(str, *radix).unwrap());
+ }
+ }
+
+ let zed = BigUint::from_str_radix("Z", 10).ok();
+ assert_eq!(zed, None);
+ let blank = BigUint::from_str_radix("_", 2).ok();
+ assert_eq!(blank, None);
+ let blank_one = BigUint::from_str_radix("_1", 2).ok();
+ assert_eq!(blank_one, None);
+ let plus_one = BigUint::from_str_radix("+1", 10).ok();
+ assert_eq!(plus_one, Some(BigUint::from_slice(&[1])));
+ let plus_plus_one = BigUint::from_str_radix("++1", 10).ok();
+ assert_eq!(plus_plus_one, None);
+ let minus_one = BigUint::from_str_radix("-1", 10).ok();
+ assert_eq!(minus_one, None);
+ let zero_plus_two = BigUint::from_str_radix("0+2", 10).ok();
+ assert_eq!(zero_plus_two, None);
+ let three = BigUint::from_str_radix("1_1", 2).ok();
+ assert_eq!(three, Some(BigUint::from_slice(&[3])));
+ let ff = BigUint::from_str_radix("1111_1111", 2).ok();
+ assert_eq!(ff, Some(BigUint::from_slice(&[0xff])));
+}
+
+#[test]
+fn test_all_str_radix() {
+ #[allow(deprecated, unused_imports)]
+ use std::ascii::AsciiExt;
+
+ let n = BigUint::new((0..10).collect());
+ for radix in 2..37 {
+ let s = n.to_str_radix(radix);
+ let x = BigUint::from_str_radix(&s, radix);
+ assert_eq!(x.unwrap(), n);
+
+ let s = s.to_ascii_uppercase();
+ let x = BigUint::from_str_radix(&s, radix);
+ assert_eq!(x.unwrap(), n);
+ }
+}
+
+#[test]
+fn test_lower_hex() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes("22405534230753963835153736737".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{:x}", a), "a");
+ assert_eq!(format!("{:x}", hello), "48656c6c6f20776f726c6421");
+ assert_eq!(format!("{:♥>+#8x}", a), "♥♥♥♥+0xa");
+}
+
+#[test]
+fn test_upper_hex() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes("22405534230753963835153736737".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{:X}", a), "A");
+ assert_eq!(format!("{:X}", hello), "48656C6C6F20776F726C6421");
+ assert_eq!(format!("{:♥>+#8X}", a), "♥♥♥♥+0xA");
+}
+
+#[test]
+fn test_binary() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes("224055342307539".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{:b}", a), "1010");
+ assert_eq!(
+ format!("{:b}", hello),
+ "110010111100011011110011000101101001100011010011"
+ );
+ assert_eq!(format!("{:♥>+#8b}", a), "♥+0b1010");
+}
+
+#[test]
+fn test_octal() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes("22405534230753963835153736737".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{:o}", a), "12");
+ assert_eq!(format!("{:o}", hello), "22062554330674403566756233062041");
+ assert_eq!(format!("{:♥>+#8o}", a), "♥♥♥+0o12");
+}
+
+#[test]
+fn test_display() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes("22405534230753963835153736737".as_bytes(), 10).unwrap();
+
+ assert_eq!(format!("{}", a), "10");
+ assert_eq!(format!("{}", hello), "22405534230753963835153736737");
+ assert_eq!(format!("{:♥>+#8}", a), "♥♥♥♥♥+10");
+}
+
+#[test]
+fn test_factor() {
+ fn factor(n: usize) -> BigUint {
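+        // Compute n! by multiplying each integer from 2 through n into a BigUint accumulator.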
+ let mut f: BigUint = One::one();
+ for i in 2..n + 1 {
+ // FIXME(#5992): assignment operator overloads
+ // f *= FromPrimitive::from_usize(i);
+ let bu: BigUint = FromPrimitive::from_usize(i).unwrap();
+ f = f * bu;
+ }
+ return f;
+ }
+
+ fn check(n: usize, s: &str) {
+ let n = factor(n);
+ let ans = match BigUint::from_str_radix(s, 10) {
+ Ok(x) => x,
+ Err(_) => panic!(),
+ };
+ assert_eq!(n, ans);
+ }
+
+ check(3, "6");
+ check(10, "3628800");
+ check(20, "2432902008176640000");
+ check(30, "265252859812191058636308480000000");
+}
+
+#[test]
+fn test_bits() {
+ assert_eq!(BigUint::new(vec![0, 0, 0, 0]).bits(), 0);
+ let n: BigUint = FromPrimitive::from_usize(0).unwrap();
+ assert_eq!(n.bits(), 0);
+ let n: BigUint = FromPrimitive::from_usize(1).unwrap();
+ assert_eq!(n.bits(), 1);
+ let n: BigUint = FromPrimitive::from_usize(3).unwrap();
+ assert_eq!(n.bits(), 2);
+ let n: BigUint = BigUint::from_str_radix("4000000000", 16).unwrap();
+ assert_eq!(n.bits(), 39);
+ let one: BigUint = One::one();
+ assert_eq!((one << 426).bits(), 427);
+}
+
+#[test]
+fn test_iter_sum() {
+ let result: BigUint = FromPrimitive::from_isize(1234567).unwrap();
+ let data: Vec<BigUint> = vec![
+ FromPrimitive::from_u32(1000000).unwrap(),
+ FromPrimitive::from_u32(200000).unwrap(),
+ FromPrimitive::from_u32(30000).unwrap(),
+ FromPrimitive::from_u32(4000).unwrap(),
+ FromPrimitive::from_u32(500).unwrap(),
+ FromPrimitive::from_u32(60).unwrap(),
+ FromPrimitive::from_u32(7).unwrap(),
+ ];
+
+ assert_eq!(result, data.iter().sum());
+ assert_eq!(result, data.into_iter().sum());
+}
+
+#[test]
+fn test_iter_product() {
+ let data: Vec<BigUint> = vec![
+ FromPrimitive::from_u32(1001).unwrap(),
+ FromPrimitive::from_u32(1002).unwrap(),
+ FromPrimitive::from_u32(1003).unwrap(),
+ FromPrimitive::from_u32(1004).unwrap(),
+ FromPrimitive::from_u32(1005).unwrap(),
+ ];
+ let result = data.get(0).unwrap()
+ * data.get(1).unwrap()
+ * data.get(2).unwrap()
+ * data.get(3).unwrap()
+ * data.get(4).unwrap();
+
+ assert_eq!(result, data.iter().product());
+ assert_eq!(result, data.into_iter().product());
+}
+
+#[test]
+fn test_iter_sum_generic() {
+ let result: BigUint = FromPrimitive::from_isize(1234567).unwrap();
+ let data = vec![1000000_u32, 200000, 30000, 4000, 500, 60, 7];
+
+ assert_eq!(result, data.iter().sum());
+ assert_eq!(result, data.into_iter().sum());
+}
+
+#[test]
+fn test_iter_product_generic() {
+ let data = vec![1001_u32, 1002, 1003, 1004, 1005];
+ let result = data[0].to_biguint().unwrap()
+ * data[1].to_biguint().unwrap()
+ * data[2].to_biguint().unwrap()
+ * data[3].to_biguint().unwrap()
+ * data[4].to_biguint().unwrap();
+
+ assert_eq!(result, data.iter().product());
+ assert_eq!(result, data.into_iter().product());
+}
+
+#[test]
+fn test_pow() {
+ let one = BigUint::from(1u32);
+ let two = BigUint::from(2u32);
+ let four = BigUint::from(4u32);
+ let eight = BigUint::from(8u32);
+ let tentwentyfour = BigUint::from(1024u32);
+ let twentyfourtyeight = BigUint::from(2048u32);
+ macro_rules! check {
+ ($t:ty) => {
+ assert_eq!(two.pow(0 as $t), one);
+ assert_eq!(two.pow(1 as $t), two);
+ assert_eq!(two.pow(2 as $t), four);
+ assert_eq!(two.pow(3 as $t), eight);
+ assert_eq!(two.pow(10 as $t), tentwentyfour);
+ assert_eq!(two.pow(11 as $t), twentyfourtyeight);
+ assert_eq!(two.pow(&(11 as $t)), twentyfourtyeight);
+ };
+ }
+ check!(u8);
+ check!(u16);
+ check!(u32);
+ check!(u64);
+ check!(usize);
+ #[cfg(has_i128)]
+ check!(u128);
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/biguint_scalar.rs b/rust/vendor/num-bigint-0.2.6/tests/biguint_scalar.rs
new file mode 100644
index 0000000..4522773
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/biguint_scalar.rs
@@ -0,0 +1,116 @@
+extern crate num_bigint;
+extern crate num_traits;
+
+use num_bigint::BigUint;
+use num_traits::{ToPrimitive, Zero};
+
+mod consts;
+use consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_scalar_add() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_unsigned_scalar_op!(x + y == z);
+ assert_unsigned_scalar_assign_op!(x += y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ }
+}
+
+#[test]
+fn test_scalar_sub() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_unsigned_scalar_op!(x - y == z);
+ assert_unsigned_scalar_assign_op!(x -= y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ check(&c, &a, &b);
+ check(&c, &b, &a);
+ }
+}
+
+#[test]
+fn test_scalar_mul() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_unsigned_scalar_op!(x * y == z);
+ assert_unsigned_scalar_assign_op!(x *= y == z);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ }
+}
+
+#[test]
+fn test_scalar_rem_noncommutative() {
+ assert_eq!(5u8 % BigUint::from(7u8), 5u8.into());
+ assert_eq!(BigUint::from(5u8) % 7u8, 5u8.into());
+}
+
+#[test]
+fn test_scalar_div_rem() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint, r: &BigUint) {
+ let (x, y, z, r) = (x.clone(), y.clone(), z.clone(), r.clone());
+ assert_unsigned_scalar_op!(x / y == z);
+ assert_unsigned_scalar_op!(x % y == r);
+ assert_unsigned_scalar_assign_op!(x /= y == z);
+ assert_unsigned_scalar_assign_op!(x %= y == r);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ assert_unsigned_scalar_op!(a / b == c);
+ assert_unsigned_scalar_op!(a % b == d);
+ assert_unsigned_scalar_assign_op!(a /= b == c);
+ assert_unsigned_scalar_assign_op!(a %= b == d);
+ }
+ }
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/consts/mod.rs b/rust/vendor/num-bigint-0.2.6/tests/consts/mod.rs
new file mode 100644
index 0000000..87805d5
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/consts/mod.rs
@@ -0,0 +1,56 @@
+#![allow(unused)]
+
+pub const N1: u32 = -1i32 as u32;
+pub const N2: u32 = -2i32 as u32;
+
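+// Each triple is (a, b, a + b); numbers are written as little-endian u32 limbs,
+// the digit order used by BigUint::from_slice in these tests.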
+pub const SUM_TRIPLES: &'static [(&'static [u32], &'static [u32], &'static [u32])] = &[
+ (&[], &[], &[]),
+ (&[], &[1], &[1]),
+ (&[1], &[1], &[2]),
+ (&[1], &[1, 1], &[2, 1]),
+ (&[1], &[N1], &[0, 1]),
+ (&[1], &[N1, N1], &[0, 0, 1]),
+ (&[N1, N1], &[N1, N1], &[N2, N1, 1]),
+ (&[1, 1, 1], &[N1, N1], &[0, 1, 2]),
+ (&[2, 2, 1], &[N1, N2], &[1, 1, 2]),
+ (&[1, 2, 2, 1], &[N1, N2], &[0, 1, 3, 1]),
+];
+
+pub const M: u32 = ::std::u32::MAX;
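+// Each triple is (a, b, a * b), again as little-endian u32 limbs.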
+pub const MUL_TRIPLES: &'static [(&'static [u32], &'static [u32], &'static [u32])] = &[
+ (&[], &[], &[]),
+ (&[], &[1], &[]),
+ (&[2], &[], &[]),
+ (&[1], &[1], &[1]),
+ (&[2], &[3], &[6]),
+ (&[1], &[1, 1, 1], &[1, 1, 1]),
+ (&[1, 2, 3], &[3], &[3, 6, 9]),
+ (&[1, 1, 1], &[N1], &[N1, N1, N1]),
+ (&[1, 2, 3], &[N1], &[N1, N2, N2, 2]),
+ (&[1, 2, 3, 4], &[N1], &[N1, N2, N2, N2, 3]),
+ (&[N1], &[N1], &[1, N2]),
+ (&[N1, N1], &[N1], &[1, N1, N2]),
+ (&[N1, N1, N1], &[N1], &[1, N1, N1, N2]),
+ (&[N1, N1, N1, N1], &[N1], &[1, N1, N1, N1, N2]),
+ (&[M / 2 + 1], &[2], &[0, 1]),
+ (&[0, M / 2 + 1], &[2], &[0, 0, 1]),
+ (&[1, 2], &[1, 2, 3], &[1, 4, 7, 6]),
+ (&[N1, N1], &[N1, N1, N1], &[1, 0, N1, N2, N1]),
+ (&[N1, N1, N1], &[N1, N1, N1, N1], &[1, 0, 0, N1, N2, N1, N1]),
+ (&[0, 0, 1], &[1, 2, 3], &[0, 0, 1, 2, 3]),
+ (&[0, 0, 1], &[0, 0, 0, 1], &[0, 0, 0, 0, 0, 1]),
+];
+
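+// Each quadruple is (a, b, a / b, a % b), as little-endian u32 limbs.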
+pub const DIV_REM_QUADRUPLES: &'static [(
+ &'static [u32],
+ &'static [u32],
+ &'static [u32],
+ &'static [u32],
+)] = &[
+ (&[1], &[2], &[], &[1]),
+ (&[3], &[2], &[1], &[1]),
+ (&[1, 1], &[2], &[M / 2 + 1], &[1]),
+ (&[1, 1, 1], &[2], &[M / 2 + 1, M / 2 + 1], &[1]),
+ (&[0, 1], &[N1], &[1], &[1]),
+ (&[N1, N1], &[N2], &[2, 1], &[3]),
+];
diff --git a/rust/vendor/num-bigint-0.2.6/tests/macros/mod.rs b/rust/vendor/num-bigint-0.2.6/tests/macros/mod.rs
new file mode 100644
index 0000000..946534a
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/macros/mod.rs
@@ -0,0 +1,116 @@
+#![allow(unused)]
+
+/// Assert that an op works for all val/ref combinations
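+/// e.g. assert_op!(a + b == c) checks &a + &b, &a + b.clone(), a.clone() + &b and a.clone() + b.clone().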
+macro_rules! assert_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_eq!((&$left) $op (&$right), $expected);
+ assert_eq!((&$left) $op $right.clone(), $expected);
+ assert_eq!($left.clone() $op (&$right), $expected);
+ assert_eq!($left.clone() $op $right.clone(), $expected);
+ };
+}
+
+/// Assert that an assign-op works for all val/ref combinations
+macro_rules! assert_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {{
+ let mut left = $left.clone();
+ assert_eq!({ left $op &$right; left}, $expected);
+
+ let mut left = $left.clone();
+ assert_eq!({ left $op $right.clone(); left}, $expected);
+ }};
+}
+
+/// Assert that an op works for scalar left or right
+macro_rules! assert_scalar_op {
+ (($($to:ident),*) $left:ident $op:tt $right:ident == $expected:expr) => {
+ $(
+ if let Some(left) = $left.$to() {
+ assert_op!(left $op $right == $expected);
+ }
+ if let Some(right) = $right.$to() {
+ assert_op!($left $op right == $expected);
+ }
+ )*
+ };
+}
+
+#[cfg(not(has_i128))]
+macro_rules! assert_unsigned_scalar_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize)
+ $left $op $right == $expected);
+ };
+}
+
+#[cfg(has_i128)]
+macro_rules! assert_unsigned_scalar_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128)
+ $left $op $right == $expected);
+ };
+}
+
+#[cfg(not(has_i128))]
+macro_rules! assert_signed_scalar_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize,
+ to_i8, to_i16, to_i32, to_i64, to_isize)
+ $left $op $right == $expected);
+ };
+}
+
+#[cfg(has_i128)]
+macro_rules! assert_signed_scalar_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128,
+ to_i8, to_i16, to_i32, to_i64, to_isize, to_i128)
+ $left $op $right == $expected);
+ };
+}
+
+/// Assert that an op works for scalar right
+macro_rules! assert_scalar_assign_op {
+ (($($to:ident),*) $left:ident $op:tt $right:ident == $expected:expr) => {
+ $(
+ if let Some(right) = $right.$to() {
+ let mut left = $left.clone();
+ assert_eq!({ left $op right; left}, $expected);
+ }
+ )*
+ };
+}
+
+#[cfg(not(has_i128))]
+macro_rules! assert_unsigned_scalar_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize)
+ $left $op $right == $expected);
+ };
+}
+
+#[cfg(has_i128)]
+macro_rules! assert_unsigned_scalar_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128)
+ $left $op $right == $expected);
+ };
+}
+
+#[cfg(not(has_i128))]
+macro_rules! assert_signed_scalar_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize,
+ to_i8, to_i16, to_i32, to_i64, to_isize)
+ $left $op $right == $expected);
+ };
+}
+
+#[cfg(has_i128)]
+macro_rules! assert_signed_scalar_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128,
+ to_i8, to_i16, to_i32, to_i64, to_isize, to_i128)
+ $left $op $right == $expected);
+ };
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/modpow.rs b/rust/vendor/num-bigint-0.2.6/tests/modpow.rs
new file mode 100644
index 0000000..a5f8b92
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/modpow.rs
@@ -0,0 +1,185 @@
+extern crate num_bigint;
+extern crate num_integer;
+extern crate num_traits;
+
+static BIG_B: &'static str = "\
+ efac3c0a_0de55551_fee0bfe4_67fa017a_1a898fa1_6ca57cb1\
+ ca9e3248_cacc09a9_b99d6abc_38418d0f_82ae4238_d9a68832\
+ aadec7c1_ac5fed48_7a56a71b_67ac59d5_afb28022_20d9592d\
+ 247c4efc_abbd9b75_586088ee_1dc00dc4_232a8e15_6e8191dd\
+ 675b6ae0_c80f5164_752940bc_284b7cee_885c1e10_e495345b\
+ 8fbe9cfd_e5233fe1_19459d0b_d64be53c_27de5a02_a829976b\
+ 33096862_82dad291_bd38b6a9_be396646_ddaf8039_a2573c39\
+ 1b14e8bc_2cb53e48_298c047e_d9879e9c_5a521076_f0e27df3\
+ 990e1659_d3d8205b_6443ebc0_9918ebee_6764f668_9f2b2be3\
+ b59cbc76_d76d0dfc_d737c3ec_0ccf9c00_ad0554bf_17e776ad\
+ b4edf9cc_6ce540be_76229093_5c53893b";
+
+static BIG_E: &'static str = "\
+ be0e6ea6_08746133_e0fbc1bf_82dba91e_e2b56231_a81888d2\
+ a833a1fc_f7ff002a_3c486a13_4f420bf3_a5435be9_1a5c8391\
+ 774d6e6c_085d8357_b0c97d4d_2bb33f7c_34c68059_f78d2541\
+ eacc8832_426f1816_d3be001e_b69f9242_51c7708e_e10efe98\
+ 449c9a4a_b55a0f23_9d797410_515da00d_3ea07970_4478a2ca\
+ c3d5043c_bd9be1b4_6dce479d_4302d344_84a939e6_0ab5ada7\
+ 12ae34b2_30cc473c_9f8ee69d_2cac5970_29f5bf18_bc8203e4\
+ f3e895a2_13c94f1e_24c73d77_e517e801_53661fdd_a2ce9e47\
+ a73dd7f8_2f2adb1e_3f136bf7_8ae5f3b8_08730de1_a4eff678\
+ e77a06d0_19a522eb_cbefba2a_9caf7736_b157c5c6_2d192591\
+ 17946850_2ddb1822_117b68a0_32f7db88";
+
+// This modulus is the prime from the 2048-bit MODP DH group:
+// https://tools.ietf.org/html/rfc3526#section-3
+static BIG_M: &'static str = "\
+ FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\
+ 29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\
+ EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\
+ E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\
+ EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\
+ C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\
+ 83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\
+ 670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\
+ E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\
+ DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\
+ 15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF";
+
+static BIG_R: &'static str = "\
+ a1468311_6e56edc9_7a98228b_5e924776_0dd7836e_caabac13\
+ eda5373b_4752aa65_a1454850_40dc770e_30aa8675_6be7d3a8\
+ 9d3085e4_da5155cf_b451ef62_54d0da61_cf2b2c87_f495e096\
+ 055309f7_77802bbb_37271ba8_1313f1b5_075c75d1_024b6c77\
+ fdb56f17_b05bce61_e527ebfd_2ee86860_e9907066_edd526e7\
+ 93d289bf_6726b293_41b0de24_eff82424_8dfd374b_4ec59542\
+ 35ced2b2_6b195c90_10042ffb_8f58ce21_bc10ec42_64fda779\
+ d352d234_3d4eaea6_a86111ad_a37e9555_43ca78ce_2885bed7\
+ 5a30d182_f1cf6834_dc5b6e27_1a41ac34_a2e91e11_33363ff0\
+ f88a7b04_900227c9_f6e6d06b_7856b4bb_4e354d61_060db6c8\
+ 109c4735_6e7db425_7b5d74c7_0b709508";
+
+mod biguint {
+ use num_bigint::BigUint;
+ use num_integer::Integer;
+ use num_traits::Num;
+
+ fn check_modpow<T: Into<BigUint>>(b: T, e: T, m: T, r: T) {
+ let b: BigUint = b.into();
+ let e: BigUint = e.into();
+ let m: BigUint = m.into();
+ let r: BigUint = r.into();
+
+ assert_eq!(b.modpow(&e, &m), r);
+
+ let even_m = &m << 1;
+ let even_modpow = b.modpow(&e, &even_m);
+ assert!(even_modpow < even_m);
+ assert_eq!(even_modpow.mod_floor(&m), r);
+ }
+
+ #[test]
+ fn test_modpow() {
+ check_modpow::<u32>(1, 0, 11, 1);
+ check_modpow::<u32>(0, 15, 11, 0);
+ check_modpow::<u32>(3, 7, 11, 9);
+ check_modpow::<u32>(5, 117, 19, 1);
+ check_modpow::<u32>(20, 1, 2, 0);
+ check_modpow::<u32>(20, 1, 3, 2);
+ }
+
+ #[test]
+ fn test_modpow_small() {
+ for b in 0u64..11 {
+ for e in 0u64..11 {
+ for m in 1..11 {
+ check_modpow::<u64>(b, e, m, b.pow(e as u32) % m);
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_modpow_big() {
+ let b = BigUint::from_str_radix(super::BIG_B, 16).unwrap();
+ let e = BigUint::from_str_radix(super::BIG_E, 16).unwrap();
+ let m = BigUint::from_str_radix(super::BIG_M, 16).unwrap();
+ let r = BigUint::from_str_radix(super::BIG_R, 16).unwrap();
+
+ assert_eq!(b.modpow(&e, &m), r);
+
+ let even_m = &m << 1;
+ let even_modpow = b.modpow(&e, &even_m);
+ assert!(even_modpow < even_m);
+ assert_eq!(even_modpow % m, r);
+ }
+}
+
+mod bigint {
+ use num_bigint::BigInt;
+ use num_integer::Integer;
+ use num_traits::{Num, One, Signed};
+
+ fn check_modpow<T: Into<BigInt>>(b: T, e: T, m: T, r: T) {
+ fn check(b: &BigInt, e: &BigInt, m: &BigInt, r: &BigInt) {
+ assert_eq!(&b.modpow(e, m), r);
+
+ let even_m = m << 1;
+            let even_modpow = b.modpow(e, &even_m);
+ assert!(even_modpow.abs() < even_m.abs());
+ assert_eq!(&even_modpow.mod_floor(&m), r);
+
+ // the sign of the result follows the modulus like `mod_floor`, not `rem`
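+            // e.g. with b = -5, e = 1, m = 3 the result is 1, matching (-5).mod_floor(&3),
+            // rather than -2, which is what -5 % 3 would give.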
+ assert_eq!(b.modpow(&BigInt::one(), m), b.mod_floor(m));
+ }
+
+ let b: BigInt = b.into();
+ let e: BigInt = e.into();
+ let m: BigInt = m.into();
+ let r: BigInt = r.into();
+
+ let neg_b_r = if e.is_odd() {
+ (-&r).mod_floor(&m)
+ } else {
+ r.clone()
+ };
+ let neg_m_r = r.mod_floor(&-&m);
+ let neg_bm_r = neg_b_r.mod_floor(&-&m);
+
+ check(&b, &e, &m, &r);
+ check(&-&b, &e, &m, &neg_b_r);
+ check(&b, &e, &-&m, &neg_m_r);
+ check(&-b, &e, &-&m, &neg_bm_r);
+ }
+
+ #[test]
+ fn test_modpow() {
+ check_modpow(1, 0, 11, 1);
+ check_modpow(0, 15, 11, 0);
+ check_modpow(3, 7, 11, 9);
+ check_modpow(5, 117, 19, 1);
+ check_modpow(-20, 1, 2, 0);
+ check_modpow(-20, 1, 3, 1);
+ }
+
+ #[test]
+ fn test_modpow_small() {
+ for b in -10i64..11 {
+ for e in 0i64..11 {
+ for m in -10..11 {
+ if m == 0 {
+ continue;
+ }
+ check_modpow(b, e, m, b.pow(e as u32).mod_floor(&m));
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_modpow_big() {
+ let b = BigInt::from_str_radix(super::BIG_B, 16).unwrap();
+ let e = BigInt::from_str_radix(super::BIG_E, 16).unwrap();
+ let m = BigInt::from_str_radix(super::BIG_M, 16).unwrap();
+ let r = BigInt::from_str_radix(super::BIG_R, 16).unwrap();
+
+ check_modpow(b, e, m, r);
+ }
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/quickcheck.rs b/rust/vendor/num-bigint-0.2.6/tests/quickcheck.rs
new file mode 100644
index 0000000..a9e7b04
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/quickcheck.rs
@@ -0,0 +1,361 @@
+#![cfg(feature = "quickcheck")]
+#![cfg(feature = "quickcheck_macros")]
+
+extern crate num_bigint;
+extern crate num_integer;
+extern crate num_traits;
+
+extern crate quickcheck;
+#[macro_use]
+extern crate quickcheck_macros;
+
+use num_bigint::{BigInt, BigUint};
+use num_integer::Integer;
+use num_traits::{Num, One, Pow, Signed, Zero};
+use quickcheck::{QuickCheck, StdThreadGen, TestResult};
+
+#[quickcheck]
+fn quickcheck_unsigned_eq_reflexive(a: BigUint) -> bool {
+ a == a
+}
+
+#[quickcheck]
+fn quickcheck_signed_eq_reflexive(a: BigInt) -> bool {
+ a == a
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_eq_symmetric(a: BigUint, b: BigUint) -> bool {
+ if a == b {
+ b == a
+ } else {
+ b != a
+ }
+}
+
+#[quickcheck]
+fn quickcheck_signed_eq_symmetric(a: BigInt, b: BigInt) -> bool {
+ if a == b {
+ b == a
+ } else {
+ b != a
+ }
+}
+
+#[test]
+fn quickcheck_arith_primitive() {
+ let gen = StdThreadGen::new(usize::max_value());
+ let mut qc = QuickCheck::with_gen(gen);
+
+ fn test_unsigned_add_primitive(a: usize, b: usize) -> TestResult {
+ let actual = BigUint::from(a) + BigUint::from(b);
+ match a.checked_add(b) {
+ None => TestResult::discard(),
+ Some(expected) => TestResult::from_bool(BigUint::from(expected) == actual),
+ }
+ }
+
+ fn test_signed_add_primitive(a: isize, b: isize) -> TestResult {
+ let actual = BigInt::from(a) + BigInt::from(b);
+ match a.checked_add(b) {
+ None => TestResult::discard(),
+ Some(expected) => TestResult::from_bool(BigInt::from(expected) == actual),
+ }
+ }
+
+ fn test_unsigned_mul_primitive(a: u64, b: u64) -> bool {
+        // the product of two u64 values always fits in u128, so this cannot overflow
+ BigUint::from(a as u128 * b as u128) == BigUint::from(a) * BigUint::from(b)
+ }
+
+ fn test_signed_mul_primitive(a: i64, b: i64) -> bool {
+        // the product of two i64 values always fits in i128, so this cannot overflow
+ BigInt::from(a as i128 * b as i128) == BigInt::from(a) * BigInt::from(b)
+ }
+
+ fn test_unsigned_sub_primitive(a: u128, b: u128) -> bool {
+ if b < a {
+ BigUint::from(a - b) == BigUint::from(a) - BigUint::from(b)
+ } else {
+ BigUint::from(b - a) == BigUint::from(b) - BigUint::from(a)
+ }
+ }
+
+ fn test_signed_sub_primitive(a: i128, b: i128) -> bool {
+ if b < a {
+ BigInt::from(a - b) == BigInt::from(a) - BigInt::from(b)
+ } else {
+ BigInt::from(b - a) == BigInt::from(b) - BigInt::from(a)
+ }
+ }
+
+ fn test_unsigned_div_primitive(a: u128, b: u128) -> TestResult {
+ if b == 0 {
+ TestResult::discard()
+ } else {
+ TestResult::from_bool(BigUint::from(a / b) == BigUint::from(a) / BigUint::from(b))
+ }
+ }
+
+ fn test_signed_div_primitive(a: i128, b: i128) -> TestResult {
+ if b == 0 {
+ TestResult::discard()
+ } else {
+ TestResult::from_bool(BigInt::from(a / b) == BigInt::from(a) / BigInt::from(b))
+ }
+ }
+
+ qc.quickcheck(test_unsigned_add_primitive as fn(usize, usize) -> TestResult);
+ qc.quickcheck(test_signed_add_primitive as fn(isize, isize) -> TestResult);
+ qc.quickcheck(test_unsigned_mul_primitive as fn(u64, u64) -> bool);
+ qc.quickcheck(test_signed_mul_primitive as fn(i64, i64) -> bool);
+ qc.quickcheck(test_unsigned_sub_primitive as fn(u128, u128) -> bool);
+ qc.quickcheck(test_signed_sub_primitive as fn(i128, i128) -> bool);
+ qc.quickcheck(test_unsigned_div_primitive as fn(u128, u128) -> TestResult);
+ qc.quickcheck(test_signed_div_primitive as fn(i128, i128) -> TestResult);
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_add_commutative(a: BigUint, b: BigUint) -> bool {
+ &a + &b == b + a
+}
+
+#[quickcheck]
+fn quickcheck_signed_add_commutative(a: BigInt, b: BigInt) -> bool {
+ &a + &b == b + a
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_add_zero(a: BigUint) -> bool {
+ a == &a + BigUint::zero()
+}
+
+#[quickcheck]
+fn quickcheck_signed_add_zero(a: BigInt) -> bool {
+ a == &a + BigInt::zero()
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_add_associative(a: BigUint, b: BigUint, c: BigUint) -> bool {
+ (&a + &b) + &c == a + (b + c)
+}
+
+#[quickcheck]
+fn quickcheck_signed_add_associative(a: BigInt, b: BigInt, c: BigInt) -> bool {
+ (&a + &b) + &c == a + (b + c)
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_mul_zero(a: BigUint) -> bool {
+ a * BigUint::zero() == BigUint::zero()
+}
+
+#[quickcheck]
+fn quickcheck_signed_mul_zero(a: BigInt) -> bool {
+ a * BigInt::zero() == BigInt::zero()
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_mul_one(a: BigUint) -> bool {
+ &a * BigUint::one() == a
+}
+
+#[quickcheck]
+fn quickcheck_signed_mul_one(a: BigInt) -> bool {
+ &a * BigInt::one() == a
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_mul_commutative(a: BigUint, b: BigUint) -> bool {
+ &a * &b == b * a
+}
+
+#[quickcheck]
+fn quickcheck_signed_mul_commutative(a: BigInt, b: BigInt) -> bool {
+ &a * &b == b * a
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_mul_associative(a: BigUint, b: BigUint, c: BigUint) -> bool {
+ (&a * &b) * &c == a * (b * c)
+}
+
+#[quickcheck]
+fn quickcheck_signed_mul_associative(a: BigInt, b: BigInt, c: BigInt) -> bool {
+ (&a * &b) * &c == a * (b * c)
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_distributive(a: BigUint, b: BigUint, c: BigUint) -> bool {
+ &a * (&b + &c) == &a * b + a * c
+}
+
+#[quickcheck]
+fn quickcheck_signed_distributive(a: BigInt, b: BigInt, c: BigInt) -> bool {
+ &a * (&b + &c) == &a * b + a * c
+}
+
+#[quickcheck]
+/// Tests that exactly one of a < b, a > b, a == b is true
+fn quickcheck_unsigned_ge_le_eq_mut_exclusive(a: BigUint, b: BigUint) -> bool {
+ let gt_lt_eq = vec![a > b, a < b, a == b];
+ gt_lt_eq
+ .iter()
+ .fold(0, |acc, e| if *e { acc + 1 } else { acc })
+ == 1
+}
+
+#[quickcheck]
+/// Tests that exactly one of a < b, a > b, a == b is true
+fn quickcheck_signed_ge_le_eq_mut_exclusive(a: BigInt, b: BigInt) -> bool {
+ let gt_lt_eq = vec![a > b, a < b, a == b];
+ gt_lt_eq
+ .iter()
+ .fold(0, |acc, e| if *e { acc + 1 } else { acc })
+ == 1
+}
+
+#[quickcheck]
+/// Tests correctness of subtraction assuming addition is correct
+fn quickcheck_unsigned_sub(a: BigUint, b: BigUint) -> bool {
+ if b < a {
+ &a - &b + b == a
+ } else {
+ &b - &a + a == b
+ }
+}
+
+#[quickcheck]
+/// Tests correctness of subtraction assuming addition is correct
+fn quickcheck_signed_sub(a: BigInt, b: BigInt) -> bool {
+ if b < a {
+ &a - &b + b == a
+ } else {
+ &b - &a + a == b
+ }
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_pow_zero(a: BigUint) -> bool {
+ a.pow(0_u32) == BigUint::one()
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_pow_one(a: BigUint) -> bool {
+ a.pow(1_u32) == a
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_sqrt(a: BigUint) -> bool {
+ (&a * &a).sqrt() == a
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_cbrt(a: BigUint) -> bool {
+ (&a * &a * &a).cbrt() == a
+}
+
+#[quickcheck]
+fn quickcheck_signed_cbrt(a: BigInt) -> bool {
+ (&a * &a * &a).cbrt() == a
+}
+
+#[quickcheck]
+fn quickcheck_unsigned_conversion(a: BigUint, radix: u8) -> TestResult {
+ let radix = radix as u32;
+ if radix > 36 || radix < 2 {
+ return TestResult::discard();
+ }
+ let string = a.to_str_radix(radix);
+ TestResult::from_bool(a == BigUint::from_str_radix(&string, radix).unwrap())
+}
+
+#[quickcheck]
+fn quickcheck_signed_conversion(a: BigInt, radix: u8) -> TestResult {
+ let radix = radix as u32;
+ if radix > 36 || radix < 2 {
+ return TestResult::discard();
+ }
+ let string = a.to_str_radix(radix);
+ TestResult::from_bool(a == BigInt::from_str_radix(&string, radix).unwrap())
+}
+
+#[test]
+fn quicktest_shift() {
+ let gen = StdThreadGen::new(usize::max_value());
+ let mut qc = QuickCheck::with_gen(gen);
+
+ fn test_shr_unsigned(a: u64, shift: u8) -> TestResult {
+ let shift = (shift % 64) as usize; //shift at most 64 bits
+ let big_a = BigUint::from(a);
+ TestResult::from_bool(BigUint::from(a >> shift) == big_a >> shift)
+ }
+
+ fn test_shr_signed(a: i64, shift: u8) -> TestResult {
+ let shift = (shift % 64) as usize; //shift at most 64 bits
+ let big_a = BigInt::from(a);
+ TestResult::from_bool(BigInt::from(a >> shift) == big_a >> shift)
+ }
+
+ fn test_shl_unsigned(a: u32, shift: u8) -> TestResult {
+ let shift = (shift % 32) as usize; //shift at most 32 bits
+ let a = a as u64; //leave room for the shifted bits
+ let big_a = BigUint::from(a);
+        TestResult::from_bool(BigUint::from(a << shift) == big_a << shift)
+ }
+
+ fn test_shl_signed(a: i32, shift: u8) -> TestResult {
+ let shift = (shift % 32) as usize;
+        let a = a as i64; //leave room for the shifted bits
+        let big_a = BigInt::from(a);
+        TestResult::from_bool(BigInt::from(a << shift) == big_a << shift)
+ }
+
+ qc.quickcheck(test_shr_unsigned as fn(u64, u8) -> TestResult);
+ qc.quickcheck(test_shr_signed as fn(i64, u8) -> TestResult);
+ qc.quickcheck(test_shl_unsigned as fn(u32, u8) -> TestResult);
+ qc.quickcheck(test_shl_signed as fn(i32, u8) -> TestResult);
+}
+
+#[test]
+fn quickcheck_modpow() {
+ let gen = StdThreadGen::new(usize::max_value());
+ let mut qc = QuickCheck::with_gen(gen);
+
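+    // Reference implementation: right-to-left binary exponentiation (square-and-multiply),
+    // reducing with mod_floor after every step so the sign of the result follows the modulus.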
+ fn simple_modpow(base: &BigInt, exponent: &BigInt, modulus: &BigInt) -> BigInt {
+ assert!(!exponent.is_negative());
+ let mut result = BigInt::one().mod_floor(modulus);
+ let mut base = base.mod_floor(modulus);
+ let mut exponent = exponent.clone();
+ while !exponent.is_zero() {
+ if exponent.is_odd() {
+ result = (result * &base).mod_floor(modulus);
+ }
+ base = (&base * &base).mod_floor(modulus);
+ exponent >>= 1;
+ }
+ result
+ }
+
+ fn test_modpow(base: i128, exponent: u128, modulus: i128) -> TestResult {
+ if modulus.is_zero() {
+ TestResult::discard()
+ } else {
+ let base = BigInt::from(base);
+ let exponent = BigInt::from(exponent);
+ let modulus = BigInt::from(modulus);
+ let modpow = base.modpow(&exponent, &modulus);
+ let simple = simple_modpow(&base, &exponent, &modulus);
+ if modpow != simple {
+ eprintln!("{}.modpow({}, {})", base, exponent, modulus);
+ eprintln!(" expected {}", simple);
+ eprintln!(" actual {}", modpow);
+ TestResult::failed()
+ } else {
+ TestResult::passed()
+ }
+ }
+ }
+
+ qc.quickcheck(test_modpow as fn(i128, u128, i128) -> TestResult);
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/rand.rs b/rust/vendor/num-bigint-0.2.6/tests/rand.rs
new file mode 100644
index 0000000..666b764
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/rand.rs
@@ -0,0 +1,324 @@
+#![cfg(feature = "rand")]
+
+extern crate num_bigint;
+extern crate num_traits;
+extern crate rand;
+
+mod biguint {
+ use num_bigint::{BigUint, RandBigInt, RandomBits};
+ use num_traits::Zero;
+ use rand::distributions::Uniform;
+ use rand::thread_rng;
+ use rand::{Rng, SeedableRng};
+
+ #[test]
+ fn test_rand() {
+ let mut rng = thread_rng();
+ let n: BigUint = rng.gen_biguint(137);
+ assert!(n.bits() <= 137);
+ assert!(rng.gen_biguint(0).is_zero());
+ }
+
+ #[test]
+ fn test_rand_bits() {
+ let mut rng = thread_rng();
+ let n: BigUint = rng.sample(&RandomBits::new(137));
+ assert!(n.bits() <= 137);
+ let z: BigUint = rng.sample(&RandomBits::new(0));
+ assert!(z.is_zero());
+ }
+
+ #[test]
+ fn test_rand_range() {
+ let mut rng = thread_rng();
+
+ for _ in 0..10 {
+ assert_eq!(
+ rng.gen_biguint_range(&BigUint::from(236u32), &BigUint::from(237u32)),
+ BigUint::from(236u32)
+ );
+ }
+
+ let l = BigUint::from(403469000u32 + 2352);
+ let u = BigUint::from(403469000u32 + 3513);
+ for _ in 0..1000 {
+ let n: BigUint = rng.gen_biguint_below(&u);
+ assert!(n < u);
+
+ let n: BigUint = rng.gen_biguint_range(&l, &u);
+ assert!(n >= l);
+ assert!(n < u);
+ }
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_zero_rand_range() {
+ thread_rng().gen_biguint_range(&BigUint::from(54u32), &BigUint::from(54u32));
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_negative_rand_range() {
+ let mut rng = thread_rng();
+ let l = BigUint::from(2352u32);
+ let u = BigUint::from(3513u32);
+ // Switching u and l should fail:
+ let _n: BigUint = rng.gen_biguint_range(&u, &l);
+ }
+
+ #[test]
+ fn test_rand_uniform() {
+ let mut rng = thread_rng();
+
+ let tiny = Uniform::new(BigUint::from(236u32), BigUint::from(237u32));
+ for _ in 0..10 {
+ assert_eq!(rng.sample(&tiny), BigUint::from(236u32));
+ }
+
+ let l = BigUint::from(403469000u32 + 2352);
+ let u = BigUint::from(403469000u32 + 3513);
+ let below = Uniform::new(BigUint::zero(), u.clone());
+ let range = Uniform::new(l.clone(), u.clone());
+ for _ in 0..1000 {
+ let n: BigUint = rng.sample(&below);
+ assert!(n < u);
+
+ let n: BigUint = rng.sample(&range);
+ assert!(n >= l);
+ assert!(n < u);
+ }
+ }
+
+ fn seeded_value_stability<R: SeedableRng + RandBigInt>(expected: &[&str]) {
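+        // Fill the seed with a fixed byte pattern (i * 191 mod 256) so each RNG is
+        // deterministic and the expected decimal strings below stay stable across runs.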
+ let mut seed = <R::Seed>::default();
+ for (i, x) in seed.as_mut().iter_mut().enumerate() {
+ *x = (i as u8).wrapping_mul(191);
+ }
+ let mut rng = R::from_seed(seed);
+ for (i, &s) in expected.iter().enumerate() {
+ let n: BigUint = s.parse().unwrap();
+ let r = rng.gen_biguint((1 << i) + i);
+ assert_eq!(n, r);
+ }
+ }
+
+ #[test]
+ fn test_chacha_value_stability() {
+ const EXPECTED: &[&str] = &[
+ "0",
+ "0",
+ "52",
+ "84",
+ "23780",
+ "86502865016",
+ "187057847319509867386",
+ "34045731223080904464438757488196244981910",
+ "23813754422987836414755953516143692594193066497413249270287126597896871975915808",
+ "57401636903146945411652549098818446911814352529449356393690984105383482703074355\
+ 67088360974672291353736011718191813678720755501317478656550386324355699624671",
+ ];
+ use rand::prng::ChaChaRng;
+ seeded_value_stability::<ChaChaRng>(EXPECTED);
+ }
+
+ #[test]
+ fn test_isaac_value_stability() {
+ const EXPECTED: &[&str] = &[
+ "1",
+ "4",
+ "3",
+ "649",
+ "89116",
+ "7730042024",
+ "20773149082453254949",
+ "35999009049239918667571895439206839620281",
+ "10191757312714088681302309313551624007714035309632506837271600807524767413673006",
+ "37805949268912387809989378008822038725134260145886913321084097194957861133272558\
+ 43458183365174899239251448892645546322463253898288141861183340823194379722556",
+ ];
+ use rand::prng::IsaacRng;
+ seeded_value_stability::<IsaacRng>(EXPECTED);
+ }
+
+ #[test]
+ fn test_xorshift_value_stability() {
+ const EXPECTED: &[&str] = &[
+ "1",
+ "0",
+ "37",
+ "395",
+ "181116",
+ "122718231117",
+ "1068467172329355695001",
+ "28246925743544411614293300167064395633287",
+ "12750053187017853048648861493745244146555950255549630854523304068318587267293038",
+ "53041498719137109355568081064978196049094604705283682101683207799515709404788873\
+ 53417136457745727045473194367732849819278740266658219147356315674940229288531",
+ ];
+ use rand::prng::XorShiftRng;
+ seeded_value_stability::<XorShiftRng>(EXPECTED);
+ }
+}
+
+mod bigint {
+ use num_bigint::{BigInt, RandBigInt, RandomBits};
+ use num_traits::Zero;
+ use rand::distributions::Uniform;
+ use rand::thread_rng;
+ use rand::{Rng, SeedableRng};
+
+ #[test]
+ fn test_rand() {
+ let mut rng = thread_rng();
+ let n: BigInt = rng.gen_bigint(137);
+ assert!(n.bits() <= 137);
+ assert!(rng.gen_bigint(0).is_zero());
+ }
+
+ #[test]
+ fn test_rand_bits() {
+ let mut rng = thread_rng();
+ let n: BigInt = rng.sample(&RandomBits::new(137));
+ assert!(n.bits() <= 137);
+ let z: BigInt = rng.sample(&RandomBits::new(0));
+ assert!(z.is_zero());
+ }
+
+ #[test]
+ fn test_rand_range() {
+ let mut rng = thread_rng();
+
+ for _ in 0..10 {
+ assert_eq!(
+ rng.gen_bigint_range(&BigInt::from(236), &BigInt::from(237)),
+ BigInt::from(236)
+ );
+ }
+
+ fn check(l: BigInt, u: BigInt) {
+ let mut rng = thread_rng();
+ for _ in 0..1000 {
+ let n: BigInt = rng.gen_bigint_range(&l, &u);
+ assert!(n >= l);
+ assert!(n < u);
+ }
+ }
+ let l: BigInt = BigInt::from(403469000 + 2352);
+ let u: BigInt = BigInt::from(403469000 + 3513);
+ check(l.clone(), u.clone());
+ check(-l.clone(), u.clone());
+ check(-u.clone(), -l.clone());
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_zero_rand_range() {
+ thread_rng().gen_bigint_range(&BigInt::from(54), &BigInt::from(54));
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_negative_rand_range() {
+ let mut rng = thread_rng();
+ let l = BigInt::from(2352);
+ let u = BigInt::from(3513);
+ // Switching u and l should fail:
+ let _n: BigInt = rng.gen_bigint_range(&u, &l);
+ }
+
+ #[test]
+ fn test_rand_uniform() {
+ let mut rng = thread_rng();
+
+ let tiny = Uniform::new(BigInt::from(236u32), BigInt::from(237u32));
+ for _ in 0..10 {
+ assert_eq!(rng.sample(&tiny), BigInt::from(236u32));
+ }
+
+ fn check(l: BigInt, u: BigInt) {
+ let mut rng = thread_rng();
+ let range = Uniform::new(l.clone(), u.clone());
+ for _ in 0..1000 {
+ let n: BigInt = rng.sample(&range);
+ assert!(n >= l);
+ assert!(n < u);
+ }
+ }
+ let l: BigInt = BigInt::from(403469000 + 2352);
+ let u: BigInt = BigInt::from(403469000 + 3513);
+ check(l.clone(), u.clone());
+ check(-l.clone(), u.clone());
+ check(-u.clone(), -l.clone());
+ }
+
+ fn seeded_value_stability<R: SeedableRng + RandBigInt>(expected: &[&str]) {
+ let mut seed = <R::Seed>::default();
+ for (i, x) in seed.as_mut().iter_mut().enumerate() {
+ *x = (i as u8).wrapping_mul(191);
+ }
+ let mut rng = R::from_seed(seed);
+ for (i, &s) in expected.iter().enumerate() {
+ let n: BigInt = s.parse().unwrap();
+ let r = rng.gen_bigint((1 << i) + i);
+ assert_eq!(n, r);
+ }
+ }
+
+ #[test]
+ fn test_chacha_value_stability() {
+ const EXPECTED: &[&str] = &[
+ "0",
+ "-6",
+ "-1",
+ "1321",
+ "-147247",
+ "8486373526",
+ "-272736656290199720696",
+ "2731152629387534140535423510744221288522",
+ "-28820024790651190394679732038637785320661450462089347915910979466834461433196572",
+ "501454570554170484799723603981439288209930393334472085317977614690773821680884844\
+ 8530978478667288338327570972869032358120588620346111979053742269317702532328",
+ ];
+ use rand::prng::ChaChaRng;
+ seeded_value_stability::<ChaChaRng>(EXPECTED);
+ }
+
+ #[test]
+ fn test_isaac_value_stability() {
+ const EXPECTED: &[&str] = &[
+ "1",
+ "0",
+ "5",
+ "113",
+ "-132240",
+ "-36348760761",
+ "-365690596708430705434",
+ "-14090753008246284277803606722552430292432",
+ "-26313941628626248579319341019368550803676255307056857978955881718727601479436059",
+ "-14563174552421101848999036239003801073335703811160945137332228646111920972691151\
+ 88341090358094331641182310792892459091016794928947242043358702692294695845817",
+ ];
+ use rand::prng::IsaacRng;
+ seeded_value_stability::<IsaacRng>(EXPECTED);
+ }
+
+ #[test]
+ fn test_xorshift_value_stability() {
+ const EXPECTED: &[&str] = &[
+ "-1",
+ "-4",
+ "11",
+ "-1802",
+ "966495",
+ "-62592045703",
+ "-602281783447192077116",
+ "-34335811410223060575607987996861632509125",
+ "29156580925282215857325937227200350542000244609280383263289720243118706105351199",
+ "49920038676141573457451407325930326489996232208489690499754573826911037849083623\
+ 24546142615325187412887314466195222441945661833644117700809693098722026764846",
+ ];
+ use rand::prng::XorShiftRng;
+ seeded_value_stability::<XorShiftRng>(EXPECTED);
+ }
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/roots.rs b/rust/vendor/num-bigint-0.2.6/tests/roots.rs
new file mode 100644
index 0000000..39201fa
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/roots.rs
@@ -0,0 +1,186 @@
+extern crate num_bigint;
+extern crate num_integer;
+extern crate num_traits;
+
+#[cfg(feature = "rand")]
+extern crate rand;
+
+mod biguint {
+ use num_bigint::BigUint;
+ use num_traits::{One, Pow, Zero};
+ use std::{i32, u32};
+
+ fn check<T: Into<BigUint>>(x: T, n: u32) {
+ let x: BigUint = x.into();
+ let root = x.nth_root(n);
+ println!("check {}.nth_root({}) = {}", x, n, root);
+
+ if n == 2 {
+ assert_eq!(root, x.sqrt())
+ } else if n == 3 {
+ assert_eq!(root, x.cbrt())
+ }
+
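+        // Defining property of the n-th root: root.pow(n) <= x < (root + 1).pow(n),
+        // checked from both sides below.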
+ let lo = root.pow(n);
+ assert!(lo <= x);
+ assert_eq!(lo.nth_root(n), root);
+ if !lo.is_zero() {
+ assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32);
+ }
+
+ let hi = (&root + 1u32).pow(n);
+ assert!(hi > x);
+ assert_eq!(hi.nth_root(n), &root + 1u32);
+ assert_eq!((&hi - 1u32).nth_root(n), root);
+ }
+
+ #[test]
+ fn test_sqrt() {
+ check(99u32, 2);
+ check(100u32, 2);
+ check(120u32, 2);
+ }
+
+ #[test]
+ fn test_cbrt() {
+ check(8u32, 3);
+ check(26u32, 3);
+ }
+
+ #[test]
+ fn test_nth_root() {
+ check(0u32, 1);
+ check(10u32, 1);
+ check(100u32, 4);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_nth_root_n_is_zero() {
+ check(4u32, 0);
+ }
+
+ #[test]
+ fn test_nth_root_big() {
+ let x = BigUint::from(123_456_789_u32);
+ let expected = BigUint::from(6u32);
+
+ assert_eq!(x.nth_root(10), expected);
+ check(x, 10);
+ }
+
+ #[test]
+ fn test_nth_root_googol() {
+ let googol = BigUint::from(10u32).pow(100u32);
+
+ // perfect divisors of 100
+ for &n in &[2, 4, 5, 10, 20, 25, 50, 100] {
+ let expected = BigUint::from(10u32).pow(100u32 / n);
+ assert_eq!(googol.nth_root(n), expected);
+ check(googol.clone(), n);
+ }
+ }
+
+ #[test]
+ fn test_nth_root_twos() {
+ const EXP: u32 = 12;
+ const LOG2: usize = 1 << EXP;
+ let x = BigUint::one() << LOG2;
+
+ // the perfect divisors are just powers of two
+ for exp in 1..EXP + 1 {
+ let n = 2u32.pow(exp);
+ let expected = BigUint::one() << (LOG2 / n as usize);
+ assert_eq!(x.nth_root(n), expected);
+ check(x.clone(), n);
+ }
+
+ // degenerate cases should return quickly
+ assert!(x.nth_root(x.bits() as u32).is_one());
+ assert!(x.nth_root(i32::MAX as u32).is_one());
+ assert!(x.nth_root(u32::MAX).is_one());
+ }
+
+ #[cfg(feature = "rand")]
+ #[test]
+ fn test_roots_rand() {
+ use num_bigint::RandBigInt;
+ use rand::distributions::Uniform;
+ use rand::{thread_rng, Rng};
+
+ let mut rng = thread_rng();
+ let bit_range = Uniform::new(0, 2048);
+ let sample_bits: Vec<_> = rng.sample_iter(&bit_range).take(100).collect();
+ for bits in sample_bits {
+ let x = rng.gen_biguint(bits);
+ for n in 2..11 {
+ check(x.clone(), n);
+ }
+ check(x.clone(), 100);
+ }
+ }
+
+ #[test]
+ fn test_roots_rand1() {
+ // A random input that found regressions
+ let s = "575981506858479247661989091587544744717244516135539456183849\
+ 986593934723426343633698413178771587697273822147578889823552\
+ 182702908597782734558103025298880194023243541613924361007059\
+ 353344183590348785832467726433749431093350684849462759540710\
+ 026019022227591412417064179299354183441181373862905039254106\
+ 4781867";
+ let x: BigUint = s.parse().unwrap();
+
+ check(x.clone(), 2);
+ check(x.clone(), 3);
+ check(x.clone(), 10);
+ check(x.clone(), 100);
+ }
+}
+
+mod bigint {
+ use num_bigint::BigInt;
+ use num_traits::{Pow, Signed};
+
+ fn check(x: i64, n: u32) {
+ let big_x = BigInt::from(x);
+ let res = big_x.nth_root(n);
+
+ if n == 2 {
+ assert_eq!(&res, &big_x.sqrt())
+ } else if n == 3 {
+ assert_eq!(&res, &big_x.cbrt())
+ }
+
+ if big_x.is_negative() {
+ assert!(res.pow(n) >= big_x);
+ assert!((res - 1u32).pow(n) < big_x);
+ } else {
+ assert!(res.pow(n) <= big_x);
+ assert!((res + 1u32).pow(n) > big_x);
+ }
+ }
+
+ #[test]
+ fn test_nth_root() {
+ check(-100, 3);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_nth_root_x_neg_n_even() {
+ check(-100, 4);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_sqrt_x_neg() {
+ check(-4, 2);
+ }
+
+ #[test]
+ fn test_cbrt() {
+ check(8, 3);
+ check(-8, 3);
+ }
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/serde.rs b/rust/vendor/num-bigint-0.2.6/tests/serde.rs
new file mode 100644
index 0000000..0f3d486
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/serde.rs
@@ -0,0 +1,103 @@
+//! Test serialization and deserialization of `BigUint` and `BigInt`
+//!
+//! The serialized formats should not change, even if we change our
+//! internal representation, because we want to preserve forward and
+//! backward compatibility of serialized data!
+
+#![cfg(feature = "serde")]
+
+extern crate num_bigint;
+extern crate num_traits;
+extern crate serde_test;
+
+use num_bigint::{BigInt, BigUint};
+use num_traits::{One, Zero};
+use serde_test::{assert_tokens, Token};
+
+#[test]
+fn biguint_zero() {
+ let tokens = [Token::Seq { len: Some(0) }, Token::SeqEnd];
+ assert_tokens(&BigUint::zero(), &tokens);
+}
+
+#[test]
+fn bigint_zero() {
+ let tokens = [
+ Token::Tuple { len: 2 },
+ Token::I8(0),
+ Token::Seq { len: Some(0) },
+ Token::SeqEnd,
+ Token::TupleEnd,
+ ];
+ assert_tokens(&BigInt::zero(), &tokens);
+}
+
+#[test]
+fn biguint_one() {
+ let tokens = [Token::Seq { len: Some(1) }, Token::U32(1), Token::SeqEnd];
+ assert_tokens(&BigUint::one(), &tokens);
+}
+
+#[test]
+fn bigint_one() {
+ let tokens = [
+ Token::Tuple { len: 2 },
+ Token::I8(1),
+ Token::Seq { len: Some(1) },
+ Token::U32(1),
+ Token::SeqEnd,
+ Token::TupleEnd,
+ ];
+ assert_tokens(&BigInt::one(), &tokens);
+}
+
+#[test]
+fn bigint_negone() {
+ let tokens = [
+ Token::Tuple { len: 2 },
+ Token::I8(-1),
+ Token::Seq { len: Some(1) },
+ Token::U32(1),
+ Token::SeqEnd,
+ Token::TupleEnd,
+ ];
+ assert_tokens(&-BigInt::one(), &tokens);
+}
+
+// Generated independently from python `hex(factorial(100))`
+const FACTORIAL_100: &'static [u32] = &[
+ 0x00000000, 0x00000000, 0x00000000, 0x2735c61a, 0xee8b02ea, 0xb3b72ed2, 0x9420c6ec, 0x45570cca,
+ 0xdf103917, 0x943a321c, 0xeb21b5b2, 0x66ef9a70, 0xa40d16e9, 0x28d54bbd, 0xdc240695, 0x964ec395,
+ 0x1b30,
+];
+
+#[test]
+fn biguint_factorial_100() {
+ let n: BigUint = (1u8..101).product();
+
+ let mut tokens = vec![];
+ tokens.push(Token::Seq {
+ len: Some(FACTORIAL_100.len()),
+ });
+ tokens.extend(FACTORIAL_100.iter().map(|&u| Token::U32(u)));
+ tokens.push(Token::SeqEnd);
+
+ assert_tokens(&n, &tokens);
+}
+
+#[test]
+fn bigint_factorial_100() {
+ let n: BigInt = (1i8..101).product();
+
+ let mut tokens = vec![];
+ tokens.push(Token::Tuple { len: 2 });
+ tokens.push(Token::I8(1));
+ tokens.push(Token::Seq {
+ len: Some(FACTORIAL_100.len()),
+ });
+ tokens.extend(FACTORIAL_100.iter().map(|&u| Token::U32(u)));
+ tokens.push(Token::SeqEnd);
+ tokens.push(Token::TupleEnd);
+
+ assert_tokens(&n, &tokens);
+}
diff --git a/rust/vendor/num-bigint-0.2.6/tests/torture.rs b/rust/vendor/num-bigint-0.2.6/tests/torture.rs
new file mode 100644
index 0000000..4f073d3
--- /dev/null
+++ b/rust/vendor/num-bigint-0.2.6/tests/torture.rs
@@ -0,0 +1,43 @@
+#![cfg(feature = "rand")]
+
+extern crate num_bigint;
+extern crate num_traits;
+extern crate rand;
+
+use num_bigint::RandBigInt;
+use num_traits::Zero;
+use rand::prelude::*;
+
+fn test_mul_divide_torture_count(count: usize) {
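+    // Use a fixed seed so any failure is reproducible; operand sizes are capped at 2^12 bits.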
+ let bits_max = 1 << 12;
+ let seed = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
+ let mut rng = SmallRng::from_seed(seed);
+
+ for _ in 0..count {
+ // Test with numbers of random sizes:
+ let xbits = rng.gen_range(0, bits_max);
+ let ybits = rng.gen_range(0, bits_max);
+
+ let x = rng.gen_biguint(xbits);
+ let y = rng.gen_biguint(ybits);
+
+ if x.is_zero() || y.is_zero() {
+ continue;
+ }
+
+ let prod = &x * &y;
+ assert_eq!(&prod / &x, y);
+ assert_eq!(&prod / &y, x);
+ }
+}
+
+#[test]
+fn test_mul_divide_torture() {
+ test_mul_divide_torture_count(1000);
+}
+
+#[test]
+#[ignore]
+fn test_mul_divide_torture_long() {
+ test_mul_divide_torture_count(1000000);
+}
diff --git a/rust/vendor/num-bigint/.cargo-checksum.json b/rust/vendor/num-bigint/.cargo-checksum.json
new file mode 100644
index 0000000..28c7008
--- /dev/null
+++ b/rust/vendor/num-bigint/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"2cfbab335a56779607ae2bd573b3144768321d4aaeb3b48a8fc5e8473bfe61ff","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"1ee66f7d39c572dabe553b5ad698111d58a9ab1f0019ed8b6371af2b67ad0aae","RELEASES.md":"ed8cff3aa3583da2ab9cb3185e93b5644aafbef03e08647b51f57ddc99d27acd","benches/bigint.rs":"7efd4741f53c786bae63d1196492b5657fd0d928b37a59a08629f6efdc35c845","benches/factorial.rs":"ed1d276a780e7e5fe79121b941c22a00c2854dbf92fd8a5372619853ba0c13b7","benches/gcd.rs":"3cc1a3356f680a6fa625f0ece0c8dd778f4091a53a31177f2870ef9a6c858c7d","benches/rng/mod.rs":"38144fc8283955db4be72a1533328fded98986d6f9d0bc7da0b306f7d4b5ca43","benches/roots.rs":"b31846852a7215c26df228940f2e469aff32fa8805eccc2b5ee5e7280ef0eeb4","benches/shootout-pidigits.rs":"c2a48133f5b679928f7e3f4e764c78aaa8c5b811f58b86fe57fae8c63cb07136","build.rs":"9a4c2d95c240f46a54b565a6fc9d5f3b2fef2634bc829175ea7740652207bd34","src/bigint.rs":"f7aa256232ac8b4208ae6c3e026a4313afd6d0a79377d0d989ef6db8b31bb942","src/bigint/addition.rs":"57d88a77b36494441b6b6e969269c4dee237cead018512f2bcd074f28806ef00","src/bigint/arbitrary.rs":"6679833dffb38fa81f251bf9dd35b0d5b4cecb2a368e82aac92b00cef4dfc21b","src/bigint/bits.rs":"0e09e62317fa74a85c7f45c2bf441a7a40bb02b4a2ef52620871573b73ac6808","src/bigint/convert.rs":"8c3763391fc2df1e79268fcc03cae4454ace87455d1994288e55473a8993528a","src/bigint/division.rs":"1a268e7739a7f5be56ff084aa092d1c570e593690ca980ff77254faf51620202","src/bigint/multiplication.rs":"c262f732b86cc36804895df21e5ea5221944cadc1fca7903ff75a26102ba70f1","src/bigint/power.rs":"0ed57f2c9b5bc3debd6811dbb8705d07b0131f594be268321da35e70c08dbea9","src/bigint/serde.rs":"8240ed79ac11ec0ec2dfc85d4657693d5b03379bdd60a42dccee4764b000e5b6","src/bigint/shift.rs":"3f28bca2d52621133cdf567290645d30a746e663d8dacea29c172b5ed7ff3538","src/bigint/subtraction.rs":"6c3cce163293e2b95df38df4797329e2f9b43369e845697744dfbe43a67399ce","src/bigrand.rs":"2af7fde779b326006d1fd00f85f81ab6d3b2e30ec370fe4eaa41eeceeec6b7a6","src/biguint.rs":"a8c6b4d5fd6c3e07e888727385fc7148ad5e952eb940ee48c3086c864ad80941","src/biguint/addition.rs":"4f79fc1fc3157303309dcfa2166ae10ed33edbb11bf5602c43038485b5562746","src/biguint/arbitrary.rs":"895fe5a9bbcf40824d1a342e089fb2aec78cb9bad0dd489cfef489a3323f6c3b","src/biguint/bits.rs":"509036c6c6cb083d4568f92ac852cf5f12510f98c4547d47a80e3db4282a159e","src/biguint/convert.rs":"10d25021dab0bf88a58b96ca786689f72530a946d0a080e29ba40079dc1d844d","src/biguint/division.rs":"bedce3a1a7ffa271e0b74e98929f98a87343bb6a54a9de3c2effdd27041c78a5","src/biguint/iter.rs":"c21e30f573bdf1936e72dd56a89ee662329d25e8b55e37e88e9b1aea2da48abd","src/biguint/monty.rs":"08554859ed7aeaec89b311458c443d3eab375b02da7a9b20df8b42ed18b8c8c1","src/biguint/multiplication.rs":"501da2844830b1699a05e58fae3deb21b158fc0871e2cd4fc50049202faeb2c8","src/biguint/power.rs":"e428489a4aa6db640fde03eca61bce8eb56502303c48794248a6d14154ecf2ae","src/biguint/serde.rs":"23e028b21a9a836f443c8bf996c2011d2a1df87bc53fb96b83ae1545efccba81","src/biguint/shift.rs":"c67a37cf2605e06bb706d493659303992d25d623fa62dfa085e01d53acbaa08a","src/biguint/subtraction.rs":"3b18ab809da5efed5204217b46ccf4d653b3e8abb61820c1e0eebe13e400b3a9","src/lib.rs":"35f99739d938f36701d7951e40aaaf4a5b96efdce6c6d489af775913ed32eed4","src/macros.rs":"a007d89d68848f99119c524ff1d01c4acaf052f46555cb395fff066879153b77","tests/bigint.rs":"f2bfe36ebee59e083a1133838240167e902f1f2580bce68057a86de9dab5fe56","tests/b
igint_bitwise.rs":"e6a2f76fa1eb919e7c513d7e30a8a2a963841a295a71103462fb8ab9792419b5","tests/bigint_scalar.rs":"a87e801e370686985d44e1f020c69fceca72b9f048e0f7301d2b8d38469e5636","tests/biguint.rs":"09dc0cc5e7c81974e2097666de3732917b21df93362b17fcab1e180d44296d0a","tests/biguint_scalar.rs":"b09cda9d4fe6ec519e93282653f69b57d70db73b9cb59c0ea5cd0861ca2de266","tests/consts/mod.rs":"e20bc49a7cc95077242cbe4016b37745ea986c779d2385cb367fbfe44f15ff94","tests/fuzzed.rs":"f60a84c446ea2f45d87eb4ee64682ea63fdef05bc74f482739d4e968960e8f4e","tests/macros/mod.rs":"1a8f9f015e5caaac60ce9ccff01a75ae489801c3ede6e7b9b3c5079b6efefc9c","tests/modpow.rs":"f4c81111308d63876ed02229cbab50bec9bac082bfa71f481120e46b5d7e5560","tests/roots.rs":"a3bc2de170a0f6297cc8d8830d608db537ca102ccf204fd4fb8e2d92675622d8"},"package":"608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0"} \ No newline at end of file
diff --git a/rust/vendor/num-bigint/Cargo.toml b/rust/vendor/num-bigint/Cargo.toml
new file mode 100644
index 0000000..2656b5f
--- /dev/null
+++ b/rust/vendor/num-bigint/Cargo.toml
@@ -0,0 +1,103 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "num-bigint"
+version = "0.4.4"
+authors = ["The Rust Project Developers"]
+build = "build.rs"
+exclude = [
+ "/bors.toml",
+ "/ci/*",
+ "/.github/*",
+]
+description = "Big integer implementation for Rust"
+homepage = "https://github.com/rust-num/num-bigint"
+documentation = "https://docs.rs/num-bigint"
+readme = "README.md"
+keywords = [
+ "mathematics",
+ "numerics",
+ "bignum",
+]
+categories = [
+ "algorithms",
+ "data-structures",
+ "science",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-num/num-bigint"
+
+[package.metadata.docs.rs]
+features = [
+ "std",
+ "serde",
+ "rand",
+ "quickcheck",
+ "arbitrary",
+]
+
+[[bench]]
+name = "bigint"
+
+[[bench]]
+name = "factorial"
+
+[[bench]]
+name = "gcd"
+
+[[bench]]
+name = "roots"
+
+[[bench]]
+name = "shootout-pidigits"
+harness = false
+
+[dependencies.arbitrary]
+version = "1"
+optional = true
+default-features = false
+
+[dependencies.num-integer]
+version = "0.1.42"
+features = ["i128"]
+default-features = false
+
+[dependencies.num-traits]
+version = "0.2.16"
+features = ["i128"]
+default-features = false
+
+[dependencies.quickcheck]
+version = "1"
+optional = true
+default-features = false
+
+[dependencies.rand]
+version = "0.8"
+optional = true
+default-features = false
+
+[dependencies.serde]
+version = "1.0"
+optional = true
+default-features = false
+
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+default = ["std"]
+std = [
+ "num-integer/std",
+ "num-traits/std",
+]
diff --git a/rust/vendor/num-bigint/LICENSE-APACHE b/rust/vendor/num-bigint/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num-bigint/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num-bigint/LICENSE-MIT b/rust/vendor/num-bigint/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num-bigint/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num-bigint/README.md b/rust/vendor/num-bigint/README.md
new file mode 100644
index 0000000..21f7749
--- /dev/null
+++ b/rust/vendor/num-bigint/README.md
@@ -0,0 +1,84 @@
+# num-bigint
+
+[![crate](https://img.shields.io/crates/v/num-bigint.svg)](https://crates.io/crates/num-bigint)
+[![documentation](https://docs.rs/num-bigint/badge.svg)](https://docs.rs/num-bigint)
+[![minimum rustc 1.31](https://img.shields.io/badge/rustc-1.31+-red.svg)](https://rust-lang.github.io/rfcs/2495-min-rust-version.html)
+[![build status](https://github.com/rust-num/num-bigint/workflows/master/badge.svg)](https://github.com/rust-num/num-bigint/actions)
+
+Big integer types for Rust, `BigInt` and `BigUint`.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-bigint = "0.4"
+```
+
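+With that in place, a minimal program (a sketch for illustration, not part of
+the upstream README) might look like this:
+
+```rust
+use num_bigint::BigUint;
+
+fn main() {
+    // 100! computed with arbitrary precision, using the `Product` impl
+    // for iterators of primitive integers.
+    let f: BigUint = (1u32..=100).product();
+    println!("100! = {}", f);
+}
+```
+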
+## Features
+
+The `std` crate feature is enabled by default, and is mandatory before Rust
+1.36 and the stabilized `alloc` crate. If you depend on `num-bigint` with
+`default-features = false`, you must manually enable the `std` feature if
+your compiler is not new enough.
+
+### Random Generation
+
+`num-bigint` supports the generation of random big integers when the `rand`
+feature is enabled. To enable it, include `rand` as follows:
+
+```toml
+rand = "0.8"
+num-bigint = { version = "0.4", features = ["rand"] }
+```
+
+Note that you must use the version of `rand` that `num-bigint` is compatible
+with: `0.8`.
+
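+As a brief illustration (a sketch, assuming `rand` 0.8 and the `rand` feature
+are enabled as above, and not part of the upstream README), random values can
+then be drawn through the `RandBigInt` trait:
+
+```rust
+use num_bigint::{BigInt, BigUint, RandBigInt};
+
+fn main() {
+    let mut rng = rand::thread_rng();
+    // Uniformly random values with at most the requested number of bits.
+    let unsigned: BigUint = rng.gen_biguint(256);
+    let signed: BigInt = rng.gen_bigint(256);
+    println!("unsigned = {}", unsigned);
+    println!("signed   = {}", signed);
+}
+```
+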
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-bigint` crate is tested for rustc 1.31 and greater.
+
+## Alternatives
+
+While `num-bigint` strives for good performance in pure Rust code, other
+crates may offer better performance with different trade-offs. The following
+table offers a brief comparison to a few alternatives.
+
+| Crate | License | Min rustc | Implementation | Features |
+| :--------------- | :------------- | :-------- | :------------- | :------- |
+| **`num-bigint`** | MIT/Apache-2.0 | 1.31 | pure rust | dynamic width, number theoretical functions |
+| [`awint`] | MIT/Apache-2.0 | 1.66 | pure rust | fixed width, heap or stack, concatenation macros |
+| [`bnum`] | MIT/Apache-2.0 | 1.61 | pure rust | fixed width, parity with Rust primitives including floats |
+| [`crypto-bigint`] | MIT/Apache-2.0 | 1.57 | pure rust | fixed width, stack only |
+| [`ibig`] | MIT/Apache-2.0 | 1.49 | pure rust | dynamic width, number theoretical functions |
+| [`rug`] | LGPL-3.0+ | 1.65 | bundles [GMP] via [`gmp-mpfr-sys`] | all the features of GMP, MPFR, and MPC |
+
+[`awint`]: https://crates.io/crates/awint
+[`bnum`]: https://crates.io/crates/bnum
+[`crypto-bigint`]: https://crates.io/crates/crypto-bigint
+[`ibig`]: https://crates.io/crates/ibig
+[`rug`]: https://crates.io/crates/rug
+
+[GMP]: https://gmplib.org/
+[`gmp-mpfr-sys`]: https://crates.io/crates/gmp-mpfr-sys
+
+## License
+
+Licensed under either of
+
+ * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+ * [MIT license](http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/rust/vendor/num-bigint/RELEASES.md b/rust/vendor/num-bigint/RELEASES.md
new file mode 100644
index 0000000..ad5dd49
--- /dev/null
+++ b/rust/vendor/num-bigint/RELEASES.md
@@ -0,0 +1,319 @@
+# Release 0.4.4 (2023-08-22)
+
+- [Implemented `From<bool>` for `BigInt` and `BigUint`.][239]
+- [Implemented `num_traits::Euclid` and `CheckedEuclid` for `BigInt` and `BigUint`.][245]
+- [Implemented ties-to-even rounding in `to_f32` and `to_f64` for `BigInt` and `BigUint`.][271]
+- [Implemented `num_traits::FromBytes` and `ToBytes` for `BigInt` and `BigUint`.][276]
+- Limited pre-allocation from serde size hints to guard against potential OOM.
+- Miscellaneous other code cleanups and maintenance tasks.
+
+**Contributors**: @AaronKutch, @archseer, @cuviper, @dramforever, @icecream17,
+@icedrocket, @janmarthedal, @jaybosamiya, @OliveIsAWord, @PatrickNorton,
+@smoelius, @waywardmonkeys
+
+[239]: https://github.com/rust-num/num-bigint/pull/239
+[245]: https://github.com/rust-num/num-bigint/pull/245
+[271]: https://github.com/rust-num/num-bigint/pull/271
+[276]: https://github.com/rust-num/num-bigint/pull/276
+
+# Release 0.4.3 (2021-11-02)
+
+- [GHSA-v935-pqmr-g8v9]: [Fix unexpected panics in multiplication.][228]
+
+**Contributors**: @arvidn, @cuviper, @guidovranken
+
+[228]: https://github.com/rust-num/num-bigint/pull/228
+[GHSA-v935-pqmr-g8v9]: https://github.com/rust-num/num-bigint/security/advisories/GHSA-v935-pqmr-g8v9
+
+# Release 0.4.2 (2021-09-03)
+
+- [Use explicit `Integer::div_ceil` to avoid the new unstable method.][219]
+
+**Contributors**: @catenacyber, @cuviper
+
+[219]: https://github.com/rust-num/num-bigint/pull/219
+
+# Release 0.4.1 (2021-08-27)
+
+- [Fixed scalar divide-by-zero panics.][200]
+- [Implemented `DoubleEndedIterator` for `U32Digits` and `U64Digits`.][208]
+- [Optimized multiplication to avoid unnecessary allocations.][199]
+- [Optimized string formatting for very large values.][216]
+
+**Contributors**: @cuviper, @PatrickNorton
+
+[199]: https://github.com/rust-num/num-bigint/pull/199
+[200]: https://github.com/rust-num/num-bigint/pull/200
+[208]: https://github.com/rust-num/num-bigint/pull/208
+[216]: https://github.com/rust-num/num-bigint/pull/216
+
+# Release 0.4.0 (2021-03-05)
+
+### Breaking Changes
+
+- Updated public dependencies on [arbitrary, quickcheck][194], and [rand][185]:
+ - `arbitrary` support has been updated to 1.0, requiring Rust 1.40.
+ - `quickcheck` support has been updated to 1.0, requiring Rust 1.46.
+ - `rand` support has been updated to 0.8, requiring Rust 1.36.
+- [`Debug` now shows plain numeric values for `BigInt` and `BigUint`][195],
+ rather than the raw list of internal digits.
+
+**Contributors**: @cuviper, @Gelbpunkt
+
+[185]: https://github.com/rust-num/num-bigint/pull/185
+[194]: https://github.com/rust-num/num-bigint/pull/194
+[195]: https://github.com/rust-num/num-bigint/pull/195
+
+# Release 0.3.3 (2021-09-03)
+
+- [Use explicit `Integer::div_ceil` to avoid the new unstable method.][219]
+
+**Contributors**: @catenacyber, @cuviper
+
+# Release 0.3.2 (2021-03-04)
+
+- [The new `BigUint` methods `count_ones` and `trailing_ones`][175] return the
+ number of `1` bits in the entire value or just its least-significant tail,
+ respectively.
+- [The new `BigInt` and `BigUint` methods `bit` and `set_bit`][183] will read
+ and write individual bits of the value. For negative `BigInt`, bits are
+ determined as if they were in the two's complement representation.
+- [The `from_radix_le` and `from_radix_be` methods][187] now accept empty
+ buffers to represent zero.
+- [`BigInt` and `BigUint` can now iterate digits as `u32` or `u64`][192],
+ regardless of the actual internal digit size.
+
+**Contributors**: @BartMassey, @cuviper, @janmarthedal, @sebastianv89, @Speedy37
+
+[175]: https://github.com/rust-num/num-bigint/pull/175
+[183]: https://github.com/rust-num/num-bigint/pull/183
+[187]: https://github.com/rust-num/num-bigint/pull/187
+[192]: https://github.com/rust-num/num-bigint/pull/192
+
+# Release 0.3.1 (2020-11-03)
+
+- [Addition and subtraction now use intrinsics][141] for performance on `x86`
+ and `x86_64` when built with Rust 1.33 or later.
+- [Conversions `to_f32` and `to_f64` now return infinity][163] for very large
+ numbers, rather than `None`. This does preserve the sign too, so a large
+ negative `BigInt` will convert to negative infinity.
+- [The optional `arbitrary` feature implements `arbitrary::Arbitrary`][166],
+ distinct from `quickcheck::Arbitrary`.
+- [The division algorithm has been optimized][170] to reduce the number of
+ temporary allocations and improve the internal guesses at each step.
+- [`BigInt` and `BigUint` will opportunistically shrink capacity][171] if the
+ internal vector is much larger than needed.
+
+**Contributors**: @cuviper, @e00E, @ejmahler, @notoria, @tczajka
+
+[141]: https://github.com/rust-num/num-bigint/pull/141
+[163]: https://github.com/rust-num/num-bigint/pull/163
+[166]: https://github.com/rust-num/num-bigint/pull/166
+[170]: https://github.com/rust-num/num-bigint/pull/170
+[171]: https://github.com/rust-num/num-bigint/pull/171
+
+# Release 0.3.0 (2020-06-12)
+
+### Enhancements
+
+- [The internal `BigDigit` may now be either `u32` or `u64`][62], although that
+ implementation detail is not exposed in the API. For now, this is chosen to
+ match the target pointer size, but may change in the future.
+- [No-`std` is now supported with the `alloc` crate on Rust 1.36][101].
+- [`Pow` is now implemented for bigint values][137], not just references.
+- [`TryFrom` is now implemented on Rust 1.34 and later][123], converting signed
+ integers to unsigned, and narrowing big integers to primitives.
+- [`Shl` and `Shr` are now implemented for a variety of shift types][142].
+- A new `trailing_zeros()` returns the number of consecutive zeros from the
+ least significant bit.
+- The new `BigInt::magnitude` and `into_parts` methods give access to its
+ `BigUint` part as the magnitude.
+
+### Breaking Changes
+
+- `num-bigint` now requires Rust 1.31 or greater.
+ - The "i128" opt-in feature was removed, now always available.
+- [Updated public dependencies][110]:
+ - `rand` support has been updated to 0.7, requiring Rust 1.32.
+ - `quickcheck` support has been updated to 0.9, requiring Rust 1.34.
+- [Removed `impl Neg for BigUint`][145], which only ever panicked.
+- [Bit counts are now `u64` instead of `usize`][143].
+
+**Contributors**: @cuviper, @dignifiedquire, @hansihe,
+@kpcyrd, @milesand, @tech6hutch
+
+[62]: https://github.com/rust-num/num-bigint/pull/62
+[101]: https://github.com/rust-num/num-bigint/pull/101
+[110]: https://github.com/rust-num/num-bigint/pull/110
+[123]: https://github.com/rust-num/num-bigint/pull/123
+[137]: https://github.com/rust-num/num-bigint/pull/137
+[142]: https://github.com/rust-num/num-bigint/pull/142
+[143]: https://github.com/rust-num/num-bigint/pull/143
+[145]: https://github.com/rust-num/num-bigint/pull/145
+
+# Release 0.2.6 (2020-01-27)
+
+- [Fix the promotion of negative `isize` in `BigInt` assign-ops][133].
+
+**Contributors**: @cuviper, @HactarCE
+
+[133]: https://github.com/rust-num/num-bigint/pull/133
+
+# Release 0.2.5 (2020-01-09)
+
+- [Updated the `autocfg` build dependency to 1.0][126].
+
+**Contributors**: @cuviper, @tspiteri
+
+[126]: https://github.com/rust-num/num-bigint/pull/126
+
+# Release 0.2.4 (2020-01-01)
+
+- [The new `BigUint::to_u32_digits` method][104] returns the number as a
+ little-endian vector of base-2<sup>32</sup> digits. The same method on
+ `BigInt` also returns the sign.
+- [`BigUint::modpow` now applies a modulus even for exponent 1][113], which
+ also affects `BigInt::modpow`.
+- [`BigInt::modpow` now returns the correct sign for negative bases with even
+ exponents][114].
+
+[104]: https://github.com/rust-num/num-bigint/pull/104
+[113]: https://github.com/rust-num/num-bigint/pull/113
+[114]: https://github.com/rust-num/num-bigint/pull/114
+
+**Contributors**: @alex-ozdemir, @cuviper, @dingelish, @Speedy37, @youknowone
+
+# Release 0.2.3 (2019-09-03)
+
+- [`Pow` is now implemented for `BigUint` exponents][77].
+- [The optional `quickcheck` feature enables implementations of `Arbitrary`][99].
+- See the [full comparison][compare-0.2.3] for performance enhancements and more!
+
+[77]: https://github.com/rust-num/num-bigint/pull/77
+[99]: https://github.com/rust-num/num-bigint/pull/99
+[compare-0.2.3]: https://github.com/rust-num/num-bigint/compare/num-bigint-0.2.2...num-bigint-0.2.3
+
+**Contributors**: @cuviper, @lcnr, @maxbla, @mikelodder7, @mikong,
+@TheLetterTheta, @tspiteri, @XAMPPRocky, @youknowone
+
+# Release 0.2.2 (2018-12-14)
+
+- [The `Roots` implementations now use better initial guesses][71].
+- [Fixed `to_signed_bytes_*` for some positive numbers][72], where the
+ most-significant byte is `0x80` and the rest are `0`.
+
+[71]: https://github.com/rust-num/num-bigint/pull/71
+[72]: https://github.com/rust-num/num-bigint/pull/72
+
+**Contributors**: @cuviper, @leodasvacas
+
+# Release 0.2.1 (2018-11-02)
+
+- [`RandBigInt` now uses `Rng::fill_bytes`][53] to improve performance, instead
+  of repeated `gen::<u32>` calls. This also affects the implementations of the
+ other `rand` traits. This may potentially change the values produced by some
+ seeded RNGs on previous versions, but the values were tested to be stable
+ with `ChaChaRng`, `IsaacRng`, and `XorShiftRng`.
+- [`BigInt` and `BigUint` now implement `num_integer::Roots`][56].
+- [`BigInt` and `BigUint` now implement `num_traits::Pow`][54].
+- [`BigInt` and `BigUint` now implement operators with 128-bit integers][64].
+
+**Contributors**: @cuviper, @dignifiedquire, @mancabizjak, @Robbepop,
+@TheIronBorn, @thomwiggers
+
+[53]: https://github.com/rust-num/num-bigint/pull/53
+[54]: https://github.com/rust-num/num-bigint/pull/54
+[56]: https://github.com/rust-num/num-bigint/pull/56
+[64]: https://github.com/rust-num/num-bigint/pull/64
+
+# Release 0.2.0 (2018-05-25)
+
+### Enhancements
+
+- [`BigInt` and `BigUint` now implement `Product` and `Sum`][22] for iterators
+ of any item that we can `Mul` and `Add`, respectively. For example, a
+ factorial can now be simply: `let f: BigUint = (1u32..1000).product();`
+- [`BigInt` now supports two's-complement logic operations][26], namely
+ `BitAnd`, `BitOr`, `BitXor`, and `Not`. These act conceptually as if each
+ number had an infinite prefix of `0` or `1` bits for positive or negative.
+- [`BigInt` now supports assignment operators][41] like `AddAssign`.
+- [`BigInt` and `BigUint` now support conversions with `i128` and `u128`][44],
+ if sufficient compiler support is detected.
+- [`BigInt` and `BigUint` now implement rand's `SampleUniform` trait][48], and
+ [a custom `RandomBits` distribution samples by bit size][49].
+- The release also includes other miscellaneous improvements to performance.
+
+### Breaking Changes
+
+- [`num-bigint` now requires rustc 1.15 or greater][23].
+- [The crate now has a `std` feature, and won't build without it][46]. This is
+ in preparation for someday supporting `#![no_std]` with `alloc`.
+- [The `serde` dependency has been updated to 1.0][24], still disabled by
+ default. The `rustc-serialize` crate is no longer supported by `num-bigint`.
+- [The `rand` dependency has been updated to 0.5][48], now disabled by default.
+ This requires rustc 1.22 or greater for `rand`'s own requirement.
+- [`Shr for BigInt` now rounds down][8] rather than toward zero, matching the
+ behavior of the primitive integers for negative values.
+- [`ParseBigIntError` is now an opaque type][37].
+- [The `big_digit` module is no longer public][38], nor are the `BigDigit` and
+ `DoubleBigDigit` types and `ZERO_BIG_DIGIT` constant that were re-exported in
+ the crate root. Public APIs which deal in digits, like `BigUint::from_slice`,
+ will now always be base-`u32`.
+
+**Contributors**: @clarcharr, @cuviper, @dodomorandi, @tiehuis, @tspiteri
+
+[8]: https://github.com/rust-num/num-bigint/pull/8
+[22]: https://github.com/rust-num/num-bigint/pull/22
+[23]: https://github.com/rust-num/num-bigint/pull/23
+[24]: https://github.com/rust-num/num-bigint/pull/24
+[26]: https://github.com/rust-num/num-bigint/pull/26
+[37]: https://github.com/rust-num/num-bigint/pull/37
+[38]: https://github.com/rust-num/num-bigint/pull/38
+[41]: https://github.com/rust-num/num-bigint/pull/41
+[44]: https://github.com/rust-num/num-bigint/pull/44
+[46]: https://github.com/rust-num/num-bigint/pull/46
+[48]: https://github.com/rust-num/num-bigint/pull/48
+[49]: https://github.com/rust-num/num-bigint/pull/49
+
+# Release 0.1.44 (2018-05-14)
+
+- [Division with single-digit divisors is now much faster.][42]
+- The README now compares [`ramp`, `rug`, `rust-gmp`][20], and [`apint`][21].
+
+**Contributors**: @cuviper, @Robbepop
+
+[20]: https://github.com/rust-num/num-bigint/pull/20
+[21]: https://github.com/rust-num/num-bigint/pull/21
+[42]: https://github.com/rust-num/num-bigint/pull/42
+
+# Release 0.1.43 (2018-02-08)
+
+- [The new `BigInt::modpow`][18] performs signed modular exponentiation, using
+ the existing `BigUint::modpow` and rounding negatives similar to `mod_floor`.
+
+**Contributors**: @cuviper
+
+[18]: https://github.com/rust-num/num-bigint/pull/18
+
+
+# Release 0.1.42 (2018-02-07)
+
+- [num-bigint now has its own source repository][num-356] at [rust-num/num-bigint][home].
+- [`lcm` now avoids creating a large intermediate product][num-350].
+- [`gcd` now uses Stein's algorithm][15] with faster shifts instead of division.
+- [`rand` support is now extended to 0.4][11] (while still allowing 0.3).
+
+**Contributors**: @cuviper, @Emerentius, @ignatenkobrain, @mhogrefe
+
+[home]: https://github.com/rust-num/num-bigint
+[num-350]: https://github.com/rust-num/num/pull/350
+[num-356]: https://github.com/rust-num/num/pull/356
+[11]: https://github.com/rust-num/num-bigint/pull/11
+[15]: https://github.com/rust-num/num-bigint/pull/15
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
+
diff --git a/rust/vendor/num-bigint/benches/bigint.rs b/rust/vendor/num-bigint/benches/bigint.rs
new file mode 100644
index 0000000..80ec191
--- /dev/null
+++ b/rust/vendor/num-bigint/benches/bigint.rs
@@ -0,0 +1,440 @@
+#![feature(test)]
+#![cfg(feature = "rand")]
+
+extern crate test;
+
+use num_bigint::{BigInt, BigUint, RandBigInt};
+use num_traits::{FromPrimitive, Num, One, Zero};
+use std::mem::replace;
+use test::Bencher;
+
+mod rng;
+use rng::get_rng;
+
+fn multiply_bench(b: &mut Bencher, xbits: u64, ybits: u64) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(xbits);
+ let y = rng.gen_bigint(ybits);
+
+ b.iter(|| &x * &y);
+}
+
+fn divide_bench(b: &mut Bencher, xbits: u64, ybits: u64) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(xbits);
+ let y = rng.gen_bigint(ybits);
+
+ b.iter(|| &x / &y);
+}
+
+fn remainder_bench(b: &mut Bencher, xbits: u64, ybits: u64) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(xbits);
+ let y = rng.gen_bigint(ybits);
+
+ b.iter(|| &x % &y);
+}
+
+fn factorial(n: usize) -> BigUint {
+ let mut f: BigUint = One::one();
+ for i in 1..=n {
+ let bu: BigUint = FromPrimitive::from_usize(i).unwrap();
+ f *= bu;
+ }
+ f
+}
+
+/// Compute Fibonacci numbers
+fn fib(n: usize) -> BigUint {
+ let mut f0: BigUint = Zero::zero();
+ let mut f1: BigUint = One::one();
+ for _ in 0..n {
+ let f2 = f0 + &f1;
+ f0 = replace(&mut f1, f2);
+ }
+ f0
+}
+
+/// Compute Fibonacci numbers with two ops per iteration
+/// (add and subtract, like issue #200)
+fn fib2(n: usize) -> BigUint {
+ let mut f0: BigUint = Zero::zero();
+ let mut f1: BigUint = One::one();
+ for _ in 0..n {
+ f1 += &f0;
+ f0 = &f1 - f0;
+ }
+ f0
+}
+
+#[bench]
+fn multiply_0(b: &mut Bencher) {
+ multiply_bench(b, 1 << 8, 1 << 8);
+}
+
+#[bench]
+fn multiply_1(b: &mut Bencher) {
+ multiply_bench(b, 1 << 8, 1 << 16);
+}
+
+#[bench]
+fn multiply_2(b: &mut Bencher) {
+ multiply_bench(b, 1 << 16, 1 << 16);
+}
+
+#[bench]
+fn multiply_3(b: &mut Bencher) {
+ multiply_bench(b, 1 << 16, 1 << 17);
+}
+
+#[bench]
+fn divide_0(b: &mut Bencher) {
+ divide_bench(b, 1 << 8, 1 << 6);
+}
+
+#[bench]
+fn divide_1(b: &mut Bencher) {
+ divide_bench(b, 1 << 12, 1 << 8);
+}
+
+#[bench]
+fn divide_2(b: &mut Bencher) {
+ divide_bench(b, 1 << 16, 1 << 12);
+}
+
+#[bench]
+fn divide_big_little(b: &mut Bencher) {
+ divide_bench(b, 1 << 16, 1 << 4);
+}
+
+#[bench]
+fn remainder_0(b: &mut Bencher) {
+ remainder_bench(b, 1 << 8, 1 << 6);
+}
+
+#[bench]
+fn remainder_1(b: &mut Bencher) {
+ remainder_bench(b, 1 << 12, 1 << 8);
+}
+
+#[bench]
+fn remainder_2(b: &mut Bencher) {
+ remainder_bench(b, 1 << 16, 1 << 12);
+}
+
+#[bench]
+fn remainder_big_little(b: &mut Bencher) {
+ remainder_bench(b, 1 << 16, 1 << 4);
+}
+
+#[bench]
+fn factorial_100(b: &mut Bencher) {
+ b.iter(|| factorial(100));
+}
+
+#[bench]
+fn fib_100(b: &mut Bencher) {
+ b.iter(|| fib(100));
+}
+
+#[bench]
+fn fib_1000(b: &mut Bencher) {
+ b.iter(|| fib(1000));
+}
+
+#[bench]
+fn fib_10000(b: &mut Bencher) {
+ b.iter(|| fib(10000));
+}
+
+#[bench]
+fn fib2_100(b: &mut Bencher) {
+ b.iter(|| fib2(100));
+}
+
+#[bench]
+fn fib2_1000(b: &mut Bencher) {
+ b.iter(|| fib2(1000));
+}
+
+#[bench]
+fn fib2_10000(b: &mut Bencher) {
+ b.iter(|| fib2(10000));
+}
+
+#[bench]
+fn fac_to_string(b: &mut Bencher) {
+ let fac = factorial(100);
+ b.iter(|| fac.to_string());
+}
+
+#[bench]
+fn fib_to_string(b: &mut Bencher) {
+ let fib = fib(100);
+ b.iter(|| fib.to_string());
+}
+
+fn to_str_radix_bench(b: &mut Bencher, radix: u32, bits: u64) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(bits);
+ b.iter(|| x.to_str_radix(radix));
+}
+
+#[bench]
+fn to_str_radix_02(b: &mut Bencher) {
+ to_str_radix_bench(b, 2, 1009);
+}
+
+#[bench]
+fn to_str_radix_08(b: &mut Bencher) {
+ to_str_radix_bench(b, 8, 1009);
+}
+
+#[bench]
+fn to_str_radix_10(b: &mut Bencher) {
+ to_str_radix_bench(b, 10, 1009);
+}
+
+#[bench]
+fn to_str_radix_10_2(b: &mut Bencher) {
+ to_str_radix_bench(b, 10, 10009);
+}
+
+#[bench]
+fn to_str_radix_16(b: &mut Bencher) {
+ to_str_radix_bench(b, 16, 1009);
+}
+
+#[bench]
+fn to_str_radix_36(b: &mut Bencher) {
+ to_str_radix_bench(b, 36, 1009);
+}
+
+fn from_str_radix_bench(b: &mut Bencher, radix: u32) {
+ let mut rng = get_rng();
+ let x = rng.gen_bigint(1009);
+ let s = x.to_str_radix(radix);
+ assert_eq!(x, BigInt::from_str_radix(&s, radix).unwrap());
+ b.iter(|| BigInt::from_str_radix(&s, radix));
+}
+
+#[bench]
+fn from_str_radix_02(b: &mut Bencher) {
+ from_str_radix_bench(b, 2);
+}
+
+#[bench]
+fn from_str_radix_08(b: &mut Bencher) {
+ from_str_radix_bench(b, 8);
+}
+
+#[bench]
+fn from_str_radix_10(b: &mut Bencher) {
+ from_str_radix_bench(b, 10);
+}
+
+#[bench]
+fn from_str_radix_16(b: &mut Bencher) {
+ from_str_radix_bench(b, 16);
+}
+
+#[bench]
+fn from_str_radix_36(b: &mut Bencher) {
+ from_str_radix_bench(b, 36);
+}
+
+fn rand_bench(b: &mut Bencher, bits: u64) {
+ let mut rng = get_rng();
+
+ b.iter(|| rng.gen_bigint(bits));
+}
+
+#[bench]
+fn rand_64(b: &mut Bencher) {
+ rand_bench(b, 1 << 6);
+}
+
+#[bench]
+fn rand_256(b: &mut Bencher) {
+ rand_bench(b, 1 << 8);
+}
+
+#[bench]
+fn rand_1009(b: &mut Bencher) {
+ rand_bench(b, 1009);
+}
+
+#[bench]
+fn rand_2048(b: &mut Bencher) {
+ rand_bench(b, 1 << 11);
+}
+
+#[bench]
+fn rand_4096(b: &mut Bencher) {
+ rand_bench(b, 1 << 12);
+}
+
+#[bench]
+fn rand_8192(b: &mut Bencher) {
+ rand_bench(b, 1 << 13);
+}
+
+#[bench]
+fn rand_65536(b: &mut Bencher) {
+ rand_bench(b, 1 << 16);
+}
+
+#[bench]
+fn rand_131072(b: &mut Bencher) {
+ rand_bench(b, 1 << 17);
+}
+
+#[bench]
+fn shl(b: &mut Bencher) {
+ let n = BigUint::one() << 1000u32;
+ let mut m = n.clone();
+ b.iter(|| {
+ m.clone_from(&n);
+ for i in 0..50 {
+ m <<= i;
+ }
+ })
+}
+
+#[bench]
+fn shr(b: &mut Bencher) {
+ let n = BigUint::one() << 2000u32;
+ let mut m = n.clone();
+ b.iter(|| {
+ m.clone_from(&n);
+ for i in 0..50 {
+ m >>= i;
+ }
+ })
+}
+
+#[bench]
+fn hash(b: &mut Bencher) {
+ use std::collections::HashSet;
+ let mut rng = get_rng();
+ let v: Vec<BigInt> = (1000..2000).map(|bits| rng.gen_bigint(bits)).collect();
+ b.iter(|| {
+ let h: HashSet<&BigInt> = v.iter().collect();
+ assert_eq!(h.len(), v.len());
+ });
+}
+
+#[bench]
+fn pow_bench(b: &mut Bencher) {
+ b.iter(|| {
+ let upper = 100_u32;
+ let mut i_big = BigUint::from(1u32);
+ for _i in 2..=upper {
+ i_big += 1u32;
+ for j in 2..=upper {
+ i_big.pow(j);
+ }
+ }
+ });
+}
+
+#[bench]
+fn pow_bench_bigexp(b: &mut Bencher) {
+ use num_traits::Pow;
+
+ b.iter(|| {
+ let upper = 100_u32;
+ let mut i_big = BigUint::from(1u32);
+ for _i in 2..=upper {
+ i_big += 1u32;
+ let mut j_big = BigUint::from(1u32);
+ for _j in 2..=upper {
+ j_big += 1u32;
+ Pow::pow(&i_big, &j_big);
+ }
+ }
+ });
+}
+
+#[bench]
+fn pow_bench_1e1000(b: &mut Bencher) {
+ b.iter(|| BigUint::from(10u32).pow(1_000));
+}
+
+#[bench]
+fn pow_bench_1e10000(b: &mut Bencher) {
+ b.iter(|| BigUint::from(10u32).pow(10_000));
+}
+
+#[bench]
+fn pow_bench_1e100000(b: &mut Bencher) {
+ b.iter(|| BigUint::from(10u32).pow(100_000));
+}
+
+/// This modulus is the prime from the 2048-bit MODP DH group:
+/// https://tools.ietf.org/html/rfc3526#section-3
+const RFC3526_2048BIT_MODP_GROUP: &str = "\
+ FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\
+ 29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\
+ EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\
+ E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\
+ EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\
+ C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\
+ 83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\
+ 670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\
+ E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\
+ DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\
+ 15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF";
+
+#[bench]
+fn modpow(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let base = rng.gen_biguint(2048);
+ let e = rng.gen_biguint(2048);
+ let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap();
+
+ b.iter(|| base.modpow(&e, &m));
+}
+
+#[bench]
+fn modpow_even(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let base = rng.gen_biguint(2048);
+ let e = rng.gen_biguint(2048);
+ // Make the modulus even, so monty (base-2^32) doesn't apply.
+ let m = BigUint::from_str_radix(RFC3526_2048BIT_MODP_GROUP, 16).unwrap() - 1u32;
+
+ b.iter(|| base.modpow(&e, &m));
+}
+
+#[bench]
+fn to_u32_digits(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let n = rng.gen_biguint(2048);
+
+ b.iter(|| n.to_u32_digits());
+}
+
+#[bench]
+fn iter_u32_digits(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let n = rng.gen_biguint(2048);
+
+ b.iter(|| n.iter_u32_digits().max());
+}
+
+#[bench]
+fn to_u64_digits(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let n = rng.gen_biguint(2048);
+
+ b.iter(|| n.to_u64_digits());
+}
+
+#[bench]
+fn iter_u64_digits(b: &mut Bencher) {
+ let mut rng = get_rng();
+ let n = rng.gen_biguint(2048);
+
+ b.iter(|| n.iter_u64_digits().max());
+}
diff --git a/rust/vendor/num-bigint/benches/factorial.rs b/rust/vendor/num-bigint/benches/factorial.rs
new file mode 100644
index 0000000..a1e7b3c
--- /dev/null
+++ b/rust/vendor/num-bigint/benches/factorial.rs
@@ -0,0 +1,42 @@
+#![feature(test)]
+
+extern crate test;
+
+use num_bigint::BigUint;
+use num_traits::One;
+use std::ops::{Div, Mul};
+use test::Bencher;
+
+#[bench]
+fn factorial_mul_biguint(b: &mut Bencher) {
+ b.iter(|| {
+ (1u32..1000)
+ .map(BigUint::from)
+ .fold(BigUint::one(), Mul::mul)
+ });
+}
+
+#[bench]
+fn factorial_mul_u32(b: &mut Bencher) {
+ b.iter(|| (1u32..1000).fold(BigUint::one(), Mul::mul));
+}
+
+// The division test is inspired by this blog comparison:
+// <https://tiehuis.github.io/big-integers-in-zig#division-test-single-limb>
+
+#[bench]
+fn factorial_div_biguint(b: &mut Bencher) {
+ let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul);
+ b.iter(|| {
+ (1u32..1000)
+ .rev()
+ .map(BigUint::from)
+ .fold(n.clone(), Div::div)
+ });
+}
+
+#[bench]
+fn factorial_div_u32(b: &mut Bencher) {
+ let n: BigUint = (1u32..1000).fold(BigUint::one(), Mul::mul);
+ b.iter(|| (1u32..1000).rev().fold(n.clone(), Div::div));
+}
diff --git a/rust/vendor/num-bigint/benches/gcd.rs b/rust/vendor/num-bigint/benches/gcd.rs
new file mode 100644
index 0000000..c211b6e
--- /dev/null
+++ b/rust/vendor/num-bigint/benches/gcd.rs
@@ -0,0 +1,76 @@
+#![feature(test)]
+#![cfg(feature = "rand")]
+
+extern crate test;
+
+use num_bigint::{BigUint, RandBigInt};
+use num_integer::Integer;
+use num_traits::Zero;
+use test::Bencher;
+
+mod rng;
+use rng::get_rng;
+
+fn bench(b: &mut Bencher, bits: u64, gcd: fn(&BigUint, &BigUint) -> BigUint) {
+ let mut rng = get_rng();
+ let x = rng.gen_biguint(bits);
+ let y = rng.gen_biguint(bits);
+
+ assert_eq!(euclid(&x, &y), x.gcd(&y));
+
+ b.iter(|| gcd(&x, &y));
+}
+
+fn euclid(x: &BigUint, y: &BigUint) -> BigUint {
+ // Use Euclid's algorithm
+ let mut m = x.clone();
+ let mut n = y.clone();
+ while !m.is_zero() {
+ let temp = m;
+ m = n % &temp;
+ n = temp;
+ }
+ n
+}
+
+#[bench]
+fn gcd_euclid_0064(b: &mut Bencher) {
+ bench(b, 64, euclid);
+}
+
+#[bench]
+fn gcd_euclid_0256(b: &mut Bencher) {
+ bench(b, 256, euclid);
+}
+
+#[bench]
+fn gcd_euclid_1024(b: &mut Bencher) {
+ bench(b, 1024, euclid);
+}
+
+#[bench]
+fn gcd_euclid_4096(b: &mut Bencher) {
+ bench(b, 4096, euclid);
+}
+
+// Integer for BigUint now uses Stein for gcd
+
+#[bench]
+fn gcd_stein_0064(b: &mut Bencher) {
+ bench(b, 64, BigUint::gcd);
+}
+
+#[bench]
+fn gcd_stein_0256(b: &mut Bencher) {
+ bench(b, 256, BigUint::gcd);
+}
+
+#[bench]
+fn gcd_stein_1024(b: &mut Bencher) {
+ bench(b, 1024, BigUint::gcd);
+}
+
+#[bench]
+fn gcd_stein_4096(b: &mut Bencher) {
+ bench(b, 4096, BigUint::gcd);
+}
diff --git a/rust/vendor/num-bigint/benches/rng/mod.rs b/rust/vendor/num-bigint/benches/rng/mod.rs
new file mode 100644
index 0000000..33e4f0f
--- /dev/null
+++ b/rust/vendor/num-bigint/benches/rng/mod.rs
@@ -0,0 +1,38 @@
+use rand::RngCore;
+
+pub(crate) fn get_rng() -> impl RngCore {
+ XorShiftStar {
+ a: 0x0123_4567_89AB_CDEF,
+ }
+}
+
+/// Simple `Rng` for benchmarking without additional dependencies
+struct XorShiftStar {
+ a: u64,
+}
+
+impl RngCore for XorShiftStar {
+ fn next_u32(&mut self) -> u32 {
+ self.next_u64() as u32
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ // https://en.wikipedia.org/wiki/Xorshift#xorshift*
+ self.a ^= self.a >> 12;
+ self.a ^= self.a << 25;
+ self.a ^= self.a >> 27;
+ self.a.wrapping_mul(0x2545_F491_4F6C_DD1D)
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ for chunk in dest.chunks_mut(8) {
+ let bytes = self.next_u64().to_le_bytes();
+ let slice = &bytes[..chunk.len()];
+ chunk.copy_from_slice(slice)
+ }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> {
+ Ok(self.fill_bytes(dest))
+ }
+}
diff --git a/rust/vendor/num-bigint/benches/roots.rs b/rust/vendor/num-bigint/benches/roots.rs
new file mode 100644
index 0000000..7afc4f7
--- /dev/null
+++ b/rust/vendor/num-bigint/benches/roots.rs
@@ -0,0 +1,166 @@
+#![feature(test)]
+#![cfg(feature = "rand")]
+
+extern crate test;
+
+use num_bigint::{BigUint, RandBigInt};
+use test::Bencher;
+
+mod rng;
+use rng::get_rng;
+
+// The `big64` cases demonstrate the speed of cases where the value
+// can be converted to a `u64` primitive for faster calculation.
+//
+// The `big1k` cases demonstrate those that can convert to `f64` for
+// a better initial guess of the actual value.
+//
+// The `big2k` and `big4k` cases are too big for `f64`, and use a simpler guess.
+
+fn check(x: &BigUint, n: u32) {
+ let root = x.nth_root(n);
+ if n == 2 {
+ assert_eq!(root, x.sqrt())
+ } else if n == 3 {
+ assert_eq!(root, x.cbrt())
+ }
+
+ let lo = root.pow(n);
+ assert!(lo <= *x);
+ assert_eq!(lo.nth_root(n), root);
+ assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32);
+
+ let hi = (&root + 1u32).pow(n);
+ assert!(hi > *x);
+ assert_eq!(hi.nth_root(n), &root + 1u32);
+ assert_eq!((&hi - 1u32).nth_root(n), root);
+}
+
+fn bench_sqrt(b: &mut Bencher, bits: u64) {
+ let x = get_rng().gen_biguint(bits);
+ eprintln!("bench_sqrt({})", x);
+
+ check(&x, 2);
+ b.iter(|| x.sqrt());
+}
+
+#[bench]
+fn big64_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 64);
+}
+
+#[bench]
+fn big1k_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 1024);
+}
+
+#[bench]
+fn big2k_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 2048);
+}
+
+#[bench]
+fn big4k_sqrt(b: &mut Bencher) {
+ bench_sqrt(b, 4096);
+}
+
+fn bench_cbrt(b: &mut Bencher, bits: u64) {
+ let x = get_rng().gen_biguint(bits);
+ eprintln!("bench_cbrt({})", x);
+
+ check(&x, 3);
+ b.iter(|| x.cbrt());
+}
+
+#[bench]
+fn big64_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 64);
+}
+
+#[bench]
+fn big1k_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 1024);
+}
+
+#[bench]
+fn big2k_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 2048);
+}
+
+#[bench]
+fn big4k_cbrt(b: &mut Bencher) {
+ bench_cbrt(b, 4096);
+}
+
+fn bench_nth_root(b: &mut Bencher, bits: u64, n: u32) {
+ let x = get_rng().gen_biguint(bits);
+ eprintln!("bench_{}th_root({})", n, x);
+
+ check(&x, n);
+ b.iter(|| x.nth_root(n));
+}
+
+#[bench]
+fn big64_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 64, 10);
+}
+
+#[bench]
+fn big1k_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 10);
+}
+
+#[bench]
+fn big1k_nth_100(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 100);
+}
+
+#[bench]
+fn big1k_nth_1000(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 1000);
+}
+
+#[bench]
+fn big1k_nth_10000(b: &mut Bencher) {
+ bench_nth_root(b, 1024, 10000);
+}
+
+#[bench]
+fn big2k_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 10);
+}
+
+#[bench]
+fn big2k_nth_100(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 100);
+}
+
+#[bench]
+fn big2k_nth_1000(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 1000);
+}
+
+#[bench]
+fn big2k_nth_10000(b: &mut Bencher) {
+ bench_nth_root(b, 2048, 10000);
+}
+
+#[bench]
+fn big4k_nth_10(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 10);
+}
+
+#[bench]
+fn big4k_nth_100(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 100);
+}
+
+#[bench]
+fn big4k_nth_1000(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 1000);
+}
+
+#[bench]
+fn big4k_nth_10000(b: &mut Bencher) {
+ bench_nth_root(b, 4096, 10000);
+}
diff --git a/rust/vendor/num-bigint/benches/shootout-pidigits.rs b/rust/vendor/num-bigint/benches/shootout-pidigits.rs
new file mode 100644
index 0000000..b95d42c
--- /dev/null
+++ b/rust/vendor/num-bigint/benches/shootout-pidigits.rs
@@ -0,0 +1,138 @@
+// The Computer Language Benchmarks Game
+// http://benchmarksgame.alioth.debian.org/
+//
+// contributed by the Rust Project Developers
+
+// Copyright (c) 2013-2014 The Rust Project Developers
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// - Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of "The Computer Language Benchmarks Game" nor
+// the name of "The Computer Language Shootout Benchmarks" nor the
+// names of its contributors may be used to endorse or promote
+// products derived from this software without specific prior
+// written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use std::io;
+use std::str::FromStr;
+
+use num_bigint::BigInt;
+use num_integer::Integer;
+use num_traits::{FromPrimitive, One, ToPrimitive, Zero};
+
+struct Context {
+ numer: BigInt,
+ accum: BigInt,
+ denom: BigInt,
+}
+
+impl Context {
+ fn new() -> Context {
+ Context {
+ numer: One::one(),
+ accum: Zero::zero(),
+ denom: One::one(),
+ }
+ }
+
+ fn from_i32(i: i32) -> BigInt {
+ FromPrimitive::from_i32(i).unwrap()
+ }
+
+ fn extract_digit(&self) -> i32 {
+ if self.numer > self.accum {
+ return -1;
+ }
+ let (q, r) = (&self.numer * Context::from_i32(3) + &self.accum).div_rem(&self.denom);
+ if r + &self.numer >= self.denom {
+ return -1;
+ }
+ q.to_i32().unwrap()
+ }
+
+ fn next_term(&mut self, k: i32) {
+ let y2 = Context::from_i32(k * 2 + 1);
+ self.accum = (&self.accum + (&self.numer << 1)) * &y2;
+ self.numer = &self.numer * Context::from_i32(k);
+ self.denom = &self.denom * y2;
+ }
+
+ fn eliminate_digit(&mut self, d: i32) {
+ let d = Context::from_i32(d);
+ let ten = Context::from_i32(10);
+ self.accum = (&self.accum - &self.denom * d) * &ten;
+ self.numer = &self.numer * ten;
+ }
+}
+
+fn pidigits(n: isize, out: &mut dyn io::Write) -> io::Result<()> {
+ let mut k = 0;
+ let mut context = Context::new();
+
+ for i in 1..=n {
+ let mut d;
+ loop {
+ k += 1;
+ context.next_term(k);
+ d = context.extract_digit();
+ if d != -1 {
+ break;
+ }
+ }
+
+ write!(out, "{}", d)?;
+ if i % 10 == 0 {
+ writeln!(out, "\t:{}", i)?;
+ }
+
+ context.eliminate_digit(d);
+ }
+
+ let m = n % 10;
+ if m != 0 {
+ for _ in m..10 {
+ write!(out, " ")?;
+ }
+ writeln!(out, "\t:{}", n)?;
+ }
+ Ok(())
+}
+
+const DEFAULT_DIGITS: isize = 512;
+
+fn main() {
+ let args = std::env::args().collect::<Vec<_>>();
+ let n = if args.len() < 2 {
+ DEFAULT_DIGITS
+ } else if args[1] == "--bench" {
+ return pidigits(DEFAULT_DIGITS, &mut std::io::sink()).unwrap();
+ } else {
+ FromStr::from_str(&args[1]).unwrap()
+ };
+ pidigits(n, &mut std::io::stdout()).unwrap();
+}
diff --git a/rust/vendor/num-bigint/build.rs b/rust/vendor/num-bigint/build.rs
new file mode 100644
index 0000000..5d5406c
--- /dev/null
+++ b/rust/vendor/num-bigint/build.rs
@@ -0,0 +1,94 @@
+use std::env;
+use std::error::Error;
+use std::fs::File;
+use std::io::Write;
+use std::path::Path;
+
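+// Build script: pick the big-digit ("limb") width from the target pointer width, probe
+// for `TryFrom` and the x86 add-carry intrinsics, and generate the radix base tables.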
+fn main() {
+ let ptr_width = env::var("CARGO_CFG_TARGET_POINTER_WIDTH");
+ let u64_digit = ptr_width
+ .as_ref()
+ .map(|x| x == "64" || x == "128")
+ .unwrap_or(false);
+
+ if u64_digit {
+ autocfg::emit("u64_digit");
+ }
+
+ let ac = autocfg::new();
+ let std = if ac.probe_sysroot_crate("std") {
+ "std"
+ } else {
+ "core"
+ };
+
+ if ac.probe_path(&format!("{}::convert::TryFrom", std)) {
+ autocfg::emit("has_try_from");
+ }
+
+ if let Ok(arch) = env::var("CARGO_CFG_TARGET_ARCH") {
+ if arch == "x86_64" || arch == "x86" {
+ let digit = if u64_digit { "u64" } else { "u32" };
+
+ let addcarry = format!("{}::arch::{}::_addcarry_{}", std, arch, digit);
+ if ac.probe_path(&addcarry) {
+ autocfg::emit("use_addcarry");
+ }
+ }
+ }
+
+ autocfg::rerun_path("build.rs");
+
+ write_radix_bases().unwrap();
+}
+
+/// Write tables of the greatest power of each radix that fits in each supported digit
+/// size (16, 32, and 64 bits). These are returned from `biguint::get_radix_base` to batch
+/// the multiplication/division of radix conversions on full `BigUint` values, operating
+/// on primitive integers as much as possible.
+///
+/// e.g. BASES_16[3] = (59049, 10) // 3¹⁰ fits in u16, but 3¹¹ is too big
+/// BASES_32[3] = (3486784401, 20)
+/// BASES_64[3] = (12157665459056928801, 40)
+///
+/// Powers of two are not included, just zeroed, as they're implemented with shifts.
+fn write_radix_bases() -> Result<(), Box<dyn Error>> {
+ let out_dir = env::var("OUT_DIR")?;
+ let dest_path = Path::new(&out_dir).join("radix_bases.rs");
+ let mut f = File::create(&dest_path)?;
+
+ for &bits in &[16, 32, 64] {
+ let max = if bits < 64 {
+ (1 << bits) - 1
+ } else {
+ std::u64::MAX
+ };
+
+ writeln!(f, "#[deny(overflowing_literals)]")?;
+ writeln!(
+ f,
+ "pub(crate) static BASES_{bits}: [(u{bits}, usize); 257] = [",
+ bits = bits
+ )?;
+ for radix in 0u64..257 {
+ let (base, power) = if radix == 0 || radix.is_power_of_two() {
+ (0, 0)
+ } else {
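+                // Find the largest power of `radix` that still fits in the u{bits} digit type.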
+ let mut power = 1;
+ let mut base = radix;
+
+ while let Some(b) = base.checked_mul(radix) {
+ if b > max {
+ break;
+ }
+ base = b;
+ power += 1;
+ }
+ (base, power)
+ };
+ writeln!(f, " ({}, {}), // {}", base, power, radix)?;
+ }
+ writeln!(f, "];")?;
+ }
+
+ Ok(())
+}
diff --git a/rust/vendor/num-bigint/src/bigint.rs b/rust/vendor/num-bigint/src/bigint.rs
new file mode 100644
index 0000000..97faa83
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint.rs
@@ -0,0 +1,1171 @@
+// `Add`/`Sub` ops may flip from `BigInt` to its `BigUint` magnitude
+#![allow(clippy::suspicious_arithmetic_impl)]
+
+use crate::std_alloc::{String, Vec};
+use core::cmp::Ordering::{self, Equal};
+use core::default::Default;
+use core::fmt;
+use core::hash;
+use core::ops::{Neg, Not};
+use core::str;
+use core::{i128, u128};
+use core::{i64, u64};
+
+use num_integer::{Integer, Roots};
+use num_traits::{Num, One, Pow, Signed, Zero};
+
+use self::Sign::{Minus, NoSign, Plus};
+
+use crate::big_digit::BigDigit;
+use crate::biguint::to_str_radix_reversed;
+use crate::biguint::{BigUint, IntDigits, U32Digits, U64Digits};
+
+mod addition;
+mod division;
+mod multiplication;
+mod subtraction;
+
+mod bits;
+mod convert;
+mod power;
+mod shift;
+
+#[cfg(any(feature = "quickcheck", feature = "arbitrary"))]
+mod arbitrary;
+
+#[cfg(feature = "serde")]
+mod serde;
+
+/// A `Sign` is one of the two components of a [`BigInt`]; it records whether the
+/// value is negative, zero, or positive.
+#[derive(PartialEq, PartialOrd, Eq, Ord, Copy, Clone, Debug, Hash)]
+pub enum Sign {
+ Minus,
+ NoSign,
+ Plus,
+}
+
+impl Neg for Sign {
+ type Output = Sign;
+
+ /// Negate `Sign` value.
+ #[inline]
+ fn neg(self) -> Sign {
+ match self {
+ Minus => Plus,
+ NoSign => NoSign,
+ Plus => Minus,
+ }
+ }
+}
+
+/// A big signed integer type.
+pub struct BigInt {
+ sign: Sign,
+ data: BigUint,
+}
+
+// Note: derived `Clone` doesn't specialize `clone_from`,
+// but we want to keep the allocation in `data`.
+impl Clone for BigInt {
+ #[inline]
+ fn clone(&self) -> Self {
+ BigInt {
+ sign: self.sign,
+ data: self.data.clone(),
+ }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, other: &Self) {
+ self.sign = other.sign;
+ self.data.clone_from(&other.data);
+ }
+}
+
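+// Invariant, checked by the `debug_assert!`s below: `sign == NoSign` holds exactly
+// when `data` is zero, so zero always has a single canonical representation.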
+impl hash::Hash for BigInt {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ debug_assert!((self.sign != NoSign) ^ self.data.is_zero());
+ self.sign.hash(state);
+ if self.sign != NoSign {
+ self.data.hash(state);
+ }
+ }
+}
+
+impl PartialEq for BigInt {
+ #[inline]
+ fn eq(&self, other: &BigInt) -> bool {
+ debug_assert!((self.sign != NoSign) ^ self.data.is_zero());
+ debug_assert!((other.sign != NoSign) ^ other.data.is_zero());
+ self.sign == other.sign && (self.sign == NoSign || self.data == other.data)
+ }
+}
+
+impl Eq for BigInt {}
+
+impl PartialOrd for BigInt {
+ #[inline]
+ fn partial_cmp(&self, other: &BigInt) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for BigInt {
+ #[inline]
+ fn cmp(&self, other: &BigInt) -> Ordering {
+ debug_assert!((self.sign != NoSign) ^ self.data.is_zero());
+ debug_assert!((other.sign != NoSign) ^ other.data.is_zero());
+ let scmp = self.sign.cmp(&other.sign);
+ if scmp != Equal {
+ return scmp;
+ }
+
+ match self.sign {
+ NoSign => Equal,
+ Plus => self.data.cmp(&other.data),
+ Minus => other.data.cmp(&self.data),
+ }
+ }
+}
+
+impl Default for BigInt {
+ #[inline]
+ fn default() -> BigInt {
+ Zero::zero()
+ }
+}
+
+impl fmt::Debug for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl fmt::Display for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "", &self.data.to_str_radix(10))
+ }
+}
+
+impl fmt::Binary for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0b", &self.data.to_str_radix(2))
+ }
+}
+
+impl fmt::Octal for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0o", &self.data.to_str_radix(8))
+ }
+}
+
+impl fmt::LowerHex for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0x", &self.data.to_str_radix(16))
+ }
+}
+
+impl fmt::UpperHex for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut s = self.data.to_str_radix(16);
+ s.make_ascii_uppercase();
+ f.pad_integral(!self.is_negative(), "0x", &s)
+ }
+}
+
+// !-2 = !...f fe = ...0 01 = +1
+// !-1 = !...f ff = ...0 00 = 0
+// ! 0 = !...0 00 = ...f ff = -1
+// !+1 = !...0 01 = ...f fe = -2
+impl Not for BigInt {
+ type Output = BigInt;
+
+ fn not(mut self) -> BigInt {
+ match self.sign {
+ NoSign | Plus => {
+ self.data += 1u32;
+ self.sign = Minus;
+ }
+ Minus => {
+ self.data -= 1u32;
+ self.sign = if self.data.is_zero() { NoSign } else { Plus };
+ }
+ }
+ self
+ }
+}
+
+impl Not for &BigInt {
+ type Output = BigInt;
+
+ fn not(self) -> BigInt {
+ match self.sign {
+ NoSign => -BigInt::one(),
+ Plus => -BigInt::from(&self.data + 1u32),
+ Minus => BigInt::from(&self.data - 1u32),
+ }
+ }
+}
+
+impl Zero for BigInt {
+ #[inline]
+ fn zero() -> BigInt {
+ BigInt {
+ sign: NoSign,
+ data: BigUint::zero(),
+ }
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.data.set_zero();
+ self.sign = NoSign;
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.sign == NoSign
+ }
+}
+
+impl One for BigInt {
+ #[inline]
+ fn one() -> BigInt {
+ BigInt {
+ sign: Plus,
+ data: BigUint::one(),
+ }
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.data.set_one();
+ self.sign = Plus;
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.sign == Plus && self.data.is_one()
+ }
+}
+
+impl Signed for BigInt {
+ #[inline]
+ fn abs(&self) -> BigInt {
+ match self.sign {
+ Plus | NoSign => self.clone(),
+ Minus => BigInt::from(self.data.clone()),
+ }
+ }
+
+ #[inline]
+ fn abs_sub(&self, other: &BigInt) -> BigInt {
+ if *self <= *other {
+ Zero::zero()
+ } else {
+ self - other
+ }
+ }
+
+ #[inline]
+ fn signum(&self) -> BigInt {
+ match self.sign {
+ Plus => BigInt::one(),
+ Minus => -BigInt::one(),
+ NoSign => BigInt::zero(),
+ }
+ }
+
+ #[inline]
+ fn is_positive(&self) -> bool {
+ self.sign == Plus
+ }
+
+ #[inline]
+ fn is_negative(&self) -> bool {
+ self.sign == Minus
+ }
+}
+
+trait UnsignedAbs {
+ type Unsigned;
+
+    /// A convenience method for getting the absolute value of a signed primitive as unsigned.
+ /// See also `unsigned_abs`: <https://github.com/rust-lang/rust/issues/74913>
+ fn uabs(self) -> Self::Unsigned;
+
+ fn checked_uabs(self) -> CheckedUnsignedAbs<Self::Unsigned>;
+}
+
+enum CheckedUnsignedAbs<T> {
+ Positive(T),
+ Negative(T),
+}
+use self::CheckedUnsignedAbs::{Negative, Positive};
+
+macro_rules! impl_unsigned_abs {
+ ($Signed:ty, $Unsigned:ty) => {
+ impl UnsignedAbs for $Signed {
+ type Unsigned = $Unsigned;
+
+ #[inline]
+ fn uabs(self) -> $Unsigned {
+ self.wrapping_abs() as $Unsigned
+ }
+
+ #[inline]
+ fn checked_uabs(self) -> CheckedUnsignedAbs<Self::Unsigned> {
+ if self >= 0 {
+ Positive(self as $Unsigned)
+ } else {
+ Negative(self.wrapping_neg() as $Unsigned)
+ }
+ }
+ }
+ };
+}
+impl_unsigned_abs!(i8, u8);
+impl_unsigned_abs!(i16, u16);
+impl_unsigned_abs!(i32, u32);
+impl_unsigned_abs!(i64, u64);
+impl_unsigned_abs!(i128, u128);
+impl_unsigned_abs!(isize, usize);
+
+impl Neg for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn neg(mut self) -> BigInt {
+ self.sign = -self.sign;
+ self
+ }
+}
+
+impl Neg for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn neg(self) -> BigInt {
+ -self.clone()
+ }
+}
+
+impl Integer for BigInt {
+ #[inline]
+ fn div_rem(&self, other: &BigInt) -> (BigInt, BigInt) {
+ // r.sign == self.sign
+ let (d_ui, r_ui) = self.data.div_rem(&other.data);
+ let d = BigInt::from_biguint(self.sign, d_ui);
+ let r = BigInt::from_biguint(self.sign, r_ui);
+ if other.is_negative() {
+ (-d, r)
+ } else {
+ (d, r)
+ }
+ }
+
+ #[inline]
+ fn div_floor(&self, other: &BigInt) -> BigInt {
+ let (d_ui, m) = self.data.div_mod_floor(&other.data);
+ let d = BigInt::from(d_ui);
+ match (self.sign, other.sign) {
+ (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => d,
+ (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => {
+ if m.is_zero() {
+ -d
+ } else {
+ -d - 1u32
+ }
+ }
+ (_, NoSign) => unreachable!(),
+ }
+ }
+
+ #[inline]
+ fn mod_floor(&self, other: &BigInt) -> BigInt {
+ // m.sign == other.sign
+ let m_ui = self.data.mod_floor(&other.data);
+ let m = BigInt::from_biguint(other.sign, m_ui);
+ match (self.sign, other.sign) {
+ (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => m,
+ (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => {
+ if m.is_zero() {
+ m
+ } else {
+ other - m
+ }
+ }
+ (_, NoSign) => unreachable!(),
+ }
+ }
+
+ fn div_mod_floor(&self, other: &BigInt) -> (BigInt, BigInt) {
+ // m.sign == other.sign
+ let (d_ui, m_ui) = self.data.div_mod_floor(&other.data);
+ let d = BigInt::from(d_ui);
+ let m = BigInt::from_biguint(other.sign, m_ui);
+ match (self.sign, other.sign) {
+ (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => (d, m),
+ (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => {
+ if m.is_zero() {
+ (-d, m)
+ } else {
+ (-d - 1u32, other - m)
+ }
+ }
+ (_, NoSign) => unreachable!(),
+ }
+ }
+
+ #[inline]
+ fn div_ceil(&self, other: &Self) -> Self {
+ let (d_ui, m) = self.data.div_mod_floor(&other.data);
+ let d = BigInt::from(d_ui);
+ match (self.sign, other.sign) {
+ (Plus, Minus) | (NoSign, Minus) | (Minus, Plus) => -d,
+ (Plus, Plus) | (NoSign, Plus) | (Minus, Minus) => {
+ if m.is_zero() {
+ d
+ } else {
+ d + 1u32
+ }
+ }
+ (_, NoSign) => unreachable!(),
+ }
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and `other`.
+ ///
+ /// The result is always positive.
+ #[inline]
+ fn gcd(&self, other: &BigInt) -> BigInt {
+ BigInt::from(self.data.gcd(&other.data))
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn lcm(&self, other: &BigInt) -> BigInt {
+ BigInt::from(self.data.lcm(&other.data))
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) and
+ /// Lowest Common Multiple (LCM) together.
+ #[inline]
+ fn gcd_lcm(&self, other: &BigInt) -> (BigInt, BigInt) {
+ let (gcd, lcm) = self.data.gcd_lcm(&other.data);
+ (BigInt::from(gcd), BigInt::from(lcm))
+ }
+
+ /// Greatest common divisor, least common multiple, and Bézout coefficients.
+ #[inline]
+ fn extended_gcd_lcm(&self, other: &BigInt) -> (num_integer::ExtendedGcd<BigInt>, BigInt) {
+ let egcd = self.extended_gcd(other);
+ let lcm = if egcd.gcd.is_zero() {
+ BigInt::zero()
+ } else {
+ BigInt::from(&self.data / &egcd.gcd.data * &other.data)
+ };
+ (egcd, lcm)
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &BigInt) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &BigInt) -> bool {
+ self.data.is_multiple_of(&other.data)
+ }
+
+ /// Returns `true` if the number is divisible by `2`.
+ #[inline]
+ fn is_even(&self) -> bool {
+ self.data.is_even()
+ }
+
+ /// Returns `true` if the number is not divisible by `2`.
+ #[inline]
+ fn is_odd(&self) -> bool {
+ self.data.is_odd()
+ }
+
+    /// Rounds up to the nearest multiple of the argument.
+ #[inline]
+ fn next_multiple_of(&self, other: &Self) -> Self {
+ let m = self.mod_floor(other);
+ if m.is_zero() {
+ self.clone()
+ } else {
+ self + (other - m)
+ }
+    }
+
+    /// Rounds down to the nearest multiple of the argument.
+ #[inline]
+ fn prev_multiple_of(&self, other: &Self) -> Self {
+ self - self.mod_floor(other)
+ }
+}
+
+impl Roots for BigInt {
+ fn nth_root(&self, n: u32) -> Self {
+ assert!(
+ !(self.is_negative() && n.is_even()),
+ "root of degree {} is imaginary",
+ n
+ );
+
+ BigInt::from_biguint(self.sign, self.data.nth_root(n))
+ }
+
+ fn sqrt(&self) -> Self {
+ assert!(!self.is_negative(), "square root is imaginary");
+
+ BigInt::from_biguint(self.sign, self.data.sqrt())
+ }
+
+ fn cbrt(&self) -> Self {
+ BigInt::from_biguint(self.sign, self.data.cbrt())
+ }
+}
+
+impl IntDigits for BigInt {
+ #[inline]
+ fn digits(&self) -> &[BigDigit] {
+ self.data.digits()
+ }
+ #[inline]
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
+ self.data.digits_mut()
+ }
+ #[inline]
+ fn normalize(&mut self) {
+ self.data.normalize();
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len()
+ }
+}
+
+/// A generic trait for converting a value to a [`BigInt`]. This may return
+/// `None` when converting from `f32` or `f64`, and will always succeed
+/// when converting from any primitive integer type, or from [`BigUint`].
+pub trait ToBigInt {
+ /// Converts the value of `self` to a [`BigInt`].
+ fn to_bigint(&self) -> Option<BigInt>;
+}
+
+impl BigInt {
+ /// Creates and initializes a [`BigInt`].
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn new(sign: Sign, digits: Vec<u32>) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::new(digits))
+ }
+
+ /// Creates and initializes a [`BigInt`].
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn from_biguint(mut sign: Sign, mut data: BigUint) -> BigInt {
+ if sign == NoSign {
+ data.assign_from_slice(&[]);
+ } else if data.is_zero() {
+ sign = NoSign;
+ }
+
+ BigInt { sign, data }
+ }
+
+ /// Creates and initializes a [`BigInt`].
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn from_slice(sign: Sign, slice: &[u32]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_slice(slice))
+ }
+
+ /// Reinitializes a [`BigInt`].
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn assign_from_slice(&mut self, sign: Sign, slice: &[u32]) {
+ if sign == NoSign {
+ self.set_zero();
+ } else {
+ self.data.assign_from_slice(slice);
+ self.sign = if self.data.is_zero() { NoSign } else { sign };
+ }
+ }
+
+ /// Creates and initializes a [`BigInt`].
+ ///
+ /// The bytes are in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"A"),
+ /// BigInt::parse_bytes(b"65", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AA"),
+ /// BigInt::parse_bytes(b"16705", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AB"),
+ /// BigInt::parse_bytes(b"16706", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"Hello world!"),
+ /// BigInt::parse_bytes(b"22405534230753963835153736737", 10).unwrap());
+ /// ```
+ #[inline]
+ pub fn from_bytes_be(sign: Sign, bytes: &[u8]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(bytes))
+ }
+
+ /// Creates and initializes a [`BigInt`].
+ ///
+ /// The bytes are in little-endian byte order.
+ #[inline]
+ pub fn from_bytes_le(sign: Sign, bytes: &[u8]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(bytes))
+ }
+
+ /// Creates and initializes a [`BigInt`] from an array of bytes in
+ /// two's complement binary representation.
+ ///
+ /// The digits are in big-endian base 2<sup>8</sup>.
+ #[inline]
+ pub fn from_signed_bytes_be(digits: &[u8]) -> BigInt {
+ convert::from_signed_bytes_be(digits)
+ }
+
+ /// Creates and initializes a [`BigInt`] from an array of bytes in two's complement.
+ ///
+ /// The digits are in little-endian base 2<sup>8</sup>.
+ #[inline]
+ pub fn from_signed_bytes_le(digits: &[u8]) -> BigInt {
+ convert::from_signed_bytes_le(digits)
+ }
+
+ /// Creates and initializes a [`BigInt`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, ToBigInt};
+ ///
+ /// assert_eq!(BigInt::parse_bytes(b"1234", 10), ToBigInt::to_bigint(&1234));
+ /// assert_eq!(BigInt::parse_bytes(b"ABCD", 16), ToBigInt::to_bigint(&0xABCD));
+ /// assert_eq!(BigInt::parse_bytes(b"G", 16), None);
+ /// ```
+ #[inline]
+ pub fn parse_bytes(buf: &[u8], radix: u32) -> Option<BigInt> {
+ let s = str::from_utf8(buf).ok()?;
+ BigInt::from_str_radix(s, radix).ok()
+ }
+
+ /// Creates and initializes a [`BigInt`]. Each `u8` of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in big-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// let inbase190 = vec![15, 33, 125, 12, 14];
+ /// let a = BigInt::from_radix_be(Sign::Minus, &inbase190, 190).unwrap();
+    /// assert_eq!(a.to_radix_be(190), (Sign::Minus, inbase190));
+ /// ```
+ pub fn from_radix_be(sign: Sign, buf: &[u8], radix: u32) -> Option<BigInt> {
+ let u = BigUint::from_radix_be(buf, radix)?;
+ Some(BigInt::from_biguint(sign, u))
+ }
+
+ /// Creates and initializes a [`BigInt`]. Each `u8` of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in little-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// let inbase190 = vec![14, 12, 125, 33, 15];
+    /// let a = BigInt::from_radix_le(Sign::Minus, &inbase190, 190).unwrap();
+    /// assert_eq!(a.to_radix_le(190), (Sign::Minus, inbase190));
+ /// ```
+ pub fn from_radix_le(sign: Sign, buf: &[u8], radix: u32) -> Option<BigInt> {
+ let u = BigUint::from_radix_le(buf, radix)?;
+ Some(BigInt::from_biguint(sign, u))
+ }
+
+ /// Returns the sign and the byte representation of the [`BigInt`] in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_bytes_be(), (Sign::Minus, vec![4, 101]));
+ /// ```
+ #[inline]
+ pub fn to_bytes_be(&self) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_bytes_be())
+ }
+
+ /// Returns the sign and the byte representation of the [`BigInt`] in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_bytes_le(), (Sign::Minus, vec![101, 4]));
+ /// ```
+ #[inline]
+ pub fn to_bytes_le(&self) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_bytes_le())
+ }
+
+ /// Returns the sign and the `u32` digits representation of the [`BigInt`] ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-1125).to_u32_digits(), (Sign::Minus, vec![1125]));
+ /// assert_eq!(BigInt::from(4294967295u32).to_u32_digits(), (Sign::Plus, vec![4294967295]));
+ /// assert_eq!(BigInt::from(4294967296u64).to_u32_digits(), (Sign::Plus, vec![0, 1]));
+ /// assert_eq!(BigInt::from(-112500000000i64).to_u32_digits(), (Sign::Minus, vec![830850304, 26]));
+ /// assert_eq!(BigInt::from(112500000000i64).to_u32_digits(), (Sign::Plus, vec![830850304, 26]));
+ /// ```
+ #[inline]
+ pub fn to_u32_digits(&self) -> (Sign, Vec<u32>) {
+ (self.sign, self.data.to_u32_digits())
+ }
+
+ /// Returns the sign and the `u64` digits representation of the [`BigInt`] ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-1125).to_u64_digits(), (Sign::Minus, vec![1125]));
+ /// assert_eq!(BigInt::from(4294967295u32).to_u64_digits(), (Sign::Plus, vec![4294967295]));
+ /// assert_eq!(BigInt::from(4294967296u64).to_u64_digits(), (Sign::Plus, vec![4294967296]));
+ /// assert_eq!(BigInt::from(-112500000000i64).to_u64_digits(), (Sign::Minus, vec![112500000000]));
+ /// assert_eq!(BigInt::from(112500000000i64).to_u64_digits(), (Sign::Plus, vec![112500000000]));
+ /// assert_eq!(BigInt::from(1u128 << 64).to_u64_digits(), (Sign::Plus, vec![0, 1]));
+ /// ```
+ #[inline]
+ pub fn to_u64_digits(&self) -> (Sign, Vec<u64>) {
+ (self.sign, self.data.to_u64_digits())
+ }
+
+    /// Returns an iterator over the `u32` digits of the [`BigInt`], ordered least
+    /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigInt;
+ ///
+ /// assert_eq!(BigInt::from(-1125).iter_u32_digits().collect::<Vec<u32>>(), vec![1125]);
+ /// assert_eq!(BigInt::from(4294967295u32).iter_u32_digits().collect::<Vec<u32>>(), vec![4294967295]);
+ /// assert_eq!(BigInt::from(4294967296u64).iter_u32_digits().collect::<Vec<u32>>(), vec![0, 1]);
+ /// assert_eq!(BigInt::from(-112500000000i64).iter_u32_digits().collect::<Vec<u32>>(), vec![830850304, 26]);
+ /// assert_eq!(BigInt::from(112500000000i64).iter_u32_digits().collect::<Vec<u32>>(), vec![830850304, 26]);
+ /// ```
+ #[inline]
+ pub fn iter_u32_digits(&self) -> U32Digits<'_> {
+ self.data.iter_u32_digits()
+ }
+
+    /// Returns an iterator over the `u64` digits of the [`BigInt`], ordered least
+    /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigInt;
+ ///
+ /// assert_eq!(BigInt::from(-1125).iter_u64_digits().collect::<Vec<u64>>(), vec![1125u64]);
+ /// assert_eq!(BigInt::from(4294967295u32).iter_u64_digits().collect::<Vec<u64>>(), vec![4294967295u64]);
+ /// assert_eq!(BigInt::from(4294967296u64).iter_u64_digits().collect::<Vec<u64>>(), vec![4294967296u64]);
+ /// assert_eq!(BigInt::from(-112500000000i64).iter_u64_digits().collect::<Vec<u64>>(), vec![112500000000u64]);
+ /// assert_eq!(BigInt::from(112500000000i64).iter_u64_digits().collect::<Vec<u64>>(), vec![112500000000u64]);
+ /// assert_eq!(BigInt::from(1u128 << 64).iter_u64_digits().collect::<Vec<u64>>(), vec![0, 1]);
+ /// ```
+ #[inline]
+ pub fn iter_u64_digits(&self) -> U64Digits<'_> {
+ self.data.iter_u64_digits()
+ }
+
+ /// Returns the two's-complement byte representation of the [`BigInt`] in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::ToBigInt;
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_signed_bytes_be(), vec![251, 155]);
+ /// ```
+ #[inline]
+ pub fn to_signed_bytes_be(&self) -> Vec<u8> {
+ convert::to_signed_bytes_be(self)
+ }
+
+ /// Returns the two's-complement byte representation of the [`BigInt`] in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::ToBigInt;
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_signed_bytes_le(), vec![155, 251]);
+ /// ```
+ #[inline]
+ pub fn to_signed_bytes_le(&self) -> Vec<u8> {
+ convert::to_signed_bytes_le(self)
+ }
+
+ /// Returns the integer formatted as a string in the given radix.
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigInt;
+ ///
+ /// let i = BigInt::parse_bytes(b"ff", 16).unwrap();
+ /// assert_eq!(i.to_str_radix(16), "ff");
+ /// ```
+ #[inline]
+ pub fn to_str_radix(&self, radix: u32) -> String {
+ let mut v = to_str_radix_reversed(&self.data, radix);
+
+ if self.is_negative() {
+ v.push(b'-');
+ }
+
+ v.reverse();
+ unsafe { String::from_utf8_unchecked(v) }
+ }
+
+ /// Returns the integer in the requested base in big-endian digit order.
+    /// The output is not given in a human-readable alphabet but as a zero-based
+    /// `u8` number.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_be(159),
+ /// (Sign::Minus, vec![2, 94, 27]));
+ /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27
+ /// ```
+ #[inline]
+ pub fn to_radix_be(&self, radix: u32) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_radix_be(radix))
+ }
+
+ /// Returns the integer in the requested base in little-endian digit order.
+    /// The output is not given in a human-readable alphabet but as a zero-based
+    /// `u8` number.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_le(159),
+ /// (Sign::Minus, vec![27, 94, 2]));
+ /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2)
+ /// ```
+ #[inline]
+ pub fn to_radix_le(&self, radix: u32) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_radix_le(radix))
+ }
+
+ /// Returns the sign of the [`BigInt`] as a [`Sign`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ /// use num_traits::Zero;
+ ///
+ /// assert_eq!(BigInt::from(1234).sign(), Sign::Plus);
+ /// assert_eq!(BigInt::from(-4321).sign(), Sign::Minus);
+ /// assert_eq!(BigInt::zero().sign(), Sign::NoSign);
+ /// ```
+ #[inline]
+ pub fn sign(&self) -> Sign {
+ self.sign
+ }
+
+ /// Returns the magnitude of the [`BigInt`] as a [`BigUint`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, BigUint};
+ /// use num_traits::Zero;
+ ///
+ /// assert_eq!(BigInt::from(1234).magnitude(), &BigUint::from(1234u32));
+ /// assert_eq!(BigInt::from(-4321).magnitude(), &BigUint::from(4321u32));
+ /// assert!(BigInt::zero().magnitude().is_zero());
+ /// ```
+ #[inline]
+ pub fn magnitude(&self) -> &BigUint {
+ &self.data
+ }
+
+ /// Convert this [`BigInt`] into its [`Sign`] and [`BigUint`] magnitude,
+ /// the reverse of [`BigInt::from_biguint()`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, BigUint, Sign};
+ /// use num_traits::Zero;
+ ///
+ /// assert_eq!(BigInt::from(1234).into_parts(), (Sign::Plus, BigUint::from(1234u32)));
+ /// assert_eq!(BigInt::from(-4321).into_parts(), (Sign::Minus, BigUint::from(4321u32)));
+ /// assert_eq!(BigInt::zero().into_parts(), (Sign::NoSign, BigUint::zero()));
+ /// ```
+ #[inline]
+ pub fn into_parts(self) -> (Sign, BigUint) {
+ (self.sign, self.data)
+ }
+
+ /// Determines the fewest bits necessary to express the [`BigInt`],
+ /// not including the sign.
+ #[inline]
+ pub fn bits(&self) -> u64 {
+ self.data.bits()
+ }
+
+ /// Converts this [`BigInt`] into a [`BigUint`], if it's not negative.
+ #[inline]
+ pub fn to_biguint(&self) -> Option<BigUint> {
+ match self.sign {
+ Plus => Some(self.data.clone()),
+ NoSign => Some(Zero::zero()),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ pub fn checked_add(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self + v)
+ }
+
+ #[inline]
+ pub fn checked_sub(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self - v)
+ }
+
+ #[inline]
+ pub fn checked_mul(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self * v)
+ }
+
+ #[inline]
+ pub fn checked_div(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self / v)
+ }
+
+ /// Returns `self ^ exponent`.
+ pub fn pow(&self, exponent: u32) -> Self {
+ Pow::pow(self, exponent)
+ }
+
+    /// Returns `(self ^ exponent) mod modulus`.
+ ///
+ /// Note that this rounds like `mod_floor`, not like the `%` operator,
+ /// which makes a difference when given a negative `self` or `modulus`.
+ /// The result will be in the interval `[0, modulus)` for `modulus > 0`,
+    /// or in the interval `(modulus, 0]` for `modulus < 0`.
+ ///
+ /// Panics if the exponent is negative or the modulus is zero.
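+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use num_bigint::BigInt;
+    ///
+    /// // 4.pow(13) = 67108864, and 67108864 mod 497 = 445.
+    /// let base = BigInt::from(4);
+    /// let exponent = BigInt::from(13);
+    /// let modulus = BigInt::from(497);
+    /// assert_eq!(base.modpow(&exponent, &modulus), BigInt::from(445));
+    /// ```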
+ pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
+ power::modpow(self, exponent, modulus)
+ }
+
+ /// Returns the truncated principal square root of `self` --
+ /// see [`num_integer::Roots::sqrt()`].
+ pub fn sqrt(&self) -> Self {
+ Roots::sqrt(self)
+ }
+
+ /// Returns the truncated principal cube root of `self` --
+ /// see [`num_integer::Roots::cbrt()`].
+ pub fn cbrt(&self) -> Self {
+ Roots::cbrt(self)
+ }
+
+ /// Returns the truncated principal `n`th root of `self` --
+    /// see [`num_integer::Roots::nth_root()`].
+ pub fn nth_root(&self, n: u32) -> Self {
+ Roots::nth_root(self, n)
+ }
+
+ /// Returns the number of least-significant bits that are zero,
+ /// or `None` if the entire number is zero.
+ pub fn trailing_zeros(&self) -> Option<u64> {
+ self.data.trailing_zeros()
+ }
+
+    /// Returns whether the bit in position `bit` is set,
+    /// using two's complement for negative numbers.
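+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use num_bigint::BigInt;
+    ///
+    /// // In two's complement, -4 is ...11111100: bits 0 and 1 are clear, bit 2 is set,
+    /// // and every higher bit is set.
+    /// assert!(!BigInt::from(-4).bit(0));
+    /// assert!(!BigInt::from(-4).bit(1));
+    /// assert!(BigInt::from(-4).bit(2));
+    /// assert!(BigInt::from(-4).bit(100));
+    /// ```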
+ pub fn bit(&self, bit: u64) -> bool {
+ if self.is_negative() {
+ // Let the binary representation of a number be
+ // ... 0 x 1 0 ... 0
+ // Then the two's complement is
+ // ... 1 !x 1 0 ... 0
+ // where !x is obtained from x by flipping each bit
+ if bit >= u64::from(crate::big_digit::BITS) * self.len() as u64 {
+ true
+ } else {
+ let trailing_zeros = self.data.trailing_zeros().unwrap();
+ match Ord::cmp(&bit, &trailing_zeros) {
+ Ordering::Less => false,
+ Ordering::Equal => true,
+ Ordering::Greater => !self.data.bit(bit),
+ }
+ }
+ } else {
+ self.data.bit(bit)
+ }
+ }
+
+    /// Sets or clears the bit in the given position,
+    /// using two's complement for negative numbers.
+    ///
+    /// Note that setting a bit (for positive numbers) or clearing a bit (for
+    /// negative numbers) beyond the current bit length may require a
+    /// reallocation to store the new digits.
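+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use num_bigint::BigInt;
+    ///
+    /// // -1 is ...1111 in two's complement; clearing bit 0 yields ...1110, i.e. -2.
+    /// let mut x = BigInt::from(-1);
+    /// x.set_bit(0, false);
+    /// assert_eq!(x, BigInt::from(-2));
+    /// ```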
+ pub fn set_bit(&mut self, bit: u64, value: bool) {
+ match self.sign {
+ Sign::Plus => self.data.set_bit(bit, value),
+ Sign::Minus => bits::set_negative_bit(self, bit, value),
+ Sign::NoSign => {
+ if value {
+ self.data.set_bit(bit, true);
+ self.sign = Sign::Plus;
+ } else {
+ // Clearing a bit for zero is a no-op
+ }
+ }
+ }
+ // The top bit may have been cleared, so normalize
+ self.normalize();
+ }
+}
+
+impl num_traits::FromBytes for BigInt {
+ type Bytes = [u8];
+
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_signed_bytes_be(bytes)
+ }
+
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_signed_bytes_le(bytes)
+ }
+}
+
+impl num_traits::ToBytes for BigInt {
+ type Bytes = Vec<u8>;
+
+ fn to_be_bytes(&self) -> Self::Bytes {
+ self.to_signed_bytes_be()
+ }
+
+ fn to_le_bytes(&self) -> Self::Bytes {
+ self.to_signed_bytes_le()
+ }
+}
+
+#[test]
+fn test_from_biguint() {
+ fn check(inp_s: Sign, inp_n: usize, ans_s: Sign, ans_n: usize) {
+ let inp = BigInt::from_biguint(inp_s, BigUint::from(inp_n));
+ let ans = BigInt {
+ sign: ans_s,
+ data: BigUint::from(ans_n),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
+
+#[test]
+fn test_from_slice() {
+ fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) {
+ let inp = BigInt::from_slice(inp_s, &[inp_n]);
+ let ans = BigInt {
+ sign: ans_s,
+ data: BigUint::from(ans_n),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
+
+#[test]
+fn test_assign_from_slice() {
+ fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) {
+ let mut inp = BigInt::from_slice(Minus, &[2627_u32, 0_u32, 9182_u32, 42_u32]);
+ inp.assign_from_slice(inp_s, &[inp_n]);
+ let ans = BigInt {
+ sign: ans_s,
+ data: BigUint::from(ans_n),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
diff --git a/rust/vendor/num-bigint/src/bigint/addition.rs b/rust/vendor/num-bigint/src/bigint/addition.rs
new file mode 100644
index 0000000..76aeb99
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/addition.rs
@@ -0,0 +1,239 @@
+use super::CheckedUnsignedAbs::{Negative, Positive};
+use super::Sign::{Minus, NoSign, Plus};
+use super::{BigInt, UnsignedAbs};
+
+use crate::{IsizePromotion, UsizePromotion};
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::iter::Sum;
+use core::mem;
+use core::ops::{Add, AddAssign};
+use num_traits::{CheckedAdd, Zero};
+
+// We want to forward to BigUint::add, but it's not clear how that will go until
+// we compare both sign and magnitude. So we duplicate this body for every
+// val/ref combination, deferring that decision to BigUint's own forwarding.
+macro_rules! bigint_add {
+ ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
+ match ($a.sign, $b.sign) {
+ (_, NoSign) => $a_owned,
+ (NoSign, _) => $b_owned,
+ // same sign => keep the sign with the sum of magnitudes
+ (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
+ // opposite signs => keep the sign of the larger with the difference of magnitudes
+ (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) {
+ Less => BigInt::from_biguint($b.sign, $b_data - $a_data),
+ Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
+ Equal => Zero::zero(),
+ },
+ }
+ };
+}
+
+impl Add<&BigInt> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: &BigInt) -> BigInt {
+ bigint_add!(
+ self,
+ self.clone(),
+ &self.data,
+ other,
+ other.clone(),
+ &other.data
+ )
+ }
+}
+
+impl Add<BigInt> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: BigInt) -> BigInt {
+ bigint_add!(self, self.clone(), &self.data, other, other, other.data)
+ }
+}
+
+impl Add<&BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: &BigInt) -> BigInt {
+ bigint_add!(self, self, self.data, other, other.clone(), &other.data)
+ }
+}
+
+impl Add<BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: BigInt) -> BigInt {
+ bigint_add!(self, self, self.data, other, other, other.data)
+ }
+}
+
+impl AddAssign<&BigInt> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: &BigInt) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+forward_val_assign!(impl AddAssign for BigInt, add_assign);
+
+promote_all_scalars!(impl Add for BigInt, add);
+promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigInt, add);
+
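+// For a negative `self`, adding an unsigned scalar compares magnitudes and then
+// subtracts the smaller from the larger, attaching the appropriate sign.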
+impl Add<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u32) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from(self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from(other - self.data),
+ Greater => -BigInt::from(self.data - other),
+ },
+ }
+ }
+}
+
+impl AddAssign<u32> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u32) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+impl Add<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u64) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from(self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from(other - self.data),
+ Greater => -BigInt::from(self.data - other),
+ },
+ }
+ }
+}
+
+impl AddAssign<u64> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+impl Add<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u128) -> BigInt {
+ match self.sign {
+ NoSign => BigInt::from(other),
+ Plus => BigInt::from(self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => BigInt::zero(),
+ Less => BigInt::from(other - self.data),
+ Greater => -BigInt::from(self.data - other),
+ },
+ }
+ }
+}
+impl AddAssign<u128> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i32> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i64> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i128> for BigInt, add);
+
+impl Add<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i32) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self + u,
+ Negative(u) => self - u,
+ }
+ }
+}
+impl AddAssign<i32> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i32) {
+ match other.checked_uabs() {
+ Positive(u) => *self += u,
+ Negative(u) => *self -= u,
+ }
+ }
+}
+
+impl Add<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i64) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self + u,
+ Negative(u) => self - u,
+ }
+ }
+}
+impl AddAssign<i64> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i64) {
+ match other.checked_uabs() {
+ Positive(u) => *self += u,
+ Negative(u) => *self -= u,
+ }
+ }
+}
+
+impl Add<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i128) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self + u,
+ Negative(u) => self - u,
+ }
+ }
+}
+impl AddAssign<i128> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i128) {
+ match other.checked_uabs() {
+ Positive(u) => *self += u,
+ Negative(u) => *self -= u,
+ }
+ }
+}
+
+impl CheckedAdd for BigInt {
+ #[inline]
+ fn checked_add(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.add(v))
+ }
+}
+
+impl_sum_iter_type!(BigInt);
diff --git a/rust/vendor/num-bigint/src/bigint/arbitrary.rs b/rust/vendor/num-bigint/src/bigint/arbitrary.rs
new file mode 100644
index 0000000..df66050
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/arbitrary.rs
@@ -0,0 +1,39 @@
+use super::{BigInt, Sign};
+
+#[cfg(feature = "quickcheck")]
+use crate::std_alloc::Box;
+use crate::BigUint;
+
+#[cfg(feature = "quickcheck")]
+impl quickcheck::Arbitrary for BigInt {
+ fn arbitrary(g: &mut quickcheck::Gen) -> Self {
+ let positive = bool::arbitrary(g);
+ let sign = if positive { Sign::Plus } else { Sign::Minus };
+ Self::from_biguint(sign, BigUint::arbitrary(g))
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let sign = self.sign();
+ let unsigned_shrink = self.data.shrink();
+ Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x)))
+ }
+}
+
+#[cfg(feature = "arbitrary")]
+impl arbitrary::Arbitrary<'_> for BigInt {
+ fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ let positive = bool::arbitrary(u)?;
+ let sign = if positive { Sign::Plus } else { Sign::Minus };
+ Ok(Self::from_biguint(sign, BigUint::arbitrary(u)?))
+ }
+
+ fn arbitrary_take_rest(mut u: arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ let positive = bool::arbitrary(&mut u)?;
+ let sign = if positive { Sign::Plus } else { Sign::Minus };
+ Ok(Self::from_biguint(sign, BigUint::arbitrary_take_rest(u)?))
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ arbitrary::size_hint::and(bool::size_hint(depth), BigUint::size_hint(depth))
+ }
+}
diff --git a/rust/vendor/num-bigint/src/bigint/bits.rs b/rust/vendor/num-bigint/src/bigint/bits.rs
new file mode 100644
index 0000000..80f4e2c
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/bits.rs
@@ -0,0 +1,531 @@
+use super::BigInt;
+use super::Sign::{Minus, NoSign, Plus};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit};
+use crate::biguint::IntDigits;
+use crate::std_alloc::Vec;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign};
+use num_traits::{ToPrimitive, Zero};
+
+// Negation in two's complement.
+// acc must be initialized as 1 for least-significant digit.
+//
+// When negating, a carry (acc == 1) means that all the digits
+// considered to this point were zero. This means that if all the
+// digits of a negative BigInt have been considered, carry must be
+// zero as we cannot have negative zero.
+//
+// 01 -> ...f ff
+// ff -> ...f 01
+// 01 00 -> ...f ff 00
+// 01 01 -> ...f fe ff
+// 01 ff -> ...f fe 01
+// ff 00 -> ...f 01 00
+// ff 01 -> ...f 00 ff
+// ff ff -> ...f 00 01
+#[inline]
+fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(!a);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1
+// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff
+// answer is pos, has length of a
+fn bitand_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai &= twos_b;
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+}
+
+// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff
+// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1
+// answer is pos, has length of b
+fn bitand_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = twos_a & bi;
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => a.truncate(b.len()),
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().cloned());
+ }
+ }
+}
+
+// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff
+// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff
+// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitand_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ let mut carry_and = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(twos_a & twos_b, &mut carry_and);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_and);
+ }
+ debug_assert!(carry_a == 0);
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_and)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ }
+ if carry_and != 0 {
+ a.push(1);
+ }
+}
+
+forward_val_val_binop!(impl BitAnd for BigInt, bitand);
+forward_ref_val_binop!(impl BitAnd for BigInt, bitand);
+
+// do not use forward_ref_ref_binop_commutative! for bitand so that we can
+// clone as needed, avoiding over-allocation
+impl BitAnd<&BigInt> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitand(self, other: &BigInt) -> BigInt {
+ match (self.sign, other.sign) {
+ (NoSign, _) | (_, NoSign) => BigInt::zero(),
+ (Plus, Plus) => BigInt::from(&self.data & &other.data),
+ (Plus, Minus) => self.clone() & other,
+ (Minus, Plus) => other.clone() & self,
+ (Minus, Minus) => {
+ // forward to val-ref, choosing the larger to clone
+ if self.len() >= other.len() {
+ self.clone() & other
+ } else {
+ other.clone() & self
+ }
+ }
+ }
+ }
+}
+
+impl BitAnd<&BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitand(mut self, other: &BigInt) -> BigInt {
+ self &= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitAndAssign for BigInt, bitand_assign);
+
+impl BitAndAssign<&BigInt> for BigInt {
+ fn bitand_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (NoSign, _) => {}
+ (_, NoSign) => self.set_zero(),
+ (Plus, Plus) => {
+ self.data &= &other.data;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ (Plus, Minus) => {
+ bitand_pos_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitand_neg_pos(self.digits_mut(), other.digits());
+ self.sign = Plus;
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitand_neg_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ }
+ }
+}
+
+// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff
+// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1
+// answer is neg, has length of b
+fn bitor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(*ai | twos_b, &mut carry_or);
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ a.truncate(b.len());
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_or)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ }
+ // for carry_or to be non-zero, we would need twos_b == 0
+ debug_assert!(carry_or == 0);
+}
+
+// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1
+// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff
+// answer is neg, has length of a
+fn bitor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a | bi, &mut carry_or);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_or);
+ }
+ debug_assert!(carry_a == 0);
+ }
+ // for carry_or to be non-zero, we would need twos_a == 0
+ debug_assert!(carry_or == 0);
+}
+
+// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1
+// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1
+// answer is neg, has length of shortest
+fn bitor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(twos_a | twos_b, &mut carry_or);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ a.truncate(b.len());
+ }
+ // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0
+ debug_assert!(carry_or == 0);
+}
+
+forward_val_val_binop!(impl BitOr for BigInt, bitor);
+forward_ref_val_binop!(impl BitOr for BigInt, bitor);
+
+// do not use forward_ref_ref_binop_commutative! for bitor so that we can
+// clone as needed, avoiding over-allocation
+impl BitOr<&BigInt> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitor(self, other: &BigInt) -> BigInt {
+ match (self.sign, other.sign) {
+ (NoSign, _) => other.clone(),
+ (_, NoSign) => self.clone(),
+ (Plus, Plus) => BigInt::from(&self.data | &other.data),
+ (Plus, Minus) => other.clone() | self,
+ (Minus, Plus) => self.clone() | other,
+ (Minus, Minus) => {
+ // forward to val-ref, choosing the smaller to clone
+ if self.len() <= other.len() {
+ self.clone() | other
+ } else {
+ other.clone() | self
+ }
+ }
+ }
+ }
+}
+
+impl BitOr<&BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitor(mut self, other: &BigInt) -> BigInt {
+ self |= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign);
+
+impl BitOrAssign<&BigInt> for BigInt {
+ fn bitor_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (_, NoSign) => {}
+ (NoSign, _) => self.clone_from(other),
+ (Plus, Plus) => self.data |= &other.data,
+ (Plus, Minus) => {
+ bitor_pos_neg(self.digits_mut(), other.digits());
+ self.sign = Minus;
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitor_neg_pos(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitor_neg_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ }
+ }
+}
+
+// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100
+// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitxor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ let mut carry_xor = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_b = !0;
+ *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
+ }
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_xor)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ }
+ if carry_xor != 0 {
+ a.push(1);
+ }
+}
+
+// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100
+// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitxor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_xor = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a ^ bi, &mut carry_xor);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_xor);
+ }
+ debug_assert!(carry_a == 0);
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_a = !0;
+ negate_carry(twos_a ^ bi, &mut carry_xor)
+ }));
+ }
+ }
+ if carry_xor != 0 {
+ a.push(1);
+ }
+}
+
+// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe
+// -ff ^ - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe
+// answer is pos, has length of longest
+fn bitxor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = twos_a ^ twos_b;
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ match Ord::cmp(&a.len(), &b.len()) {
+ Greater => {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = !0;
+ *ai = twos_a ^ twos_b;
+ }
+ debug_assert!(carry_a == 0);
+ }
+ Equal => {}
+ Less => {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_a = !0;
+ let twos_b = negate_carry(bi, &mut carry_b);
+ twos_a ^ twos_b
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor);
+
+impl BitXor<&BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitxor(mut self, other: &BigInt) -> BigInt {
+ self ^= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign);
+
+impl BitXorAssign<&BigInt> for BigInt {
+ fn bitxor_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (_, NoSign) => {}
+ (NoSign, _) => self.clone_from(other),
+ (Plus, Plus) => {
+ self.data ^= &other.data;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ (Plus, Minus) => {
+ bitxor_pos_neg(self.digits_mut(), other.digits());
+ self.sign = Minus;
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitxor_neg_pos(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitxor_neg_neg(self.digits_mut(), other.digits());
+ self.sign = Plus;
+ self.normalize();
+ }
+ }
+ }
+}
+
+pub(super) fn set_negative_bit(x: &mut BigInt, bit: u64, value: bool) {
+ debug_assert_eq!(x.sign, Minus);
+ let data = &mut x.data;
+
+ let bits_per_digit = u64::from(big_digit::BITS);
+ if bit >= bits_per_digit * data.len() as u64 {
+ if !value {
+ data.set_bit(bit, true);
+ }
+ } else {
+ // If the Uint number is
+ // ... 0 x 1 0 ... 0
+ // then the two's complement is
+ // ... 1 !x 1 0 ... 0
+ // |-- bit at position 'trailing_zeros'
+ // where !x is obtained from x by flipping each bit
+ let trailing_zeros = data.trailing_zeros().unwrap();
+ if bit > trailing_zeros {
+ data.set_bit(bit, !value);
+ } else if bit == trailing_zeros && !value {
+ // Clearing the bit at position `trailing_zeros` is dealt with by doing
+ // similarly to what `bitand_neg_pos` does, except we start at digit
+ // `bit_index`. All digits below `bit_index` are guaranteed to be zero,
+ // so initially we have `carry_in` = `carry_out` = 1. Furthermore, we
+ // stop traversing the digits when there are no more carries.
+ let bit_index = (bit / bits_per_digit).to_usize().unwrap();
+ let bit_mask = (1 as BigDigit) << (bit % bits_per_digit);
+ let mut digit_iter = data.digits_mut().iter_mut().skip(bit_index);
+ let mut carry_in = 1;
+ let mut carry_out = 1;
+
+ let digit = digit_iter.next().unwrap();
+ let twos_in = negate_carry(*digit, &mut carry_in);
+ let twos_out = twos_in & !bit_mask;
+ *digit = negate_carry(twos_out, &mut carry_out);
+
+ for digit in digit_iter {
+ if carry_in == 0 && carry_out == 0 {
+ // Exit the loop since no more digits can change
+ break;
+ }
+ let twos = negate_carry(*digit, &mut carry_in);
+ *digit = negate_carry(twos, &mut carry_out);
+ }
+
+ if carry_out != 0 {
+ // All digits have been traversed and there is a carry
+ debug_assert_eq!(carry_in, 0);
+ data.digits_mut().push(1);
+ }
+ } else if bit < trailing_zeros && value {
+ // Flip each bit from position 'bit' to 'trailing_zeros', both inclusive
+ // ... 1 !x 1 0 ... 0 ... 0
+ // |-- bit at position 'bit'
+ // |-- bit at position 'trailing_zeros'
+ // bit_mask: 1 1 ... 1 0 .. 0
+ // This is done by xor'ing with the bit_mask
+ let index_lo = (bit / bits_per_digit).to_usize().unwrap();
+ let index_hi = (trailing_zeros / bits_per_digit).to_usize().unwrap();
+ let bit_mask_lo = big_digit::MAX << (bit % bits_per_digit);
+ let bit_mask_hi =
+ big_digit::MAX >> (bits_per_digit - 1 - (trailing_zeros % bits_per_digit));
+ let digits = data.digits_mut();
+
+ if index_lo == index_hi {
+ digits[index_lo] ^= bit_mask_lo & bit_mask_hi;
+ } else {
+ digits[index_lo] = bit_mask_lo;
+ for digit in &mut digits[index_lo + 1..index_hi] {
+ *digit = big_digit::MAX;
+ }
+ digits[index_hi] ^= bit_mask_hi;
+ }
+ } else {
+ // We end up here in two cases:
+ // bit == trailing_zeros && value: Bit is already set
+ // bit < trailing_zeros && !value: Bit is already cleared
+ }
+ }
+}
diff --git a/rust/vendor/num-bigint/src/bigint/convert.rs b/rust/vendor/num-bigint/src/bigint/convert.rs
new file mode 100644
index 0000000..c4f888b
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/convert.rs
@@ -0,0 +1,479 @@
+use super::Sign::{self, Minus, NoSign, Plus};
+use super::{BigInt, ToBigInt};
+
+use crate::std_alloc::Vec;
+#[cfg(has_try_from)]
+use crate::TryFromBigIntError;
+use crate::{BigUint, ParseBigIntError, ToBigUint};
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+#[cfg(has_try_from)]
+use core::convert::TryFrom;
+use core::str::{self, FromStr};
+use num_traits::{FromPrimitive, Num, One, ToPrimitive, Zero};
+
+impl FromStr for BigInt {
+ type Err = ParseBigIntError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<BigInt, ParseBigIntError> {
+ BigInt::from_str_radix(s, 10)
+ }
+}
+
+impl Num for BigInt {
+ type FromStrRadixErr = ParseBigIntError;
+
+ /// Creates and initializes a [`BigInt`].
+ #[inline]
+ fn from_str_radix(mut s: &str, radix: u32) -> Result<BigInt, ParseBigIntError> {
+ let sign = if s.starts_with('-') {
+ let tail = &s[1..];
+ if !tail.starts_with('+') {
+ s = tail
+ }
+ Minus
+ } else {
+ Plus
+ };
+ let bu = BigUint::from_str_radix(s, radix)?;
+ Ok(BigInt::from_biguint(sign, bu))
+ }
+}
+
+impl ToPrimitive for BigInt {
+ #[inline]
+ fn to_i64(&self) -> Option<i64> {
+ match self.sign {
+ Plus => self.data.to_i64(),
+ NoSign => Some(0),
+ Minus => {
+ let n = self.data.to_u64()?;
+ let m: u64 = 1 << 63;
+ match n.cmp(&m) {
+ Less => Some(-(n as i64)),
+ Equal => Some(core::i64::MIN),
+ Greater => None,
+ }
+ }
+ }
+ }
+
+ #[inline]
+ fn to_i128(&self) -> Option<i128> {
+ match self.sign {
+ Plus => self.data.to_i128(),
+ NoSign => Some(0),
+ Minus => {
+ let n = self.data.to_u128()?;
+ let m: u128 = 1 << 127;
+ match n.cmp(&m) {
+ Less => Some(-(n as i128)),
+ Equal => Some(core::i128::MIN),
+ Greater => None,
+ }
+ }
+ }
+ }
+
+ #[inline]
+ fn to_u64(&self) -> Option<u64> {
+ match self.sign {
+ Plus => self.data.to_u64(),
+ NoSign => Some(0),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ fn to_u128(&self) -> Option<u128> {
+ match self.sign {
+ Plus => self.data.to_u128(),
+ NoSign => Some(0),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ let n = self.data.to_f32()?;
+ Some(if self.sign == Minus { -n } else { n })
+ }
+
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ let n = self.data.to_f64()?;
+ Some(if self.sign == Minus { -n } else { n })
+ }
+}
+
+macro_rules! impl_try_from_bigint {
+ ($T:ty, $to_ty:path) => {
+ #[cfg(has_try_from)]
+ impl TryFrom<&BigInt> for $T {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: &BigInt) -> Result<$T, TryFromBigIntError<()>> {
+ $to_ty(value).ok_or(TryFromBigIntError::new(()))
+ }
+ }
+
+ #[cfg(has_try_from)]
+ impl TryFrom<BigInt> for $T {
+ type Error = TryFromBigIntError<BigInt>;
+
+ #[inline]
+ fn try_from(value: BigInt) -> Result<$T, TryFromBigIntError<BigInt>> {
+ <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value))
+ }
+ }
+ };
+}
+
+impl_try_from_bigint!(u8, ToPrimitive::to_u8);
+impl_try_from_bigint!(u16, ToPrimitive::to_u16);
+impl_try_from_bigint!(u32, ToPrimitive::to_u32);
+impl_try_from_bigint!(u64, ToPrimitive::to_u64);
+impl_try_from_bigint!(usize, ToPrimitive::to_usize);
+impl_try_from_bigint!(u128, ToPrimitive::to_u128);
+
+impl_try_from_bigint!(i8, ToPrimitive::to_i8);
+impl_try_from_bigint!(i16, ToPrimitive::to_i16);
+impl_try_from_bigint!(i32, ToPrimitive::to_i32);
+impl_try_from_bigint!(i64, ToPrimitive::to_i64);
+impl_try_from_bigint!(isize, ToPrimitive::to_isize);
+impl_try_from_bigint!(i128, ToPrimitive::to_i128);
+
+impl FromPrimitive for BigInt {
+ #[inline]
+ fn from_i64(n: i64) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_i128(n: i128) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_u64(n: u64) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_u128(n: u128) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_f64(n: f64) -> Option<BigInt> {
+ if n >= 0.0 {
+ BigUint::from_f64(n).map(BigInt::from)
+ } else {
+ let x = BigUint::from_f64(-n)?;
+ Some(-BigInt::from(x))
+ }
+ }
+}
+
+impl From<i64> for BigInt {
+ #[inline]
+ fn from(n: i64) -> Self {
+ if n >= 0 {
+ BigInt::from(n as u64)
+ } else {
+ let u = core::u64::MAX - (n as u64) + 1;
+ BigInt {
+ sign: Minus,
+ data: BigUint::from(u),
+ }
+ }
+ }
+}
+
+impl From<i128> for BigInt {
+ #[inline]
+ fn from(n: i128) -> Self {
+ if n >= 0 {
+ BigInt::from(n as u128)
+ } else {
+ let u = core::u128::MAX - (n as u128) + 1;
+ BigInt {
+ sign: Minus,
+ data: BigUint::from(u),
+ }
+ }
+ }
+}
+
+macro_rules! impl_bigint_from_int {
+ ($T:ty) => {
+ impl From<$T> for BigInt {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigInt::from(n as i64)
+ }
+ }
+ };
+}
+
+impl_bigint_from_int!(i8);
+impl_bigint_from_int!(i16);
+impl_bigint_from_int!(i32);
+impl_bigint_from_int!(isize);
+
+impl From<u64> for BigInt {
+ #[inline]
+ fn from(n: u64) -> Self {
+ if n > 0 {
+ BigInt {
+ sign: Plus,
+ data: BigUint::from(n),
+ }
+ } else {
+ BigInt::zero()
+ }
+ }
+}
+
+impl From<u128> for BigInt {
+ #[inline]
+ fn from(n: u128) -> Self {
+ if n > 0 {
+ BigInt {
+ sign: Plus,
+ data: BigUint::from(n),
+ }
+ } else {
+ BigInt::zero()
+ }
+ }
+}
+
+macro_rules! impl_bigint_from_uint {
+ ($T:ty) => {
+ impl From<$T> for BigInt {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigInt::from(n as u64)
+ }
+ }
+ };
+}
+
+impl_bigint_from_uint!(u8);
+impl_bigint_from_uint!(u16);
+impl_bigint_from_uint!(u32);
+impl_bigint_from_uint!(usize);
+
+impl From<BigUint> for BigInt {
+ #[inline]
+ fn from(n: BigUint) -> Self {
+ if n.is_zero() {
+ BigInt::zero()
+ } else {
+ BigInt {
+ sign: Plus,
+ data: n,
+ }
+ }
+ }
+}
+
+impl ToBigInt for BigInt {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ Some(self.clone())
+ }
+}
+
+impl ToBigInt for BigUint {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ if self.is_zero() {
+ Some(Zero::zero())
+ } else {
+ Some(BigInt {
+ sign: Plus,
+ data: self.clone(),
+ })
+ }
+ }
+}
+
+impl ToBigUint for BigInt {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ match self.sign() {
+ Plus => Some(self.data.clone()),
+ NoSign => Some(Zero::zero()),
+ Minus => None,
+ }
+ }
+}
+
+#[cfg(has_try_from)]
+impl TryFrom<&BigInt> for BigUint {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: &BigInt) -> Result<BigUint, TryFromBigIntError<()>> {
+ value
+ .to_biguint()
+ .ok_or_else(|| TryFromBigIntError::new(()))
+ }
+}
+
+#[cfg(has_try_from)]
+impl TryFrom<BigInt> for BigUint {
+ type Error = TryFromBigIntError<BigInt>;
+
+ #[inline]
+ fn try_from(value: BigInt) -> Result<BigUint, TryFromBigIntError<BigInt>> {
+ if value.sign() == Sign::Minus {
+ Err(TryFromBigIntError::new(value))
+ } else {
+ Ok(value.data)
+ }
+ }
+}
+
+macro_rules! impl_to_bigint {
+ ($T:ty, $from_ty:path) => {
+ impl ToBigInt for $T {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ $from_ty(*self)
+ }
+ }
+ };
+}
+
+impl_to_bigint!(isize, FromPrimitive::from_isize);
+impl_to_bigint!(i8, FromPrimitive::from_i8);
+impl_to_bigint!(i16, FromPrimitive::from_i16);
+impl_to_bigint!(i32, FromPrimitive::from_i32);
+impl_to_bigint!(i64, FromPrimitive::from_i64);
+impl_to_bigint!(i128, FromPrimitive::from_i128);
+
+impl_to_bigint!(usize, FromPrimitive::from_usize);
+impl_to_bigint!(u8, FromPrimitive::from_u8);
+impl_to_bigint!(u16, FromPrimitive::from_u16);
+impl_to_bigint!(u32, FromPrimitive::from_u32);
+impl_to_bigint!(u64, FromPrimitive::from_u64);
+impl_to_bigint!(u128, FromPrimitive::from_u128);
+
+impl_to_bigint!(f32, FromPrimitive::from_f32);
+impl_to_bigint!(f64, FromPrimitive::from_f64);
+
+impl From<bool> for BigInt {
+ fn from(x: bool) -> Self {
+ if x {
+ One::one()
+ } else {
+ Zero::zero()
+ }
+ }
+}
+
+#[inline]
+pub(super) fn from_signed_bytes_be(digits: &[u8]) -> BigInt {
+ let sign = match digits.first() {
+ Some(v) if *v > 0x7f => Sign::Minus,
+ Some(_) => Sign::Plus,
+ None => return BigInt::zero(),
+ };
+
+ if sign == Sign::Minus {
+ // two's-complement the content to retrieve the magnitude
+ let mut digits = Vec::from(digits);
+ twos_complement_be(&mut digits);
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(&digits))
+ } else {
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(digits))
+ }
+}
+
+#[inline]
+pub(super) fn from_signed_bytes_le(digits: &[u8]) -> BigInt {
+ let sign = match digits.last() {
+ Some(v) if *v > 0x7f => Sign::Minus,
+ Some(_) => Sign::Plus,
+ None => return BigInt::zero(),
+ };
+
+ if sign == Sign::Minus {
+ // two's-complement the content to retrieve the magnitude
+ let mut digits = Vec::from(digits);
+ twos_complement_le(&mut digits);
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(&digits))
+ } else {
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(digits))
+ }
+}
+
+#[inline]
+pub(super) fn to_signed_bytes_be(x: &BigInt) -> Vec<u8> {
+ let mut bytes = x.data.to_bytes_be();
+ let first_byte = bytes.first().cloned().unwrap_or(0);
+ if first_byte > 0x7f
+ && !(first_byte == 0x80 && bytes.iter().skip(1).all(Zero::is_zero) && x.sign == Sign::Minus)
+ {
+ // msb used by magnitude, extend by 1 byte
+ bytes.insert(0, 0);
+ }
+ if x.sign == Sign::Minus {
+ twos_complement_be(&mut bytes);
+ }
+ bytes
+}
+
+#[inline]
+pub(super) fn to_signed_bytes_le(x: &BigInt) -> Vec<u8> {
+ let mut bytes = x.data.to_bytes_le();
+ let last_byte = bytes.last().cloned().unwrap_or(0);
+ if last_byte > 0x7f
+ && !(last_byte == 0x80
+ && bytes.iter().rev().skip(1).all(Zero::is_zero)
+ && x.sign == Sign::Minus)
+ {
+ // msb used by magnitude, extend by 1 byte
+ bytes.push(0);
+ }
+ if x.sign == Sign::Minus {
+ twos_complement_le(&mut bytes);
+ }
+ bytes
+}
+
+/// Perform in-place two's complement of the given binary representation,
+/// in little-endian byte order.
+#[inline]
+fn twos_complement_le(digits: &mut [u8]) {
+ twos_complement(digits)
+}
+
+/// Perform in-place two's complement of the given binary representation
+/// in big-endian byte order.
+#[inline]
+fn twos_complement_be(digits: &mut [u8]) {
+ twos_complement(digits.iter_mut().rev())
+}
+
+/// Perform in-place two's complement of the given digit iterator
+/// starting from the least significant byte.
+#[inline]
+fn twos_complement<'a, I>(digits: I)
+where
+ I: IntoIterator<Item = &'a mut u8>,
+{
+ let mut carry = true;
+ for d in digits {
+ *d = !*d;
+ if carry {
+ *d = d.wrapping_add(1);
+ carry = d.is_zero();
+ }
+ }
+}
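For context, a small sketch (not part of the diff) of the public signed-bytes round trip that the `from_signed_bytes_*`/`to_signed_bytes_*` helpers above implement:

```
use num_bigint::BigInt;

fn main() {
    let x = BigInt::from(-258);
    let be = x.to_signed_bytes_be();
    // -258 is 0xFEFE in 16-bit two's complement.
    assert_eq!(be, vec![0xfe, 0xfe]);
    assert_eq!(BigInt::from_signed_bytes_be(&be), x);
}
```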
diff --git a/rust/vendor/num-bigint/src/bigint/division.rs b/rust/vendor/num-bigint/src/bigint/division.rs
new file mode 100644
index 0000000..318d1fb
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/division.rs
@@ -0,0 +1,496 @@
+use super::CheckedUnsignedAbs::{Negative, Positive};
+use super::Sign::NoSign;
+use super::{BigInt, UnsignedAbs};
+
+use crate::{IsizePromotion, UsizePromotion};
+
+use core::ops::{Div, DivAssign, Rem, RemAssign};
+use num_integer::Integer;
+use num_traits::{CheckedDiv, CheckedEuclid, Euclid, Signed, ToPrimitive, Zero};
+
+forward_all_binop_to_ref_ref!(impl Div for BigInt, div);
+
+impl Div<&BigInt> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: &BigInt) -> BigInt {
+ let (q, _) = self.div_rem(other);
+ q
+ }
+}
+
+impl DivAssign<&BigInt> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: &BigInt) {
+ *self = &*self / other;
+ }
+}
+forward_val_assign!(impl DivAssign for BigInt, div_assign);
+
+promote_all_scalars!(impl Div for BigInt, div);
+promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign);
+forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigInt, div);
+
+impl Div<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u32> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u32) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+impl Div<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u64> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u64) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+impl Div<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u128> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u128) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Div<i32> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<i64> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<i128> for BigInt, div);
+
+impl Div<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i32) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self / u,
+ Negative(u) => -self / u,
+ }
+ }
+}
+
+impl DivAssign<i32> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i32) {
+ match other.checked_uabs() {
+ Positive(u) => *self /= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ *self /= u;
+ }
+ }
+ }
+}
+
+impl Div<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u / other,
+ Negative(u) => u / -other,
+ }
+ }
+}
+
+impl Div<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i64) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self / u,
+ Negative(u) => -self / u,
+ }
+ }
+}
+
+impl DivAssign<i64> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i64) {
+ match other.checked_uabs() {
+ Positive(u) => *self /= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ *self /= u;
+ }
+ }
+ }
+}
+
+impl Div<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u / other,
+ Negative(u) => u / -other,
+ }
+ }
+}
+
+impl Div<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i128) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self / u,
+ Negative(u) => -self / u,
+ }
+ }
+}
+
+impl DivAssign<i128> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i128) {
+ match other.checked_uabs() {
+ Positive(u) => *self /= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ *self /= u;
+ }
+ }
+ }
+}
+
+impl Div<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u / other,
+ Negative(u) => u / -other,
+ }
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem);
+
+impl Rem<&BigInt> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: &BigInt) -> BigInt {
+ if let Some(other) = other.to_u32() {
+ self % other
+ } else if let Some(other) = other.to_i32() {
+ self % other
+ } else {
+ let (_, r) = self.div_rem(other);
+ r
+ }
+ }
+}
+
+impl RemAssign<&BigInt> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigInt) {
+ *self = &*self % other;
+ }
+}
+forward_val_assign!(impl RemAssign for BigInt, rem_assign);
+
+promote_all_scalars!(impl Rem for BigInt, rem);
+promote_all_scalars_assign!(impl RemAssign for BigInt, rem_assign);
+forward_all_scalar_binop_to_val_val!(impl Rem<u32> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigInt, rem);
+
+impl Rem<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u32> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u32) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from(self % other.data)
+ }
+}
+
+impl Rem<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u64> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u64) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from(self % other.data)
+ }
+}
+
+impl Rem<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u128> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u128) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from(self % other.data)
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Rem<i32> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<i64> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<i128> for BigInt, rem);
+
+impl Rem<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i32) -> BigInt {
+ self % other.uabs()
+ }
+}
+
+impl RemAssign<i32> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i32) {
+ *self %= other.uabs();
+ }
+}
+
+impl Rem<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u % other,
+ Negative(u) => -(u % other),
+ }
+ }
+}
+
+impl Rem<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i64) -> BigInt {
+ self % other.uabs()
+ }
+}
+
+impl RemAssign<i64> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i64) {
+ *self %= other.uabs();
+ }
+}
+
+impl Rem<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u % other,
+ Negative(u) => -(u % other),
+ }
+ }
+}
+
+impl Rem<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i128) -> BigInt {
+ self % other.uabs()
+ }
+}
+
+impl RemAssign<i128> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i128) {
+ *self %= other.uabs();
+ }
+}
+
+impl Rem<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u % other,
+ Negative(u) => -(u % other),
+ }
+ }
+}
+
+impl CheckedDiv for BigInt {
+ #[inline]
+ fn checked_div(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+}
+
+impl CheckedEuclid for BigInt {
+ #[inline]
+ fn checked_div_euclid(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div_euclid(v))
+ }
+
+ #[inline]
+ fn checked_rem_euclid(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.rem_euclid(v))
+ }
+}
+
+impl Euclid for BigInt {
+ #[inline]
+ fn div_euclid(&self, v: &BigInt) -> BigInt {
+ let (q, r) = self.div_rem(v);
+ if r.is_negative() {
+ if v.is_positive() {
+ q - 1
+ } else {
+ q + 1
+ }
+ } else {
+ q
+ }
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &BigInt) -> BigInt {
+ let r = self % v;
+ if r.is_negative() {
+ if v.is_positive() {
+ r + v
+ } else {
+ r - v
+ }
+ } else {
+ r
+ }
+ }
+}
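A minimal sketch (illustrative, not part of the diff) contrasting the truncating `/` and `%` operators above with the Euclidean variants, which keep the remainder non-negative:

```
use num_bigint::BigInt;
use num_traits::Euclid;

fn main() {
    let a = BigInt::from(-7);
    let b = BigInt::from(3);

    // Truncating division: -7 / 3 == -2, -7 % 3 == -1.
    assert_eq!(&a / &b, BigInt::from(-2));
    assert_eq!(&a % &b, BigInt::from(-1));

    // Euclidean division: -7 == -3 * 3 + 2, so the remainder is 2.
    assert_eq!(a.div_euclid(&b), BigInt::from(-3));
    assert_eq!(a.rem_euclid(&b), BigInt::from(2));
}
```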
diff --git a/rust/vendor/num-bigint/src/bigint/multiplication.rs b/rust/vendor/num-bigint/src/bigint/multiplication.rs
new file mode 100644
index 0000000..82e64c2
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/multiplication.rs
@@ -0,0 +1,217 @@
+use super::CheckedUnsignedAbs::{Negative, Positive};
+use super::Sign::{self, Minus, NoSign, Plus};
+use super::{BigInt, UnsignedAbs};
+
+use crate::{IsizePromotion, UsizePromotion};
+
+use core::iter::Product;
+use core::ops::{Mul, MulAssign};
+use num_traits::{CheckedMul, One, Zero};
+
+impl Mul<Sign> for Sign {
+ type Output = Sign;
+
+ #[inline]
+ fn mul(self, other: Sign) -> Sign {
+ match (self, other) {
+ (NoSign, _) | (_, NoSign) => NoSign,
+ (Plus, Plus) | (Minus, Minus) => Plus,
+ (Plus, Minus) | (Minus, Plus) => Minus,
+ }
+ }
+}
+
+macro_rules! impl_mul {
+ ($(impl Mul<$Other:ty> for $Self:ty;)*) => {$(
+ impl Mul<$Other> for $Self {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: $Other) -> BigInt {
+ // automatically match value/ref
+ let BigInt { data: x, .. } = self;
+ let BigInt { data: y, .. } = other;
+ BigInt::from_biguint(self.sign * other.sign, x * y)
+ }
+ }
+ )*}
+}
+impl_mul! {
+ impl Mul<BigInt> for BigInt;
+ impl Mul<BigInt> for &BigInt;
+ impl Mul<&BigInt> for BigInt;
+ impl Mul<&BigInt> for &BigInt;
+}
+
+macro_rules! impl_mul_assign {
+ ($(impl MulAssign<$Other:ty> for BigInt;)*) => {$(
+ impl MulAssign<$Other> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: $Other) {
+ // automatically match value/ref
+ let BigInt { data: y, .. } = other;
+ self.data *= y;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ } else {
+ self.sign = self.sign * other.sign;
+ }
+ }
+ }
+ )*}
+}
+impl_mul_assign! {
+ impl MulAssign<BigInt> for BigInt;
+ impl MulAssign<&BigInt> for BigInt;
+}
+
+promote_all_scalars!(impl Mul for BigInt, mul);
+promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigInt, mul);
+
+impl Mul<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u32> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u32) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Mul<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u64> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Mul<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u128> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i32> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i64> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i128> for BigInt, mul);
+
+impl Mul<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i32) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self * u,
+ Negative(u) => -self * u,
+ }
+ }
+}
+
+impl MulAssign<i32> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i32) {
+ match other.checked_uabs() {
+ Positive(u) => *self *= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ self.data *= u;
+ }
+ }
+ }
+}
+
+impl Mul<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i64) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self * u,
+ Negative(u) => -self * u,
+ }
+ }
+}
+
+impl MulAssign<i64> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i64) {
+ match other.checked_uabs() {
+ Positive(u) => *self *= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ self.data *= u;
+ }
+ }
+ }
+}
+
+impl Mul<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i128) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self * u,
+ Negative(u) => -self * u,
+ }
+ }
+}
+
+impl MulAssign<i128> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i128) {
+ match other.checked_uabs() {
+ Positive(u) => *self *= u,
+ Negative(u) => {
+ self.sign = -self.sign;
+ self.data *= u;
+ }
+ }
+ }
+}
+
+impl CheckedMul for BigInt {
+ #[inline]
+ fn checked_mul(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.mul(v))
+ }
+}
+
+impl_product_iter_type!(BigInt);
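A short sketch (not part of the diff) of the sign rules that `Mul<Sign> for Sign` encodes, with a zero factor normalizing to `NoSign`:

```
use num_bigint::BigInt;
use num_traits::Zero;

fn main() {
    // Sign of a product is the product of the signs.
    assert_eq!(BigInt::from(-4) * BigInt::from(5), BigInt::from(-20));
    assert_eq!(BigInt::from(-4) * BigInt::from(-5), BigInt::from(20));
    // A zero factor gives a zero (NoSign) result.
    assert!((BigInt::from(-4) * BigInt::zero()).is_zero());
}
```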
diff --git a/rust/vendor/num-bigint/src/bigint/power.rs b/rust/vendor/num-bigint/src/bigint/power.rs
new file mode 100644
index 0000000..4b41f4f
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/power.rs
@@ -0,0 +1,94 @@
+use super::BigInt;
+use super::Sign::{self, Minus, Plus};
+
+use crate::BigUint;
+
+use num_integer::Integer;
+use num_traits::{Pow, Signed, Zero};
+
+ /// Helper function for pow
+///
+/// Computes the effect of the exponent on the sign.
+#[inline]
+fn powsign<T: Integer>(sign: Sign, other: &T) -> Sign {
+ if other.is_zero() {
+ Plus
+ } else if sign != Minus || other.is_odd() {
+ sign
+ } else {
+ -sign
+ }
+}
+
+macro_rules! pow_impl {
+ ($T:ty) => {
+ impl Pow<$T> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: $T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, &rhs), self.data.pow(rhs))
+ }
+ }
+
+ impl Pow<&$T> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: &$T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, rhs), self.data.pow(rhs))
+ }
+ }
+
+ impl Pow<$T> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: $T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, &rhs), Pow::pow(&self.data, rhs))
+ }
+ }
+
+ impl Pow<&$T> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: &$T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, rhs), Pow::pow(&self.data, rhs))
+ }
+ }
+ };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+pow_impl!(u128);
+pow_impl!(BigUint);
+
+pub(super) fn modpow(x: &BigInt, exponent: &BigInt, modulus: &BigInt) -> BigInt {
+ assert!(
+ !exponent.is_negative(),
+ "negative exponentiation is not supported!"
+ );
+ assert!(
+ !modulus.is_zero(),
+ "attempt to calculate with zero modulus!"
+ );
+
+ let result = x.data.modpow(&exponent.data, &modulus.data);
+ if result.is_zero() {
+ return BigInt::zero();
+ }
+
+ // The sign of the result follows the modulus, like `mod_floor`.
+ let (sign, mag) = match (x.is_negative() && exponent.is_odd(), modulus.is_negative()) {
+ (false, false) => (Plus, result),
+ (true, false) => (Plus, &modulus.data - result),
+ (false, true) => (Minus, &modulus.data - result),
+ (true, true) => (Minus, result),
+ };
+ BigInt::from_biguint(sign, mag)
+}
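An illustrative sketch (not part of the diff) of the sign convention `modpow` uses above, where the result follows the sign of the modulus like `mod_floor`:

```
use num_bigint::BigInt;

fn main() {
    let base = BigInt::from(-2);
    let exp = BigInt::from(3);

    // (-2)^3 mod 5: the truncating remainder would be -3, but the result
    // follows the modulus, so it is 2 (since -8 == -2 * 5 + 2).
    assert_eq!(base.modpow(&exp, &BigInt::from(5)), BigInt::from(2));

    // With a negative modulus the result is non-positive.
    assert_eq!(base.modpow(&exp, &BigInt::from(-5)), BigInt::from(-3));
}
```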
diff --git a/rust/vendor/num-bigint/src/bigint/serde.rs b/rust/vendor/num-bigint/src/bigint/serde.rs
new file mode 100644
index 0000000..5c232f9
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/serde.rs
@@ -0,0 +1,58 @@
+use super::{BigInt, Sign};
+
+use serde::de::{Error, Unexpected};
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+impl Serialize for Sign {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ // Note: do not change the serialization format, or it may break
+ // forward and backward compatibility of serialized data!
+ match *self {
+ Sign::Minus => (-1i8).serialize(serializer),
+ Sign::NoSign => 0i8.serialize(serializer),
+ Sign::Plus => 1i8.serialize(serializer),
+ }
+ }
+}
+
+impl<'de> Deserialize<'de> for Sign {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let sign = i8::deserialize(deserializer)?;
+ match sign {
+ -1 => Ok(Sign::Minus),
+ 0 => Ok(Sign::NoSign),
+ 1 => Ok(Sign::Plus),
+ _ => Err(D::Error::invalid_value(
+ Unexpected::Signed(sign.into()),
+ &"a sign of -1, 0, or 1",
+ )),
+ }
+ }
+}
+
+impl Serialize for BigInt {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ // Note: do not change the serialization format, or it may break
+ // forward and backward compatibility of serialized data!
+ (self.sign, &self.data).serialize(serializer)
+ }
+}
+
+impl<'de> Deserialize<'de> for BigInt {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let (sign, data) = Deserialize::deserialize(deserializer)?;
+ Ok(BigInt::from_biguint(sign, data))
+ }
+}
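A hedged round-trip sketch (not part of the diff; it assumes the crate's optional `serde` feature and `serde_json` as an external dependency, purely for illustration):

```
use num_bigint::BigInt;

fn main() {
    let x = BigInt::from(-123_456_789_i64);
    // Any self-describing serde format works; serde_json is just an example.
    let encoded = serde_json::to_string(&x).unwrap();
    let decoded: BigInt = serde_json::from_str(&encoded).unwrap();
    assert_eq!(x, decoded);
}
```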
diff --git a/rust/vendor/num-bigint/src/bigint/shift.rs b/rust/vendor/num-bigint/src/bigint/shift.rs
new file mode 100644
index 0000000..22bb744
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/shift.rs
@@ -0,0 +1,107 @@
+use super::BigInt;
+use super::Sign::NoSign;
+
+use core::ops::{Shl, ShlAssign, Shr, ShrAssign};
+use num_traits::{PrimInt, Signed, Zero};
+
+macro_rules! impl_shift {
+ (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => {
+ impl $Shx<&$rhs> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn $shx(self, rhs: &$rhs) -> BigInt {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl $Shx<&$rhs> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn $shx(self, rhs: &$rhs) -> BigInt {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl $ShxAssign<&$rhs> for BigInt {
+ #[inline]
+ fn $shx_assign(&mut self, rhs: &$rhs) {
+ $ShxAssign::$shx_assign(self, *rhs);
+ }
+ }
+ };
+ ($($rhs:ty),+) => {$(
+ impl Shl<$rhs> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data << rhs)
+ }
+ }
+ impl Shl<$rhs> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigInt {
+ BigInt::from_biguint(self.sign, &self.data << rhs)
+ }
+ }
+ impl ShlAssign<$rhs> for BigInt {
+ #[inline]
+ fn shl_assign(&mut self, rhs: $rhs) {
+ self.data <<= rhs
+ }
+ }
+ impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs }
+
+ impl Shr<$rhs> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigInt {
+ let round_down = shr_round_down(&self, rhs);
+ let data = self.data >> rhs;
+ let data = if round_down { data + 1u8 } else { data };
+ BigInt::from_biguint(self.sign, data)
+ }
+ }
+ impl Shr<$rhs> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigInt {
+ let round_down = shr_round_down(self, rhs);
+ let data = &self.data >> rhs;
+ let data = if round_down { data + 1u8 } else { data };
+ BigInt::from_biguint(self.sign, data)
+ }
+ }
+ impl ShrAssign<$rhs> for BigInt {
+ #[inline]
+ fn shr_assign(&mut self, rhs: $rhs) {
+ let round_down = shr_round_down(self, rhs);
+ self.data >>= rhs;
+ if round_down {
+ self.data += 1u8;
+ } else if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ }
+ impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs }
+ )*};
+}
+
+impl_shift! { u8, u16, u32, u64, u128, usize }
+impl_shift! { i8, i16, i32, i64, i128, isize }
+
+// Negative values need a rounding adjustment if there are any ones in the
+// bits that are getting shifted out.
+fn shr_round_down<T: PrimInt>(i: &BigInt, shift: T) -> bool {
+ if i.is_negative() {
+ let zeros = i.trailing_zeros().expect("negative values are non-zero");
+ shift > T::zero() && shift.to_u64().map(|shift| zeros < shift).unwrap_or(true)
+ } else {
+ false
+ }
+}
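A brief sketch (not part of the diff) of the behaviour that the `shr_round_down` adjustment above produces: right shifts of negative values round toward negative infinity, as an arithmetic shift would:

```
use num_bigint::BigInt;

fn main() {
    // A one is shifted out of -5, so the result rounds down to -3.
    assert_eq!(BigInt::from(-5) >> 1, BigInt::from(-3));
    // Positive values simply truncate.
    assert_eq!(BigInt::from(5) >> 1, BigInt::from(2));
    // No ones shifted out, so no adjustment.
    assert_eq!(BigInt::from(-4) >> 1, BigInt::from(-2));
}
```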
diff --git a/rust/vendor/num-bigint/src/bigint/subtraction.rs b/rust/vendor/num-bigint/src/bigint/subtraction.rs
new file mode 100644
index 0000000..548f314
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigint/subtraction.rs
@@ -0,0 +1,300 @@
+use super::CheckedUnsignedAbs::{Negative, Positive};
+use super::Sign::{Minus, NoSign, Plus};
+use super::{BigInt, UnsignedAbs};
+
+use crate::{IsizePromotion, UsizePromotion};
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::mem;
+use core::ops::{Sub, SubAssign};
+use num_traits::{CheckedSub, Zero};
+
+// We want to forward to BigUint::sub, but it's not clear how that will go until
+// we compare both sign and magnitude. So we duplicate this body for every
+// val/ref combination, deferring that decision to BigUint's own forwarding.
+macro_rules! bigint_sub {
+ ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
+ match ($a.sign, $b.sign) {
+ (_, NoSign) => $a_owned,
+ (NoSign, _) => -$b_owned,
+ // opposite signs => keep the sign of the left with the sum of magnitudes
+ (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
+ // same sign => keep or toggle the sign of the left with the difference of magnitudes
+ (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) {
+ Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data),
+ Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
+ Equal => Zero::zero(),
+ },
+ }
+ };
+}
+
+impl Sub<&BigInt> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: &BigInt) -> BigInt {
+ bigint_sub!(
+ self,
+ self.clone(),
+ &self.data,
+ other,
+ other.clone(),
+ &other.data
+ )
+ }
+}
+
+impl Sub<BigInt> for &BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ bigint_sub!(self, self.clone(), &self.data, other, other, other.data)
+ }
+}
+
+impl Sub<&BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: &BigInt) -> BigInt {
+ bigint_sub!(self, self, self.data, other, other.clone(), &other.data)
+ }
+}
+
+impl Sub<BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ bigint_sub!(self, self, self.data, other, other, other.data)
+ }
+}
+
+impl SubAssign<&BigInt> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: &BigInt) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+forward_val_assign!(impl SubAssign for BigInt, sub_assign);
+
+promote_all_scalars!(impl Sub for BigInt, sub);
+promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigInt, sub);
+
+impl Sub<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u32) -> BigInt {
+ match self.sign {
+ NoSign => -BigInt::from(other),
+ Minus => -BigInt::from(self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from(self.data - other),
+ Less => -BigInt::from(other - self.data),
+ },
+ }
+ }
+}
+impl SubAssign<u32> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u32) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+impl Sub<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u64) -> BigInt {
+ match self.sign {
+ NoSign => -BigInt::from(other),
+ Minus => -BigInt::from(self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from(self.data - other),
+ Less => -BigInt::from(other - self.data),
+ },
+ }
+ }
+}
+
+impl SubAssign<u64> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+impl Sub<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u128) -> BigInt {
+ match self.sign {
+ NoSign => -BigInt::from(other),
+ Minus => -BigInt::from(self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from(self.data - other),
+ Less => -BigInt::from(other - self.data),
+ },
+ }
+ }
+}
+
+impl SubAssign<u128> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Sub<i32> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<i64> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<i128> for BigInt, sub);
+
+impl Sub<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i32) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self - u,
+ Negative(u) => self + u,
+ }
+ }
+}
+impl SubAssign<i32> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i32) {
+ match other.checked_uabs() {
+ Positive(u) => *self -= u,
+ Negative(u) => *self += u,
+ }
+ }
+}
+
+impl Sub<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u - other,
+ Negative(u) => -other - u,
+ }
+ }
+}
+
+impl Sub<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i64) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self - u,
+ Negative(u) => self + u,
+ }
+ }
+}
+impl SubAssign<i64> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i64) {
+ match other.checked_uabs() {
+ Positive(u) => *self -= u,
+ Negative(u) => *self += u,
+ }
+ }
+}
+
+impl Sub<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u - other,
+ Negative(u) => -other - u,
+ }
+ }
+}
+
+impl Sub<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i128) -> BigInt {
+ match other.checked_uabs() {
+ Positive(u) => self - u,
+ Negative(u) => self + u,
+ }
+ }
+}
+
+impl SubAssign<i128> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i128) {
+ match other.checked_uabs() {
+ Positive(u) => *self -= u,
+ Negative(u) => *self += u,
+ }
+ }
+}
+
+impl Sub<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ match self.checked_uabs() {
+ Positive(u) => u - other,
+ Negative(u) => -other - u,
+ }
+ }
+}
+
+impl CheckedSub for BigInt {
+ #[inline]
+ fn checked_sub(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.sub(v))
+ }
+}
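A small sketch (not part of the diff) of the scalar subtraction impls above, which accept primitive operands on either side and handle the sign change:

```
use num_bigint::BigInt;

fn main() {
    assert_eq!(5u32 - BigInt::from(7), BigInt::from(-2));
    assert_eq!(BigInt::from(3) - 10u64, BigInt::from(-7));

    let mut x = BigInt::from(-1);
    x -= -4i32; // subtracting a negative adds its magnitude
    assert_eq!(x, BigInt::from(3));
}
```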
diff --git a/rust/vendor/num-bigint/src/bigrand.rs b/rust/vendor/num-bigint/src/bigrand.rs
new file mode 100644
index 0000000..ec03224
--- /dev/null
+++ b/rust/vendor/num-bigint/src/bigrand.rs
@@ -0,0 +1,283 @@
+//! Randomization of big integers
+
+use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
+use rand::prelude::*;
+
+use crate::BigInt;
+use crate::BigUint;
+use crate::Sign::*;
+
+use crate::biguint::biguint_from_vec;
+
+use num_integer::Integer;
+use num_traits::{ToPrimitive, Zero};
+
+/// A trait for sampling random big integers.
+///
+/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
+pub trait RandBigInt {
+ /// Generate a random [`BigUint`] of the given bit size.
+ fn gen_biguint(&mut self, bit_size: u64) -> BigUint;
+
+ /// Generate a random [`BigInt`] of the given bit size.
+ fn gen_bigint(&mut self, bit_size: u64) -> BigInt;
+
+ /// Generate a random [`BigUint`] less than the given bound. Fails
+ /// when the bound is zero.
+ fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
+
+ /// Generate a random [`BigUint`] within the given range. The lower
+ /// bound is inclusive; the upper bound is exclusive. Fails when
+ /// the upper bound is not greater than the lower bound.
+ fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
+
+ /// Generate a random [`BigInt`] within the given range. The lower
+ /// bound is inclusive; the upper bound is exclusive. Fails when
+ /// the upper bound is not greater than the lower bound.
+ fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
+}
+
+fn gen_bits<R: Rng + ?Sized>(rng: &mut R, data: &mut [u32], rem: u64) {
+ // `fill` is faster than many `gen::<u32>` calls
+ rng.fill(data);
+ if rem > 0 {
+ let last = data.len() - 1;
+ data[last] >>= 32 - rem;
+ }
+}
+
+impl<R: Rng + ?Sized> RandBigInt for R {
+ #[cfg(not(u64_digit))]
+ fn gen_biguint(&mut self, bit_size: u64) -> BigUint {
+ let (digits, rem) = bit_size.div_rem(&32);
+ let len = (digits + (rem > 0) as u64)
+ .to_usize()
+ .expect("capacity overflow");
+ let mut data = vec![0u32; len];
+ gen_bits(self, &mut data, rem);
+ biguint_from_vec(data)
+ }
+
+ #[cfg(u64_digit)]
+ fn gen_biguint(&mut self, bit_size: u64) -> BigUint {
+ use core::slice;
+
+ let (digits, rem) = bit_size.div_rem(&32);
+ let len = (digits + (rem > 0) as u64)
+ .to_usize()
+ .expect("capacity overflow");
+ let native_digits = Integer::div_ceil(&bit_size, &64);
+ let native_len = native_digits.to_usize().expect("capacity overflow");
+ let mut data = vec![0u64; native_len];
+ unsafe {
+ // Generate bits in a `&mut [u32]` slice for value stability
+ let ptr = data.as_mut_ptr() as *mut u32;
+ debug_assert!(native_len * 2 >= len);
+ let data = slice::from_raw_parts_mut(ptr, len);
+ gen_bits(self, data, rem);
+ }
+ #[cfg(target_endian = "big")]
+ for digit in &mut data {
+ // swap u32 digits into u64 endianness
+ *digit = (*digit << 32) | (*digit >> 32);
+ }
+ biguint_from_vec(data)
+ }
+
+ fn gen_bigint(&mut self, bit_size: u64) -> BigInt {
+ loop {
+ // Generate a random BigUint...
+ let biguint = self.gen_biguint(bit_size);
+ // ...and then randomly assign it a Sign...
+ let sign = if biguint.is_zero() {
+ // ...except that if the BigUint is zero, we need to try
+ // again with probability 0.5. This is because otherwise,
+ // the probability of generating a zero BigInt would be
+ // double that of any other number.
+ if self.gen() {
+ continue;
+ } else {
+ NoSign
+ }
+ } else if self.gen() {
+ Plus
+ } else {
+ Minus
+ };
+ return BigInt::from_biguint(sign, biguint);
+ }
+ }
+
+ fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
+ assert!(!bound.is_zero());
+ let bits = bound.bits();
+ loop {
+ let n = self.gen_biguint(bits);
+ if n < *bound {
+ return n;
+ }
+ }
+ }
+
+ fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
+ assert!(*lbound < *ubound);
+ if lbound.is_zero() {
+ self.gen_biguint_below(ubound)
+ } else {
+ lbound + self.gen_biguint_below(&(ubound - lbound))
+ }
+ }
+
+ fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
+ assert!(*lbound < *ubound);
+ if lbound.is_zero() {
+ BigInt::from(self.gen_biguint_below(ubound.magnitude()))
+ } else if ubound.is_zero() {
+ lbound + BigInt::from(self.gen_biguint_below(lbound.magnitude()))
+ } else {
+ let delta = ubound - lbound;
+ lbound + BigInt::from(self.gen_biguint_below(delta.magnitude()))
+ }
+ }
+}
+
+/// The back-end implementing rand's [`UniformSampler`] for [`BigUint`].
+#[derive(Clone, Debug)]
+pub struct UniformBigUint {
+ base: BigUint,
+ len: BigUint,
+}
+
+impl UniformSampler for UniformBigUint {
+ type X = BigUint;
+
+ #[inline]
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ let low = low_b.borrow();
+ let high = high_b.borrow();
+ assert!(low < high);
+ UniformBigUint {
+ len: high - low,
+ base: low.clone(),
+ }
+ }
+
+ #[inline]
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ let low = low_b.borrow();
+ let high = high_b.borrow();
+ assert!(low <= high);
+ Self::new(low, high + 1u32)
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ &self.base + rng.gen_biguint_below(&self.len)
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ rng.gen_biguint_range(low.borrow(), high.borrow())
+ }
+}
+
+impl SampleUniform for BigUint {
+ type Sampler = UniformBigUint;
+}
+
+/// The back-end implementing rand's [`UniformSampler`] for [`BigInt`].
+#[derive(Clone, Debug)]
+pub struct UniformBigInt {
+ base: BigInt,
+ len: BigUint,
+}
+
+impl UniformSampler for UniformBigInt {
+ type X = BigInt;
+
+ #[inline]
+ fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ let low = low_b.borrow();
+ let high = high_b.borrow();
+ assert!(low < high);
+ UniformBigInt {
+ len: (high - low).into_parts().1,
+ base: low.clone(),
+ }
+ }
+
+ #[inline]
+ fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ let low = low_b.borrow();
+ let high = high_b.borrow();
+ assert!(low <= high);
+ Self::new(low, high + 1u32)
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ &self.base + BigInt::from(rng.gen_biguint_below(&self.len))
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
+ where
+ B1: SampleBorrow<Self::X> + Sized,
+ B2: SampleBorrow<Self::X> + Sized,
+ {
+ rng.gen_bigint_range(low.borrow(), high.borrow())
+ }
+}
+
+impl SampleUniform for BigInt {
+ type Sampler = UniformBigInt;
+}
+
+/// A random distribution for [`BigUint`] and [`BigInt`] values of a particular bit size.
+///
+/// The `rand` feature must be enabled to use this. See crate-level documentation for details.
+#[derive(Clone, Copy, Debug)]
+pub struct RandomBits {
+ bits: u64,
+}
+
+impl RandomBits {
+ #[inline]
+ pub fn new(bits: u64) -> RandomBits {
+ RandomBits { bits }
+ }
+}
+
+impl Distribution<BigUint> for RandomBits {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
+ rng.gen_biguint(self.bits)
+ }
+}
+
+impl Distribution<BigInt> for RandomBits {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
+ rng.gen_bigint(self.bits)
+ }
+}
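A usage sketch (not part of the diff) for the `RandBigInt` extension trait defined above; it assumes the crate's optional `rand` feature and the `rand` crate itself:

```
use num_bigint::{BigUint, RandBigInt};

fn main() {
    let mut rng = rand::thread_rng();

    // A random value with at most 256 bits.
    let a = rng.gen_biguint(256);
    assert!(a.bits() <= 256);

    // A random value in the half-open range [low, high).
    let low = BigUint::from(10u32);
    let high = BigUint::from(10_000u32);
    let b = rng.gen_biguint_range(&low, &high);
    assert!(low <= b && b < high);
}
```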
diff --git a/rust/vendor/num-bigint/src/biguint.rs b/rust/vendor/num-bigint/src/biguint.rs
new file mode 100644
index 0000000..1554eb0
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint.rs
@@ -0,0 +1,1130 @@
+use crate::big_digit::{self, BigDigit};
+use crate::std_alloc::{String, Vec};
+
+use core::cmp;
+use core::cmp::Ordering;
+use core::default::Default;
+use core::fmt;
+use core::hash;
+use core::mem;
+use core::str;
+use core::{u32, u64, u8};
+
+use num_integer::{Integer, Roots};
+use num_traits::{Num, One, Pow, ToPrimitive, Unsigned, Zero};
+
+mod addition;
+mod division;
+mod multiplication;
+mod subtraction;
+
+mod bits;
+mod convert;
+mod iter;
+mod monty;
+mod power;
+mod shift;
+
+#[cfg(any(feature = "quickcheck", feature = "arbitrary"))]
+mod arbitrary;
+
+#[cfg(feature = "serde")]
+mod serde;
+
+pub(crate) use self::convert::to_str_radix_reversed;
+pub use self::iter::{U32Digits, U64Digits};
+
+/// A big unsigned integer type.
+pub struct BigUint {
+ data: Vec<BigDigit>,
+}
+
+// Note: derived `Clone` doesn't specialize `clone_from`,
+// but we want to keep the allocation in `data`.
+impl Clone for BigUint {
+ #[inline]
+ fn clone(&self) -> Self {
+ BigUint {
+ data: self.data.clone(),
+ }
+ }
+
+ #[inline]
+ fn clone_from(&mut self, other: &Self) {
+ self.data.clone_from(&other.data);
+ }
+}
+
+impl hash::Hash for BigUint {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ debug_assert!(self.data.last() != Some(&0));
+ self.data.hash(state);
+ }
+}
+
+impl PartialEq for BigUint {
+ #[inline]
+ fn eq(&self, other: &BigUint) -> bool {
+ debug_assert!(self.data.last() != Some(&0));
+ debug_assert!(other.data.last() != Some(&0));
+ self.data == other.data
+ }
+}
+impl Eq for BigUint {}
+
+impl PartialOrd for BigUint {
+ #[inline]
+ fn partial_cmp(&self, other: &BigUint) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for BigUint {
+ #[inline]
+ fn cmp(&self, other: &BigUint) -> Ordering {
+ cmp_slice(&self.data[..], &other.data[..])
+ }
+}
+
+#[inline]
+fn cmp_slice(a: &[BigDigit], b: &[BigDigit]) -> Ordering {
+ debug_assert!(a.last() != Some(&0));
+ debug_assert!(b.last() != Some(&0));
+
+ match Ord::cmp(&a.len(), &b.len()) {
+ Ordering::Equal => Iterator::cmp(a.iter().rev(), b.iter().rev()),
+ other => other,
+ }
+}
+
+impl Default for BigUint {
+ #[inline]
+ fn default() -> BigUint {
+ Zero::zero()
+ }
+}
+
+impl fmt::Debug for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl fmt::Display for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(true, "", &self.to_str_radix(10))
+ }
+}
+
+impl fmt::LowerHex for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(true, "0x", &self.to_str_radix(16))
+ }
+}
+
+impl fmt::UpperHex for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut s = self.to_str_radix(16);
+ s.make_ascii_uppercase();
+ f.pad_integral(true, "0x", &s)
+ }
+}
+
+impl fmt::Binary for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(true, "0b", &self.to_str_radix(2))
+ }
+}
+
+impl fmt::Octal for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(true, "0o", &self.to_str_radix(8))
+ }
+}
+
+impl Zero for BigUint {
+ #[inline]
+ fn zero() -> BigUint {
+ BigUint { data: Vec::new() }
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.data.clear();
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.data.is_empty()
+ }
+}
+
+impl One for BigUint {
+ #[inline]
+ fn one() -> BigUint {
+ BigUint { data: vec![1] }
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.data.clear();
+ self.data.push(1);
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.data[..] == [1]
+ }
+}
+
+impl Unsigned for BigUint {}
+
+impl Integer for BigUint {
+ #[inline]
+ fn div_rem(&self, other: &BigUint) -> (BigUint, BigUint) {
+ division::div_rem_ref(self, other)
+ }
+
+ #[inline]
+ fn div_floor(&self, other: &BigUint) -> BigUint {
+ let (d, _) = division::div_rem_ref(self, other);
+ d
+ }
+
+ #[inline]
+ fn mod_floor(&self, other: &BigUint) -> BigUint {
+ let (_, m) = division::div_rem_ref(self, other);
+ m
+ }
+
+ #[inline]
+ fn div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint) {
+ division::div_rem_ref(self, other)
+ }
+
+ #[inline]
+ fn div_ceil(&self, other: &BigUint) -> BigUint {
+ let (d, m) = division::div_rem_ref(self, other);
+ if m.is_zero() {
+ d
+ } else {
+ d + 1u32
+ }
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and `other`.
+ ///
+ /// The result is always positive.
+ #[inline]
+ fn gcd(&self, other: &Self) -> Self {
+ #[inline]
+ fn twos(x: &BigUint) -> u64 {
+ x.trailing_zeros().unwrap_or(0)
+ }
+
+ // Stein's algorithm
+ if self.is_zero() {
+ return other.clone();
+ }
+ if other.is_zero() {
+ return self.clone();
+ }
+ let mut m = self.clone();
+ let mut n = other.clone();
+
+ // find common factors of 2
+ let shift = cmp::min(twos(&n), twos(&m));
+
+ // divide m and n by 2 until odd
+ // m inside loop
+ n >>= twos(&n);
+
+ while !m.is_zero() {
+ m >>= twos(&m);
+ if n > m {
+ mem::swap(&mut n, &mut m)
+ }
+ m -= &n;
+ }
+
+ n << shift
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn lcm(&self, other: &BigUint) -> BigUint {
+ if self.is_zero() && other.is_zero() {
+ Self::zero()
+ } else {
+ self / self.gcd(other) * other
+ }
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) and
+ /// Lowest Common Multiple (LCM) together.
+ #[inline]
+ fn gcd_lcm(&self, other: &Self) -> (Self, Self) {
+ let gcd = self.gcd(other);
+ let lcm = if gcd.is_zero() {
+ Self::zero()
+ } else {
+ self / &gcd * other
+ };
+ (gcd, lcm)
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &BigUint) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &BigUint) -> bool {
+ if other.is_zero() {
+ return self.is_zero();
+ }
+ (self % other).is_zero()
+ }
+
+ /// Returns `true` if the number is divisible by `2`.
+ #[inline]
+ fn is_even(&self) -> bool {
+ // Considering only the last digit.
+ match self.data.first() {
+ Some(x) => x.is_even(),
+ None => true,
+ }
+ }
+
+ /// Returns `true` if the number is not divisible by `2`.
+ #[inline]
+ fn is_odd(&self) -> bool {
+ !self.is_even()
+ }
+
+ /// Rounds up to nearest multiple of argument.
+ #[inline]
+ fn next_multiple_of(&self, other: &Self) -> Self {
+ let m = self.mod_floor(other);
+ if m.is_zero() {
+ self.clone()
+ } else {
+ self + (other - m)
+ }
+ }
+ /// Rounds down to nearest multiple of argument.
+ #[inline]
+ fn prev_multiple_of(&self, other: &Self) -> Self {
+ self - self.mod_floor(other)
+ }
+}
+
+#[inline]
+fn fixpoint<F>(mut x: BigUint, max_bits: u64, f: F) -> BigUint
+where
+ F: Fn(&BigUint) -> BigUint,
+{
+ let mut xn = f(&x);
+
+ // If the value increased, then the initial guess must have been low.
+ // Repeat until we reverse course.
+ while x < xn {
+ // Sometimes an increase will go way too far, especially with large
+ // powers, and then take a long time to walk back. We know an upper
+ // bound based on bit size, so saturate on that.
+ x = if xn.bits() > max_bits {
+ BigUint::one() << max_bits
+ } else {
+ xn
+ };
+ xn = f(&x);
+ }
+
+ // Now keep repeating while the estimate is decreasing.
+ while x > xn {
+ x = xn;
+ xn = f(&x);
+ }
+ x
+}
+
+impl Roots for BigUint {
+ // nth_root, sqrt and cbrt use Newton's method to compute
+ // principal root of a given degree for a given integer.
+
+ // Reference:
+ // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.14
+ fn nth_root(&self, n: u32) -> Self {
+ assert!(n > 0, "root degree n must be at least 1");
+
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ match n {
+ // Optimize for small n
+ 1 => return self.clone(),
+ 2 => return self.sqrt(),
+ 3 => return self.cbrt(),
+ _ => (),
+ }
+
+ // The root of non-zero values less than 2ⁿ can only be 1.
+ let bits = self.bits();
+ let n64 = u64::from(n);
+ if bits <= n64 {
+ return BigUint::one();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.nth_root(n).into();
+ }
+
+ let max_bits = bits / n64 + 1;
+
+ #[cfg(feature = "std")]
+ let guess = match self.to_f64() {
+ Some(f) if f.is_finite() => {
+ use num_traits::FromPrimitive;
+
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64((f.ln() / f64::from(n)).exp()).unwrap()
+ }
+ _ => {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ)
+ let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1);
+ let root_scale = Integer::div_ceil(&extra_bits, &n64);
+ let scale = root_scale * n64;
+ if scale < bits && bits - scale > n64 {
+ (self >> scale).nth_root(n) << root_scale
+ } else {
+ BigUint::one() << max_bits
+ }
+ }
+ };
+
+ #[cfg(not(feature = "std"))]
+ let guess = BigUint::one() << max_bits;
+
+ let n_min_1 = n - 1;
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / s.pow(n_min_1);
+ let t = n_min_1 * s + q;
+ t / n
+ })
+ }
+
+ // Reference:
+ // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13
+ fn sqrt(&self) -> Self {
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.sqrt().into();
+ }
+
+ let bits = self.bits();
+ let max_bits = bits / 2 + 1;
+
+ #[cfg(feature = "std")]
+ let guess = match self.to_f64() {
+ Some(f) if f.is_finite() => {
+ use num_traits::FromPrimitive;
+
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64(f.sqrt()).unwrap()
+ }
+ _ => {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ)
+ let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1);
+ let root_scale = (extra_bits + 1) / 2;
+ let scale = root_scale * 2;
+ (self >> scale).sqrt() << root_scale
+ }
+ };
+
+ #[cfg(not(feature = "std"))]
+ let guess = BigUint::one() << max_bits;
+
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / s;
+ let t = s + q;
+ t >> 1
+ })
+ }
+
+ fn cbrt(&self) -> Self {
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.cbrt().into();
+ }
+
+ let bits = self.bits();
+ let max_bits = bits / 3 + 1;
+
+ #[cfg(feature = "std")]
+ let guess = match self.to_f64() {
+ Some(f) if f.is_finite() => {
+ use num_traits::FromPrimitive;
+
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64(f.cbrt()).unwrap()
+ }
+ _ => {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ)
+ let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1);
+ let root_scale = (extra_bits + 2) / 3;
+ let scale = root_scale * 3;
+ (self >> scale).cbrt() << root_scale
+ }
+ };
+
+ #[cfg(not(feature = "std"))]
+ let guess = BigUint::one() << max_bits;
+
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / (s * s);
+ let t = (s << 1) + q;
+ t / 3u32
+ })
+ }
+}
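+
+// Illustrative usage (editorial sketch, not part of the upstream source): roots of
+// exact powers are recovered exactly, e.g.
+// let x = BigUint::from(10u32).pow(40);
+// assert_eq!(x.sqrt(), BigUint::from(10u32).pow(20));
+// assert_eq!(x.nth_root(5), BigUint::from(10u32).pow(8));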
+
+/// A generic trait for converting a value to a [`BigUint`].
+pub trait ToBigUint {
+ /// Converts the value of `self` to a [`BigUint`].
+ fn to_biguint(&self) -> Option<BigUint>;
+}
+
+/// Creates and initializes a [`BigUint`].
+///
+/// The digits are in little-endian order, in the base matching `BigDigit`.
+#[inline]
+pub(crate) fn biguint_from_vec(digits: Vec<BigDigit>) -> BigUint {
+ BigUint { data: digits }.normalized()
+}
+
+impl BigUint {
+ /// Creates and initializes a [`BigUint`].
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn new(digits: Vec<u32>) -> BigUint {
+ let mut big = BigUint::zero();
+
+ #[cfg(not(u64_digit))]
+ {
+ big.data = digits;
+ big.normalize();
+ }
+
+ #[cfg(u64_digit)]
+ big.assign_from_slice(&digits);
+
+ big
+ }
+
+ /// Creates and initializes a [`BigUint`].
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn from_slice(slice: &[u32]) -> BigUint {
+ let mut big = BigUint::zero();
+ big.assign_from_slice(slice);
+ big
+ }
+
+ /// Assign a value to a [`BigUint`].
+ ///
+ /// The base 2<sup>32</sup> digits are ordered least significant digit first.
+ #[inline]
+ pub fn assign_from_slice(&mut self, slice: &[u32]) {
+ self.data.clear();
+
+ #[cfg(not(u64_digit))]
+ self.data.extend_from_slice(slice);
+
+ #[cfg(u64_digit)]
+ self.data.extend(slice.chunks(2).map(u32_chunk_to_u64));
+
+ self.normalize();
+ }
+
+ /// Creates and initializes a [`BigUint`].
+ ///
+ /// The bytes are in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from_bytes_be(b"A"),
+ /// BigUint::parse_bytes(b"65", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"AA"),
+ /// BigUint::parse_bytes(b"16705", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"AB"),
+ /// BigUint::parse_bytes(b"16706", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"Hello world!"),
+ /// BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap());
+ /// ```
+ #[inline]
+ pub fn from_bytes_be(bytes: &[u8]) -> BigUint {
+ if bytes.is_empty() {
+ Zero::zero()
+ } else {
+ let mut v = bytes.to_vec();
+ v.reverse();
+ BigUint::from_bytes_le(&v)
+ }
+ }
+
+ /// Creates and initializes a [`BigUint`].
+ ///
+ /// The bytes are in little-endian byte order.
+ #[inline]
+ pub fn from_bytes_le(bytes: &[u8]) -> BigUint {
+ if bytes.is_empty() {
+ Zero::zero()
+ } else {
+ convert::from_bitwise_digits_le(bytes, 8)
+ }
+ }
+
+ /// Creates and initializes a [`BigUint`]. The input slice must contain
+ /// ASCII/UTF-8 characters in [0-9a-zA-Z].
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// The function `from_str_radix` from the `Num` trait provides the same logic
+ /// for `&str` buffers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint, ToBigUint};
+ ///
+ /// assert_eq!(BigUint::parse_bytes(b"1234", 10), ToBigUint::to_biguint(&1234));
+ /// assert_eq!(BigUint::parse_bytes(b"ABCD", 16), ToBigUint::to_biguint(&0xABCD));
+ /// assert_eq!(BigUint::parse_bytes(b"G", 16), None);
+ /// ```
+ #[inline]
+ pub fn parse_bytes(buf: &[u8], radix: u32) -> Option<BigUint> {
+ let s = str::from_utf8(buf).ok()?;
+ BigUint::from_str_radix(s, radix).ok()
+ }
+
+ /// Creates and initializes a [`BigUint`]. Each `u8` of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in big-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint};
+ ///
+ /// let inbase190 = &[15, 33, 125, 12, 14];
+ /// let a = BigUint::from_radix_be(inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_be(190), inbase190);
+ /// ```
+ pub fn from_radix_be(buf: &[u8], radix: u32) -> Option<BigUint> {
+ convert::from_radix_be(buf, radix)
+ }
+
+ /// Creates and initializes a [`BigUint`]. Each `u8` of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in little-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint};
+ ///
+ /// let inbase190 = &[14, 12, 125, 33, 15];
+ /// let a = BigUint::from_radix_le(inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_le(190), inbase190);
+ /// ```
+ pub fn from_radix_le(buf: &[u8], radix: u32) -> Option<BigUint> {
+ convert::from_radix_le(buf, radix)
+ }
+
+ /// Returns the byte representation of the [`BigUint`] in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
+ /// assert_eq!(i.to_bytes_be(), vec![4, 101]);
+ /// ```
+ #[inline]
+ pub fn to_bytes_be(&self) -> Vec<u8> {
+ let mut v = self.to_bytes_le();
+ v.reverse();
+ v
+ }
+
+ /// Returns the byte representation of the [`BigUint`] in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
+ /// assert_eq!(i.to_bytes_le(), vec![101, 4]);
+ /// ```
+ #[inline]
+ pub fn to_bytes_le(&self) -> Vec<u8> {
+ if self.is_zero() {
+ vec![0]
+ } else {
+ convert::to_bitwise_digits_le(self, 8)
+ }
+ }
+
+ /// Returns the `u32` digits representation of the [`BigUint`] ordered least significant digit
+ /// first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(1125u32).to_u32_digits(), vec![1125]);
+ /// assert_eq!(BigUint::from(4294967295u32).to_u32_digits(), vec![4294967295]);
+ /// assert_eq!(BigUint::from(4294967296u64).to_u32_digits(), vec![0, 1]);
+ /// assert_eq!(BigUint::from(112500000000u64).to_u32_digits(), vec![830850304, 26]);
+ /// ```
+ #[inline]
+ pub fn to_u32_digits(&self) -> Vec<u32> {
+ self.iter_u32_digits().collect()
+ }
+
+ /// Returns the `u64` digits representation of the [`BigUint`] ordered least significant digit
+ /// first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(1125u32).to_u64_digits(), vec![1125]);
+ /// assert_eq!(BigUint::from(4294967295u32).to_u64_digits(), vec![4294967295]);
+ /// assert_eq!(BigUint::from(4294967296u64).to_u64_digits(), vec![4294967296]);
+ /// assert_eq!(BigUint::from(112500000000u64).to_u64_digits(), vec![112500000000]);
+ /// assert_eq!(BigUint::from(1u128 << 64).to_u64_digits(), vec![0, 1]);
+ /// ```
+ #[inline]
+ pub fn to_u64_digits(&self) -> Vec<u64> {
+ self.iter_u64_digits().collect()
+ }
+
+ /// Returns an iterator of `u32` digits representation of the [`BigUint`] ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(1125u32).iter_u32_digits().collect::<Vec<u32>>(), vec![1125]);
+ /// assert_eq!(BigUint::from(4294967295u32).iter_u32_digits().collect::<Vec<u32>>(), vec![4294967295]);
+ /// assert_eq!(BigUint::from(4294967296u64).iter_u32_digits().collect::<Vec<u32>>(), vec![0, 1]);
+ /// assert_eq!(BigUint::from(112500000000u64).iter_u32_digits().collect::<Vec<u32>>(), vec![830850304, 26]);
+ /// ```
+ #[inline]
+ pub fn iter_u32_digits(&self) -> U32Digits<'_> {
+ U32Digits::new(self.data.as_slice())
+ }
+
+ /// Returns an iterator of `u64` digits representation of the [`BigUint`] ordered least
+ /// significant digit first.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(1125u32).iter_u64_digits().collect::<Vec<u64>>(), vec![1125]);
+ /// assert_eq!(BigUint::from(4294967295u32).iter_u64_digits().collect::<Vec<u64>>(), vec![4294967295]);
+ /// assert_eq!(BigUint::from(4294967296u64).iter_u64_digits().collect::<Vec<u64>>(), vec![4294967296]);
+ /// assert_eq!(BigUint::from(112500000000u64).iter_u64_digits().collect::<Vec<u64>>(), vec![112500000000]);
+ /// assert_eq!(BigUint::from(1u128 << 64).iter_u64_digits().collect::<Vec<u64>>(), vec![0, 1]);
+ /// ```
+ #[inline]
+ pub fn iter_u64_digits(&self) -> U64Digits<'_> {
+ U64Digits::new(self.data.as_slice())
+ }
+
+ /// Returns the integer formatted as a string in the given radix.
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"ff", 16).unwrap();
+ /// assert_eq!(i.to_str_radix(16), "ff");
+ /// ```
+ #[inline]
+ pub fn to_str_radix(&self, radix: u32) -> String {
+ let mut v = to_str_radix_reversed(self, radix);
+ v.reverse();
+ unsafe { String::from_utf8_unchecked(v) }
+ }
+
+ /// Returns the integer in the requested base in big-endian digit order.
+ /// The output is not given in a human-readable alphabet but as a
+ /// zero-based `u8` number.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_be(159),
+ /// vec![2, 94, 27]);
+ /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27
+ /// ```
+ #[inline]
+ pub fn to_radix_be(&self, radix: u32) -> Vec<u8> {
+ let mut v = convert::to_radix_le(self, radix);
+ v.reverse();
+ v
+ }
+
+ /// Returns the integer in the requested base in little-endian digit order.
+ /// The output is not given in a human-readable alphabet but as a
+ /// zero-based `u8` number.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_le(159),
+ /// vec![27, 94, 2]);
+ /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2)
+ /// ```
+ #[inline]
+ pub fn to_radix_le(&self, radix: u32) -> Vec<u8> {
+ convert::to_radix_le(self, radix)
+ }
+
+ /// Determines the fewest bits necessary to express the [`BigUint`].
+ #[inline]
+ pub fn bits(&self) -> u64 {
+ if self.is_zero() {
+ return 0;
+ }
+ let zeros: u64 = self.data.last().unwrap().leading_zeros().into();
+ self.data.len() as u64 * u64::from(big_digit::BITS) - zeros
+ }
+
+ /// Strips off trailing zero bigdigits - comparisons require the last element in the vector to
+ /// be nonzero.
+ #[inline]
+ fn normalize(&mut self) {
+ if let Some(&0) = self.data.last() {
+ let len = self.data.iter().rposition(|&d| d != 0).map_or(0, |i| i + 1);
+ self.data.truncate(len);
+ }
+ if self.data.len() < self.data.capacity() / 4 {
+ self.data.shrink_to_fit();
+ }
+ }
+
+ /// Returns a normalized [`BigUint`].
+ #[inline]
+ fn normalized(mut self) -> BigUint {
+ self.normalize();
+ self
+ }
+
+ /// Returns `self ^ exponent`.
+ pub fn pow(&self, exponent: u32) -> Self {
+ Pow::pow(self, exponent)
+ }
+
+ /// Returns `(self ^ exponent) % modulus`.
+ ///
+ /// Panics if the modulus is zero.
+ pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
+ power::modpow(self, exponent, modulus)
+ }
+
+ /// Returns the truncated principal square root of `self` --
+ /// see [Roots::sqrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.sqrt)
+ pub fn sqrt(&self) -> Self {
+ Roots::sqrt(self)
+ }
+
+ /// Returns the truncated principal cube root of `self` --
+ /// see [Roots::cbrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.cbrt).
+ pub fn cbrt(&self) -> Self {
+ Roots::cbrt(self)
+ }
+
+ /// Returns the truncated principal `n`th root of `self` --
+ /// see [Roots::nth_root](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#tymethod.nth_root).
+ pub fn nth_root(&self, n: u32) -> Self {
+ Roots::nth_root(self, n)
+ }
+
+ /// Returns the number of least-significant bits that are zero,
+ /// or `None` if the entire number is zero.
+ pub fn trailing_zeros(&self) -> Option<u64> {
+ let i = self.data.iter().position(|&digit| digit != 0)?;
+ let zeros: u64 = self.data[i].trailing_zeros().into();
+ Some(i as u64 * u64::from(big_digit::BITS) + zeros)
+ }
+
+ /// Returns the number of least-significant bits that are ones.
+ pub fn trailing_ones(&self) -> u64 {
+ if let Some(i) = self.data.iter().position(|&digit| !digit != 0) {
+ // XXX u64::trailing_ones() introduced in Rust 1.46,
+ // but we need to be compatible further back.
+ // Thanks to cuviper for this workaround.
+ let ones: u64 = (!self.data[i]).trailing_zeros().into();
+ i as u64 * u64::from(big_digit::BITS) + ones
+ } else {
+ self.data.len() as u64 * u64::from(big_digit::BITS)
+ }
+ }
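+
+ // Illustrative note (editorial sketch, not part of the upstream source):
+ // BigUint::from(0b0101_1000u32).trailing_zeros() == Some(3),
+ // BigUint::from(0b0101_0111u32).trailing_ones() == 3, and
+ // BigUint::zero().trailing_zeros() == None.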
+
+ /// Returns the number of one bits.
+ pub fn count_ones(&self) -> u64 {
+ self.data.iter().map(|&d| u64::from(d.count_ones())).sum()
+ }
+
+ /// Returns whether the bit in the given position is set.
+ pub fn bit(&self, bit: u64) -> bool {
+ let bits_per_digit = u64::from(big_digit::BITS);
+ if let Some(digit_index) = (bit / bits_per_digit).to_usize() {
+ if let Some(digit) = self.data.get(digit_index) {
+ let bit_mask = (1 as BigDigit) << (bit % bits_per_digit);
+ return (digit & bit_mask) != 0;
+ }
+ }
+ false
+ }
+
+ /// Sets or clears the bit in the given position.
+ ///
+ /// Note that setting a bit beyond the current bit length may require a reallocation
+ /// to store the new digits.
+ pub fn set_bit(&mut self, bit: u64, value: bool) {
+ // Note: we're saturating `digit_index` and `new_len` -- any such case is guaranteed to
+ // fail allocation, and that's more consistent than adding our own overflow panics.
+ let bits_per_digit = u64::from(big_digit::BITS);
+ let digit_index = (bit / bits_per_digit)
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let bit_mask = (1 as BigDigit) << (bit % bits_per_digit);
+ if value {
+ if digit_index >= self.data.len() {
+ let new_len = digit_index.saturating_add(1);
+ self.data.resize(new_len, 0);
+ }
+ self.data[digit_index] |= bit_mask;
+ } else if digit_index < self.data.len() {
+ self.data[digit_index] &= !bit_mask;
+ // the top bit may have been cleared, so normalize
+ self.normalize();
+ }
+ }
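+
+ // Illustrative note (editorial sketch, not part of the upstream source): setting a bit
+ // far above the current bit length grows the digit vector, e.g.
+ // let mut x = BigUint::one();
+ // x.set_bit(200, true); // x is now 2^200 + 1
+ // assert!(x.bit(200) && x.bit(0) && !x.bit(100));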
+}
+
+impl num_traits::FromBytes for BigUint {
+ type Bytes = [u8];
+
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_bytes_be(bytes)
+ }
+
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_bytes_le(bytes)
+ }
+}
+
+impl num_traits::ToBytes for BigUint {
+ type Bytes = Vec<u8>;
+
+ fn to_be_bytes(&self) -> Self::Bytes {
+ self.to_bytes_be()
+ }
+
+ fn to_le_bytes(&self) -> Self::Bytes {
+ self.to_bytes_le()
+ }
+}
+
+pub(crate) trait IntDigits {
+ fn digits(&self) -> &[BigDigit];
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit>;
+ fn normalize(&mut self);
+ fn capacity(&self) -> usize;
+ fn len(&self) -> usize;
+}
+
+impl IntDigits for BigUint {
+ #[inline]
+ fn digits(&self) -> &[BigDigit] {
+ &self.data
+ }
+ #[inline]
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
+ &mut self.data
+ }
+ #[inline]
+ fn normalize(&mut self) {
+ self.normalize();
+ }
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len()
+ }
+}
+
+/// Convert a `u32` chunk (len is either 1 or 2) to a single `u64` digit
+#[inline]
+fn u32_chunk_to_u64(chunk: &[u32]) -> u64 {
+ // the original slice may have odd length, so this chunk may hold only one `u32`
+ let mut digit = chunk[0] as u64;
+ if let Some(&hi) = chunk.get(1) {
+ digit |= (hi as u64) << 32;
+ }
+ digit
+}
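+
+// Illustrative note (editorial sketch, not part of the upstream source):
+// u32_chunk_to_u64(&[0x1, 0x2]) == 0x0000_0002_0000_0001, while
+// u32_chunk_to_u64(&[0x1]) == 0x1 for a lone low word left over from an odd-length slice.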
+
+/// Combine four `u32`s into a single `u128`.
+#[cfg(any(test, not(u64_digit)))]
+#[inline]
+fn u32_to_u128(a: u32, b: u32, c: u32, d: u32) -> u128 {
+ u128::from(d) | (u128::from(c) << 32) | (u128::from(b) << 64) | (u128::from(a) << 96)
+}
+
+/// Split a single `u128` into four `u32`.
+#[cfg(any(test, not(u64_digit)))]
+#[inline]
+fn u32_from_u128(n: u128) -> (u32, u32, u32, u32) {
+ (
+ (n >> 96) as u32,
+ (n >> 64) as u32,
+ (n >> 32) as u32,
+ n as u32,
+ )
+}
+
+#[cfg(not(u64_digit))]
+#[test]
+fn test_from_slice() {
+ fn check(slice: &[u32], data: &[BigDigit]) {
+ assert_eq!(BigUint::from_slice(slice).data, data);
+ }
+ check(&[1], &[1]);
+ check(&[0, 0, 0], &[]);
+ check(&[1, 2, 0, 0], &[1, 2]);
+ check(&[0, 0, 1, 2], &[0, 0, 1, 2]);
+ check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]);
+ check(&[-1i32 as u32], &[-1i32 as BigDigit]);
+}
+
+#[cfg(u64_digit)]
+#[test]
+fn test_from_slice() {
+ fn check(slice: &[u32], data: &[BigDigit]) {
+ assert_eq!(
+ BigUint::from_slice(slice).data,
+ data,
+ "from {:?}, to {:?}",
+ slice,
+ data
+ );
+ }
+ check(&[1], &[1]);
+ check(&[0, 0, 0], &[]);
+ check(&[1, 2], &[8_589_934_593]);
+ check(&[1, 2, 0, 0], &[8_589_934_593]);
+ check(&[0, 0, 1, 2], &[0, 8_589_934_593]);
+ check(&[0, 0, 1, 2, 0, 0], &[0, 8_589_934_593]);
+ check(&[-1i32 as u32], &[(-1i32 as u32) as BigDigit]);
+}
+
+#[test]
+fn test_u32_u128() {
+ assert_eq!(u32_from_u128(0u128), (0, 0, 0, 0));
+ assert_eq!(
+ u32_from_u128(u128::max_value()),
+ (
+ u32::max_value(),
+ u32::max_value(),
+ u32::max_value(),
+ u32::max_value()
+ )
+ );
+
+ assert_eq!(
+ u32_from_u128(u32::max_value() as u128),
+ (0, 0, 0, u32::max_value())
+ );
+
+ assert_eq!(
+ u32_from_u128(u64::max_value() as u128),
+ (0, 0, u32::max_value(), u32::max_value())
+ );
+
+ assert_eq!(
+ u32_from_u128((u64::max_value() as u128) + u32::max_value() as u128),
+ (0, 1, 0, u32::max_value() - 1)
+ );
+
+ assert_eq!(u32_from_u128(36_893_488_151_714_070_528), (0, 2, 1, 0));
+}
+
+#[test]
+fn test_u128_u32_roundtrip() {
+ // roundtrips
+ let values = vec![
+ 0u128,
+ 1u128,
+ u64::max_value() as u128 * 3,
+ u32::max_value() as u128,
+ u64::max_value() as u128,
+ (u64::max_value() as u128) + u32::max_value() as u128,
+ u128::max_value(),
+ ];
+
+ for val in &values {
+ let (a, b, c, d) = u32_from_u128(*val);
+ assert_eq!(u32_to_u128(a, b, c, d), *val);
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/addition.rs b/rust/vendor/num-bigint/src/biguint/addition.rs
new file mode 100644
index 0000000..ac6c0dd
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/addition.rs
@@ -0,0 +1,254 @@
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::{BigUint, IntDigits};
+
+use crate::big_digit::{self, BigDigit};
+use crate::UsizePromotion;
+
+use core::iter::Sum;
+use core::ops::{Add, AddAssign};
+use num_traits::{CheckedAdd, Zero};
+
+#[cfg(all(use_addcarry, target_arch = "x86_64"))]
+use core::arch::x86_64 as arch;
+
+#[cfg(all(use_addcarry, target_arch = "x86"))]
+use core::arch::x86 as arch;
+
+// Add with carry:
+#[cfg(all(use_addcarry, u64_digit))]
+#[inline]
+fn adc(carry: u8, a: u64, b: u64, out: &mut u64) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_addcarry_u64`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_addcarry_u64(carry, a, b, out) }
+}
+
+#[cfg(all(use_addcarry, not(u64_digit)))]
+#[inline]
+fn adc(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_addcarry_u32`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_addcarry_u32(carry, a, b, out) }
+}
+
+// fallback for environments where we don't have an addcarry intrinsic
+#[cfg(not(use_addcarry))]
+#[inline]
+fn adc(carry: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 {
+ use crate::big_digit::DoubleBigDigit;
+
+ let sum = DoubleBigDigit::from(a) + DoubleBigDigit::from(b) + DoubleBigDigit::from(carry);
+ *out = sum as BigDigit;
+ (sum >> big_digit::BITS) as u8
+}
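+
+// Illustrative note (editorial sketch, not part of the upstream source): the fallback
+// forms the full sum in a double-width integer, e.g. adc(1, big_digit::MAX, 0, &mut out)
+// stores 0 in `out` and returns a carry of 1, since MAX + 0 + 1 wraps around exactly once.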
+
+/// Two argument addition of raw slices, `a += b`, returning the carry.
+///
+/// This is used when the data `Vec` might need to resize to push a non-zero carry, so we perform
+/// the addition first hoping that it will fit.
+///
+/// The caller _must_ ensure that `a` is at least as long as `b`.
+#[inline]
+pub(super) fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit {
+ debug_assert!(a.len() >= b.len());
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = a.split_at_mut(b.len());
+
+ for (a, b) in a_lo.iter_mut().zip(b) {
+ carry = adc(carry, *a, *b, a);
+ }
+
+ if carry != 0 {
+ for a in a_hi {
+ carry = adc(carry, *a, 0, a);
+ if carry == 0 {
+ break;
+ }
+ }
+ }
+
+ carry as BigDigit
+}
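+
+// Illustrative note (editorial sketch, not part of the upstream source): with
+// a = [big_digit::MAX, 0] and b = [1], __add2 leaves a = [0, 1] and returns 0, because
+// the carry out of the low digit is absorbed by the spare high digit of `a`.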
+
+/// Two argument addition of raw slices:
+/// a += b
+///
+/// The caller _must_ ensure that a is big enough to store the result - typically this means
+/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry.
+pub(super) fn add2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let carry = __add2(a, b);
+
+ debug_assert!(carry == 0);
+}
+
+forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add);
+forward_val_assign!(impl AddAssign for BigUint, add_assign);
+
+impl Add<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn add(mut self, other: &BigUint) -> BigUint {
+ self += other;
+ self
+ }
+}
+impl AddAssign<&BigUint> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: &BigUint) {
+ let self_len = self.data.len();
+ let carry = if self_len < other.data.len() {
+ let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]);
+ self.data.extend_from_slice(&other.data[self_len..]);
+ __add2(&mut self.data[self_len..], &[lo_carry])
+ } else {
+ __add2(&mut self.data[..], &other.data[..])
+ };
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+}
+
+promote_unsigned_scalars!(impl Add for BigUint, add);
+promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigUint, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigUint, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigUint, add);
+
+impl Add<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u32) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u32> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: u32) {
+ if other != 0 {
+ if self.data.is_empty() {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[other as BigDigit]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Add<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u64) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u64> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ if hi == 0 {
+ *self += lo;
+ } else {
+ while self.data.len() < 2 {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[lo, hi]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ if other != 0 {
+ if self.data.is_empty() {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[other as BigDigit]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Add<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u128) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u128> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ if other <= u128::from(u64::max_value()) {
+ *self += other as u64
+ } else {
+ let (a, b, c, d) = u32_from_u128(other);
+ let carry = if a > 0 {
+ while self.data.len() < 4 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b, a])
+ } else {
+ debug_assert!(b > 0);
+ while self.data.len() < 3 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b])
+ };
+
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ if hi == 0 {
+ *self += lo;
+ } else {
+ while self.data.len() < 2 {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[lo, hi]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl CheckedAdd for BigUint {
+ #[inline]
+ fn checked_add(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.add(v))
+ }
+}
+
+impl_sum_iter_type!(BigUint);
diff --git a/rust/vendor/num-bigint/src/biguint/arbitrary.rs b/rust/vendor/num-bigint/src/biguint/arbitrary.rs
new file mode 100644
index 0000000..6fa91c0
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/arbitrary.rs
@@ -0,0 +1,34 @@
+use super::{biguint_from_vec, BigUint};
+
+use crate::big_digit::BigDigit;
+#[cfg(feature = "quickcheck")]
+use crate::std_alloc::Box;
+use crate::std_alloc::Vec;
+
+#[cfg(feature = "quickcheck")]
+impl quickcheck::Arbitrary for BigUint {
+ fn arbitrary(g: &mut quickcheck::Gen) -> Self {
+ // Use arbitrary from Vec
+ biguint_from_vec(Vec::<BigDigit>::arbitrary(g))
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ // Use shrinker from Vec
+ Box::new(self.data.shrink().map(biguint_from_vec))
+ }
+}
+
+#[cfg(feature = "arbitrary")]
+impl arbitrary::Arbitrary<'_> for BigUint {
+ fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ Ok(biguint_from_vec(Vec::<BigDigit>::arbitrary(u)?))
+ }
+
+ fn arbitrary_take_rest(u: arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ Ok(biguint_from_vec(Vec::<BigDigit>::arbitrary_take_rest(u)?))
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ Vec::<BigDigit>::size_hint(depth)
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/bits.rs b/rust/vendor/num-bigint/src/biguint/bits.rs
new file mode 100644
index 0000000..42d7ec0
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/bits.rs
@@ -0,0 +1,93 @@
+use super::{BigUint, IntDigits};
+
+use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign};
+
+forward_val_val_binop!(impl BitAnd for BigUint, bitand);
+forward_ref_val_binop!(impl BitAnd for BigUint, bitand);
+
+// do not use forward_ref_ref_binop_commutative! for bitand so that we can
+// clone the smaller value rather than the larger, avoiding over-allocation
+impl BitAnd<&BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(self, other: &BigUint) -> BigUint {
+ // forward to val-ref, choosing the smaller to clone
+ if self.data.len() <= other.data.len() {
+ self.clone() & other
+ } else {
+ other.clone() & self
+ }
+ }
+}
+
+forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign);
+
+impl BitAnd<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(mut self, other: &BigUint) -> BigUint {
+ self &= other;
+ self
+ }
+}
+impl BitAndAssign<&BigUint> for BigUint {
+ #[inline]
+ fn bitand_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai &= bi;
+ }
+ self.data.truncate(other.data.len());
+ self.normalize();
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor);
+forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign);
+
+impl BitOr<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitor(mut self, other: &BigUint) -> BigUint {
+ self |= other;
+ self
+ }
+}
+impl BitOrAssign<&BigUint> for BigUint {
+ #[inline]
+ fn bitor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai |= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor);
+forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign);
+
+impl BitXor<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitxor(mut self, other: &BigUint) -> BigUint {
+ self ^= other;
+ self
+ }
+}
+impl BitXorAssign<&BigUint> for BigUint {
+ #[inline]
+ fn bitxor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai ^= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ self.normalize();
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/convert.rs b/rust/vendor/num-bigint/src/biguint/convert.rs
new file mode 100644
index 0000000..f19bc75
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/convert.rs
@@ -0,0 +1,820 @@
+// This uses stdlib features higher than the MSRV
+#![allow(clippy::manual_range_contains)] // 1.35
+
+use super::{biguint_from_vec, BigUint, ToBigUint};
+
+use super::addition::add2;
+use super::division::div_rem_digit;
+use super::multiplication::mac_with_carry;
+
+use crate::big_digit::{self, BigDigit};
+use crate::std_alloc::Vec;
+use crate::ParseBigIntError;
+#[cfg(has_try_from)]
+use crate::TryFromBigIntError;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+#[cfg(has_try_from)]
+use core::convert::TryFrom;
+use core::mem;
+use core::str::FromStr;
+use num_integer::{Integer, Roots};
+use num_traits::float::FloatCore;
+use num_traits::{FromPrimitive, Num, One, PrimInt, ToPrimitive, Zero};
+
+/// Find last set bit
+/// fls(0) == 0, fls(u32::MAX) == 32
+fn fls<T: PrimInt>(v: T) -> u8 {
+ mem::size_of::<T>() as u8 * 8 - v.leading_zeros() as u8
+}
+
+fn ilog2<T: PrimInt>(v: T) -> u8 {
+ fls(v) - 1
+}
+
+impl FromStr for BigUint {
+ type Err = ParseBigIntError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<BigUint, ParseBigIntError> {
+ BigUint::from_str_radix(s, 10)
+ }
+}
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides
+// BigDigit::BITS
+pub(super) fn from_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let digits_per_big_digit = big_digit::BITS / bits;
+
+ let data = v
+ .chunks(digits_per_big_digit.into())
+ .map(|chunk| {
+ chunk
+ .iter()
+ .rev()
+ .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c))
+ })
+ .collect();
+
+ biguint_from_vec(data)
+}
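+
+// Illustrative note (editorial sketch, not part of the upstream source): for radix 16
+// (bits == 4), the little-endian digit string [0xF, 0x1] folds into the single big digit
+// 0x1F == 31, i.e. 15 + 1*16.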
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide
+// BigDigit::BITS
+fn from_inexact_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let total_bits = (v.len() as u64).saturating_mul(bits.into());
+ let big_digits = Integer::div_ceil(&total_bits, &big_digit::BITS.into())
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let mut data = Vec::with_capacity(big_digits);
+
+ let mut d = 0;
+ let mut dbits = 0; // number of bits we currently have in d
+
+ // walk v accumulating bits in d; whenever we accumulate big_digit::BITS in d, spit out a
+ // big_digit:
+ for &c in v {
+ d |= BigDigit::from(c) << dbits;
+ dbits += bits;
+
+ if dbits >= big_digit::BITS {
+ data.push(d);
+ dbits -= big_digit::BITS;
+ // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit
+ // in d) - grab the bits we lost here:
+ d = BigDigit::from(c) >> (bits - dbits);
+ }
+ }
+
+ if dbits > 0 {
+ debug_assert!(dbits < big_digit::BITS);
+ data.push(d as BigDigit);
+ }
+
+ biguint_from_vec(data)
+}
+
+// Read big-endian radix digits
+fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint {
+ debug_assert!(!v.is_empty() && !radix.is_power_of_two());
+ debug_assert!(v.iter().all(|&c| u32::from(c) < radix));
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ #[cfg(feature = "std")]
+ let big_digits = {
+ let radix_log2 = f64::from(radix).log2();
+ let bits = radix_log2 * v.len() as f64;
+ (bits / big_digit::BITS as f64).ceil()
+ };
+ #[cfg(not(feature = "std"))]
+ let big_digits = {
+ let radix_log2 = ilog2(radix.next_power_of_two()) as usize;
+ let bits = radix_log2 * v.len();
+ (bits / big_digit::BITS as usize) + 1
+ };
+
+ let mut data = Vec::with_capacity(big_digits.to_usize().unwrap_or(0));
+
+ let (base, power) = get_radix_base(radix, big_digit::BITS);
+ let radix = radix as BigDigit;
+
+ let r = v.len() % power;
+ let i = if r == 0 { power } else { r };
+ let (head, tail) = v.split_at(i);
+
+ let first = head
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ data.push(first);
+
+ debug_assert!(tail.len() % power == 0);
+ for chunk in tail.chunks(power) {
+ if data.last() != Some(&0) {
+ data.push(0);
+ }
+
+ let mut carry = 0;
+ for d in data.iter_mut() {
+ *d = mac_with_carry(0, *d, base, &mut carry);
+ }
+ debug_assert!(carry == 0);
+
+ let n = chunk
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ add2(&mut data, &[n]);
+ }
+
+ biguint_from_vec(data)
+}
+
+pub(super) fn from_radix_be(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if buf.is_empty() {
+ return Some(Zero::zero());
+ }
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ let mut v = Vec::from(buf);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(buf, radix)
+ };
+
+ Some(res)
+}
+
+pub(super) fn from_radix_le(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if buf.is_empty() {
+ return Some(Zero::zero());
+ }
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(buf, bits)
+ } else {
+ from_inexact_bitwise_digits_le(buf, bits)
+ }
+ } else {
+ let mut v = Vec::from(buf);
+ v.reverse();
+ from_radix_digits_be(&v, radix)
+ };
+
+ Some(res)
+}
+
+impl Num for BigUint {
+ type FromStrRadixErr = ParseBigIntError;
+
+ /// Creates and initializes a `BigUint`.
+ fn from_str_radix(s: &str, radix: u32) -> Result<BigUint, ParseBigIntError> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+ let mut s = s;
+ if s.starts_with('+') {
+ let tail = &s[1..];
+ if !tail.starts_with('+') {
+ s = tail
+ }
+ }
+
+ if s.is_empty() {
+ return Err(ParseBigIntError::empty());
+ }
+
+ if s.starts_with('_') {
+ // Must lead with a real digit!
+ return Err(ParseBigIntError::invalid());
+ }
+
+ // First normalize all characters to plain digit values
+ let mut v = Vec::with_capacity(s.len());
+ for b in s.bytes() {
+ let d = match b {
+ b'0'..=b'9' => b - b'0',
+ b'a'..=b'z' => b - b'a' + 10,
+ b'A'..=b'Z' => b - b'A' + 10,
+ b'_' => continue,
+ _ => core::u8::MAX,
+ };
+ if d < radix as u8 {
+ v.push(d);
+ } else {
+ return Err(ParseBigIntError::invalid());
+ }
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(&v, radix)
+ };
+ Ok(res)
+ }
+}
+
+fn high_bits_to_u64(v: &BigUint) -> u64 {
+ match v.data.len() {
+ 0 => 0,
+ 1 => {
+ // XXX Conversion is useless if already 64-bit.
+ #[allow(clippy::useless_conversion)]
+ let v0 = u64::from(v.data[0]);
+ v0
+ }
+ _ => {
+ let mut bits = v.bits();
+ let mut ret = 0u64;
+ let mut ret_bits = 0;
+
+ for d in v.data.iter().rev() {
+ let digit_bits = (bits - 1) % u64::from(big_digit::BITS) + 1;
+ let bits_want = Ord::min(64 - ret_bits, digit_bits);
+
+ if bits_want != 0 {
+ if bits_want != 64 {
+ ret <<= bits_want;
+ }
+ // XXX Conversion is useless if already 64-bit.
+ #[allow(clippy::useless_conversion)]
+ let d0 = u64::from(*d) >> (digit_bits - bits_want);
+ ret |= d0;
+ }
+
+ // Implement round-to-odd: If any lower bits are 1, set LSB to 1
+ // so that rounding again to floating point value using
+ // nearest-ties-to-even is correct.
+ //
+ // See: https://en.wikipedia.org/wiki/Rounding#Rounding_to_prepare_for_shorter_precision
+
+ if digit_bits - bits_want != 0 {
+ // XXX Conversion is useless if already 64-bit.
+ #[allow(clippy::useless_conversion)]
+ let masked = u64::from(*d) << (64 - (digit_bits - bits_want) as u32);
+ ret |= (masked != 0) as u64;
+ }
+
+ ret_bits += bits_want;
+ bits -= bits_want;
+ }
+
+ ret
+ }
+ }
+}
+
+impl ToPrimitive for BigUint {
+ #[inline]
+ fn to_i64(&self) -> Option<i64> {
+ self.to_u64().as_ref().and_then(u64::to_i64)
+ }
+
+ #[inline]
+ fn to_i128(&self) -> Option<i128> {
+ self.to_u128().as_ref().and_then(u128::to_i128)
+ }
+
+ #[allow(clippy::useless_conversion)]
+ #[inline]
+ fn to_u64(&self) -> Option<u64> {
+ let mut ret: u64 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 64 {
+ return None;
+ }
+
+ // XXX Conversion is useless if already 64-bit.
+ ret += u64::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ fn to_u128(&self) -> Option<u128> {
+ let mut ret: u128 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 128 {
+ return None;
+ }
+
+ ret |= u128::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - u64::from(fls(mantissa));
+
+ if exponent > core::f32::MAX_EXP as u64 {
+ Some(core::f32::INFINITY)
+ } else {
+ Some((mantissa as f32) * 2.0f32.powi(exponent as i32))
+ }
+ }
+
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - u64::from(fls(mantissa));
+
+ if exponent > core::f64::MAX_EXP as u64 {
+ Some(core::f64::INFINITY)
+ } else {
+ Some((mantissa as f64) * 2.0f64.powi(exponent as i32))
+ }
+ }
+}
+
+macro_rules! impl_try_from_biguint {
+ ($T:ty, $to_ty:path) => {
+ #[cfg(has_try_from)]
+ impl TryFrom<&BigUint> for $T {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: &BigUint) -> Result<$T, TryFromBigIntError<()>> {
+ $to_ty(value).ok_or(TryFromBigIntError::new(()))
+ }
+ }
+
+ #[cfg(has_try_from)]
+ impl TryFrom<BigUint> for $T {
+ type Error = TryFromBigIntError<BigUint>;
+
+ #[inline]
+ fn try_from(value: BigUint) -> Result<$T, TryFromBigIntError<BigUint>> {
+ <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value))
+ }
+ }
+ };
+}
+
+impl_try_from_biguint!(u8, ToPrimitive::to_u8);
+impl_try_from_biguint!(u16, ToPrimitive::to_u16);
+impl_try_from_biguint!(u32, ToPrimitive::to_u32);
+impl_try_from_biguint!(u64, ToPrimitive::to_u64);
+impl_try_from_biguint!(usize, ToPrimitive::to_usize);
+impl_try_from_biguint!(u128, ToPrimitive::to_u128);
+
+impl_try_from_biguint!(i8, ToPrimitive::to_i8);
+impl_try_from_biguint!(i16, ToPrimitive::to_i16);
+impl_try_from_biguint!(i32, ToPrimitive::to_i32);
+impl_try_from_biguint!(i64, ToPrimitive::to_i64);
+impl_try_from_biguint!(isize, ToPrimitive::to_isize);
+impl_try_from_biguint!(i128, ToPrimitive::to_i128);
+
+impl FromPrimitive for BigUint {
+ #[inline]
+ fn from_i64(n: i64) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u64))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn from_i128(n: i128) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u128))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn from_u64(n: u64) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ fn from_u128(n: u128) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ fn from_f64(mut n: f64) -> Option<BigUint> {
+ // handle NAN, INFINITY, NEG_INFINITY
+ if !n.is_finite() {
+ return None;
+ }
+
+ // match the rounding of casting from float to int
+ n = n.trunc();
+
+ // handle 0.x, -0.x
+ if n.is_zero() {
+ return Some(BigUint::zero());
+ }
+
+ let (mantissa, exponent, sign) = FloatCore::integer_decode(n);
+
+ if sign == -1 {
+ return None;
+ }
+
+ let mut ret = BigUint::from(mantissa);
+ match exponent.cmp(&0) {
+ Greater => ret <<= exponent as usize,
+ Equal => {}
+ Less => ret >>= (-exponent) as usize,
+ }
+ Some(ret)
+ }
+}
+
+impl From<u64> for BigUint {
+ #[inline]
+ fn from(mut n: u64) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ // don't overflow if BITS is 64:
+ n = (n >> 1) >> (big_digit::BITS - 1);
+ }
+
+ ret
+ }
+}
+
+impl From<u128> for BigUint {
+ #[inline]
+ fn from(mut n: u128) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ n >>= big_digit::BITS;
+ }
+
+ ret
+ }
+}
+
+macro_rules! impl_biguint_from_uint {
+ ($T:ty) => {
+ impl From<$T> for BigUint {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigUint::from(n as u64)
+ }
+ }
+ };
+}
+
+impl_biguint_from_uint!(u8);
+impl_biguint_from_uint!(u16);
+impl_biguint_from_uint!(u32);
+impl_biguint_from_uint!(usize);
+
+macro_rules! impl_biguint_try_from_int {
+ ($T:ty, $from_ty:path) => {
+ #[cfg(has_try_from)]
+ impl TryFrom<$T> for BigUint {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: $T) -> Result<BigUint, TryFromBigIntError<()>> {
+ $from_ty(value).ok_or(TryFromBigIntError::new(()))
+ }
+ }
+ };
+}
+
+impl_biguint_try_from_int!(i8, FromPrimitive::from_i8);
+impl_biguint_try_from_int!(i16, FromPrimitive::from_i16);
+impl_biguint_try_from_int!(i32, FromPrimitive::from_i32);
+impl_biguint_try_from_int!(i64, FromPrimitive::from_i64);
+impl_biguint_try_from_int!(isize, FromPrimitive::from_isize);
+impl_biguint_try_from_int!(i128, FromPrimitive::from_i128);
+
+impl ToBigUint for BigUint {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ Some(self.clone())
+ }
+}
+
+macro_rules! impl_to_biguint {
+ ($T:ty, $from_ty:path) => {
+ impl ToBigUint for $T {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ $from_ty(*self)
+ }
+ }
+ };
+}
+
+impl_to_biguint!(isize, FromPrimitive::from_isize);
+impl_to_biguint!(i8, FromPrimitive::from_i8);
+impl_to_biguint!(i16, FromPrimitive::from_i16);
+impl_to_biguint!(i32, FromPrimitive::from_i32);
+impl_to_biguint!(i64, FromPrimitive::from_i64);
+impl_to_biguint!(i128, FromPrimitive::from_i128);
+
+impl_to_biguint!(usize, FromPrimitive::from_usize);
+impl_to_biguint!(u8, FromPrimitive::from_u8);
+impl_to_biguint!(u16, FromPrimitive::from_u16);
+impl_to_biguint!(u32, FromPrimitive::from_u32);
+impl_to_biguint!(u64, FromPrimitive::from_u64);
+impl_to_biguint!(u128, FromPrimitive::from_u128);
+
+impl_to_biguint!(f32, FromPrimitive::from_f32);
+impl_to_biguint!(f64, FromPrimitive::from_f64);
+
+impl From<bool> for BigUint {
+ fn from(x: bool) -> Self {
+ if x {
+ One::one()
+ } else {
+ Zero::zero()
+ }
+ }
+}
+
+// Extract bitwise digits that evenly divide BigDigit
+pub(super) fn to_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0);
+
+ let last_i = u.data.len() - 1;
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits_per_big_digit = big_digit::BITS / bits;
+ let digits = Integer::div_ceil(&u.bits(), &u64::from(bits))
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let mut res = Vec::with_capacity(digits);
+
+ for mut r in u.data[..last_i].iter().cloned() {
+ for _ in 0..digits_per_big_digit {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+ }
+
+ let mut r = u.data[last_i];
+ while r != 0 {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+
+ res
+}
+
+// Extract bitwise digits that don't evenly divide BigDigit
+fn to_inexact_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0);
+
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits = Integer::div_ceil(&u.bits(), &u64::from(bits))
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let mut res = Vec::with_capacity(digits);
+
+ let mut r = 0;
+ let mut rbits = 0;
+
+ for c in &u.data {
+ r |= *c << rbits;
+ rbits += big_digit::BITS;
+
+ while rbits >= bits {
+ res.push((r & mask) as u8);
+ r >>= bits;
+
+ // r had more bits than it could fit - grab the bits we lost
+ if rbits > big_digit::BITS {
+ r = *c >> (big_digit::BITS - (rbits - bits));
+ }
+
+ rbits -= bits;
+ }
+ }
+
+ if rbits != 0 {
+ res.push(r as u8);
+ }
+
+ while let Some(&0) = res.last() {
+ res.pop();
+ }
+
+ res
+}
+
+// Extract little-endian radix digits
+#[inline(always)] // forced inline to get const-prop for radix=10
+pub(super) fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && !radix.is_power_of_two());
+
+ #[cfg(feature = "std")]
+ let radix_digits = {
+ let radix_log2 = f64::from(radix).log2();
+ ((u.bits() as f64) / radix_log2).ceil()
+ };
+ #[cfg(not(feature = "std"))]
+ let radix_digits = {
+ let radix_log2 = ilog2(radix) as usize;
+ ((u.bits() as usize) / radix_log2) + 1
+ };
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ let mut res = Vec::with_capacity(radix_digits.to_usize().unwrap_or(0));
+
+ let mut digits = u.clone();
+
+ let (base, power) = get_radix_base(radix, big_digit::HALF_BITS);
+ let radix = radix as BigDigit;
+
+ // For very large numbers, the O(n²) loop of repeated `div_rem_digit` dominates the
+ // performance. We can mitigate this by dividing into chunks of a larger base first.
+ // The threshold for this was chosen by anecdotal performance measurements to
+ // approximate where this starts to make a noticeable difference.
+ if digits.data.len() >= 64 {
+ let mut big_base = BigUint::from(base * base);
+ let mut big_power = 2usize;
+
+ // Choose a target base length near √n.
+ let target_len = digits.data.len().sqrt();
+ while big_base.data.len() < target_len {
+ big_base = &big_base * &big_base;
+ big_power *= 2;
+ }
+
+ // This outer loop will run approximately √n times.
+ while digits > big_base {
+ // This is still the dominating factor, with n digits divided by √n digits.
+ let (q, mut big_r) = digits.div_rem(&big_base);
+ digits = q;
+
+ // This inner loop now has O((√n)²) = O(n) behavior altogether.
+ for _ in 0..big_power {
+ let (q, mut r) = div_rem_digit(big_r, base);
+ big_r = q;
+ for _ in 0..power {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+ }
+ }
+ }
+
+ while digits.data.len() > 1 {
+ let (q, mut r) = div_rem_digit(digits, base);
+ for _ in 0..power {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+ digits = q;
+ }
+
+ let mut r = digits.data[0];
+ while r != 0 {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+
+ res
+}
+
+pub(super) fn to_radix_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ if u.is_zero() {
+ vec![0]
+ } else if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of division
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ to_bitwise_digits_le(u, bits)
+ } else {
+ to_inexact_bitwise_digits_le(u, bits)
+ }
+ } else if radix == 10 {
+ // 10 is so common that it's worth separating out for const-propagation.
+ // Optimizers can often turn constant division into a faster multiplication.
+ to_radix_digits_le(u, 10)
+ } else {
+ to_radix_digits_le(u, radix)
+ }
+}
+
+pub(crate) fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec<u8> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+
+ if u.is_zero() {
+ return vec![b'0'];
+ }
+
+ let mut res = to_radix_le(u, radix);
+
+ // Now convert everything to ASCII digits.
+ for r in &mut res {
+ debug_assert!(u32::from(*r) < radix);
+ if *r < 10 {
+ *r += b'0';
+ } else {
+ *r += b'a' - 10;
+ }
+ }
+ res
+}
+
+/// Returns the greatest power of the radix for the given bit size
+#[inline]
+fn get_radix_base(radix: u32, bits: u8) -> (BigDigit, usize) {
+ mod gen {
+ include! { concat!(env!("OUT_DIR"), "/radix_bases.rs") }
+ }
+
+ debug_assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+ debug_assert!(!radix.is_power_of_two());
+ debug_assert!(bits <= big_digit::BITS);
+
+ match bits {
+ 16 => {
+ let (base, power) = gen::BASES_16[radix as usize];
+ (base as BigDigit, power)
+ }
+ 32 => {
+ let (base, power) = gen::BASES_32[radix as usize];
+ (base as BigDigit, power)
+ }
+ 64 => {
+ let (base, power) = gen::BASES_64[radix as usize];
+ (base as BigDigit, power)
+ }
+ _ => panic!("Invalid bigdigit size"),
+ }
+}
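+
+// Illustrative note (editorial sketch, not part of the upstream source; the concrete
+// values assume the generated tables store the largest radix power that fits the digit
+// width): get_radix_base(10, 32) would yield (1_000_000_000, 9), since 10^9 is the
+// largest power of ten below 2^32, so decimal digits can be converted nine at a time.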
diff --git a/rust/vendor/num-bigint/src/biguint/division.rs b/rust/vendor/num-bigint/src/biguint/division.rs
new file mode 100644
index 0000000..2999838
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/division.rs
@@ -0,0 +1,652 @@
+use super::addition::__add2;
+#[cfg(not(u64_digit))]
+use super::u32_to_u128;
+use super::{cmp_slice, BigUint};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit};
+use crate::UsizePromotion;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::mem;
+use core::ops::{Div, DivAssign, Rem, RemAssign};
+use num_integer::Integer;
+use num_traits::{CheckedDiv, CheckedEuclid, Euclid, One, ToPrimitive, Zero};
+
+/// Divide a two-digit numerator by a one-digit divisor, returning the quotient and remainder.
+///
+/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit.
+/// This is _not_ true for an arbitrary numerator/denominator.
+///
+/// (This function also matches what the x86 divide instruction does).
+#[inline]
+fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
+ debug_assert!(hi < divisor);
+
+ let lhs = big_digit::to_doublebigdigit(hi, lo);
+ let rhs = DoubleBigDigit::from(divisor);
+ ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit)
+}
+
+/// For small divisors, we can divide without promoting to `DoubleBigDigit` by
+/// using half-size pieces of digit, like long-division.
+#[inline]
+fn div_half(rem: BigDigit, digit: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
+ use crate::big_digit::{HALF, HALF_BITS};
+
+ debug_assert!(rem < divisor && divisor <= HALF);
+ let (hi, rem) = ((rem << HALF_BITS) | (digit >> HALF_BITS)).div_rem(&divisor);
+ let (lo, rem) = ((rem << HALF_BITS) | (digit & HALF)).div_rem(&divisor);
+ ((hi << HALF_BITS) | lo, rem)
+}
+
+#[inline]
+pub(super) fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) {
+ if b == 0 {
+ panic!("attempt to divide by zero")
+ }
+
+ let mut rem = 0;
+
+ if b <= big_digit::HALF {
+ for d in a.data.iter_mut().rev() {
+ let (q, r) = div_half(rem, *d, b);
+ *d = q;
+ rem = r;
+ }
+ } else {
+ for d in a.data.iter_mut().rev() {
+ let (q, r) = div_wide(rem, *d, b);
+ *d = q;
+ rem = r;
+ }
+ }
+
+ (a.normalized(), rem)
+}
+
+#[inline]
+fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit {
+ if b == 0 {
+ panic!("attempt to divide by zero")
+ }
+
+ let mut rem = 0;
+
+ if b <= big_digit::HALF {
+ for &digit in a.data.iter().rev() {
+ let (_, r) = div_half(rem, digit, b);
+ rem = r;
+ }
+ } else {
+ for &digit in a.data.iter().rev() {
+ let (_, r) = div_wide(rem, digit, b);
+ rem = r;
+ }
+ }
+
+ rem
+}
+
+/// Subtract a multiple.
+/// a -= b * c
+/// Returns a borrow (if a < b * c then the borrow is > 0).
+fn sub_mul_digit_same_len(a: &mut [BigDigit], b: &[BigDigit], c: BigDigit) -> BigDigit {
+ debug_assert!(a.len() == b.len());
+
+ // carry is between -big_digit::MAX and 0, so to avoid overflow we store
+ // offset_carry = carry + big_digit::MAX
+ let mut offset_carry = big_digit::MAX;
+
+ for (x, y) in a.iter_mut().zip(b) {
+ // We want to calculate sum = x - y * c + carry.
+ // sum >= -(big_digit::MAX * big_digit::MAX) - big_digit::MAX
+ // sum <= big_digit::MAX
+ // Offsetting sum by (big_digit::MAX << big_digit::BITS) puts it in DoubleBigDigit range.
+ let offset_sum = big_digit::to_doublebigdigit(big_digit::MAX, *x)
+ - big_digit::MAX as DoubleBigDigit
+ + offset_carry as DoubleBigDigit
+ - *y as DoubleBigDigit * c as DoubleBigDigit;
+
+ let (new_offset_carry, new_x) = big_digit::from_doublebigdigit(offset_sum);
+ offset_carry = new_offset_carry;
+ *x = new_x;
+ }
+
+ // Return the borrow.
+ big_digit::MAX - offset_carry
+}
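+
+// Illustrative note (editorial sketch, not part of the upstream source): with a = [5],
+// b = [3] and c = 2, the subtraction 5 - 3*2 wraps to a = [big_digit::MAX] and the
+// function returns a borrow of 1, i.e. the value is interpreted as MAX - 1*2^BITS = -1.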
+
+fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!("attempt to divide by zero")
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u, Zero::zero());
+ }
+ let (div, rem) = div_rem_digit(u, d.data[0]);
+ // reuse d
+ d.data.clear();
+ d += rem;
+ return (div, d);
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(&d) {
+ Less => return (Zero::zero(), u),
+ Equal => {
+ u.set_one();
+ return (u, Zero::zero());
+ }
+ Greater => {} // Do nothing
+ }
+
+ // This algorithm is from Knuth, TAOCP vol 2 section 4.3.1, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+
+ if shift == 0 {
+ // no need to clone d
+ div_rem_core(u, &d.data)
+ } else {
+ let (q, r) = div_rem_core(u << shift, &(d << shift).data);
+ // renormalize the remainder
+ (q, r >> shift)
+ }
+}
+
+pub(super) fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!("attempt to divide by zero")
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u.clone(), Zero::zero());
+ }
+
+ let (div, rem) = div_rem_digit(u.clone(), d.data[0]);
+ return (div, rem.into());
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(d) {
+ Less => return (Zero::zero(), u.clone()),
+ Equal => return (One::one(), Zero::zero()),
+ Greater => {} // Do nothing
+ }
+
+ // This algorithm is from Knuth, TAOCP vol 2 section 4.3.1, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+
+ if shift == 0 {
+ // no need to clone d
+ div_rem_core(u.clone(), &d.data)
+ } else {
+ let (q, r) = div_rem_core(u << shift, &(d << shift).data);
+ // renormalize the remainder
+ (q, r >> shift)
+ }
+}
+
+/// An implementation of the base division algorithm.
+/// Knuth, TAOCP vol 2 section 4.3.1, algorithm D, with an improvement from exercises 19-21.
+fn div_rem_core(mut a: BigUint, b: &[BigDigit]) -> (BigUint, BigUint) {
+ debug_assert!(a.data.len() >= b.len() && b.len() > 1);
+ debug_assert!(b.last().unwrap().leading_zeros() == 0);
+
+ // The algorithm works by incrementally calculating "guesses", q0, for the next digit of the
+ // quotient. Once we have any number q0 such that (q0 << j) * b <= a, we can set
+ //
+ // q += q0 << j
+ // a -= (q0 << j) * b
+ //
+ // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder.
+ //
+ // q0, our guess, is calculated by dividing the last three digits of a by the last two digits of
+ // b - this will give us a guess that is close to the actual quotient, but is possibly greater.
+ // It can only be greater by 1 and only in rare cases, with probability at most
+ // 2^-(big_digit::BITS-1) for random a, see TAOCP 4.3.1 exercise 21.
+ //
+ // If the quotient turns out to be too large, we adjust it by 1:
+ // q -= 1 << j
+ // a += b << j
+
+ // a0 stores an additional extra most significant digit of the dividend, not stored in a.
+ let mut a0 = 0;
+
+ // [b1, b0] are the two most significant digits of the divisor. They never change.
+ let b0 = *b.last().unwrap();
+ let b1 = b[b.len() - 2];
+
+ let q_len = a.data.len() - b.len() + 1;
+ let mut q = BigUint {
+ data: vec![0; q_len],
+ };
+
+ for j in (0..q_len).rev() {
+ debug_assert!(a.data.len() == b.len() + j);
+
+ let a1 = *a.data.last().unwrap();
+ let a2 = a.data[a.data.len() - 2];
+
+ // The first q0 estimate is [a1,a0] / b0. It will never be too small, it may be too large
+ // by at most 2.
+ let (mut q0, mut r) = if a0 < b0 {
+ let (q0, r) = div_wide(a0, a1, b0);
+ (q0, r as DoubleBigDigit)
+ } else {
+ debug_assert!(a0 == b0);
+ // Avoid overflowing q0, we know the quotient fits in BigDigit.
+ // [a1,a0] = b0 * ((1 << BITS) - 1) + (a0 + a1)
+ (big_digit::MAX, a0 as DoubleBigDigit + a1 as DoubleBigDigit)
+ };
+
+ // r = [a1,a0] - q0 * b0
+ //
+ // Now we want to compute a more precise estimate [a2,a1,a0] / [b1,b0] which can only be
+ // less or equal to the current q0.
+ //
+ // q0 is too large if:
+ // [a2,a1,a0] < q0 * [b1,b0]
+ // (r << BITS) + a2 < q0 * b1
+ while r <= big_digit::MAX as DoubleBigDigit
+ && big_digit::to_doublebigdigit(r as BigDigit, a2)
+ < q0 as DoubleBigDigit * b1 as DoubleBigDigit
+ {
+ q0 -= 1;
+ r += b0 as DoubleBigDigit;
+ }
+
+ // q0 is now either the correct quotient digit, or in rare cases 1 too large.
+ // Subtract (q0 << j) from a. This may overflow, in which case we will have to correct.
+
+ let mut borrow = sub_mul_digit_same_len(&mut a.data[j..], b, q0);
+ if borrow > a0 {
+ // q0 is too large. We need to add back one multiple of b.
+ q0 -= 1;
+ borrow -= __add2(&mut a.data[j..], b);
+ }
+ // The top digit of a, stored in a0, has now been zeroed.
+ debug_assert!(borrow == a0);
+
+ q.data[j] = q0;
+
+ // Pop off the next top digit of a.
+ a0 = a.data.pop().unwrap();
+ }
+
+ a.data.push(a0);
+ a.normalize();
+
+ debug_assert_eq!(cmp_slice(&a.data, b), Less);
+
+ (q.normalized(), a)
+}
+
+forward_val_ref_binop!(impl Div for BigUint, div);
+forward_ref_val_binop!(impl Div for BigUint, div);
+forward_val_assign!(impl DivAssign for BigUint, div_assign);
+
+impl Div<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ let (q, _) = div_rem(self, other);
+ q
+ }
+}
+
+impl Div<&BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: &BigUint) -> BigUint {
+ let (q, _) = self.div_rem(other);
+ q
+ }
+}
+impl DivAssign<&BigUint> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: &BigUint) {
+ *self = &*self / other;
+ }
+}
+
+promote_unsigned_scalars!(impl Div for BigUint, div);
+promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign);
+forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigUint, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigUint, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigUint, div);
+
+impl Div<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u32) -> BigUint {
+ let (q, _) = div_rem_digit(self, other as BigDigit);
+ q
+ }
+}
+impl DivAssign<u32> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u32) {
+ *self = &*self / other;
+ }
+}
+
+impl Div<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self as BigDigit / other.data[0]),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+impl Div<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u64) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+impl DivAssign<u64> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u64) {
+ // a vec of size 0 does not allocate, so this is fairly cheap
+ let temp = mem::replace(self, Zero::zero());
+ *self = temp / other;
+ }
+}
+
+impl Div<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / u64::from(other.data[0])),
+ 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ _ => Zero::zero(),
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / other.data[0]),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+impl Div<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u128) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+
+impl DivAssign<u128> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u128) {
+ *self = &*self / other;
+ }
+}
+
+impl Div<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / u128::from(other.data[0])),
+ 2 => From::from(
+ self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ ),
+ 3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])),
+ 4 => From::from(
+ self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]),
+ ),
+ _ => Zero::zero(),
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / other.data[0] as u128),
+ 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+forward_val_ref_binop!(impl Rem for BigUint, rem);
+forward_ref_val_binop!(impl Rem for BigUint, rem);
+forward_val_assign!(impl RemAssign for BigUint, rem_assign);
+
+impl Rem<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: BigUint) -> BigUint {
+ if let Some(other) = other.to_u32() {
+ &self % other
+ } else {
+ let (_, r) = div_rem(self, other);
+ r
+ }
+ }
+}
+
+impl Rem<&BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: &BigUint) -> BigUint {
+ if let Some(other) = other.to_u32() {
+ self % other
+ } else {
+ let (_, r) = self.div_rem(other);
+ r
+ }
+ }
+}
+impl RemAssign<&BigUint> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = &*self % other;
+ }
+}
+
+promote_unsigned_scalars!(impl Rem for BigUint, rem);
+promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign);
+forward_all_scalar_binop_to_ref_val!(impl Rem<u32> for BigUint, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigUint, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigUint, rem);
+
+impl Rem<u32> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u32) -> BigUint {
+ rem_digit(self, other as BigDigit).into()
+ }
+}
+impl RemAssign<u32> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u32) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<&BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: &BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+macro_rules! impl_rem_assign_scalar {
+ ($scalar:ty, $to_scalar:ident) => {
+ forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign);
+ impl RemAssign<&BigUint> for $scalar {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = match other.$to_scalar() {
+ None => *self,
+ Some(0) => panic!("attempt to divide by zero"),
+ Some(v) => *self % v
+ };
+ }
+ }
+ }
+}
+
+// we can scalar %= BigUint for any scalar, including signed types
+impl_rem_assign_scalar!(u128, to_u128);
+impl_rem_assign_scalar!(usize, to_usize);
+impl_rem_assign_scalar!(u64, to_u64);
+impl_rem_assign_scalar!(u32, to_u32);
+impl_rem_assign_scalar!(u16, to_u16);
+impl_rem_assign_scalar!(u8, to_u8);
+impl_rem_assign_scalar!(i128, to_i128);
+impl_rem_assign_scalar!(isize, to_isize);
+impl_rem_assign_scalar!(i64, to_i64);
+impl_rem_assign_scalar!(i32, to_i32);
+impl_rem_assign_scalar!(i16, to_i16);
+impl_rem_assign_scalar!(i8, to_i8);
+
+impl Rem<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u64) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+impl RemAssign<u64> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u64) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+impl Rem<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u128) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+
+impl RemAssign<u128> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u128) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+impl CheckedDiv for BigUint {
+ #[inline]
+ fn checked_div(&self, v: &BigUint) -> Option<BigUint> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+}
+
+impl CheckedEuclid for BigUint {
+ #[inline]
+ fn checked_div_euclid(&self, v: &BigUint) -> Option<BigUint> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div_euclid(v))
+ }
+
+ #[inline]
+ fn checked_rem_euclid(&self, v: &BigUint) -> Option<BigUint> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.rem_euclid(v))
+ }
+}
+
+impl Euclid for BigUint {
+ #[inline]
+ fn div_euclid(&self, v: &BigUint) -> BigUint {
+ // trivially same as regular division
+ self / v
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &BigUint) -> BigUint {
+ // trivially same as regular remainder
+ self % v
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/iter.rs b/rust/vendor/num-bigint/src/biguint/iter.rs
new file mode 100644
index 0000000..1e673e4
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/iter.rs
@@ -0,0 +1,358 @@
+use core::iter::FusedIterator;
+
+#[cfg(not(u64_digit))]
+use super::u32_chunk_to_u64;
+
+/// An iterator over the `u32` digits of a `BigUint` or `BigInt`,
+/// ordered least significant digit first.
+pub struct U32Digits<'a> {
+ #[cfg(u64_digit)]
+ data: &'a [u64],
+ #[cfg(u64_digit)]
+ next_is_lo: bool,
+ #[cfg(u64_digit)]
+ last_hi_is_zero: bool,
+
+ #[cfg(not(u64_digit))]
+ it: core::slice::Iter<'a, u32>,
+}
+
+#[cfg(u64_digit)]
+impl<'a> U32Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u64]) -> Self {
+ let last_hi_is_zero = data
+ .last()
+ .map(|&last| {
+ let last_hi = (last >> 32) as u32;
+ last_hi == 0
+ })
+ .unwrap_or(false);
+ U32Digits {
+ data,
+ next_is_lo: true,
+ last_hi_is_zero,
+ }
+ }
+}
+
+#[cfg(u64_digit)]
+impl Iterator for U32Digits<'_> {
+ type Item = u32;
+ #[inline]
+ fn next(&mut self) -> Option<u32> {
+ match self.data.split_first() {
+ Some((&first, data)) => {
+ let next_is_lo = self.next_is_lo;
+ self.next_is_lo = !next_is_lo;
+ if next_is_lo {
+ Some(first as u32)
+ } else {
+ self.data = data;
+ if data.is_empty() && self.last_hi_is_zero {
+ self.last_hi_is_zero = false;
+ None
+ } else {
+ Some((first >> 32) as u32)
+ }
+ }
+ }
+ None => None,
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn last(self) -> Option<u32> {
+ self.data.last().map(|&last| {
+ if self.last_hi_is_zero {
+ last as u32
+ } else {
+ (last >> 32) as u32
+ }
+ })
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+}
+
+#[cfg(u64_digit)]
+impl DoubleEndedIterator for U32Digits<'_> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ match self.data.split_last() {
+ Some((&last, data)) => {
+ let last_is_lo = self.last_hi_is_zero;
+ self.last_hi_is_zero = !last_is_lo;
+ if last_is_lo {
+ self.data = data;
+ if data.is_empty() && !self.next_is_lo {
+ self.next_is_lo = true;
+ None
+ } else {
+ Some(last as u32)
+ }
+ } else {
+ Some((last >> 32) as u32)
+ }
+ }
+ None => None,
+ }
+ }
+}
+
+#[cfg(u64_digit)]
+impl ExactSizeIterator for U32Digits<'_> {
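+ // Each u64 word holds two u32 digits: drop one from the count if the high half of the last
+ // word is zero, and one more if the low half of the current word has already been yielded.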
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len() * 2 - usize::from(self.last_hi_is_zero) - usize::from(!self.next_is_lo)
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl<'a> U32Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u32]) -> Self {
+ Self { it: data.iter() }
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl Iterator for U32Digits<'_> {
+ type Item = u32;
+ #[inline]
+ fn next(&mut self) -> Option<u32> {
+ self.it.next().cloned()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<u32> {
+ self.it.nth(n).cloned()
+ }
+
+ #[inline]
+ fn last(self) -> Option<u32> {
+ self.it.last().cloned()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count()
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl DoubleEndedIterator for U32Digits<'_> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.it.next_back().copied()
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl ExactSizeIterator for U32Digits<'_> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.it.len()
+ }
+}
+
+impl FusedIterator for U32Digits<'_> {}
+
+/// An iterator over the `u64` digits of a `BigUint` or `BigInt`,
+/// ordered least significant digit first.
+pub struct U64Digits<'a> {
+ #[cfg(not(u64_digit))]
+ it: core::slice::Chunks<'a, u32>,
+
+ #[cfg(u64_digit)]
+ it: core::slice::Iter<'a, u64>,
+}
+
+#[cfg(not(u64_digit))]
+impl<'a> U64Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u32]) -> Self {
+ U64Digits { it: data.chunks(2) }
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl Iterator for U64Digits<'_> {
+ type Item = u64;
+ #[inline]
+ fn next(&mut self) -> Option<u64> {
+ self.it.next().map(u32_chunk_to_u64)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn last(self) -> Option<u64> {
+ self.it.last().map(u32_chunk_to_u64)
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl DoubleEndedIterator for U64Digits<'_> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.it.next_back().map(u32_chunk_to_u64)
+ }
+}
+
+#[cfg(u64_digit)]
+impl<'a> U64Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u64]) -> Self {
+ Self { it: data.iter() }
+ }
+}
+
+#[cfg(u64_digit)]
+impl Iterator for U64Digits<'_> {
+ type Item = u64;
+ #[inline]
+ fn next(&mut self) -> Option<u64> {
+ self.it.next().cloned()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<u64> {
+ self.it.nth(n).cloned()
+ }
+
+ #[inline]
+ fn last(self) -> Option<u64> {
+ self.it.last().cloned()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count()
+ }
+}
+
+#[cfg(u64_digit)]
+impl DoubleEndedIterator for U64Digits<'_> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.it.next_back().cloned()
+ }
+}
+
+impl ExactSizeIterator for U64Digits<'_> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.it.len()
+ }
+}
+
+impl FusedIterator for U64Digits<'_> {}
+
+#[test]
+fn test_iter_u32_digits() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(112500000000u64);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next(), Some(830850304));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(26));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iter_u64_digits() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(18_446_744_073_709_551_616u128);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(1));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iter_u32_digits_be() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(112500000000u64);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next(), Some(830850304));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(26));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iter_u64_digits_be() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next_back(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(18_446_744_073_709_551_616u128);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next_back(), Some(1));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next_back(), Some(0));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
diff --git a/rust/vendor/num-bigint/src/biguint/monty.rs b/rust/vendor/num-bigint/src/biguint/monty.rs
new file mode 100644
index 0000000..abaca50
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/monty.rs
@@ -0,0 +1,225 @@
+use crate::std_alloc::Vec;
+use core::mem;
+use core::ops::Shl;
+use num_traits::{One, Zero};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit, SignedDoubleBigDigit};
+use crate::biguint::BigUint;
+
+struct MontyReducer {
+ n0inv: BigDigit,
+}
+
+// k0 = -m**-1 mod 2**BITS. Algorithm from: Dumas, J.G. "On Newton–Raphson
+// Iteration for Multiplicative Inverses Modulo Prime Powers".
+fn inv_mod_alt(b: BigDigit) -> BigDigit {
+ assert_ne!(b & 1, 0);
+
+ let mut k0 = 2 - b as SignedDoubleBigDigit;
+ let mut t = (b - 1) as SignedDoubleBigDigit;
+ let mut i = 1;
+ while i < big_digit::BITS {
+ t = t.wrapping_mul(t);
+ k0 = k0.wrapping_mul(t + 1);
+
+ i <<= 1;
+ }
+ -k0 as BigDigit
+}
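+
+// For example (assuming 64-bit digits), inv_mod_alt(3) == 0x5555_5555_5555_5555, since
+// 3 * 0x5555_5555_5555_5555 == 0xFFFF_FFFF_FFFF_FFFF == -1 mod 2^64.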
+
+impl MontyReducer {
+ fn new(n: &BigUint) -> Self {
+ let n0inv = inv_mod_alt(n.data[0]);
+ MontyReducer { n0inv }
+ }
+}
+
+/// Computes z mod m = x * y * 2 ** (-n*_W) mod m
+/// assuming k = -1/m mod 2**_W
+/// See Gueron, "Efficient Software Implementations of Modular Exponentiation".
+/// <https://eprint.iacr.org/2011/239.pdf>
+/// In the terminology of that paper, this is an "Almost Montgomery Multiplication":
+/// x and y are required to satisfy 0 <= x, y < 2**(n*_W) and then the result
+/// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m.
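+///
+/// Roughly, each loop iteration below adds y.data[i] * x and then t * m, where
+/// t = z.data[i] * k mod 2**_W; since k * m == -1 (mod 2**_W), this zeroes the word z.data[i],
+/// and advancing the working window by one word is then an exact division by 2**_W, which is
+/// where the overall 2**(-n*_W) factor comes from.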
+#[allow(clippy::many_single_char_names)]
+fn montgomery(x: &BigUint, y: &BigUint, m: &BigUint, k: BigDigit, n: usize) -> BigUint {
+ // This code assumes x, y, m are all the same length, n.
+ // (required by addMulVVW and the for loop).
+ // It also assumes that x, y are already reduced mod m,
+ // or else the result will not be properly reduced.
+ assert!(
+ x.data.len() == n && y.data.len() == n && m.data.len() == n,
+ "{:?} {:?} {:?} {}",
+ x,
+ y,
+ m,
+ n
+ );
+
+ let mut z = BigUint::zero();
+ z.data.resize(n * 2, 0);
+
+ let mut c: BigDigit = 0;
+ for i in 0..n {
+ let c2 = add_mul_vvw(&mut z.data[i..n + i], &x.data, y.data[i]);
+ let t = z.data[i].wrapping_mul(k);
+ let c3 = add_mul_vvw(&mut z.data[i..n + i], &m.data, t);
+ let cx = c.wrapping_add(c2);
+ let cy = cx.wrapping_add(c3);
+ z.data[n + i] = cy;
+ if cx < c2 || cy < c3 {
+ c = 1;
+ } else {
+ c = 0;
+ }
+ }
+
+ if c == 0 {
+ z.data = z.data[n..].to_vec();
+ } else {
+ {
+ let (first, second) = z.data.split_at_mut(n);
+ sub_vv(first, second, &m.data);
+ }
+ z.data = z.data[..n].to_vec();
+ }
+
+ z
+}
+
+#[inline(always)]
+fn add_mul_vvw(z: &mut [BigDigit], x: &[BigDigit], y: BigDigit) -> BigDigit {
+ let mut c = 0;
+ for (zi, xi) in z.iter_mut().zip(x.iter()) {
+ let (z1, z0) = mul_add_www(*xi, y, *zi);
+ let (c_, zi_) = add_ww(z0, c, 0);
+ *zi = zi_;
+ c = c_ + z1;
+ }
+
+ c
+}
+
+/// The resulting carry c is either 0 or 1.
+#[inline(always)]
+fn sub_vv(z: &mut [BigDigit], x: &[BigDigit], y: &[BigDigit]) -> BigDigit {
+ let mut c = 0;
+ for (i, (xi, yi)) in x.iter().zip(y.iter()).enumerate().take(z.len()) {
+ let zi = xi.wrapping_sub(*yi).wrapping_sub(c);
+ z[i] = zi;
+ // see "Hacker's Delight", section 2-12 (overflow detection)
+ c = ((yi & !xi) | ((yi | !xi) & zi)) >> (big_digit::BITS - 1)
+ }
+
+ c
+}
+
+/// z1<<_W + z0 = x+y+c, with c == 0 or 1
+#[inline(always)]
+fn add_ww(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) {
+ let yc = y.wrapping_add(c);
+ let z0 = x.wrapping_add(yc);
+ let z1 = if z0 < x || yc < y { 1 } else { 0 };
+
+ (z1, z0)
+}
+
+/// z1 << _W + z0 = x * y + c
+#[inline(always)]
+fn mul_add_www(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) {
+ let z = x as DoubleBigDigit * y as DoubleBigDigit + c as DoubleBigDigit;
+ ((z >> big_digit::BITS) as BigDigit, z as BigDigit)
+}
+
+/// Calculates x ** y mod m using a fixed, 4-bit window.
+#[allow(clippy::many_single_char_names)]
+pub(super) fn monty_modpow(x: &BigUint, y: &BigUint, m: &BigUint) -> BigUint {
+ assert!(m.data[0] & 1 == 1);
+ let mr = MontyReducer::new(m);
+ let num_words = m.data.len();
+
+ let mut x = x.clone();
+
+ // We want the lengths of x and m to be equal.
+ // It is OK if x >= m as long as len(x) == len(m).
+ if x.data.len() > num_words {
+ x %= m;
+ // Note: now len(x) <= numWords, not guaranteed ==.
+ }
+ if x.data.len() < num_words {
+ x.data.resize(num_words, 0);
+ }
+
+ // rr = 2**(2*_W*len(m)) mod m
+ let mut rr = BigUint::one();
+ rr = (rr.shl(2 * num_words as u64 * u64::from(big_digit::BITS))) % m;
+ if rr.data.len() < num_words {
+ rr.data.resize(num_words, 0);
+ }
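+ // (rr is R^2 mod m for R = 2^(_W * len(m)); multiplying by rr inside `montgomery` converts a
+ // value into Montgomery form, since montgomery(a, rr) == a * R mod m, up to the "almost"
+ // reduction described above.)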
+ // one = 1, with equal length to that of m
+ let mut one = BigUint::one();
+ one.data.resize(num_words, 0);
+
+ let n = 4;
+ // powers[i] contains x^i in Montgomery form
+ let mut powers = Vec::with_capacity(1 << n);
+ powers.push(montgomery(&one, &rr, m, mr.n0inv, num_words));
+ powers.push(montgomery(&x, &rr, m, mr.n0inv, num_words));
+ for i in 2..1 << n {
+ let r = montgomery(&powers[i - 1], &powers[1], m, mr.n0inv, num_words);
+ powers.push(r);
+ }
+
+ // initialize z = 1 (Montgomery 1)
+ let mut z = powers[0].clone();
+ z.data.resize(num_words, 0);
+ let mut zz = BigUint::zero();
+ zz.data.resize(num_words, 0);
+
+ // Process the exponent 4 bits at a time, most significant digit first: square four times
+ // per window (skipped for the very first window), then multiply by the precomputed power
+ // selected by those 4 bits.
+ for i in (0..y.data.len()).rev() {
+ let mut yi = y.data[i];
+ let mut j = 0;
+ while j < big_digit::BITS {
+ if i != y.data.len() - 1 || j != 0 {
+ zz = montgomery(&z, &z, m, mr.n0inv, num_words);
+ z = montgomery(&zz, &zz, m, mr.n0inv, num_words);
+ zz = montgomery(&z, &z, m, mr.n0inv, num_words);
+ z = montgomery(&zz, &zz, m, mr.n0inv, num_words);
+ }
+ zz = montgomery(
+ &z,
+ &powers[(yi >> (big_digit::BITS - n)) as usize],
+ m,
+ mr.n0inv,
+ num_words,
+ );
+ mem::swap(&mut z, &mut zz);
+ yi <<= n;
+ j += n;
+ }
+ }
+
+ // convert to regular number
+ zz = montgomery(&z, &one, m, mr.n0inv, num_words);
+
+ zz.normalize();
+ // One last reduction, just in case.
+ // See golang.org/issue/13907.
+ if zz >= *m {
+ // Common case is m has high bit set; in that case,
+ // since zz is the same length as m, there can be just
+ // one multiple of m to remove. Just subtract.
+ // We expect a single subtraction to be sufficient in general,
+ // so do that first, then double-check with a full reduction
+ // in case that expectation is wrong.
+ // The div is not expected to be reached.
+ zz -= m;
+ if zz >= *m {
+ zz %= m;
+ }
+ }
+
+ zz.normalize();
+ zz
+}
diff --git a/rust/vendor/num-bigint/src/biguint/multiplication.rs b/rust/vendor/num-bigint/src/biguint/multiplication.rs
new file mode 100644
index 0000000..4d7f1f2
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/multiplication.rs
@@ -0,0 +1,568 @@
+use super::addition::{__add2, add2};
+use super::subtraction::sub2;
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::{biguint_from_vec, cmp_slice, BigUint, IntDigits};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit};
+use crate::Sign::{self, Minus, NoSign, Plus};
+use crate::{BigInt, UsizePromotion};
+
+use core::cmp::Ordering;
+use core::iter::Product;
+use core::ops::{Mul, MulAssign};
+use num_traits::{CheckedMul, FromPrimitive, One, Zero};
+
+#[inline]
+pub(super) fn mac_with_carry(
+ a: BigDigit,
+ b: BigDigit,
+ c: BigDigit,
+ acc: &mut DoubleBigDigit,
+) -> BigDigit {
+ *acc += DoubleBigDigit::from(a);
+ *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+#[inline]
+fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) {
+ if c == 0 {
+ return;
+ }
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = acc.split_at_mut(b.len());
+
+ for (a, &b) in a_lo.iter_mut().zip(b) {
+ *a = mac_with_carry(*a, b, c, &mut carry);
+ }
+
+ let (carry_hi, carry_lo) = big_digit::from_doublebigdigit(carry);
+
+ let final_carry = if carry_hi == 0 {
+ __add2(a_hi, &[carry_lo])
+ } else {
+ __add2(a_hi, &[carry_hi, carry_lo])
+ };
+ assert_eq!(final_carry, 0, "carry overflow during multiplication!");
+}
+
+fn bigint_from_slice(slice: &[BigDigit]) -> BigInt {
+ BigInt::from(biguint_from_vec(slice.to_vec()))
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+#[allow(clippy::many_single_char_names)]
+fn mac3(mut acc: &mut [BigDigit], mut b: &[BigDigit], mut c: &[BigDigit]) {
+ // Least-significant zeros have no effect on the output.
+ if let Some(&0) = b.first() {
+ if let Some(nz) = b.iter().position(|&d| d != 0) {
+ b = &b[nz..];
+ acc = &mut acc[nz..];
+ } else {
+ return;
+ }
+ }
+ if let Some(&0) = c.first() {
+ if let Some(nz) = c.iter().position(|&d| d != 0) {
+ c = &c[nz..];
+ acc = &mut acc[nz..];
+ } else {
+ return;
+ }
+ }
+
+ let acc = acc;
+ let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) };
+
+ // We use three algorithms for different input sizes.
+ //
+ // - For small inputs, long multiplication is fastest.
+ // - Next we use Karatsuba multiplication (Toom-2), which we have optimized
+ // to avoid unnecessary allocations for intermediate values.
+ // - For the largest inputs we use Toom-3, which better optimizes the
+ // number of operations, but uses more temporary allocations.
+ //
+ // The thresholds are somewhat arbitrary, chosen by evaluating the results
+ // of `cargo bench --bench bigint multiply`.
+
+ if x.len() <= 32 {
+ // Long multiplication:
+ for (i, xi) in x.iter().enumerate() {
+ mac_digit(&mut acc[i..], y, *xi);
+ }
+ } else if x.len() <= 256 {
+ // Karatsuba multiplication:
+ //
+ // The idea is that we break x and y up into two smaller numbers that each have about half
+ // as many digits, like so (note that multiplying by b is just a shift):
+ //
+ // x = x0 + x1 * b
+ // y = y0 + y1 * b
+ //
+ // With some algebra, we can compute x * y with three smaller products, where the inputs to
+ // each of the smaller products have only about half as many digits as x and y:
+ //
+ // x * y = (x0 + x1 * b) * (y0 + y1 * b)
+ //
+ // x * y = x0 * y0
+ // + x0 * y1 * b
+ // + x1 * y0 * b
+ // + x1 * y1 * b^2
+ //
+ // Let p0 = x0 * y0 and p2 = x1 * y1:
+ //
+ // x * y = p0
+ // + (x0 * y1 + x1 * y0) * b
+ // + p2 * b^2
+ //
+ // The real trick is that middle term:
+ //
+ // x0 * y1 + x1 * y0
+ //
+ // = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2
+ //
+ // = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2
+ //
+ // Now we complete the square:
+ //
+ // = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2
+ //
+ // = -((x1 - x0) * (y1 - y0)) + p0 + p2
+ //
+ // Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula:
+ //
+ // x * y = p0
+ // + (p0 + p2 - p1) * b
+ // + p2 * b^2
+ //
+ // Where the three intermediate products are:
+ //
+ // p0 = x0 * y0
+ // p1 = (x1 - x0) * (y1 - y0)
+ // p2 = x1 * y1
+ //
+ // In doing the computation, we take great care to avoid unnecessary temporary variables
+ // (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a
+ // bit so we can use the same temporary variable for all the intermediate products:
+ //
+ // x * y = p2 * b^2 + p2 * b
+ // + p0 * b + p0
+ // - p1 * b
+ //
+ // The other trick we use is instead of doing explicit shifts, we slice acc at the
+ // appropriate offset when doing the add.
+
+ // When x is smaller than y, it's significantly faster to pick b such that x is split in
+ // half, not y:
+ let b = x.len() / 2;
+ let (x0, x1) = x.split_at(b);
+ let (y0, y1) = y.split_at(b);
+
+ // We reuse the same BigUint for all the intermediate multiplies and have to size p
+ // appropriately here: x1.len() >= x0.len() and y1.len() >= y0.len():
+ let len = x1.len() + y1.len() + 1;
+ let mut p = BigUint { data: vec![0; len] };
+
+ // p2 = x1 * y1
+ mac3(&mut p.data, x1, y1);
+
+ // Not required, but the adds go faster if we drop any unneeded 0s from the end:
+ p.normalize();
+
+ add2(&mut acc[b..], &p.data);
+ add2(&mut acc[b * 2..], &p.data);
+
+ // Zero out p before the next multiply:
+ p.data.truncate(0);
+ p.data.resize(len, 0);
+
+ // p0 = x0 * y0
+ mac3(&mut p.data, x0, y0);
+ p.normalize();
+
+ add2(acc, &p.data);
+ add2(&mut acc[b..], &p.data);
+
+ // p1 = (x1 - x0) * (y1 - y0)
+ // We do this one last, since it may be negative and acc can't ever be negative:
+ let (j0_sign, j0) = sub_sign(x1, x0);
+ let (j1_sign, j1) = sub_sign(y1, y0);
+
+ match j0_sign * j1_sign {
+ Plus => {
+ p.data.truncate(0);
+ p.data.resize(len, 0);
+
+ mac3(&mut p.data, &j0.data, &j1.data);
+ p.normalize();
+
+ sub2(&mut acc[b..], &p.data);
+ }
+ Minus => {
+ mac3(&mut acc[b..], &j0.data, &j1.data);
+ }
+ NoSign => (),
+ }
+ } else {
+ // Toom-3 multiplication:
+ //
+ // Toom-3 is like Karatsuba above, but dividing the inputs into three parts.
+ // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively.
+ //
+ // The general idea is to treat the large integers digits as
+ // polynomials of a certain degree and determine the coefficients/digits
+ // of the product of the two via interpolation of the polynomial product.
+ let i = y.len() / 3 + 1;
+
+ let x0_len = Ord::min(x.len(), i);
+ let x1_len = Ord::min(x.len() - x0_len, i);
+
+ let y0_len = i;
+ let y1_len = Ord::min(y.len() - y0_len, i);
+
+ // Break x and y into three parts, representing an order-two polynomial.
+ // t is chosen to be the size of a digit so we can use faster shifts
+ // in place of multiplications.
+ //
+ // x(t) = x2*t^2 + x1*t + x0
+ let x0 = bigint_from_slice(&x[..x0_len]);
+ let x1 = bigint_from_slice(&x[x0_len..x0_len + x1_len]);
+ let x2 = bigint_from_slice(&x[x0_len + x1_len..]);
+
+ // y(t) = y2*t^2 + y1*t + y0
+ let y0 = bigint_from_slice(&y[..y0_len]);
+ let y1 = bigint_from_slice(&y[y0_len..y0_len + y1_len]);
+ let y2 = bigint_from_slice(&y[y0_len + y1_len..]);
+
+ // Let w(t) = x(t) * y(t)
+ //
+ // This gives us the following order-4 polynomial.
+ //
+ // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0
+ //
+ // We need to find the coefficients w4, w3, w2, w1 and w0. Instead
+ // of simply multiplying the x and y in total, we can evaluate w
+ // at 5 points. An n-degree polynomial is uniquely identified by (n + 1)
+ // points.
+ //
+ // The choice of evaluation points is arbitrary; we use the
+ // following.
+ //
+ // w(t) at t = 0, 1, -1, -2 and inf
+ //
+ // The values for w(t) in terms of x(t)*y(t) at these points are:
+ //
+ // let a = w(0) = x0 * y0
+ // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0)
+ // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0)
+ // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0)
+ // let e = w(inf) = x2 * y2 as t -> inf
+
+ // x0 + x2, avoiding temporaries
+ let p = &x0 + &x2;
+
+ // y0 + y2, avoiding temporaries
+ let q = &y0 + &y2;
+
+ // x2 - x1 + x0, avoiding temporaries
+ let p2 = &p - &x1;
+
+ // y2 - y1 + y0, avoiding temporaries
+ let q2 = &q - &y1;
+
+ // w(0)
+ let r0 = &x0 * &y0;
+
+ // w(inf)
+ let r4 = &x2 * &y2;
+
+ // w(1)
+ let r1 = (p + x1) * (q + y1);
+
+ // w(-1)
+ let r2 = &p2 * &q2;
+
+ // w(-2)
+ let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0);
+
+ // Evaluating these points gives us the following system of linear equations.
+ //
+ // 0 0 0 0 1 | a
+ // 1 1 1 1 1 | b
+ // 1 -1 1 -1 1 | c
+ // 16 -8 4 -2 1 | d
+ // 1 0 0 0 0 | e
+ //
+ // The solved equation (after gaussian elimination or similar)
+ // in terms of its coefficients:
+ //
+ // w0 = w(0)
+ // w1 = w(0)/2 + w(1)/3 - w(-1) + w(-2)/6 - 2*w(inf)
+ // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf)
+ // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(-2)/6 + 2*w(inf)
+ // w4 = w(inf)
+ //
+ // This particular sequence is given by Bodrato and is an interpolation
+ // of the above equations.
+ let mut comp3: BigInt = (r3 - &r1) / 3u32;
+ let mut comp1: BigInt = (r1 - &r2) >> 1;
+ let mut comp2: BigInt = r2 - &r0;
+ comp3 = ((&comp2 - comp3) >> 1) + (&r4 << 1);
+ comp2 += &comp1 - &r4;
+ comp1 -= &comp3;
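+
+ // At this point comp1, comp2 and comp3 hold the coefficients w1, w2 and w3 of the product
+ // polynomial, while w0 = r0 and w4 = r4 were computed directly above.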
+
+ // Recomposition. The coefficients of the polynomial are now known.
+ //
+ // Evaluate at w(t) where t is our given base to get the result.
+ //
+ // let bits = u64::from(big_digit::BITS) * i as u64;
+ // let result = r0
+ // + (comp1 << bits)
+ // + (comp2 << (2 * bits))
+ // + (comp3 << (3 * bits))
+ // + (r4 << (4 * bits));
+ // let result_pos = result.to_biguint().unwrap();
+ // add2(&mut acc[..], &result_pos.data);
+ //
+ // But with less intermediate copying:
+ for (j, result) in [&r0, &comp1, &comp2, &comp3, &r4].iter().enumerate().rev() {
+ match result.sign() {
+ Plus => add2(&mut acc[i * j..], result.digits()),
+ Minus => sub2(&mut acc[i * j..], result.digits()),
+ NoSign => {}
+ }
+ }
+ }
+}
+
+fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint {
+ let len = x.len() + y.len() + 1;
+ let mut prod = BigUint { data: vec![0; len] };
+
+ mac3(&mut prod.data, x, y);
+ prod.normalized()
+}
+
+fn scalar_mul(a: &mut BigUint, b: BigDigit) {
+ match b {
+ 0 => a.set_zero(),
+ 1 => {}
+ _ => {
+ if b.is_power_of_two() {
+ *a <<= b.trailing_zeros();
+ } else {
+ let mut carry = 0;
+ for a in a.data.iter_mut() {
+ *a = mul_with_carry(*a, b, &mut carry);
+ }
+ if carry != 0 {
+ a.data.push(carry as BigDigit);
+ }
+ }
+ }
+ }
+}
+
+fn sub_sign(mut a: &[BigDigit], mut b: &[BigDigit]) -> (Sign, BigUint) {
+ // Normalize:
+ if let Some(&0) = a.last() {
+ a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+ }
+ if let Some(&0) = b.last() {
+ b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+ }
+
+ match cmp_slice(a, b) {
+ Ordering::Greater => {
+ let mut a = a.to_vec();
+ sub2(&mut a, b);
+ (Plus, biguint_from_vec(a))
+ }
+ Ordering::Less => {
+ let mut b = b.to_vec();
+ sub2(&mut b, a);
+ (Minus, biguint_from_vec(b))
+ }
+ Ordering::Equal => (NoSign, Zero::zero()),
+ }
+}
+
+macro_rules! impl_mul {
+ ($(impl Mul<$Other:ty> for $Self:ty;)*) => {$(
+ impl Mul<$Other> for $Self {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(self, other: $Other) -> BigUint {
+ match (&*self.data, &*other.data) {
+ // multiply by zero
+ (&[], _) | (_, &[]) => BigUint::zero(),
+ // multiply by a scalar
+ (_, &[digit]) => self * digit,
+ (&[digit], _) => other * digit,
+ // full multiplication
+ (x, y) => mul3(x, y),
+ }
+ }
+ }
+ )*}
+}
+impl_mul! {
+ impl Mul<BigUint> for BigUint;
+ impl Mul<BigUint> for &BigUint;
+ impl Mul<&BigUint> for BigUint;
+ impl Mul<&BigUint> for &BigUint;
+}
+
+macro_rules! impl_mul_assign {
+ ($(impl MulAssign<$Other:ty> for BigUint;)*) => {$(
+ impl MulAssign<$Other> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: $Other) {
+ match (&*self.data, &*other.data) {
+ // multiply by zero
+ (&[], _) => {},
+ (_, &[]) => self.set_zero(),
+ // multiply by a scalar
+ (_, &[digit]) => *self *= digit,
+ (&[digit], _) => *self = other * digit,
+ // full multiplication
+ (x, y) => *self = mul3(x, y),
+ }
+ }
+ }
+ )*}
+}
+impl_mul_assign! {
+ impl MulAssign<BigUint> for BigUint;
+ impl MulAssign<&BigUint> for BigUint;
+}
+
+promote_unsigned_scalars!(impl Mul for BigUint, mul);
+promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigUint, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigUint, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigUint, mul);
+
+impl Mul<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u32) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u32> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: u32) {
+ scalar_mul(self, other as BigDigit);
+ }
+}
+
+impl Mul<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u64) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u64> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ if let Some(other) = BigDigit::from_u64(other) {
+ scalar_mul(self, other);
+ } else {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ *self = mul3(&self.data, &[lo, hi]);
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ scalar_mul(self, other);
+ }
+}
+
+impl Mul<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u128) -> BigUint {
+ self *= other;
+ self
+ }
+}
+
+impl MulAssign<u128> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ if let Some(other) = BigDigit::from_u128(other) {
+ scalar_mul(self, other);
+ } else {
+ *self = match u32_from_u128(other) {
+ (0, 0, c, d) => mul3(&self.data, &[d, c]),
+ (0, b, c, d) => mul3(&self.data, &[d, c, b]),
+ (a, b, c, d) => mul3(&self.data, &[d, c, b, a]),
+ };
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ if let Some(other) = BigDigit::from_u128(other) {
+ scalar_mul(self, other);
+ } else {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ *self = mul3(&self.data, &[lo, hi]);
+ }
+ }
+}
+
+impl CheckedMul for BigUint {
+ #[inline]
+ fn checked_mul(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.mul(v))
+ }
+}
+
+impl_product_iter_type!(BigUint);
+
+#[test]
+fn test_sub_sign() {
+ use crate::BigInt;
+ use num_traits::Num;
+
+ fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt {
+ let (sign, val) = sub_sign(a, b);
+ BigInt::from_biguint(sign, val)
+ }
+
+ let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap();
+ let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap();
+ let a_i = BigInt::from(a.clone());
+ let b_i = BigInt::from(b.clone());
+
+ assert_eq!(sub_sign_i(&a.data, &b.data), &a_i - &b_i);
+ assert_eq!(sub_sign_i(&b.data, &a.data), &b_i - &a_i);
+}
diff --git a/rust/vendor/num-bigint/src/biguint/power.rs b/rust/vendor/num-bigint/src/biguint/power.rs
new file mode 100644
index 0000000..621e1b1
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/power.rs
@@ -0,0 +1,258 @@
+use super::monty::monty_modpow;
+use super::BigUint;
+
+use crate::big_digit::{self, BigDigit};
+
+use num_integer::Integer;
+use num_traits::{One, Pow, ToPrimitive, Zero};
+
+impl Pow<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &BigUint) -> BigUint {
+ if self.is_one() || exp.is_zero() {
+ BigUint::one()
+ } else if self.is_zero() {
+ BigUint::zero()
+ } else if let Some(exp) = exp.to_u64() {
+ self.pow(exp)
+ } else if let Some(exp) = exp.to_u128() {
+ self.pow(exp)
+ } else {
+ // At this point, `self >= 2` and `exp >= 2¹²⁸`. Even the smallest possible result,
+ // `2.pow(2¹²⁸)`, would require far more memory than 64-bit targets can address!
+ panic!("memory overflow")
+ }
+ }
+}
+
+impl Pow<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: BigUint) -> BigUint {
+ Pow::pow(self, &exp)
+ }
+}
+
+impl Pow<&BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &BigUint) -> BigUint {
+ if self.is_one() || exp.is_zero() {
+ BigUint::one()
+ } else if self.is_zero() {
+ BigUint::zero()
+ } else {
+ self.clone().pow(exp)
+ }
+ }
+}
+
+impl Pow<BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: BigUint) -> BigUint {
+ Pow::pow(self, &exp)
+ }
+}
+
+macro_rules! pow_impl {
+ ($T:ty) => {
+ impl Pow<$T> for BigUint {
+ type Output = BigUint;
+
+ fn pow(self, mut exp: $T) -> BigUint {
+ if exp == 0 {
+ return BigUint::one();
+ }
+ let mut base = self;
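+ // Standard exponentiation by squaring: square the base once for every trailing
+ // zero bit of exp, then walk the remaining bits, squaring each step and
+ // multiplying into acc whenever a bit is set. E.g. exp = 12 = 0b1100: two
+ // squarings give base^4, then acc = base^4 * base^8 = base^12.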
+
+ while exp & 1 == 0 {
+ base = &base * &base;
+ exp >>= 1;
+ }
+
+ if exp == 1 {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ while exp > 1 {
+ exp >>= 1;
+ base = &base * &base;
+ if exp & 1 == 1 {
+ acc *= &base;
+ }
+ }
+ acc
+ }
+ }
+
+ impl Pow<&$T> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &$T) -> BigUint {
+ Pow::pow(self, *exp)
+ }
+ }
+
+ impl Pow<$T> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: $T) -> BigUint {
+ if exp == 0 {
+ return BigUint::one();
+ }
+ Pow::pow(self.clone(), exp)
+ }
+ }
+
+ impl Pow<&$T> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &$T) -> BigUint {
+ Pow::pow(self, *exp)
+ }
+ }
+ };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+pow_impl!(u128);
+
+pub(super) fn modpow(x: &BigUint, exponent: &BigUint, modulus: &BigUint) -> BigUint {
+ assert!(
+ !modulus.is_zero(),
+ "attempt to calculate with zero modulus!"
+ );
+
+ if modulus.is_odd() {
+ // For an odd modulus, we can use Montgomery multiplication in the digit base 2^BITS.
+ monty_modpow(x, exponent, modulus)
+ } else {
+ // Otherwise do basically the same as `num::pow`, but with a modulus.
+ plain_modpow(x, &exponent.data, modulus)
+ }
+}
+
+fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint {
+ assert!(
+ !modulus.is_zero(),
+ "attempt to calculate with zero modulus!"
+ );
+
+ let i = match exp_data.iter().position(|&r| r != 0) {
+ None => return BigUint::one(),
+ Some(i) => i,
+ };
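+ // exp_data holds the exponent's digits in little-endian order; the first i digits are zero,
+ // so each of them contributes exactly BITS squarings, applied by the loop just below.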
+
+ let mut base = base % modulus;
+ for _ in 0..i {
+ for _ in 0..big_digit::BITS {
+ base = &base * &base % modulus;
+ }
+ }
+
+ let mut r = exp_data[i];
+ let mut b = 0u8;
+ while r.is_even() {
+ base = &base * &base % modulus;
+ r >>= 1;
+ b += 1;
+ }
+
+ let mut exp_iter = exp_data[i + 1..].iter();
+ if exp_iter.len() == 0 && r.is_one() {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ r >>= 1;
+ b += 1;
+
+ {
+ let mut unit = |exp_is_odd| {
+ base = &base * &base % modulus;
+ if exp_is_odd {
+ acc *= &base;
+ acc %= modulus;
+ }
+ };
+
+ if let Some(&last) = exp_iter.next_back() {
+ // consume exp_data[i]
+ for _ in b..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+
+ // consume all other digits before the last
+ for &r in exp_iter {
+ let mut r = r;
+ for _ in 0..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ r = last;
+ }
+
+ debug_assert_ne!(r, 0);
+ while !r.is_zero() {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ acc
+}
+
+#[test]
+fn test_plain_modpow() {
+ let two = &BigUint::from(2u32);
+ let modulus = BigUint::from(0x1100u32);
+
+ let exp = vec![0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b10];
+ assert_eq!(
+ two.pow(0b10_00000000_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b110010];
+ assert_eq!(
+ two.pow(0b110010_00000000_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+ let exp = vec![0b1, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000001_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+ let exp = vec![0b1100, 0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_00001100_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+}
+
+#[test]
+fn test_pow_biguint() {
+ let base = BigUint::from(5u8);
+ let exponent = BigUint::from(3u8);
+
+ assert_eq!(BigUint::from(125u8), base.pow(exponent));
+}
diff --git a/rust/vendor/num-bigint/src/biguint/serde.rs b/rust/vendor/num-bigint/src/biguint/serde.rs
new file mode 100644
index 0000000..3240f09
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/serde.rs
@@ -0,0 +1,119 @@
+use super::{biguint_from_vec, BigUint};
+
+use crate::std_alloc::Vec;
+
+use core::{cmp, fmt, mem};
+use serde::de::{SeqAccess, Visitor};
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+// `cautious` is based on the function of the same name in `serde`, but specialized to `u32`:
+// https://github.com/dtolnay/serde/blob/399ef081ecc36d2f165ff1f6debdcbf6a1dc7efb/serde/src/private/size_hint.rs#L11-L22
+fn cautious(hint: Option<usize>) -> usize {
+ const MAX_PREALLOC_BYTES: usize = 1024 * 1024;
+
+ cmp::min(
+ hint.unwrap_or(0),
+ MAX_PREALLOC_BYTES / mem::size_of::<u32>(),
+ )
+}
+
+impl Serialize for BigUint {
+ #[cfg(not(u64_digit))]
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ // Note: do not change the serialization format, or it may break forward
+ // and backward compatibility of serialized data! If we ever change the
+ // internal representation, we should still serialize in base-`u32`.
+ let data: &[u32] = &self.data;
+ data.serialize(serializer)
+ }
+
+ #[cfg(u64_digit)]
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ use serde::ser::SerializeSeq;
+
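+ // Each u64 digit is written as two u32 limbs, low half first, and a zero high half of the
+ // most significant digit is omitted, so the output matches what a base-u32 build would
+ // produce. For example, the single digit 0x0000_0001_0000_0002 serializes as [2u32, 1u32].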
+ if let Some((&last, data)) = self.data.split_last() {
+ let last_lo = last as u32;
+ let last_hi = (last >> 32) as u32;
+ let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize;
+ let mut seq = serializer.serialize_seq(Some(u32_len))?;
+ for &x in data {
+ seq.serialize_element(&(x as u32))?;
+ seq.serialize_element(&((x >> 32) as u32))?;
+ }
+ seq.serialize_element(&last_lo)?;
+ if last_hi != 0 {
+ seq.serialize_element(&last_hi)?;
+ }
+ seq.end()
+ } else {
+ let data: &[u32] = &[];
+ data.serialize(serializer)
+ }
+ }
+}
+
+impl<'de> Deserialize<'de> for BigUint {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ deserializer.deserialize_seq(U32Visitor)
+ }
+}
+
+struct U32Visitor;
+
+impl<'de> Visitor<'de> for U32Visitor {
+ type Value = BigUint;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("a sequence of unsigned 32-bit numbers")
+ }
+
+ #[cfg(not(u64_digit))]
+ fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
+ where
+ S: SeqAccess<'de>,
+ {
+ let len = cautious(seq.size_hint());
+ let mut data = Vec::with_capacity(len);
+
+ while let Some(value) = seq.next_element::<u32>()? {
+ data.push(value);
+ }
+
+ Ok(biguint_from_vec(data))
+ }
+
+ #[cfg(u64_digit)]
+ fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
+ where
+ S: SeqAccess<'de>,
+ {
+ use crate::big_digit::BigDigit;
+ use num_integer::Integer;
+
+ let u32_len = cautious(seq.size_hint());
+ let len = Integer::div_ceil(&u32_len, &2);
+ let mut data = Vec::with_capacity(len);
+
+ while let Some(lo) = seq.next_element::<u32>()? {
+ let mut value = BigDigit::from(lo);
+ if let Some(hi) = seq.next_element::<u32>()? {
+ value |= BigDigit::from(hi) << 32;
+ data.push(value);
+ } else {
+ data.push(value);
+ break;
+ }
+ }
+
+ Ok(biguint_from_vec(data))
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/shift.rs b/rust/vendor/num-bigint/src/biguint/shift.rs
new file mode 100644
index 0000000..00326bb
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/shift.rs
@@ -0,0 +1,172 @@
+use super::{biguint_from_vec, BigUint};
+
+use crate::big_digit;
+use crate::std_alloc::{Cow, Vec};
+
+use core::mem;
+use core::ops::{Shl, ShlAssign, Shr, ShrAssign};
+use num_traits::{PrimInt, Zero};
+
+#[inline]
+fn biguint_shl<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
+ if shift < T::zero() {
+ panic!("attempt to shift left with negative");
+ }
+ if n.is_zero() {
+ return n.into_owned();
+ }
+ let bits = T::from(big_digit::BITS).unwrap();
+ let digits = (shift / bits).to_usize().expect("capacity overflow");
+ let shift = (shift % bits).to_u8().unwrap();
+ biguint_shl2(n, digits, shift)
+}
+
+fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
+ let mut data = match digits {
+ 0 => n.into_owned().data,
+ _ => {
+ let len = digits.saturating_add(n.data.len() + 1);
+ let mut data = Vec::with_capacity(len);
+ data.resize(digits, 0);
+ data.extend(n.data.iter());
+ data
+ }
+ };
+
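+ // Shift within digits: the top `shift` bits of each digit carry into the next more
+ // significant digit, and a final non-zero carry becomes a new most significant digit.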
+ if shift > 0 {
+ let mut carry = 0;
+ let carry_shift = big_digit::BITS - shift;
+ for elem in data[digits..].iter_mut() {
+ let new_carry = *elem >> carry_shift;
+ *elem = (*elem << shift) | carry;
+ carry = new_carry;
+ }
+ if carry != 0 {
+ data.push(carry);
+ }
+ }
+
+ biguint_from_vec(data)
+}
+
+#[inline]
+fn biguint_shr<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
+ if shift < T::zero() {
+ panic!("attempt to shift right with negative");
+ }
+ if n.is_zero() {
+ return n.into_owned();
+ }
+ let bits = T::from(big_digit::BITS).unwrap();
+ let digits = (shift / bits).to_usize().unwrap_or(core::usize::MAX);
+ let shift = (shift % bits).to_u8().unwrap();
+ biguint_shr2(n, digits, shift)
+}
+
+fn biguint_shr2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
+ if digits >= n.data.len() {
+ let mut n = n.into_owned();
+ n.set_zero();
+ return n;
+ }
+ let mut data = match n {
+ Cow::Borrowed(n) => n.data[digits..].to_vec(),
+ Cow::Owned(mut n) => {
+ n.data.drain(..digits);
+ n.data
+ }
+ };
+
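+ // Shift within digits, mirroring the left shift above: the low `shift` bits of each digit
+ // borrow down into the next less significant digit, working from the most significant end.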
+ if shift > 0 {
+ let mut borrow = 0;
+ let borrow_shift = big_digit::BITS - shift;
+ for elem in data.iter_mut().rev() {
+ let new_borrow = *elem << borrow_shift;
+ *elem = (*elem >> shift) | borrow;
+ borrow = new_borrow;
+ }
+ }
+
+ biguint_from_vec(data)
+}
+
+macro_rules! impl_shift {
+ (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => {
+ impl $Shx<&$rhs> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn $shx(self, rhs: &$rhs) -> BigUint {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl $Shx<&$rhs> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn $shx(self, rhs: &$rhs) -> BigUint {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl $ShxAssign<&$rhs> for BigUint {
+ #[inline]
+ fn $shx_assign(&mut self, rhs: &$rhs) {
+ $ShxAssign::$shx_assign(self, *rhs);
+ }
+ }
+ };
+ ($($rhs:ty),+) => {$(
+ impl Shl<$rhs> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigUint {
+ biguint_shl(Cow::Owned(self), rhs)
+ }
+ }
+ impl Shl<$rhs> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigUint {
+ biguint_shl(Cow::Borrowed(self), rhs)
+ }
+ }
+ impl ShlAssign<$rhs> for BigUint {
+ #[inline]
+ fn shl_assign(&mut self, rhs: $rhs) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n << rhs;
+ }
+ }
+ impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs }
+
+ impl Shr<$rhs> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigUint {
+ biguint_shr(Cow::Owned(self), rhs)
+ }
+ }
+ impl Shr<$rhs> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigUint {
+ biguint_shr(Cow::Borrowed(self), rhs)
+ }
+ }
+ impl ShrAssign<$rhs> for BigUint {
+ #[inline]
+ fn shr_assign(&mut self, rhs: $rhs) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n >> rhs;
+ }
+ }
+ impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs }
+ )*};
+}
+
+impl_shift! { u8, u16, u32, u64, u128, usize }
+impl_shift! { i8, i16, i32, i64, i128, isize }
diff --git a/rust/vendor/num-bigint/src/biguint/subtraction.rs b/rust/vendor/num-bigint/src/biguint/subtraction.rs
new file mode 100644
index 0000000..b7cf59d
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/subtraction.rs
@@ -0,0 +1,312 @@
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::BigUint;
+
+use crate::big_digit::{self, BigDigit};
+use crate::UsizePromotion;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::ops::{Sub, SubAssign};
+use num_traits::{CheckedSub, Zero};
+
+#[cfg(all(use_addcarry, target_arch = "x86_64"))]
+use core::arch::x86_64 as arch;
+
+#[cfg(all(use_addcarry, target_arch = "x86"))]
+use core::arch::x86 as arch;
+
+// Subtract with borrow:
+#[cfg(all(use_addcarry, u64_digit))]
+#[inline]
+fn sbb(borrow: u8, a: u64, b: u64, out: &mut u64) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_subborrow_u64`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_subborrow_u64(borrow, a, b, out) }
+}
+
+#[cfg(all(use_addcarry, not(u64_digit)))]
+#[inline]
+fn sbb(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_subborrow_u32`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_subborrow_u32(borrow, a, b, out) }
+}
+
+// fallback for environments where we don't have a subborrow intrinsic
+#[cfg(not(use_addcarry))]
+#[inline]
+fn sbb(borrow: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 {
+ use crate::big_digit::SignedDoubleBigDigit;
+
+ let difference = SignedDoubleBigDigit::from(a)
+ - SignedDoubleBigDigit::from(b)
+ - SignedDoubleBigDigit::from(borrow);
+ *out = difference as BigDigit;
+ u8::from(difference < 0)
+}
+
+pub(super) fn sub2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let mut borrow = 0;
+
+ let len = Ord::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at_mut(len);
+ let (b_lo, b_hi) = b.split_at(len);
+
+ for (a, b) in a_lo.iter_mut().zip(b_lo) {
+ borrow = sbb(borrow, *a, *b, a);
+ }
+
+ if borrow != 0 {
+ for a in a_hi {
+ borrow = sbb(borrow, *a, 0, a);
+ if borrow == 0 {
+ break;
+ }
+ }
+ }
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+// Only for the Sub impl. `a` and `b` must have same length.
+#[inline]
+fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> u8 {
+ debug_assert!(b.len() == a.len());
+
+ let mut borrow = 0;
+
+ for (ai, bi) in a.iter().zip(b) {
+ borrow = sbb(borrow, *ai, *bi, bi);
+ }
+
+ borrow
+}
+
+fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) {
+ debug_assert!(b.len() >= a.len());
+
+ let len = Ord::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at(len);
+ let (b_lo, b_hi) = b.split_at_mut(len);
+
+ let borrow = __sub2rev(a_lo, b_lo);
+
+ assert!(a_hi.is_empty());
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+forward_val_val_binop!(impl Sub for BigUint, sub);
+forward_ref_ref_binop!(impl Sub for BigUint, sub);
+forward_val_assign!(impl SubAssign for BigUint, sub_assign);
+
+impl Sub<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn sub(mut self, other: &BigUint) -> BigUint {
+ self -= other;
+ self
+ }
+}
+impl SubAssign<&BigUint> for BigUint {
+ fn sub_assign(&mut self, other: &BigUint) {
+ sub2(&mut self.data[..], &other.data[..]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for &BigUint {
+ type Output = BigUint;
+
+ fn sub(self, mut other: BigUint) -> BigUint {
+ let other_len = other.data.len();
+ if other_len < self.data.len() {
+ let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data);
+ other.data.extend_from_slice(&self.data[other_len..]);
+ if lo_borrow != 0 {
+ sub2(&mut other.data[other_len..], &[1])
+ }
+ } else {
+ sub2rev(&self.data[..], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+promote_unsigned_scalars!(impl Sub for BigUint, sub);
+promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigUint, sub);
+
+impl Sub<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u32) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u32> for BigUint {
+ fn sub_assign(&mut self, other: u32) {
+ sub2(&mut self.data[..], &[other as BigDigit]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self);
+ } else {
+ sub2rev(&[self], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self as BigDigit);
+ } else {
+ sub2rev(&[self as BigDigit], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+impl Sub<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u64) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u64> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ sub2(&mut self.data[..], &[lo, hi]);
+ self.normalize();
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ sub2(&mut self.data[..], &[other as BigDigit]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 2 {
+ other.data.push(0);
+ }
+
+ let (hi, lo) = big_digit::from_doublebigdigit(self);
+ sub2rev(&[lo, hi], &mut other.data[..]);
+ other.normalized()
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self);
+ } else {
+ sub2rev(&[self], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+impl Sub<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u128) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u128> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let (a, b, c, d) = u32_from_u128(other);
+ sub2(&mut self.data[..], &[d, c, b, a]);
+ self.normalize();
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ sub2(&mut self.data[..], &[lo, hi]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 4 {
+ other.data.push(0);
+ }
+
+ let (a, b, c, d) = u32_from_u128(self);
+ sub2rev(&[d, c, b, a], &mut other.data[..]);
+ other.normalized()
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 2 {
+ other.data.push(0);
+ }
+
+ let (hi, lo) = big_digit::from_doublebigdigit(self);
+ sub2rev(&[lo, hi], &mut other.data[..]);
+ other.normalized()
+ }
+}
+
+impl CheckedSub for BigUint {
+ #[inline]
+ fn checked_sub(&self, v: &BigUint) -> Option<BigUint> {
+ match self.cmp(v) {
+ Less => None,
+ Equal => Some(Zero::zero()),
+ Greater => Some(self.sub(v)),
+ }
+ }
+}
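
The operators above panic when the subtrahend is larger (the "Cannot subtract b from a" assert), whereas `CheckedSub` returns `None`. A minimal standalone sketch, not part of the vendored file, using only the public `num-bigint` and `num-traits` API to contrast the two behaviours:

```rust
use num_bigint::BigUint;
use num_traits::CheckedSub;

fn main() {
    let a = BigUint::from(10u32);
    let b = BigUint::from(7u32);

    // Ordinary subtraction is fine as long as a >= b.
    assert_eq!(&a - &b, BigUint::from(3u32));

    // `b - &a` would hit the underflow assert and panic;
    // checked_sub reports the failure as None instead.
    assert_eq!(b.checked_sub(&a), None);
    assert_eq!(a.checked_sub(&b), Some(BigUint::from(3u32)));
}
```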
diff --git a/rust/vendor/num-bigint/src/lib.rs b/rust/vendor/num-bigint/src/lib.rs
new file mode 100644
index 0000000..893b747
--- /dev/null
+++ b/rust/vendor/num-bigint/src/lib.rs
@@ -0,0 +1,290 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Big Integer Types for Rust
+//!
+//! * A [`BigUint`] is unsigned and represented as a vector of digits.
+//! * A [`BigInt`] is signed and is a combination of [`BigUint`] and [`Sign`].
+//!
+//! Common numerical operations are overloaded, so we can treat them
+//! the same way we treat other numbers.
+//!
+//! ## Example
+//!
+//! ```rust
+//! # fn main() {
+//! use num_bigint::BigUint;
+//! use num_traits::{Zero, One};
+//!
+//! // Calculate large fibonacci numbers.
+//! fn fib(n: usize) -> BigUint {
+//! let mut f0: BigUint = Zero::zero();
+//! let mut f1: BigUint = One::one();
+//! for _ in 0..n {
+//! let f2 = f0 + &f1;
+//! f0 = f1;
+//! f1 = f2;
+//! }
+//! f0
+//! }
+//!
+//! // This is a very large number.
+//! println!("fib(1000) = {}", fib(1000));
+//! # }
+//! ```
+//!
+//! It's easy to generate large random numbers:
+//!
+//! ```rust,ignore
+//! use num_bigint::{ToBigInt, RandBigInt};
+//!
+//! let mut rng = rand::thread_rng();
+//! let a = rng.gen_bigint(1000);
+//!
+//! let low = -10000.to_bigint().unwrap();
+//! let high = 10000.to_bigint().unwrap();
+//! let b = rng.gen_bigint_range(&low, &high);
+//!
+//! // Probably an even larger number.
+//! println!("{}", a * b);
+//! ```
+//!
+//! See the "Features" section for instructions for enabling random number generation.
+//!
+//! ## Features
+//!
+//! The `std` crate feature is enabled by default, and it is mandatory before Rust
+//! 1.36, which stabilized the `alloc` crate. If you depend on `num-bigint` with
+//! `default-features = false` on such an older compiler, you must manually enable
+//! the `std` feature yourself.
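+//!
+//! For example, on such an older toolchain a sketch of the dependency line (the
+//! feature name is as described above) would be:
+//!
+//! ```toml
+//! num-bigint = { version = "0.4", default-features = false, features = ["std"] }
+//! ```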
+//!
+//! ### Random Generation
+//!
+//! `num-bigint` supports the generation of random big integers when the `rand`
+//! feature is enabled. To enable it, add `rand` to your dependencies as follows:
+//!
+//! ```toml
+//! rand = "0.8"
+//! num-bigint = { version = "0.4", features = ["rand"] }
+//! ```
+//!
+//! Note that you must use the version of `rand` that `num-bigint` is compatible
+//! with: `0.8`.
+//!
+//! ## Compatibility
+//!
+//! The `num-bigint` crate is tested for rustc 1.31 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num-bigint/0.4")]
+#![warn(rust_2018_idioms)]
+#![no_std]
+
+#[cfg(feature = "std")]
+#[macro_use]
+extern crate std;
+
+#[cfg(feature = "std")]
+mod std_alloc {
+ pub(crate) use std::borrow::Cow;
+ #[cfg(feature = "quickcheck")]
+ pub(crate) use std::boxed::Box;
+ pub(crate) use std::string::String;
+ pub(crate) use std::vec::Vec;
+}
+
+#[cfg(not(feature = "std"))]
+#[macro_use]
+extern crate alloc;
+
+#[cfg(not(feature = "std"))]
+mod std_alloc {
+ pub(crate) use alloc::borrow::Cow;
+ #[cfg(feature = "quickcheck")]
+ pub(crate) use alloc::boxed::Box;
+ pub(crate) use alloc::string::String;
+ pub(crate) use alloc::vec::Vec;
+}
+
+use core::fmt;
+#[cfg(feature = "std")]
+use std::error::Error;
+
+#[macro_use]
+mod macros;
+
+mod bigint;
+mod biguint;
+
+#[cfg(feature = "rand")]
+mod bigrand;
+
+#[cfg(target_pointer_width = "32")]
+type UsizePromotion = u32;
+#[cfg(target_pointer_width = "64")]
+type UsizePromotion = u64;
+
+#[cfg(target_pointer_width = "32")]
+type IsizePromotion = i32;
+#[cfg(target_pointer_width = "64")]
+type IsizePromotion = i64;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct ParseBigIntError {
+ kind: BigIntErrorKind,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+enum BigIntErrorKind {
+ Empty,
+ InvalidDigit,
+}
+
+impl ParseBigIntError {
+ fn __description(&self) -> &str {
+ use crate::BigIntErrorKind::*;
+ match self.kind {
+ Empty => "cannot parse integer from empty string",
+ InvalidDigit => "invalid digit found in string",
+ }
+ }
+
+ fn empty() -> Self {
+ ParseBigIntError {
+ kind: BigIntErrorKind::Empty,
+ }
+ }
+
+ fn invalid() -> Self {
+ ParseBigIntError {
+ kind: BigIntErrorKind::InvalidDigit,
+ }
+ }
+}
+
+impl fmt::Display for ParseBigIntError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+#[cfg(feature = "std")]
+impl Error for ParseBigIntError {
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+/// The error type returned when a checked conversion regarding big integer fails.
+#[cfg(has_try_from)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct TryFromBigIntError<T> {
+ original: T,
+}
+
+#[cfg(has_try_from)]
+impl<T> TryFromBigIntError<T> {
+ fn new(original: T) -> Self {
+ TryFromBigIntError { original }
+ }
+
+ fn __description(&self) -> &str {
+ "out of range conversion regarding big integer attempted"
+ }
+
+ /// Extract the original value, if available. The value will be available
+ /// if the type before conversion was either [`BigInt`] or [`BigUint`].
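+ ///
+ /// A sketch (relying in practice on the crate's `TryFrom` conversions, which
+ /// are gated on `has_try_from`):
+ ///
+ /// ```ignore
+ /// use num_bigint::BigInt;
+ /// use std::convert::TryFrom;
+ ///
+ /// let too_big = BigInt::from(u64::MAX) * 4u32;
+ /// match u32::try_from(too_big.clone()) {
+ ///     // The error keeps the original BigInt, so nothing is lost.
+ ///     Err(e) => assert_eq!(e.into_original(), too_big),
+ ///     Ok(_) => unreachable!(),
+ /// }
+ /// ```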
+ pub fn into_original(self) -> T {
+ self.original
+ }
+}
+
+#[cfg(all(feature = "std", has_try_from))]
+impl<T> std::error::Error for TryFromBigIntError<T>
+where
+ T: fmt::Debug,
+{
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+#[cfg(has_try_from)]
+impl<T> fmt::Display for TryFromBigIntError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+pub use crate::biguint::BigUint;
+pub use crate::biguint::ToBigUint;
+pub use crate::biguint::U32Digits;
+pub use crate::biguint::U64Digits;
+
+pub use crate::bigint::BigInt;
+pub use crate::bigint::Sign;
+pub use crate::bigint::ToBigInt;
+
+#[cfg(feature = "rand")]
+pub use crate::bigrand::{RandBigInt, RandomBits, UniformBigInt, UniformBigUint};
+
+mod big_digit {
+ /// A [`BigDigit`] is a [`BigUint`]'s composing element.
+ #[cfg(not(u64_digit))]
+ pub(crate) type BigDigit = u32;
+ #[cfg(u64_digit)]
+ pub(crate) type BigDigit = u64;
+
+ /// A [`DoubleBigDigit`] is the internal type used to do the computations. It
+ /// is twice the size of a [`BigDigit`].
+ #[cfg(not(u64_digit))]
+ pub(crate) type DoubleBigDigit = u64;
+ #[cfg(u64_digit)]
+ pub(crate) type DoubleBigDigit = u128;
+
+ /// A [`SignedDoubleBigDigit`] is the signed version of [`DoubleBigDigit`].
+ #[cfg(not(u64_digit))]
+ pub(crate) type SignedDoubleBigDigit = i64;
+ #[cfg(u64_digit)]
+ pub(crate) type SignedDoubleBigDigit = i128;
+
+ // Constants that depend on the configured [`BigDigit`] size.
+ #[cfg(not(u64_digit))]
+ pub(crate) const BITS: u8 = 32;
+ #[cfg(u64_digit)]
+ pub(crate) const BITS: u8 = 64;
+
+ pub(crate) const HALF_BITS: u8 = BITS / 2;
+ pub(crate) const HALF: BigDigit = (1 << HALF_BITS) - 1;
+
+ const LO_MASK: DoubleBigDigit = (1 << BITS) - 1;
+ pub(crate) const MAX: BigDigit = LO_MASK as BigDigit;
+
+ #[inline]
+ fn get_hi(n: DoubleBigDigit) -> BigDigit {
+ (n >> BITS) as BigDigit
+ }
+ #[inline]
+ fn get_lo(n: DoubleBigDigit) -> BigDigit {
+ (n & LO_MASK) as BigDigit
+ }
+
+ /// Split one [`DoubleBigDigit`] into two [`BigDigit`]s.
+ #[inline]
+ pub(crate) fn from_doublebigdigit(n: DoubleBigDigit) -> (BigDigit, BigDigit) {
+ (get_hi(n), get_lo(n))
+ }
+
+ /// Join two [`BigDigit`]s into one [`DoubleBigDigit`].
+ #[inline]
+ pub(crate) fn to_doublebigdigit(hi: BigDigit, lo: BigDigit) -> DoubleBigDigit {
+ DoubleBigDigit::from(lo) | (DoubleBigDigit::from(hi) << BITS)
+ }
+}
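
The split/join helpers in `big_digit` are crate-private, so they cannot be called from outside the crate. The following standalone sketch (not part of the vendored file, hard-wired to the 32-bit `BigDigit` configuration) restates the same shift/mask logic to show the round-trip invariant the module maintains:

```rust
// Standalone sketch of the digit split/join logic above, fixed to the
// 32-bit BigDigit configuration (BITS = 32). Not part of the vendored file.
type BigDigit = u32;
type DoubleBigDigit = u64;

const BITS: u32 = 32;
const LO_MASK: DoubleBigDigit = (1u64 << BITS) - 1;

// Split one DoubleBigDigit into (hi, lo) BigDigits.
fn from_doublebigdigit(n: DoubleBigDigit) -> (BigDigit, BigDigit) {
    ((n >> BITS) as BigDigit, (n & LO_MASK) as BigDigit)
}

// Join (hi, lo) BigDigits back into one DoubleBigDigit.
fn to_doublebigdigit(hi: BigDigit, lo: BigDigit) -> DoubleBigDigit {
    DoubleBigDigit::from(lo) | (DoubleBigDigit::from(hi) << BITS)
}

fn main() {
    let n: DoubleBigDigit = 0x1234_5678_9abc_def0;
    let (hi, lo) = from_doublebigdigit(n);
    assert_eq!((hi, lo), (0x1234_5678, 0x9abc_def0));
    // Splitting and re-joining is lossless.
    assert_eq!(to_doublebigdigit(hi, lo), n);
}
```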
diff --git a/rust/vendor/num-bigint/src/macros.rs b/rust/vendor/num-bigint/src/macros.rs
new file mode 100644
index 0000000..1618616
--- /dev/null
+++ b/rust/vendor/num-bigint/src/macros.rs
@@ -0,0 +1,441 @@
+#![allow(unused_macros)]
+
+macro_rules! forward_val_val_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to val-ref
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_val_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to val-ref, with the larger capacity as val
+ if self.capacity() >= other.capacity() {
+ $imp::$method(self, &other)
+ } else {
+ $imp::$method(other, &self)
+ }
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for &$res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to ref-ref
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for &$res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // reverse, forward to val-ref
+ $imp::$method(other, self)
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_ref_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<&$res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to ref-ref
+ $imp::$method(&self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_ref_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<&$res> for &$res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to val-ref
+ $imp::$method(self.clone(), other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_ref_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<&$res> for &$res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to val-ref, choosing the larger to clone
+ if self.len() >= other.len() {
+ $imp::$method(self.clone(), other)
+ } else {
+ $imp::$method(other.clone(), self)
+ }
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ #[inline]
+ fn $method(&mut self, other: $res) {
+ self.$method(&other);
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_assign_scalar {
+ (impl $imp:ident for $res:ty, $scalar:ty, $method:ident) => {
+ impl $imp<$res> for $scalar {
+ #[inline]
+ fn $method(&mut self, other: $res) {
+ self.$method(&other);
+ }
+ }
+ };
+}
+
+/// Use this if the val-val binop is already implemented and the reversed order is required.
+macro_rules! forward_scalar_val_val_binop_commutative {
+ (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(other, self)
+ }
+ }
+ };
+}
+
+// Forward scalar to ref-val, when reusing storage is not helpful
+macro_rules! forward_scalar_val_val_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl $imp<$scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(&self, other)
+ }
+ }
+
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_ref_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl $imp<&$scalar> for &$res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self, *other)
+ }
+ }
+
+ impl $imp<&$res> for &$scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(*self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_val_ref_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl $imp<&$scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(&self, *other)
+ }
+ }
+
+ impl $imp<$res> for &$scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(*self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_val_ref_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl $imp<&$scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self, *other)
+ }
+ }
+
+ impl $imp<$res> for &$scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(*self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_val_binop_to_val_val {
+ (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
+ impl $imp<$scalar> for &$res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(self.clone(), other)
+ }
+ }
+
+ impl $imp<&$res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(self, other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_ref_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl $imp<&$scalar> for &$res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self.clone(), *other)
+ }
+ }
+
+ impl $imp<&$res> for &$scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(*self, other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! promote_scalars {
+ (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
+ $(
+ forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+
+ impl $imp<$scalar> for $res {
+ type Output = $res;
+
+ #[allow(clippy::cast_lossless)]
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(self, other as $promo)
+ }
+ }
+
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[allow(clippy::cast_lossless)]
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(self as $promo, other)
+ }
+ }
+ )*
+ }
+}
+macro_rules! promote_scalars_assign {
+ (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
+ $(
+ impl $imp<$scalar> for $res {
+ #[allow(clippy::cast_lossless)]
+ #[inline]
+ fn $method(&mut self, other: $scalar) {
+ self.$method(other as $promo);
+ }
+ }
+ )*
+ }
+}
+
+macro_rules! promote_unsigned_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars!(impl $imp<u32> for $res, $method, u8, u16);
+ promote_scalars!(impl $imp<UsizePromotion> for $res, $method, usize);
+ }
+}
+
+macro_rules! promote_unsigned_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars_assign!(impl $imp<u32> for $res, $method, u8, u16);
+ promote_scalars_assign!(impl $imp<UsizePromotion> for $res, $method, usize);
+ }
+}
+
+macro_rules! promote_signed_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars!(impl $imp<i32> for $res, $method, i8, i16);
+ promote_scalars!(impl $imp<IsizePromotion> for $res, $method, isize);
+ }
+}
+
+macro_rules! promote_signed_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars_assign!(impl $imp<i32> for $res, $method, i8, i16);
+ promote_scalars_assign!(impl $imp<IsizePromotion> for $res, $method, isize);
+ }
+}
+
+// Forward everything to ref-ref, when reusing storage is not helpful
+macro_rules! forward_all_binop_to_ref_ref {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop!(impl $imp for $res, $method);
+ forward_val_ref_binop!(impl $imp for $res, $method);
+ forward_ref_val_binop!(impl $imp for $res, $method);
+ };
+}
+
+// Forward everything to val-ref, so LHS storage can be reused
+macro_rules! forward_all_binop_to_val_ref {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop!(impl $imp for $res, $method);
+ forward_ref_val_binop!(impl $imp for $res, $method);
+ forward_ref_ref_binop!(impl $imp for $res, $method);
+ };
+}
+
+// Forward everything to val-ref, commutatively, so either LHS or RHS storage can be reused
+macro_rules! forward_all_binop_to_val_ref_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop_commutative!(impl $imp for $res, $method);
+ forward_ref_val_binop_commutative!(impl $imp for $res, $method);
+ forward_ref_ref_binop_commutative!(impl $imp for $res, $method);
+ };
+}
+
+macro_rules! forward_all_scalar_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_val_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_val_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
+macro_rules! forward_all_scalar_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_val_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
+macro_rules! forward_all_scalar_binop_to_val_val_commutative {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_val_binop_commutative!(impl $imp<$scalar> for $res, $method);
+ forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
+macro_rules! promote_all_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_unsigned_scalars!(impl $imp for $res, $method);
+ promote_signed_scalars!(impl $imp for $res, $method);
+ }
+}
+
+macro_rules! promote_all_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_unsigned_scalars_assign!(impl $imp for $res, $method);
+ promote_signed_scalars_assign!(impl $imp for $res, $method);
+ }
+}
+
+macro_rules! impl_sum_iter_type {
+ ($res:ty) => {
+ impl<T> Sum<T> for $res
+ where
+ $res: Add<T, Output = $res>,
+ {
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = T>,
+ {
+ iter.fold(Zero::zero(), <$res>::add)
+ }
+ }
+ };
+}
+
+macro_rules! impl_product_iter_type {
+ ($res:ty) => {
+ impl<T> Product<T> for $res
+ where
+ $res: Mul<T, Output = $res>,
+ {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = T>,
+ {
+ iter.fold(One::one(), <$res>::mul)
+ }
+ }
+ };
+}
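
The comments above ("forward to val-ref, so LHS storage can be reused", "clone the larger operand", and so on) describe a pattern that is easier to see once expanded. Below is a standalone sketch, not part of the vendored file, with a hypothetical `Wrapper` type standing in for `BigUint`: only the val-ref case does the real work, and the other owned/borrowed combinations forward to it, cloning only when unavoidable.

```rust
use std::ops::Add;

#[derive(Clone, Debug, PartialEq)]
struct Wrapper(Vec<u32>);

// The "real" implementation: consumes the LHS so its buffer can be reused.
impl Add<&Wrapper> for Wrapper {
    type Output = Wrapper;
    fn add(mut self, other: &Wrapper) -> Wrapper {
        for (a, b) in self.0.iter_mut().zip(&other.0) {
            *a += *b; // toy element-wise add; the real code propagates carries
        }
        self
    }
}

// Roughly what forward_val_val_binop! expands to: forward to val-ref.
impl Add<Wrapper> for Wrapper {
    type Output = Wrapper;
    fn add(self, other: Wrapper) -> Wrapper {
        Add::add(self, &other)
    }
}

// Roughly what forward_ref_ref_binop! expands to: clone the LHS, then val-ref.
impl Add<&Wrapper> for &Wrapper {
    type Output = Wrapper;
    fn add(self, other: &Wrapper) -> Wrapper {
        Add::add(self.clone(), other)
    }
}

fn main() {
    let a = Wrapper(vec![1, 2, 3]);
    let b = Wrapper(vec![4, 5, 6]);
    assert_eq!(&a + &b, Wrapper(vec![5, 7, 9]));
    assert_eq!(a + b, Wrapper(vec![5, 7, 9]));
}
```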
diff --git a/rust/vendor/num-bigint/tests/bigint.rs b/rust/vendor/num-bigint/tests/bigint.rs
new file mode 100644
index 0000000..75cf81e
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/bigint.rs
@@ -0,0 +1,1478 @@
+use num_bigint::BigUint;
+use num_bigint::Sign::{Minus, NoSign, Plus};
+use num_bigint::{BigInt, ToBigInt};
+
+use std::cmp::Ordering::{Equal, Greater, Less};
+use std::collections::hash_map::RandomState;
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::iter::repeat;
+use std::ops::Neg;
+use std::{f32, f64};
+use std::{i128, u128};
+use std::{i16, i32, i64, i8, isize};
+use std::{u16, u32, u64, u8, usize};
+
+use num_integer::Integer;
+use num_traits::{
+ pow, Euclid, FromBytes, FromPrimitive, Num, One, Pow, Signed, ToBytes, ToPrimitive, Zero,
+};
+
+mod consts;
+use crate::consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_from_bytes_be() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigInt::from_bytes_be(Plus, s.as_bytes()),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ assert_eq!(BigInt::from_bytes_be(Plus, &[]), BigInt::zero());
+ assert_eq!(BigInt::from_bytes_be(Minus, &[]), BigInt::zero());
+}
+
+#[test]
+fn test_to_bytes_be() {
+ fn check(s: &str, result: &str) {
+ let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap();
+ let (sign, v) = b.to_bytes_be();
+ assert_eq!((Plus, s.as_bytes()), (sign, &*v));
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ let b: BigInt = Zero::zero();
+ assert_eq!(b.to_bytes_be(), (NoSign, vec![0]));
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigInt::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_be(), (Plus, vec![1, 0, 0, 0, 0, 0, 0, 2, 0]));
+}
+
+#[test]
+fn test_from_bytes_le() {
+ fn check(s: &str, result: &str) {
+ assert_eq!(
+ BigInt::from_bytes_le(Plus, s.as_bytes()),
+ BigInt::parse_bytes(result.as_bytes(), 10).unwrap()
+ );
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ assert_eq!(BigInt::from_bytes_le(Plus, &[]), BigInt::zero());
+ assert_eq!(BigInt::from_bytes_le(Minus, &[]), BigInt::zero());
+}
+
+#[test]
+fn test_to_bytes_le() {
+ fn check(s: &str, result: &str) {
+ let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap();
+ let (sign, v) = b.to_bytes_le();
+ assert_eq!((Plus, s.as_bytes()), (sign, &*v));
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ let b: BigInt = Zero::zero();
+ assert_eq!(b.to_bytes_le(), (NoSign, vec![0]));
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigInt::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_le(), (Plus, vec![0, 2, 0, 0, 0, 0, 0, 0, 1]));
+}
+
+#[test]
+fn test_to_signed_bytes_le() {
+ fn check(s: &str, result: Vec<u8>) {
+ let b = BigInt::parse_bytes(s.as_bytes(), 10).unwrap();
+ assert_eq!(b.to_signed_bytes_le(), result);
+ assert_eq!(<BigInt as ToBytes>::to_le_bytes(&b), result);
+ }
+
+ check("0", vec![0]);
+ check("32767", vec![0xff, 0x7f]);
+ check("-1", vec![0xff]);
+ check("16777216", vec![0, 0, 0, 1]);
+ check("-100", vec![156]);
+ check("-8388608", vec![0, 0, 0x80]);
+ check("-192", vec![0x40, 0xff]);
+ check("128", vec![0x80, 0])
+}
+
+#[test]
+fn test_from_signed_bytes_le() {
+ fn check(s: &[u8], result: &str) {
+ let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(BigInt::from_signed_bytes_le(s), b);
+ assert_eq!(<BigInt as FromBytes>::from_le_bytes(s), b);
+ }
+
+ check(&[], "0");
+ check(&[0], "0");
+ check(&[0; 10], "0");
+ check(&[0xff, 0x7f], "32767");
+ check(&[0xff], "-1");
+ check(&[0, 0, 0, 1], "16777216");
+ check(&[156], "-100");
+ check(&[0, 0, 0x80], "-8388608");
+ check(&[0xff; 10], "-1");
+ check(&[0x40, 0xff], "-192");
+}
+
+#[test]
+fn test_to_signed_bytes_be() {
+ fn check(s: &str, result: Vec<u8>) {
+ let b = BigInt::parse_bytes(s.as_bytes(), 10).unwrap();
+ assert_eq!(b.to_signed_bytes_be(), result);
+ assert_eq!(<BigInt as ToBytes>::to_be_bytes(&b), result);
+ }
+
+ check("0", vec![0]);
+ check("32767", vec![0x7f, 0xff]);
+ check("-1", vec![255]);
+ check("16777216", vec![1, 0, 0, 0]);
+ check("-100", vec![156]);
+ check("-8388608", vec![128, 0, 0]);
+ check("-192", vec![0xff, 0x40]);
+ check("128", vec![0, 0x80]);
+}
+
+#[test]
+fn test_from_signed_bytes_be() {
+ fn check(s: &[u8], result: &str) {
+ let b = BigInt::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(BigInt::from_signed_bytes_be(s), b);
+ assert_eq!(<BigInt as FromBytes>::from_be_bytes(s), b);
+ }
+
+ check(&[], "0");
+ check(&[0], "0");
+ check(&[0; 10], "0");
+ check(&[127, 255], "32767");
+ check(&[255], "-1");
+ check(&[1, 0, 0, 0], "16777216");
+ check(&[156], "-100");
+ check(&[128, 0, 0], "-8388608");
+ check(&[255; 10], "-1");
+ check(&[0xff, 0x40], "-192");
+}
+
+#[test]
+fn test_signed_bytes_be_round_trip() {
+ for i in -0x1FFFF..0x20000 {
+ let n = BigInt::from(i);
+ assert_eq!(n, BigInt::from_signed_bytes_be(&n.to_signed_bytes_be()));
+ }
+}
+
+#[test]
+fn test_signed_bytes_le_round_trip() {
+ for i in -0x1FFFF..0x20000 {
+ let n = BigInt::from(i);
+ assert_eq!(n, BigInt::from_signed_bytes_le(&n.to_signed_bytes_le()));
+ }
+}
+
+#[test]
+fn test_cmp() {
+ let vs: [&[u32]; 4] = [&[2_u32], &[1, 1], &[2, 1], &[1, 1, 1]];
+ let mut nums = Vec::new();
+ for s in vs.iter().rev() {
+ nums.push(BigInt::from_slice(Minus, *s));
+ }
+ nums.push(Zero::zero());
+ nums.extend(vs.iter().map(|s| BigInt::from_slice(Plus, *s)));
+
+ for (i, ni) in nums.iter().enumerate() {
+ for (j0, nj) in nums[i..].iter().enumerate() {
+ let j = i + j0;
+ if i == j {
+ assert_eq!(ni.cmp(nj), Equal);
+ assert_eq!(nj.cmp(ni), Equal);
+ assert_eq!(ni, nj);
+ assert!(!(ni != nj));
+ assert!(ni <= nj);
+ assert!(ni >= nj);
+ assert!(!(ni < nj));
+ assert!(!(ni > nj));
+ } else {
+ assert_eq!(ni.cmp(nj), Less);
+ assert_eq!(nj.cmp(ni), Greater);
+
+ assert!(!(ni == nj));
+ assert!(ni != nj);
+
+ assert!(ni <= nj);
+ assert!(!(ni >= nj));
+ assert!(ni < nj);
+ assert!(!(ni > nj));
+
+ assert!(!(nj <= ni));
+ assert!(nj >= ni);
+ assert!(!(nj < ni));
+ assert!(nj > ni);
+ }
+ }
+ }
+}
+
+fn hash<T: Hash>(x: &T) -> u64 {
+ let mut hasher = <RandomState as BuildHasher>::Hasher::new();
+ x.hash(&mut hasher);
+ hasher.finish()
+}
+
+#[test]
+fn test_hash() {
+ let a = BigInt::new(NoSign, vec![]);
+ let b = BigInt::new(NoSign, vec![0]);
+ let c = BigInt::new(Plus, vec![1]);
+ let d = BigInt::new(Plus, vec![1, 0, 0, 0, 0, 0]);
+ let e = BigInt::new(Plus, vec![0, 0, 0, 0, 0, 1]);
+ let f = BigInt::new(Minus, vec![1]);
+ assert!(hash(&a) == hash(&b));
+ assert!(hash(&b) != hash(&c));
+ assert!(hash(&c) == hash(&d));
+ assert!(hash(&d) != hash(&e));
+ assert!(hash(&c) != hash(&f));
+}
+
+#[test]
+fn test_convert_i64() {
+ fn check(b1: BigInt, i: i64) {
+ let b2: BigInt = FromPrimitive::from_i64(i).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_i64().unwrap() == i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i64::MIN.to_bigint().unwrap(), i64::MIN);
+ check(i64::MAX.to_bigint().unwrap(), i64::MAX);
+
+ assert_eq!((i64::MAX as u64 + 1).to_bigint().unwrap().to_i64(), None);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i64(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 0, 0, 1 << 31])).to_i64(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i64(),
+ None
+ );
+}
+
+#[test]
+fn test_convert_i128() {
+ fn check(b1: BigInt, i: i128) {
+ let b2: BigInt = FromPrimitive::from_i128(i).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_i128().unwrap() == i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i128::MIN.to_bigint().unwrap(), i128::MIN);
+ check(i128::MAX.to_bigint().unwrap(), i128::MAX);
+
+ assert_eq!((i128::MAX as u128 + 1).to_bigint().unwrap().to_i128(), None);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i128(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 0, 0, 1 << 31])).to_i128(),
+ None
+ );
+
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_i128(),
+ None
+ );
+}
+
+#[test]
+fn test_convert_u64() {
+ fn check(b1: BigInt, u: u64) {
+ let b2: BigInt = FromPrimitive::from_u64(u).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_u64().unwrap() == u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u64::MIN.to_bigint().unwrap(), u64::MIN);
+ check(u64::MAX.to_bigint().unwrap(), u64::MAX);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u64(),
+ None
+ );
+
+ let max_value: BigUint = FromPrimitive::from_u64(u64::MAX).unwrap();
+ assert_eq!(BigInt::from_biguint(Minus, max_value).to_u64(), None);
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u64(),
+ None
+ );
+}
+
+#[test]
+fn test_convert_u128() {
+ fn check(b1: BigInt, u: u128) {
+ let b2: BigInt = FromPrimitive::from_u128(u).unwrap();
+ assert!(b1 == b2);
+ assert!(b1.to_u128().unwrap() == u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u128::MIN.to_bigint().unwrap(), u128::MIN);
+ check(u128::MAX.to_bigint().unwrap(), u128::MAX);
+
+ assert_eq!(
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u128(),
+ None
+ );
+
+ let max_value: BigUint = FromPrimitive::from_u128(u128::MAX).unwrap();
+ assert_eq!(BigInt::from_biguint(Minus, max_value).to_u128(), None);
+ assert_eq!(
+ BigInt::from_biguint(Minus, BigUint::new(vec![1, 2, 3, 4, 5])).to_u128(),
+ None
+ );
+}
+
+#[test]
+#[allow(clippy::float_cmp)]
+fn test_convert_f32() {
+ fn check(b1: &BigInt, f: f32) {
+ let b2 = BigInt::from_f32(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f32().unwrap(), f);
+ let neg_b1 = -b1;
+ let neg_b2 = BigInt::from_f32(-f).unwrap();
+ assert_eq!(neg_b1, neg_b2);
+ assert_eq!(neg_b1.to_f32().unwrap(), -f);
+ }
+
+ check(&BigInt::zero(), 0.0);
+ check(&BigInt::one(), 1.0);
+ check(&BigInt::from(u16::MAX), pow(2.0_f32, 16) - 1.0);
+ check(&BigInt::from(1u64 << 32), pow(2.0_f32, 32));
+ check(&BigInt::from_slice(Plus, &[0, 0, 1]), pow(2.0_f32, 64));
+ check(
+ &((BigInt::one() << 100) + (BigInt::one() << 123)),
+ pow(2.0_f32, 100) + pow(2.0_f32, 123),
+ );
+ check(&(BigInt::one() << 127), pow(2.0_f32, 127));
+ check(&(BigInt::from((1u64 << 24) - 1) << (128 - 24)), f32::MAX);
+
+ // keep all 24 mantissa bits, with the bits at different offsets relative to the BigDigits
+ let x: u32 = 0b00000000101111011111011011011101;
+ let mut f = x as f32;
+ let mut b = BigInt::from(x);
+ for _ in 0..64 {
+ check(&b, f);
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // this number, when rounded to f64 and then to f32, isn't the same as when rounded straight to f32
+ let mut n: i64 = 0b0000000000111111111111111111111111011111111111111111111111111111;
+ assert!((n as f64) as f32 != n as f32);
+ assert_eq!(BigInt::from(n).to_f32(), Some(n as f32));
+ n = -n;
+ assert!((n as f64) as f32 != n as f32);
+ assert_eq!(BigInt::from(n).to_f32(), Some(n as f32));
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 25) - 1) as f32;
+ let mut b = BigInt::from(1u64 << 25);
+ for _ in 0..64 {
+ assert_eq!(b.to_f32(), Some(f));
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // test correct ties-to-even rounding
+ let weird: i128 = (1i128 << 100) + (1i128 << (100 - f32::MANTISSA_DIGITS));
+ assert_ne!(weird as f32, (weird + 1) as f32);
+
+ assert_eq!(BigInt::from(weird).to_f32(), Some(weird as f32));
+ assert_eq!(BigInt::from(weird + 1).to_f32(), Some((weird + 1) as f32));
+
+ // rounding
+ assert_eq!(
+ BigInt::from_f32(-f32::consts::PI),
+ Some(BigInt::from(-3i32))
+ );
+ assert_eq!(BigInt::from_f32(-f32::consts::E), Some(BigInt::from(-2i32)));
+ assert_eq!(BigInt::from_f32(-0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(-0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(-0.0), Some(BigInt::zero()));
+ assert_eq!(
+ BigInt::from_f32(f32::MIN_POSITIVE / 2.0),
+ Some(BigInt::zero())
+ );
+ assert_eq!(BigInt::from_f32(f32::MIN_POSITIVE), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f32(f32::consts::E), Some(BigInt::from(2u32)));
+ assert_eq!(BigInt::from_f32(f32::consts::PI), Some(BigInt::from(3u32)));
+
+ // special float values
+ assert_eq!(BigInt::from_f32(f32::NAN), None);
+ assert_eq!(BigInt::from_f32(f32::INFINITY), None);
+ assert_eq!(BigInt::from_f32(f32::NEG_INFINITY), None);
+
+ // largest BigInt that will round to a finite f32 value
+ let big_num = (BigInt::one() << 128u8) - 1u8 - (BigInt::one() << (128u8 - 25));
+ assert_eq!(big_num.to_f32(), Some(f32::MAX));
+ assert_eq!((&big_num + 1u8).to_f32(), Some(f32::INFINITY));
+ assert_eq!((-&big_num).to_f32(), Some(f32::MIN));
+ assert_eq!(((-&big_num) - 1u8).to_f32(), Some(f32::NEG_INFINITY));
+
+ assert_eq!(
+ ((BigInt::one() << 128u8) - 1u8).to_f32(),
+ Some(f32::INFINITY)
+ );
+ assert_eq!((BigInt::one() << 128u8).to_f32(), Some(f32::INFINITY));
+ assert_eq!(
+ (-((BigInt::one() << 128u8) - 1u8)).to_f32(),
+ Some(f32::NEG_INFINITY)
+ );
+ assert_eq!(
+ (-(BigInt::one() << 128u8)).to_f32(),
+ Some(f32::NEG_INFINITY)
+ );
+}
+
+#[test]
+#[allow(clippy::float_cmp)]
+fn test_convert_f64() {
+ fn check(b1: &BigInt, f: f64) {
+ let b2 = BigInt::from_f64(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f64().unwrap(), f);
+ let neg_b1 = -b1;
+ let neg_b2 = BigInt::from_f64(-f).unwrap();
+ assert_eq!(neg_b1, neg_b2);
+ assert_eq!(neg_b1.to_f64().unwrap(), -f);
+ }
+
+ check(&BigInt::zero(), 0.0);
+ check(&BigInt::one(), 1.0);
+ check(&BigInt::from(u32::MAX), pow(2.0_f64, 32) - 1.0);
+ check(&BigInt::from(1u64 << 32), pow(2.0_f64, 32));
+ check(&BigInt::from_slice(Plus, &[0, 0, 1]), pow(2.0_f64, 64));
+ check(
+ &((BigInt::one() << 100) + (BigInt::one() << 152)),
+ pow(2.0_f64, 100) + pow(2.0_f64, 152),
+ );
+ check(&(BigInt::one() << 1023), pow(2.0_f64, 1023));
+ check(&(BigInt::from((1u64 << 53) - 1) << (1024 - 53)), f64::MAX);
+
+ // keep all 53 mantissa bits, with the bits at different offsets relative to the BigDigits
+ let x: u64 = 0b0000000000011110111110110111111101110111101111011111011011011101;
+ let mut f = x as f64;
+ let mut b = BigInt::from(x);
+ for _ in 0..128 {
+ check(&b, f);
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 54) - 1) as f64;
+ let mut b = BigInt::from(1u64 << 54);
+ for _ in 0..128 {
+ assert_eq!(b.to_f64(), Some(f));
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // test correct ties-to-even rounding
+ let weird: i128 = (1i128 << 100) + (1i128 << (100 - f64::MANTISSA_DIGITS));
+ assert_ne!(weird as f64, (weird + 1) as f64);
+
+ assert_eq!(BigInt::from(weird).to_f64(), Some(weird as f64));
+ assert_eq!(BigInt::from(weird + 1).to_f64(), Some((weird + 1) as f64));
+
+ // rounding
+ assert_eq!(
+ BigInt::from_f64(-f64::consts::PI),
+ Some(BigInt::from(-3i32))
+ );
+ assert_eq!(BigInt::from_f64(-f64::consts::E), Some(BigInt::from(-2i32)));
+ assert_eq!(BigInt::from_f64(-0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(-0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(-0.0), Some(BigInt::zero()));
+ assert_eq!(
+ BigInt::from_f64(f64::MIN_POSITIVE / 2.0),
+ Some(BigInt::zero())
+ );
+ assert_eq!(BigInt::from_f64(f64::MIN_POSITIVE), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(0.5), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(0.99999), Some(BigInt::zero()));
+ assert_eq!(BigInt::from_f64(f64::consts::E), Some(BigInt::from(2u32)));
+ assert_eq!(BigInt::from_f64(f64::consts::PI), Some(BigInt::from(3u32)));
+
+ // special float values
+ assert_eq!(BigInt::from_f64(f64::NAN), None);
+ assert_eq!(BigInt::from_f64(f64::INFINITY), None);
+ assert_eq!(BigInt::from_f64(f64::NEG_INFINITY), None);
+
+ // largest BigInt that will round to a finite f64 value
+ let big_num = (BigInt::one() << 1024u16) - 1u8 - (BigInt::one() << (1024u16 - 54));
+ assert_eq!(big_num.to_f64(), Some(f64::MAX));
+ assert_eq!((&big_num + 1u8).to_f64(), Some(f64::INFINITY));
+ assert_eq!((-&big_num).to_f64(), Some(f64::MIN));
+ assert_eq!(((-&big_num) - 1u8).to_f64(), Some(f64::NEG_INFINITY));
+
+ assert_eq!(
+ ((BigInt::one() << 1024u16) - 1u8).to_f64(),
+ Some(f64::INFINITY)
+ );
+ assert_eq!((BigInt::one() << 1024u16).to_f64(), Some(f64::INFINITY));
+ assert_eq!(
+ (-((BigInt::one() << 1024u16) - 1u8)).to_f64(),
+ Some(f64::NEG_INFINITY)
+ );
+ assert_eq!(
+ (-(BigInt::one() << 1024u16)).to_f64(),
+ Some(f64::NEG_INFINITY)
+ );
+}
+
+#[test]
+fn test_convert_to_biguint() {
+ fn check(n: BigInt, ans_1: BigUint) {
+ assert_eq!(n.to_biguint().unwrap(), ans_1);
+ assert_eq!(n.to_biguint().unwrap().to_bigint().unwrap(), n);
+ }
+ let zero: BigInt = Zero::zero();
+ let unsigned_zero: BigUint = Zero::zero();
+ let positive = BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3]));
+ let negative = -&positive;
+
+ check(zero, unsigned_zero);
+ check(positive, BigUint::new(vec![1, 2, 3]));
+
+ assert_eq!(negative.to_biguint(), None);
+}
+
+#[test]
+fn test_convert_from_uint() {
+ macro_rules! check {
+ ($ty:ident, $max:expr) => {
+ assert_eq!(BigInt::from($ty::zero()), BigInt::zero());
+ assert_eq!(BigInt::from($ty::one()), BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX - $ty::one()), $max - BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX), $max);
+ };
+ }
+
+ check!(u8, BigInt::from_slice(Plus, &[u8::MAX as u32]));
+ check!(u16, BigInt::from_slice(Plus, &[u16::MAX as u32]));
+ check!(u32, BigInt::from_slice(Plus, &[u32::MAX]));
+ check!(u64, BigInt::from_slice(Plus, &[u32::MAX, u32::MAX]));
+ check!(
+ u128,
+ BigInt::from_slice(Plus, &[u32::MAX, u32::MAX, u32::MAX, u32::MAX])
+ );
+ check!(usize, BigInt::from(usize::MAX as u64));
+}
+
+#[test]
+fn test_convert_from_int() {
+ macro_rules! check {
+ ($ty:ident, $min:expr, $max:expr) => {
+ assert_eq!(BigInt::from($ty::MIN), $min);
+ assert_eq!(BigInt::from($ty::MIN + $ty::one()), $min + BigInt::one());
+ assert_eq!(BigInt::from(-$ty::one()), -BigInt::one());
+ assert_eq!(BigInt::from($ty::zero()), BigInt::zero());
+ assert_eq!(BigInt::from($ty::one()), BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX - $ty::one()), $max - BigInt::one());
+ assert_eq!(BigInt::from($ty::MAX), $max);
+ };
+ }
+
+ check!(
+ i8,
+ BigInt::from_slice(Minus, &[1 << 7]),
+ BigInt::from_slice(Plus, &[i8::MAX as u32])
+ );
+ check!(
+ i16,
+ BigInt::from_slice(Minus, &[1 << 15]),
+ BigInt::from_slice(Plus, &[i16::MAX as u32])
+ );
+ check!(
+ i32,
+ BigInt::from_slice(Minus, &[1 << 31]),
+ BigInt::from_slice(Plus, &[i32::MAX as u32])
+ );
+ check!(
+ i64,
+ BigInt::from_slice(Minus, &[0, 1 << 31]),
+ BigInt::from_slice(Plus, &[u32::MAX, i32::MAX as u32])
+ );
+ check!(
+ i128,
+ BigInt::from_slice(Minus, &[0, 0, 0, 1 << 31]),
+ BigInt::from_slice(Plus, &[u32::MAX, u32::MAX, u32::MAX, i32::MAX as u32])
+ );
+ check!(
+ isize,
+ BigInt::from(isize::MIN as i64),
+ BigInt::from(isize::MAX as i64)
+ );
+}
+
+#[test]
+fn test_convert_from_biguint() {
+ assert_eq!(BigInt::from(BigUint::zero()), BigInt::zero());
+ assert_eq!(BigInt::from(BigUint::one()), BigInt::one());
+ assert_eq!(
+ BigInt::from(BigUint::from_slice(&[1, 2, 3])),
+ BigInt::from_slice(Plus, &[1, 2, 3])
+ );
+}
+
+#[test]
+fn test_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ assert_op!(a + b == c);
+ assert_op!(b + a == c);
+ assert_op!(c + na == b);
+ assert_op!(c + nb == a);
+ assert_op!(a + nc == nb);
+ assert_op!(b + nc == na);
+ assert_op!(na + nb == nc);
+ assert_op!(a + na == BigInt::zero());
+
+ assert_assign_op!(a += b == c);
+ assert_assign_op!(b += a == c);
+ assert_assign_op!(c += na == b);
+ assert_assign_op!(c += nb == a);
+ assert_assign_op!(a += nc == nb);
+ assert_assign_op!(b += nc == na);
+ assert_assign_op!(na += nb == nc);
+ assert_assign_op!(a += na == BigInt::zero());
+ }
+}
+
+#[test]
+fn test_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ assert_op!(c - a == b);
+ assert_op!(c - b == a);
+ assert_op!(nb - a == nc);
+ assert_op!(na - b == nc);
+ assert_op!(b - na == c);
+ assert_op!(a - nb == c);
+ assert_op!(nc - na == nb);
+ assert_op!(a - a == BigInt::zero());
+
+ assert_assign_op!(c -= a == b);
+ assert_assign_op!(c -= b == a);
+ assert_assign_op!(nb -= a == nc);
+ assert_assign_op!(na -= b == nc);
+ assert_assign_op!(b -= na == c);
+ assert_assign_op!(a -= nb == c);
+ assert_assign_op!(nc -= na == nb);
+ assert_assign_op!(a -= a == BigInt::zero());
+ }
+}
+
+#[test]
+fn test_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ assert_op!(a * b == c);
+ assert_op!(b * a == c);
+ assert_op!(na * nb == c);
+
+ assert_op!(na * b == nc);
+ assert_op!(nb * a == nc);
+
+ assert_assign_op!(a *= b == c);
+ assert_assign_op!(b *= a == c);
+ assert_assign_op!(na *= nb == c);
+
+ assert_assign_op!(na *= b == nc);
+ assert_assign_op!(nb *= a == nc);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ assert!(a == &b * &c + &d);
+ assert!(a == &c * &b + &d);
+ }
+}
+
+#[test]
+fn test_div_mod_floor() {
+ fn check_sub(a: &BigInt, b: &BigInt, ans_d: &BigInt, ans_m: &BigInt) {
+ let (d, m) = a.div_mod_floor(b);
+ assert_eq!(d, a.div_floor(b));
+ assert_eq!(m, a.mod_floor(b));
+ if !m.is_zero() {
+ assert_eq!(m.sign(), b.sign());
+ }
+ assert!(m.abs() <= b.abs());
+ assert!(*a == b * &d + &m);
+ assert!(d == *ans_d);
+ assert!(m == *ans_m);
+ }
+
+ fn check(a: &BigInt, b: &BigInt, d: &BigInt, m: &BigInt) {
+ if m.is_zero() {
+ check_sub(a, b, d, m);
+ check_sub(a, &b.neg(), &d.neg(), m);
+ check_sub(&a.neg(), b, &d.neg(), m);
+ check_sub(&a.neg(), &b.neg(), d, m);
+ } else {
+ let one: BigInt = One::one();
+ check_sub(a, b, d, m);
+ check_sub(a, &b.neg(), &(d.neg() - &one), &(m - b));
+ check_sub(&a.neg(), b, &(d.neg() - &one), &(b - m));
+ check_sub(&a.neg(), &b.neg(), d, &m.neg());
+ }
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_div_rem() {
+ fn check_sub(a: &BigInt, b: &BigInt, ans_q: &BigInt, ans_r: &BigInt) {
+ let (q, r) = a.div_rem(b);
+ if !r.is_zero() {
+ assert_eq!(r.sign(), a.sign());
+ }
+ assert!(r.abs() <= b.abs());
+ assert!(*a == b * &q + &r);
+ assert!(q == *ans_q);
+ assert!(r == *ans_r);
+
+ let (a, b, ans_q, ans_r) = (a.clone(), b.clone(), ans_q.clone(), ans_r.clone());
+ assert_op!(a / b == ans_q);
+ assert_op!(a % b == ans_r);
+ assert_assign_op!(a /= b == ans_q);
+ assert_assign_op!(a %= b == ans_r);
+ }
+
+ fn check(a: &BigInt, b: &BigInt, q: &BigInt, r: &BigInt) {
+ check_sub(a, b, q, r);
+ check_sub(a, &b.neg(), &q.neg(), r);
+ check_sub(&a.neg(), b, &q.neg(), &r.neg());
+ check_sub(&a.neg(), &b.neg(), q, &r.neg());
+ }
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_div_ceil() {
+ fn check_sub(a: &BigInt, b: &BigInt, ans_d: &BigInt) {
+ assert_eq!(a.div_ceil(b), *ans_d);
+ }
+
+ fn check(a: &BigInt, b: &BigInt, d: &BigInt, m: &BigInt) {
+ if m.is_zero() {
+ check_sub(a, b, d);
+ check_sub(a, &b.neg(), &d.neg());
+ check_sub(&a.neg(), b, &d.neg());
+ check_sub(&a.neg(), &b.neg(), d);
+ } else {
+ check_sub(a, b, &(d + 1));
+ check_sub(a, &b.neg(), &d.neg());
+ check_sub(&a.neg(), b, &d.neg());
+ check_sub(&a.neg(), &b.neg(), &(d + 1));
+ }
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_div_rem_euclid() {
+ fn check_sub(a: &BigInt, b: &BigInt, ans_d: &BigInt, ans_m: &BigInt) {
+ eprintln!("{} {} {} {}", a, b, ans_d, ans_m);
+ assert_eq!(a.div_euclid(b), *ans_d);
+ assert_eq!(a.rem_euclid(b), *ans_m);
+ assert!(*ans_m >= BigInt::zero());
+ assert!(*ans_m < b.abs());
+ }
+
+ fn check(a: &BigInt, b: &BigInt, d: &BigInt, m: &BigInt) {
+ if m.is_zero() {
+ check_sub(a, b, d, m);
+ check_sub(a, &b.neg(), &d.neg(), m);
+ check_sub(&a.neg(), b, &d.neg(), m);
+ check_sub(&a.neg(), &b.neg(), d, m);
+ } else {
+ let one: BigInt = One::one();
+ check_sub(a, b, d, m);
+ check_sub(a, &b.neg(), &d.neg(), m);
+ check_sub(&a.neg(), b, &(d + &one).neg(), &(b - m));
+ check_sub(&a.neg(), &b.neg(), &(d + &one), &(b.abs() - m));
+ }
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_checked_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ assert!(a.checked_add(&b).unwrap() == c);
+ assert!(b.checked_add(&a).unwrap() == c);
+ assert!(c.checked_add(&(-&a)).unwrap() == b);
+ assert!(c.checked_add(&(-&b)).unwrap() == a);
+ assert!(a.checked_add(&(-&c)).unwrap() == (-&b));
+ assert!(b.checked_add(&(-&c)).unwrap() == (-&a));
+ assert!((-&a).checked_add(&(-&b)).unwrap() == (-&c));
+ assert!(a.checked_add(&(-&a)).unwrap() == BigInt::zero());
+ }
+}
+
+#[test]
+fn test_checked_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ assert!(c.checked_sub(&a).unwrap() == b);
+ assert!(c.checked_sub(&b).unwrap() == a);
+ assert!((-&b).checked_sub(&a).unwrap() == (-&c));
+ assert!((-&a).checked_sub(&b).unwrap() == (-&c));
+ assert!(b.checked_sub(&(-&a)).unwrap() == c);
+ assert!(a.checked_sub(&(-&b)).unwrap() == c);
+ assert!((-&c).checked_sub(&(-&a)).unwrap() == (-&b));
+ assert!(a.checked_sub(&a).unwrap() == BigInt::zero());
+ }
+}
+
+#[test]
+fn test_checked_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ assert!(a.checked_mul(&b).unwrap() == c);
+ assert!(b.checked_mul(&a).unwrap() == c);
+
+ assert!((-&a).checked_mul(&b).unwrap() == -&c);
+ assert!((-&b).checked_mul(&a).unwrap() == -&c);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ assert!(a == b.checked_mul(&c).unwrap() + &d);
+ assert!(a == c.checked_mul(&b).unwrap() + &d);
+ }
+}
+#[test]
+fn test_checked_div() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if !a.is_zero() {
+ assert!(c.checked_div(&a).unwrap() == b);
+ assert!((-&c).checked_div(&(-&a)).unwrap() == b);
+ assert!((-&c).checked_div(&a).unwrap() == -&b);
+ }
+ if !b.is_zero() {
+ assert!(c.checked_div(&b).unwrap() == a);
+ assert!((-&c).checked_div(&(-&b)).unwrap() == a);
+ assert!((-&c).checked_div(&b).unwrap() == -&a);
+ }
+
+ assert!(c.checked_div(&Zero::zero()).is_none());
+ assert!((-&c).checked_div(&Zero::zero()).is_none());
+ }
+}
+
+#[test]
+fn test_gcd() {
+ fn check(a: isize, b: isize, c: isize) {
+ let big_a: BigInt = FromPrimitive::from_isize(a).unwrap();
+ let big_b: BigInt = FromPrimitive::from_isize(b).unwrap();
+ let big_c: BigInt = FromPrimitive::from_isize(c).unwrap();
+
+ assert_eq!(big_a.gcd(&big_b), big_c);
+ assert_eq!(big_a.extended_gcd(&big_b).gcd, big_c);
+ assert_eq!(big_a.gcd_lcm(&big_b).0, big_c);
+ assert_eq!(big_a.extended_gcd_lcm(&big_b).0.gcd, big_c);
+ }
+
+ check(10, 2, 2);
+ check(10, 3, 1);
+ check(0, 3, 3);
+ check(3, 3, 3);
+ check(56, 42, 14);
+ check(3, -3, 3);
+ check(-6, 3, 3);
+ check(-4, -2, 2);
+}
+
+#[test]
+fn test_lcm() {
+ fn check(a: isize, b: isize, c: isize) {
+ let big_a: BigInt = FromPrimitive::from_isize(a).unwrap();
+ let big_b: BigInt = FromPrimitive::from_isize(b).unwrap();
+ let big_c: BigInt = FromPrimitive::from_isize(c).unwrap();
+
+ assert_eq!(big_a.lcm(&big_b), big_c);
+ assert_eq!(big_a.gcd_lcm(&big_b).1, big_c);
+ assert_eq!(big_a.extended_gcd_lcm(&big_b).1, big_c);
+ }
+
+ check(0, 0, 0);
+ check(1, 0, 0);
+ check(0, 1, 0);
+ check(1, 1, 1);
+ check(-1, 1, 1);
+ check(1, -1, 1);
+ check(-1, -1, 1);
+ check(8, 9, 72);
+ check(11, 5, 55);
+}
+
+#[test]
+fn test_is_multiple_of() {
+ assert!(BigInt::from(0).is_multiple_of(&BigInt::from(0)));
+ assert!(BigInt::from(6).is_multiple_of(&BigInt::from(6)));
+ assert!(BigInt::from(6).is_multiple_of(&BigInt::from(3)));
+ assert!(BigInt::from(6).is_multiple_of(&BigInt::from(1)));
+
+ assert!(!BigInt::from(42).is_multiple_of(&BigInt::from(5)));
+ assert!(!BigInt::from(5).is_multiple_of(&BigInt::from(3)));
+ assert!(!BigInt::from(42).is_multiple_of(&BigInt::from(0)));
+}
+
+#[test]
+fn test_next_multiple_of() {
+ assert_eq!(
+ BigInt::from(16).next_multiple_of(&BigInt::from(8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(23).next_multiple_of(&BigInt::from(8)),
+ BigInt::from(24)
+ );
+ assert_eq!(
+ BigInt::from(16).next_multiple_of(&BigInt::from(-8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(23).next_multiple_of(&BigInt::from(-8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(-16).next_multiple_of(&BigInt::from(8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-23).next_multiple_of(&BigInt::from(8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-16).next_multiple_of(&BigInt::from(-8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-23).next_multiple_of(&BigInt::from(-8)),
+ BigInt::from(-24)
+ );
+}
+
+#[test]
+fn test_prev_multiple_of() {
+ assert_eq!(
+ BigInt::from(16).prev_multiple_of(&BigInt::from(8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(23).prev_multiple_of(&BigInt::from(8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(16).prev_multiple_of(&BigInt::from(-8)),
+ BigInt::from(16)
+ );
+ assert_eq!(
+ BigInt::from(23).prev_multiple_of(&BigInt::from(-8)),
+ BigInt::from(24)
+ );
+ assert_eq!(
+ BigInt::from(-16).prev_multiple_of(&BigInt::from(8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-23).prev_multiple_of(&BigInt::from(8)),
+ BigInt::from(-24)
+ );
+ assert_eq!(
+ BigInt::from(-16).prev_multiple_of(&BigInt::from(-8)),
+ BigInt::from(-16)
+ );
+ assert_eq!(
+ BigInt::from(-23).prev_multiple_of(&BigInt::from(-8)),
+ BigInt::from(-16)
+ );
+}
+
+#[test]
+fn test_abs_sub() {
+ let zero: BigInt = Zero::zero();
+ let one: BigInt = One::one();
+ assert_eq!((-&one).abs_sub(&one), zero);
+ let one: BigInt = One::one();
+ let zero: BigInt = Zero::zero();
+ assert_eq!(one.abs_sub(&one), zero);
+ let one: BigInt = One::one();
+ let zero: BigInt = Zero::zero();
+ assert_eq!(one.abs_sub(&zero), one);
+ let one: BigInt = One::one();
+ let two: BigInt = FromPrimitive::from_isize(2).unwrap();
+ assert_eq!(one.abs_sub(&-&one), two);
+}
+
+#[test]
+fn test_from_str_radix() {
+ fn check(s: &str, ans: Option<isize>) {
+ let ans = ans.map(|n| {
+ let x: BigInt = FromPrimitive::from_isize(n).unwrap();
+ x
+ });
+ assert_eq!(BigInt::from_str_radix(s, 10).ok(), ans);
+ }
+ check("10", Some(10));
+ check("1", Some(1));
+ check("0", Some(0));
+ check("-1", Some(-1));
+ check("-10", Some(-10));
+ check("+10", Some(10));
+ check("--7", None);
+ check("++5", None);
+ check("+-9", None);
+ check("-+3", None);
+ check("Z", None);
+ check("_", None);
+
+ // issue 10522, this hit an edge case that caused it to
+ // attempt to allocate a vector of size (-1u) == huge.
+ let x: BigInt = format!("1{}", repeat("0").take(36).collect::<String>())
+ .parse()
+ .unwrap();
+ let _y = x.to_string();
+}
+
+#[test]
+fn test_lower_hex() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:x}", a), "a");
+ assert_eq!(format!("{:x}", hello), "-48656c6c6f20776f726c6421");
+ assert_eq!(format!("{:♥>+#8x}", a), "♥♥♥♥+0xa");
+}
+
+#[test]
+fn test_upper_hex() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:X}", a), "A");
+ assert_eq!(format!("{:X}", hello), "-48656C6C6F20776F726C6421");
+ assert_eq!(format!("{:♥>+#8X}", a), "♥♥♥♥+0xA");
+}
+
+#[test]
+fn test_binary() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-224055342307539", 10).unwrap();
+
+ assert_eq!(format!("{:b}", a), "1010");
+ assert_eq!(
+ format!("{:b}", hello),
+ "-110010111100011011110011000101101001100011010011"
+ );
+ assert_eq!(format!("{:♥>+#8b}", a), "♥+0b1010");
+}
+
+#[test]
+fn test_octal() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:o}", a), "12");
+ assert_eq!(format!("{:o}", hello), "-22062554330674403566756233062041");
+ assert_eq!(format!("{:♥>+#8o}", a), "♥♥♥+0o12");
+}
+
+#[test]
+fn test_display() {
+ let a = BigInt::parse_bytes(b"A", 16).unwrap();
+ let hello = BigInt::parse_bytes(b"-22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{}", a), "10");
+ assert_eq!(format!("{}", hello), "-22405534230753963835153736737");
+ assert_eq!(format!("{:♥>+#8}", a), "♥♥♥♥♥+10");
+}
+
+#[test]
+fn test_neg() {
+ assert!(-BigInt::new(Plus, vec![1, 1, 1]) == BigInt::new(Minus, vec![1, 1, 1]));
+ assert!(-BigInt::new(Minus, vec![1, 1, 1]) == BigInt::new(Plus, vec![1, 1, 1]));
+ let zero: BigInt = Zero::zero();
+ assert_eq!(-&zero, zero);
+}
+
+#[test]
+fn test_negative_shr() {
+ assert_eq!(BigInt::from(-1) >> 1, BigInt::from(-1));
+ assert_eq!(BigInt::from(-2) >> 1, BigInt::from(-1));
+ assert_eq!(BigInt::from(-3) >> 1, BigInt::from(-2));
+ assert_eq!(BigInt::from(-3) >> 2, BigInt::from(-1));
+}
+
+#[test]
+fn test_iter_sum() {
+ let result: BigInt = FromPrimitive::from_isize(-1234567).unwrap();
+ let data: Vec<BigInt> = vec![
+ FromPrimitive::from_i32(-1000000).unwrap(),
+ FromPrimitive::from_i32(-200000).unwrap(),
+ FromPrimitive::from_i32(-30000).unwrap(),
+ FromPrimitive::from_i32(-4000).unwrap(),
+ FromPrimitive::from_i32(-500).unwrap(),
+ FromPrimitive::from_i32(-60).unwrap(),
+ FromPrimitive::from_i32(-7).unwrap(),
+ ];
+
+ assert_eq!(result, data.iter().sum::<BigInt>());
+ assert_eq!(result, data.into_iter().sum::<BigInt>());
+}
+
+#[test]
+fn test_iter_product() {
+ let data: Vec<BigInt> = vec![
+ FromPrimitive::from_i32(1001).unwrap(),
+ FromPrimitive::from_i32(-1002).unwrap(),
+ FromPrimitive::from_i32(1003).unwrap(),
+ FromPrimitive::from_i32(-1004).unwrap(),
+ FromPrimitive::from_i32(1005).unwrap(),
+ ];
+ let result = data.get(0).unwrap()
+ * data.get(1).unwrap()
+ * data.get(2).unwrap()
+ * data.get(3).unwrap()
+ * data.get(4).unwrap();
+
+ assert_eq!(result, data.iter().product::<BigInt>());
+ assert_eq!(result, data.into_iter().product::<BigInt>());
+}
+
+#[test]
+fn test_iter_sum_generic() {
+ let result: BigInt = FromPrimitive::from_isize(-1234567).unwrap();
+ let data = vec![-1000000, -200000, -30000, -4000, -500, -60, -7];
+
+ assert_eq!(result, data.iter().sum::<BigInt>());
+ assert_eq!(result, data.into_iter().sum::<BigInt>());
+}
+
+#[test]
+fn test_iter_product_generic() {
+ let data = vec![1001, -1002, 1003, -1004, 1005];
+ let result = data[0].to_bigint().unwrap()
+ * data[1].to_bigint().unwrap()
+ * data[2].to_bigint().unwrap()
+ * data[3].to_bigint().unwrap()
+ * data[4].to_bigint().unwrap();
+
+ assert_eq!(result, data.iter().product::<BigInt>());
+ assert_eq!(result, data.into_iter().product::<BigInt>());
+}
+
+#[test]
+fn test_pow() {
+ let one = BigInt::from(1i32);
+ let two = BigInt::from(2i32);
+ let four = BigInt::from(4i32);
+ let eight = BigInt::from(8i32);
+ let minus_two = BigInt::from(-2i32);
+ macro_rules! check {
+ ($t:ty) => {
+ assert_eq!(Pow::pow(&two, 0 as $t), one);
+ assert_eq!(Pow::pow(&two, 1 as $t), two);
+ assert_eq!(Pow::pow(&two, 2 as $t), four);
+ assert_eq!(Pow::pow(&two, 3 as $t), eight);
+ assert_eq!(Pow::pow(&two, &(3 as $t)), eight);
+ assert_eq!(Pow::pow(&minus_two, 0 as $t), one, "-2^0");
+ assert_eq!(Pow::pow(&minus_two, 1 as $t), minus_two, "-2^1");
+ assert_eq!(Pow::pow(&minus_two, 2 as $t), four, "-2^2");
+ assert_eq!(Pow::pow(&minus_two, 3 as $t), -&eight, "-2^3");
+ };
+ }
+ check!(u8);
+ check!(u16);
+ check!(u32);
+ check!(u64);
+ check!(usize);
+
+ let pow_1e10000 = BigInt::from(10u32).pow(10_000_u32);
+ let manual_1e10000 = repeat(10u32).take(10_000).product::<BigInt>();
+ assert!(manual_1e10000 == pow_1e10000);
+}
+
+#[test]
+fn test_bit() {
+ // 12 = (1100)_2
+ assert!(!BigInt::from(0b1100u8).bit(0));
+ assert!(!BigInt::from(0b1100u8).bit(1));
+ assert!(BigInt::from(0b1100u8).bit(2));
+ assert!(BigInt::from(0b1100u8).bit(3));
+ assert!(!BigInt::from(0b1100u8).bit(4));
+ assert!(!BigInt::from(0b1100u8).bit(200));
+ assert!(!BigInt::from(0b1100u8).bit(u64::MAX));
+ // -12 = (...110100)_2
+ assert!(!BigInt::from(-12i8).bit(0));
+ assert!(!BigInt::from(-12i8).bit(1));
+ assert!(BigInt::from(-12i8).bit(2));
+ assert!(!BigInt::from(-12i8).bit(3));
+ assert!(BigInt::from(-12i8).bit(4));
+ assert!(BigInt::from(-12i8).bit(200));
+ assert!(BigInt::from(-12i8).bit(u64::MAX));
+}
+
+#[test]
+fn test_set_bit() {
+ let mut x: BigInt;
+
+ // zero
+ x = BigInt::zero();
+ x.set_bit(200, true);
+ assert_eq!(x, BigInt::one() << 200);
+ x = BigInt::zero();
+ x.set_bit(200, false);
+ assert_eq!(x, BigInt::zero());
+
+ // positive numbers
+ x = BigInt::from_biguint(Plus, BigUint::one() << 200);
+ x.set_bit(10, true);
+ x.set_bit(200, false);
+ assert_eq!(x, BigInt::one() << 10);
+ x.set_bit(10, false);
+ x.set_bit(5, false);
+ assert_eq!(x, BigInt::zero());
+
+ // negative numbers
+ x = BigInt::from(-12i8);
+ x.set_bit(200, true);
+ assert_eq!(x, BigInt::from(-12i8));
+ x.set_bit(200, false);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, BigUint::from(12u8) | (BigUint::one() << 200))
+ );
+ x.set_bit(6, false);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, BigUint::from(76u8) | (BigUint::one() << 200))
+ );
+ x.set_bit(6, true);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, BigUint::from(12u8) | (BigUint::one() << 200))
+ );
+ x.set_bit(200, true);
+ assert_eq!(x, BigInt::from(-12i8));
+
+ x = BigInt::from_biguint(Minus, BigUint::one() << 30);
+ x.set_bit(10, true);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, (BigUint::one() << 30) - (BigUint::one() << 10))
+ );
+
+ x = BigInt::from_biguint(Minus, BigUint::one() << 200);
+ x.set_bit(40, true);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, (BigUint::one() << 200) - (BigUint::one() << 40))
+ );
+
+ x = BigInt::from_biguint(Minus, (BigUint::one() << 200) | (BigUint::one() << 100));
+ x.set_bit(100, false);
+ assert_eq!(
+ x,
+ BigInt::from_biguint(Minus, (BigUint::one() << 200) | (BigUint::one() << 101))
+ );
+
+ x = BigInt::from_biguint(Minus, (BigUint::one() << 63) | (BigUint::one() << 62));
+ x.set_bit(62, false);
+ assert_eq!(x, BigInt::from_biguint(Minus, BigUint::one() << 64));
+
+ x = BigInt::from_biguint(Minus, (BigUint::one() << 200) - BigUint::one());
+ x.set_bit(0, false);
+ assert_eq!(x, BigInt::from_biguint(Minus, BigUint::one() << 200));
+}
diff --git a/rust/vendor/num-bigint/tests/bigint_bitwise.rs b/rust/vendor/num-bigint/tests/bigint_bitwise.rs
new file mode 100644
index 0000000..6c1e82f
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/bigint_bitwise.rs
@@ -0,0 +1,178 @@
+use num_bigint::{BigInt, Sign, ToBigInt};
+use num_traits::ToPrimitive;
+use std::{i32, i64, u32};
+
+enum ValueVec {
+ N,
+ P(&'static [u32]),
+ M(&'static [u32]),
+}
+
+use crate::ValueVec::*;
+
+impl ToBigInt for ValueVec {
+ fn to_bigint(&self) -> Option<BigInt> {
+ match self {
+ &N => Some(BigInt::from_slice(Sign::NoSign, &[])),
+ &P(s) => Some(BigInt::from_slice(Sign::Plus, s)),
+ &M(s) => Some(BigInt::from_slice(Sign::Minus, s)),
+ }
+ }
+}
+
+// a, !a
+const NOT_VALUES: &[(ValueVec, ValueVec)] = &[
+ (N, M(&[1])),
+ (P(&[1]), M(&[2])),
+ (P(&[2]), M(&[3])),
+ (P(&[!0 - 2]), M(&[!0 - 1])),
+ (P(&[!0 - 1]), M(&[!0])),
+ (P(&[!0]), M(&[0, 1])),
+ (P(&[0, 1]), M(&[1, 1])),
+ (P(&[1, 1]), M(&[2, 1])),
+];
+
+// a, b, a & b, a | b, a ^ b
+const BITWISE_VALUES: &[(ValueVec, ValueVec, ValueVec, ValueVec, ValueVec)] = &[
+ (N, N, N, N, N),
+ (N, P(&[1]), N, P(&[1]), P(&[1])),
+ (N, P(&[!0]), N, P(&[!0]), P(&[!0])),
+ (N, P(&[0, 1]), N, P(&[0, 1]), P(&[0, 1])),
+ (N, M(&[1]), N, M(&[1]), M(&[1])),
+ (N, M(&[!0]), N, M(&[!0]), M(&[!0])),
+ (N, M(&[0, 1]), N, M(&[0, 1]), M(&[0, 1])),
+ (P(&[1]), P(&[!0]), P(&[1]), P(&[!0]), P(&[!0 - 1])),
+ (P(&[!0]), P(&[!0]), P(&[!0]), P(&[!0]), N),
+ (P(&[!0]), P(&[1, 1]), P(&[1]), P(&[!0, 1]), P(&[!0 - 1, 1])),
+ (P(&[1]), M(&[!0]), P(&[1]), M(&[!0]), M(&[0, 1])),
+ (P(&[!0]), M(&[1]), P(&[!0]), M(&[1]), M(&[0, 1])),
+ (P(&[!0]), M(&[!0]), P(&[1]), M(&[1]), M(&[2])),
+ (P(&[!0]), M(&[1, 1]), P(&[!0]), M(&[1, 1]), M(&[0, 2])),
+ (P(&[1, 1]), M(&[!0]), P(&[1, 1]), M(&[!0]), M(&[0, 2])),
+ (M(&[1]), M(&[!0]), M(&[!0]), M(&[1]), P(&[!0 - 1])),
+ (M(&[!0]), M(&[!0]), M(&[!0]), M(&[!0]), N),
+ (M(&[!0]), M(&[1, 1]), M(&[!0, 1]), M(&[1]), P(&[!0 - 1, 1])),
+];
+
+const I32_MIN: i64 = i32::MIN as i64;
+const I32_MAX: i64 = i32::MAX as i64;
+const U32_MAX: i64 = u32::MAX as i64;
+
+// some corner cases
+const I64_VALUES: &[i64] = &[
+ i64::MIN,
+ i64::MIN + 1,
+ i64::MIN + 2,
+ i64::MIN + 3,
+ -U32_MAX - 3,
+ -U32_MAX - 2,
+ -U32_MAX - 1,
+ -U32_MAX,
+ -U32_MAX + 1,
+ -U32_MAX + 2,
+ -U32_MAX + 3,
+ I32_MIN - 3,
+ I32_MIN - 2,
+ I32_MIN - 1,
+ I32_MIN,
+ I32_MIN + 1,
+ I32_MIN + 2,
+ I32_MIN + 3,
+ -3,
+ -2,
+ -1,
+ 0,
+ 1,
+ 2,
+ 3,
+ I32_MAX - 3,
+ I32_MAX - 2,
+ I32_MAX - 1,
+ I32_MAX,
+ I32_MAX + 1,
+ I32_MAX + 2,
+ I32_MAX + 3,
+ U32_MAX - 3,
+ U32_MAX - 2,
+ U32_MAX - 1,
+ U32_MAX,
+ U32_MAX + 1,
+ U32_MAX + 2,
+ U32_MAX + 3,
+ i64::MAX - 3,
+ i64::MAX - 2,
+ i64::MAX - 1,
+ i64::MAX,
+];
+
+#[test]
+fn test_not() {
+ for &(ref a, ref not) in NOT_VALUES.iter() {
+ let a = a.to_bigint().unwrap();
+ let not = not.to_bigint().unwrap();
+
+ // sanity check for tests that fit in i64
+ if let (Some(prim_a), Some(prim_not)) = (a.to_i64(), not.to_i64()) {
+ assert_eq!(!prim_a, prim_not);
+ }
+
+ assert_eq!(!a.clone(), not, "!{:x}", a);
+ assert_eq!(!not.clone(), a, "!{:x}", not);
+ }
+}
+
+#[test]
+fn test_not_i64() {
+ for &prim_a in I64_VALUES.iter() {
+ let a = prim_a.to_bigint().unwrap();
+ let not = (!prim_a).to_bigint().unwrap();
+ assert_eq!(!a.clone(), not, "!{:x}", a);
+ }
+}
+
+#[test]
+fn test_bitwise() {
+ for &(ref a, ref b, ref and, ref or, ref xor) in BITWISE_VALUES.iter() {
+ let a = a.to_bigint().unwrap();
+ let b = b.to_bigint().unwrap();
+ let and = and.to_bigint().unwrap();
+ let or = or.to_bigint().unwrap();
+ let xor = xor.to_bigint().unwrap();
+
+ // sanity check for tests that fit in i64
+ if let (Some(prim_a), Some(prim_b)) = (a.to_i64(), b.to_i64()) {
+ if let Some(prim_and) = and.to_i64() {
+ assert_eq!(prim_a & prim_b, prim_and);
+ }
+ if let Some(prim_or) = or.to_i64() {
+ assert_eq!(prim_a | prim_b, prim_or);
+ }
+ if let Some(prim_xor) = xor.to_i64() {
+ assert_eq!(prim_a ^ prim_b, prim_xor);
+ }
+ }
+
+ assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b);
+ assert_eq!(b.clone() & &a, and, "{:x} & {:x}", b, a);
+ assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b);
+ assert_eq!(b.clone() | &a, or, "{:x} | {:x}", b, a);
+ assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b);
+ assert_eq!(b.clone() ^ &a, xor, "{:x} ^ {:x}", b, a);
+ }
+}
+
+#[test]
+fn test_bitwise_i64() {
+ for &prim_a in I64_VALUES.iter() {
+ let a = prim_a.to_bigint().unwrap();
+ for &prim_b in I64_VALUES.iter() {
+ let b = prim_b.to_bigint().unwrap();
+ let and = (prim_a & prim_b).to_bigint().unwrap();
+ let or = (prim_a | prim_b).to_bigint().unwrap();
+ let xor = (prim_a ^ prim_b).to_bigint().unwrap();
+ assert_eq!(a.clone() & &b, and, "{:x} & {:x}", a, b);
+ assert_eq!(a.clone() | &b, or, "{:x} | {:x}", a, b);
+ assert_eq!(a.clone() ^ &b, xor, "{:x} ^ {:x}", a, b);
+ }
+ }
+}
diff --git a/rust/vendor/num-bigint/tests/bigint_scalar.rs b/rust/vendor/num-bigint/tests/bigint_scalar.rs
new file mode 100644
index 0000000..2a19faf
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/bigint_scalar.rs
@@ -0,0 +1,157 @@
+use num_bigint::BigInt;
+use num_bigint::Sign::Plus;
+use num_traits::{One, Signed, ToPrimitive, Zero};
+
+use std::ops::Neg;
+use std::panic::catch_unwind;
+
+mod consts;
+use crate::consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_scalar_add() {
+ fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_signed_scalar_op!(x + y == z);
+ assert_signed_scalar_assign_op!(x += y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ check(&c, &na, &b);
+ check(&c, &nb, &a);
+ check(&a, &nc, &nb);
+ check(&b, &nc, &na);
+ check(&na, &nb, &nc);
+ check(&a, &na, &Zero::zero());
+ }
+}
+
+#[test]
+fn test_scalar_sub() {
+ fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_signed_scalar_op!(x - y == z);
+ assert_signed_scalar_assign_op!(x -= y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ check(&c, &a, &b);
+ check(&c, &b, &a);
+ check(&nb, &a, &nc);
+ check(&na, &b, &nc);
+ check(&b, &na, &c);
+ check(&a, &nb, &c);
+ check(&nc, &na, &nb);
+ check(&a, &a, &Zero::zero());
+ }
+}
+
+#[test]
+fn test_scalar_mul() {
+ fn check(x: &BigInt, y: &BigInt, z: &BigInt) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_signed_scalar_op!(x * y == z);
+ assert_signed_scalar_assign_op!(x *= y == z);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let (na, nb, nc) = (-&a, -&b, -&c);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ check(&na, &nb, &c);
+
+ check(&na, &b, &nc);
+ check(&nb, &a, &nc);
+ }
+}
+
+#[test]
+fn test_scalar_div_rem() {
+ fn check_sub(a: &BigInt, b: u32, ans_q: &BigInt, ans_r: &BigInt) {
+ let (q, r) = (a / b, a % b);
+ if !r.is_zero() {
+ assert_eq!(r.sign(), a.sign());
+ }
+ assert!(r.abs() <= BigInt::from(b));
+ assert!(*a == b * &q + &r);
+ assert!(q == *ans_q);
+ assert!(r == *ans_r);
+
+ let b = BigInt::from(b);
+ let (a, ans_q, ans_r) = (a.clone(), ans_q.clone(), ans_r.clone());
+ assert_signed_scalar_op!(a / b == ans_q);
+ assert_signed_scalar_op!(a % b == ans_r);
+ assert_signed_scalar_assign_op!(a /= b == ans_q);
+ assert_signed_scalar_assign_op!(a %= b == ans_r);
+
+ let nb = -b;
+ assert_signed_scalar_op!(a / nb == -ans_q.clone());
+ assert_signed_scalar_op!(a % nb == ans_r);
+ assert_signed_scalar_assign_op!(a /= nb == -ans_q.clone());
+ assert_signed_scalar_assign_op!(a %= nb == ans_r);
+ }
+
+ fn check(a: &BigInt, b: u32, q: &BigInt, r: &BigInt) {
+ check_sub(a, b, q, r);
+ check_sub(&a.neg(), b, &q.neg(), &r.neg());
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let b = BigInt::from_slice(Plus, b_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+
+ if a_vec.len() == 1 && a_vec[0] != 0 {
+ let a = a_vec[0];
+ check(&c, a, &b, &Zero::zero());
+ }
+
+ if b_vec.len() == 1 && b_vec[0] != 0 {
+ let b = b_vec[0];
+ check(&c, b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigInt::from_slice(Plus, a_vec);
+ let c = BigInt::from_slice(Plus, c_vec);
+ let d = BigInt::from_slice(Plus, d_vec);
+
+ if b_vec.len() == 1 && b_vec[0] != 0 {
+ let b = b_vec[0];
+ check(&a, b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_scalar_div_rem_zero() {
+ catch_unwind(|| BigInt::zero() / 0u32).unwrap_err();
+ catch_unwind(|| BigInt::zero() % 0u32).unwrap_err();
+ catch_unwind(|| BigInt::one() / 0u32).unwrap_err();
+ catch_unwind(|| BigInt::one() % 0u32).unwrap_err();
+}
diff --git a/rust/vendor/num-bigint/tests/biguint.rs b/rust/vendor/num-bigint/tests/biguint.rs
new file mode 100644
index 0000000..c027771
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/biguint.rs
@@ -0,0 +1,1924 @@
+use num_bigint::Sign::Plus;
+use num_bigint::{BigInt, ToBigInt};
+use num_bigint::{BigUint, ToBigUint};
+use num_integer::Integer;
+
+use std::cmp::Ordering::{Equal, Greater, Less};
+use std::collections::hash_map::RandomState;
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::i64;
+use std::iter::repeat;
+use std::str::FromStr;
+use std::{f32, f64};
+use std::{i128, u128};
+use std::{u16, u32, u64, u8, usize};
+
+use num_traits::{
+ pow, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Euclid, FromBytes, FromPrimitive, Num,
+ One, Pow, ToBytes, ToPrimitive, Zero,
+};
+
+mod consts;
+use crate::consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_from_bytes_be() {
+ fn check(s: &str, result: &str) {
+ let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(BigUint::from_bytes_be(s.as_bytes()), b);
+ assert_eq!(<BigUint as FromBytes>::from_be_bytes(s.as_bytes()), b);
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ assert_eq!(BigUint::from_bytes_be(&[]), BigUint::zero());
+}
+
+#[test]
+fn test_to_bytes_be() {
+ fn check(s: &str, result: &str) {
+ let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(b.to_bytes_be(), s.as_bytes());
+ assert_eq!(<BigUint as ToBytes>::to_be_bytes(&b), s.as_bytes());
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("AB", "16706");
+ check("Hello world!", "22405534230753963835153736737");
+ let b: BigUint = Zero::zero();
+ assert_eq!(b.to_bytes_be(), [0]);
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigUint::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_be(), [1, 0, 0, 0, 0, 0, 0, 2, 0]);
+}
+
+#[test]
+fn test_from_bytes_le() {
+ fn check(s: &str, result: &str) {
+ let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(BigUint::from_bytes_le(s.as_bytes()), b);
+ assert_eq!(<BigUint as FromBytes>::from_le_bytes(s.as_bytes()), b);
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ assert_eq!(BigUint::from_bytes_le(&[]), BigUint::zero());
+}
+
+#[test]
+fn test_to_bytes_le() {
+ fn check(s: &str, result: &str) {
+ let b = BigUint::parse_bytes(result.as_bytes(), 10).unwrap();
+ assert_eq!(b.to_bytes_le(), s.as_bytes());
+ assert_eq!(<BigUint as ToBytes>::to_le_bytes(&b), s.as_bytes());
+ }
+ check("A", "65");
+ check("AA", "16705");
+ check("BA", "16706");
+ check("!dlrow olleH", "22405534230753963835153736737");
+ let b: BigUint = Zero::zero();
+ assert_eq!(b.to_bytes_le(), [0]);
+
+ // Test with leading/trailing zero bytes and a full BigDigit of value 0
+ let b = BigUint::from_str_radix("00010000000000000200", 16).unwrap();
+ assert_eq!(b.to_bytes_le(), [0, 2, 0, 0, 0, 0, 0, 0, 1]);
+}
+
+#[test]
+fn test_cmp() {
+ let data: [&[_]; 7] = [&[], &[1], &[2], &[!0], &[0, 1], &[2, 1], &[1, 1, 1]];
+ let data: Vec<BigUint> = data.iter().map(|v| BigUint::from_slice(*v)).collect();
+ for (i, ni) in data.iter().enumerate() {
+ for (j0, nj) in data[i..].iter().enumerate() {
+ let j = j0 + i;
+ if i == j {
+ assert_eq!(ni.cmp(nj), Equal);
+ assert_eq!(nj.cmp(ni), Equal);
+ assert_eq!(ni, nj);
+ assert!(!(ni != nj));
+ assert!(ni <= nj);
+ assert!(ni >= nj);
+ assert!(!(ni < nj));
+ assert!(!(ni > nj));
+ } else {
+ assert_eq!(ni.cmp(nj), Less);
+ assert_eq!(nj.cmp(ni), Greater);
+
+ assert!(!(ni == nj));
+ assert!(ni != nj);
+
+ assert!(ni <= nj);
+ assert!(!(ni >= nj));
+ assert!(ni < nj);
+ assert!(!(ni > nj));
+
+ assert!(!(nj <= ni));
+ assert!(nj >= ni);
+ assert!(!(nj < ni));
+ assert!(nj > ni);
+ }
+ }
+ }
+}
+
+fn hash<T: Hash>(x: &T) -> u64 {
+ let mut hasher = <RandomState as BuildHasher>::Hasher::new();
+ x.hash(&mut hasher);
+ hasher.finish()
+}
+
+#[test]
+fn test_hash() {
+ use crate::hash;
+
+ let a = BigUint::new(vec![]);
+ let b = BigUint::new(vec![0]);
+ let c = BigUint::new(vec![1]);
+ let d = BigUint::new(vec![1, 0, 0, 0, 0, 0]);
+ let e = BigUint::new(vec![0, 0, 0, 0, 0, 1]);
+ assert!(hash(&a) == hash(&b));
+ assert!(hash(&b) != hash(&c));
+ assert!(hash(&c) == hash(&d));
+ assert!(hash(&d) != hash(&e));
+}
+
+// LEFT, RIGHT, AND, OR, XOR
+const BIT_TESTS: &[(&[u32], &[u32], &[u32], &[u32], &[u32])] = &[
+ (&[], &[], &[], &[], &[]),
+ (&[1, 0, 1], &[1, 1], &[1], &[1, 1, 1], &[0, 1, 1]),
+ (&[1, 0, 1], &[0, 1, 1], &[0, 0, 1], &[1, 1, 1], &[1, 1]),
+ (
+ &[268, 482, 17],
+ &[964, 54],
+ &[260, 34],
+ &[972, 502, 17],
+ &[712, 468, 17],
+ ),
+];
+
+#[test]
+fn test_bitand() {
+ for elm in BIT_TESTS {
+ let (a_vec, b_vec, c_vec, _, _) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a & b == c);
+ assert_op!(b & a == c);
+ assert_assign_op!(a &= b == c);
+ assert_assign_op!(b &= a == c);
+ }
+}
+
+#[test]
+fn test_bitor() {
+ for elm in BIT_TESTS {
+ let (a_vec, b_vec, _, c_vec, _) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a | b == c);
+ assert_op!(b | a == c);
+ assert_assign_op!(a |= b == c);
+ assert_assign_op!(b |= a == c);
+ }
+}
+
+#[test]
+fn test_bitxor() {
+ for elm in BIT_TESTS {
+ let (a_vec, b_vec, _, _, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a ^ b == c);
+ assert_op!(b ^ a == c);
+ assert_op!(a ^ c == b);
+ assert_op!(c ^ a == b);
+ assert_op!(b ^ c == a);
+ assert_op!(c ^ b == a);
+ assert_assign_op!(a ^= b == c);
+ assert_assign_op!(b ^= a == c);
+ assert_assign_op!(a ^= c == b);
+ assert_assign_op!(c ^= a == b);
+ assert_assign_op!(b ^= c == a);
+ assert_assign_op!(c ^= b == a);
+ }
+}
+
+#[test]
+fn test_shl() {
+ fn check(s: &str, shift: usize, ans: &str) {
+ let opt_biguint = BigUint::from_str_radix(s, 16).ok();
+ let mut bu_assign = opt_biguint.unwrap();
+ let bu = (bu_assign.clone() << shift).to_str_radix(16);
+ assert_eq!(bu, ans);
+ bu_assign <<= shift;
+ assert_eq!(bu_assign.to_str_radix(16), ans);
+ }
+
+ check("0", 3, "0");
+ check("1", 3, "8");
+
+ check(
+ "1\
+ 0000\
+ 0000\
+ 0000\
+ 0001\
+ 0000\
+ 0000\
+ 0000\
+ 0001",
+ 3,
+ "8\
+ 0000\
+ 0000\
+ 0000\
+ 0008\
+ 0000\
+ 0000\
+ 0000\
+ 0008",
+ );
+ check(
+ "1\
+ 0000\
+ 0001\
+ 0000\
+ 0001",
+ 2,
+ "4\
+ 0000\
+ 0004\
+ 0000\
+ 0004",
+ );
+ check(
+ "1\
+ 0001\
+ 0001",
+ 1,
+ "2\
+ 0002\
+ 0002",
+ );
+
+ check(
+ "\
+ 4000\
+ 0000\
+ 0000\
+ 0000",
+ 3,
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000\
+ 0000",
+ 2,
+ "1\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000",
+ 2,
+ "1\
+ 0000",
+ );
+
+ check(
+ "4000\
+ 0000\
+ 0000\
+ 0000",
+ 67,
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000\
+ 0000",
+ 35,
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "4000",
+ 19,
+ "2\
+ 0000\
+ 0000",
+ );
+
+ check(
+ "fedc\
+ ba98\
+ 7654\
+ 3210\
+ fedc\
+ ba98\
+ 7654\
+ 3210",
+ 4,
+ "f\
+ edcb\
+ a987\
+ 6543\
+ 210f\
+ edcb\
+ a987\
+ 6543\
+ 2100",
+ );
+ check(
+ "88887777666655554444333322221111",
+ 16,
+ "888877776666555544443333222211110000",
+ );
+}
+
+#[test]
+fn test_shr() {
+ fn check(s: &str, shift: usize, ans: &str) {
+ let opt_biguint = BigUint::from_str_radix(s, 16).ok();
+ let mut bu_assign = opt_biguint.unwrap();
+ let bu = (bu_assign.clone() >> shift).to_str_radix(16);
+ assert_eq!(bu, ans);
+ bu_assign >>= shift;
+ assert_eq!(bu_assign.to_str_radix(16), ans);
+ }
+
+ check("0", 3, "0");
+ check("f", 3, "1");
+
+ check(
+ "1\
+ 0000\
+ 0000\
+ 0000\
+ 0001\
+ 0000\
+ 0000\
+ 0000\
+ 0001",
+ 3,
+ "2000\
+ 0000\
+ 0000\
+ 0000\
+ 2000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "1\
+ 0000\
+ 0001\
+ 0000\
+ 0001",
+ 2,
+ "4000\
+ 0000\
+ 4000\
+ 0000",
+ );
+ check(
+ "1\
+ 0001\
+ 0001",
+ 1,
+ "8000\
+ 8000",
+ );
+
+ check(
+ "2\
+ 0000\
+ 0000\
+ 0000\
+ 0001\
+ 0000\
+ 0000\
+ 0000\
+ 0001",
+ 67,
+ "4000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "2\
+ 0000\
+ 0001\
+ 0000\
+ 0001",
+ 35,
+ "4000\
+ 0000",
+ );
+ check(
+ "2\
+ 0001\
+ 0001",
+ 19,
+ "4000",
+ );
+
+ check(
+ "1\
+ 0000\
+ 0000\
+ 0000\
+ 0000",
+ 1,
+ "8000\
+ 0000\
+ 0000\
+ 0000",
+ );
+ check(
+ "1\
+ 0000\
+ 0000",
+ 1,
+ "8000\
+ 0000",
+ );
+ check(
+ "1\
+ 0000",
+ 1,
+ "8000",
+ );
+ check(
+ "f\
+ edcb\
+ a987\
+ 6543\
+ 210f\
+ edcb\
+ a987\
+ 6543\
+ 2100",
+ 4,
+ "fedc\
+ ba98\
+ 7654\
+ 3210\
+ fedc\
+ ba98\
+ 7654\
+ 3210",
+ );
+
+ check(
+ "888877776666555544443333222211110000",
+ 16,
+ "88887777666655554444333322221111",
+ );
+}
+
+// `DoubleBigDigit` size dependent
+#[test]
+fn test_convert_i64() {
+ fn check(b1: BigUint, i: i64) {
+ let b2: BigUint = FromPrimitive::from_i64(i).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_i64().unwrap(), i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i64::MAX.to_biguint().unwrap(), i64::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1 >> 1]), i64::MAX);
+
+ assert_eq!(i64::MIN.to_biguint(), None);
+ assert_eq!(BigUint::new(vec![N1, N1]).to_i64(), None);
+ assert_eq!(BigUint::new(vec![0, 0, 1]).to_i64(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1]).to_i64(), None);
+}
+
+#[test]
+fn test_convert_i128() {
+ fn check(b1: BigUint, i: i128) {
+ let b2: BigUint = FromPrimitive::from_i128(i).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_i128().unwrap(), i);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(i128::MAX.to_biguint().unwrap(), i128::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1, N1, N1 >> 1]), i128::MAX);
+
+ assert_eq!(i128::MIN.to_biguint(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1, N1]).to_i128(), None);
+ assert_eq!(BigUint::new(vec![0, 0, 0, 0, 1]).to_i128(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1, N1, N1]).to_i128(), None);
+}
+
+// `DoubleBigDigit` size dependent
+#[test]
+fn test_convert_u64() {
+ fn check(b1: BigUint, u: u64) {
+ let b2: BigUint = FromPrimitive::from_u64(u).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_u64().unwrap(), u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u64::MIN.to_biguint().unwrap(), u64::MIN);
+ check(u64::MAX.to_biguint().unwrap(), u64::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1]), u64::MAX);
+
+ assert_eq!(BigUint::new(vec![0, 0, 1]).to_u64(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1]).to_u64(), None);
+}
+
+#[test]
+fn test_convert_u128() {
+ fn check(b1: BigUint, u: u128) {
+ let b2: BigUint = FromPrimitive::from_u128(u).unwrap();
+ assert_eq!(b1, b2);
+ assert_eq!(b1.to_u128().unwrap(), u);
+ }
+
+ check(Zero::zero(), 0);
+ check(One::one(), 1);
+ check(u128::MIN.to_biguint().unwrap(), u128::MIN);
+ check(u128::MAX.to_biguint().unwrap(), u128::MAX);
+
+ check(BigUint::new(vec![]), 0);
+ check(BigUint::new(vec![1]), 1);
+ check(BigUint::new(vec![N1]), (1 << 32) - 1);
+ check(BigUint::new(vec![0, 1]), 1 << 32);
+ check(BigUint::new(vec![N1, N1, N1, N1]), u128::MAX);
+
+ assert_eq!(BigUint::new(vec![0, 0, 0, 0, 1]).to_u128(), None);
+ assert_eq!(BigUint::new(vec![N1, N1, N1, N1, N1]).to_u128(), None);
+}
+
+#[test]
+#[allow(clippy::float_cmp)]
+fn test_convert_f32() {
+ fn check(b1: &BigUint, f: f32) {
+ let b2 = BigUint::from_f32(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f32().unwrap(), f);
+ }
+
+ check(&BigUint::zero(), 0.0);
+ check(&BigUint::one(), 1.0);
+ check(&BigUint::from(u16::MAX), pow(2.0_f32, 16) - 1.0);
+ check(&BigUint::from(1u64 << 32), pow(2.0_f32, 32));
+ check(&BigUint::from_slice(&[0, 0, 1]), pow(2.0_f32, 64));
+ check(
+ &((BigUint::one() << 100) + (BigUint::one() << 123)),
+ pow(2.0_f32, 100) + pow(2.0_f32, 123),
+ );
+ check(&(BigUint::one() << 127), pow(2.0_f32, 127));
+ check(&(BigUint::from((1u64 << 24) - 1) << (128 - 24)), f32::MAX);
+
+ // keeping all 24 digits with the bits at different offsets to the BigDigits
+ let x: u32 = 0b00000000101111011111011011011101;
+ let mut f = x as f32;
+ let mut b = BigUint::from(x);
+ for _ in 0..64 {
+ check(&b, f);
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // this number when rounded to f64 then f32 isn't the same as when rounded straight to f32
+ let n: u64 = 0b0000000000111111111111111111111111011111111111111111111111111111;
+ assert!((n as f64) as f32 != n as f32);
+ assert_eq!(BigUint::from(n).to_f32(), Some(n as f32));
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 25) - 1) as f32;
+ let mut b = BigUint::from(1u64 << 25);
+ for _ in 0..64 {
+ assert_eq!(b.to_f32(), Some(f));
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // test correct ties-to-even rounding
+ let weird: i128 = (1i128 << 100) + (1i128 << (100 - f32::MANTISSA_DIGITS));
+ assert_ne!(weird as f32, (weird + 1) as f32);
+
+ assert_eq!(BigInt::from(weird).to_f32(), Some(weird as f32));
+ assert_eq!(BigInt::from(weird + 1).to_f32(), Some((weird + 1) as f32));
+
+ // rounding
+ assert_eq!(BigUint::from_f32(-1.0), None);
+ assert_eq!(BigUint::from_f32(-0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(-0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(-0.0), Some(BigUint::zero()));
+ assert_eq!(
+ BigUint::from_f32(f32::MIN_POSITIVE / 2.0),
+ Some(BigUint::zero())
+ );
+ assert_eq!(BigUint::from_f32(f32::MIN_POSITIVE), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f32(f32::consts::E), Some(BigUint::from(2u32)));
+ assert_eq!(
+ BigUint::from_f32(f32::consts::PI),
+ Some(BigUint::from(3u32))
+ );
+
+ // special float values
+ assert_eq!(BigUint::from_f32(f32::NAN), None);
+ assert_eq!(BigUint::from_f32(f32::INFINITY), None);
+ assert_eq!(BigUint::from_f32(f32::NEG_INFINITY), None);
+ assert_eq!(BigUint::from_f32(f32::MIN), None);
+
+ // largest BigUint that will round to a finite f32 value
+ let big_num = (BigUint::one() << 128u8) - 1u8 - (BigUint::one() << (128u8 - 25));
+ assert_eq!(big_num.to_f32(), Some(f32::MAX));
+ assert_eq!((big_num + 1u8).to_f32(), Some(f32::INFINITY));
+
+ assert_eq!(
+ ((BigUint::one() << 128u8) - 1u8).to_f32(),
+ Some(f32::INFINITY)
+ );
+ assert_eq!((BigUint::one() << 128u8).to_f32(), Some(f32::INFINITY));
+}
+
+#[test]
+#[allow(clippy::float_cmp)]
+fn test_convert_f64() {
+ fn check(b1: &BigUint, f: f64) {
+ let b2 = BigUint::from_f64(f).unwrap();
+ assert_eq!(b1, &b2);
+ assert_eq!(b1.to_f64().unwrap(), f);
+ }
+
+ check(&BigUint::zero(), 0.0);
+ check(&BigUint::one(), 1.0);
+ check(&BigUint::from(u32::MAX), pow(2.0_f64, 32) - 1.0);
+ check(&BigUint::from(1u64 << 32), pow(2.0_f64, 32));
+ check(&BigUint::from_slice(&[0, 0, 1]), pow(2.0_f64, 64));
+ check(
+ &((BigUint::one() << 100) + (BigUint::one() << 152)),
+ pow(2.0_f64, 100) + pow(2.0_f64, 152),
+ );
+ check(&(BigUint::one() << 1023), pow(2.0_f64, 1023));
+ check(&(BigUint::from((1u64 << 53) - 1) << (1024 - 53)), f64::MAX);
+
+ // keeping all 53 digits with the bits at different offsets to the BigDigits
+ let x: u64 = 0b0000000000011110111110110111111101110111101111011111011011011101;
+ let mut f = x as f64;
+ let mut b = BigUint::from(x);
+ for _ in 0..128 {
+ check(&b, f);
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // test rounding up with the bits at different offsets to the BigDigits
+ let mut f = ((1u64 << 54) - 1) as f64;
+ let mut b = BigUint::from(1u64 << 54);
+ for _ in 0..128 {
+ assert_eq!(b.to_f64(), Some(f));
+ f *= 2.0;
+ b <<= 1;
+ }
+
+ // test correct ties-to-even rounding
+ let weird: i128 = (1i128 << 100) + (1i128 << (100 - f64::MANTISSA_DIGITS));
+ assert_ne!(weird as f64, (weird + 1) as f64);
+
+ assert_eq!(BigInt::from(weird).to_f64(), Some(weird as f64));
+ assert_eq!(BigInt::from(weird + 1).to_f64(), Some((weird + 1) as f64));
+
+ // rounding
+ assert_eq!(BigUint::from_f64(-1.0), None);
+ assert_eq!(BigUint::from_f64(-0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(-0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(-0.0), Some(BigUint::zero()));
+ assert_eq!(
+ BigUint::from_f64(f64::MIN_POSITIVE / 2.0),
+ Some(BigUint::zero())
+ );
+ assert_eq!(BigUint::from_f64(f64::MIN_POSITIVE), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(0.5), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(0.99999), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_f64(f64::consts::E), Some(BigUint::from(2u32)));
+ assert_eq!(
+ BigUint::from_f64(f64::consts::PI),
+ Some(BigUint::from(3u32))
+ );
+
+ // special float values
+ assert_eq!(BigUint::from_f64(f64::NAN), None);
+ assert_eq!(BigUint::from_f64(f64::INFINITY), None);
+ assert_eq!(BigUint::from_f64(f64::NEG_INFINITY), None);
+ assert_eq!(BigUint::from_f64(f64::MIN), None);
+
+ // largest BigUint that will round to a finite f64 value
+ let big_num = (BigUint::one() << 1024u16) - 1u8 - (BigUint::one() << (1024u16 - 54));
+ assert_eq!(big_num.to_f64(), Some(f64::MAX));
+ assert_eq!((big_num + 1u8).to_f64(), Some(f64::INFINITY));
+
+ assert_eq!(
+ ((BigUint::one() << 1024u16) - 1u8).to_f64(),
+ Some(f64::INFINITY)
+ );
+ assert_eq!((BigUint::one() << 1024u16).to_f64(), Some(f64::INFINITY));
+}
+
+#[test]
+fn test_convert_to_bigint() {
+ fn check(n: BigUint, ans: BigInt) {
+ assert_eq!(n.to_bigint().unwrap(), ans);
+ assert_eq!(n.to_bigint().unwrap().to_biguint().unwrap(), n);
+ }
+ check(Zero::zero(), Zero::zero());
+ check(
+ BigUint::new(vec![1, 2, 3]),
+ BigInt::from_biguint(Plus, BigUint::new(vec![1, 2, 3])),
+ );
+}
+
+#[test]
+fn test_convert_from_uint() {
+ macro_rules! check {
+ ($ty:ident, $max:expr) => {
+ assert_eq!(BigUint::from($ty::zero()), BigUint::zero());
+ assert_eq!(BigUint::from($ty::one()), BigUint::one());
+ assert_eq!(BigUint::from($ty::MAX - $ty::one()), $max - BigUint::one());
+ assert_eq!(BigUint::from($ty::MAX), $max);
+ };
+ }
+
+ check!(u8, BigUint::from_slice(&[u8::MAX as u32]));
+ check!(u16, BigUint::from_slice(&[u16::MAX as u32]));
+ check!(u32, BigUint::from_slice(&[u32::MAX]));
+ check!(u64, BigUint::from_slice(&[u32::MAX, u32::MAX]));
+ check!(
+ u128,
+ BigUint::from_slice(&[u32::MAX, u32::MAX, u32::MAX, u32::MAX])
+ );
+ check!(usize, BigUint::from(usize::MAX as u64));
+}
+
+#[test]
+fn test_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a + b == c);
+ assert_op!(b + a == c);
+ assert_assign_op!(a += b == c);
+ assert_assign_op!(b += a == c);
+ }
+}
+
+#[test]
+fn test_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(c - a == b);
+ assert_op!(c - b == a);
+ assert_assign_op!(c -= a == b);
+ assert_assign_op!(c -= b == a);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_sub_fail_on_underflow() {
+ let (a, b): (BigUint, BigUint) = (Zero::zero(), One::one());
+ let _ = a - b;
+}
+
+#[test]
+fn test_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert_op!(a * b == c);
+ assert_op!(b * a == c);
+ assert_assign_op!(a *= b == c);
+ assert_assign_op!(b *= a == c);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ assert!(a == &b * &c + &d);
+ assert!(a == &c * &b + &d);
+ }
+}
+
+#[test]
+fn test_div_rem() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ assert_op!(c / a == b);
+ assert_op!(c % a == BigUint::zero());
+ assert_assign_op!(c /= a == b);
+ assert_assign_op!(c %= a == BigUint::zero());
+ assert_eq!(c.div_rem(&a), (b.clone(), BigUint::zero()));
+ }
+ if !b.is_zero() {
+ assert_op!(c / b == a);
+ assert_op!(c % b == BigUint::zero());
+ assert_assign_op!(c /= b == a);
+ assert_assign_op!(c %= b == BigUint::zero());
+ assert_eq!(c.div_rem(&b), (a.clone(), BigUint::zero()));
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ if !b.is_zero() {
+ assert_op!(a / b == c);
+ assert_op!(a % b == d);
+ assert_assign_op!(a /= b == c);
+ assert_assign_op!(a %= b == d);
+ assert!(a.div_rem(&b) == (c, d));
+ }
+ }
+}
+
+#[test]
+fn test_div_rem_big_multiple() {
+ let a = BigUint::from(3u32).pow(100u32);
+ let a2 = &a * &a;
+
+ let (div, rem) = a2.div_rem(&a);
+ assert_eq!(div, a);
+ assert!(rem.is_zero());
+
+ let (div, rem) = (&a2 - 1u32).div_rem(&a);
+ assert_eq!(div, &a - 1u32);
+ assert_eq!(rem, &a - 1u32);
+}
+
+#[test]
+fn test_div_ceil() {
+ fn check(a: &BigUint, b: &BigUint, d: &BigUint, m: &BigUint) {
+ if m.is_zero() {
+ assert_eq!(a.div_ceil(b), *d);
+ } else {
+ assert_eq!(a.div_ceil(b), d + 1u32);
+ }
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_div_rem_euclid() {
+ fn check(a: &BigUint, b: &BigUint, d: &BigUint, m: &BigUint) {
+ assert_eq!(a.div_euclid(b), *d);
+ assert_eq!(a.rem_euclid(b), *m);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ }
+ }
+}
+
+#[test]
+fn test_checked_add() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert!(a.checked_add(&b).unwrap() == c);
+ assert!(b.checked_add(&a).unwrap() == c);
+ }
+}
+
+#[test]
+fn test_checked_sub() {
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert!(c.checked_sub(&a).unwrap() == b);
+ assert!(c.checked_sub(&b).unwrap() == a);
+
+ if a > c {
+ assert!(a.checked_sub(&c).is_none());
+ }
+ if b > c {
+ assert!(b.checked_sub(&c).is_none());
+ }
+ }
+}
+
+#[test]
+fn test_checked_mul() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ assert!(a.checked_mul(&b).unwrap() == c);
+ assert!(b.checked_mul(&a).unwrap() == c);
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ assert!(a == b.checked_mul(&c).unwrap() + &d);
+ assert!(a == c.checked_mul(&b).unwrap() + &d);
+ }
+}
+
+#[test]
+fn test_mul_overflow() {
+ // Test for issue #187 - overflow due to mac3 incorrectly sizing temporary
+ let s = "5311379928167670986895882065524686273295931177270319231994441382\
+ 0040355986085224273916250223263671004753755210595137000079652876\
+ 0829212940754539968588340162273730474622005920097370111";
+ let a: BigUint = s.parse().unwrap();
+ let b = a.clone();
+ let _ = a.checked_mul(&b);
+}
+
+#[test]
+fn test_mul_overflow_2() {
+ // Try a bunch of sizes that are right on the edge of multiplication length
+ // overflow, where (x * x).data.len() == 2 * x.data.len() + 1.
+ for i in 1u8..20 {
+ let bits = 1u32 << i;
+ let x = (BigUint::one() << bits) - 1u32;
+ let x2 = (BigUint::one() << (2 * bits)) - &x - &x - 1u32;
+ assert_eq!(&x * &x, x2);
+ }
+}
+
+#[test]
+fn test_checked_div() {
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ assert!(c.checked_div(&a).unwrap() == b);
+ }
+ if !b.is_zero() {
+ assert!(c.checked_div(&b).unwrap() == a);
+ }
+
+ assert!(c.checked_div(&Zero::zero()).is_none());
+ }
+}
+
+#[test]
+fn test_gcd() {
+ fn check(a: usize, b: usize, c: usize) {
+ let big_a: BigUint = FromPrimitive::from_usize(a).unwrap();
+ let big_b: BigUint = FromPrimitive::from_usize(b).unwrap();
+ let big_c: BigUint = FromPrimitive::from_usize(c).unwrap();
+
+ assert_eq!(big_a.gcd(&big_b), big_c);
+ assert_eq!(big_a.gcd_lcm(&big_b).0, big_c);
+ }
+
+ check(10, 2, 2);
+ check(10, 3, 1);
+ check(0, 3, 3);
+ check(3, 3, 3);
+ check(56, 42, 14);
+}
+
+#[test]
+fn test_lcm() {
+ fn check(a: usize, b: usize, c: usize) {
+ let big_a: BigUint = FromPrimitive::from_usize(a).unwrap();
+ let big_b: BigUint = FromPrimitive::from_usize(b).unwrap();
+ let big_c: BigUint = FromPrimitive::from_usize(c).unwrap();
+
+ assert_eq!(big_a.lcm(&big_b), big_c);
+ assert_eq!(big_a.gcd_lcm(&big_b).1, big_c);
+ }
+
+ check(0, 0, 0);
+ check(1, 0, 0);
+ check(0, 1, 0);
+ check(1, 1, 1);
+ check(8, 9, 72);
+ check(11, 5, 55);
+ check(99, 17, 1683);
+}
+
+#[test]
+fn test_is_multiple_of() {
+ assert!(BigUint::from(0u32).is_multiple_of(&BigUint::from(0u32)));
+ assert!(BigUint::from(6u32).is_multiple_of(&BigUint::from(6u32)));
+ assert!(BigUint::from(6u32).is_multiple_of(&BigUint::from(3u32)));
+ assert!(BigUint::from(6u32).is_multiple_of(&BigUint::from(1u32)));
+
+ assert!(!BigUint::from(42u32).is_multiple_of(&BigUint::from(5u32)));
+ assert!(!BigUint::from(5u32).is_multiple_of(&BigUint::from(3u32)));
+ assert!(!BigUint::from(42u32).is_multiple_of(&BigUint::from(0u32)));
+}
+
+#[test]
+fn test_next_multiple_of() {
+ assert_eq!(
+ BigUint::from(16u32).next_multiple_of(&BigUint::from(8u32)),
+ BigUint::from(16u32)
+ );
+ assert_eq!(
+ BigUint::from(23u32).next_multiple_of(&BigUint::from(8u32)),
+ BigUint::from(24u32)
+ );
+}
+
+#[test]
+fn test_prev_multiple_of() {
+ assert_eq!(
+ BigUint::from(16u32).prev_multiple_of(&BigUint::from(8u32)),
+ BigUint::from(16u32)
+ );
+ assert_eq!(
+ BigUint::from(23u32).prev_multiple_of(&BigUint::from(8u32)),
+ BigUint::from(16u32)
+ );
+}
+
+#[test]
+fn test_is_even() {
+ let one: BigUint = FromStr::from_str("1").unwrap();
+ let two: BigUint = FromStr::from_str("2").unwrap();
+ let thousand: BigUint = FromStr::from_str("1000").unwrap();
+ let big: BigUint = FromStr::from_str("1000000000000000000000").unwrap();
+ let bigger: BigUint = FromStr::from_str("1000000000000000000001").unwrap();
+ assert!(one.is_odd());
+ assert!(two.is_even());
+ assert!(thousand.is_even());
+ assert!(big.is_even());
+ assert!(bigger.is_odd());
+ assert!((&one << 64u8).is_even());
+ assert!(((&one << 64u8) + one).is_odd());
+}
+
+fn to_str_pairs() -> Vec<(BigUint, Vec<(u32, String)>)> {
+ let bits = 32;
+ vec![
+ (
+ Zero::zero(),
+ vec![(2, "0".to_string()), (3, "0".to_string())],
+ ),
+ (
+ BigUint::from_slice(&[0xff]),
+ vec![
+ (2, "11111111".to_string()),
+ (3, "100110".to_string()),
+ (4, "3333".to_string()),
+ (5, "2010".to_string()),
+ (6, "1103".to_string()),
+ (7, "513".to_string()),
+ (8, "377".to_string()),
+ (9, "313".to_string()),
+ (10, "255".to_string()),
+ (11, "212".to_string()),
+ (12, "193".to_string()),
+ (13, "168".to_string()),
+ (14, "143".to_string()),
+ (15, "120".to_string()),
+ (16, "ff".to_string()),
+ ],
+ ),
+ (
+ BigUint::from_slice(&[0xfff]),
+ vec![
+ (2, "111111111111".to_string()),
+ (4, "333333".to_string()),
+ (16, "fff".to_string()),
+ ],
+ ),
+ (
+ BigUint::from_slice(&[1, 2]),
+ vec![
+ (
+ 2,
+ format!("10{}1", repeat("0").take(bits - 1).collect::<String>()),
+ ),
+ (
+ 4,
+ format!("2{}1", repeat("0").take(bits / 2 - 1).collect::<String>()),
+ ),
+ (
+ 10,
+ match bits {
+ 64 => "36893488147419103233".to_string(),
+ 32 => "8589934593".to_string(),
+ 16 => "131073".to_string(),
+ _ => panic!(),
+ },
+ ),
+ (
+ 16,
+ format!("2{}1", repeat("0").take(bits / 4 - 1).collect::<String>()),
+ ),
+ ],
+ ),
+ (
+ BigUint::from_slice(&[1, 2, 3]),
+ vec![
+ (
+ 2,
+ format!(
+ "11{}10{}1",
+ repeat("0").take(bits - 2).collect::<String>(),
+ repeat("0").take(bits - 1).collect::<String>()
+ ),
+ ),
+ (
+ 4,
+ format!(
+ "3{}2{}1",
+ repeat("0").take(bits / 2 - 1).collect::<String>(),
+ repeat("0").take(bits / 2 - 1).collect::<String>()
+ ),
+ ),
+ (
+ 8,
+ match bits {
+ 64 => "14000000000000000000004000000000000000000001".to_string(),
+ 32 => "6000000000100000000001".to_string(),
+ 16 => "140000400001".to_string(),
+ _ => panic!(),
+ },
+ ),
+ (
+ 10,
+ match bits {
+ 64 => "1020847100762815390427017310442723737601".to_string(),
+ 32 => "55340232229718589441".to_string(),
+ 16 => "12885032961".to_string(),
+ _ => panic!(),
+ },
+ ),
+ (
+ 16,
+ format!(
+ "3{}2{}1",
+ repeat("0").take(bits / 4 - 1).collect::<String>(),
+ repeat("0").take(bits / 4 - 1).collect::<String>()
+ ),
+ ),
+ ],
+ ),
+ ]
+}
+
+#[test]
+fn test_to_str_radix() {
+ let r = to_str_pairs();
+ for num_pair in r.iter() {
+ let &(ref n, ref rs) = num_pair;
+ for str_pair in rs.iter() {
+ let &(ref radix, ref str) = str_pair;
+ assert_eq!(n.to_str_radix(*radix), *str);
+ }
+ }
+}
+
+#[test]
+fn test_from_and_to_radix() {
+ const GROUND_TRUTH: &[(&[u8], u32, &[u8])] = &[
+ (b"0", 42, &[0]),
+ (
+ b"ffffeeffbb",
+ 2,
+ &[
+ 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ ],
+ ),
+ (
+ b"ffffeeffbb",
+ 3,
+ &[
+ 2, 2, 1, 1, 2, 1, 1, 2, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 1, 0, 0, 2, 2, 0, 1,
+ ],
+ ),
+ (
+ b"ffffeeffbb",
+ 4,
+ &[3, 2, 3, 2, 3, 3, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3],
+ ),
+ (
+ b"ffffeeffbb",
+ 5,
+ &[0, 4, 3, 3, 1, 4, 2, 4, 1, 4, 4, 2, 3, 0, 0, 1, 2, 1],
+ ),
+ (
+ b"ffffeeffbb",
+ 6,
+ &[5, 5, 4, 5, 5, 0, 0, 1, 2, 5, 3, 0, 1, 0, 2, 2],
+ ),
+ (
+ b"ffffeeffbb",
+ 7,
+ &[4, 2, 3, 6, 0, 1, 6, 1, 6, 2, 0, 3, 2, 4, 1],
+ ),
+ (
+ b"ffffeeffbb",
+ 8,
+ &[3, 7, 6, 7, 7, 5, 3, 7, 7, 7, 7, 7, 7, 1],
+ ),
+ (b"ffffeeffbb", 9, &[8, 4, 5, 7, 0, 0, 3, 2, 0, 3, 0, 8, 3]),
+ (b"ffffeeffbb", 10, &[5, 9, 5, 3, 1, 5, 0, 1, 5, 9, 9, 0, 1]),
+ (b"ffffeeffbb", 11, &[10, 7, 6, 5, 2, 0, 3, 3, 3, 4, 9, 3]),
+ (b"ffffeeffbb", 12, &[11, 8, 5, 10, 1, 10, 3, 1, 1, 9, 5, 1]),
+ (b"ffffeeffbb", 13, &[0, 5, 7, 4, 6, 5, 6, 11, 8, 12, 7]),
+ (b"ffffeeffbb", 14, &[11, 4, 4, 11, 8, 4, 6, 0, 3, 11, 3]),
+ (b"ffffeeffbb", 15, &[5, 11, 13, 2, 1, 10, 2, 0, 9, 13, 1]),
+ (b"ffffeeffbb", 16, &[11, 11, 15, 15, 14, 14, 15, 15, 15, 15]),
+ (b"ffffeeffbb", 17, &[0, 2, 14, 12, 2, 14, 8, 10, 4, 9]),
+ (b"ffffeeffbb", 18, &[17, 15, 5, 13, 10, 16, 16, 13, 9, 5]),
+ (b"ffffeeffbb", 19, &[14, 13, 2, 8, 9, 0, 1, 14, 7, 3]),
+ (b"ffffeeffbb", 20, &[15, 19, 3, 14, 0, 17, 19, 18, 2, 2]),
+ (b"ffffeeffbb", 21, &[11, 5, 4, 13, 5, 18, 9, 1, 8, 1]),
+ (b"ffffeeffbb", 22, &[21, 3, 7, 21, 15, 12, 17, 0, 20]),
+ (b"ffffeeffbb", 23, &[21, 21, 6, 9, 10, 7, 21, 0, 14]),
+ (b"ffffeeffbb", 24, &[11, 10, 19, 14, 22, 11, 17, 23, 9]),
+ (b"ffffeeffbb", 25, &[20, 18, 21, 22, 21, 14, 3, 5, 7]),
+ (b"ffffeeffbb", 26, &[13, 15, 24, 11, 17, 6, 23, 6, 5]),
+ (b"ffffeeffbb", 27, &[17, 16, 7, 0, 21, 0, 3, 24, 3]),
+ (b"ffffeeffbb", 28, &[11, 16, 11, 15, 14, 18, 13, 25, 2]),
+ (b"ffffeeffbb", 29, &[6, 8, 7, 19, 14, 13, 21, 5, 2]),
+ (b"ffffeeffbb", 30, &[5, 13, 18, 11, 10, 7, 8, 20, 1]),
+ (b"ffffeeffbb", 31, &[22, 26, 15, 19, 8, 27, 29, 8, 1]),
+ (b"ffffeeffbb", 32, &[27, 29, 31, 29, 30, 31, 31, 31]),
+ (b"ffffeeffbb", 33, &[32, 20, 27, 12, 1, 12, 26, 25]),
+ (b"ffffeeffbb", 34, &[17, 9, 16, 33, 13, 25, 31, 20]),
+ (b"ffffeeffbb", 35, &[25, 32, 2, 25, 11, 4, 3, 17]),
+ (b"ffffeeffbb", 36, &[35, 34, 5, 6, 32, 3, 1, 14]),
+ (b"ffffeeffbb", 37, &[16, 21, 18, 4, 33, 19, 21, 11]),
+ (b"ffffeeffbb", 38, &[33, 25, 19, 29, 20, 6, 23, 9]),
+ (b"ffffeeffbb", 39, &[26, 27, 29, 23, 16, 18, 0, 8]),
+ (b"ffffeeffbb", 40, &[35, 39, 30, 11, 16, 17, 28, 6]),
+ (b"ffffeeffbb", 41, &[36, 30, 9, 18, 12, 19, 26, 5]),
+ (b"ffffeeffbb", 42, &[11, 34, 37, 27, 1, 13, 32, 4]),
+ (b"ffffeeffbb", 43, &[3, 24, 11, 2, 10, 40, 1, 4]),
+ (b"ffffeeffbb", 44, &[43, 12, 40, 32, 3, 23, 19, 3]),
+ (b"ffffeeffbb", 45, &[35, 38, 44, 18, 22, 18, 42, 2]),
+ (b"ffffeeffbb", 46, &[21, 45, 18, 41, 17, 2, 24, 2]),
+ (b"ffffeeffbb", 47, &[37, 37, 11, 12, 6, 0, 8, 2]),
+ (b"ffffeeffbb", 48, &[11, 41, 40, 43, 5, 43, 41, 1]),
+ (b"ffffeeffbb", 49, &[18, 45, 7, 13, 20, 21, 30, 1]),
+ (b"ffffeeffbb", 50, &[45, 21, 5, 34, 21, 18, 20, 1]),
+ (b"ffffeeffbb", 51, &[17, 6, 26, 22, 38, 24, 11, 1]),
+ (b"ffffeeffbb", 52, &[39, 33, 38, 30, 46, 31, 3, 1]),
+ (b"ffffeeffbb", 53, &[31, 7, 44, 23, 9, 32, 49]),
+ (b"ffffeeffbb", 54, &[17, 35, 8, 37, 31, 18, 44]),
+ (b"ffffeeffbb", 55, &[10, 52, 9, 48, 36, 39, 39]),
+ (b"ffffeeffbb", 56, &[11, 50, 51, 22, 25, 36, 35]),
+ (b"ffffeeffbb", 57, &[14, 55, 12, 43, 20, 3, 32]),
+ (b"ffffeeffbb", 58, &[35, 18, 45, 56, 9, 51, 28]),
+ (b"ffffeeffbb", 59, &[51, 28, 20, 26, 55, 3, 26]),
+ (b"ffffeeffbb", 60, &[35, 6, 27, 46, 58, 33, 23]),
+ (b"ffffeeffbb", 61, &[58, 7, 6, 54, 49, 20, 21]),
+ (b"ffffeeffbb", 62, &[53, 59, 3, 14, 10, 22, 19]),
+ (b"ffffeeffbb", 63, &[53, 50, 23, 4, 56, 36, 17]),
+ (b"ffffeeffbb", 64, &[59, 62, 47, 59, 63, 63, 15]),
+ (b"ffffeeffbb", 65, &[0, 53, 39, 4, 40, 37, 14]),
+ (b"ffffeeffbb", 66, &[65, 59, 39, 1, 64, 19, 13]),
+ (b"ffffeeffbb", 67, &[35, 14, 19, 16, 25, 10, 12]),
+ (b"ffffeeffbb", 68, &[51, 38, 63, 50, 15, 8, 11]),
+ (b"ffffeeffbb", 69, &[44, 45, 18, 58, 68, 12, 10]),
+ (b"ffffeeffbb", 70, &[25, 51, 0, 60, 13, 24, 9]),
+ (b"ffffeeffbb", 71, &[54, 30, 9, 65, 28, 41, 8]),
+ (b"ffffeeffbb", 72, &[35, 35, 55, 54, 17, 64, 7]),
+ (b"ffffeeffbb", 73, &[34, 4, 48, 40, 27, 19, 7]),
+ (b"ffffeeffbb", 74, &[53, 47, 4, 56, 36, 51, 6]),
+ (b"ffffeeffbb", 75, &[20, 56, 10, 72, 24, 13, 6]),
+ (b"ffffeeffbb", 76, &[71, 31, 52, 60, 48, 53, 5]),
+ (b"ffffeeffbb", 77, &[32, 73, 14, 63, 15, 21, 5]),
+ (b"ffffeeffbb", 78, &[65, 13, 17, 32, 64, 68, 4]),
+ (b"ffffeeffbb", 79, &[37, 56, 2, 56, 25, 41, 4]),
+ (b"ffffeeffbb", 80, &[75, 59, 37, 41, 43, 15, 4]),
+ (b"ffffeeffbb", 81, &[44, 68, 0, 21, 27, 72, 3]),
+ (b"ffffeeffbb", 82, &[77, 35, 2, 74, 46, 50, 3]),
+ (b"ffffeeffbb", 83, &[52, 51, 19, 76, 10, 30, 3]),
+ (b"ffffeeffbb", 84, &[11, 80, 19, 19, 76, 10, 3]),
+ (b"ffffeeffbb", 85, &[0, 82, 20, 14, 68, 77, 2]),
+ (b"ffffeeffbb", 86, &[3, 12, 78, 37, 62, 61, 2]),
+ (b"ffffeeffbb", 87, &[35, 12, 20, 8, 52, 46, 2]),
+ (b"ffffeeffbb", 88, &[43, 6, 54, 42, 30, 32, 2]),
+ (b"ffffeeffbb", 89, &[49, 52, 85, 21, 80, 18, 2]),
+ (b"ffffeeffbb", 90, &[35, 64, 78, 24, 18, 6, 2]),
+ (b"ffffeeffbb", 91, &[39, 17, 83, 63, 17, 85, 1]),
+ (b"ffffeeffbb", 92, &[67, 22, 85, 79, 75, 74, 1]),
+ (b"ffffeeffbb", 93, &[53, 60, 39, 29, 4, 65, 1]),
+ (b"ffffeeffbb", 94, &[37, 89, 2, 72, 76, 55, 1]),
+ (b"ffffeeffbb", 95, &[90, 74, 89, 9, 9, 47, 1]),
+ (b"ffffeeffbb", 96, &[59, 20, 46, 35, 81, 38, 1]),
+ (b"ffffeeffbb", 97, &[94, 87, 60, 71, 3, 31, 1]),
+ (b"ffffeeffbb", 98, &[67, 22, 63, 50, 62, 23, 1]),
+ (b"ffffeeffbb", 99, &[98, 6, 69, 12, 61, 16, 1]),
+ (b"ffffeeffbb", 100, &[95, 35, 51, 10, 95, 9, 1]),
+ (b"ffffeeffbb", 101, &[87, 27, 7, 8, 62, 3, 1]),
+ (b"ffffeeffbb", 102, &[17, 3, 32, 79, 59, 99]),
+ (b"ffffeeffbb", 103, &[30, 22, 90, 0, 87, 94]),
+ (b"ffffeeffbb", 104, &[91, 68, 87, 68, 38, 90]),
+ (b"ffffeeffbb", 105, &[95, 80, 54, 73, 15, 86]),
+ (b"ffffeeffbb", 106, &[31, 30, 24, 16, 17, 82]),
+ (b"ffffeeffbb", 107, &[51, 50, 10, 12, 42, 78]),
+ (b"ffffeeffbb", 108, &[71, 71, 96, 78, 89, 74]),
+ (b"ffffeeffbb", 109, &[33, 18, 93, 22, 50, 71]),
+ (b"ffffeeffbb", 110, &[65, 53, 57, 88, 29, 68]),
+ (b"ffffeeffbb", 111, &[53, 93, 67, 90, 27, 65]),
+ (b"ffffeeffbb", 112, &[11, 109, 96, 65, 43, 62]),
+ (b"ffffeeffbb", 113, &[27, 23, 106, 56, 76, 59]),
+ (b"ffffeeffbb", 114, &[71, 84, 31, 112, 11, 57]),
+ (b"ffffeeffbb", 115, &[90, 22, 1, 56, 76, 54]),
+ (b"ffffeeffbb", 116, &[35, 38, 98, 57, 40, 52]),
+ (b"ffffeeffbb", 117, &[26, 113, 115, 62, 17, 50]),
+ (b"ffffeeffbb", 118, &[51, 14, 5, 18, 7, 48]),
+ (b"ffffeeffbb", 119, &[102, 31, 110, 108, 8, 46]),
+ (b"ffffeeffbb", 120, &[35, 93, 96, 50, 22, 44]),
+ (b"ffffeeffbb", 121, &[87, 61, 2, 36, 47, 42]),
+ (b"ffffeeffbb", 122, &[119, 64, 1, 22, 83, 40]),
+ (b"ffffeeffbb", 123, &[77, 119, 32, 90, 6, 39]),
+ (b"ffffeeffbb", 124, &[115, 122, 31, 79, 62, 37]),
+ (b"ffffeeffbb", 125, &[95, 108, 47, 74, 3, 36]),
+ (b"ffffeeffbb", 126, &[53, 25, 116, 39, 78, 34]),
+ (b"ffffeeffbb", 127, &[22, 23, 125, 67, 35, 33]),
+ (b"ffffeeffbb", 128, &[59, 127, 59, 127, 127, 31]),
+ (b"ffffeeffbb", 129, &[89, 36, 1, 59, 100, 30]),
+ (b"ffffeeffbb", 130, &[65, 91, 123, 89, 79, 29]),
+ (b"ffffeeffbb", 131, &[58, 72, 39, 63, 65, 28]),
+ (b"ffffeeffbb", 132, &[131, 62, 92, 82, 57, 27]),
+ (b"ffffeeffbb", 133, &[109, 31, 51, 123, 55, 26]),
+ (b"ffffeeffbb", 134, &[35, 74, 21, 27, 60, 25]),
+ (b"ffffeeffbb", 135, &[125, 132, 49, 37, 70, 24]),
+ (b"ffffeeffbb", 136, &[51, 121, 117, 133, 85, 23]),
+ (b"ffffeeffbb", 137, &[113, 60, 135, 22, 107, 22]),
+ (b"ffffeeffbb", 138, &[113, 91, 73, 93, 133, 21]),
+ (b"ffffeeffbb", 139, &[114, 75, 102, 51, 26, 21]),
+ (b"ffffeeffbb", 140, &[95, 25, 35, 16, 62, 20]),
+ (b"ffffeeffbb", 141, &[131, 137, 16, 110, 102, 19]),
+ (b"ffffeeffbb", 142, &[125, 121, 108, 34, 6, 19]),
+ (b"ffffeeffbb", 143, &[65, 78, 138, 55, 55, 18]),
+ (b"ffffeeffbb", 144, &[107, 125, 121, 15, 109, 17]),
+ (b"ffffeeffbb", 145, &[35, 13, 122, 42, 22, 17]),
+ (b"ffffeeffbb", 146, &[107, 38, 103, 123, 83, 16]),
+ (b"ffffeeffbb", 147, &[116, 96, 71, 98, 2, 16]),
+ (b"ffffeeffbb", 148, &[127, 23, 75, 99, 71, 15]),
+ (b"ffffeeffbb", 149, &[136, 110, 53, 114, 144, 14]),
+ (b"ffffeeffbb", 150, &[95, 140, 133, 130, 71, 14]),
+ (b"ffffeeffbb", 151, &[15, 50, 29, 137, 0, 14]),
+ (b"ffffeeffbb", 152, &[147, 15, 89, 121, 83, 13]),
+ (b"ffffeeffbb", 153, &[17, 87, 93, 72, 17, 13]),
+ (b"ffffeeffbb", 154, &[109, 113, 3, 133, 106, 12]),
+ (b"ffffeeffbb", 155, &[115, 141, 120, 139, 44, 12]),
+ (b"ffffeeffbb", 156, &[143, 45, 4, 82, 140, 11]),
+ (b"ffffeeffbb", 157, &[149, 92, 15, 106, 82, 11]),
+ (b"ffffeeffbb", 158, &[37, 107, 79, 46, 26, 11]),
+ (b"ffffeeffbb", 159, &[137, 37, 146, 51, 130, 10]),
+ (b"ffffeeffbb", 160, &[155, 69, 29, 115, 77, 10]),
+ (b"ffffeeffbb", 161, &[67, 98, 46, 68, 26, 10]),
+ (b"ffffeeffbb", 162, &[125, 155, 60, 63, 138, 9]),
+ (b"ffffeeffbb", 163, &[96, 43, 118, 93, 90, 9]),
+ (b"ffffeeffbb", 164, &[159, 99, 123, 152, 43, 9]),
+ (b"ffffeeffbb", 165, &[65, 17, 1, 69, 163, 8]),
+ (b"ffffeeffbb", 166, &[135, 108, 25, 165, 119, 8]),
+ (b"ffffeeffbb", 167, &[165, 116, 164, 103, 77, 8]),
+ (b"ffffeeffbb", 168, &[11, 166, 67, 44, 36, 8]),
+ (b"ffffeeffbb", 169, &[65, 59, 71, 149, 164, 7]),
+ (b"ffffeeffbb", 170, &[85, 83, 26, 76, 126, 7]),
+ (b"ffffeeffbb", 171, &[71, 132, 140, 157, 88, 7]),
+ (b"ffffeeffbb", 172, &[3, 6, 127, 47, 52, 7]),
+ (b"ffffeeffbb", 173, &[122, 66, 53, 83, 16, 7]),
+ (b"ffffeeffbb", 174, &[35, 6, 5, 88, 155, 6]),
+ (b"ffffeeffbb", 175, &[95, 20, 84, 56, 122, 6]),
+ (b"ffffeeffbb", 176, &[43, 91, 57, 159, 89, 6]),
+ (b"ffffeeffbb", 177, &[110, 127, 54, 40, 58, 6]),
+ (b"ffffeeffbb", 178, &[49, 115, 43, 47, 27, 6]),
+ (b"ffffeeffbb", 179, &[130, 91, 4, 178, 175, 5]),
+ (b"ffffeeffbb", 180, &[35, 122, 109, 70, 147, 5]),
+ (b"ffffeeffbb", 181, &[94, 94, 4, 79, 119, 5]),
+ (b"ffffeeffbb", 182, &[39, 54, 66, 19, 92, 5]),
+ (b"ffffeeffbb", 183, &[119, 2, 143, 69, 65, 5]),
+ (b"ffffeeffbb", 184, &[67, 57, 90, 44, 39, 5]),
+ (b"ffffeeffbb", 185, &[90, 63, 141, 123, 13, 5]),
+ (b"ffffeeffbb", 186, &[53, 123, 172, 119, 174, 4]),
+ (b"ffffeeffbb", 187, &[153, 21, 68, 28, 151, 4]),
+ (b"ffffeeffbb", 188, &[131, 138, 94, 32, 128, 4]),
+ (b"ffffeeffbb", 189, &[179, 121, 156, 130, 105, 4]),
+ (b"ffffeeffbb", 190, &[185, 179, 164, 131, 83, 4]),
+ (b"ffffeeffbb", 191, &[118, 123, 37, 31, 62, 4]),
+ (b"ffffeeffbb", 192, &[59, 106, 83, 16, 41, 4]),
+ (b"ffffeeffbb", 193, &[57, 37, 47, 86, 20, 4]),
+ (b"ffffeeffbb", 194, &[191, 140, 63, 45, 0, 4]),
+ (b"ffffeeffbb", 195, &[65, 169, 83, 84, 175, 3]),
+ (b"ffffeeffbb", 196, &[67, 158, 64, 6, 157, 3]),
+ (b"ffffeeffbb", 197, &[121, 26, 167, 3, 139, 3]),
+ (b"ffffeeffbb", 198, &[197, 151, 165, 75, 121, 3]),
+ (b"ffffeeffbb", 199, &[55, 175, 36, 22, 104, 3]),
+ (b"ffffeeffbb", 200, &[195, 167, 162, 38, 87, 3]),
+ (b"ffffeeffbb", 201, &[35, 27, 136, 124, 70, 3]),
+ (b"ffffeeffbb", 202, &[87, 64, 153, 76, 54, 3]),
+ (b"ffffeeffbb", 203, &[151, 191, 14, 94, 38, 3]),
+ (b"ffffeeffbb", 204, &[119, 103, 135, 175, 22, 3]),
+ (b"ffffeeffbb", 205, &[200, 79, 123, 115, 7, 3]),
+ (b"ffffeeffbb", 206, &[133, 165, 202, 115, 198, 2]),
+ (b"ffffeeffbb", 207, &[44, 153, 193, 175, 184, 2]),
+ (b"ffffeeffbb", 208, &[91, 190, 125, 86, 171, 2]),
+ (b"ffffeeffbb", 209, &[109, 151, 34, 53, 158, 2]),
+ (b"ffffeeffbb", 210, &[95, 40, 171, 74, 145, 2]),
+ (b"ffffeeffbb", 211, &[84, 195, 162, 150, 132, 2]),
+ (b"ffffeeffbb", 212, &[31, 15, 59, 68, 120, 2]),
+ (b"ffffeeffbb", 213, &[125, 57, 127, 36, 108, 2]),
+ (b"ffffeeffbb", 214, &[51, 132, 2, 55, 96, 2]),
+ (b"ffffeeffbb", 215, &[175, 133, 177, 122, 84, 2]),
+ (b"ffffeeffbb", 216, &[179, 35, 78, 23, 73, 2]),
+ (b"ffffeeffbb", 217, &[53, 101, 208, 186, 61, 2]),
+ (b"ffffeeffbb", 218, &[33, 9, 214, 179, 50, 2]),
+ (b"ffffeeffbb", 219, &[107, 147, 175, 217, 39, 2]),
+ (b"ffffeeffbb", 220, &[175, 81, 179, 79, 29, 2]),
+ (b"ffffeeffbb", 221, &[0, 76, 95, 204, 18, 2]),
+ (b"ffffeeffbb", 222, &[53, 213, 16, 150, 8, 2]),
+ (b"ffffeeffbb", 223, &[158, 161, 42, 136, 221, 1]),
+ (b"ffffeeffbb", 224, &[123, 54, 52, 162, 212, 1]),
+ (b"ffffeeffbb", 225, &[170, 43, 151, 2, 204, 1]),
+ (b"ffffeeffbb", 226, &[27, 68, 224, 105, 195, 1]),
+ (b"ffffeeffbb", 227, &[45, 69, 157, 20, 187, 1]),
+ (b"ffffeeffbb", 228, &[71, 213, 64, 199, 178, 1]),
+ (b"ffffeeffbb", 229, &[129, 203, 66, 186, 170, 1]),
+ (b"ffffeeffbb", 230, &[205, 183, 57, 208, 162, 1]),
+ (b"ffffeeffbb", 231, &[32, 50, 164, 33, 155, 1]),
+ (b"ffffeeffbb", 232, &[35, 135, 53, 123, 147, 1]),
+ (b"ffffeeffbb", 233, &[209, 47, 89, 13, 140, 1]),
+ (b"ffffeeffbb", 234, &[143, 56, 175, 168, 132, 1]),
+ (b"ffffeeffbb", 235, &[225, 157, 216, 121, 125, 1]),
+ (b"ffffeeffbb", 236, &[51, 66, 119, 105, 118, 1]),
+ (b"ffffeeffbb", 237, &[116, 150, 26, 119, 111, 1]),
+ (b"ffffeeffbb", 238, &[221, 15, 87, 162, 104, 1]),
+ (b"ffffeeffbb", 239, &[234, 155, 214, 234, 97, 1]),
+ (b"ffffeeffbb", 240, &[155, 46, 84, 96, 91, 1]),
+ (b"ffffeeffbb", 241, &[187, 48, 90, 225, 84, 1]),
+ (b"ffffeeffbb", 242, &[87, 212, 151, 140, 78, 1]),
+ (b"ffffeeffbb", 243, &[206, 22, 189, 81, 72, 1]),
+ (b"ffffeeffbb", 244, &[119, 93, 122, 48, 66, 1]),
+ (b"ffffeeffbb", 245, &[165, 224, 117, 40, 60, 1]),
+ (b"ffffeeffbb", 246, &[77, 121, 100, 57, 54, 1]),
+ (b"ffffeeffbb", 247, &[52, 128, 242, 98, 48, 1]),
+ (b"ffffeeffbb", 248, &[115, 247, 224, 164, 42, 1]),
+ (b"ffffeeffbb", 249, &[218, 127, 223, 5, 37, 1]),
+ (b"ffffeeffbb", 250, &[95, 54, 168, 118, 31, 1]),
+ (b"ffffeeffbb", 251, &[121, 204, 240, 3, 26, 1]),
+ (b"ffffeeffbb", 252, &[179, 138, 123, 162, 20, 1]),
+ (b"ffffeeffbb", 253, &[21, 50, 1, 91, 15, 1]),
+ (b"ffffeeffbb", 254, &[149, 11, 63, 40, 10, 1]),
+ (b"ffffeeffbb", 255, &[170, 225, 247, 9, 5, 1]),
+ (b"ffffeeffbb", 256, &[187, 255, 238, 255, 255]),
+ ];
+
+ for &(bigint, radix, inbaseradix_le) in GROUND_TRUTH.iter() {
+ let bigint = BigUint::parse_bytes(bigint, 16).unwrap();
+ // to_radix_le
+ assert_eq!(bigint.to_radix_le(radix), inbaseradix_le);
+ // to_radix_be
+ let mut inbase_be = bigint.to_radix_be(radix);
+ inbase_be.reverse(); // now le
+ assert_eq!(inbase_be, inbaseradix_le);
+ // from_radix_le
+ assert_eq!(
+ BigUint::from_radix_le(inbaseradix_le, radix).unwrap(),
+ bigint
+ );
+ // from_radix_be
+ let mut inbaseradix_be = Vec::from(inbaseradix_le);
+ inbaseradix_be.reverse();
+ assert_eq!(
+ BigUint::from_radix_be(&inbaseradix_be, radix).unwrap(),
+ bigint
+ );
+ }
+
+ assert!(BigUint::from_radix_le(&[10, 100, 10], 50).is_none());
+ assert_eq!(BigUint::from_radix_le(&[], 2), Some(BigUint::zero()));
+ assert_eq!(BigUint::from_radix_be(&[], 2), Some(BigUint::zero()));
+}
+
+#[test]
+fn test_from_str_radix() {
+ let r = to_str_pairs();
+ for num_pair in r.iter() {
+ let &(ref n, ref rs) = num_pair;
+ for str_pair in rs.iter() {
+ let &(ref radix, ref str) = str_pair;
+ assert_eq!(n, &BigUint::from_str_radix(str, *radix).unwrap());
+ }
+ }
+
+ let zed = BigUint::from_str_radix("Z", 10).ok();
+ assert_eq!(zed, None);
+ let blank = BigUint::from_str_radix("_", 2).ok();
+ assert_eq!(blank, None);
+ let blank_one = BigUint::from_str_radix("_1", 2).ok();
+ assert_eq!(blank_one, None);
+ let plus_one = BigUint::from_str_radix("+1", 10).ok();
+ assert_eq!(plus_one, Some(BigUint::from_slice(&[1])));
+ let plus_plus_one = BigUint::from_str_radix("++1", 10).ok();
+ assert_eq!(plus_plus_one, None);
+ let minus_one = BigUint::from_str_radix("-1", 10).ok();
+ assert_eq!(minus_one, None);
+ let zero_plus_two = BigUint::from_str_radix("0+2", 10).ok();
+ assert_eq!(zero_plus_two, None);
+ let three = BigUint::from_str_radix("1_1", 2).ok();
+ assert_eq!(three, Some(BigUint::from_slice(&[3])));
+ let ff = BigUint::from_str_radix("1111_1111", 2).ok();
+ assert_eq!(ff, Some(BigUint::from_slice(&[0xff])));
+}
+
+#[test]
+fn test_all_str_radix() {
+ let n = BigUint::new((0..10).collect());
+ for radix in 2..37 {
+ let s = n.to_str_radix(radix);
+ let x = BigUint::from_str_radix(&s, radix);
+ assert_eq!(x.unwrap(), n);
+
+ let s = s.to_ascii_uppercase();
+ let x = BigUint::from_str_radix(&s, radix);
+ assert_eq!(x.unwrap(), n);
+ }
+}
+
+#[test]
+fn test_big_str() {
+ for n in 2..=20_u32 {
+ let x: BigUint = BigUint::from(n).pow(10_000_u32);
+ let s = x.to_string();
+ let y: BigUint = s.parse().unwrap();
+ assert_eq!(x, y);
+ }
+}
+
+#[test]
+fn test_lower_hex() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:x}", a), "a");
+ assert_eq!(format!("{:x}", hello), "48656c6c6f20776f726c6421");
+ assert_eq!(format!("{:♥>+#8x}", a), "♥♥♥♥+0xa");
+}
+
+#[test]
+fn test_upper_hex() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:X}", a), "A");
+ assert_eq!(format!("{:X}", hello), "48656C6C6F20776F726C6421");
+ assert_eq!(format!("{:♥>+#8X}", a), "♥♥♥♥+0xA");
+}
+
+#[test]
+fn test_binary() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"224055342307539", 10).unwrap();
+
+ assert_eq!(format!("{:b}", a), "1010");
+ assert_eq!(
+ format!("{:b}", hello),
+ "110010111100011011110011000101101001100011010011"
+ );
+ assert_eq!(format!("{:♥>+#8b}", a), "♥+0b1010");
+}
+
+#[test]
+fn test_octal() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{:o}", a), "12");
+ assert_eq!(format!("{:o}", hello), "22062554330674403566756233062041");
+ assert_eq!(format!("{:♥>+#8o}", a), "♥♥♥+0o12");
+}
+
+#[test]
+fn test_display() {
+ let a = BigUint::parse_bytes(b"A", 16).unwrap();
+ let hello = BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap();
+
+ assert_eq!(format!("{}", a), "10");
+ assert_eq!(format!("{}", hello), "22405534230753963835153736737");
+ assert_eq!(format!("{:♥>+#8}", a), "♥♥♥♥♥+10");
+}
+
+#[test]
+fn test_factor() {
+ fn factor(n: usize) -> BigUint {
+ let mut f: BigUint = One::one();
+ for i in 2..=n {
+ // FIXME(#5992): assignment operator overloads
+ // f *= FromPrimitive::from_usize(i);
+ let bu: BigUint = FromPrimitive::from_usize(i).unwrap();
+ f *= bu;
+ }
+ f
+ }
+
+ fn check(n: usize, s: &str) {
+ let n = factor(n);
+ let ans = BigUint::from_str_radix(s, 10).unwrap();
+ assert_eq!(n, ans);
+ }
+
+ check(3, "6");
+ check(10, "3628800");
+ check(20, "2432902008176640000");
+ check(30, "265252859812191058636308480000000");
+}
+
+#[test]
+fn test_bits() {
+ assert_eq!(BigUint::new(vec![0, 0, 0, 0]).bits(), 0);
+ let n: BigUint = FromPrimitive::from_usize(0).unwrap();
+ assert_eq!(n.bits(), 0);
+ let n: BigUint = FromPrimitive::from_usize(1).unwrap();
+ assert_eq!(n.bits(), 1);
+ let n: BigUint = FromPrimitive::from_usize(3).unwrap();
+ assert_eq!(n.bits(), 2);
+ let n: BigUint = BigUint::from_str_radix("4000000000", 16).unwrap();
+ assert_eq!(n.bits(), 39);
+ let one: BigUint = One::one();
+ assert_eq!((one << 426u16).bits(), 427);
+}
+
+#[test]
+fn test_iter_sum() {
+ let result: BigUint = FromPrimitive::from_isize(1234567).unwrap();
+ let data: Vec<BigUint> = vec![
+ FromPrimitive::from_u32(1000000).unwrap(),
+ FromPrimitive::from_u32(200000).unwrap(),
+ FromPrimitive::from_u32(30000).unwrap(),
+ FromPrimitive::from_u32(4000).unwrap(),
+ FromPrimitive::from_u32(500).unwrap(),
+ FromPrimitive::from_u32(60).unwrap(),
+ FromPrimitive::from_u32(7).unwrap(),
+ ];
+
+ assert_eq!(result, data.iter().sum::<BigUint>());
+ assert_eq!(result, data.into_iter().sum::<BigUint>());
+}
+
+#[test]
+fn test_iter_product() {
+ let data: Vec<BigUint> = vec![
+ FromPrimitive::from_u32(1001).unwrap(),
+ FromPrimitive::from_u32(1002).unwrap(),
+ FromPrimitive::from_u32(1003).unwrap(),
+ FromPrimitive::from_u32(1004).unwrap(),
+ FromPrimitive::from_u32(1005).unwrap(),
+ ];
+ let result = data.get(0).unwrap()
+ * data.get(1).unwrap()
+ * data.get(2).unwrap()
+ * data.get(3).unwrap()
+ * data.get(4).unwrap();
+
+ assert_eq!(result, data.iter().product::<BigUint>());
+ assert_eq!(result, data.into_iter().product::<BigUint>());
+}
+
+#[test]
+fn test_iter_sum_generic() {
+ let result: BigUint = FromPrimitive::from_isize(1234567).unwrap();
+ let data = vec![1000000_u32, 200000, 30000, 4000, 500, 60, 7];
+
+ assert_eq!(result, data.iter().sum::<BigUint>());
+ assert_eq!(result, data.into_iter().sum::<BigUint>());
+}
+
+#[test]
+fn test_iter_product_generic() {
+ let data = vec![1001_u32, 1002, 1003, 1004, 1005];
+ let result = data[0].to_biguint().unwrap()
+ * data[1].to_biguint().unwrap()
+ * data[2].to_biguint().unwrap()
+ * data[3].to_biguint().unwrap()
+ * data[4].to_biguint().unwrap();
+
+ assert_eq!(result, data.iter().product::<BigUint>());
+ assert_eq!(result, data.into_iter().product::<BigUint>());
+}
+
+#[test]
+fn test_pow() {
+ let one = BigUint::from(1u32);
+ let two = BigUint::from(2u32);
+ let four = BigUint::from(4u32);
+ let eight = BigUint::from(8u32);
+ let tentwentyfour = BigUint::from(1024u32);
+ let twentyfourtyeight = BigUint::from(2048u32);
+ macro_rules! check {
+ ($t:ty) => {
+ assert_eq!(Pow::pow(&two, 0 as $t), one);
+ assert_eq!(Pow::pow(&two, 1 as $t), two);
+ assert_eq!(Pow::pow(&two, 2 as $t), four);
+ assert_eq!(Pow::pow(&two, 3 as $t), eight);
+ assert_eq!(Pow::pow(&two, 10 as $t), tentwentyfour);
+ assert_eq!(Pow::pow(&two, 11 as $t), twentyfourtyeight);
+ assert_eq!(Pow::pow(&two, &(11 as $t)), twentyfourtyeight);
+ };
+ }
+ check!(u8);
+ check!(u16);
+ check!(u32);
+ check!(u64);
+ check!(u128);
+ check!(usize);
+
+ let pow_1e10000 = BigUint::from(10u32).pow(10_000_u32);
+ let manual_1e10000 = repeat(10u32).take(10_000).product::<BigUint>();
+ assert!(manual_1e10000 == pow_1e10000);
+}
+
+#[test]
+fn test_trailing_zeros() {
+ assert!(BigUint::from(0u8).trailing_zeros().is_none());
+ assert_eq!(BigUint::from(1u8).trailing_zeros().unwrap(), 0);
+ assert_eq!(BigUint::from(2u8).trailing_zeros().unwrap(), 1);
+ let x: BigUint = BigUint::one() << 128;
+ assert_eq!(x.trailing_zeros().unwrap(), 128);
+}
+
+#[test]
+fn test_trailing_ones() {
+ assert_eq!(BigUint::from(0u8).trailing_ones(), 0);
+ assert_eq!(BigUint::from(1u8).trailing_ones(), 1);
+ assert_eq!(BigUint::from(2u8).trailing_ones(), 0);
+ assert_eq!(BigUint::from(3u8).trailing_ones(), 2);
+ let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8);
+ assert_eq!(x.trailing_ones(), 2);
+ let x: BigUint = (BigUint::one() << 128) - BigUint::one();
+ assert_eq!(x.trailing_ones(), 128);
+}
+
+#[test]
+fn test_count_ones() {
+ assert_eq!(BigUint::from(0u8).count_ones(), 0);
+ assert_eq!(BigUint::from(1u8).count_ones(), 1);
+ assert_eq!(BigUint::from(2u8).count_ones(), 1);
+ assert_eq!(BigUint::from(3u8).count_ones(), 2);
+ let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8);
+ assert_eq!(x.count_ones(), 4);
+}
+
+#[test]
+fn test_bit() {
+ assert!(!BigUint::from(0u8).bit(0));
+ assert!(!BigUint::from(0u8).bit(100));
+ assert!(!BigUint::from(42u8).bit(4));
+ assert!(BigUint::from(42u8).bit(5));
+ let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8);
+ assert!(x.bit(129));
+ assert!(!x.bit(130));
+}
+
+#[test]
+fn test_set_bit() {
+ let mut x = BigUint::from(3u8);
+ x.set_bit(128, true);
+ x.set_bit(129, true);
+ assert_eq!(x, (BigUint::from(3u8) << 128) | BigUint::from(3u8));
+ x.set_bit(0, false);
+ x.set_bit(128, false);
+ x.set_bit(130, false);
+ assert_eq!(x, (BigUint::from(2u8) << 128) | BigUint::from(2u8));
+ x.set_bit(129, false);
+ x.set_bit(1, false);
+ assert_eq!(x, BigUint::zero());
+}
diff --git a/rust/vendor/num-bigint/tests/biguint_scalar.rs b/rust/vendor/num-bigint/tests/biguint_scalar.rs
new file mode 100644
index 0000000..7c34f7e
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/biguint_scalar.rs
@@ -0,0 +1,123 @@
+use num_bigint::BigUint;
+use num_traits::{One, ToPrimitive, Zero};
+
+use std::panic::catch_unwind;
+
+mod consts;
+use crate::consts::*;
+
+#[macro_use]
+mod macros;
+
+#[test]
+fn test_scalar_add() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_unsigned_scalar_op!(x + y == z);
+ assert_unsigned_scalar_assign_op!(x += y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ }
+}
+
+#[test]
+fn test_scalar_sub() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_unsigned_scalar_op!(x - y == z);
+ assert_unsigned_scalar_assign_op!(x -= y == z);
+ }
+
+ for elm in SUM_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ check(&c, &a, &b);
+ check(&c, &b, &a);
+ }
+}
+
+#[test]
+fn test_scalar_mul() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
+ let (x, y, z) = (x.clone(), y.clone(), z.clone());
+ assert_unsigned_scalar_op!(x * y == z);
+ assert_unsigned_scalar_assign_op!(x *= y == z);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ check(&a, &b, &c);
+ check(&b, &a, &c);
+ }
+}
+
+#[test]
+fn test_scalar_rem_noncommutative() {
+ assert_eq!(5u8 % BigUint::from(7u8), BigUint::from(5u8));
+ assert_eq!(BigUint::from(5u8) % 7u8, BigUint::from(5u8));
+}
+
+#[test]
+fn test_scalar_div_rem() {
+ fn check(x: &BigUint, y: &BigUint, z: &BigUint, r: &BigUint) {
+ let (x, y, z, r) = (x.clone(), y.clone(), z.clone(), r.clone());
+ assert_unsigned_scalar_op!(x / y == z);
+ assert_unsigned_scalar_op!(x % y == r);
+ assert_unsigned_scalar_assign_op!(x /= y == z);
+ assert_unsigned_scalar_assign_op!(x %= y == r);
+ }
+
+ for elm in MUL_TRIPLES.iter() {
+ let (a_vec, b_vec, c_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+
+ if !a.is_zero() {
+ check(&c, &a, &b, &Zero::zero());
+ }
+
+ if !b.is_zero() {
+ check(&c, &b, &a, &Zero::zero());
+ }
+ }
+
+ for elm in DIV_REM_QUADRUPLES.iter() {
+ let (a_vec, b_vec, c_vec, d_vec) = *elm;
+ let a = BigUint::from_slice(a_vec);
+ let b = BigUint::from_slice(b_vec);
+ let c = BigUint::from_slice(c_vec);
+ let d = BigUint::from_slice(d_vec);
+
+ if !b.is_zero() {
+ check(&a, &b, &c, &d);
+ assert_unsigned_scalar_op!(a / b == c);
+ assert_unsigned_scalar_op!(a % b == d);
+ assert_unsigned_scalar_assign_op!(a /= b == c);
+ assert_unsigned_scalar_assign_op!(a %= b == d);
+ }
+ }
+}
+
+#[test]
+fn test_scalar_div_rem_zero() {
+ catch_unwind(|| BigUint::zero() / 0u32).unwrap_err();
+ catch_unwind(|| BigUint::zero() % 0u32).unwrap_err();
+ catch_unwind(|| BigUint::one() / 0u32).unwrap_err();
+ catch_unwind(|| BigUint::one() % 0u32).unwrap_err();
+}
diff --git a/rust/vendor/num-bigint/tests/consts/mod.rs b/rust/vendor/num-bigint/tests/consts/mod.rs
new file mode 100644
index 0000000..5d0555d
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/consts/mod.rs
@@ -0,0 +1,51 @@
+#![allow(unused)]
+
+pub const N1: u32 = -1i32 as u32;
+pub const N2: u32 = -2i32 as u32;
+
+pub const SUM_TRIPLES: &[(&[u32], &[u32], &[u32])] = &[
+ (&[], &[], &[]),
+ (&[], &[1], &[1]),
+ (&[1], &[1], &[2]),
+ (&[1], &[1, 1], &[2, 1]),
+ (&[1], &[N1], &[0, 1]),
+ (&[1], &[N1, N1], &[0, 0, 1]),
+ (&[N1, N1], &[N1, N1], &[N2, N1, 1]),
+ (&[1, 1, 1], &[N1, N1], &[0, 1, 2]),
+ (&[2, 2, 1], &[N1, N2], &[1, 1, 2]),
+ (&[1, 2, 2, 1], &[N1, N2], &[0, 1, 3, 1]),
+];
+
+pub const M: u32 = ::std::u32::MAX;
+pub const MUL_TRIPLES: &[(&[u32], &[u32], &[u32])] = &[
+ (&[], &[], &[]),
+ (&[], &[1], &[]),
+ (&[2], &[], &[]),
+ (&[1], &[1], &[1]),
+ (&[2], &[3], &[6]),
+ (&[1], &[1, 1, 1], &[1, 1, 1]),
+ (&[1, 2, 3], &[3], &[3, 6, 9]),
+ (&[1, 1, 1], &[N1], &[N1, N1, N1]),
+ (&[1, 2, 3], &[N1], &[N1, N2, N2, 2]),
+ (&[1, 2, 3, 4], &[N1], &[N1, N2, N2, N2, 3]),
+ (&[N1], &[N1], &[1, N2]),
+ (&[N1, N1], &[N1], &[1, N1, N2]),
+ (&[N1, N1, N1], &[N1], &[1, N1, N1, N2]),
+ (&[N1, N1, N1, N1], &[N1], &[1, N1, N1, N1, N2]),
+ (&[M / 2 + 1], &[2], &[0, 1]),
+ (&[0, M / 2 + 1], &[2], &[0, 0, 1]),
+ (&[1, 2], &[1, 2, 3], &[1, 4, 7, 6]),
+ (&[N1, N1], &[N1, N1, N1], &[1, 0, N1, N2, N1]),
+ (&[N1, N1, N1], &[N1, N1, N1, N1], &[1, 0, 0, N1, N2, N1, N1]),
+ (&[0, 0, 1], &[1, 2, 3], &[0, 0, 1, 2, 3]),
+ (&[0, 0, 1], &[0, 0, 0, 1], &[0, 0, 0, 0, 0, 1]),
+];
+
+pub const DIV_REM_QUADRUPLES: &[(&[u32], &[u32], &[u32], &[u32])] = &[
+ (&[1], &[2], &[], &[1]),
+ (&[3], &[2], &[1], &[1]),
+ (&[1, 1], &[2], &[M / 2 + 1], &[1]),
+ (&[1, 1, 1], &[2], &[M / 2 + 1, M / 2 + 1], &[1]),
+ (&[0, 1], &[N1], &[1], &[1]),
+ (&[N1, N1], &[N2], &[2, 1], &[3]),
+];
diff --git a/rust/vendor/num-bigint/tests/fuzzed.rs b/rust/vendor/num-bigint/tests/fuzzed.rs
new file mode 100644
index 0000000..7ff5641
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/fuzzed.rs
@@ -0,0 +1,185 @@
+//! Check buggy inputs that were found by fuzzing
+
+use num_bigint::BigUint;
+use num_traits::Num;
+
+#[test]
+fn fuzzed_mul_1() {
+ let hex1 = "\
+ cd6839ee857cf791a40494c2e522846eefbca9eca9912fdc1feed4561dbde75c75f1ddca2325ebb1\
+ b9cd6eae07308578e58e57f4ddd7dc239b4fd347b883e37d87232a8e5d5a8690c8dba69c97fe8ac4\
+ 58add18be7e460e03c9d1ae8223db53d20681a4027ffc17d1e43b764791c4db5ff7add849da7e378\
+ ac8d9be0e8b517c490da3c0f944b6a52a0c5dc5217c71da8eec35d2c3110d8b041d2b52f3e2a8904\
+ abcaaca517a8f2ef6cd26ceadd39a1cf9f770bc08f55f5a230cd81961348bb18534245430699de77\
+ d93b805153cffd05dfd0f2cfc2332888cec9c5abf3ece9b4d7886ad94c784bf74fce12853b2a9a75\
+ b62a845151a703446cc20300eafe7332330e992ae88817cd6ccef8877b66a7252300a4664d7074da\
+ 181cd9fd502ea1cd71c0b02db3c009fe970a7d226382cdba5b5576c5c0341694681c7adc4ca2d059\
+ d9a6b300957a2235a4eb6689b71d34dcc4037b520eabd2c8b66604bb662fe2bcf533ba8d242dbc91\
+ f04c1795b9f0fee800d197d8c6e998248b15855a9602b76cb3f94b148d8f71f7d6225b79d63a8e20\
+ 8ec8f0fa56a1c381b6c09bad9886056aec17fc92b9bb0f8625fd3444e40cccc2ede768ddb23c66ad\
+ 59a680a26a26d519d02e4d46ce93cce9e9dd86702bdd376abae0959a0e8e418aa507a63fafb8f422\
+ 83b03dc26f371c5e261a8f90f3ac9e2a6bcc7f0a39c3f73043b5aa5a950d4e945e9f68b2c2e593e3\
+ b995be174714c1967b71f579043f89bfce37437af9388828a3ba0465c88954110cae6d38b638e094\
+ 13c15c9faddd6fb63623fd50e06d00c4d5954e787158b3e4eea7e9fae8b189fa8a204b23ac2f7bbc\
+ b601189c0df2075977c2424336024ba3594172bea87f0f92beb20276ce8510c8ef2a4cd5ede87e7e\
+ 38b3fa49d66fbcd322be686a349c24919f4000000000000000000000000000000000000000000000\
+ 000000000000000000000000000000000";
+ let hex2 = "\
+ 40000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000007";
+
+ // Result produced independently in Python
+ let hex_result = "\
+ 335a0e7ba15f3de469012530b948a11bbbef2a7b2a644bf707fbb515876f79d71d7c777288c97aec\
+ 6e735bab81cc215e396395fd3775f708e6d3f4d1ee20f8df61c8caa39756a1a43236e9a725ffa2b1\
+ 162b7462f9f918380f2746ba088f6d4f481a069009fff05f4790edd91e47136d7fdeb7612769f8de\
+ 2b2366f83a2d45f124368f03e512da94a831771485f1c76a3bb0d74b0c44362c1074ad4bcf8aa241\
+ 2af2ab2945ea3cbbdb349b3ab74e6873e7ddc2f023d57d688c33606584d22ec614d09150c1a6779d\
+ f64ee01454f3ff4177f43cb3f08cca2233b2716afcfb3a6d35e21ab6531e12fdd3f384a14ecaa69d\
+ 6d8aa1145469c0d11b3080c03abf9ccc8cc3a64aba2205f35b33be21ded9a9c948c02919935c1d36\
+ 8607367f540ba8735c702c0b6cf0027fa5c29f4898e0b36e96d55db1700d05a51a071eb71328b416\
+ 7669acc0255e888d693ad9a26dc74d373100ded483aaf4b22d99812ed98bf8af3d4ceea3490b6f24\
+ 7c1305e56e7c3fba003465f631ba660922c56156a580addb2cfe52c52363dc7df58896de758ea388\
+ 23b23c3e95a870e06db026eb6621815abb05ff24ae6ec3e1897f4d1139033330bb79da376c8f19ab\
+ 5669a0289a89b546740b9351b3a4f33a7a77619c0af74ddaaeb8256683a39062a941e98febee3d08\
+ a0ec0f709bcdc7178986a3e43ceb278a9af31fc28e70fdcc10ed6a96a54353a517a7da2cb0b964f8\
+ ee656f85d1c530659edc7d5e410fe26ff38dd0debe4e220a28ee811972225504432b9b4e2d8e3825\
+ 04f05727eb775bed8d88ff54381b40313565539e1c562cf93ba9fa7eba2c627ea28812c8eb0bdeef\
+ 2d804627037c81d65df09090cd8092e8d6505cafaa1fc3e4afac809db3a144323bca93358117f935\
+ 13d3695771180f461cf38bb995b531c9e072f84f04df87ce5ad0315387399d1086f60971dc149e06\
+ c23253a64e46e467b210e704f93f2ec6f60b9b386eb1f629e48d79adf57e018e4827f5cb5e6cc0ba\
+ d3573ea621a84bbc58efaff4abe2d8b7c117fe4a6bd3da03bf4fc61ff9fc5c0ea04f97384cb7df43\
+ 265cf3a65ff5f7a46d0e0fe8426569063ea671cf9e87578c355775ecd1ccc2f44ab329bf20b28ab8\
+ 83a59ea48bf9c0fa6c0c936cad5c415243eb59b76f559e8b1a86fd1daa46cfe4d52e351546f0a082\
+ 394aafeb291eb6a3ae4f661bbda78467b3ab7a63f1e4baebf1174a13c32ea281a49e2a3937fb299e\
+ 393b9116def94e15066cf5265f6566302c5bb8a69df9a8cbb45fce9203f5047ecc1e1331f6a8c9f5\
+ ed31466c9e1c44d13fea4045f621496bf0b893a0187f563f68416c9e0ed8c75c061873b274f38ee5\
+ 041656ef77826fcdc401cc72095c185f3e66b2c37cfcca211fcb4f332ab46a19dbfd4027fd9214a5\
+ 181596f85805bb26ed706328ffcd96a57a1a1303f8ebd10d8fdeec1dc6daf08054db99e2e3e77e96\
+ d85e6c588bff4441bf2baa25ec74a7e803141d6cab09ec6de23c5999548153de0fdfa6cebd738d84\
+ 70e70fd3b4b1441cefa60a9a65650ead11330c83eb1c24173665e3caca83358bbdce0eacf199d1b0\
+ 510a81c6930ab9ecf6a9b85328f2977947945bc251d9f7a87a135d260e965bdce354470b3a131832\
+ a2f1914b1d601db64f1dbcc43ea382d85cd08bb91c7a161ec87bc14c7758c4fc8cfb8e240c8a4988\
+ 5dc10e0dfb7afbed3622fb0561d715254b196ceb42869765dc5cdac5d9c6e20df9b54c6228fa07ac\
+ 44619e3372464fcfd67a10117770ca23369b796d0336de113fa5a3757e8a2819d9815b75738cebd8\
+ 04dd0e29c5f334dae77044fffb5ac000000000000000000000000000000000000000000000000000\
+ 000000000000000000000000000";
+
+ let bn1 = &BigUint::from_str_radix(hex1, 16).unwrap();
+ let bn2 = &BigUint::from_str_radix(hex2, 16).unwrap();
+ let result = BigUint::from_str_radix(hex_result, 16).unwrap();
+
+ assert_eq!(bn1 * bn2, result);
+ assert_eq!(bn2 * bn1, result);
+}
+
+#[test]
+fn fuzzed_mul_2() {
+ let hex_a = "\
+ 812cff04ff812cff04ff8180ff80ffff11ff80ff2cff04ff812cff04ff812cff04ff81232cff047d\
+ ff04ff812cff04ff812cff04ff812cff047f812cff04ff8180ff2cff04ff04ff8180ff2cff04ff04\
+ ff812cbf04ff8180ff2cff04ff812cff0401010000000000000000ffff1a80ffc006c70084ffff80\
+ ffc0064006000084ffff72ffc020ffffffffffff06d709000000dbffffffc799999999b999999999\
+ 99999999000084ffff72ffc02006e1ffffffc70900ffffff00f312ff80ebffffff6f505f6c2e6712\
+ 108970ffff5f6c6f6727020000000000007400000000000000000000a50000000000000000000000\
+ 000000000000000000000000ff812cff04ff812cff2c04ff812cff8180ff2cff04ff04ff818b8b8b\
+ 8b8b8b8b8b8b8b8b8b8b8b8b8b06c70084ffff80ffc006c700847fff80ffc006c700ffff12c70084\
+ ffff80ffc0060000000000000056ff00c789bfff80ffc006c70084ffff80ffc006c700ffff840100\
+ 00000000001289ffc08b8b8b8b8b8b8b2c";
+ let hex_b = "\
+ 7ed300fb007ed300fb007e7f00db00fb007ed3007ed300fb007edcd300fb8200fb007ed300fb007e\
+ d300fb007ed300fb007ed300fbfeffffffffffffa8fb007e7f00d300fb00fb007ed340fb007e7f00\
+ 00fb007ed300fb007ed300fb007e7f00d300fb00fb007e7f00d300fb007efb007e7f00d300fb007e\
+ d300fb007e7f0097d300fb00bf007ed300fb007ed300fb00fb00fb00fbffffffffffffffffffff00\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 000000000000df9b3900ff908fa08d9e968c9a0000e7fffb7fff0000003fd9004c90d8f600de7f00\
+ 3fdf9b3900ff908fa08d9e968cf9b9ff0000ed38ff7b00007f003ff9ffffffffffffffa900ff3876\
+ 000078003ff938ff7b00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d300\
+ fb00fb007e7f00d300fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb017e\
+ d300fb007ed300fb007edcd300fb8200fb007e0000e580";
+ let hex_c = "\
+ 7b00387ffff938ff7b80007f003ff9b9ff00fdec38ff7b00007f003ff9ffffffffffffffa900ff38\
+ 76000078003ff938ff7b00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d3\
+ 00fb00fb007e7f00d300fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb01\
+ 7ed300fb007ed300fb007edcd300fb8200fb007e000000ee7f003f0000007b00387ffff938ff7b80\
+ 007f003ff9b9ff00fdec38ff7b00007f003ff9ffffffffffffffa900ff3876000078003ff938ff7b\
+ 00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d300fb00fb007e7f00d300\
+ fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb017ed300fb007ed300fb00\
+ 7edcd300fb8200fb007e000000ee7f003f000000000000000000000000000000002a000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000df9b3900ff908fa08d9e968c9a0000e7fffb7fff0000003fd9004c90d8\
+ f600de7f003fdf9b3900ff908fa08d9e968c9a0000e7fffa7fff0000004005004c90d8f600de908f\
+ dcd300fb8200fb007e0000e57f003ff938ff7b00007f003d7ed300fb007ed300fb007ed300fb007e\
+ fa00fb007ed300fbf9ffffffffffffffa900ff387600007f003ff938ff7b00007f003ff938fd0000\
+ 7bfeffffffffffffed76003f74747474747474d300fc";
+
+ // Result produced independently in Python
+ let hex_result = "\
+ 1ebf6415da7ac71a689cd450727b7a361402a1626e0b6cd057e0e2a77d4cb722c1b7d0cbd73a7c07\
+ d756813fe97d73d5905c4a26404c7162769ba2dbc1e2742855a1db803e2d2c2fddc77c0598cc70fe\
+ 066fd4b81cae3e23c55b4795de63acacd1343cf5ad5e715e6919d140c01bab1af1a737ebbf8a7775\
+ 7602acd611f555ee2d5be56cc14b97c248009cd77490a3dfd6762bae25459a544e369eb4b0cc952a\
+ 8e6a551ff35a4a7a6e5f5b0b72495c4baadf3a26b9d5d97402ad60fa2324e93adc96ca159b62d147\
+ 5695f26ff27da100a76e2d273420572e61b4dfbd97e826d9d946f85b87434523f6aa7ce43c443285\
+ 33f5b5adf32574167b1e9ea3bf6254d6afacf865894907de196285169cfcc1c0fcf438873d13f7e8\
+ 654acc27c1abb00bec2729e34c994ff2152f60406f75db3ab616541795d9db8ca0b381148de7875f\
+ e7a8191407abc390718003698ca28498948caf1dbc3f02593dd85fa929ebae86cfe783d7be473e98\
+ 0060d9ec60843661cb4cb9b8ddb24bb710f93700b22530501b5ea26c5c94c7370fe0ccbafe0ce7e4\
+ cd4f071d0cf0ac151c85a5b132ecaa75793abfb4a6ee33fddd2aa2f5cf2a8eb19c75322792c0d8dc\
+ 1efb2dcd8ae2b49dd57b84898f531c7f745464f637716151831db56b3e293f587dc95a5e12edfe6b\
+ 8458033dddf3556da55bef55ba3c3769def0c0f0c86786aca8313dc0ce09118760721eb545d69b46\
+ cdb89d377f2c80e67b572da0f75760c2849288a8457c18c6f0b58244b7f95a7567ce23756f1fe359\
+ 64f7e84fbe28b188157519dd99b8798b076e21984d15c37f41da1309e0fbc539e8b9b09fed36a908\
+ 28c94f72e7b755c187e58db6bfef0c02309086626ad0fe2efd2ff1467b3de11e057687865f4f85e7\
+ 0a39bcbc4674dcaded9b04562afe08eb92fbd96ea4a99aa4f9347a075d4421f070ce3a33225f5af1\
+ 9c27ec5d1720e659ca7fff9686f46b01d76d7de64c738671aaec57ee5582ef7956206fb37c6a36f8\
+ 8f226ce2124a7f9894a0e9a7aa02001746e6def35699d7adc84a7dcf513ff3da20fd849950f41a5d\
+ bb02c91666697156d69ebbe2ef26732b6595d1b6d014a60006d2d3c7055ff9b531779195b8dcd7d9\
+ 426e776cbc9041735384568ba4adbf7eeea7e0e6cbb47b70335a7ed12a68904eecd334921e4ae6d9\
+ c983af20d73215c39573963f03bc87082450cc1c70250e1e8eaa318acaf044a072891fc60324d134\
+ 6c0a1d02cceb4d4806e536d6017bf6bc125c41694ded38766fea51bfbf7a008ca0b3eb1168766486\
+ 8aa8469b3e6787a5d5bad6cd67c24005a5cbaa10b63d1b4d05ac42a8b31263052a1260b5900be628\
+ 4dcab4eb0cf5cda815412ced7bd78f87c00ac3581f41a04352a4a186805a5c9e37b14561a5fc97d2\
+ 52ca4654fe3d82f42080c21483789cc4b4cbb568f79844f7a317aa2a6555774da26c6f027d3cb0ee\
+ 9276c6dc4f285fc3b4b9a3cd51c8815cebf110e73c80a9b842cc3b7c80af13f702662b10e868eb61\
+ 947000b390cd2f3a0899f6f1bab86acf767062f5526507790645ae13b9701ba96b3f873047c9d3b8\
+ 5e8a5d904a01fbfe10e63495b6021e7cc082aa66679e4d92b3e4e2d62490b44f7e250584cedff0e7\
+ 072a870ddaa9687a1eae11afc874d83065fb98dbc3cfd90f39517ff3015c71a8c0ab36a6483c7b87\
+ f41b2c832fa9428fe95ffba4e49cc553d9e2d33a540958da51588e5120fef6497bfaa96a4dcfc024\
+ 8170c57f78e9ab9546efbbaf8e9ad6a993493577edd3d29ce8fd9a2e9eb4363b5b472a4ecb2065eb\
+ 38f876a841af1f227a703248955c8978329dffcd8e065d8da4d42504796ff7abc62832ed86c4f8d0\
+ 0f55cd567fb9d42524be57ebdacef730c3f94c0372f86fa1b0114f8620f553e4329b2a586fcfeedc\
+ af47934909090e14a1f1204e6f1681fb2df05356381e6340f4feaf0787e06218b0b0d8df51acb0bc\
+ f98546f33273adf260da959d6fc4a04872122af6508d124abb963c14c30e7c07fee368324921fe33\
+ 9ae89490c5d6cdae0c356bb6921de95ea13b54e23800";
+
+ let a = &BigUint::from_str_radix(hex_a, 16).unwrap();
+ let b = &BigUint::from_str_radix(hex_b, 16).unwrap();
+ let c = &BigUint::from_str_radix(hex_c, 16).unwrap();
+ let result = BigUint::from_str_radix(hex_result, 16).unwrap();
+
+ assert_eq!(a * b * c, result);
+ assert_eq!(a * c * b, result);
+ assert_eq!(b * a * c, result);
+ assert_eq!(b * c * a, result);
+ assert_eq!(c * a * b, result);
+ assert_eq!(c * b * a, result);
+}
diff --git a/rust/vendor/num-bigint/tests/macros/mod.rs b/rust/vendor/num-bigint/tests/macros/mod.rs
new file mode 100644
index 0000000..b14cd57
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/macros/mod.rs
@@ -0,0 +1,78 @@
+#![allow(unused)]
+
+/// Assert that an op works for all val/ref combinations
+macro_rules! assert_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_eq!((&$left) $op (&$right), $expected);
+ assert_eq!((&$left) $op $right.clone(), $expected);
+ assert_eq!($left.clone() $op (&$right), $expected);
+ assert_eq!($left.clone() $op $right.clone(), $expected);
+ };
+}
+
+/// Assert that an assign-op works for all val/ref combinations
+macro_rules! assert_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {{
+ let mut left = $left.clone();
+ assert_eq!({ left $op &$right; left}, $expected);
+
+ let mut left = $left.clone();
+ assert_eq!({ left $op $right.clone(); left}, $expected);
+ }};
+}
+
+/// Assert that an op works for scalar left or right
+macro_rules! assert_scalar_op {
+ (($($to:ident),*) $left:ident $op:tt $right:ident == $expected:expr) => {
+ $(
+ if let Some(left) = $left.$to() {
+ assert_op!(left $op $right == $expected);
+ }
+ if let Some(right) = $right.$to() {
+ assert_op!($left $op right == $expected);
+ }
+ )*
+ };
+}
+
+macro_rules! assert_unsigned_scalar_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128)
+ $left $op $right == $expected);
+ };
+}
+
+macro_rules! assert_signed_scalar_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128,
+ to_i8, to_i16, to_i32, to_i64, to_isize, to_i128)
+ $left $op $right == $expected);
+ };
+}
+
+/// Assert that an op works for scalar right
+macro_rules! assert_scalar_assign_op {
+ (($($to:ident),*) $left:ident $op:tt $right:ident == $expected:expr) => {
+ $(
+ if let Some(right) = $right.$to() {
+ let mut left = $left.clone();
+ assert_eq!({ left $op right; left}, $expected);
+ }
+ )*
+ };
+}
+
+macro_rules! assert_unsigned_scalar_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128)
+ $left $op $right == $expected);
+ };
+}
+
+macro_rules! assert_signed_scalar_assign_op {
+ ($left:ident $op:tt $right:ident == $expected:expr) => {
+ assert_scalar_assign_op!((to_u8, to_u16, to_u32, to_u64, to_usize, to_u128,
+ to_i8, to_i16, to_i32, to_i64, to_isize, to_i128)
+ $left $op $right == $expected);
+ };
+}
diff --git a/rust/vendor/num-bigint/tests/modpow.rs b/rust/vendor/num-bigint/tests/modpow.rs
new file mode 100644
index 0000000..d7a247b
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/modpow.rs
@@ -0,0 +1,181 @@
+static BIG_B: &str = "\
+ efac3c0a_0de55551_fee0bfe4_67fa017a_1a898fa1_6ca57cb1\
+ ca9e3248_cacc09a9_b99d6abc_38418d0f_82ae4238_d9a68832\
+ aadec7c1_ac5fed48_7a56a71b_67ac59d5_afb28022_20d9592d\
+ 247c4efc_abbd9b75_586088ee_1dc00dc4_232a8e15_6e8191dd\
+ 675b6ae0_c80f5164_752940bc_284b7cee_885c1e10_e495345b\
+ 8fbe9cfd_e5233fe1_19459d0b_d64be53c_27de5a02_a829976b\
+ 33096862_82dad291_bd38b6a9_be396646_ddaf8039_a2573c39\
+ 1b14e8bc_2cb53e48_298c047e_d9879e9c_5a521076_f0e27df3\
+ 990e1659_d3d8205b_6443ebc0_9918ebee_6764f668_9f2b2be3\
+ b59cbc76_d76d0dfc_d737c3ec_0ccf9c00_ad0554bf_17e776ad\
+ b4edf9cc_6ce540be_76229093_5c53893b";
+
+static BIG_E: &str = "\
+ be0e6ea6_08746133_e0fbc1bf_82dba91e_e2b56231_a81888d2\
+ a833a1fc_f7ff002a_3c486a13_4f420bf3_a5435be9_1a5c8391\
+ 774d6e6c_085d8357_b0c97d4d_2bb33f7c_34c68059_f78d2541\
+ eacc8832_426f1816_d3be001e_b69f9242_51c7708e_e10efe98\
+ 449c9a4a_b55a0f23_9d797410_515da00d_3ea07970_4478a2ca\
+ c3d5043c_bd9be1b4_6dce479d_4302d344_84a939e6_0ab5ada7\
+ 12ae34b2_30cc473c_9f8ee69d_2cac5970_29f5bf18_bc8203e4\
+ f3e895a2_13c94f1e_24c73d77_e517e801_53661fdd_a2ce9e47\
+ a73dd7f8_2f2adb1e_3f136bf7_8ae5f3b8_08730de1_a4eff678\
+ e77a06d0_19a522eb_cbefba2a_9caf7736_b157c5c6_2d192591\
+ 17946850_2ddb1822_117b68a0_32f7db88";
+
+// This modulus is the prime from the 2048-bit MODP DH group:
+// https://tools.ietf.org/html/rfc3526#section-3
+static BIG_M: &str = "\
+ FFFFFFFF_FFFFFFFF_C90FDAA2_2168C234_C4C6628B_80DC1CD1\
+ 29024E08_8A67CC74_020BBEA6_3B139B22_514A0879_8E3404DD\
+ EF9519B3_CD3A431B_302B0A6D_F25F1437_4FE1356D_6D51C245\
+ E485B576_625E7EC6_F44C42E9_A637ED6B_0BFF5CB6_F406B7ED\
+ EE386BFB_5A899FA5_AE9F2411_7C4B1FE6_49286651_ECE45B3D\
+ C2007CB8_A163BF05_98DA4836_1C55D39A_69163FA8_FD24CF5F\
+ 83655D23_DCA3AD96_1C62F356_208552BB_9ED52907_7096966D\
+ 670C354E_4ABC9804_F1746C08_CA18217C_32905E46_2E36CE3B\
+ E39E772C_180E8603_9B2783A2_EC07A28F_B5C55DF0_6F4C52C9\
+ DE2BCBF6_95581718_3995497C_EA956AE5_15D22618_98FA0510\
+ 15728E5A_8AACAA68_FFFFFFFF_FFFFFFFF";
+
+static BIG_R: &str = "\
+ a1468311_6e56edc9_7a98228b_5e924776_0dd7836e_caabac13\
+ eda5373b_4752aa65_a1454850_40dc770e_30aa8675_6be7d3a8\
+ 9d3085e4_da5155cf_b451ef62_54d0da61_cf2b2c87_f495e096\
+ 055309f7_77802bbb_37271ba8_1313f1b5_075c75d1_024b6c77\
+ fdb56f17_b05bce61_e527ebfd_2ee86860_e9907066_edd526e7\
+ 93d289bf_6726b293_41b0de24_eff82424_8dfd374b_4ec59542\
+ 35ced2b2_6b195c90_10042ffb_8f58ce21_bc10ec42_64fda779\
+ d352d234_3d4eaea6_a86111ad_a37e9555_43ca78ce_2885bed7\
+ 5a30d182_f1cf6834_dc5b6e27_1a41ac34_a2e91e11_33363ff0\
+ f88a7b04_900227c9_f6e6d06b_7856b4bb_4e354d61_060db6c8\
+ 109c4735_6e7db425_7b5d74c7_0b709508";
+
+mod biguint {
+ use num_bigint::BigUint;
+ use num_integer::Integer;
+ use num_traits::Num;
+
+ fn check_modpow<T: Into<BigUint>>(b: T, e: T, m: T, r: T) {
+ let b: BigUint = b.into();
+ let e: BigUint = e.into();
+ let m: BigUint = m.into();
+ let r: BigUint = r.into();
+
+ assert_eq!(b.modpow(&e, &m), r);
+
+ let even_m = &m << 1;
+ let even_modpow = b.modpow(&e, &even_m);
+ assert!(even_modpow < even_m);
+ assert_eq!(even_modpow.mod_floor(&m), r);
+ }
+
+ #[test]
+ fn test_modpow_single() {
+ check_modpow::<u32>(1, 0, 11, 1);
+ check_modpow::<u32>(0, 15, 11, 0);
+ check_modpow::<u32>(3, 7, 11, 9);
+ check_modpow::<u32>(5, 117, 19, 1);
+ check_modpow::<u32>(20, 1, 2, 0);
+ check_modpow::<u32>(20, 1, 3, 2);
+ }
+
+ #[test]
+ fn test_modpow_small() {
+ for b in 0u64..11 {
+ for e in 0u64..11 {
+ for m in 1..11 {
+ check_modpow::<u64>(b, e, m, b.pow(e as u32) % m);
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_modpow_big() {
+ let b = BigUint::from_str_radix(super::BIG_B, 16).unwrap();
+ let e = BigUint::from_str_radix(super::BIG_E, 16).unwrap();
+ let m = BigUint::from_str_radix(super::BIG_M, 16).unwrap();
+ let r = BigUint::from_str_radix(super::BIG_R, 16).unwrap();
+
+ assert_eq!(b.modpow(&e, &m), r);
+
+ let even_m = &m << 1;
+ let even_modpow = b.modpow(&e, &even_m);
+ assert!(even_modpow < even_m);
+ assert_eq!(even_modpow % m, r);
+ }
+}
+
+mod bigint {
+ use num_bigint::BigInt;
+ use num_integer::Integer;
+ use num_traits::{Num, One, Signed};
+
+ fn check_modpow<T: Into<BigInt>>(b: T, e: T, m: T, r: T) {
+ fn check(b: &BigInt, e: &BigInt, m: &BigInt, r: &BigInt) {
+ assert_eq!(&b.modpow(e, m), r, "{} ** {} (mod {}) != {}", b, e, m, r);
+
+ let even_m = m << 1u8;
+ let even_modpow = b.modpow(e, m);
+ assert!(even_modpow.abs() < even_m.abs());
+ assert_eq!(&even_modpow.mod_floor(m), r);
+
+ // the sign of the result follows the modulus like `mod_floor`, not `rem`
+ assert_eq!(b.modpow(&BigInt::one(), m), b.mod_floor(m));
+ }
+
+ let b: BigInt = b.into();
+ let e: BigInt = e.into();
+ let m: BigInt = m.into();
+ let r: BigInt = r.into();
+
+ let neg_b_r = if e.is_odd() {
+ (-&r).mod_floor(&m)
+ } else {
+ r.clone()
+ };
+ let neg_m_r = r.mod_floor(&-&m);
+ let neg_bm_r = neg_b_r.mod_floor(&-&m);
+
+ check(&b, &e, &m, &r);
+ check(&-&b, &e, &m, &neg_b_r);
+ check(&b, &e, &-&m, &neg_m_r);
+ check(&-b, &e, &-&m, &neg_bm_r);
+ }
+
+ #[test]
+ fn test_modpow() {
+ check_modpow(1, 0, 11, 1);
+ check_modpow(0, 15, 11, 0);
+ check_modpow(3, 7, 11, 9);
+ check_modpow(5, 117, 19, 1);
+ check_modpow(-20, 1, 2, 0);
+ check_modpow(-20, 1, 3, 1);
+ }
+
+ #[test]
+ fn test_modpow_small() {
+ for b in -10i64..11 {
+ for e in 0i64..11 {
+ for m in -10..11 {
+ if m == 0 {
+ continue;
+ }
+ check_modpow(b, e, m, b.pow(e as u32).mod_floor(&m));
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_modpow_big() {
+ let b = BigInt::from_str_radix(super::BIG_B, 16).unwrap();
+ let e = BigInt::from_str_radix(super::BIG_E, 16).unwrap();
+ let m = BigInt::from_str_radix(super::BIG_M, 16).unwrap();
+ let r = BigInt::from_str_radix(super::BIG_R, 16).unwrap();
+
+ check_modpow(b, e, m, r);
+ }
+}
diff --git a/rust/vendor/num-bigint/tests/roots.rs b/rust/vendor/num-bigint/tests/roots.rs
new file mode 100644
index 0000000..cfef80c
--- /dev/null
+++ b/rust/vendor/num-bigint/tests/roots.rs
@@ -0,0 +1,160 @@
+mod biguint {
+ use num_bigint::BigUint;
+ use num_traits::{One, Zero};
+ use std::{i32, u32};
+
+ fn check<T: Into<BigUint>>(x: T, n: u32) {
+ let x: BigUint = x.into();
+ let root = x.nth_root(n);
+ println!("check {}.nth_root({}) = {}", x, n, root);
+
+ if n == 2 {
+ assert_eq!(root, x.sqrt())
+ } else if n == 3 {
+ assert_eq!(root, x.cbrt())
+ }
+
+ let lo = root.pow(n);
+ assert!(lo <= x);
+ assert_eq!(lo.nth_root(n), root);
+ if !lo.is_zero() {
+ assert_eq!((&lo - 1u32).nth_root(n), &root - 1u32);
+ }
+
+ let hi = (&root + 1u32).pow(n);
+ assert!(hi > x);
+ assert_eq!(hi.nth_root(n), &root + 1u32);
+ assert_eq!((&hi - 1u32).nth_root(n), root);
+ }
+
+ #[test]
+ fn test_sqrt() {
+ check(99u32, 2);
+ check(100u32, 2);
+ check(120u32, 2);
+ }
+
+ #[test]
+ fn test_cbrt() {
+ check(8u32, 3);
+ check(26u32, 3);
+ }
+
+ #[test]
+ fn test_nth_root() {
+ check(0u32, 1);
+ check(10u32, 1);
+ check(100u32, 4);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_nth_root_n_is_zero() {
+ check(4u32, 0);
+ }
+
+ #[test]
+ fn test_nth_root_big() {
+ let x = BigUint::from(123_456_789_u32);
+ let expected = BigUint::from(6u32);
+
+ assert_eq!(x.nth_root(10), expected);
+ check(x, 10);
+ }
+
+ #[test]
+ fn test_nth_root_googol() {
+ let googol = BigUint::from(10u32).pow(100u32);
+
+ // perfect divisors of 100
+ for &n in &[2, 4, 5, 10, 20, 25, 50, 100] {
+ let expected = BigUint::from(10u32).pow(100u32 / n);
+ assert_eq!(googol.nth_root(n), expected);
+ check(googol.clone(), n);
+ }
+ }
+
+ #[test]
+ fn test_nth_root_twos() {
+ const EXP: u32 = 12;
+ const LOG2: usize = 1 << EXP;
+ let x = BigUint::one() << LOG2;
+
+ // the perfect divisors are just powers of two
+ for exp in 1..=EXP {
+ let n = 2u32.pow(exp);
+ let expected = BigUint::one() << (LOG2 / n as usize);
+ assert_eq!(x.nth_root(n), expected);
+ check(x.clone(), n);
+ }
+
+ // degenerate cases should return quickly
+ assert!(x.nth_root(x.bits() as u32).is_one());
+ assert!(x.nth_root(i32::MAX as u32).is_one());
+ assert!(x.nth_root(u32::MAX).is_one());
+ }
+
+ #[test]
+ fn test_roots_rand1() {
+ // A random input that found regressions
+ let s = "575981506858479247661989091587544744717244516135539456183849\
+ 986593934723426343633698413178771587697273822147578889823552\
+ 182702908597782734558103025298880194023243541613924361007059\
+ 353344183590348785832467726433749431093350684849462759540710\
+ 026019022227591412417064179299354183441181373862905039254106\
+ 4781867";
+ let x: BigUint = s.parse().unwrap();
+
+ check(x.clone(), 2);
+ check(x.clone(), 3);
+ check(x.clone(), 10);
+ check(x, 100);
+ }
+}
+
+mod bigint {
+ use num_bigint::BigInt;
+ use num_traits::Signed;
+
+ fn check(x: i64, n: u32) {
+ let big_x = BigInt::from(x);
+ let res = big_x.nth_root(n);
+
+ if n == 2 {
+ assert_eq!(&res, &big_x.sqrt())
+ } else if n == 3 {
+ assert_eq!(&res, &big_x.cbrt())
+ }
+
+ if big_x.is_negative() {
+ assert!(res.pow(n) >= big_x);
+ assert!((res - 1u32).pow(n) < big_x);
+ } else {
+ assert!(res.pow(n) <= big_x);
+ assert!((res + 1u32).pow(n) > big_x);
+ }
+ }
+
+ #[test]
+ fn test_nth_root() {
+ check(-100, 3);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_nth_root_x_neg_n_even() {
+ check(-100, 4);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_sqrt_x_neg() {
+ check(-4, 2);
+ }
+
+ #[test]
+ fn test_cbrt() {
+ check(8, 3);
+ check(-8, 3);
+ }
+}
diff --git a/rust/vendor/num-complex/.cargo-checksum.json b/rust/vendor/num-complex/.cargo-checksum.json
new file mode 100644
index 0000000..b570343
--- /dev/null
+++ b/rust/vendor/num-complex/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"8cf6fb85b2079ec956d3f68462408742e73b936addeec8e41d9b74751f78762d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"f0b140084775e2375b360eb112879be8e4af84219de2418ea192dac0a44e496d","RELEASES.md":"de3b50aa25c31813e4c8f6d26a98927ea8ebca37cfd750b702e4f53f25a2e56a","build.rs":"aba9dbc29eff865d95ce39cfe7cb20fde6137c7b7fae441d1b52ebb5087e402f","src/cast.rs":"dc674642a5a5cd74370dc8f400a5db1698bcb655796d4d5267dfd3582ad20023","src/crand.rs":"07c6dbb07e0d93200c43a75c8ce0ebd22e99b6a4d728ec8e00441414be7e2321","src/lib.rs":"8ab88e7253b6fccdb6717b7d2fb4fce21d02ca9d68b6c6841ac55b763bff906d","src/pow.rs":"c74f6cb40fe05c41fcdda0d684ead945f5799d1a94a7d13c645a003b76710d97"},"package":"b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95"} \ No newline at end of file
diff --git a/rust/vendor/num-complex/Cargo.toml b/rust/vendor/num-complex/Cargo.toml
new file mode 100644
index 0000000..595ef47
--- /dev/null
+++ b/rust/vendor/num-complex/Cargo.toml
@@ -0,0 +1,48 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "num-complex"
+version = "0.2.4"
+authors = ["The Rust Project Developers"]
+build = "build.rs"
+exclude = ["/ci/*", "/.travis.yml", "/bors.toml"]
+description = "Complex numbers implementation for Rust"
+homepage = "https://github.com/rust-num/num-complex"
+documentation = "https://docs.rs/num-complex"
+readme = "README.md"
+keywords = ["mathematics", "numerics"]
+categories = ["algorithms", "data-structures", "science", "no-std"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/rust-num/num-complex"
+[package.metadata.docs.rs]
+features = ["std", "serde", "rand"]
+[dependencies.num-traits]
+version = "0.2.11"
+default-features = false
+
+[dependencies.rand]
+version = "0.5"
+optional = true
+default-features = false
+
+[dependencies.serde]
+version = "1.0"
+optional = true
+default-features = false
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+default = ["std"]
+i128 = ["num-traits/i128"]
+std = ["num-traits/std"]
diff --git a/rust/vendor/num-complex/LICENSE-APACHE b/rust/vendor/num-complex/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num-complex/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num-complex/LICENSE-MIT b/rust/vendor/num-complex/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num-complex/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num-complex/README.md b/rust/vendor/num-complex/README.md
new file mode 100644
index 0000000..0536a2b
--- /dev/null
+++ b/rust/vendor/num-complex/README.md
@@ -0,0 +1,50 @@
+# num-complex
+
+[![crate](https://img.shields.io/crates/v/num-complex.svg)](https://crates.io/crates/num-complex)
+[![documentation](https://docs.rs/num-complex/badge.svg)](https://docs.rs/num-complex)
+![minimum rustc 1.15](https://img.shields.io/badge/rustc-1.15+-red.svg)
+[![Travis status](https://travis-ci.org/rust-num/num-complex.svg?branch=master)](https://travis-ci.org/rust-num/num-complex)
+
+`Complex` numbers for Rust.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-complex = "0.2"
+```
+
+and this to your crate root:
+
+```rust
+extern crate num_complex;
+```
+
+## Features
+
+This crate can be used without the standard library (`#![no_std]`) by disabling
+the default `std` feature. Use this in `Cargo.toml`:
+
+```toml
+[dependencies.num-complex]
+version = "0.2"
+default-features = false
+```
+
+Features based on `Float` types are only available when `std` is enabled. Where
+possible, `FloatCore` is used instead. Formatting complex numbers only supports
+format width when `std` is enabled.
+
+Implementations for `i128` and `u128` are only available with Rust 1.26 and
+later. The build script automatically detects this, but you can make it
+mandatory by enabling the `i128` crate feature.
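+
+For example, one way to require `i128` support in `Cargo.toml` might be
+(a minimal sketch, reusing the feature name from the build script):
+
+```toml
+[dependencies.num-complex]
+version = "0.2"
+features = ["i128"]
+```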
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-complex` crate is tested for rustc 1.15 and greater.
diff --git a/rust/vendor/num-complex/RELEASES.md b/rust/vendor/num-complex/RELEASES.md
new file mode 100644
index 0000000..5163276
--- /dev/null
+++ b/rust/vendor/num-complex/RELEASES.md
@@ -0,0 +1,103 @@
+# Release 0.2.4 (2020-01-09)
+
+- [`Complex::new` is now a `const fn` for Rust 1.31 and later][63].
+- [Updated the `autocfg` build dependency to 1.0][68].
+
+**Contributors**: @burrbull, @cuviper, @dingelish
+
+[63]: https://github.com/rust-num/num-complex/pull/63
+[68]: https://github.com/rust-num/num-complex/pull/68
+
+# Release 0.2.3 (2019-06-11)
+
+- [`Complex::sqrt()` is now more accurate for negative reals][60].
+- [`Complex::cbrt()` computes the principal cube root][61].
+
+**Contributors**: @cuviper
+
+[60]: https://github.com/rust-num/num-complex/pull/60
+[61]: https://github.com/rust-num/num-complex/pull/61
+
+# Release 0.2.2 (2019-06-10)
+
+- [`Complex::l1_norm()` computes the Manhattan distance from the origin][43].
+- [`Complex::fdiv()` and `finv()` use floating-point for inversion][41], which
+ may avoid overflows for some inputs, at the cost of trigonometric rounding.
+- [`Complex` now implements `num_traits::MulAdd` and `MulAddAssign`][44].
+- [`Complex` now implements `Zero::set_zero` and `One::set_one`][57].
+- [`Complex` now implements `num_traits::Pow` and adds `powi` and `powu`][56].
+
+**Contributors**: @adamnemecek, @cuviper, @ignatenkobrain, @Schultzer
+
+[41]: https://github.com/rust-num/num-complex/pull/41
+[43]: https://github.com/rust-num/num-complex/pull/43
+[44]: https://github.com/rust-num/num-complex/pull/44
+[56]: https://github.com/rust-num/num-complex/pull/56
+[57]: https://github.com/rust-num/num-complex/pull/57
+
+# Release 0.2.1 (2018-10-08)
+
+- [`Complex` now implements `ToPrimitive`, `FromPrimitive`, `AsPrimitive`, and `NumCast`][33].
+
+**Contributors**: @cuviper, @termoshtt
+
+[33]: https://github.com/rust-num/num-complex/pull/33
+
+# Release 0.2.0 (2018-05-24)
+
+### Enhancements
+
+- [`Complex` now implements `num_traits::Inv` and `One::is_one`][17].
+- [`Complex` now implements `Sum` and `Product`][11].
+- [`Complex` now supports `i128` and `u128` components][27] with Rust 1.26+.
+- [`Complex` now optionally supports `rand` 0.5][28], implementing the
+ `Standard` distribution and [a generic `ComplexDistribution`][30].
+- [`Rem` with a scalar divisor now avoids `norm_sqr` overflow][25].
+
+### Breaking Changes
+
+- [`num-complex` now requires rustc 1.15 or greater][16].
+- [There is now a `std` feature][22], enabled by default, along with the
+ implication that building *without* this feature makes this a `#![no_std]`
+ crate. A few methods now require `FloatCore`, and the remaining methods
+ based on `Float` are only supported with `std`.
+- [The `serde` dependency has been updated to 1.0][7], and `rustc-serialize`
+ is no longer supported by `num-complex`.
+
+**Contributors**: @clarcharr, @cuviper, @shingtaklam1324, @termoshtt
+
+[7]: https://github.com/rust-num/num-complex/pull/7
+[11]: https://github.com/rust-num/num-complex/pull/11
+[16]: https://github.com/rust-num/num-complex/pull/16
+[17]: https://github.com/rust-num/num-complex/pull/17
+[22]: https://github.com/rust-num/num-complex/pull/22
+[25]: https://github.com/rust-num/num-complex/pull/25
+[27]: https://github.com/rust-num/num-complex/pull/27
+[28]: https://github.com/rust-num/num-complex/pull/28
+[30]: https://github.com/rust-num/num-complex/pull/30
+
+
+# Release 0.1.43 (2018-03-08)
+
+- [Fix a usage typo in README.md][20].
+
+**Contributors**: @shingtaklam1324
+
+[20]: https://github.com/rust-num/num-complex/pull/20
+
+
+# Release 0.1.42 (2018-02-07)
+
+- [num-complex now has its own source repository][num-356] at [rust-num/num-complex][home].
+
+**Contributors**: @cuviper
+
+[home]: https://github.com/rust-num/num-complex
+[num-356]: https://github.com/rust-num/num/pull/356
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
+
diff --git a/rust/vendor/num-complex/build.rs b/rust/vendor/num-complex/build.rs
new file mode 100644
index 0000000..85e88b7
--- /dev/null
+++ b/rust/vendor/num-complex/build.rs
@@ -0,0 +1,20 @@
+extern crate autocfg;
+
+use std::env;
+
+fn main() {
+ let ac = autocfg::new();
+
+ if ac.probe_type("i128") {
+ println!("cargo:rustc-cfg=has_i128");
+ } else if env::var_os("CARGO_FEATURE_I128").is_some() {
+ panic!("i128 support was not detected!");
+ }
+
+ // autocfg doesn't have a direct way to probe for `const fn` yet.
+ if ac.probe_rustc_version(1, 31) {
+ autocfg::emit("has_const_fn");
+ }
+
+ autocfg::rerun_path("build.rs");
+}
diff --git a/rust/vendor/num-complex/src/cast.rs b/rust/vendor/num-complex/src/cast.rs
new file mode 100644
index 0000000..ace981d
--- /dev/null
+++ b/rust/vendor/num-complex/src/cast.rs
@@ -0,0 +1,119 @@
+use super::Complex;
+use traits::{AsPrimitive, FromPrimitive, Num, NumCast, ToPrimitive};
+
+macro_rules! impl_to_primitive {
+ ($ty:ty, $to:ident) => {
+ #[inline]
+ fn $to(&self) -> Option<$ty> {
+ if self.im.is_zero() { self.re.$to() } else { None }
+ }
+ }
+} // impl_to_primitive
+
+// Returns None if the imaginary part is non-zero
+impl<T: ToPrimitive + Num> ToPrimitive for Complex<T> {
+ impl_to_primitive!(usize, to_usize);
+ impl_to_primitive!(isize, to_isize);
+ impl_to_primitive!(u8, to_u8);
+ impl_to_primitive!(u16, to_u16);
+ impl_to_primitive!(u32, to_u32);
+ impl_to_primitive!(u64, to_u64);
+ impl_to_primitive!(i8, to_i8);
+ impl_to_primitive!(i16, to_i16);
+ impl_to_primitive!(i32, to_i32);
+ impl_to_primitive!(i64, to_i64);
+ #[cfg(has_i128)]
+ impl_to_primitive!(u128, to_u128);
+ #[cfg(has_i128)]
+ impl_to_primitive!(i128, to_i128);
+ impl_to_primitive!(f32, to_f32);
+ impl_to_primitive!(f64, to_f64);
+}
+
+macro_rules! impl_from_primitive {
+ ($ty:ty, $from_xx:ident) => {
+ #[inline]
+ fn $from_xx(n: $ty) -> Option<Self> {
+ T::$from_xx(n).map(|re| Complex {
+ re: re,
+ im: T::zero(),
+ })
+ }
+ };
+} // impl_from_primitive
+
+impl<T: FromPrimitive + Num> FromPrimitive for Complex<T> {
+ impl_from_primitive!(usize, from_usize);
+ impl_from_primitive!(isize, from_isize);
+ impl_from_primitive!(u8, from_u8);
+ impl_from_primitive!(u16, from_u16);
+ impl_from_primitive!(u32, from_u32);
+ impl_from_primitive!(u64, from_u64);
+ impl_from_primitive!(i8, from_i8);
+ impl_from_primitive!(i16, from_i16);
+ impl_from_primitive!(i32, from_i32);
+ impl_from_primitive!(i64, from_i64);
+ #[cfg(has_i128)]
+ impl_from_primitive!(u128, from_u128);
+ #[cfg(has_i128)]
+ impl_from_primitive!(i128, from_i128);
+ impl_from_primitive!(f32, from_f32);
+ impl_from_primitive!(f64, from_f64);
+}
+
+impl<T: NumCast + Num> NumCast for Complex<T> {
+ fn from<U: ToPrimitive>(n: U) -> Option<Self> {
+ T::from(n).map(|re| Complex {
+ re: re,
+ im: T::zero(),
+ })
+ }
+}
+
+impl<T, U> AsPrimitive<U> for Complex<T>
+where
+ T: AsPrimitive<U>,
+ U: 'static + Copy,
+{
+ fn as_(self) -> U {
+ self.re.as_()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_to_primitive() {
+ let a: Complex<u32> = Complex { re: 3, im: 0 };
+ assert_eq!(a.to_i32(), Some(3_i32));
+ let b: Complex<u32> = Complex { re: 3, im: 1 };
+ assert_eq!(b.to_i32(), None);
+ let x: Complex<f32> = Complex { re: 1.0, im: 0.1 };
+ assert_eq!(x.to_f32(), None);
+ let y: Complex<f32> = Complex { re: 1.0, im: 0.0 };
+ assert_eq!(y.to_f32(), Some(1.0));
+ let z: Complex<f32> = Complex { re: 1.0, im: 0.0 };
+ assert_eq!(z.to_i32(), Some(1));
+ }
+
+ #[test]
+ fn test_from_primitive() {
+ let a: Complex<f32> = FromPrimitive::from_i32(2).unwrap();
+ assert_eq!(a, Complex { re: 2.0, im: 0.0 });
+ }
+
+ #[test]
+ fn test_num_cast() {
+ let a: Complex<f32> = NumCast::from(2_i32).unwrap();
+ assert_eq!(a, Complex { re: 2.0, im: 0.0 });
+ }
+
+ #[test]
+ fn test_as_primitive() {
+ let a: Complex<f32> = Complex { re: 2.0, im: 0.2 };
+ let a_: i32 = a.as_();
+ assert_eq!(a_, 2_i32);
+ }
+}
diff --git a/rust/vendor/num-complex/src/crand.rs b/rust/vendor/num-complex/src/crand.rs
new file mode 100644
index 0000000..9e43974
--- /dev/null
+++ b/rust/vendor/num-complex/src/crand.rs
@@ -0,0 +1,115 @@
+//! Rand implementations for complex numbers
+
+use rand::distributions::Standard;
+use rand::prelude::*;
+use traits::Num;
+use Complex;
+
+impl<T> Distribution<Complex<T>> for Standard
+where
+ T: Num + Clone,
+ Standard: Distribution<T>,
+{
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Complex<T> {
+ Complex::new(self.sample(rng), self.sample(rng))
+ }
+}
+
+/// A generic random value distribution for complex numbers.
+#[derive(Clone, Copy, Debug)]
+pub struct ComplexDistribution<Re, Im = Re> {
+ re: Re,
+ im: Im,
+}
+
+impl<Re, Im> ComplexDistribution<Re, Im> {
+ /// Creates a complex distribution from independent
+ /// distributions of the real and imaginary parts.
+ pub fn new(re: Re, im: Im) -> Self {
+ ComplexDistribution { re, im }
+ }
+}
+
+impl<T, Re, Im> Distribution<Complex<T>> for ComplexDistribution<Re, Im>
+where
+ T: Num + Clone,
+ Re: Distribution<T>,
+ Im: Distribution<T>,
+{
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Complex<T> {
+ Complex::new(self.re.sample(rng), self.im.sample(rng))
+ }
+}
+
+#[cfg(test)]
+fn test_rng() -> SmallRng {
+ SmallRng::from_seed([42; 16])
+}
+
+#[test]
+fn standard_f64() {
+ let mut rng = test_rng();
+ for _ in 0..100 {
+ let c: Complex<f64> = rng.gen();
+ assert!(c.re >= 0.0 && c.re < 1.0);
+ assert!(c.im >= 0.0 && c.im < 1.0);
+ }
+}
+
+#[test]
+fn generic_standard_f64() {
+ let mut rng = test_rng();
+ let dist = ComplexDistribution::new(Standard, Standard);
+ for _ in 0..100 {
+ let c: Complex<f64> = rng.sample(&dist);
+ assert!(c.re >= 0.0 && c.re < 1.0);
+ assert!(c.im >= 0.0 && c.im < 1.0);
+ }
+}
+
+#[test]
+fn generic_uniform_f64() {
+ use rand::distributions::Uniform;
+
+ let mut rng = test_rng();
+ let re = Uniform::new(-100.0, 0.0);
+ let im = Uniform::new(0.0, 100.0);
+ let dist = ComplexDistribution::new(re, im);
+ for _ in 0..100 {
+ // no type annotation required, since `Uniform` only produces one type.
+ let c = rng.sample(&dist);
+ assert!(c.re >= -100.0 && c.re < 0.0);
+ assert!(c.im >= 0.0 && c.im < 100.0);
+ }
+}
+
+#[test]
+fn generic_mixed_f64() {
+ use rand::distributions::Uniform;
+
+ let mut rng = test_rng();
+ let re = Uniform::new(-100.0, 0.0);
+ let dist = ComplexDistribution::new(re, Standard);
+ for _ in 0..100 {
+ // no type annotation required, since `Uniform` only produces one type.
+ let c = rng.sample(&dist);
+ assert!(c.re >= -100.0 && c.re < 0.0);
+ assert!(c.im >= 0.0 && c.im < 1.0);
+ }
+}
+
+#[test]
+fn generic_uniform_i32() {
+ use rand::distributions::Uniform;
+
+ let mut rng = test_rng();
+ let re = Uniform::new(-100, 0);
+ let im = Uniform::new(0, 100);
+ let dist = ComplexDistribution::new(re, im);
+ for _ in 0..100 {
+ // no type annotation required, since `Uniform` only produces one type.
+ let c = rng.sample(&dist);
+ assert!(c.re >= -100 && c.re < 0);
+ assert!(c.im >= 0 && c.im < 100);
+ }
+}
diff --git a/rust/vendor/num-complex/src/lib.rs b/rust/vendor/num-complex/src/lib.rs
new file mode 100644
index 0000000..46dc2e8
--- /dev/null
+++ b/rust/vendor/num-complex/src/lib.rs
@@ -0,0 +1,2663 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Complex numbers.
+//!
+//! ## Compatibility
+//!
+//! The `num-complex` crate is tested for rustc 1.15 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num-complex/0.2")]
+#![no_std]
+
+#[cfg(any(test, feature = "std"))]
+#[cfg_attr(test, macro_use)]
+extern crate std;
+
+extern crate num_traits as traits;
+
+#[cfg(feature = "serde")]
+extern crate serde;
+
+#[cfg(feature = "rand")]
+extern crate rand;
+
+use core::fmt;
+#[cfg(test)]
+use core::hash;
+use core::iter::{Product, Sum};
+use core::ops::{Add, Div, Mul, Neg, Rem, Sub};
+use core::str::FromStr;
+#[cfg(feature = "std")]
+use std::error::Error;
+
+use traits::{Inv, MulAdd, Num, One, Pow, Signed, Zero};
+
+#[cfg(feature = "std")]
+use traits::float::Float;
+use traits::float::FloatCore;
+
+mod cast;
+mod pow;
+
+#[cfg(feature = "rand")]
+mod crand;
+#[cfg(feature = "rand")]
+pub use crand::ComplexDistribution;
+
+// FIXME #1284: handle complex NaN & infinity etc. This
+// probably doesn't map to C's _Complex correctly.
+
+/// A complex number in Cartesian form.
+///
+/// ## Representation and Foreign Function Interface Compatibility
+///
+/// `Complex<T>` is memory layout compatible with an array `[T; 2]`.
+///
+/// Note that `Complex<F>` where F is a floating point type is **only** memory
+/// layout compatible with C's complex types, **not** necessarily calling
+/// convention compatible. This means that for FFI you can only pass
+/// `Complex<F>` behind a pointer, not as a value.
+///
+/// ## Examples
+///
+/// Example of extern function declaration.
+///
+/// ```
+/// use num_complex::Complex;
+/// use std::os::raw::c_int;
+///
+/// extern "C" {
+/// fn zaxpy_(n: *const c_int, alpha: *const Complex<f64>,
+/// x: *const Complex<f64>, incx: *const c_int,
+/// y: *mut Complex<f64>, incy: *const c_int);
+/// }
+/// ```
+#[derive(PartialEq, Eq, Copy, Clone, Hash, Debug, Default)]
+#[repr(C)]
+pub struct Complex<T> {
+ /// Real portion of the complex number
+ pub re: T,
+ /// Imaginary portion of the complex number
+ pub im: T,
+}
+
+pub type Complex32 = Complex<f32>;
+pub type Complex64 = Complex<f64>;
+
+impl<T> Complex<T> {
+ #[cfg(has_const_fn)]
+ /// Create a new Complex
+ #[inline]
+ pub const fn new(re: T, im: T) -> Self {
+ Complex { re: re, im: im }
+ }
+
+ #[cfg(not(has_const_fn))]
+ /// Create a new Complex
+ #[inline]
+ pub fn new(re: T, im: T) -> Self {
+ Complex { re: re, im: im }
+ }
+}
+
+impl<T: Clone + Num> Complex<T> {
+ /// Returns imaginary unit
+ #[inline]
+ pub fn i() -> Self {
+ Self::new(T::zero(), T::one())
+ }
+
+ /// Returns the square of the norm (since `T` doesn't necessarily
+ /// have a sqrt function), i.e. `re^2 + im^2`.
+ #[inline]
+ pub fn norm_sqr(&self) -> T {
+ self.re.clone() * self.re.clone() + self.im.clone() * self.im.clone()
+ }
+
+ /// Multiplies `self` by the scalar `t`.
+ #[inline]
+ pub fn scale(&self, t: T) -> Self {
+ Self::new(self.re.clone() * t.clone(), self.im.clone() * t)
+ }
+
+ /// Divides `self` by the scalar `t`.
+ #[inline]
+ pub fn unscale(&self, t: T) -> Self {
+ Self::new(self.re.clone() / t.clone(), self.im.clone() / t)
+ }
+
+ /// Raises `self` to an unsigned integer power.
+ #[inline]
+ pub fn powu(&self, exp: u32) -> Self {
+ Pow::pow(self, exp)
+ }
+}
+
+impl<T: Clone + Num + Neg<Output = T>> Complex<T> {
+ /// Returns the complex conjugate, i.e. `re - i im`
+ #[inline]
+ pub fn conj(&self) -> Self {
+ Self::new(self.re.clone(), -self.im.clone())
+ }
+
+ /// Returns `1/self`
+ #[inline]
+ pub fn inv(&self) -> Self {
+ let norm_sqr = self.norm_sqr();
+ Self::new(
+ self.re.clone() / norm_sqr.clone(),
+ -self.im.clone() / norm_sqr,
+ )
+ }
+
+ /// Raises `self` to a signed integer power.
+ #[inline]
+ pub fn powi(&self, exp: i32) -> Self {
+ Pow::pow(self, exp)
+ }
+}
+
+impl<T: Clone + Signed> Complex<T> {
+ /// Returns the L1 norm `|re| + |im|` -- the [Manhattan distance] from the origin.
+ ///
+ /// [Manhattan distance]: https://en.wikipedia.org/wiki/Taxicab_geometry
+ #[inline]
+ pub fn l1_norm(&self) -> T {
+ self.re.abs() + self.im.abs()
+ }
+}
+
+#[cfg(feature = "std")]
+impl<T: Clone + Float> Complex<T> {
+ /// Calculate |self|
+ #[inline]
+ pub fn norm(&self) -> T {
+ self.re.hypot(self.im)
+ }
+ /// Calculate the principal Arg of self.
+ #[inline]
+ pub fn arg(&self) -> T {
+ self.im.atan2(self.re)
+ }
+ /// Convert to polar form (r, theta), such that
+ /// `self = r * exp(i * theta)`
+ #[inline]
+ pub fn to_polar(&self) -> (T, T) {
+ (self.norm(), self.arg())
+ }
+ /// Convert a polar representation into a complex number.
+ #[inline]
+ pub fn from_polar(r: &T, theta: &T) -> Self {
+ Self::new(*r * theta.cos(), *r * theta.sin())
+ }
+
+ /// Computes `e^(self)`, where `e` is the base of the natural logarithm.
+ #[inline]
+ pub fn exp(&self) -> Self {
+ // formula: e^(a + bi) = e^a (cos(b) + i*sin(b))
+ // = from_polar(e^a, b)
+ Self::from_polar(&self.re.exp(), &self.im)
+ }
+
+ /// Computes the principal value of natural logarithm of `self`.
+ ///
+ /// This function has one branch cut:
+ ///
+ /// * `(-∞, 0]`, continuous from above.
+ ///
+ /// The branch satisfies `-π ≤ arg(ln(z)) ≤ π`.
+ #[inline]
+ pub fn ln(&self) -> Self {
+ // formula: ln(z) = ln|z| + i*arg(z)
+ let (r, theta) = self.to_polar();
+ Self::new(r.ln(), theta)
+ }
+
+ /// Computes the principal value of the square root of `self`.
+ ///
+ /// This function has one branch cut:
+ ///
+ /// * `(-∞, 0)`, continuous from above.
+ ///
+ /// The branch satisfies `-π/2 ≤ arg(sqrt(z)) ≤ π/2`.
+ #[inline]
+ pub fn sqrt(&self) -> Self {
+ if self.im.is_zero() {
+ if self.re.is_sign_positive() {
+ // simple positive real √r, and copy `im` for its sign
+ Self::new(self.re.sqrt(), self.im)
+ } else {
+ // √(r e^(iπ)) = √r e^(iπ/2) = i√r
+ // √(r e^(-iπ)) = √r e^(-iπ/2) = -i√r
+ let re = T::zero();
+ let im = (-self.re).sqrt();
+ if self.im.is_sign_positive() {
+ Self::new(re, im)
+ } else {
+ Self::new(re, -im)
+ }
+ }
+ } else if self.re.is_zero() {
+ // √(r e^(iπ/2)) = √r e^(iπ/4) = √(r/2) + i√(r/2)
+ // √(r e^(-iπ/2)) = √r e^(-iπ/4) = √(r/2) - i√(r/2)
+ let one = T::one();
+ let two = one + one;
+ let x = (self.im.abs() / two).sqrt();
+ if self.im.is_sign_positive() {
+ Self::new(x, x)
+ } else {
+ Self::new(x, -x)
+ }
+ } else {
+ // formula: sqrt(r e^(it)) = sqrt(r) e^(it/2)
+ let one = T::one();
+ let two = one + one;
+ let (r, theta) = self.to_polar();
+ Self::from_polar(&(r.sqrt()), &(theta / two))
+ }
+ }
+
+ /// Computes the principal value of the cube root of `self`.
+ ///
+ /// This function has one branch cut:
+ ///
+ /// * `(-∞, 0)`, continuous from above.
+ ///
+ /// The branch satisfies `-π/3 ≤ arg(cbrt(z)) ≤ π/3`.
+ ///
+ /// Note that this does not match the usual result for the cube root of
+ /// negative real numbers. For example, the real cube root of `-8` is `-2`,
+ /// but the principal complex cube root of `-8` is `1 + i√3`.
+ #[inline]
+ pub fn cbrt(&self) -> Self {
+ if self.im.is_zero() {
+ if self.re.is_sign_positive() {
+ // simple positive real ∛r, and copy `im` for its sign
+ Self::new(self.re.cbrt(), self.im)
+ } else {
+ // ∛(r e^(iπ)) = ∛r e^(iπ/3) = ∛r/2 + i∛r√3/2
+ // ∛(r e^(-iπ)) = ∛r e^(-iπ/3) = ∛r/2 - i∛r√3/2
+ let one = T::one();
+ let two = one + one;
+ let three = two + one;
+ let re = (-self.re).cbrt() / two;
+ let im = three.sqrt() * re;
+ if self.im.is_sign_positive() {
+ Self::new(re, im)
+ } else {
+ Self::new(re, -im)
+ }
+ }
+ } else if self.re.is_zero() {
+ // ∛(r e^(iπ/2)) = ∛r e^(iπ/6) = ∛r√3/2 + i∛r/2
+ // ∛(r e^(-iπ/2)) = ∛r e^(-iπ/6) = ∛r√3/2 - i∛r/2
+ let one = T::one();
+ let two = one + one;
+ let three = two + one;
+ let im = self.im.abs().cbrt() / two;
+ let re = three.sqrt() * im;
+ if self.im.is_sign_positive() {
+ Self::new(re, im)
+ } else {
+ Self::new(re, -im)
+ }
+ } else {
+ // formula: cbrt(r e^(it)) = cbrt(r) e^(it/3)
+ let one = T::one();
+ let three = one + one + one;
+ let (r, theta) = self.to_polar();
+ Self::from_polar(&(r.cbrt()), &(theta / three))
+ }
+ }
+
+ /// Raises `self` to a floating point power.
+ #[inline]
+ pub fn powf(&self, exp: T) -> Self {
+ // formula: x^y = (ρ e^(i θ))^y = ρ^y e^(i θ y)
+ // = from_polar(ρ^y, θ y)
+ let (r, theta) = self.to_polar();
+ Self::from_polar(&r.powf(exp), &(theta * exp))
+ }
+
+ /// Returns the logarithm of `self` with respect to an arbitrary base.
+ #[inline]
+ pub fn log(&self, base: T) -> Self {
+ // formula: log_y(x) = log_y(ρ e^(i θ))
+ // = log_y(ρ) + log_y(e^(i θ)) = log_y(ρ) + ln(e^(i θ)) / ln(y)
+ // = log_y(ρ) + i θ / ln(y)
+ let (r, theta) = self.to_polar();
+ Self::new(r.log(base), theta / base.ln())
+ }
+
+ /// Raises `self` to a complex power.
+ #[inline]
+ pub fn powc(&self, exp: Self) -> Self {
+ // formula: x^y = (a + i b)^(c + i d)
+ // = (ρ e^(i θ))^c (ρ e^(i θ))^(i d)
+ // where ρ=|x| and θ=arg(x)
+ // = ρ^c e^(−d θ) e^(i c θ) ρ^(i d)
+ // = p^c e^(−d θ) (cos(c θ)
+ // + i sin(c θ)) (cos(d ln(ρ)) + i sin(d ln(ρ)))
+ // = p^c e^(−d θ) (
+ // cos(c θ) cos(d ln(ρ)) − sin(c θ) sin(d ln(ρ))
+ // + i(cos(c θ) sin(d ln(ρ)) + sin(c θ) cos(d ln(ρ))))
+ // = p^c e^(−d θ) (cos(c θ + d ln(ρ)) + i sin(c θ + d ln(ρ)))
+ // = from_polar(p^c e^(−d θ), c θ + d ln(ρ))
+ let (r, theta) = self.to_polar();
+ Self::from_polar(
+ &(r.powf(exp.re) * (-exp.im * theta).exp()),
+ &(exp.re * theta + exp.im * r.ln()),
+ )
+ }
+
+ /// Raises a floating point number to the complex power `self`.
+ #[inline]
+ pub fn expf(&self, base: T) -> Self {
+ // formula: x^(a+bi) = x^a x^bi = x^a e^(b ln(x) i)
+ // = from_polar(x^a, b ln(x))
+ Self::from_polar(&base.powf(self.re), &(self.im * base.ln()))
+ }
+
+ /// Computes the sine of `self`.
+ #[inline]
+ pub fn sin(&self) -> Self {
+ // formula: sin(a + bi) = sin(a)cosh(b) + i*cos(a)sinh(b)
+ Self::new(
+ self.re.sin() * self.im.cosh(),
+ self.re.cos() * self.im.sinh(),
+ )
+ }
+
+ /// Computes the cosine of `self`.
+ #[inline]
+ pub fn cos(&self) -> Self {
+ // formula: cos(a + bi) = cos(a)cosh(b) - i*sin(a)sinh(b)
+ Self::new(
+ self.re.cos() * self.im.cosh(),
+ -self.re.sin() * self.im.sinh(),
+ )
+ }
+
+ /// Computes the tangent of `self`.
+ #[inline]
+ pub fn tan(&self) -> Self {
+ // formula: tan(a + bi) = (sin(2a) + i*sinh(2b))/(cos(2a) + cosh(2b))
+ let (two_re, two_im) = (self.re + self.re, self.im + self.im);
+ Self::new(two_re.sin(), two_im.sinh()).unscale(two_re.cos() + two_im.cosh())
+ }
+
+ /// Computes the principal value of the inverse sine of `self`.
+ ///
+ /// This function has two branch cuts:
+ ///
+ /// * `(-∞, -1)`, continuous from above.
+ /// * `(1, ∞)`, continuous from below.
+ ///
+ /// The branch satisfies `-π/2 ≤ Re(asin(z)) ≤ π/2`.
+ #[inline]
+ pub fn asin(&self) -> Self {
+ // formula: arcsin(z) = -i ln(sqrt(1-z^2) + iz)
+ let i = Self::i();
+ -i * ((Self::one() - self * self).sqrt() + i * self).ln()
+ }
+
+ /// Computes the principal value of the inverse cosine of `self`.
+ ///
+ /// This function has two branch cuts:
+ ///
+ /// * `(-∞, -1)`, continuous from above.
+ /// * `(1, ∞)`, continuous from below.
+ ///
+ /// The branch satisfies `0 ≤ Re(acos(z)) ≤ π`.
+ #[inline]
+ pub fn acos(&self) -> Self {
+ // formula: arccos(z) = -i ln(i sqrt(1-z^2) + z)
+ let i = Self::i();
+ -i * (i * (Self::one() - self * self).sqrt() + self).ln()
+ }
+
+ /// Computes the principal value of the inverse tangent of `self`.
+ ///
+ /// This function has two branch cuts:
+ ///
+ /// * `(-∞i, -i]`, continuous from the left.
+ /// * `[i, ∞i)`, continuous from the right.
+ ///
+ /// The branch satisfies `-π/2 ≤ Re(atan(z)) ≤ π/2`.
+ #[inline]
+ pub fn atan(&self) -> Self {
+ // formula: arctan(z) = (ln(1+iz) - ln(1-iz))/(2i)
+ let i = Self::i();
+ let one = Self::one();
+ let two = one + one;
+ if *self == i {
+ return Self::new(T::zero(), T::infinity());
+ } else if *self == -i {
+ return Self::new(T::zero(), -T::infinity());
+ }
+ ((one + i * self).ln() - (one - i * self).ln()) / (two * i)
+ }
+
+ /// Computes the hyperbolic sine of `self`.
+ #[inline]
+ pub fn sinh(&self) -> Self {
+ // formula: sinh(a + bi) = sinh(a)cos(b) + i*cosh(a)sin(b)
+ Self::new(
+ self.re.sinh() * self.im.cos(),
+ self.re.cosh() * self.im.sin(),
+ )
+ }
+
+ /// Computes the hyperbolic cosine of `self`.
+ #[inline]
+ pub fn cosh(&self) -> Self {
+ // formula: cosh(a + bi) = cosh(a)cos(b) + i*sinh(a)sin(b)
+ Self::new(
+ self.re.cosh() * self.im.cos(),
+ self.re.sinh() * self.im.sin(),
+ )
+ }
+
+ /// Computes the hyperbolic tangent of `self`.
+ #[inline]
+ pub fn tanh(&self) -> Self {
+ // formula: tanh(a + bi) = (sinh(2a) + i*sin(2b))/(cosh(2a) + cos(2b))
+ let (two_re, two_im) = (self.re + self.re, self.im + self.im);
+ Self::new(two_re.sinh(), two_im.sin()).unscale(two_re.cosh() + two_im.cos())
+ }
+
+ /// Computes the principal value of inverse hyperbolic sine of `self`.
+ ///
+ /// This function has two branch cuts:
+ ///
+ /// * `(-∞i, -i)`, continuous from the left.
+ /// * `(i, ∞i)`, continuous from the right.
+ ///
+ /// The branch satisfies `-π/2 ≤ Im(asinh(z)) ≤ π/2`.
+ #[inline]
+ pub fn asinh(&self) -> Self {
+ // formula: arcsinh(z) = ln(z + sqrt(1+z^2))
+ let one = Self::one();
+ (self + (one + self * self).sqrt()).ln()
+ }
+
+ /// Computes the principal value of inverse hyperbolic cosine of `self`.
+ ///
+ /// This function has one branch cut:
+ ///
+ /// * `(-∞, 1)`, continuous from above.
+ ///
+ /// The branch satisfies `-π ≤ Im(acosh(z)) ≤ π` and `0 ≤ Re(acosh(z)) < ∞`.
+ #[inline]
+ pub fn acosh(&self) -> Self {
+ // formula: arccosh(z) = 2 ln(sqrt((z+1)/2) + sqrt((z-1)/2))
+ let one = Self::one();
+ let two = one + one;
+ two * (((self + one) / two).sqrt() + ((self - one) / two).sqrt()).ln()
+ }
+
+ /// Computes the principal value of inverse hyperbolic tangent of `self`.
+ ///
+ /// This function has two branch cuts:
+ ///
+ /// * `(-∞, -1]`, continuous from above.
+ /// * `[1, ∞)`, continuous from below.
+ ///
+ /// The branch satisfies `-π/2 ≤ Im(atanh(z)) ≤ π/2`.
+ #[inline]
+ pub fn atanh(&self) -> Self {
+ // formula: arctanh(z) = (ln(1+z) - ln(1-z))/2
+ let one = Self::one();
+ let two = one + one;
+ if *self == one {
+ return Self::new(T::infinity(), T::zero());
+ } else if *self == -one {
+ return Self::new(-T::infinity(), T::zero());
+ }
+ ((one + self).ln() - (one - self).ln()) / two
+ }
+
+ /// Returns `1/self` using floating-point operations.
+ ///
+ /// This may be more accurate than the generic `self.inv()` in cases
+ /// where `self.norm_sqr()` would overflow to ∞ or underflow to 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_complex::Complex64;
+ /// let c = Complex64::new(1e300, 1e300);
+ ///
+ /// // The generic `inv()` will overflow.
+ /// assert!(!c.inv().is_normal());
+ ///
+ /// // But we can do better for `Float` types.
+ /// let inv = c.finv();
+ /// assert!(inv.is_normal());
+ /// println!("{:e}", inv);
+ ///
+ /// let expected = Complex64::new(5e-301, -5e-301);
+ /// assert!((inv - expected).norm() < 1e-315);
+ /// ```
+ #[inline]
+ pub fn finv(&self) -> Complex<T> {
+ let norm = self.norm();
+ self.conj() / norm / norm
+ }
+
+ /// Returns `self/other` using floating-point operations.
+ ///
+ /// This may be more accurate than the generic `Div` implementation in cases
+ /// where `other.norm_sqr()` would overflow to ∞ or underflow to 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_complex::Complex64;
+ /// let a = Complex64::new(2.0, 3.0);
+ /// let b = Complex64::new(1e300, 1e300);
+ ///
+ /// // Generic division will overflow.
+ /// assert!(!(a / b).is_normal());
+ ///
+ /// // But we can do better for `Float` types.
+ /// let quotient = a.fdiv(b);
+ /// assert!(quotient.is_normal());
+ /// println!("{:e}", quotient);
+ ///
+ /// let expected = Complex64::new(2.5e-300, 5e-301);
+ /// assert!((quotient - expected).norm() < 1e-315);
+ /// ```
+ #[inline]
+ pub fn fdiv(&self, other: Complex<T>) -> Complex<T> {
+ self * other.finv()
+ }
+}
+
+impl<T: Clone + FloatCore> Complex<T> {
+ /// Checks if the given complex number is NaN
+ #[inline]
+ pub fn is_nan(self) -> bool {
+ self.re.is_nan() || self.im.is_nan()
+ }
+
+ /// Checks if the given complex number is infinite
+ #[inline]
+ pub fn is_infinite(self) -> bool {
+ !self.is_nan() && (self.re.is_infinite() || self.im.is_infinite())
+ }
+
+ /// Checks if the given complex number is finite
+ #[inline]
+ pub fn is_finite(self) -> bool {
+ self.re.is_finite() && self.im.is_finite()
+ }
+
+ /// Checks if the given complex number is normal
+ #[inline]
+ pub fn is_normal(self) -> bool {
+ self.re.is_normal() && self.im.is_normal()
+ }
+}
+
+impl<T: Clone + Num> From<T> for Complex<T> {
+ #[inline]
+ fn from(re: T) -> Self {
+ Self::new(re, T::zero())
+ }
+}
+
+impl<'a, T: Clone + Num> From<&'a T> for Complex<T> {
+ #[inline]
+ fn from(re: &T) -> Self {
+ From::from(re.clone())
+ }
+}
+
+macro_rules! forward_ref_ref_binop {
+ (impl $imp:ident, $method:ident) => {
+ impl<'a, 'b, T: Clone + Num> $imp<&'b Complex<T>> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn $method(self, other: &Complex<T>) -> Self::Output {
+ self.clone().$method(other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop {
+ (impl $imp:ident, $method:ident) => {
+ impl<'a, T: Clone + Num> $imp<Complex<T>> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn $method(self, other: Complex<T>) -> Self::Output {
+ self.clone().$method(other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_ref_binop {
+ (impl $imp:ident, $method:ident) => {
+ impl<'a, T: Clone + Num> $imp<&'a Complex<T>> for Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn $method(self, other: &Complex<T>) -> Self::Output {
+ self.$method(other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! forward_all_binop {
+ (impl $imp:ident, $method:ident) => {
+ forward_ref_ref_binop!(impl $imp, $method);
+ forward_ref_val_binop!(impl $imp, $method);
+ forward_val_ref_binop!(impl $imp, $method);
+ };
+}
+
+/* arithmetic */
+forward_all_binop!(impl Add, add);
+
+// (a + i b) + (c + i d) == (a + c) + i (b + d)
+impl<T: Clone + Num> Add<Complex<T>> for Complex<T> {
+ type Output = Self;
+
+ #[inline]
+ fn add(self, other: Self) -> Self::Output {
+ Self::Output::new(self.re + other.re, self.im + other.im)
+ }
+}
+
+forward_all_binop!(impl Sub, sub);
+
+// (a + i b) - (c + i d) == (a - c) + i (b - d)
+impl<T: Clone + Num> Sub<Complex<T>> for Complex<T> {
+ type Output = Self;
+
+ #[inline]
+ fn sub(self, other: Self) -> Self::Output {
+ Self::Output::new(self.re - other.re, self.im - other.im)
+ }
+}
+
+forward_all_binop!(impl Mul, mul);
+
+// (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c)
+impl<T: Clone + Num> Mul<Complex<T>> for Complex<T> {
+ type Output = Self;
+
+ #[inline]
+ fn mul(self, other: Self) -> Self::Output {
+ let re = self.re.clone() * other.re.clone() - self.im.clone() * other.im.clone();
+ let im = self.re * other.im + self.im * other.re;
+ Self::Output::new(re, im)
+ }
+}
+
+// (a + i b) * (c + i d) + (e + i f) == ((a*c + e) - b*d) + i (a*d + (b*c + f))
+impl<T: Clone + Num + MulAdd<Output = T>> MulAdd<Complex<T>> for Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn mul_add(self, other: Complex<T>, add: Complex<T>) -> Complex<T> {
+ let re = self.re.clone().mul_add(other.re.clone(), add.re)
+ - (self.im.clone() * other.im.clone()); // FIXME: use mulsub when available in rust
+ let im = self.re.mul_add(other.im, self.im.mul_add(other.re, add.im));
+ Complex::new(re, im)
+ }
+}
+impl<'a, 'b, T: Clone + Num + MulAdd<Output = T>> MulAdd<&'b Complex<T>> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn mul_add(self, other: &Complex<T>, add: &Complex<T>) -> Complex<T> {
+ self.clone().mul_add(other.clone(), add.clone())
+ }
+}
+
+forward_all_binop!(impl Div, div);
+
+// (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d)
+// == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)]
+impl<T: Clone + Num> Div<Complex<T>> for Complex<T> {
+ type Output = Self;
+
+ #[inline]
+ fn div(self, other: Self) -> Self::Output {
+ let norm_sqr = other.norm_sqr();
+ let re = self.re.clone() * other.re.clone() + self.im.clone() * other.im.clone();
+ let im = self.im * other.re - self.re * other.im;
+ Self::Output::new(re / norm_sqr.clone(), im / norm_sqr)
+ }
+}
+
+forward_all_binop!(impl Rem, rem);
+
+// Attempts to identify the gaussian integer whose product with `modulus`
+// is closest to `self`.
+impl<T: Clone + Num> Rem<Complex<T>> for Complex<T> {
+ type Output = Self;
+
+ #[inline]
+ fn rem(self, modulus: Self) -> Self::Output {
+ let Complex { re, im } = self.clone() / modulus.clone();
+ // This is the gaussian integer corresponding to the true ratio
+ // rounded towards zero.
+ let (re0, im0) = (re.clone() - re % T::one(), im.clone() - im % T::one());
+ self - modulus * Self::Output::new(re0, im0)
+ }
+}
+
+// Op Assign
+
+mod opassign {
+ use core::ops::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign};
+
+ use traits::{MulAddAssign, NumAssign};
+
+ use Complex;
+
+ impl<T: Clone + NumAssign> AddAssign for Complex<T> {
+ fn add_assign(&mut self, other: Self) {
+ self.re += other.re;
+ self.im += other.im;
+ }
+ }
+
+ impl<T: Clone + NumAssign> SubAssign for Complex<T> {
+ fn sub_assign(&mut self, other: Self) {
+ self.re -= other.re;
+ self.im -= other.im;
+ }
+ }
+
+ impl<T: Clone + NumAssign> MulAssign for Complex<T> {
+ fn mul_assign(&mut self, other: Self) {
+ *self = self.clone() * other;
+ }
+ }
+
+ // (a + i b) * (c + i d) + (e + i f) == ((a*c + e) - b*d) + i (b*c + (a*d + f))
+ impl<T: Clone + NumAssign + MulAddAssign> MulAddAssign for Complex<T> {
+ fn mul_add_assign(&mut self, other: Complex<T>, add: Complex<T>) {
+ let a = self.re.clone();
+
+ self.re.mul_add_assign(other.re.clone(), add.re); // (a*c + e)
+ self.re -= self.im.clone() * other.im.clone(); // ((a*c + e) - b*d)
+
+ let mut adf = a;
+ adf.mul_add_assign(other.im, add.im); // (a*d + f)
+ self.im.mul_add_assign(other.re, adf); // (b*c + (a*d + f))
+ }
+ }
+
+ impl<'a, 'b, T: Clone + NumAssign + MulAddAssign> MulAddAssign<&'a Complex<T>, &'b Complex<T>>
+ for Complex<T>
+ {
+ fn mul_add_assign(&mut self, other: &Complex<T>, add: &Complex<T>) {
+ self.mul_add_assign(other.clone(), add.clone());
+ }
+ }
+
+ impl<T: Clone + NumAssign> DivAssign for Complex<T> {
+ fn div_assign(&mut self, other: Self) {
+ *self = self.clone() / other;
+ }
+ }
+
+ impl<T: Clone + NumAssign> RemAssign for Complex<T> {
+ fn rem_assign(&mut self, other: Self) {
+ *self = self.clone() % other;
+ }
+ }
+
+ impl<T: Clone + NumAssign> AddAssign<T> for Complex<T> {
+ fn add_assign(&mut self, other: T) {
+ self.re += other;
+ }
+ }
+
+ impl<T: Clone + NumAssign> SubAssign<T> for Complex<T> {
+ fn sub_assign(&mut self, other: T) {
+ self.re -= other;
+ }
+ }
+
+ impl<T: Clone + NumAssign> MulAssign<T> for Complex<T> {
+ fn mul_assign(&mut self, other: T) {
+ self.re *= other.clone();
+ self.im *= other;
+ }
+ }
+
+ impl<T: Clone + NumAssign> DivAssign<T> for Complex<T> {
+ fn div_assign(&mut self, other: T) {
+ self.re /= other.clone();
+ self.im /= other;
+ }
+ }
+
+ impl<T: Clone + NumAssign> RemAssign<T> for Complex<T> {
+ fn rem_assign(&mut self, other: T) {
+ *self = self.clone() % other;
+ }
+ }
+
+ macro_rules! forward_op_assign {
+ (impl $imp:ident, $method:ident) => {
+ impl<'a, T: Clone + NumAssign> $imp<&'a Complex<T>> for Complex<T> {
+ #[inline]
+ fn $method(&mut self, other: &Self) {
+ self.$method(other.clone())
+ }
+ }
+ impl<'a, T: Clone + NumAssign> $imp<&'a T> for Complex<T> {
+ #[inline]
+ fn $method(&mut self, other: &T) {
+ self.$method(other.clone())
+ }
+ }
+ };
+ }
+
+ forward_op_assign!(impl AddAssign, add_assign);
+ forward_op_assign!(impl SubAssign, sub_assign);
+ forward_op_assign!(impl MulAssign, mul_assign);
+ forward_op_assign!(impl DivAssign, div_assign);
+
+ impl<'a, T: Clone + NumAssign> RemAssign<&'a Complex<T>> for Complex<T> {
+ #[inline]
+ fn rem_assign(&mut self, other: &Self) {
+ self.rem_assign(other.clone())
+ }
+ }
+ impl<'a, T: Clone + NumAssign> RemAssign<&'a T> for Complex<T> {
+ #[inline]
+ fn rem_assign(&mut self, other: &T) {
+ self.rem_assign(other.clone())
+ }
+ }
+}
+
+impl<T: Clone + Num + Neg<Output = T>> Neg for Complex<T> {
+ type Output = Self;
+
+ #[inline]
+ fn neg(self) -> Self::Output {
+ Self::Output::new(-self.re, -self.im)
+ }
+}
+
+impl<'a, T: Clone + Num + Neg<Output = T>> Neg for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn neg(self) -> Self::Output {
+ -self.clone()
+ }
+}
+
+impl<T: Clone + Num + Neg<Output = T>> Inv for Complex<T> {
+ type Output = Self;
+
+ #[inline]
+ fn inv(self) -> Self::Output {
+ (&self).inv()
+ }
+}
+
+impl<'a, T: Clone + Num + Neg<Output = T>> Inv for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn inv(self) -> Self::Output {
+ self.inv()
+ }
+}
+
+macro_rules! real_arithmetic {
+ (@forward $imp:ident::$method:ident for $($real:ident),*) => (
+ impl<'a, T: Clone + Num> $imp<&'a T> for Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn $method(self, other: &T) -> Self::Output {
+ self.$method(other.clone())
+ }
+ }
+ impl<'a, T: Clone + Num> $imp<T> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn $method(self, other: T) -> Self::Output {
+ self.clone().$method(other)
+ }
+ }
+ impl<'a, 'b, T: Clone + Num> $imp<&'a T> for &'b Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn $method(self, other: &T) -> Self::Output {
+ self.clone().$method(other.clone())
+ }
+ }
+ $(
+ impl<'a> $imp<&'a Complex<$real>> for $real {
+ type Output = Complex<$real>;
+
+ #[inline]
+ fn $method(self, other: &Complex<$real>) -> Complex<$real> {
+ self.$method(other.clone())
+ }
+ }
+ impl<'a> $imp<Complex<$real>> for &'a $real {
+ type Output = Complex<$real>;
+
+ #[inline]
+ fn $method(self, other: Complex<$real>) -> Complex<$real> {
+ self.clone().$method(other)
+ }
+ }
+ impl<'a, 'b> $imp<&'a Complex<$real>> for &'b $real {
+ type Output = Complex<$real>;
+
+ #[inline]
+ fn $method(self, other: &Complex<$real>) -> Complex<$real> {
+ self.clone().$method(other.clone())
+ }
+ }
+ )*
+ );
+ ($($real:ident),*) => (
+ real_arithmetic!(@forward Add::add for $($real),*);
+ real_arithmetic!(@forward Sub::sub for $($real),*);
+ real_arithmetic!(@forward Mul::mul for $($real),*);
+ real_arithmetic!(@forward Div::div for $($real),*);
+ real_arithmetic!(@forward Rem::rem for $($real),*);
+
+ $(
+ impl Add<Complex<$real>> for $real {
+ type Output = Complex<$real>;
+
+ #[inline]
+ fn add(self, other: Complex<$real>) -> Self::Output {
+ Self::Output::new(self + other.re, other.im)
+ }
+ }
+
+ impl Sub<Complex<$real>> for $real {
+ type Output = Complex<$real>;
+
+ #[inline]
+ fn sub(self, other: Complex<$real>) -> Self::Output {
+ Self::Output::new(self - other.re, $real::zero() - other.im)
+ }
+ }
+
+ impl Mul<Complex<$real>> for $real {
+ type Output = Complex<$real>;
+
+ #[inline]
+ fn mul(self, other: Complex<$real>) -> Self::Output {
+ Self::Output::new(self * other.re, self * other.im)
+ }
+ }
+
+ impl Div<Complex<$real>> for $real {
+ type Output = Complex<$real>;
+
+ #[inline]
+ fn div(self, other: Complex<$real>) -> Self::Output {
+ // a / (c + i d) == [a * (c - i d)] / (c*c + d*d)
+ let norm_sqr = other.norm_sqr();
+ Self::Output::new(self * other.re / norm_sqr.clone(),
+ $real::zero() - self * other.im / norm_sqr)
+ }
+ }
+
+ impl Rem<Complex<$real>> for $real {
+ type Output = Complex<$real>;
+
+ #[inline]
+ fn rem(self, other: Complex<$real>) -> Self::Output {
+ Self::Output::new(self, Self::zero()) % other
+ }
+ }
+ )*
+ );
+}
+
+impl<T: Clone + Num> Add<T> for Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn add(self, other: T) -> Self::Output {
+ Self::Output::new(self.re + other, self.im)
+ }
+}
+
+impl<T: Clone + Num> Sub<T> for Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn sub(self, other: T) -> Self::Output {
+ Self::Output::new(self.re - other, self.im)
+ }
+}
+
+impl<T: Clone + Num> Mul<T> for Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn mul(self, other: T) -> Self::Output {
+ Self::Output::new(self.re * other.clone(), self.im * other)
+ }
+}
+
+impl<T: Clone + Num> Div<T> for Complex<T> {
+ type Output = Self;
+
+ #[inline]
+ fn div(self, other: T) -> Self::Output {
+ Self::Output::new(self.re / other.clone(), self.im / other)
+ }
+}
+
+impl<T: Clone + Num> Rem<T> for Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn rem(self, other: T) -> Self::Output {
+ Self::Output::new(self.re % other.clone(), self.im % other)
+ }
+}
+
+#[cfg(not(has_i128))]
+real_arithmetic!(usize, u8, u16, u32, u64, isize, i8, i16, i32, i64, f32, f64);
+#[cfg(has_i128)]
+real_arithmetic!(usize, u8, u16, u32, u64, u128, isize, i8, i16, i32, i64, i128, f32, f64);
+
+/* constants */
+impl<T: Clone + Num> Zero for Complex<T> {
+ #[inline]
+ fn zero() -> Self {
+ Self::new(Zero::zero(), Zero::zero())
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.re.is_zero() && self.im.is_zero()
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.re.set_zero();
+ self.im.set_zero();
+ }
+}
+
+impl<T: Clone + Num> One for Complex<T> {
+ #[inline]
+ fn one() -> Self {
+ Self::new(One::one(), Zero::zero())
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.re.is_one() && self.im.is_zero()
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.re.set_one();
+ self.im.set_zero();
+ }
+}
+
+macro_rules! write_complex {
+ ($f:ident, $t:expr, $prefix:expr, $re:expr, $im:expr, $T:ident) => {{
+ let abs_re = if $re < Zero::zero() {
+ $T::zero() - $re.clone()
+ } else {
+ $re.clone()
+ };
+ let abs_im = if $im < Zero::zero() {
+ $T::zero() - $im.clone()
+ } else {
+ $im.clone()
+ };
+
+ return if let Some(prec) = $f.precision() {
+ fmt_re_im(
+ $f,
+ $re < $T::zero(),
+ $im < $T::zero(),
+ format_args!(concat!("{:.1$", $t, "}"), abs_re, prec),
+ format_args!(concat!("{:.1$", $t, "}"), abs_im, prec),
+ )
+ } else {
+ fmt_re_im(
+ $f,
+ $re < $T::zero(),
+ $im < $T::zero(),
+ format_args!(concat!("{:", $t, "}"), abs_re),
+ format_args!(concat!("{:", $t, "}"), abs_im),
+ )
+ };
+
+ fn fmt_re_im(
+ f: &mut fmt::Formatter,
+ re_neg: bool,
+ im_neg: bool,
+ real: fmt::Arguments,
+ imag: fmt::Arguments,
+ ) -> fmt::Result {
+ let prefix = if f.alternate() { $prefix } else { "" };
+ let sign = if re_neg {
+ "-"
+ } else if f.sign_plus() {
+ "+"
+ } else {
+ ""
+ };
+
+ if im_neg {
+ fmt_complex(
+ f,
+ format_args!(
+ "{}{pre}{re}-{pre}{im}i",
+ sign,
+ re = real,
+ im = imag,
+ pre = prefix
+ ),
+ )
+ } else {
+ fmt_complex(
+ f,
+ format_args!(
+ "{}{pre}{re}+{pre}{im}i",
+ sign,
+ re = real,
+ im = imag,
+ pre = prefix
+ ),
+ )
+ }
+ }
+
+ #[cfg(feature = "std")]
+ // Currently, we can only apply width using an intermediate `String` (and thus `std`)
+ fn fmt_complex(f: &mut fmt::Formatter, complex: fmt::Arguments) -> fmt::Result {
+ use std::string::ToString;
+ if let Some(width) = f.width() {
+ write!(f, "{0: >1$}", complex.to_string(), width)
+ } else {
+ write!(f, "{}", complex)
+ }
+ }
+
+ #[cfg(not(feature = "std"))]
+ fn fmt_complex(f: &mut fmt::Formatter, complex: fmt::Arguments) -> fmt::Result {
+ write!(f, "{}", complex)
+ }
+ }};
+}
+
+/* string conversions */
+impl<T> fmt::Display for Complex<T>
+where
+ T: fmt::Display + Num + PartialOrd + Clone,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write_complex!(f, "", "", self.re, self.im, T)
+ }
+}
+
+impl<T> fmt::LowerExp for Complex<T>
+where
+ T: fmt::LowerExp + Num + PartialOrd + Clone,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write_complex!(f, "e", "", self.re, self.im, T)
+ }
+}
+
+impl<T> fmt::UpperExp for Complex<T>
+where
+ T: fmt::UpperExp + Num + PartialOrd + Clone,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write_complex!(f, "E", "", self.re, self.im, T)
+ }
+}
+
+impl<T> fmt::LowerHex for Complex<T>
+where
+ T: fmt::LowerHex + Num + PartialOrd + Clone,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write_complex!(f, "x", "0x", self.re, self.im, T)
+ }
+}
+
+impl<T> fmt::UpperHex for Complex<T>
+where
+ T: fmt::UpperHex + Num + PartialOrd + Clone,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write_complex!(f, "X", "0x", self.re, self.im, T)
+ }
+}
+
+impl<T> fmt::Octal for Complex<T>
+where
+ T: fmt::Octal + Num + PartialOrd + Clone,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write_complex!(f, "o", "0o", self.re, self.im, T)
+ }
+}
+
+impl<T> fmt::Binary for Complex<T>
+where
+ T: fmt::Binary + Num + PartialOrd + Clone,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write_complex!(f, "b", "0b", self.re, self.im, T)
+ }
+}
+
+#[allow(deprecated)] // `trim_left_matches` and `trim_right_matches` since 1.33
+fn from_str_generic<T, E, F>(s: &str, from: F) -> Result<Complex<T>, ParseComplexError<E>>
+where
+ F: Fn(&str) -> Result<T, E>,
+ T: Clone + Num,
+{
+ #[cfg(not(feature = "std"))]
+ #[inline]
+ fn is_whitespace(c: char) -> bool {
+ match c {
+ ' ' | '\x09'...'\x0d' => true,
+ _ if c > '\x7f' => match c {
+ '\u{0085}' | '\u{00a0}' | '\u{1680}' => true,
+ '\u{2000}'...'\u{200a}' => true,
+ '\u{2028}' | '\u{2029}' | '\u{202f}' | '\u{205f}' => true,
+ '\u{3000}' => true,
+ _ => false,
+ },
+ _ => false,
+ }
+ }
+
+ #[cfg(feature = "std")]
+ let is_whitespace = char::is_whitespace;
+
+ let imag = match s.rfind('j') {
+ None => 'i',
+ _ => 'j',
+ };
+
+ let mut neg_b = false;
+ let mut a = s;
+ let mut b = "";
+
+ for (i, w) in s.as_bytes().windows(2).enumerate() {
+ let p = w[0];
+ let c = w[1];
+
+ // ignore '+'/'-' if part of an exponent
+ if (c == b'+' || c == b'-') && !(p == b'e' || p == b'E') {
+ // trim whitespace around the separator
+ a = &s[..i + 1].trim_right_matches(is_whitespace);
+ b = &s[i + 2..].trim_left_matches(is_whitespace);
+ neg_b = c == b'-';
+
+ if b.is_empty() || (neg_b && b.starts_with('-')) {
+ return Err(ParseComplexError::new());
+ }
+ break;
+ }
+ }
+
+ // split off real and imaginary parts
+ if b.is_empty() {
+ // input was either pure real or pure imaginary
+ b = match a.ends_with(imag) {
+ false => "0i",
+ true => "0",
+ };
+ }
+
+ let re;
+ let neg_re;
+ let im;
+ let neg_im;
+ if a.ends_with(imag) {
+ im = a;
+ neg_im = false;
+ re = b;
+ neg_re = neg_b;
+ } else if b.ends_with(imag) {
+ re = a;
+ neg_re = false;
+ im = b;
+ neg_im = neg_b;
+ } else {
+ return Err(ParseComplexError::new());
+ }
+
+ // parse re
+ let re = try!(from(re).map_err(ParseComplexError::from_error));
+ let re = if neg_re { T::zero() - re } else { re };
+
+ // pop imaginary unit off
+ let mut im = &im[..im.len() - 1];
+ // handle im == "i" or im == "-i"
+ if im.is_empty() || im == "+" {
+ im = "1";
+ } else if im == "-" {
+ im = "-1";
+ }
+
+ // parse im
+ let im = try!(from(im).map_err(ParseComplexError::from_error));
+ let im = if neg_im { T::zero() - im } else { im };
+
+ Ok(Complex::new(re, im))
+}
+
+impl<T> FromStr for Complex<T>
+where
+ T: FromStr + Num + Clone,
+{
+ type Err = ParseComplexError<T::Err>;
+
+ /// Parses `a +/- bi`; `ai +/- b`; `a`; or `bi` where `a` and `b` are of type `T`
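+ ///
+ /// A minimal sketch of the accepted format (assuming the `Complex64` alias):
+ ///
+ /// ```
+ /// use num_complex::Complex64;
+ /// use std::str::FromStr;
+ ///
+ /// // real part first, imaginary part carries the trailing `i`
+ /// let z = Complex64::from_str("1.5 - 2i").unwrap();
+ /// assert_eq!(z, Complex64::new(1.5, -2.0));
+ /// ```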
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ from_str_generic(s, T::from_str)
+ }
+}
+
+impl<T: Num + Clone> Num for Complex<T> {
+ type FromStrRadixErr = ParseComplexError<T::FromStrRadixErr>;
+
+ /// Parses `a +/- bi`; `ai +/- b`; `a`; or `bi` where `a` and `b` are of type `T`
+ fn from_str_radix(s: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {
+ from_str_generic(s, |x| -> Result<T, T::FromStrRadixErr> {
+ T::from_str_radix(x, radix)
+ })
+ }
+}
+
+impl<T: Num + Clone> Sum for Complex<T> {
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = Self>,
+ {
+ iter.fold(Self::zero(), |acc, c| acc + c)
+ }
+}
+
+impl<'a, T: 'a + Num + Clone> Sum<&'a Complex<T>> for Complex<T> {
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Complex<T>>,
+ {
+ iter.fold(Self::zero(), |acc, c| acc + c)
+ }
+}
+
+impl<T: Num + Clone> Product for Complex<T> {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = Self>,
+ {
+ iter.fold(Self::one(), |acc, c| acc * c)
+ }
+}
+
+impl<'a, T: 'a + Num + Clone> Product<&'a Complex<T>> for Complex<T> {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Complex<T>>,
+ {
+ iter.fold(Self::one(), |acc, c| acc * c)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<T> serde::Serialize for Complex<T>
+where
+ T: serde::Serialize,
+{
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ (&self.re, &self.im).serialize(serializer)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, T> serde::Deserialize<'de> for Complex<T>
+where
+ T: serde::Deserialize<'de> + Num + Clone,
+{
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let (re, im) = try!(serde::Deserialize::deserialize(deserializer));
+ Ok(Self::new(re, im))
+ }
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ParseComplexError<E> {
+ kind: ComplexErrorKind<E>,
+}
+
+#[derive(Debug, PartialEq)]
+enum ComplexErrorKind<E> {
+ ParseError(E),
+ ExprError,
+}
+
+impl<E> ParseComplexError<E> {
+ fn new() -> Self {
+ ParseComplexError {
+ kind: ComplexErrorKind::ExprError,
+ }
+ }
+
+ fn from_error(error: E) -> Self {
+ ParseComplexError {
+ kind: ComplexErrorKind::ParseError(error),
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+impl<E: Error> Error for ParseComplexError<E> {
+ fn description(&self) -> &str {
+ match self.kind {
+ ComplexErrorKind::ParseError(ref e) => e.description(),
+ ComplexErrorKind::ExprError => "invalid or unsupported complex expression",
+ }
+ }
+}
+
+impl<E: fmt::Display> fmt::Display for ParseComplexError<E> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self.kind {
+ ComplexErrorKind::ParseError(ref e) => e.fmt(f),
+ ComplexErrorKind::ExprError => "invalid or unsupported complex expression".fmt(f),
+ }
+ }
+}
+
+#[cfg(test)]
+fn hash<T: hash::Hash>(x: &T) -> u64 {
+ use std::collections::hash_map::RandomState;
+ use std::hash::{BuildHasher, Hasher};
+ let mut hasher = <RandomState as BuildHasher>::Hasher::new();
+ x.hash(&mut hasher);
+ hasher.finish()
+}
+
+#[cfg(test)]
+mod test {
+ #![allow(non_upper_case_globals)]
+
+ use super::{Complex, Complex64};
+ use core::f64;
+ use core::str::FromStr;
+
+ use std::string::{String, ToString};
+
+ use traits::{Num, One, Zero};
+
+ pub const _0_0i: Complex64 = Complex { re: 0.0, im: 0.0 };
+ pub const _1_0i: Complex64 = Complex { re: 1.0, im: 0.0 };
+ pub const _1_1i: Complex64 = Complex { re: 1.0, im: 1.0 };
+ pub const _0_1i: Complex64 = Complex { re: 0.0, im: 1.0 };
+ pub const _neg1_1i: Complex64 = Complex { re: -1.0, im: 1.0 };
+ pub const _05_05i: Complex64 = Complex { re: 0.5, im: 0.5 };
+ pub const all_consts: [Complex64; 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i];
+ pub const _4_2i: Complex64 = Complex { re: 4.0, im: 2.0 };
+
+ #[test]
+ fn test_consts() {
+ // check our constants are what Complex::new creates
+ fn test(c: Complex64, r: f64, i: f64) {
+ assert_eq!(c, Complex::new(r, i));
+ }
+ test(_0_0i, 0.0, 0.0);
+ test(_1_0i, 1.0, 0.0);
+ test(_1_1i, 1.0, 1.0);
+ test(_neg1_1i, -1.0, 1.0);
+ test(_05_05i, 0.5, 0.5);
+
+ assert_eq!(_0_0i, Zero::zero());
+ assert_eq!(_1_0i, One::one());
+ }
+
+ #[test]
+ fn test_scale_unscale() {
+ assert_eq!(_05_05i.scale(2.0), _1_1i);
+ assert_eq!(_1_1i.unscale(2.0), _05_05i);
+ for &c in all_consts.iter() {
+ assert_eq!(c.scale(2.0).unscale(2.0), c);
+ }
+ }
+
+ #[test]
+ fn test_conj() {
+ for &c in all_consts.iter() {
+ assert_eq!(c.conj(), Complex::new(c.re, -c.im));
+ assert_eq!(c.conj().conj(), c);
+ }
+ }
+
+ #[test]
+ fn test_inv() {
+ assert_eq!(_1_1i.inv(), _05_05i.conj());
+ assert_eq!(_1_0i.inv(), _1_0i.inv());
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_divide_by_zero_natural() {
+ let n = Complex::new(2, 3);
+ let d = Complex::new(0, 0);
+ let _x = n / d;
+ }
+
+ #[test]
+ fn test_inv_zero() {
+ // FIXME #20: should this really fail, or just NaN?
+ assert!(_0_0i.inv().is_nan());
+ }
+
+ #[test]
+ fn test_l1_norm() {
+ assert_eq!(_0_0i.l1_norm(), 0.0);
+ assert_eq!(_1_0i.l1_norm(), 1.0);
+ assert_eq!(_1_1i.l1_norm(), 2.0);
+ assert_eq!(_0_1i.l1_norm(), 1.0);
+ assert_eq!(_neg1_1i.l1_norm(), 2.0);
+ assert_eq!(_05_05i.l1_norm(), 1.0);
+ assert_eq!(_4_2i.l1_norm(), 6.0);
+ }
+
+ #[test]
+ fn test_pow() {
+ for c in all_consts.iter() {
+ assert_eq!(c.powi(0), _1_0i);
+ let mut pos = _1_0i;
+ let mut neg = _1_0i;
+ for i in 1i32..20 {
+ pos *= c;
+ assert_eq!(pos, c.powi(i));
+ if c.is_zero() {
+ assert!(c.powi(-i).is_nan());
+ } else {
+ neg /= c;
+ assert_eq!(neg, c.powi(-i));
+ }
+ }
+ }
+ }
+
+ #[cfg(feature = "std")]
+ mod float {
+ use super::*;
+ use traits::{Float, Pow};
+
+ #[test]
+ #[cfg_attr(target_arch = "x86", ignore)]
+ // FIXME #7158: (maybe?) currently failing on x86.
+ fn test_norm() {
+ fn test(c: Complex64, ns: f64) {
+ assert_eq!(c.norm_sqr(), ns);
+ assert_eq!(c.norm(), ns.sqrt())
+ }
+ test(_0_0i, 0.0);
+ test(_1_0i, 1.0);
+ test(_1_1i, 2.0);
+ test(_neg1_1i, 2.0);
+ test(_05_05i, 0.5);
+ }
+
+ #[test]
+ fn test_arg() {
+ fn test(c: Complex64, arg: f64) {
+ assert!((c.arg() - arg).abs() < 1.0e-6)
+ }
+ test(_1_0i, 0.0);
+ test(_1_1i, 0.25 * f64::consts::PI);
+ test(_neg1_1i, 0.75 * f64::consts::PI);
+ test(_05_05i, 0.25 * f64::consts::PI);
+ }
+
+ #[test]
+ fn test_polar_conv() {
+ fn test(c: Complex64) {
+ let (r, theta) = c.to_polar();
+ assert!((c - Complex::from_polar(&r, &theta)).norm() < 1e-6);
+ }
+ for &c in all_consts.iter() {
+ test(c);
+ }
+ }
+
+ fn close(a: Complex64, b: Complex64) -> bool {
+ close_to_tol(a, b, 1e-10)
+ }
+
+ fn close_to_tol(a: Complex64, b: Complex64, tol: f64) -> bool {
+ // returns true if a and b are reasonably close
+ let close = (a == b) || (a - b).norm() < tol;
+ if !close {
+ println!("{:?} != {:?}", a, b);
+ }
+ close
+ }
+
+ #[test]
+ fn test_exp() {
+ assert!(close(_1_0i.exp(), _1_0i.scale(f64::consts::E)));
+ assert!(close(_0_0i.exp(), _1_0i));
+ assert!(close(_0_1i.exp(), Complex::new(1.0.cos(), 1.0.sin())));
+ assert!(close(_05_05i.exp() * _05_05i.exp(), _1_1i.exp()));
+ assert!(close(
+ _0_1i.scale(-f64::consts::PI).exp(),
+ _1_0i.scale(-1.0)
+ ));
+ for &c in all_consts.iter() {
+ // e^conj(z) = conj(e^z)
+ assert!(close(c.conj().exp(), c.exp().conj()));
+ // e^(z + 2 pi i) = e^z
+ assert!(close(
+ c.exp(),
+ (c + _0_1i.scale(f64::consts::PI * 2.0)).exp()
+ ));
+ }
+ }
+
+ #[test]
+ fn test_ln() {
+ assert!(close(_1_0i.ln(), _0_0i));
+ assert!(close(_0_1i.ln(), _0_1i.scale(f64::consts::PI / 2.0)));
+ assert!(close(_0_0i.ln(), Complex::new(f64::neg_infinity(), 0.0)));
+ assert!(close(
+ (_neg1_1i * _05_05i).ln(),
+ _neg1_1i.ln() + _05_05i.ln()
+ ));
+ for &c in all_consts.iter() {
+ // ln(conj(z)) = conj(ln(z))
+ assert!(close(c.conj().ln(), c.ln().conj()));
+ // for this branch, -pi <= arg(ln(z)) <= pi
+ assert!(-f64::consts::PI <= c.ln().arg() && c.ln().arg() <= f64::consts::PI);
+ }
+ }
+
+ #[test]
+ fn test_powc() {
+ let a = Complex::new(2.0, -3.0);
+ let b = Complex::new(3.0, 0.0);
+ assert!(close(a.powc(b), a.powf(b.re)));
+ assert!(close(b.powc(a), a.expf(b.re)));
+ let c = Complex::new(1.0 / 3.0, 0.1);
+ assert!(close_to_tol(
+ a.powc(c),
+ Complex::new(1.65826, -0.33502),
+ 1e-5
+ ));
+ }
+
+ #[test]
+ fn test_powf() {
+ let c = Complex64::new(2.0, -1.0);
+ let expected = Complex64::new(-0.8684746, -16.695934);
+ assert!(close_to_tol(c.powf(3.5), expected, 1e-5));
+ assert!(close_to_tol(Pow::pow(c, 3.5_f64), expected, 1e-5));
+ assert!(close_to_tol(Pow::pow(c, 3.5_f32), expected, 1e-5));
+ }
+
+ #[test]
+ fn test_log() {
+ let c = Complex::new(2.0, -1.0);
+ let r = c.log(10.0);
+ assert!(close_to_tol(r, Complex::new(0.349485, -0.20135958), 1e-5));
+ }
+
+ #[test]
+ fn test_some_expf_cases() {
+ let c = Complex::new(2.0, -1.0);
+ let r = c.expf(10.0);
+ assert!(close_to_tol(r, Complex::new(-66.82015, -74.39803), 1e-5));
+
+ let c = Complex::new(5.0, -2.0);
+ let r = c.expf(3.4);
+ assert!(close_to_tol(r, Complex::new(-349.25, -290.63), 1e-2));
+
+ let c = Complex::new(-1.5, 2.0 / 3.0);
+ let r = c.expf(1.0 / 3.0);
+ assert!(close_to_tol(r, Complex::new(3.8637, -3.4745), 1e-2));
+ }
+
+ #[test]
+ fn test_sqrt() {
+ assert!(close(_0_0i.sqrt(), _0_0i));
+ assert!(close(_1_0i.sqrt(), _1_0i));
+ assert!(close(Complex::new(-1.0, 0.0).sqrt(), _0_1i));
+ assert!(close(Complex::new(-1.0, -0.0).sqrt(), _0_1i.scale(-1.0)));
+ assert!(close(_0_1i.sqrt(), _05_05i.scale(2.0.sqrt())));
+ for &c in all_consts.iter() {
+ // sqrt(conj(z)) = conj(sqrt(z))
+ assert!(close(c.conj().sqrt(), c.sqrt().conj()));
+ // for this branch, -pi/2 <= arg(sqrt(z)) <= pi/2
+ assert!(
+ -f64::consts::FRAC_PI_2 <= c.sqrt().arg()
+ && c.sqrt().arg() <= f64::consts::FRAC_PI_2
+ );
+ // sqrt(z) * sqrt(z) = z
+ assert!(close(c.sqrt() * c.sqrt(), c));
+ }
+ }
+
+ #[test]
+ fn test_sqrt_real() {
+ for n in (0..100).map(f64::from) {
+ // √(n² + 0i) = n + 0i
+ let n2 = n * n;
+ assert_eq!(Complex64::new(n2, 0.0).sqrt(), Complex64::new(n, 0.0));
+ // √(-n² + 0i) = 0 + ni
+ assert_eq!(Complex64::new(-n2, 0.0).sqrt(), Complex64::new(0.0, n));
+ // √(-n² - 0i) = 0 - ni
+ assert_eq!(Complex64::new(-n2, -0.0).sqrt(), Complex64::new(0.0, -n));
+ }
+ }
+
+ #[test]
+ fn test_sqrt_imag() {
+ for n in (0..100).map(f64::from) {
+ // √(0 + n²i) = n e^(iπ/4)
+ let n2 = n * n;
+ assert!(close(
+ Complex64::new(0.0, n2).sqrt(),
+ Complex64::from_polar(&n, &(f64::consts::FRAC_PI_4))
+ ));
+ // √(0 - n²i) = n e^(-iπ/4)
+ assert!(close(
+ Complex64::new(0.0, -n2).sqrt(),
+ Complex64::from_polar(&n, &(-f64::consts::FRAC_PI_4))
+ ));
+ }
+ }
+
+ #[test]
+ fn test_cbrt() {
+ assert!(close(_0_0i.cbrt(), _0_0i));
+ assert!(close(_1_0i.cbrt(), _1_0i));
+ assert!(close(
+ Complex::new(-1.0, 0.0).cbrt(),
+ Complex::new(0.5, 0.75.sqrt())
+ ));
+ assert!(close(
+ Complex::new(-1.0, -0.0).cbrt(),
+ Complex::new(0.5, -0.75.sqrt())
+ ));
+ assert!(close(_0_1i.cbrt(), Complex::new(0.75.sqrt(), 0.5)));
+ assert!(close(_0_1i.conj().cbrt(), Complex::new(0.75.sqrt(), -0.5)));
+ for &c in all_consts.iter() {
+ // cbrt(conj(z)) = conj(cbrt(z))
+ assert!(close(c.conj().cbrt(), c.cbrt().conj()));
+ // for this branch, -pi/3 <= arg(cbrt(z)) <= pi/3
+ assert!(
+ -f64::consts::FRAC_PI_3 <= c.cbrt().arg()
+ && c.cbrt().arg() <= f64::consts::FRAC_PI_3
+ );
+ // cbrt(z) * cbrt(z) * cbrt(z) = z
+ assert!(close(c.cbrt() * c.cbrt() * c.cbrt(), c));
+ }
+ }
+
+ #[test]
+ fn test_cbrt_real() {
+ for n in (0..100).map(f64::from) {
+ // ∛(n³ + 0i) = n + 0i
+ let n3 = n * n * n;
+ assert!(close(
+ Complex64::new(n3, 0.0).cbrt(),
+ Complex64::new(n, 0.0)
+ ));
+ // ∛(-n³ + 0i) = n e^(iπ/3)
+ assert!(close(
+ Complex64::new(-n3, 0.0).cbrt(),
+ Complex64::from_polar(&n, &(f64::consts::FRAC_PI_3))
+ ));
+ // ∛(-n³ - 0i) = n e^(-iπ/3)
+ assert!(close(
+ Complex64::new(-n3, -0.0).cbrt(),
+ Complex64::from_polar(&n, &(-f64::consts::FRAC_PI_3))
+ ));
+ }
+ }
+
+ #[test]
+ fn test_cbrt_imag() {
+ for n in (0..100).map(f64::from) {
+ // ∛(0 + n³i) = n e^(iπ/6)
+ let n3 = n * n * n;
+ assert!(close(
+ Complex64::new(0.0, n3).cbrt(),
+ Complex64::from_polar(&n, &(f64::consts::FRAC_PI_6))
+ ));
+ // ∛(0 - n³i) = n e^(-iπ/6)
+ assert!(close(
+ Complex64::new(0.0, -n3).cbrt(),
+ Complex64::from_polar(&n, &(-f64::consts::FRAC_PI_6))
+ ));
+ }
+ }
+
+ #[test]
+ fn test_sin() {
+ assert!(close(_0_0i.sin(), _0_0i));
+ assert!(close(_1_0i.scale(f64::consts::PI * 2.0).sin(), _0_0i));
+ assert!(close(_0_1i.sin(), _0_1i.scale(1.0.sinh())));
+ for &c in all_consts.iter() {
+ // sin(conj(z)) = conj(sin(z))
+ assert!(close(c.conj().sin(), c.sin().conj()));
+ // sin(-z) = -sin(z)
+ assert!(close(c.scale(-1.0).sin(), c.sin().scale(-1.0)));
+ }
+ }
+
+ #[test]
+ fn test_cos() {
+ assert!(close(_0_0i.cos(), _1_0i));
+ assert!(close(_1_0i.scale(f64::consts::PI * 2.0).cos(), _1_0i));
+ assert!(close(_0_1i.cos(), _1_0i.scale(1.0.cosh())));
+ for &c in all_consts.iter() {
+ // cos(conj(z)) = conj(cos(z))
+ assert!(close(c.conj().cos(), c.cos().conj()));
+ // cos(-z) = cos(z)
+ assert!(close(c.scale(-1.0).cos(), c.cos()));
+ }
+ }
+
+ #[test]
+ fn test_tan() {
+ assert!(close(_0_0i.tan(), _0_0i));
+ assert!(close(_1_0i.scale(f64::consts::PI / 4.0).tan(), _1_0i));
+ assert!(close(_1_0i.scale(f64::consts::PI).tan(), _0_0i));
+ for &c in all_consts.iter() {
+ // tan(conj(z)) = conj(tan(z))
+ assert!(close(c.conj().tan(), c.tan().conj()));
+ // tan(-z) = -tan(z)
+ assert!(close(c.scale(-1.0).tan(), c.tan().scale(-1.0)));
+ }
+ }
+
+ #[test]
+ fn test_asin() {
+ assert!(close(_0_0i.asin(), _0_0i));
+ assert!(close(_1_0i.asin(), _1_0i.scale(f64::consts::PI / 2.0)));
+ assert!(close(
+ _1_0i.scale(-1.0).asin(),
+ _1_0i.scale(-f64::consts::PI / 2.0)
+ ));
+ assert!(close(_0_1i.asin(), _0_1i.scale((1.0 + 2.0.sqrt()).ln())));
+ for &c in all_consts.iter() {
+ // asin(conj(z)) = conj(asin(z))
+ assert!(close(c.conj().asin(), c.asin().conj()));
+ // asin(-z) = -asin(z)
+ assert!(close(c.scale(-1.0).asin(), c.asin().scale(-1.0)));
+ // for this branch, -pi/2 <= asin(z).re <= pi/2
+ assert!(
+ -f64::consts::PI / 2.0 <= c.asin().re && c.asin().re <= f64::consts::PI / 2.0
+ );
+ }
+ }
+
+ #[test]
+ fn test_acos() {
+ assert!(close(_0_0i.acos(), _1_0i.scale(f64::consts::PI / 2.0)));
+ assert!(close(_1_0i.acos(), _0_0i));
+ assert!(close(
+ _1_0i.scale(-1.0).acos(),
+ _1_0i.scale(f64::consts::PI)
+ ));
+ assert!(close(
+ _0_1i.acos(),
+ Complex::new(f64::consts::PI / 2.0, (2.0.sqrt() - 1.0).ln())
+ ));
+ for &c in all_consts.iter() {
+ // acos(conj(z)) = conj(acos(z))
+ assert!(close(c.conj().acos(), c.acos().conj()));
+ // for this branch, 0 <= acos(z).re <= pi
+ assert!(0.0 <= c.acos().re && c.acos().re <= f64::consts::PI);
+ }
+ }
+
+ #[test]
+ fn test_atan() {
+ assert!(close(_0_0i.atan(), _0_0i));
+ assert!(close(_1_0i.atan(), _1_0i.scale(f64::consts::PI / 4.0)));
+ assert!(close(
+ _1_0i.scale(-1.0).atan(),
+ _1_0i.scale(-f64::consts::PI / 4.0)
+ ));
+ assert!(close(_0_1i.atan(), Complex::new(0.0, f64::infinity())));
+ for &c in all_consts.iter() {
+ // atan(conj(z)) = conj(atan(z))
+ assert!(close(c.conj().atan(), c.atan().conj()));
+ // atan(-z) = -atan(z)
+ assert!(close(c.scale(-1.0).atan(), c.atan().scale(-1.0)));
+ // for this branch, -pi/2 <= atan(z).re <= pi/2
+ assert!(
+ -f64::consts::PI / 2.0 <= c.atan().re && c.atan().re <= f64::consts::PI / 2.0
+ );
+ }
+ }
+
+ #[test]
+ fn test_sinh() {
+ assert!(close(_0_0i.sinh(), _0_0i));
+ assert!(close(
+ _1_0i.sinh(),
+ _1_0i.scale((f64::consts::E - 1.0 / f64::consts::E) / 2.0)
+ ));
+ assert!(close(_0_1i.sinh(), _0_1i.scale(1.0.sin())));
+ for &c in all_consts.iter() {
+ // sinh(conj(z)) = conj(sinh(z))
+ assert!(close(c.conj().sinh(), c.sinh().conj()));
+ // sinh(-z) = -sinh(z)
+ assert!(close(c.scale(-1.0).sinh(), c.sinh().scale(-1.0)));
+ }
+ }
+
+ #[test]
+ fn test_cosh() {
+ assert!(close(_0_0i.cosh(), _1_0i));
+ assert!(close(
+ _1_0i.cosh(),
+ _1_0i.scale((f64::consts::E + 1.0 / f64::consts::E) / 2.0)
+ ));
+ assert!(close(_0_1i.cosh(), _1_0i.scale(1.0.cos())));
+ for &c in all_consts.iter() {
+ // cosh(conj(z)) = conj(cosh(z))
+ assert!(close(c.conj().cosh(), c.cosh().conj()));
+ // cosh(-z) = cosh(z)
+ assert!(close(c.scale(-1.0).cosh(), c.cosh()));
+ }
+ }
+
+ #[test]
+ fn test_tanh() {
+ assert!(close(_0_0i.tanh(), _0_0i));
+ assert!(close(
+ _1_0i.tanh(),
+ _1_0i.scale((f64::consts::E.powi(2) - 1.0) / (f64::consts::E.powi(2) + 1.0))
+ ));
+ assert!(close(_0_1i.tanh(), _0_1i.scale(1.0.tan())));
+ for &c in all_consts.iter() {
+ // tanh(conj(z)) = conj(tanh(z))
+ assert!(close(c.conj().tanh(), c.tanh().conj()));
+ // tanh(-z) = -tanh(z)
+ assert!(close(c.scale(-1.0).tanh(), c.tanh().scale(-1.0)));
+ }
+ }
+
+ #[test]
+ fn test_asinh() {
+ assert!(close(_0_0i.asinh(), _0_0i));
+ assert!(close(_1_0i.asinh(), _1_0i.scale(1.0 + 2.0.sqrt()).ln()));
+ assert!(close(_0_1i.asinh(), _0_1i.scale(f64::consts::PI / 2.0)));
+ assert!(close(
+ _0_1i.asinh().scale(-1.0),
+ _0_1i.scale(-f64::consts::PI / 2.0)
+ ));
+ for &c in all_consts.iter() {
+ // asinh(conj(z)) = conj(asinh(z))
+ assert!(close(c.conj().asinh(), c.asinh().conj()));
+ // asinh(-z) = -asinh(z)
+ assert!(close(c.scale(-1.0).asinh(), c.asinh().scale(-1.0)));
+ // for this branch, -pi/2 <= asinh(z).im <= pi/2
+ assert!(
+ -f64::consts::PI / 2.0 <= c.asinh().im && c.asinh().im <= f64::consts::PI / 2.0
+ );
+ }
+ }
+
+ #[test]
+ fn test_acosh() {
+ assert!(close(_0_0i.acosh(), _0_1i.scale(f64::consts::PI / 2.0)));
+ assert!(close(_1_0i.acosh(), _0_0i));
+ assert!(close(
+ _1_0i.scale(-1.0).acosh(),
+ _0_1i.scale(f64::consts::PI)
+ ));
+ for &c in all_consts.iter() {
+ // acosh(conj(z)) = conj(acosh(z))
+ assert!(close(c.conj().acosh(), c.acosh().conj()));
+ // for this branch, -pi <= acosh(z).im <= pi and 0 <= acosh(z).re
+ assert!(
+ -f64::consts::PI <= c.acosh().im
+ && c.acosh().im <= f64::consts::PI
+ && 0.0 <= c.acosh().re
+ );
+ }
+ }
+
+ #[test]
+ fn test_atanh() {
+ assert!(close(_0_0i.atanh(), _0_0i));
+ assert!(close(_0_1i.atanh(), _0_1i.scale(f64::consts::PI / 4.0)));
+ assert!(close(_1_0i.atanh(), Complex::new(f64::infinity(), 0.0)));
+ for &c in all_consts.iter() {
+ // atanh(conj(z)) = conj(atanh(z))
+ assert!(close(c.conj().atanh(), c.atanh().conj()));
+ // atanh(-z) = -atanh(z)
+ assert!(close(c.scale(-1.0).atanh(), c.atanh().scale(-1.0)));
+ // for this branch, -pi/2 <= atanh(z).im <= pi/2
+ assert!(
+ -f64::consts::PI / 2.0 <= c.atanh().im && c.atanh().im <= f64::consts::PI / 2.0
+ );
+ }
+ }
+
+ #[test]
+ fn test_exp_ln() {
+ for &c in all_consts.iter() {
+ // e^ln(z) = z
+ assert!(close(c.ln().exp(), c));
+ }
+ }
+
+ #[test]
+ fn test_trig_to_hyperbolic() {
+ for &c in all_consts.iter() {
+ // sin(iz) = i sinh(z)
+ assert!(close((_0_1i * c).sin(), _0_1i * c.sinh()));
+ // cos(iz) = cosh(z)
+ assert!(close((_0_1i * c).cos(), c.cosh()));
+ // tan(iz) = i tanh(z)
+ assert!(close((_0_1i * c).tan(), _0_1i * c.tanh()));
+ }
+ }
+
+ #[test]
+ fn test_trig_identities() {
+ for &c in all_consts.iter() {
+ // tan(z) = sin(z)/cos(z)
+ assert!(close(c.tan(), c.sin() / c.cos()));
+ // sin(z)^2 + cos(z)^2 = 1
+ assert!(close(c.sin() * c.sin() + c.cos() * c.cos(), _1_0i));
+
+ // sin(asin(z)) = z
+ assert!(close(c.asin().sin(), c));
+ // cos(acos(z)) = z
+ assert!(close(c.acos().cos(), c));
+ // tan(atan(z)) = z
+ // i and -i are branch points
+ if c != _0_1i && c != _0_1i.scale(-1.0) {
+ assert!(close(c.atan().tan(), c));
+ }
+
+ // sin(z) = (e^(iz) - e^(-iz))/(2i)
+ assert!(close(
+ ((_0_1i * c).exp() - (_0_1i * c).exp().inv()) / _0_1i.scale(2.0),
+ c.sin()
+ ));
+ // cos(z) = (e^(iz) + e^(-iz))/2
+ assert!(close(
+ ((_0_1i * c).exp() + (_0_1i * c).exp().inv()).unscale(2.0),
+ c.cos()
+ ));
+ // tan(z) = i (1 - e^(2iz))/(1 + e^(2iz))
+ assert!(close(
+ _0_1i * (_1_0i - (_0_1i * c).scale(2.0).exp())
+ / (_1_0i + (_0_1i * c).scale(2.0).exp()),
+ c.tan()
+ ));
+ }
+ }
+
+ #[test]
+ fn test_hyperbolic_identities() {
+ for &c in all_consts.iter() {
+ // tanh(z) = sinh(z)/cosh(z)
+ assert!(close(c.tanh(), c.sinh() / c.cosh()));
+ // cosh(z)^2 - sinh(z)^2 = 1
+ assert!(close(c.cosh() * c.cosh() - c.sinh() * c.sinh(), _1_0i));
+
+ // sinh(asinh(z)) = z
+ assert!(close(c.asinh().sinh(), c));
+ // cosh(acosh(z)) = z
+ assert!(close(c.acosh().cosh(), c));
+ // tanh(atanh(z)) = z
+ // 1 and -1 are branch points
+ if c != _1_0i && c != _1_0i.scale(-1.0) {
+ assert!(close(c.atanh().tanh(), c));
+ }
+
+ // sinh(z) = (e^z - e^(-z))/2
+ assert!(close((c.exp() - c.exp().inv()).unscale(2.0), c.sinh()));
+ // cosh(z) = (e^z + e^(-z))/2
+ assert!(close((c.exp() + c.exp().inv()).unscale(2.0), c.cosh()));
+ // tanh(z) = ( e^(2z) - 1)/(e^(2z) + 1)
+ assert!(close(
+ (c.scale(2.0).exp() - _1_0i) / (c.scale(2.0).exp() + _1_0i),
+ c.tanh()
+ ));
+ }
+ }
+ }
+
+ // Test both a + b and a += b
+ macro_rules! test_a_op_b {
+ ($a:ident + $b:expr, $answer:expr) => {
+ assert_eq!($a + $b, $answer);
+ assert_eq!(
+ {
+ let mut x = $a;
+ x += $b;
+ x
+ },
+ $answer
+ );
+ };
+ ($a:ident - $b:expr, $answer:expr) => {
+ assert_eq!($a - $b, $answer);
+ assert_eq!(
+ {
+ let mut x = $a;
+ x -= $b;
+ x
+ },
+ $answer
+ );
+ };
+ ($a:ident * $b:expr, $answer:expr) => {
+ assert_eq!($a * $b, $answer);
+ assert_eq!(
+ {
+ let mut x = $a;
+ x *= $b;
+ x
+ },
+ $answer
+ );
+ };
+ ($a:ident / $b:expr, $answer:expr) => {
+ assert_eq!($a / $b, $answer);
+ assert_eq!(
+ {
+ let mut x = $a;
+ x /= $b;
+ x
+ },
+ $answer
+ );
+ };
+ ($a:ident % $b:expr, $answer:expr) => {
+ assert_eq!($a % $b, $answer);
+ assert_eq!(
+ {
+ let mut x = $a;
+ x %= $b;
+ x
+ },
+ $answer
+ );
+ };
+ }
+
+ // Test both a + b and a + &b
+ macro_rules! test_op {
+ ($a:ident $op:tt $b:expr, $answer:expr) => {
+ test_a_op_b!($a $op $b, $answer);
+ test_a_op_b!($a $op &$b, $answer);
+ };
+ }
+
+ mod complex_arithmetic {
+ use super::{_05_05i, _0_0i, _0_1i, _1_0i, _1_1i, _4_2i, _neg1_1i, all_consts};
+ use traits::{MulAdd, MulAddAssign, Zero};
+
+ #[test]
+ fn test_add() {
+ test_op!(_05_05i + _05_05i, _1_1i);
+ test_op!(_0_1i + _1_0i, _1_1i);
+ test_op!(_1_0i + _neg1_1i, _0_1i);
+
+ for &c in all_consts.iter() {
+ test_op!(_0_0i + c, c);
+ test_op!(c + _0_0i, c);
+ }
+ }
+
+ #[test]
+ fn test_sub() {
+ test_op!(_05_05i - _05_05i, _0_0i);
+ test_op!(_0_1i - _1_0i, _neg1_1i);
+ test_op!(_0_1i - _neg1_1i, _1_0i);
+
+ for &c in all_consts.iter() {
+ test_op!(c - _0_0i, c);
+ test_op!(c - c, _0_0i);
+ }
+ }
+
+ #[test]
+ fn test_mul() {
+ test_op!(_05_05i * _05_05i, _0_1i.unscale(2.0));
+ test_op!(_1_1i * _0_1i, _neg1_1i);
+
+ // i^2 & i^4
+ test_op!(_0_1i * _0_1i, -_1_0i);
+ assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
+
+ for &c in all_consts.iter() {
+ test_op!(c * _1_0i, c);
+ test_op!(_1_0i * c, c);
+ }
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn test_mul_add_float() {
+ assert_eq!(_05_05i.mul_add(_05_05i, _0_0i), _05_05i * _05_05i + _0_0i);
+ assert_eq!(_05_05i * _05_05i + _0_0i, _05_05i.mul_add(_05_05i, _0_0i));
+ assert_eq!(_0_1i.mul_add(_0_1i, _0_1i), _neg1_1i);
+ assert_eq!(_1_0i.mul_add(_1_0i, _1_0i), _1_0i * _1_0i + _1_0i);
+ assert_eq!(_1_0i * _1_0i + _1_0i, _1_0i.mul_add(_1_0i, _1_0i));
+
+ let mut x = _1_0i;
+ x.mul_add_assign(_1_0i, _1_0i);
+ assert_eq!(x, _1_0i * _1_0i + _1_0i);
+
+ for &a in &all_consts {
+ for &b in &all_consts {
+ for &c in &all_consts {
+ let abc = a * b + c;
+ assert_eq!(a.mul_add(b, c), abc);
+ let mut x = a;
+ x.mul_add_assign(b, c);
+ assert_eq!(x, abc);
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_mul_add() {
+ use super::Complex;
+ const _0_0i: Complex<i32> = Complex { re: 0, im: 0 };
+ const _1_0i: Complex<i32> = Complex { re: 1, im: 0 };
+ const _1_1i: Complex<i32> = Complex { re: 1, im: 1 };
+ const _0_1i: Complex<i32> = Complex { re: 0, im: 1 };
+ const _neg1_1i: Complex<i32> = Complex { re: -1, im: 1 };
+ const all_consts: [Complex<i32>; 5] = [_0_0i, _1_0i, _1_1i, _0_1i, _neg1_1i];
+
+ assert_eq!(_1_0i.mul_add(_1_0i, _0_0i), _1_0i * _1_0i + _0_0i);
+ assert_eq!(_1_0i * _1_0i + _0_0i, _1_0i.mul_add(_1_0i, _0_0i));
+ assert_eq!(_0_1i.mul_add(_0_1i, _0_1i), _neg1_1i);
+ assert_eq!(_1_0i.mul_add(_1_0i, _1_0i), _1_0i * _1_0i + _1_0i);
+ assert_eq!(_1_0i * _1_0i + _1_0i, _1_0i.mul_add(_1_0i, _1_0i));
+
+ let mut x = _1_0i;
+ x.mul_add_assign(_1_0i, _1_0i);
+ assert_eq!(x, _1_0i * _1_0i + _1_0i);
+
+ for &a in &all_consts {
+ for &b in &all_consts {
+ for &c in &all_consts {
+ let abc = a * b + c;
+ assert_eq!(a.mul_add(b, c), abc);
+ let mut x = a;
+ x.mul_add_assign(b, c);
+ assert_eq!(x, abc);
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_div() {
+ test_op!(_neg1_1i / _0_1i, _1_1i);
+ for &c in all_consts.iter() {
+ if c != Zero::zero() {
+ test_op!(c / c, _1_0i);
+ }
+ }
+ }
+
+ #[test]
+ fn test_rem() {
+ test_op!(_neg1_1i % _0_1i, _0_0i);
+ test_op!(_4_2i % _0_1i, _0_0i);
+ test_op!(_05_05i % _0_1i, _05_05i);
+ test_op!(_05_05i % _1_1i, _05_05i);
+ assert_eq!((_4_2i + _05_05i) % _0_1i, _05_05i);
+ assert_eq!((_4_2i + _05_05i) % _1_1i, _05_05i);
+ }
+
+ #[test]
+ fn test_neg() {
+ assert_eq!(-_1_0i + _0_1i, _neg1_1i);
+ assert_eq!((-_0_1i) * _0_1i, _1_0i);
+ for &c in all_consts.iter() {
+ assert_eq!(-(-c), c);
+ }
+ }
+ }
+
+ mod real_arithmetic {
+ use super::super::Complex;
+ use super::{_4_2i, _neg1_1i};
+
+ #[test]
+ fn test_add() {
+ test_op!(_4_2i + 0.5, Complex::new(4.5, 2.0));
+ assert_eq!(0.5 + _4_2i, Complex::new(4.5, 2.0));
+ }
+
+ #[test]
+ fn test_sub() {
+ test_op!(_4_2i - 0.5, Complex::new(3.5, 2.0));
+ assert_eq!(0.5 - _4_2i, Complex::new(-3.5, -2.0));
+ }
+
+ #[test]
+ fn test_mul() {
+ assert_eq!(_4_2i * 0.5, Complex::new(2.0, 1.0));
+ assert_eq!(0.5 * _4_2i, Complex::new(2.0, 1.0));
+ }
+
+ #[test]
+ fn test_div() {
+ assert_eq!(_4_2i / 0.5, Complex::new(8.0, 4.0));
+ assert_eq!(0.5 / _4_2i, Complex::new(0.1, -0.05));
+ }
+
+ #[test]
+ fn test_rem() {
+ assert_eq!(_4_2i % 2.0, Complex::new(0.0, 0.0));
+ assert_eq!(_4_2i % 3.0, Complex::new(1.0, 2.0));
+ assert_eq!(3.0 % _4_2i, Complex::new(3.0, 0.0));
+ assert_eq!(_neg1_1i % 2.0, _neg1_1i);
+ assert_eq!(-_4_2i % 3.0, Complex::new(-1.0, -2.0));
+ }
+
+ #[test]
+ fn test_div_rem_gaussian() {
+ // These would overflow with `norm_sqr` division.
+ let max = Complex::new(255u8, 255u8);
+ assert_eq!(max / 200, Complex::new(1, 1));
+ assert_eq!(max % 200, Complex::new(55, 55));
+ }
+ }
+
+ #[test]
+ fn test_to_string() {
+ fn test(c: Complex64, s: String) {
+ assert_eq!(c.to_string(), s);
+ }
+ test(_0_0i, "0+0i".to_string());
+ test(_1_0i, "1+0i".to_string());
+ test(_0_1i, "0+1i".to_string());
+ test(_1_1i, "1+1i".to_string());
+ test(_neg1_1i, "-1+1i".to_string());
+ test(-_neg1_1i, "1-1i".to_string());
+ test(_05_05i, "0.5+0.5i".to_string());
+ }
+
+ #[test]
+ fn test_string_formatting() {
+ let a = Complex::new(1.23456, 123.456);
+ assert_eq!(format!("{}", a), "1.23456+123.456i");
+ assert_eq!(format!("{:.2}", a), "1.23+123.46i");
+ assert_eq!(format!("{:.2e}", a), "1.23e0+1.23e2i");
+ assert_eq!(format!("{:+.2E}", a), "+1.23E0+1.23E2i");
+ #[cfg(feature = "std")]
+ assert_eq!(format!("{:+20.2E}", a), " +1.23E0+1.23E2i");
+
+ let b = Complex::new(0x80, 0xff);
+ assert_eq!(format!("{:X}", b), "80+FFi");
+ assert_eq!(format!("{:#x}", b), "0x80+0xffi");
+ assert_eq!(format!("{:+#b}", b), "+0b10000000+0b11111111i");
+ assert_eq!(format!("{:+#o}", b), "+0o200+0o377i");
+ #[cfg(feature = "std")]
+ assert_eq!(format!("{:+#16o}", b), " +0o200+0o377i");
+
+ let c = Complex::new(-10, -10000);
+ assert_eq!(format!("{}", c), "-10-10000i");
+ #[cfg(feature = "std")]
+ assert_eq!(format!("{:16}", c), " -10-10000i");
+ }
+
+ #[test]
+ fn test_hash() {
+ let a = Complex::new(0i32, 0i32);
+ let b = Complex::new(1i32, 0i32);
+ let c = Complex::new(0i32, 1i32);
+ assert!(::hash(&a) != ::hash(&b));
+ assert!(::hash(&b) != ::hash(&c));
+ assert!(::hash(&c) != ::hash(&a));
+ }
+
+ #[test]
+ fn test_hashset() {
+ use std::collections::HashSet;
+ let a = Complex::new(0i32, 0i32);
+ let b = Complex::new(1i32, 0i32);
+ let c = Complex::new(0i32, 1i32);
+
+ let set: HashSet<_> = [a, b, c].iter().cloned().collect();
+ assert!(set.contains(&a));
+ assert!(set.contains(&b));
+ assert!(set.contains(&c));
+ assert!(!set.contains(&(a + b + c)));
+ }
+
+ #[test]
+ fn test_is_nan() {
+ assert!(!_1_1i.is_nan());
+ let a = Complex::new(f64::NAN, f64::NAN);
+ assert!(a.is_nan());
+ }
+
+ #[test]
+ fn test_is_nan_special_cases() {
+ let a = Complex::new(0f64, f64::NAN);
+ let b = Complex::new(f64::NAN, 0f64);
+ assert!(a.is_nan());
+ assert!(b.is_nan());
+ }
+
+ #[test]
+ fn test_is_infinite() {
+ let a = Complex::new(2f64, f64::INFINITY);
+ assert!(a.is_infinite());
+ }
+
+ #[test]
+ fn test_is_finite() {
+ assert!(_1_1i.is_finite())
+ }
+
+ #[test]
+ fn test_is_normal() {
+ let a = Complex::new(0f64, f64::NAN);
+ let b = Complex::new(2f64, f64::INFINITY);
+ assert!(!a.is_normal());
+ assert!(!b.is_normal());
+ assert!(_1_1i.is_normal());
+ }
+
+ #[test]
+ fn test_from_str() {
+ fn test(z: Complex64, s: &str) {
+ assert_eq!(FromStr::from_str(s), Ok(z));
+ }
+ test(_0_0i, "0 + 0i");
+ test(_0_0i, "0+0j");
+ test(_0_0i, "0 - 0j");
+ test(_0_0i, "0-0i");
+ test(_0_0i, "0i + 0");
+ test(_0_0i, "0");
+ test(_0_0i, "-0");
+ test(_0_0i, "0i");
+ test(_0_0i, "0j");
+ test(_0_0i, "+0j");
+ test(_0_0i, "-0i");
+
+ test(_1_0i, "1 + 0i");
+ test(_1_0i, "1+0j");
+ test(_1_0i, "1 - 0j");
+ test(_1_0i, "+1-0i");
+ test(_1_0i, "-0j+1");
+ test(_1_0i, "1");
+
+ test(_1_1i, "1 + i");
+ test(_1_1i, "1+j");
+ test(_1_1i, "1 + 1j");
+ test(_1_1i, "1+1i");
+ test(_1_1i, "i + 1");
+ test(_1_1i, "1i+1");
+ test(_1_1i, "+j+1");
+
+ test(_0_1i, "0 + i");
+ test(_0_1i, "0+j");
+ test(_0_1i, "-0 + j");
+ test(_0_1i, "-0+i");
+ test(_0_1i, "0 + 1i");
+ test(_0_1i, "0+1j");
+ test(_0_1i, "-0 + 1j");
+ test(_0_1i, "-0+1i");
+ test(_0_1i, "j + 0");
+ test(_0_1i, "i");
+ test(_0_1i, "j");
+ test(_0_1i, "1j");
+
+ test(_neg1_1i, "-1 + i");
+ test(_neg1_1i, "-1+j");
+ test(_neg1_1i, "-1 + 1j");
+ test(_neg1_1i, "-1+1i");
+ test(_neg1_1i, "1i-1");
+ test(_neg1_1i, "j + -1");
+
+ test(_05_05i, "0.5 + 0.5i");
+ test(_05_05i, "0.5+0.5j");
+ test(_05_05i, "5e-1+0.5j");
+ test(_05_05i, "5E-1 + 0.5j");
+ test(_05_05i, "5E-1i + 0.5");
+ test(_05_05i, "0.05e+1j + 50E-2");
+ }
+
+ #[test]
+ fn test_from_str_radix() {
+ fn test(z: Complex64, s: &str, radix: u32) {
+ let res: Result<Complex64, <Complex64 as Num>::FromStrRadixErr> =
+ Num::from_str_radix(s, radix);
+ assert_eq!(res.unwrap(), z)
+ }
+ test(_4_2i, "4+2i", 10);
+ test(Complex::new(15.0, 32.0), "F+20i", 16);
+ test(Complex::new(15.0, 32.0), "1111+100000i", 2);
+ test(Complex::new(-15.0, -32.0), "-F-20i", 16);
+ test(Complex::new(-15.0, -32.0), "-1111-100000i", 2);
+ }
+
+ #[test]
+ fn test_from_str_fail() {
+ fn test(s: &str) {
+ let complex: Result<Complex64, _> = FromStr::from_str(s);
+ assert!(
+ complex.is_err(),
+ "complex {:?} -> {:?} should be an error",
+ s,
+ complex
+ );
+ }
+ test("foo");
+ test("6E");
+ test("0 + 2.718");
+ test("1 - -2i");
+ test("314e-2ij");
+ test("4.3j - i");
+ test("1i - 2i");
+ test("+ 1 - 3.0i");
+ }
+
+ #[test]
+ fn test_sum() {
+ let v = vec![_0_1i, _1_0i];
+ assert_eq!(v.iter().sum::<Complex64>(), _1_1i);
+ assert_eq!(v.into_iter().sum::<Complex64>(), _1_1i);
+ }
+
+ #[test]
+ fn test_prod() {
+ let v = vec![_0_1i, _1_0i];
+ assert_eq!(v.iter().product::<Complex64>(), _0_1i);
+ assert_eq!(v.into_iter().product::<Complex64>(), _0_1i);
+ }
+
+ #[test]
+ fn test_zero() {
+ let zero = Complex64::zero();
+ assert!(zero.is_zero());
+
+ let mut c = Complex::new(1.23, 4.56);
+ assert!(!c.is_zero());
+ assert_eq!(&c + &zero, c);
+
+ c.set_zero();
+ assert!(c.is_zero());
+ }
+
+ #[test]
+ fn test_one() {
+ let one = Complex64::one();
+ assert!(one.is_one());
+
+ let mut c = Complex::new(1.23, 4.56);
+ assert!(!c.is_one());
+ assert_eq!(&c * &one, c);
+
+ c.set_one();
+ assert!(c.is_one());
+ }
+
+ #[cfg(has_const_fn)]
+ #[test]
+ fn test_const() {
+ const R: f64 = 12.3;
+ const I: f64 = -4.5;
+ const C: Complex64 = Complex::new(R, I);
+
+ assert_eq!(C.re, 12.3);
+ assert_eq!(C.im, -4.5);
+ }
+}
diff --git a/rust/vendor/num-complex/src/pow.rs b/rust/vendor/num-complex/src/pow.rs
new file mode 100644
index 0000000..2f6b5ba
--- /dev/null
+++ b/rust/vendor/num-complex/src/pow.rs
@@ -0,0 +1,187 @@
+use super::Complex;
+
+use core::ops::Neg;
+#[cfg(feature = "std")]
+use traits::Float;
+use traits::{Num, One, Pow};
+
+macro_rules! pow_impl {
+ ($U:ty, $S:ty) => {
+ impl<'a, T: Clone + Num> Pow<$U> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, mut exp: $U) -> Self::Output {
+ if exp == 0 {
+ return Complex::one();
+ }
+ let mut base = self.clone();
+
+ while exp & 1 == 0 {
+ base = base.clone() * base;
+ exp >>= 1;
+ }
+
+ if exp == 1 {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ while exp > 1 {
+ exp >>= 1;
+ base = base.clone() * base;
+ if exp & 1 == 1 {
+ acc = acc * base.clone();
+ }
+ }
+ acc
+ }
+ }
+
+ impl<'a, 'b, T: Clone + Num> Pow<&'b $U> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, exp: &$U) -> Self::Output {
+ self.pow(*exp)
+ }
+ }
+
+ impl<'a, T: Clone + Num + Neg<Output = T>> Pow<$S> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, exp: $S) -> Self::Output {
+ if exp < 0 {
+ Pow::pow(&self.inv(), exp.wrapping_neg() as $U)
+ } else {
+ Pow::pow(self, exp as $U)
+ }
+ }
+ }
+
+ impl<'a, 'b, T: Clone + Num + Neg<Output = T>> Pow<&'b $S> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, exp: &$S) -> Self::Output {
+ self.pow(*exp)
+ }
+ }
+ };
+}
+
+pow_impl!(u8, i8);
+pow_impl!(u16, i16);
+pow_impl!(u32, i32);
+pow_impl!(u64, i64);
+pow_impl!(usize, isize);
+#[cfg(has_i128)]
+pow_impl!(u128, i128);
+
+// Note: we can't add `impl<T: Float> Pow<T> for Complex<T>` because new blanket impls are a
+// breaking change. Someone could already have their own `F` and `impl Pow<F> for Complex<F>`
+// which would conflict. We can't even do this in a new semantic version, because we have to
+// gate it on the "std" feature, and features can't add breaking changes either.
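+// As a hypothetical illustration (the type name is invented), a downstream crate may
+// already contain something like
+//
+//     struct MyFloat(f64);
+//     impl Pow<MyFloat> for Complex<MyFloat> { type Output = Complex<MyFloat>; /* ... */ }
+//
+// If a blanket `impl<T: Float> Pow<T> for Complex<T>` were added here, both impls would
+// apply as soon as `MyFloat: Float`, and coherence would reject that downstream crate (E0119).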
+
+macro_rules! powf_impl {
+ ($F:ty) => {
+ #[cfg(feature = "std")]
+ impl<'a, T: Float> Pow<$F> for &'a Complex<T>
+ where
+ $F: Into<T>,
+ {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, exp: $F) -> Self::Output {
+ self.powf(exp.into())
+ }
+ }
+
+ #[cfg(feature = "std")]
+ impl<'a, 'b, T: Float> Pow<&'b $F> for &'a Complex<T>
+ where
+ $F: Into<T>,
+ {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, &exp: &$F) -> Self::Output {
+ self.powf(exp.into())
+ }
+ }
+
+ #[cfg(feature = "std")]
+ impl<T: Float> Pow<$F> for Complex<T>
+ where
+ $F: Into<T>,
+ {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, exp: $F) -> Self::Output {
+ self.powf(exp.into())
+ }
+ }
+
+ #[cfg(feature = "std")]
+ impl<'b, T: Float> Pow<&'b $F> for Complex<T>
+ where
+ $F: Into<T>,
+ {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, &exp: &$F) -> Self::Output {
+ self.powf(exp.into())
+ }
+ }
+ };
+}
+
+powf_impl!(f32);
+powf_impl!(f64);
+
+// These blanket impls are OK, because both the target type and the trait parameter would be
+// foreign to anyone else trying to implement something that would overlap, raising E0117.
+
+#[cfg(feature = "std")]
+impl<'a, T: Float> Pow<Complex<T>> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, exp: Complex<T>) -> Self::Output {
+ self.powc(exp)
+ }
+}
+
+#[cfg(feature = "std")]
+impl<'a, 'b, T: Float> Pow<&'b Complex<T>> for &'a Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, &exp: &'b Complex<T>) -> Self::Output {
+ self.powc(exp)
+ }
+}
+
+#[cfg(feature = "std")]
+impl<T: Float> Pow<Complex<T>> for Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, exp: Complex<T>) -> Self::Output {
+ self.powc(exp)
+ }
+}
+
+#[cfg(feature = "std")]
+impl<'b, T: Float> Pow<&'b Complex<T>> for Complex<T> {
+ type Output = Complex<T>;
+
+ #[inline]
+ fn pow(self, &exp: &'b Complex<T>) -> Self::Output {
+ self.powc(exp)
+ }
+}
diff --git a/rust/vendor/num-derive/.cargo-checksum.json b/rust/vendor/num-derive/.cargo-checksum.json
new file mode 100644
index 0000000..0f4ca68
--- /dev/null
+++ b/rust/vendor/num-derive/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"24357f38521e03dbc562a42d44139d615a922406fb2bb122e17df29bc9bbb586","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"5c81631711af54d31e40841cd0153a95d9e505d8eba7503d114789ffb5e232c6","RELEASES.md":"62d6fef92273d9ee0e520ba611562b744e775318f5f6ae8e042ed94a3e19b2d6","build.rs":"16de2aa57e754fc1526d0400b5d87a3f771296705fca54601aa598b6f74ded8f","src/lib.rs":"5860c6007ea74b7b58033c15beae7c9e0859205e3ca1b76af9dc3e82914e08a2","tests/empty_enum.rs":"1b2312ec2fc9866fce7172e71e0aa2efcc3cb9d7659d0b633eb352bb1e080d53","tests/issue-6.rs":"b03b7382de854f30b84fd39d11b2c09aa97c136408942841cfc2c30c31b3f1a7","tests/issue-9.rs":"1aa7353078321a964c70986ceb071569290509b70faa9825e8b584165865ea7e","tests/newtype.rs":"1b60f13afbed8f18e94fe37141543d0c8d265419e1c2447b84ce14ac82af48e8","tests/num_derive_without_num.rs":"3ce528221a2cb752859e20c5423c4b474fec714b41d8c1b62f5614b165d7262b","tests/trivial.rs":"a6b0faab04527f6835f43cd72317a00065a7a6cf4c506d04e77f898134f7a59b","tests/with_custom_values.rs":"81ed60b50726555ee840ca773335aae68ac425d5af9ebbcbb3c6d6834358c73c"},"package":"eafd0b45c5537c3ba526f79d3e75120036502bebacbb3f3220914067ce39dbf2"} \ No newline at end of file
diff --git a/rust/vendor/num-derive/Cargo.toml b/rust/vendor/num-derive/Cargo.toml
new file mode 100644
index 0000000..f7c68ad
--- /dev/null
+++ b/rust/vendor/num-derive/Cargo.toml
@@ -0,0 +1,47 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "num-derive"
+version = "0.2.5"
+authors = ["The Rust Project Developers"]
+build = "build.rs"
+exclude = ["/ci/*", "/.travis.yml", "/bors.toml"]
+description = "Numeric syntax extensions"
+homepage = "https://github.com/rust-num/num-derive"
+documentation = "https://docs.rs/num-derive"
+readme = "README.md"
+keywords = ["mathematics", "numerics"]
+categories = ["science"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/rust-num/num-derive"
+
+[lib]
+name = "num_derive"
+test = false
+proc-macro = true
+[dependencies.proc-macro2]
+version = "0.4.2"
+
+[dependencies.quote]
+version = "0.6"
+
+[dependencies.syn]
+version = "0.15"
+[dev-dependencies.num]
+version = "0.2"
+
+[dev-dependencies.num-traits]
+version = "0.2"
+
+[features]
+full-syntax = ["syn/full"]
diff --git a/rust/vendor/num-derive/LICENSE-APACHE b/rust/vendor/num-derive/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num-derive/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num-derive/LICENSE-MIT b/rust/vendor/num-derive/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num-derive/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num-derive/README.md b/rust/vendor/num-derive/README.md
new file mode 100644
index 0000000..d2c71e4
--- /dev/null
+++ b/rust/vendor/num-derive/README.md
@@ -0,0 +1,53 @@
+# num-derive
+
+[![crate](https://img.shields.io/crates/v/num-derive.svg)](https://crates.io/crates/num-derive)
+[![documentation](https://docs.rs/num-derive/badge.svg)](https://docs.rs/num-derive)
+[![Travis status](https://travis-ci.org/rust-num/num-derive.svg?branch=master)](https://travis-ci.org/rust-num/num-derive)
+
+Procedural macros to derive numeric traits in Rust.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-traits = "0.2"
+num-derive = "0.2"
+```
+
+and this to your crate root:
+
+```rust
+#[macro_use]
+extern crate num_derive;
+```
+
+Then you can derive traits on your own types:
+
+```rust
+#[derive(FromPrimitive, ToPrimitive)]
+enum Color {
+ Red,
+ Blue,
+ Green,
+}
+```
+
+## Optional features
+
+- **`full-syntax`** — Enables `num-derive` to handle enum discriminants
+ represented by complex expressions. Usually can be avoided by
+ [utilizing constants], so only use this feature if namespace pollution is
+ undesired and [compile time doubling] is acceptable (see the sketch below).
+
+[utilizing constants]: https://github.com/rust-num/num-derive/pull/3#issuecomment-359044704
+[compile time doubling]: https://github.com/rust-num/num-derive/pull/3#issuecomment-359172588
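+
+As a hypothetical sketch of the kind of input `full-syntax` is meant for (the enum and
+its discriminant expressions below are purely illustrative):
+
+```rust
+#[macro_use]
+extern crate num_derive;
+
+// Discriminants written as expressions, rather than plain literals, are the
+// inputs this feature is intended to handle.
+#[derive(FromPrimitive, ToPrimitive)]
+enum HttpStatusClass {
+ ClientError = 4 * 100,
+ ServerError = 5 * 100,
+}
+```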
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-derive` crate is tested for rustc 1.15 and greater.
diff --git a/rust/vendor/num-derive/RELEASES.md b/rust/vendor/num-derive/RELEASES.md
new file mode 100644
index 0000000..151548e
--- /dev/null
+++ b/rust/vendor/num-derive/RELEASES.md
@@ -0,0 +1,78 @@
+# Release 0.2.5 (2019-04-23)
+
+- [Improved the masking of lints in derived code][23].
+
+[23]: https://github.com/rust-num/num-derive/pull/23
+
+# Release 0.2.4 (2019-01-25)
+
+- [Adjusted dependencies to allow no-std targets][22].
+
+[22]: https://github.com/rust-num/num-derive/pull/22
+
+# Release 0.2.3 (2018-10-03)
+
+- [Added newtype deriving][17] for `FromPrimitive`, `ToPrimitive`,
+ `NumOps<Self, Self>`, `NumCast`, `Zero`, `One`, `Num`, and `Float`.
+ Thanks @asayers!
+
+[17]: https://github.com/rust-num/num-derive/pull/17
+
+# Release 0.2.2 (2018-05-22)
+
+- [Updated dependencies][14].
+
+[14]: https://github.com/rust-num/num-derive/pull/14
+
+# Release 0.2.1 (2018-05-09)
+
+- [Updated dependencies][12] -- thanks @spearman!
+
+[12]: https://github.com/rust-num/num-derive/pull/12
+
+# Release 0.2.0 (2018-02-21)
+
+- [Discriminant matching is now simplified][10], casting values directly by
+ name, rather than trying to compute offsets from known values manually.
+- **breaking change**: [Derivations now import the traits from `num-traits`][11]
+ instead of the full `num` crate. These are still compatible, but users need
+ to have an explicit `num-traits = "0.2"` dependency in their `Cargo.toml`.
+
+[10]: https://github.com/rust-num/num-derive/pull/10
+[11]: https://github.com/rust-num/num-derive/pull/11
+
+
+# Release 0.1.44 (2018-01-26)
+
+- [The derived code now explicitly allows `unused_qualifications`][9], so users
+ that globally deny that lint don't encounter an error.
+
+[9]: https://github.com/rust-num/num-derive/pull/9
+
+
+# Release 0.1.43 (2018-01-23)
+
+- [The derived code now explicitly allows `trivial_numeric_casts`][7], so users
+ that globally deny that lint don't encounter an error.
+
+[7]: https://github.com/rust-num/num-derive/pull/7
+
+
+# Release 0.1.42 (2018-01-22)
+
+- [num-derive now has its own source repository][num-356] at [rust-num/num-derive][home].
+- [The derivation macros have been updated][3] to using `syn` 0.12. Support for complex
+ expressions in enum values can be enabled with the `full-syntax` feature.
+
+Thanks to @cuviper and @hcpl for their contributions!
+
+[home]: https://github.com/rust-num/num-derive
+[num-356]: https://github.com/rust-num/num/pull/356
+[3]: https://github.com/rust-num/num-derive/pull/3
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
+
diff --git a/rust/vendor/num-derive/build.rs b/rust/vendor/num-derive/build.rs
new file mode 100644
index 0000000..fd60866
--- /dev/null
+++ b/rust/vendor/num-derive/build.rs
@@ -0,0 +1,35 @@
+use std::env;
+use std::io::Write;
+use std::process::{Command, Stdio};
+
+fn main() {
+ if probe("fn main() { 0i128; }") {
+ println!("cargo:rustc-cfg=has_i128");
+ } else if env::var_os("CARGO_FEATURE_I128").is_some() {
+ panic!("i128 support was not detected!");
+ }
+}
+
+/// Test if a code snippet can be compiled
+fn probe(code: &str) -> bool {
+ let rustc = env::var_os("RUSTC").unwrap_or_else(|| "rustc".into());
+ let out_dir = env::var_os("OUT_DIR").expect("environment variable OUT_DIR");
+
+ let mut child = Command::new(rustc)
+ .arg("--out-dir")
+ .arg(out_dir)
+ .arg("--emit=obj")
+ .arg("-")
+ .stdin(Stdio::piped())
+ .spawn()
+ .expect("rustc probe");
+
+ child
+ .stdin
+ .as_mut()
+ .expect("rustc stdin")
+ .write_all(code.as_bytes())
+ .expect("write rustc stdin");
+
+ child.wait().expect("rustc probe").success()
+}
diff --git a/rust/vendor/num-derive/src/lib.rs b/rust/vendor/num-derive/src/lib.rs
new file mode 100644
index 0000000..ea53b70
--- /dev/null
+++ b/rust/vendor/num-derive/src/lib.rs
@@ -0,0 +1,797 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "proc-macro"]
+#![doc(html_root_url = "https://docs.rs/num-derive/0.2")]
+#![recursion_limit = "512"]
+
+//! Procedural macros to derive numeric traits in Rust.
+//!
+//! ## Usage
+//!
+//! Add this to your `Cargo.toml`:
+//!
+//! ```toml
+//! [dependencies]
+//! num-traits = "0.2"
+//! num-derive = "0.2"
+//! ```
+//!
+//! Then you can derive traits on your own types:
+//!
+//! ```rust
+//! #[macro_use]
+//! extern crate num_derive;
+//!
+//! #[derive(FromPrimitive, ToPrimitive)]
+//! enum Color {
+//! Red,
+//! Blue,
+//! Green,
+//! }
+//! # fn main() {}
+//! ```
+
+extern crate proc_macro;
+
+extern crate proc_macro2;
+#[macro_use]
+extern crate quote;
+extern crate syn;
+
+use proc_macro::TokenStream;
+use proc_macro2::Span;
+
+use syn::{Data, Fields, Ident};
+
+// Within `exp`, you can bring things into scope with `extern crate`.
+//
+// We don't want to assume that `num_traits::` is in scope - the user may have imported it under a
+// different name, or may have imported it in a non-toplevel module (common when putting impls
+// behind a feature gate).
+//
+// Solution: let's just generate `extern crate num_traits as _num_traits` and then refer to
+// `_num_traits` in the derived code. However, macros are not allowed to produce `extern crate`
+// statements at the toplevel.
+//
+// Solution: let's generate `mod _impl_foo` and import num_traits within that. However, now we
+// lose access to private members of the surrounding module. This is a problem if, for example,
+// we're deriving for a newtype, where the inner type is defined in the same module, but not
+// exported.
+//
+// Solution: use the dummy const trick. For some reason, `extern crate` statements are allowed
+// here, but everything from the surrounding module is in scope. This trick is taken from serde.
+fn dummy_const_trick<T: quote::ToTokens>(
+ trait_: &str,
+ name: &proc_macro2::Ident,
+ exp: T,
+) -> proc_macro2::TokenStream {
+ let dummy_const = Ident::new(
+ &format!("_IMPL_NUM_{}_FOR_{}", trait_, unraw(name)),
+ Span::call_site(),
+ );
+ quote! {
+ #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)]
+ const #dummy_const: () = {
+ #[allow(unknown_lints)]
+ #[cfg_attr(feature = "cargo-clippy", allow(useless_attribute))]
+ #[allow(rust_2018_idioms)]
+ extern crate num_traits as _num_traits;
+ #exp
+ };
+ }
+}
+
+#[allow(deprecated)]
+fn unraw(ident: &proc_macro2::Ident) -> String {
+ // str::trim_start_matches was added in 1.30, trim_left_matches deprecated
+ // in 1.33. We currently support rustc back to 1.15 so we need to continue
+ // to use the deprecated one.
+ ident.to_string().trim_left_matches("r#").to_owned()
+}
+
+// If `data` is a newtype, return the type it's wrapping.
+fn newtype_inner(data: &syn::Data) -> Option<syn::Type> {
+ match *data {
+ Data::Struct(ref s) => {
+ match s.fields {
+ Fields::Unnamed(ref fs) => {
+ if fs.unnamed.len() == 1 {
+ Some(fs.unnamed[0].ty.clone())
+ } else {
+ None
+ }
+ }
+ Fields::Named(ref fs) => {
+ if fs.named.len() == 1 {
+ panic!("num-derive doesn't know how to handle newtypes with named fields yet. \
+ Please use a tuple-style newtype, or submit a PR!");
+ }
+ None
+ }
+ _ => None,
+ }
+ }
+ _ => None,
+ }
+}
+
+/// Derives [`num_traits::FromPrimitive`][from] for simple enums and newtypes.
+///
+/// [from]: https://docs.rs/num-traits/0.2/num_traits/cast/trait.FromPrimitive.html
+///
+/// # Examples
+///
+/// Simple enums can be derived:
+///
+/// ```rust
+/// # #[macro_use]
+/// # extern crate num_derive;
+///
+/// #[derive(FromPrimitive)]
+/// enum Color {
+/// Red,
+/// Blue,
+/// Green = 42,
+/// }
+/// # fn main() {}
+/// ```
+///
+/// Enums that contain data are not allowed:
+///
+/// ```compile_fail
+/// # #[macro_use]
+/// # extern crate num_derive;
+///
+/// #[derive(FromPrimitive)]
+/// enum Color {
+/// Rgb(u8, u8, u8),
+/// Hsv(u8, u8, u8),
+/// }
+/// # fn main() {}
+/// ```
+///
+/// Structs are not allowed:
+///
+/// ```compile_fail
+/// # #[macro_use]
+/// # extern crate num_derive;
+/// #[derive(FromPrimitive)]
+/// struct Color {
+/// r: u8,
+/// g: u8,
+/// b: u8,
+/// }
+/// # fn main() {}
+/// ```
+#[proc_macro_derive(FromPrimitive)]
+pub fn from_primitive(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let name = &ast.ident;
+
+ let impl_ = if let Some(inner_ty) = newtype_inner(&ast.data) {
+ let i128_fns = if cfg!(has_i128) {
+ quote! {
+ fn from_i128(n: i128) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_i128(n).map(#name)
+ }
+ fn from_u128(n: u128) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_u128(n).map(#name)
+ }
+ }
+ } else {
+ quote! {}
+ };
+
+ quote! {
+ impl _num_traits::FromPrimitive for #name {
+ fn from_i64(n: i64) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_i64(n).map(#name)
+ }
+ fn from_u64(n: u64) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_u64(n).map(#name)
+ }
+ fn from_isize(n: isize) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_isize(n).map(#name)
+ }
+ fn from_i8(n: i8) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_i8(n).map(#name)
+ }
+ fn from_i16(n: i16) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_i16(n).map(#name)
+ }
+ fn from_i32(n: i32) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_i32(n).map(#name)
+ }
+ fn from_usize(n: usize) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_usize(n).map(#name)
+ }
+ fn from_u8(n: u8) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_u8(n).map(#name)
+ }
+ fn from_u16(n: u16) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_u16(n).map(#name)
+ }
+ fn from_u32(n: u32) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_u32(n).map(#name)
+ }
+ fn from_f32(n: f32) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_f32(n).map(#name)
+ }
+ fn from_f64(n: f64) -> Option<Self> {
+ <#inner_ty as _num_traits::FromPrimitive>::from_f64(n).map(#name)
+ }
+ #i128_fns
+ }
+ }
+ } else {
+ let variants = match ast.data {
+ Data::Enum(ref data_enum) => &data_enum.variants,
+ _ => panic!(
+ "`FromPrimitive` can be applied only to enums and newtypes, {} is neither",
+ name
+ ),
+ };
+
+ let from_i64_var = quote! { n };
+ let clauses: Vec<_> = variants
+ .iter()
+ .map(|variant| {
+ let ident = &variant.ident;
+ match variant.fields {
+ Fields::Unit => (),
+ _ => panic!(
+ "`FromPrimitive` can be applied only to unitary enums and newtypes, \
+ {}::{} is either struct or tuple",
+ name, ident
+ ),
+ }
+
+ quote! {
+ if #from_i64_var == #name::#ident as i64 {
+ Some(#name::#ident)
+ }
+ }
+ })
+ .collect();
+
+ let from_i64_var = if clauses.is_empty() {
+ quote!(_)
+ } else {
+ from_i64_var
+ };
+
+ quote! {
+ impl _num_traits::FromPrimitive for #name {
+ #[allow(trivial_numeric_casts)]
+ fn from_i64(#from_i64_var: i64) -> Option<Self> {
+ #(#clauses else)* {
+ None
+ }
+ }
+
+ fn from_u64(n: u64) -> Option<Self> {
+ Self::from_i64(n as i64)
+ }
+ }
+ }
+ };
+
+ dummy_const_trick("FromPrimitive", &name, impl_).into()
+}
+
+/// Derives [`num_traits::ToPrimitive`][to] for simple enums and newtypes.
+///
+/// [to]: https://docs.rs/num-traits/0.2/num_traits/cast/trait.ToPrimitive.html
+///
+/// # Examples
+///
+/// Simple enums can be derived:
+///
+/// ```rust
+/// # #[macro_use]
+/// # extern crate num_derive;
+///
+/// #[derive(ToPrimitive)]
+/// enum Color {
+/// Red,
+/// Blue,
+/// Green = 42,
+/// }
+/// # fn main() {}
+/// ```
+///
+/// Enums that contain data are not allowed:
+///
+/// ```compile_fail
+/// # #[macro_use]
+/// # extern crate num_derive;
+///
+/// #[derive(ToPrimitive)]
+/// enum Color {
+/// Rgb(u8, u8, u8),
+/// Hsv(u8, u8, u8),
+/// }
+/// # fn main() {}
+/// ```
+///
+/// Structs are not allowed:
+///
+/// ```compile_fail
+/// # #[macro_use]
+/// # extern crate num_derive;
+/// #[derive(ToPrimitive)]
+/// struct Color {
+/// r: u8,
+/// g: u8,
+/// b: u8,
+/// }
+/// # fn main() {}
+/// ```
+#[proc_macro_derive(ToPrimitive)]
+pub fn to_primitive(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let name = &ast.ident;
+
+ let impl_ = if let Some(inner_ty) = newtype_inner(&ast.data) {
+ let i128_fns = if cfg!(has_i128) {
+ quote! {
+ fn to_i128(&self) -> Option<i128> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_i128(&self.0)
+ }
+ fn to_u128(&self) -> Option<u128> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_u128(&self.0)
+ }
+ }
+ } else {
+ quote! {}
+ };
+
+ quote! {
+ impl _num_traits::ToPrimitive for #name {
+ fn to_i64(&self) -> Option<i64> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_i64(&self.0)
+ }
+ fn to_u64(&self) -> Option<u64> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_u64(&self.0)
+ }
+ fn to_isize(&self) -> Option<isize> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_isize(&self.0)
+ }
+ fn to_i8(&self) -> Option<i8> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_i8(&self.0)
+ }
+ fn to_i16(&self) -> Option<i16> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_i16(&self.0)
+ }
+ fn to_i32(&self) -> Option<i32> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_i32(&self.0)
+ }
+ fn to_usize(&self) -> Option<usize> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_usize(&self.0)
+ }
+ fn to_u8(&self) -> Option<u8> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_u8(&self.0)
+ }
+ fn to_u16(&self) -> Option<u16> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_u16(&self.0)
+ }
+ fn to_u32(&self) -> Option<u32> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_u32(&self.0)
+ }
+ fn to_f32(&self) -> Option<f32> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_f32(&self.0)
+ }
+ fn to_f64(&self) -> Option<f64> {
+ <#inner_ty as _num_traits::ToPrimitive>::to_f64(&self.0)
+ }
+ #i128_fns
+ }
+ }
+ } else {
+ let variants = match ast.data {
+ Data::Enum(ref data_enum) => &data_enum.variants,
+ _ => panic!(
+ "`ToPrimitive` can be applied only to enums and newtypes, {} is neither",
+ name
+ ),
+ };
+
+ let variants: Vec<_> = variants
+ .iter()
+ .map(|variant| {
+ let ident = &variant.ident;
+ match variant.fields {
+ Fields::Unit => (),
+ _ => {
+ panic!("`ToPrimitive` can be applied only to unitary enums and newtypes, {}::{} is either struct or tuple", name, ident)
+ },
+ }
+
+ // NB: We have to check each variant individually, because we'll only have `&self`
+ // for the input. We can't move from that, and it might not be `Clone` or `Copy`.
+ // (Otherwise we could just do `*self as i64` without a `match` at all.)
+ quote!(#name::#ident => #name::#ident as i64)
+ })
+ .collect();
+
+ let match_expr = if variants.is_empty() {
+ // No variants found, so do not wrap the match in Some, to avoid triggering the `unreachable_code` lint
+ quote! {
+ match *self {}
+ }
+ } else {
+ quote! {
+ Some(match *self {
+ #(#variants,)*
+ })
+ }
+ };
+
+ quote! {
+ impl _num_traits::ToPrimitive for #name {
+ #[allow(trivial_numeric_casts)]
+ fn to_i64(&self) -> Option<i64> {
+ #match_expr
+ }
+
+ fn to_u64(&self) -> Option<u64> {
+ self.to_i64().map(|x| x as u64)
+ }
+ }
+ }
+ };
+
+ dummy_const_trick("ToPrimitive", &name, impl_).into()
+}
+
+#[allow(renamed_and_removed_lints)]
+#[cfg_attr(feature = "cargo-clippy", allow(const_static_lifetime))]
+const NEWTYPE_ONLY: &'static str = "This trait can only be derived for newtypes";
+
+/// Derives [`num_traits::NumOps`][num_ops] for newtypes. The inner type must already implement
+/// `NumOps`.
+///
+/// [num_ops]: https://docs.rs/num-traits/0.2/num_traits/trait.NumOps.html
+///
+/// Note that, since `NumOps` is really a trait alias for `Add + Sub + Mul + Div + Rem`, this macro
+/// generates impls for _those_ traits. Furthermore, in all generated impls, `RHS=Self` and
+/// `Output=Self`.
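+///
+/// # Examples
+///
+/// A minimal sketch of deriving `NumOps` for a newtype (the `Meters` type below is
+/// purely illustrative):
+///
+/// ```rust
+/// # #[macro_use]
+/// # extern crate num_derive;
+/// // `Meters` is just an illustrative example newtype.
+/// #[derive(NumOps)]
+/// struct Meters(f64);
+/// # fn main() {
+/// assert_eq!((Meters(3.0) + Meters(4.0)).0, 7.0);
+/// # }
+/// ```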
+#[proc_macro_derive(NumOps)]
+pub fn num_ops(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let name = &ast.ident;
+ let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+ dummy_const_trick(
+ "NumOps",
+ &name,
+ quote! {
+ impl ::std::ops::Add for #name {
+ type Output = Self;
+ fn add(self, other: Self) -> Self {
+ #name(<#inner_ty as ::std::ops::Add>::add(self.0, other.0))
+ }
+ }
+ impl ::std::ops::Sub for #name {
+ type Output = Self;
+ fn sub(self, other: Self) -> Self {
+ #name(<#inner_ty as ::std::ops::Sub>::sub(self.0, other.0))
+ }
+ }
+ impl ::std::ops::Mul for #name {
+ type Output = Self;
+ fn mul(self, other: Self) -> Self {
+ #name(<#inner_ty as ::std::ops::Mul>::mul(self.0, other.0))
+ }
+ }
+ impl ::std::ops::Div for #name {
+ type Output = Self;
+ fn div(self, other: Self) -> Self {
+ #name(<#inner_ty as ::std::ops::Div>::div(self.0, other.0))
+ }
+ }
+ impl ::std::ops::Rem for #name {
+ type Output = Self;
+ fn rem(self, other: Self) -> Self {
+ #name(<#inner_ty as ::std::ops::Rem>::rem(self.0, other.0))
+ }
+ }
+ },
+ )
+ .into()
+}
+
+/// Derives [`num_traits::NumCast`][num_cast] for newtypes. The inner type must already implement
+/// `NumCast`.
+///
+/// [num_cast]: https://docs.rs/num-traits/0.2/num_traits/cast/trait.NumCast.html
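+///
+/// # Examples
+///
+/// A minimal sketch (the `Count` newtype is illustrative). Since `NumCast` has
+/// `ToPrimitive` as a supertrait, the newtype derives that as well:
+///
+/// ```rust
+/// # extern crate num as num_renamed;
+/// # #[macro_use]
+/// # extern crate num_derive;
+/// # use num_renamed::NumCast;
+/// // `Count` is just an illustrative example newtype.
+/// #[derive(Debug, PartialEq, ToPrimitive, NumCast)]
+/// struct Count(u64);
+/// # fn main() {
+/// assert_eq!(<Count as NumCast>::from(25u8), Some(Count(25)));
+/// # }
+/// ```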
+#[proc_macro_derive(NumCast)]
+pub fn num_cast(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let name = &ast.ident;
+ let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+ dummy_const_trick(
+ "NumCast",
+ &name,
+ quote! {
+ impl _num_traits::NumCast for #name {
+ fn from<T: _num_traits::ToPrimitive>(n: T) -> Option<Self> {
+ <#inner_ty as _num_traits::NumCast>::from(n).map(#name)
+ }
+ }
+ },
+ )
+ .into()
+}
+
+/// Derives [`num_traits::Zero`][zero] for newtypes. The inner type must already implement `Zero`.
+///
+/// [zero]: https://docs.rs/num-traits/0.2/num_traits/identities/trait.Zero.html
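+///
+/// # Examples
+///
+/// A minimal sketch (the `Count` newtype is illustrative). `Zero` requires `Add`,
+/// so `NumOps` is derived alongside it:
+///
+/// ```rust
+/// # extern crate num as num_renamed;
+/// # #[macro_use]
+/// # extern crate num_derive;
+/// # use num_renamed::Zero;
+/// // `Count` is just an illustrative example newtype.
+/// #[derive(Debug, PartialEq, NumOps, Zero)]
+/// struct Count(u64);
+/// # fn main() {
+/// assert_eq!(Count::zero(), Count(0));
+/// assert!(Count(0).is_zero());
+/// # }
+/// ```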
+#[proc_macro_derive(Zero)]
+pub fn zero(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let name = &ast.ident;
+ let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+ dummy_const_trick(
+ "Zero",
+ &name,
+ quote! {
+ impl _num_traits::Zero for #name {
+ fn zero() -> Self {
+ #name(<#inner_ty as _num_traits::Zero>::zero())
+ }
+ fn is_zero(&self) -> bool {
+ <#inner_ty as _num_traits::Zero>::is_zero(&self.0)
+ }
+ }
+ },
+ )
+ .into()
+}
+
+/// Derives [`num_traits::One`][one] for newtypes. The inner type must already implement `One`.
+///
+/// [one]: https://docs.rs/num-traits/0.2/num_traits/identities/trait.One.html
+#[proc_macro_derive(One)]
+pub fn one(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let name = &ast.ident;
+ let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+ dummy_const_trick(
+ "One",
+ &name,
+ quote! {
+ impl _num_traits::One for #name {
+ fn one() -> Self {
+ #name(<#inner_ty as _num_traits::One>::one())
+ }
+ fn is_one(&self) -> bool {
+ <#inner_ty as _num_traits::One>::is_one(&self.0)
+ }
+ }
+ },
+ )
+ .into()
+}
+
+/// Derives [`num_traits::Num`][num] for newtypes. The inner type must already implement `Num`.
+///
+/// [num]: https://docs.rs/num-traits/0.2/num_traits/trait.Num.html
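+///
+/// # Examples
+///
+/// A minimal sketch (the `Wrapper` newtype is illustrative). `Num` has
+/// `PartialEq + Zero + One + NumOps` as supertraits, so those are derived too:
+///
+/// ```rust
+/// # extern crate num as num_renamed;
+/// # #[macro_use]
+/// # extern crate num_derive;
+/// # use num_renamed::Num;
+/// // `Wrapper` is just an illustrative example newtype.
+/// #[derive(Debug, PartialEq, NumOps, Zero, One, Num)]
+/// struct Wrapper(u32);
+/// # fn main() {
+/// assert_eq!(Wrapper::from_str_radix("25", 10).ok(), Some(Wrapper(25)));
+/// # }
+/// ```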
+#[proc_macro_derive(Num)]
+pub fn num(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let name = &ast.ident;
+ let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+ dummy_const_trick(
+ "Num",
+ &name,
+ quote! {
+ impl _num_traits::Num for #name {
+ type FromStrRadixErr = <#inner_ty as _num_traits::Num>::FromStrRadixErr;
+ fn from_str_radix(s: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {
+ <#inner_ty as _num_traits::Num>::from_str_radix(s, radix).map(#name)
+ }
+ }
+ },
+ )
+ .into()
+}
+
+/// Derives [`num_traits::Float`][float] for newtypes. The inner type must already implement
+/// `Float`.
+///
+/// [float]: https://docs.rs/num-traits/0.2/num_traits/float/trait.Float.html
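+///
+/// # Examples
+///
+/// A minimal sketch mirroring the crate's own `MyFloat` test newtype; `Float` also
+/// needs `Neg`, `Copy`, `PartialOrd`, and the other numeric traits, so those are
+/// derived (with `Neg` implemented by hand):
+///
+/// ```rust
+/// # extern crate num as num_renamed;
+/// # #[macro_use]
+/// # extern crate num_derive;
+/// # use num_renamed::Float;
+/// # use std::ops::Neg;
+/// // `MyFloat` is an illustrative newtype around `f64`.
+/// #[derive(Debug, Clone, Copy, PartialEq, PartialOrd,
+///          ToPrimitive, FromPrimitive, NumOps, NumCast, One, Zero, Num, Float)]
+/// struct MyFloat(f64);
+///
+/// impl Neg for MyFloat {
+///     type Output = MyFloat;
+///     fn neg(self) -> Self {
+///         MyFloat(self.0.neg())
+///     }
+/// }
+/// # fn main() {
+/// assert_eq!(MyFloat(4.0).sqrt(), MyFloat(2.0));
+/// # }
+/// ```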
+#[proc_macro_derive(Float)]
+pub fn float(input: TokenStream) -> TokenStream {
+ let ast: syn::DeriveInput = syn::parse(input).unwrap();
+ let name = &ast.ident;
+ let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+ dummy_const_trick(
+ "Float",
+ &name,
+ quote! {
+ impl _num_traits::Float for #name {
+ fn nan() -> Self {
+ #name(<#inner_ty as _num_traits::Float>::nan())
+ }
+ fn infinity() -> Self {
+ #name(<#inner_ty as _num_traits::Float>::infinity())
+ }
+ fn neg_infinity() -> Self {
+ #name(<#inner_ty as _num_traits::Float>::neg_infinity())
+ }
+ fn neg_zero() -> Self {
+ #name(<#inner_ty as _num_traits::Float>::neg_zero())
+ }
+ fn min_value() -> Self {
+ #name(<#inner_ty as _num_traits::Float>::min_value())
+ }
+ fn min_positive_value() -> Self {
+ #name(<#inner_ty as _num_traits::Float>::min_positive_value())
+ }
+ fn max_value() -> Self {
+ #name(<#inner_ty as _num_traits::Float>::max_value())
+ }
+ fn is_nan(self) -> bool {
+ <#inner_ty as _num_traits::Float>::is_nan(self.0)
+ }
+ fn is_infinite(self) -> bool {
+ <#inner_ty as _num_traits::Float>::is_infinite(self.0)
+ }
+ fn is_finite(self) -> bool {
+ <#inner_ty as _num_traits::Float>::is_finite(self.0)
+ }
+ fn is_normal(self) -> bool {
+ <#inner_ty as _num_traits::Float>::is_normal(self.0)
+ }
+ fn classify(self) -> ::std::num::FpCategory {
+ <#inner_ty as _num_traits::Float>::classify(self.0)
+ }
+ fn floor(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::floor(self.0))
+ }
+ fn ceil(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::ceil(self.0))
+ }
+ fn round(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::round(self.0))
+ }
+ fn trunc(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::trunc(self.0))
+ }
+ fn fract(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::fract(self.0))
+ }
+ fn abs(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::abs(self.0))
+ }
+ fn signum(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::signum(self.0))
+ }
+ fn is_sign_positive(self) -> bool {
+ <#inner_ty as _num_traits::Float>::is_sign_positive(self.0)
+ }
+ fn is_sign_negative(self) -> bool {
+ <#inner_ty as _num_traits::Float>::is_sign_negative(self.0)
+ }
+ fn mul_add(self, a: Self, b: Self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::mul_add(self.0, a.0, b.0))
+ }
+ fn recip(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::recip(self.0))
+ }
+ fn powi(self, n: i32) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::powi(self.0, n))
+ }
+ fn powf(self, n: Self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::powf(self.0, n.0))
+ }
+ fn sqrt(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::sqrt(self.0))
+ }
+ fn exp(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::exp(self.0))
+ }
+ fn exp2(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::exp2(self.0))
+ }
+ fn ln(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::ln(self.0))
+ }
+ fn log(self, base: Self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::log(self.0, base.0))
+ }
+ fn log2(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::log2(self.0))
+ }
+ fn log10(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::log10(self.0))
+ }
+ fn max(self, other: Self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::max(self.0, other.0))
+ }
+ fn min(self, other: Self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::min(self.0, other.0))
+ }
+ fn abs_sub(self, other: Self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::abs_sub(self.0, other.0))
+ }
+ fn cbrt(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::cbrt(self.0))
+ }
+ fn hypot(self, other: Self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::hypot(self.0, other.0))
+ }
+ fn sin(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::sin(self.0))
+ }
+ fn cos(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::cos(self.0))
+ }
+ fn tan(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::tan(self.0))
+ }
+ fn asin(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::asin(self.0))
+ }
+ fn acos(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::acos(self.0))
+ }
+ fn atan(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::atan(self.0))
+ }
+ fn atan2(self, other: Self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::atan2(self.0, other.0))
+ }
+ fn sin_cos(self) -> (Self, Self) {
+ let (x, y) = <#inner_ty as _num_traits::Float>::sin_cos(self.0);
+ (#name(x), #name(y))
+ }
+ fn exp_m1(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::exp_m1(self.0))
+ }
+ fn ln_1p(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::ln_1p(self.0))
+ }
+ fn sinh(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::sinh(self.0))
+ }
+ fn cosh(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::cosh(self.0))
+ }
+ fn tanh(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::tanh(self.0))
+ }
+ fn asinh(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::asinh(self.0))
+ }
+ fn acosh(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::acosh(self.0))
+ }
+ fn atanh(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::atanh(self.0))
+ }
+ fn integer_decode(self) -> (u64, i16, i8) {
+ <#inner_ty as _num_traits::Float>::integer_decode(self.0)
+ }
+ fn epsilon() -> Self {
+ #name(<#inner_ty as _num_traits::Float>::epsilon())
+ }
+ fn to_degrees(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::to_degrees(self.0))
+ }
+ fn to_radians(self) -> Self {
+ #name(<#inner_ty as _num_traits::Float>::to_radians(self.0))
+ }
+ }
+ },
+ )
+ .into()
+}
diff --git a/rust/vendor/num-derive/tests/empty_enum.rs b/rust/vendor/num-derive/tests/empty_enum.rs
new file mode 100644
index 0000000..173996c
--- /dev/null
+++ b/rust/vendor/num-derive/tests/empty_enum.rs
@@ -0,0 +1,23 @@
+// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate num as num_renamed;
+#[macro_use]
+extern crate num_derive;
+
+#[derive(Debug, PartialEq, FromPrimitive, ToPrimitive)]
+enum Color {}
+
+#[test]
+fn test_empty_enum() {
+ let v: [Option<Color>; 1] = [num_renamed::FromPrimitive::from_u64(0)];
+
+ assert_eq!(v, [None]);
+}
diff --git a/rust/vendor/num-derive/tests/issue-6.rs b/rust/vendor/num-derive/tests/issue-6.rs
new file mode 100644
index 0000000..b2503de
--- /dev/null
+++ b/rust/vendor/num-derive/tests/issue-6.rs
@@ -0,0 +1,17 @@
+#![deny(trivial_numeric_casts)]
+extern crate num;
+#[macro_use]
+extern crate num_derive;
+
+#[derive(FromPrimitive, ToPrimitive)]
+pub enum SomeEnum {
+ A = 1,
+}
+
+#[test]
+fn test_trivial_numeric_casts() {
+ use num::{FromPrimitive, ToPrimitive};
+ assert!(SomeEnum::from_u64(1).is_some());
+ assert!(SomeEnum::from_i64(-1).is_none());
+ assert_eq!(SomeEnum::A.to_u64(), Some(1));
+}
diff --git a/rust/vendor/num-derive/tests/issue-9.rs b/rust/vendor/num-derive/tests/issue-9.rs
new file mode 100644
index 0000000..06038a4
--- /dev/null
+++ b/rust/vendor/num-derive/tests/issue-9.rs
@@ -0,0 +1,18 @@
+#![deny(unused_qualifications)]
+extern crate num;
+#[macro_use]
+extern crate num_derive;
+use num::FromPrimitive;
+use num::ToPrimitive;
+
+#[derive(FromPrimitive, ToPrimitive)]
+pub enum SomeEnum {
+ A = 1,
+}
+
+#[test]
+fn test_unused_qualifications() {
+ assert!(SomeEnum::from_u64(1).is_some());
+ assert!(SomeEnum::from_i64(-1).is_none());
+ assert!(SomeEnum::A.to_i64().is_some());
+}
diff --git a/rust/vendor/num-derive/tests/newtype.rs b/rust/vendor/num-derive/tests/newtype.rs
new file mode 100644
index 0000000..b80eaa1
--- /dev/null
+++ b/rust/vendor/num-derive/tests/newtype.rs
@@ -0,0 +1,91 @@
+extern crate num as num_renamed;
+#[macro_use]
+extern crate num_derive;
+
+use num_renamed::{Float, FromPrimitive, Num, NumCast, One, ToPrimitive, Zero};
+use std::ops::Neg;
+
+#[derive(
+ Debug,
+ Clone,
+ Copy,
+ PartialEq,
+ PartialOrd,
+ ToPrimitive,
+ FromPrimitive,
+ NumOps,
+ NumCast,
+ One,
+ Zero,
+ Num,
+ Float,
+)]
+struct MyFloat(f64);
+
+impl Neg for MyFloat {
+ type Output = MyFloat;
+ fn neg(self) -> Self {
+ MyFloat(self.0.neg())
+ }
+}
+
+#[test]
+fn test_from_primitive() {
+ assert_eq!(MyFloat::from_u32(25), Some(MyFloat(25.0)));
+}
+
+#[test]
+#[cfg(has_i128)]
+fn test_from_primitive_128() {
+ assert_eq!(
+ MyFloat::from_i128(std::i128::MIN),
+ Some(MyFloat((-2.0).powi(127)))
+ );
+}
+
+#[test]
+fn test_to_primitive() {
+ assert_eq!(MyFloat(25.0).to_u32(), Some(25));
+}
+
+#[test]
+#[cfg(has_i128)]
+fn test_to_primitive_128() {
+ let f = MyFloat::from_f32(std::f32::MAX).unwrap();
+ assert_eq!(f.to_i128(), None);
+ assert_eq!(f.to_u128(), Some(0xffff_ff00_0000_0000_0000_0000_0000_0000));
+}
+
+#[test]
+fn test_num_ops() {
+ assert_eq!(MyFloat(25.0) + MyFloat(10.0), MyFloat(35.0));
+ assert_eq!(MyFloat(25.0) - MyFloat(10.0), MyFloat(15.0));
+ assert_eq!(MyFloat(25.0) * MyFloat(2.0), MyFloat(50.0));
+ assert_eq!(MyFloat(25.0) / MyFloat(10.0), MyFloat(2.5));
+ assert_eq!(MyFloat(25.0) % MyFloat(10.0), MyFloat(5.0));
+}
+
+#[test]
+fn test_num_cast() {
+ assert_eq!(<MyFloat as NumCast>::from(25u8), Some(MyFloat(25.0)));
+}
+
+#[test]
+fn test_zero() {
+ assert_eq!(MyFloat::zero(), MyFloat(0.0));
+}
+
+#[test]
+fn test_one() {
+ assert_eq!(MyFloat::one(), MyFloat(1.0));
+}
+
+#[test]
+fn test_num() {
+ assert_eq!(MyFloat::from_str_radix("25", 10).ok(), Some(MyFloat(25.0)));
+}
+
+#[test]
+fn test_float() {
+ assert_eq!(MyFloat(4.0).log(MyFloat(2.0)), MyFloat(2.0));
+}
diff --git a/rust/vendor/num-derive/tests/num_derive_without_num.rs b/rust/vendor/num-derive/tests/num_derive_without_num.rs
new file mode 100644
index 0000000..edebbec
--- /dev/null
+++ b/rust/vendor/num-derive/tests/num_derive_without_num.rs
@@ -0,0 +1,20 @@
+// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[macro_use]
+extern crate num_derive;
+
+#[derive(Debug, FromPrimitive, ToPrimitive)]
+enum Direction {
+ Up,
+ Down,
+ Left,
+ Right,
+}
diff --git a/rust/vendor/num-derive/tests/trivial.rs b/rust/vendor/num-derive/tests/trivial.rs
new file mode 100644
index 0000000..d3b56b6
--- /dev/null
+++ b/rust/vendor/num-derive/tests/trivial.rs
@@ -0,0 +1,64 @@
+// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate num as num_renamed;
+#[macro_use]
+extern crate num_derive;
+
+#[derive(Debug, PartialEq, FromPrimitive, ToPrimitive)]
+enum Color {
+ Red,
+ Blue,
+ Green,
+}
+
+#[test]
+fn test_from_primitive_for_trivial_case() {
+ let v: [Option<Color>; 4] = [
+ num_renamed::FromPrimitive::from_u64(0),
+ num_renamed::FromPrimitive::from_u64(1),
+ num_renamed::FromPrimitive::from_u64(2),
+ num_renamed::FromPrimitive::from_u64(3),
+ ];
+
+ assert_eq!(
+ v,
+ [
+ Some(Color::Red),
+ Some(Color::Blue),
+ Some(Color::Green),
+ None
+ ]
+ );
+}
+
+#[test]
+fn test_to_primitive_for_trivial_case() {
+ let v: [Option<u64>; 3] = [
+ num_renamed::ToPrimitive::to_u64(&Color::Red),
+ num_renamed::ToPrimitive::to_u64(&Color::Blue),
+ num_renamed::ToPrimitive::to_u64(&Color::Green),
+ ];
+
+ assert_eq!(v, [Some(0), Some(1), Some(2)]);
+}
+
+#[test]
+fn test_reflexive_for_trivial_case() {
+ let before: [u64; 3] = [0, 1, 2];
+ let after: Vec<Option<u64>> = before
+ .iter()
+ .map(|&x| -> Option<Color> { num_renamed::FromPrimitive::from_u64(x) })
+ .map(|x| x.and_then(|x| num_renamed::ToPrimitive::to_u64(&x)))
+ .collect();
+ let before = before.iter().cloned().map(Some).collect::<Vec<_>>();
+
+ assert_eq!(before, after);
+}
diff --git a/rust/vendor/num-derive/tests/with_custom_values.rs b/rust/vendor/num-derive/tests/with_custom_values.rs
new file mode 100644
index 0000000..7ff3c09
--- /dev/null
+++ b/rust/vendor/num-derive/tests/with_custom_values.rs
@@ -0,0 +1,70 @@
+// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![cfg(feature = "full-syntax")]
+
+extern crate num as num_renamed;
+#[macro_use]
+extern crate num_derive;
+
+#[derive(Debug, PartialEq, FromPrimitive, ToPrimitive)]
+enum Color {
+ Red,
+ Blue = 5,
+ Green,
+ Alpha = (-3 - (-5isize)) - 10,
+}
+
+#[test]
+fn test_from_primitive_for_enum_with_custom_value() {
+ let v: [Option<Color>; 5] = [
+ num_renamed::FromPrimitive::from_u64(0),
+ num_renamed::FromPrimitive::from_u64(5),
+ num_renamed::FromPrimitive::from_u64(6),
+ num_renamed::FromPrimitive::from_u64(-8isize as u64),
+ num_renamed::FromPrimitive::from_u64(3),
+ ];
+
+ assert_eq!(
+ v,
+ [
+ Some(Color::Red),
+ Some(Color::Blue),
+ Some(Color::Green),
+ Some(Color::Alpha),
+ None
+ ]
+ );
+}
+
+#[test]
+fn test_to_primitive_for_enum_with_custom_value() {
+ let v: [Option<u64>; 4] = [
+ num_renamed::ToPrimitive::to_u64(&Color::Red),
+ num_renamed::ToPrimitive::to_u64(&Color::Blue),
+ num_renamed::ToPrimitive::to_u64(&Color::Green),
+ num_renamed::ToPrimitive::to_u64(&Color::Alpha),
+ ];
+
+ assert_eq!(v, [Some(0), Some(5), Some(6), Some(-8isize as u64)]);
+}
+
+#[test]
+fn test_reflexive_for_enum_with_custom_value() {
+ let before: [u64; 3] = [0, 5, 6];
+ let after: Vec<Option<u64>> = before
+ .iter()
+ .map(|&x| -> Option<Color> { num_renamed::FromPrimitive::from_u64(x) })
+ .map(|x| x.and_then(|x| num_renamed::ToPrimitive::to_u64(&x)))
+ .collect();
+ let before = before.into_iter().cloned().map(Some).collect::<Vec<_>>();
+
+ assert_eq!(before, after);
+}
diff --git a/rust/vendor/num-integer/.cargo-checksum.json b/rust/vendor/num-integer/.cargo-checksum.json
new file mode 100644
index 0000000..52b0e24
--- /dev/null
+++ b/rust/vendor/num-integer/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"01a1f6e6771981ddeaf682be79918c45a88d032d887f188fdcb1ee7eedcf63a6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"68f533703554b9130ea902776bd9eb20d1a2d32b213ebadebcd49ed0f1ef9728","RELEASES.md":"21252a72a308b4dfff190bc4b67d95f2be968fab5d7ddb58cd5cfbcdab8c5adf","benches/average.rs":"94ceeb7423bcd18ab0476bc3499505ce12d9552e53fa959e50975d71300f8404","benches/gcd.rs":"9b5c0ae8ccd6c7fc8f8384fb351d10cfdd0be5fbea9365f9ea925d8915b015bf","benches/roots.rs":"79b4ab2d8fe7bbf43fe65314d2e1bc206165bc4cb34b3ceaa899f9ea7af31c09","build.rs":"575b157527243fe355a7c8d7d874a1f790c3fb0177beba9032076a7803c5b9dd","src/average.rs":"a66cf6a49f893e60697c17b2540258e69daa15ab97d8d444c6f2e8cac2f01ae9","src/lib.rs":"b77bd1a04555b180da9661d98d69fb28eb59a02f02abbaaa332c2b27c4e753c9","src/roots.rs":"2a9b908bd3666b5cffc58c1b37d329e46ed02f71ad6d5deea1e8440c10660e1a","tests/average.rs":"5f26a31be042626e9af66f7b751798621561fa090da48b1ec5ab63e388288a91","tests/roots.rs":"a0caa4142899ec8cb806a7a0d3410c39d50de97cceadc4c2ceca707be91b1ddd"},"package":"225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"} \ No newline at end of file
diff --git a/rust/vendor/num-integer/Cargo.toml b/rust/vendor/num-integer/Cargo.toml
new file mode 100644
index 0000000..51a1a3e
--- /dev/null
+++ b/rust/vendor/num-integer/Cargo.toml
@@ -0,0 +1,51 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+name = "num-integer"
+version = "0.1.45"
+authors = ["The Rust Project Developers"]
+build = "build.rs"
+exclude = [
+ "/bors.toml",
+ "/ci/*",
+ "/.github/*",
+]
+description = "Integer traits and functions"
+homepage = "https://github.com/rust-num/num-integer"
+documentation = "https://docs.rs/num-integer"
+readme = "README.md"
+keywords = [
+ "mathematics",
+ "numerics",
+]
+categories = [
+ "algorithms",
+ "science",
+ "no-std",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-num/num-integer"
+
+[package.metadata.docs.rs]
+features = ["std"]
+
+[dependencies.num-traits]
+version = "0.2.11"
+default-features = false
+
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+default = ["std"]
+i128 = ["num-traits/i128"]
+std = ["num-traits/std"]
diff --git a/rust/vendor/num-integer/LICENSE-APACHE b/rust/vendor/num-integer/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num-integer/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num-integer/LICENSE-MIT b/rust/vendor/num-integer/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num-integer/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num-integer/README.md b/rust/vendor/num-integer/README.md
new file mode 100644
index 0000000..5f638cd
--- /dev/null
+++ b/rust/vendor/num-integer/README.md
@@ -0,0 +1,64 @@
+# num-integer
+
+[![crate](https://img.shields.io/crates/v/num-integer.svg)](https://crates.io/crates/num-integer)
+[![documentation](https://docs.rs/num-integer/badge.svg)](https://docs.rs/num-integer)
+[![minimum rustc 1.8](https://img.shields.io/badge/rustc-1.8+-red.svg)](https://rust-lang.github.io/rfcs/2495-min-rust-version.html)
+[![build status](https://github.com/rust-num/num-integer/workflows/master/badge.svg)](https://github.com/rust-num/num-integer/actions)
+
+`Integer` trait and functions for Rust.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-integer = "0.1"
+```
+
+and this to your crate root:
+
+```rust
+extern crate num_integer;
+```
+
+## Features
+
+This crate can be used without the standard library (`#![no_std]`) by disabling
+the default `std` feature. Use this in `Cargo.toml`:
+
+```toml
+[dependencies.num-integer]
+version = "0.1.36"
+default-features = false
+```
+
+There is no functional difference with and without `std` at this time, but
+there may be in the future.
+
+Implementations for `i128` and `u128` are only available with Rust 1.26 and
+later. The build script automatically detects this, but you can make it
+mandatory by enabling the `i128` crate feature.
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-integer` crate is tested for rustc 1.8 and greater.
+
+## License
+
+Licensed under either of
+
+ * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+ * [MIT license](http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/rust/vendor/num-integer/RELEASES.md b/rust/vendor/num-integer/RELEASES.md
new file mode 100644
index 0000000..05c649b
--- /dev/null
+++ b/rust/vendor/num-integer/RELEASES.md
@@ -0,0 +1,112 @@
+# Release 0.1.45 (2022-04-29)
+
+- [`Integer::next_multiple_of` and `prev_multiple_of` no longer overflow -1][45].
+- [`Integer::is_multiple_of` now handles a 0 argument without panicking][47]
+ for primitive integers.
+- [`ExtendedGcd` no longer has any private fields][46], making it possible for
+ external implementations to customize `Integer::extended_gcd`.
+
+**Contributors**: @ciphergoth, @cuviper, @tspiteri, @WizardOfMenlo
+
+[45]: https://github.com/rust-num/num-integer/pull/45
+[46]: https://github.com/rust-num/num-integer/pull/46
+[47]: https://github.com/rust-num/num-integer/pull/47
+
+# Release 0.1.44 (2020-10-29)
+
+- [The "i128" feature now bypasses compiler probing][35]. The build script
+ used to probe anyway and panic if requested support wasn't found, but
+ sometimes this ran into bad corner cases with `autocfg`.
+
+**Contributors**: @cuviper
+
+[35]: https://github.com/rust-num/num-integer/pull/35
+
+# Release 0.1.43 (2020-06-11)
+
+- [The new `Average` trait][31] computes fast integer averages, rounded up or
+ down, without any risk of overflow.
+
+**Contributors**: @althonos, @cuviper
+
+[31]: https://github.com/rust-num/num-integer/pull/31
+
+# Release 0.1.42 (2020-01-09)
+
+- [Updated the `autocfg` build dependency to 1.0][29].
+
+**Contributors**: @cuviper, @dingelish
+
+[29]: https://github.com/rust-num/num-integer/pull/29
+
+# Release 0.1.41 (2019-05-21)
+
+- [Fixed feature detection on `no_std` targets][25].
+
+**Contributors**: @cuviper
+
+[25]: https://github.com/rust-num/num-integer/pull/25
+
+# Release 0.1.40 (2019-05-20)
+
+- [Optimized primitive `gcd` by avoiding memory swaps][11].
+- [Fixed `lcm(0, 0)` to return `0`, rather than panicking][18].
+- [Added `Integer::div_ceil`, `next_multiple_of`, and `prev_multiple_of`][16].
+- [Added `Integer::gcd_lcm`, `extended_gcd`, and `extended_gcd_lcm`][19].
+
+**Contributors**: @cuviper, @ignatenkobrain, @smarnach, @strake
+
+[11]: https://github.com/rust-num/num-integer/pull/11
+[16]: https://github.com/rust-num/num-integer/pull/16
+[18]: https://github.com/rust-num/num-integer/pull/18
+[19]: https://github.com/rust-num/num-integer/pull/19
+
+# Release 0.1.39 (2018-06-20)
+
+- [The new `Roots` trait provides `sqrt`, `cbrt`, and `nth_root` methods][9],
+ calculating an `Integer`'s principal roots rounded toward zero.
+
+**Contributors**: @cuviper
+
+[9]: https://github.com/rust-num/num-integer/pull/9
+
+# Release 0.1.38 (2018-05-11)
+
+- [Support for 128-bit integers is now automatically detected and enabled.][8]
+ Setting the `i128` crate feature now causes the build script to panic if such
+ support is not detected.
+
+**Contributors**: @cuviper
+
+[8]: https://github.com/rust-num/num-integer/pull/8
+
+# Release 0.1.37 (2018-05-10)
+
+- [`Integer` is now implemented for `i128` and `u128`][7] starting with Rust
+ 1.26, enabled by the new `i128` crate feature.
+
+**Contributors**: @cuviper
+
+[7]: https://github.com/rust-num/num-integer/pull/7
+
+# Release 0.1.36 (2018-02-06)
+
+- [num-integer now has its own source repository][num-356] at [rust-num/num-integer][home].
+- [Corrected the argument order documented in `Integer::is_multiple_of`][1]
+- [There is now a `std` feature][5], enabled by default, along with the implication
+ that building *without* this feature makes this a `#[no_std]` crate.
+ - There is no difference in the API at this time.
+
+**Contributors**: @cuviper, @jaystrictor
+
+[home]: https://github.com/rust-num/num-integer
+[num-356]: https://github.com/rust-num/num/pull/356
+[1]: https://github.com/rust-num/num-integer/pull/1
+[5]: https://github.com/rust-num/num-integer/pull/5
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
+
diff --git a/rust/vendor/num-integer/benches/average.rs b/rust/vendor/num-integer/benches/average.rs
new file mode 100644
index 0000000..649078c
--- /dev/null
+++ b/rust/vendor/num-integer/benches/average.rs
@@ -0,0 +1,414 @@
+//! Benchmark average_floor and average_ceil
+
+#![feature(test)]
+
+extern crate num_integer;
+extern crate num_traits;
+extern crate test;
+
+use num_integer::Integer;
+use num_traits::{AsPrimitive, PrimInt, WrappingAdd, WrappingMul};
+use std::cmp::{max, min};
+use std::fmt::Debug;
+use test::{black_box, Bencher};
+
+// --- Utilities for RNG ----------------------------------------------------
+
+trait BenchInteger: Integer + PrimInt + WrappingAdd + WrappingMul + 'static {}
+
+impl<T> BenchInteger for T where T: Integer + PrimInt + WrappingAdd + WrappingMul + 'static {}
+
+// Simple PRNG so we don't have to worry about rand compatibility
+fn lcg<T>(x: T) -> T
+where
+ u32: AsPrimitive<T>,
+ T: BenchInteger,
+{
+ // LCG parameters from Numerical Recipes
+ // (but we're applying it to arbitrary sizes)
+ const LCG_A: u32 = 1664525;
+ const LCG_C: u32 = 1013904223;
+ x.wrapping_mul(&LCG_A.as_()).wrapping_add(&LCG_C.as_())
+}
+
+// --- Alt. Implementations -------------------------------------------------
+
+trait NaiveAverage {
+ fn naive_average_ceil(&self, other: &Self) -> Self;
+ fn naive_average_floor(&self, other: &Self) -> Self;
+}
+
+trait UncheckedAverage {
+ fn unchecked_average_ceil(&self, other: &Self) -> Self;
+ fn unchecked_average_floor(&self, other: &Self) -> Self;
+}
+
+trait ModuloAverage {
+ fn modulo_average_ceil(&self, other: &Self) -> Self;
+ fn modulo_average_floor(&self, other: &Self) -> Self;
+}
+
+macro_rules! naive_average {
+ ($T:ident) => {
+ impl super::NaiveAverage for $T {
+ fn naive_average_floor(&self, other: &$T) -> $T {
+ match self.checked_add(*other) {
+ Some(z) => Integer::div_floor(&z, &2),
+ None => {
+ if self > other {
+ let diff = self - other;
+ other + Integer::div_floor(&diff, &2)
+ } else {
+ let diff = other - self;
+ self + Integer::div_floor(&diff, &2)
+ }
+ }
+ }
+ }
+ fn naive_average_ceil(&self, other: &$T) -> $T {
+ match self.checked_add(*other) {
+ Some(z) => Integer::div_ceil(&z, &2),
+ None => {
+ if self > other {
+ let diff = self - other;
+ self - Integer::div_floor(&diff, &2)
+ } else {
+ let diff = other - self;
+ other - Integer::div_floor(&diff, &2)
+ }
+ }
+ }
+ }
+ }
+ };
+}
+
+macro_rules! unchecked_average {
+ ($T:ident) => {
+ impl super::UncheckedAverage for $T {
+ fn unchecked_average_floor(&self, other: &$T) -> $T {
+ self.wrapping_add(*other) / 2
+ }
+ fn unchecked_average_ceil(&self, other: &$T) -> $T {
+ (self.wrapping_add(*other) / 2).wrapping_add(1)
+ }
+ }
+ };
+}
+
+macro_rules! modulo_average {
+ ($T:ident) => {
+ impl super::ModuloAverage for $T {
+ fn modulo_average_ceil(&self, other: &$T) -> $T {
+ let (q1, r1) = self.div_mod_floor(&2);
+ let (q2, r2) = other.div_mod_floor(&2);
+ q1 + q2 + (r1 | r2)
+ }
+ fn modulo_average_floor(&self, other: &$T) -> $T {
+ let (q1, r1) = self.div_mod_floor(&2);
+ let (q2, r2) = other.div_mod_floor(&2);
+ q1 + q2 + (r1 * r2)
+ }
+ }
+ };
+}
+
+// --- Bench functions ------------------------------------------------------
+
+fn bench_unchecked<T, F>(b: &mut Bencher, v: &[(T, T)], f: F)
+where
+ T: Integer + Debug + Copy,
+ F: Fn(&T, &T) -> T,
+{
+ b.iter(|| {
+ for (x, y) in v {
+ black_box(f(x, y));
+ }
+ });
+}
+
+fn bench_ceil<T, F>(b: &mut Bencher, v: &[(T, T)], f: F)
+where
+ T: Integer + Debug + Copy,
+ F: Fn(&T, &T) -> T,
+{
+ for &(i, j) in v {
+ let rt = f(&i, &j);
+ let (a, b) = (min(i, j), max(i, j));
+ // if both numbers have the same sign, check that rt is in the middle
+ if (a < T::zero()) == (b < T::zero()) {
+ if (b - a).is_even() {
+ assert_eq!(rt - a, b - rt);
+ } else {
+ assert_eq!(rt - a, b - rt + T::one());
+ }
+ // if the numbers have different signs,
+ } else {
+ if (a + b).is_even() {
+ assert_eq!(rt, (a + b) / (T::one() + T::one()))
+ } else {
+ assert_eq!(rt, (a + b + T::one()) / (T::one() + T::one()))
+ }
+ }
+ }
+ bench_unchecked(b, v, f);
+}
+
+fn bench_floor<T, F>(b: &mut Bencher, v: &[(T, T)], f: F)
+where
+ T: Integer + Debug + Copy,
+ F: Fn(&T, &T) -> T,
+{
+ for &(i, j) in v {
+ let rt = f(&i, &j);
+ let (a, b) = (min(i, j), max(i, j));
+ // if both numbers have the same sign, check that rt is in the middle
+ if (a < T::zero()) == (b < T::zero()) {
+ if (b - a).is_even() {
+ assert_eq!(rt - a, b - rt);
+ } else {
+ assert_eq!(rt - a + T::one(), b - rt);
+ }
+ // if the numbers have different signs,
+ } else {
+ if (a + b).is_even() {
+ assert_eq!(rt, (a + b) / (T::one() + T::one()))
+ } else {
+ assert_eq!(rt, (a + b - T::one()) / (T::one() + T::one()))
+ }
+ }
+ }
+ bench_unchecked(b, v, f);
+}
+
+// --- Bench implementation -------------------------------------------------
+
+macro_rules! bench_average {
+ ($($T:ident),*) => {$(
+ mod $T {
+ use test::Bencher;
+ use num_integer::{Average, Integer};
+ use super::{UncheckedAverage, NaiveAverage, ModuloAverage};
+ use super::{bench_ceil, bench_floor, bench_unchecked};
+
+ naive_average!($T);
+ unchecked_average!($T);
+ modulo_average!($T);
+
+ const SIZE: $T = 30;
+
+ fn overflowing() -> Vec<($T, $T)> {
+ (($T::max_value()-SIZE)..$T::max_value())
+ .flat_map(|x| -> Vec<_> {
+ (($T::max_value()-100)..($T::max_value()-100+SIZE))
+ .map(|y| (x, y))
+ .collect()
+ })
+ .collect()
+ }
+
+ fn small() -> Vec<($T, $T)> {
+ (0..SIZE)
+ .flat_map(|x| -> Vec<_> {(0..SIZE).map(|y| (x, y)).collect()})
+ .collect()
+ }
+
+ fn rand() -> Vec<($T, $T)> {
+ small()
+ .into_iter()
+ .map(|(x, y)| (super::lcg(x), super::lcg(y)))
+ .collect()
+ }
+
+ mod ceil {
+
+ use super::*;
+
+ mod small {
+
+ use super::*;
+
+ #[bench]
+ fn optimized(b: &mut Bencher) {
+ let v = small();
+ bench_ceil(b, &v, |x: &$T, y: &$T| x.average_ceil(y));
+ }
+
+ #[bench]
+ fn naive(b: &mut Bencher) {
+ let v = small();
+ bench_ceil(b, &v, |x: &$T, y: &$T| x.naive_average_ceil(y));
+ }
+
+ #[bench]
+ fn unchecked(b: &mut Bencher) {
+ let v = small();
+ bench_unchecked(b, &v, |x: &$T, y: &$T| x.unchecked_average_ceil(y));
+ }
+
+ #[bench]
+ fn modulo(b: &mut Bencher) {
+ let v = small();
+ bench_ceil(b, &v, |x: &$T, y: &$T| x.modulo_average_ceil(y));
+ }
+ }
+
+ mod overflowing {
+
+ use super::*;
+
+ #[bench]
+ fn optimized(b: &mut Bencher) {
+ let v = overflowing();
+ bench_ceil(b, &v, |x: &$T, y: &$T| x.average_ceil(y));
+ }
+
+ #[bench]
+ fn naive(b: &mut Bencher) {
+ let v = overflowing();
+ bench_ceil(b, &v, |x: &$T, y: &$T| x.naive_average_ceil(y));
+ }
+
+ #[bench]
+ fn unchecked(b: &mut Bencher) {
+ let v = overflowing();
+ bench_unchecked(b, &v, |x: &$T, y: &$T| x.unchecked_average_ceil(y));
+ }
+
+ #[bench]
+ fn modulo(b: &mut Bencher) {
+ let v = overflowing();
+ bench_ceil(b, &v, |x: &$T, y: &$T| x.modulo_average_ceil(y));
+ }
+ }
+
+ mod rand {
+
+ use super::*;
+
+ #[bench]
+ fn optimized(b: &mut Bencher) {
+ let v = rand();
+ bench_ceil(b, &v, |x: &$T, y: &$T| x.average_ceil(y));
+ }
+
+ #[bench]
+ fn naive(b: &mut Bencher) {
+ let v = rand();
+ bench_ceil(b, &v, |x: &$T, y: &$T| x.naive_average_ceil(y));
+ }
+
+ #[bench]
+ fn unchecked(b: &mut Bencher) {
+ let v = rand();
+ bench_unchecked(b, &v, |x: &$T, y: &$T| x.unchecked_average_ceil(y));
+ }
+
+ #[bench]
+ fn modulo(b: &mut Bencher) {
+ let v = rand();
+ bench_ceil(b, &v, |x: &$T, y: &$T| x.modulo_average_ceil(y));
+ }
+ }
+
+ }
+
+ mod floor {
+
+ use super::*;
+
+ mod small {
+
+ use super::*;
+
+ #[bench]
+ fn optimized(b: &mut Bencher) {
+ let v = small();
+ bench_floor(b, &v, |x: &$T, y: &$T| x.average_floor(y));
+ }
+
+ #[bench]
+ fn naive(b: &mut Bencher) {
+ let v = small();
+ bench_floor(b, &v, |x: &$T, y: &$T| x.naive_average_floor(y));
+ }
+
+ #[bench]
+ fn unchecked(b: &mut Bencher) {
+ let v = small();
+ bench_unchecked(b, &v, |x: &$T, y: &$T| x.unchecked_average_floor(y));
+ }
+
+ #[bench]
+ fn modulo(b: &mut Bencher) {
+ let v = small();
+ bench_floor(b, &v, |x: &$T, y: &$T| x.modulo_average_floor(y));
+ }
+ }
+
+ mod overflowing {
+
+ use super::*;
+
+ #[bench]
+ fn optimized(b: &mut Bencher) {
+ let v = overflowing();
+ bench_floor(b, &v, |x: &$T, y: &$T| x.average_floor(y));
+ }
+
+ #[bench]
+ fn naive(b: &mut Bencher) {
+ let v = overflowing();
+ bench_floor(b, &v, |x: &$T, y: &$T| x.naive_average_floor(y));
+ }
+
+ #[bench]
+ fn unchecked(b: &mut Bencher) {
+ let v = overflowing();
+ bench_unchecked(b, &v, |x: &$T, y: &$T| x.unchecked_average_floor(y));
+ }
+
+ #[bench]
+ fn modulo(b: &mut Bencher) {
+ let v = overflowing();
+ bench_floor(b, &v, |x: &$T, y: &$T| x.modulo_average_floor(y));
+ }
+ }
+
+ mod rand {
+
+ use super::*;
+
+ #[bench]
+ fn optimized(b: &mut Bencher) {
+ let v = rand();
+ bench_floor(b, &v, |x: &$T, y: &$T| x.average_floor(y));
+ }
+
+ #[bench]
+ fn naive(b: &mut Bencher) {
+ let v = rand();
+ bench_floor(b, &v, |x: &$T, y: &$T| x.naive_average_floor(y));
+ }
+
+ #[bench]
+ fn unchecked(b: &mut Bencher) {
+ let v = rand();
+ bench_unchecked(b, &v, |x: &$T, y: &$T| x.unchecked_average_floor(y));
+ }
+
+ #[bench]
+ fn modulo(b: &mut Bencher) {
+ let v = rand();
+ bench_floor(b, &v, |x: &$T, y: &$T| x.modulo_average_floor(y));
+ }
+ }
+
+ }
+
+ }
+ )*}
+}
+
+bench_average!(i8, i16, i32, i64, i128, isize);
+bench_average!(u8, u16, u32, u64, u128, usize);
diff --git a/rust/vendor/num-integer/benches/gcd.rs b/rust/vendor/num-integer/benches/gcd.rs
new file mode 100644
index 0000000..082d5ee
--- /dev/null
+++ b/rust/vendor/num-integer/benches/gcd.rs
@@ -0,0 +1,176 @@
+//! Benchmark comparing the current GCD implemtation against an older one.
+
+#![feature(test)]
+
+extern crate num_integer;
+extern crate num_traits;
+extern crate test;
+
+use num_integer::Integer;
+use num_traits::{AsPrimitive, Bounded, Signed};
+use test::{black_box, Bencher};
+
+trait GcdOld: Integer {
+ fn gcd_old(&self, other: &Self) -> Self;
+}
+
+macro_rules! impl_gcd_old_for_isize {
+ ($T:ty) => {
+ impl GcdOld for $T {
+ /// Calculates the Greatest Common Divisor (GCD) of the number and
+ /// `other`. The result is always positive.
+ #[inline]
+ fn gcd_old(&self, other: &Self) -> Self {
+ // Use Stein's algorithm
+ let mut m = *self;
+ let mut n = *other;
+ if m == 0 || n == 0 {
+ return (m | n).abs();
+ }
+
+ // find common factors of 2
+ let shift = (m | n).trailing_zeros();
+
+ // The algorithm needs positive numbers, but the minimum value
+ // can't be represented as a positive one.
+ // It's also a power of two, so the gcd can be
+ // calculated by bitshifting in that case
+
+ // Assuming two's complement, the number created by the shift
+ // is positive for all numbers except gcd = abs(min value)
+ // The call to .abs() causes a panic in debug mode
+ if m == Self::min_value() || n == Self::min_value() {
+ return (1 << shift).abs();
+ }
+
+ // guaranteed to be positive now, rest like unsigned algorithm
+ m = m.abs();
+ n = n.abs();
+
+ // divide n and m by 2 until odd
+ // m inside loop
+ n >>= n.trailing_zeros();
+
+ while m != 0 {
+ m >>= m.trailing_zeros();
+ if n > m {
+ std::mem::swap(&mut n, &mut m)
+ }
+ m -= n;
+ }
+
+ n << shift
+ }
+ }
+ };
+}
+
+impl_gcd_old_for_isize!(i8);
+impl_gcd_old_for_isize!(i16);
+impl_gcd_old_for_isize!(i32);
+impl_gcd_old_for_isize!(i64);
+impl_gcd_old_for_isize!(isize);
+impl_gcd_old_for_isize!(i128);
+
+macro_rules! impl_gcd_old_for_usize {
+ ($T:ty) => {
+ impl GcdOld for $T {
+ /// Calculates the Greatest Common Divisor (GCD) of the number and
+ /// `other`. The result is always positive.
+ #[inline]
+ fn gcd_old(&self, other: &Self) -> Self {
+ // Use Stein's algorithm
+ let mut m = *self;
+ let mut n = *other;
+ if m == 0 || n == 0 {
+ return m | n;
+ }
+
+ // find common factors of 2
+ let shift = (m | n).trailing_zeros();
+
+ // divide n and m by 2 until odd
+ // m inside loop
+ n >>= n.trailing_zeros();
+
+ while m != 0 {
+ m >>= m.trailing_zeros();
+ if n > m {
+ std::mem::swap(&mut n, &mut m)
+ }
+ m -= n;
+ }
+
+ n << shift
+ }
+ }
+ };
+}
+
+impl_gcd_old_for_usize!(u8);
+impl_gcd_old_for_usize!(u16);
+impl_gcd_old_for_usize!(u32);
+impl_gcd_old_for_usize!(u64);
+impl_gcd_old_for_usize!(usize);
+impl_gcd_old_for_usize!(u128);
+
+/// Return an iterator that yields all Fibonacci numbers fitting into a u128.
+fn fibonacci() -> impl Iterator<Item = u128> {
+ (0..185).scan((0, 1), |&mut (ref mut a, ref mut b), _| {
+ let tmp = *a;
+ *a = *b;
+ *b += tmp;
+ Some(*b)
+ })
+}
+
+fn run_bench<T: Integer + Bounded + Copy + 'static>(b: &mut Bencher, gcd: fn(&T, &T) -> T)
+where
+ T: AsPrimitive<u128>,
+ u128: AsPrimitive<T>,
+{
+ let max_value: u128 = T::max_value().as_();
+ let pairs: Vec<(T, T)> = fibonacci()
+ .collect::<Vec<_>>()
+ .windows(2)
+ .filter(|&pair| pair[0] <= max_value && pair[1] <= max_value)
+ .map(|pair| (pair[0].as_(), pair[1].as_()))
+ .collect();
+ b.iter(|| {
+ for &(ref m, ref n) in &pairs {
+ black_box(gcd(m, n));
+ }
+ });
+}
+
+macro_rules! bench_gcd {
+ ($T:ident) => {
+ mod $T {
+ use crate::{run_bench, GcdOld};
+ use num_integer::Integer;
+ use test::Bencher;
+
+ #[bench]
+ fn bench_gcd(b: &mut Bencher) {
+ run_bench(b, $T::gcd);
+ }
+
+ #[bench]
+ fn bench_gcd_old(b: &mut Bencher) {
+ run_bench(b, $T::gcd_old);
+ }
+ }
+ };
+}
+
+bench_gcd!(u8);
+bench_gcd!(u16);
+bench_gcd!(u32);
+bench_gcd!(u64);
+bench_gcd!(u128);
+
+bench_gcd!(i8);
+bench_gcd!(i16);
+bench_gcd!(i32);
+bench_gcd!(i64);
+bench_gcd!(i128);
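The benchmark pits the crate's current `gcd` against the older binary (Stein's) variant reproduced above over consecutive Fibonacci pairs, the classic worst case for the Euclidean algorithm. A small standalone sketch (illustrative helper names, not crate APIs) showing that a binary GCD of the same shape agrees with the textbook Euclidean one:

```rust
// Binary (Stein's) GCD for u64, structured like the vendored implementation.
fn stein_gcd(mut m: u64, mut n: u64) -> u64 {
    if m == 0 || n == 0 {
        return m | n;
    }
    let shift = (m | n).trailing_zeros(); // common factors of two
    m >>= m.trailing_zeros();
    n >>= n.trailing_zeros();
    while m != n {
        if m > n {
            m -= n;
            m >>= m.trailing_zeros();
        } else {
            n -= m;
            n >>= n.trailing_zeros();
        }
    }
    m << shift
}

// Classic Euclidean GCD for comparison.
fn euclid_gcd(mut m: u64, mut n: u64) -> u64 {
    while n != 0 {
        let r = m % n;
        m = n;
        n = r;
    }
    m
}

fn main() {
    for &(a, b) in &[(0, 5), (12, 18), (270, 192), (97, 61)] {
        assert_eq!(stein_gcd(a, b), euclid_gcd(a, b));
    }
}
```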
diff --git a/rust/vendor/num-integer/benches/roots.rs b/rust/vendor/num-integer/benches/roots.rs
new file mode 100644
index 0000000..7f67278
--- /dev/null
+++ b/rust/vendor/num-integer/benches/roots.rs
@@ -0,0 +1,170 @@
+//! Benchmark sqrt and cbrt
+
+#![feature(test)]
+
+extern crate num_integer;
+extern crate num_traits;
+extern crate test;
+
+use num_integer::Integer;
+use num_traits::checked_pow;
+use num_traits::{AsPrimitive, PrimInt, WrappingAdd, WrappingMul};
+use test::{black_box, Bencher};
+
+trait BenchInteger: Integer + PrimInt + WrappingAdd + WrappingMul + 'static {}
+
+impl<T> BenchInteger for T where T: Integer + PrimInt + WrappingAdd + WrappingMul + 'static {}
+
+fn bench<T, F>(b: &mut Bencher, v: &[T], f: F, n: u32)
+where
+ T: BenchInteger,
+ F: Fn(&T) -> T,
+{
+ // Pre-validate the results...
+ for i in v {
+ let rt = f(i);
+ if *i >= T::zero() {
+ let rt1 = rt + T::one();
+ assert!(rt.pow(n) <= *i);
+ if let Some(x) = checked_pow(rt1, n as usize) {
+ assert!(*i < x);
+ }
+ } else {
+ let rt1 = rt - T::one();
+ assert!(rt < T::zero());
+ assert!(*i <= rt.pow(n));
+ if let Some(x) = checked_pow(rt1, n as usize) {
+ assert!(x < *i);
+ }
+ };
+ }
+
+ // Now just run as fast as we can!
+ b.iter(|| {
+ for i in v {
+ black_box(f(i));
+ }
+ });
+}
+
+// Simple PRNG so we don't have to worry about rand compatibility
+fn lcg<T>(x: T) -> T
+where
+ u32: AsPrimitive<T>,
+ T: BenchInteger,
+{
+ // LCG parameters from Numerical Recipes
+ // (but we're applying it to arbitrary sizes)
+ const LCG_A: u32 = 1664525;
+ const LCG_C: u32 = 1013904223;
+ x.wrapping_mul(&LCG_A.as_()).wrapping_add(&LCG_C.as_())
+}
+
+fn bench_rand<T, F>(b: &mut Bencher, f: F, n: u32)
+where
+ u32: AsPrimitive<T>,
+ T: BenchInteger,
+ F: Fn(&T) -> T,
+{
+ let mut x: T = 3u32.as_();
+ let v: Vec<T> = (0..1000)
+ .map(|_| {
+ x = lcg(x);
+ x
+ })
+ .collect();
+ bench(b, &v, f, n);
+}
+
+fn bench_rand_pos<T, F>(b: &mut Bencher, f: F, n: u32)
+where
+ u32: AsPrimitive<T>,
+ T: BenchInteger,
+ F: Fn(&T) -> T,
+{
+ let mut x: T = 3u32.as_();
+ let v: Vec<T> = (0..1000)
+ .map(|_| {
+ x = lcg(x);
+ while x < T::zero() {
+ x = lcg(x);
+ }
+ x
+ })
+ .collect();
+ bench(b, &v, f, n);
+}
+
+fn bench_small<T, F>(b: &mut Bencher, f: F, n: u32)
+where
+ u32: AsPrimitive<T>,
+ T: BenchInteger,
+ F: Fn(&T) -> T,
+{
+ let v: Vec<T> = (0..1000).map(|i| i.as_()).collect();
+ bench(b, &v, f, n);
+}
+
+fn bench_small_pos<T, F>(b: &mut Bencher, f: F, n: u32)
+where
+ u32: AsPrimitive<T>,
+ T: BenchInteger,
+ F: Fn(&T) -> T,
+{
+ let v: Vec<T> = (0..1000)
+ .map(|i| i.as_().mod_floor(&T::max_value()))
+ .collect();
+ bench(b, &v, f, n);
+}
+
+macro_rules! bench_roots {
+ ($($T:ident),*) => {$(
+ mod $T {
+ use test::Bencher;
+ use num_integer::Roots;
+
+ #[bench]
+ fn sqrt_rand(b: &mut Bencher) {
+ ::bench_rand_pos(b, $T::sqrt, 2);
+ }
+
+ #[bench]
+ fn sqrt_small(b: &mut Bencher) {
+ ::bench_small_pos(b, $T::sqrt, 2);
+ }
+
+ #[bench]
+ fn cbrt_rand(b: &mut Bencher) {
+ ::bench_rand(b, $T::cbrt, 3);
+ }
+
+ #[bench]
+ fn cbrt_small(b: &mut Bencher) {
+ ::bench_small(b, $T::cbrt, 3);
+ }
+
+ #[bench]
+ fn fourth_root_rand(b: &mut Bencher) {
+ ::bench_rand_pos(b, |x: &$T| x.nth_root(4), 4);
+ }
+
+ #[bench]
+ fn fourth_root_small(b: &mut Bencher) {
+ ::bench_small_pos(b, |x: &$T| x.nth_root(4), 4);
+ }
+
+ #[bench]
+ fn fifth_root_rand(b: &mut Bencher) {
+ ::bench_rand(b, |x: &$T| x.nth_root(5), 5);
+ }
+
+ #[bench]
+ fn fifth_root_small(b: &mut Bencher) {
+ ::bench_small(b, |x: &$T| x.nth_root(5), 5);
+ }
+ }
+ )*}
+}
+
+bench_roots!(i8, i16, i32, i64, i128);
+bench_roots!(u8, u16, u32, u64, u128);
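The `lcg` helper above is what seeds every input vector: with the fixed Numerical Recipes constants 1664525 and 1013904223 the sequence is fully deterministic, so each benchmark run measures identical inputs without depending on the `rand` crate. A standalone sketch of the same recurrence for `u32`:

```rust
// x_{k+1} = x_k * 1664525 + 1013904223 (mod 2^32), with wrapping arithmetic
// as in the benchmark's `lcg` helper.
fn lcg_seq(seed: u32, len: usize) -> Vec<u32> {
    let mut x = seed;
    (0..len)
        .map(|_| {
            x = x.wrapping_mul(1_664_525).wrapping_add(1_013_904_223);
            x
        })
        .collect()
}

fn main() {
    // Deterministic: repeated runs produce identical benchmark inputs.
    assert_eq!(lcg_seq(3, 1000), lcg_seq(3, 1000));
    println!("first values: {:?}", lcg_seq(3, 5));
}
```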
diff --git a/rust/vendor/num-integer/build.rs b/rust/vendor/num-integer/build.rs
new file mode 100644
index 0000000..37c9857
--- /dev/null
+++ b/rust/vendor/num-integer/build.rs
@@ -0,0 +1,13 @@
+extern crate autocfg;
+
+use std::env;
+
+fn main() {
+ // If the "i128" feature is explicitly requested, don't bother probing for it.
+ // It will still cause a build error if that was set improperly.
+ if env::var_os("CARGO_FEATURE_I128").is_some() || autocfg::new().probe_type("i128") {
+ autocfg::emit("has_i128");
+ }
+
+ autocfg::rerun_path("build.rs");
+}
diff --git a/rust/vendor/num-integer/src/average.rs b/rust/vendor/num-integer/src/average.rs
new file mode 100644
index 0000000..29cd11e
--- /dev/null
+++ b/rust/vendor/num-integer/src/average.rs
@@ -0,0 +1,78 @@
+use core::ops::{BitAnd, BitOr, BitXor, Shr};
+use Integer;
+
+/// Provides methods to compute the average of two integers, without overflows.
+pub trait Average: Integer {
+ /// Returns the ceiling value of the average of `self` and `other`.
+ /// -- `⌈(self + other)/2⌉`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_integer::Average;
+ ///
+ /// assert_eq!(( 3).average_ceil(&10), 7);
+ /// assert_eq!((-2).average_ceil(&-5), -3);
+ /// assert_eq!(( 4).average_ceil(& 4), 4);
+ ///
+ /// assert_eq!(u8::max_value().average_ceil(&2), 129);
+ /// assert_eq!(i8::min_value().average_ceil(&-1), -64);
+ /// assert_eq!(i8::min_value().average_ceil(&i8::max_value()), 0);
+ /// ```
+ ///
+ fn average_ceil(&self, other: &Self) -> Self;
+
+ /// Returns the floor value of the average of `self` and `other`.
+ /// -- `⌊(self + other)/2⌋`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_integer::Average;
+ ///
+ /// assert_eq!(( 3).average_floor(&10), 6);
+ /// assert_eq!((-2).average_floor(&-5), -4);
+ /// assert_eq!(( 4).average_floor(& 4), 4);
+ ///
+ /// assert_eq!(u8::max_value().average_floor(&2), 128);
+ /// assert_eq!(i8::min_value().average_floor(&-1), -65);
+ /// assert_eq!(i8::min_value().average_floor(&i8::max_value()), -1);
+ /// ```
+ ///
+ fn average_floor(&self, other: &Self) -> Self;
+}
+
+impl<I> Average for I
+where
+ I: Integer + Shr<usize, Output = I>,
+ for<'a, 'b> &'a I:
+ BitAnd<&'b I, Output = I> + BitOr<&'b I, Output = I> + BitXor<&'b I, Output = I>,
+{
+ // The Henry Gordon Dietz implementation as shown in Hacker's Delight,
+ // see http://aggregate.org/MAGIC/#Average%20of%20Integers
+
+ /// Returns the floor value of the average of `self` and `other`.
+ #[inline]
+ fn average_floor(&self, other: &I) -> I {
+ (self & other) + ((self ^ other) >> 1)
+ }
+
+ /// Returns the ceil value of the average of `self` and `other`.
+ #[inline]
+ fn average_ceil(&self, other: &I) -> I {
+ (self | other) - ((self ^ other) >> 1)
+ }
+}
+
+/// Returns the floor value of the average of `x` and `y` --
+/// see [Average::average_floor](trait.Average.html#tymethod.average_floor).
+#[inline]
+pub fn average_floor<T: Average>(x: T, y: T) -> T {
+ x.average_floor(&y)
+}
+/// Returns the ceiling value of the average of `x` and `y` --
+/// see [Average::average_ceil](trait.Average.html#tymethod.average_ceil).
+#[inline]
+pub fn average_ceil<T: Average>(x: T, y: T) -> T {
+ x.average_ceil(&y)
+}
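The Dietz identities above work by splitting the sum: `x & y` holds the bits both operands share, and `(x ^ y) >> 1` contributes half of the bits where they differ, so the full (possibly overflowing) sum `x + y` is never materialized. A standalone exhaustive check over `u8` (not part of the vendored sources) against a widened reference:

```rust
fn main() {
    for x in 0u8..=255 {
        for y in 0u8..=255 {
            let sum = u16::from(x) + u16::from(y); // widened, cannot overflow
            let floor = (x & y) + ((x ^ y) >> 1);
            let ceil = (x | y) - ((x ^ y) >> 1);
            assert_eq!(u16::from(floor), sum / 2); // ⌊(x + y)/2⌋
            assert_eq!(u16::from(ceil), (sum + 1) / 2); // ⌈(x + y)/2⌉
        }
    }
}
```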
diff --git a/rust/vendor/num-integer/src/lib.rs b/rust/vendor/num-integer/src/lib.rs
new file mode 100644
index 0000000..5005801
--- /dev/null
+++ b/rust/vendor/num-integer/src/lib.rs
@@ -0,0 +1,1386 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Integer trait and functions.
+//!
+//! ## Compatibility
+//!
+//! The `num-integer` crate is tested for rustc 1.8 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num-integer/0.1")]
+#![no_std]
+#[cfg(feature = "std")]
+extern crate std;
+
+extern crate num_traits as traits;
+
+use core::mem;
+use core::ops::Add;
+
+use traits::{Num, Signed, Zero};
+
+mod roots;
+pub use roots::Roots;
+pub use roots::{cbrt, nth_root, sqrt};
+
+mod average;
+pub use average::Average;
+pub use average::{average_ceil, average_floor};
+
+pub trait Integer: Sized + Num + PartialOrd + Ord + Eq {
+ /// Floored integer division.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert!(( 8).div_floor(& 3) == 2);
+ /// assert!(( 8).div_floor(&-3) == -3);
+ /// assert!((-8).div_floor(& 3) == -3);
+ /// assert!((-8).div_floor(&-3) == 2);
+ ///
+ /// assert!(( 1).div_floor(& 2) == 0);
+ /// assert!(( 1).div_floor(&-2) == -1);
+ /// assert!((-1).div_floor(& 2) == -1);
+ /// assert!((-1).div_floor(&-2) == 0);
+ /// ~~~
+ fn div_floor(&self, other: &Self) -> Self;
+
+ /// Floored integer modulo, satisfying:
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// # let n = 1; let d = 1;
+ /// assert!(n.div_floor(&d) * d + n.mod_floor(&d) == n)
+ /// ~~~
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert!(( 8).mod_floor(& 3) == 2);
+ /// assert!(( 8).mod_floor(&-3) == -1);
+ /// assert!((-8).mod_floor(& 3) == 1);
+ /// assert!((-8).mod_floor(&-3) == -2);
+ ///
+ /// assert!(( 1).mod_floor(& 2) == 1);
+ /// assert!(( 1).mod_floor(&-2) == -1);
+ /// assert!((-1).mod_floor(& 2) == 1);
+ /// assert!((-1).mod_floor(&-2) == -1);
+ /// ~~~
+ fn mod_floor(&self, other: &Self) -> Self;
+
+ /// Ceiled integer division.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(( 8).div_ceil( &3), 3);
+ /// assert_eq!(( 8).div_ceil(&-3), -2);
+ /// assert_eq!((-8).div_ceil( &3), -2);
+ /// assert_eq!((-8).div_ceil(&-3), 3);
+ ///
+ /// assert_eq!(( 1).div_ceil( &2), 1);
+ /// assert_eq!(( 1).div_ceil(&-2), 0);
+ /// assert_eq!((-1).div_ceil( &2), 0);
+ /// assert_eq!((-1).div_ceil(&-2), 1);
+ /// ~~~
+ fn div_ceil(&self, other: &Self) -> Self {
+ let (q, r) = self.div_mod_floor(other);
+ if r.is_zero() {
+ q
+ } else {
+ q + Self::one()
+ }
+ }
+
+ /// Greatest Common Divisor (GCD).
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(6.gcd(&8), 2);
+ /// assert_eq!(7.gcd(&3), 1);
+ /// ~~~
+ fn gcd(&self, other: &Self) -> Self;
+
+ /// Lowest Common Multiple (LCM).
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(7.lcm(&3), 21);
+ /// assert_eq!(2.lcm(&4), 4);
+ /// assert_eq!(0.lcm(&0), 0);
+ /// ~~~
+ fn lcm(&self, other: &Self) -> Self;
+
+ /// Greatest Common Divisor (GCD) and
+ /// Lowest Common Multiple (LCM) together.
+ ///
+ /// Potentially more efficient than calling `gcd` and `lcm`
+ /// individually for identical inputs.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(10.gcd_lcm(&4), (2, 20));
+ /// assert_eq!(8.gcd_lcm(&9), (1, 72));
+ /// ~~~
+ #[inline]
+ fn gcd_lcm(&self, other: &Self) -> (Self, Self) {
+ (self.gcd(other), self.lcm(other))
+ }
+
+ /// Greatest common divisor and Bézout coefficients.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # extern crate num_integer;
+ /// # extern crate num_traits;
+ /// # fn main() {
+ /// # use num_integer::{ExtendedGcd, Integer};
+ /// # use num_traits::NumAssign;
+ /// fn check<A: Copy + Integer + NumAssign>(a: A, b: A) -> bool {
+ /// let ExtendedGcd { gcd, x, y, .. } = a.extended_gcd(&b);
+ /// gcd == x * a + y * b
+ /// }
+ /// assert!(check(10isize, 4isize));
+ /// assert!(check(8isize, 9isize));
+ /// # }
+ /// ~~~
+ #[inline]
+ fn extended_gcd(&self, other: &Self) -> ExtendedGcd<Self>
+ where
+ Self: Clone,
+ {
+ let mut s = (Self::zero(), Self::one());
+ let mut t = (Self::one(), Self::zero());
+ let mut r = (other.clone(), self.clone());
+
+ while !r.0.is_zero() {
+ let q = r.1.clone() / r.0.clone();
+ let f = |mut r: (Self, Self)| {
+ mem::swap(&mut r.0, &mut r.1);
+ r.0 = r.0 - q.clone() * r.1.clone();
+ r
+ };
+ r = f(r);
+ s = f(s);
+ t = f(t);
+ }
+
+ if r.1 >= Self::zero() {
+ ExtendedGcd {
+ gcd: r.1,
+ x: s.1,
+ y: t.1,
+ }
+ } else {
+ ExtendedGcd {
+ gcd: Self::zero() - r.1,
+ x: Self::zero() - s.1,
+ y: Self::zero() - t.1,
+ }
+ }
+ }
+
+ /// Greatest common divisor, least common multiple, and Bézout coefficients.
+ #[inline]
+ fn extended_gcd_lcm(&self, other: &Self) -> (ExtendedGcd<Self>, Self)
+ where
+ Self: Clone + Signed,
+ {
+ (self.extended_gcd(other), self.lcm(other))
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ fn divides(&self, other: &Self) -> bool;
+
+ /// Returns `true` if `self` is a multiple of `other`.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(9.is_multiple_of(&3), true);
+ /// assert_eq!(3.is_multiple_of(&9), false);
+ /// ~~~
+ fn is_multiple_of(&self, other: &Self) -> bool;
+
+ /// Returns `true` if the number is even.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(3.is_even(), false);
+ /// assert_eq!(4.is_even(), true);
+ /// ~~~
+ fn is_even(&self) -> bool;
+
+ /// Returns `true` if the number is odd.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(3.is_odd(), true);
+ /// assert_eq!(4.is_odd(), false);
+ /// ~~~
+ fn is_odd(&self) -> bool;
+
+ /// Simultaneous truncated integer division and modulus.
+ /// Returns `(quotient, remainder)`.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(( 8).div_rem( &3), ( 2, 2));
+ /// assert_eq!(( 8).div_rem(&-3), (-2, 2));
+ /// assert_eq!((-8).div_rem( &3), (-2, -2));
+ /// assert_eq!((-8).div_rem(&-3), ( 2, -2));
+ ///
+ /// assert_eq!(( 1).div_rem( &2), ( 0, 1));
+ /// assert_eq!(( 1).div_rem(&-2), ( 0, 1));
+ /// assert_eq!((-1).div_rem( &2), ( 0, -1));
+ /// assert_eq!((-1).div_rem(&-2), ( 0, -1));
+ /// ~~~
+ fn div_rem(&self, other: &Self) -> (Self, Self);
+
+ /// Simultaneous floored integer division and modulus.
+ /// Returns `(quotient, remainder)`.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(( 8).div_mod_floor( &3), ( 2, 2));
+ /// assert_eq!(( 8).div_mod_floor(&-3), (-3, -1));
+ /// assert_eq!((-8).div_mod_floor( &3), (-3, 1));
+ /// assert_eq!((-8).div_mod_floor(&-3), ( 2, -2));
+ ///
+ /// assert_eq!(( 1).div_mod_floor( &2), ( 0, 1));
+ /// assert_eq!(( 1).div_mod_floor(&-2), (-1, -1));
+ /// assert_eq!((-1).div_mod_floor( &2), (-1, 1));
+ /// assert_eq!((-1).div_mod_floor(&-2), ( 0, -1));
+ /// ~~~
+ fn div_mod_floor(&self, other: &Self) -> (Self, Self) {
+ (self.div_floor(other), self.mod_floor(other))
+ }
+
+ /// Rounds up to nearest multiple of argument.
+ ///
+ /// # Notes
+ ///
+ /// For signed types, `a.next_multiple_of(b) = a.prev_multiple_of(b.neg())`.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(( 16).next_multiple_of(& 8), 16);
+ /// assert_eq!(( 23).next_multiple_of(& 8), 24);
+ /// assert_eq!(( 16).next_multiple_of(&-8), 16);
+ /// assert_eq!(( 23).next_multiple_of(&-8), 16);
+ /// assert_eq!((-16).next_multiple_of(& 8), -16);
+ /// assert_eq!((-23).next_multiple_of(& 8), -16);
+ /// assert_eq!((-16).next_multiple_of(&-8), -16);
+ /// assert_eq!((-23).next_multiple_of(&-8), -24);
+ /// ~~~
+ #[inline]
+ fn next_multiple_of(&self, other: &Self) -> Self
+ where
+ Self: Clone,
+ {
+ let m = self.mod_floor(other);
+ self.clone()
+ + if m.is_zero() {
+ Self::zero()
+ } else {
+ other.clone() - m
+ }
+ }
+
+ /// Rounds down to nearest multiple of argument.
+ ///
+ /// # Notes
+ ///
+ /// For signed types, `a.prev_multiple_of(b) = a.next_multiple_of(b.neg())`.
+ ///
+ /// # Examples
+ ///
+ /// ~~~
+ /// # use num_integer::Integer;
+ /// assert_eq!(( 16).prev_multiple_of(& 8), 16);
+ /// assert_eq!(( 23).prev_multiple_of(& 8), 16);
+ /// assert_eq!(( 16).prev_multiple_of(&-8), 16);
+ /// assert_eq!(( 23).prev_multiple_of(&-8), 24);
+ /// assert_eq!((-16).prev_multiple_of(& 8), -16);
+ /// assert_eq!((-23).prev_multiple_of(& 8), -24);
+ /// assert_eq!((-16).prev_multiple_of(&-8), -16);
+ /// assert_eq!((-23).prev_multiple_of(&-8), -16);
+ /// ~~~
+ #[inline]
+ fn prev_multiple_of(&self, other: &Self) -> Self
+ where
+ Self: Clone,
+ {
+ self.clone() - self.mod_floor(other)
+ }
+}
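The floored operations above differ from Rust's built-in `/` and `%` only when the operands have opposite signs; a short standalone sketch using the crate's free functions (defined further down in this file) makes the difference concrete:

```rust
use num_integer::{div_floor, mod_floor};

fn main() {
    // Built-in operators truncate toward zero; the remainder takes the dividend's sign.
    assert_eq!((-8) / 3, -2);
    assert_eq!((-8) % 3, -2);
    // Floored division rounds toward negative infinity; the remainder takes the divisor's sign.
    assert_eq!(div_floor(-8, 3), -3);
    assert_eq!(mod_floor(-8, 3), 1);
    // Both conventions satisfy the division rule d * q + r == n.
    assert_eq!(3 * (-3) + 1, -8);
    assert_eq!(3 * (-2) + (-2), -8);
}
```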
+
+/// Greatest common divisor and Bézout coefficients
+///
+/// ```no_build
+/// let e = isize::extended_gcd(a, b);
+/// assert_eq!(e.gcd, e.x*a + e.y*b);
+/// ```
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct ExtendedGcd<A> {
+ pub gcd: A,
+ pub x: A,
+ pub y: A,
+}
+
+/// Simultaneous integer division and modulus
+#[inline]
+pub fn div_rem<T: Integer>(x: T, y: T) -> (T, T) {
+ x.div_rem(&y)
+}
+/// Floored integer division
+#[inline]
+pub fn div_floor<T: Integer>(x: T, y: T) -> T {
+ x.div_floor(&y)
+}
+/// Floored integer modulus
+#[inline]
+pub fn mod_floor<T: Integer>(x: T, y: T) -> T {
+ x.mod_floor(&y)
+}
+/// Simultaneous floored integer division and modulus
+#[inline]
+pub fn div_mod_floor<T: Integer>(x: T, y: T) -> (T, T) {
+ x.div_mod_floor(&y)
+}
+/// Ceiled integer division
+#[inline]
+pub fn div_ceil<T: Integer>(x: T, y: T) -> T {
+ x.div_ceil(&y)
+}
+
+/// Calculates the Greatest Common Divisor (GCD) of the number and `other`. The
+/// result is always non-negative.
+#[inline(always)]
+pub fn gcd<T: Integer>(x: T, y: T) -> T {
+ x.gcd(&y)
+}
+/// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+#[inline(always)]
+pub fn lcm<T: Integer>(x: T, y: T) -> T {
+ x.lcm(&y)
+}
+
+/// Calculates the Greatest Common Divisor (GCD) and
+/// Lowest Common Multiple (LCM) of the number and `other`.
+#[inline(always)]
+pub fn gcd_lcm<T: Integer>(x: T, y: T) -> (T, T) {
+ x.gcd_lcm(&y)
+}
+
+macro_rules! impl_integer_for_isize {
+ ($T:ty, $test_mod:ident) => {
+ impl Integer for $T {
+ /// Floored integer division
+ #[inline]
+ fn div_floor(&self, other: &Self) -> Self {
+ // Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
+ // December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
+ let (d, r) = self.div_rem(other);
+ if (r > 0 && *other < 0) || (r < 0 && *other > 0) {
+ d - 1
+ } else {
+ d
+ }
+ }
+
+ /// Floored integer modulo
+ #[inline]
+ fn mod_floor(&self, other: &Self) -> Self {
+ // Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
+ // December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
+ let r = *self % *other;
+ if (r > 0 && *other < 0) || (r < 0 && *other > 0) {
+ r + *other
+ } else {
+ r
+ }
+ }
+
+ /// Calculates `div_floor` and `mod_floor` simultaneously
+ #[inline]
+ fn div_mod_floor(&self, other: &Self) -> (Self, Self) {
+ // Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
+ // December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
+ let (d, r) = self.div_rem(other);
+ if (r > 0 && *other < 0) || (r < 0 && *other > 0) {
+ (d - 1, r + *other)
+ } else {
+ (d, r)
+ }
+ }
+
+ #[inline]
+ fn div_ceil(&self, other: &Self) -> Self {
+ let (d, r) = self.div_rem(other);
+ if (r > 0 && *other > 0) || (r < 0 && *other < 0) {
+ d + 1
+ } else {
+ d
+ }
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and
+ /// `other`. The result is always non-negative.
+ #[inline]
+ fn gcd(&self, other: &Self) -> Self {
+ // Use Stein's algorithm
+ let mut m = *self;
+ let mut n = *other;
+ if m == 0 || n == 0 {
+ return (m | n).abs();
+ }
+
+ // find common factors of 2
+ let shift = (m | n).trailing_zeros();
+
+ // The algorithm needs positive numbers, but the minimum value
+ // can't be represented as a positive one.
+ // It's also a power of two, so the gcd can be
+ // calculated by bitshifting in that case
+
+ // Assuming two's complement, the number created by the shift
+ // is positive for all numbers except gcd = abs(min value)
+ // The call to .abs() causes a panic in debug mode
+ if m == Self::min_value() || n == Self::min_value() {
+ return (1 << shift).abs();
+ }
+
+ // guaranteed to be positive now, rest like unsigned algorithm
+ m = m.abs();
+ n = n.abs();
+
+ // divide n and m by 2 until odd
+ m >>= m.trailing_zeros();
+ n >>= n.trailing_zeros();
+
+ while m != n {
+ if m > n {
+ m -= n;
+ m >>= m.trailing_zeros();
+ } else {
+ n -= m;
+ n >>= n.trailing_zeros();
+ }
+ }
+ m << shift
+ }
+
+ #[inline]
+ fn extended_gcd_lcm(&self, other: &Self) -> (ExtendedGcd<Self>, Self) {
+ let egcd = self.extended_gcd(other);
+ // should not have to recalculate abs
+ let lcm = if egcd.gcd.is_zero() {
+ Self::zero()
+ } else {
+ (*self * (*other / egcd.gcd)).abs()
+ };
+ (egcd, lcm)
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and
+ /// `other`.
+ #[inline]
+ fn lcm(&self, other: &Self) -> Self {
+ self.gcd_lcm(other).1
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) and
+ /// Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn gcd_lcm(&self, other: &Self) -> (Self, Self) {
+ if self.is_zero() && other.is_zero() {
+ return (Self::zero(), Self::zero());
+ }
+ let gcd = self.gcd(other);
+ // should not have to recalculate abs
+ let lcm = (*self * (*other / gcd)).abs();
+ (gcd, lcm)
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &Self) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &Self) -> bool {
+ if other.is_zero() {
+ return self.is_zero();
+ }
+ *self % *other == 0
+ }
+
+ /// Returns `true` if the number is divisible by `2`
+ #[inline]
+ fn is_even(&self) -> bool {
+ (*self) & 1 == 0
+ }
+
+ /// Returns `true` if the number is not divisible by `2`
+ #[inline]
+ fn is_odd(&self) -> bool {
+ !self.is_even()
+ }
+
+ /// Simultaneous truncated integer division and modulus.
+ #[inline]
+ fn div_rem(&self, other: &Self) -> (Self, Self) {
+ (*self / *other, *self % *other)
+ }
+
+ /// Rounds up to nearest multiple of argument.
+ #[inline]
+ fn next_multiple_of(&self, other: &Self) -> Self {
+ // Avoid the overflow of `MIN % -1`
+ if *other == -1 {
+ return *self;
+ }
+
+ let m = Integer::mod_floor(self, other);
+ *self + if m == 0 { 0 } else { other - m }
+ }
+
+ /// Rounds down to nearest multiple of argument.
+ #[inline]
+ fn prev_multiple_of(&self, other: &Self) -> Self {
+ // Avoid the overflow of `MIN % -1`
+ if *other == -1 {
+ return *self;
+ }
+
+ *self - Integer::mod_floor(self, other)
+ }
+ }
+
+ #[cfg(test)]
+ mod $test_mod {
+ use core::mem;
+ use Integer;
+
+ /// Checks that the division rule holds for:
+ ///
+ /// - `n`: numerator (dividend)
+ /// - `d`: denominator (divisor)
+ /// - `qr`: quotient and remainder
+ #[cfg(test)]
+ fn test_division_rule((n, d): ($T, $T), (q, r): ($T, $T)) {
+ assert_eq!(d * q + r, n);
+ }
+
+ #[test]
+ fn test_div_rem() {
+ fn test_nd_dr(nd: ($T, $T), qr: ($T, $T)) {
+ let (n, d) = nd;
+ let separate_div_rem = (n / d, n % d);
+ let combined_div_rem = n.div_rem(&d);
+
+ assert_eq!(separate_div_rem, qr);
+ assert_eq!(combined_div_rem, qr);
+
+ test_division_rule(nd, separate_div_rem);
+ test_division_rule(nd, combined_div_rem);
+ }
+
+ test_nd_dr((8, 3), (2, 2));
+ test_nd_dr((8, -3), (-2, 2));
+ test_nd_dr((-8, 3), (-2, -2));
+ test_nd_dr((-8, -3), (2, -2));
+
+ test_nd_dr((1, 2), (0, 1));
+ test_nd_dr((1, -2), (0, 1));
+ test_nd_dr((-1, 2), (0, -1));
+ test_nd_dr((-1, -2), (0, -1));
+ }
+
+ #[test]
+ fn test_div_mod_floor() {
+ fn test_nd_dm(nd: ($T, $T), dm: ($T, $T)) {
+ let (n, d) = nd;
+ let separate_div_mod_floor =
+ (Integer::div_floor(&n, &d), Integer::mod_floor(&n, &d));
+ let combined_div_mod_floor = Integer::div_mod_floor(&n, &d);
+
+ assert_eq!(separate_div_mod_floor, dm);
+ assert_eq!(combined_div_mod_floor, dm);
+
+ test_division_rule(nd, separate_div_mod_floor);
+ test_division_rule(nd, combined_div_mod_floor);
+ }
+
+ test_nd_dm((8, 3), (2, 2));
+ test_nd_dm((8, -3), (-3, -1));
+ test_nd_dm((-8, 3), (-3, 1));
+ test_nd_dm((-8, -3), (2, -2));
+
+ test_nd_dm((1, 2), (0, 1));
+ test_nd_dm((1, -2), (-1, -1));
+ test_nd_dm((-1, 2), (-1, 1));
+ test_nd_dm((-1, -2), (0, -1));
+ }
+
+ #[test]
+ fn test_gcd() {
+ assert_eq!((10 as $T).gcd(&2), 2 as $T);
+ assert_eq!((10 as $T).gcd(&3), 1 as $T);
+ assert_eq!((0 as $T).gcd(&3), 3 as $T);
+ assert_eq!((3 as $T).gcd(&3), 3 as $T);
+ assert_eq!((56 as $T).gcd(&42), 14 as $T);
+ assert_eq!((3 as $T).gcd(&-3), 3 as $T);
+ assert_eq!((-6 as $T).gcd(&3), 3 as $T);
+ assert_eq!((-4 as $T).gcd(&-2), 2 as $T);
+ }
+
+ #[test]
+ fn test_gcd_cmp_with_euclidean() {
+ fn euclidean_gcd(mut m: $T, mut n: $T) -> $T {
+ while m != 0 {
+ mem::swap(&mut m, &mut n);
+ m %= n;
+ }
+
+ n.abs()
+ }
+
+ // gcd(-128, b) = 128 is not representable as positive value
+ // for i8
+ for i in -127..127 {
+ for j in -127..127 {
+ assert_eq!(euclidean_gcd(i, j), i.gcd(&j));
+ }
+ }
+
+ // last value
+ // FIXME: Use inclusive ranges for above loop when implemented
+ let i = 127;
+ for j in -127..127 {
+ assert_eq!(euclidean_gcd(i, j), i.gcd(&j));
+ }
+ assert_eq!(127.gcd(&127), 127);
+ }
+
+ #[test]
+ fn test_gcd_min_val() {
+ let min = <$T>::min_value();
+ let max = <$T>::max_value();
+ let max_pow2 = max / 2 + 1;
+ assert_eq!(min.gcd(&max), 1 as $T);
+ assert_eq!(max.gcd(&min), 1 as $T);
+ assert_eq!(min.gcd(&max_pow2), max_pow2);
+ assert_eq!(max_pow2.gcd(&min), max_pow2);
+ assert_eq!(min.gcd(&42), 2 as $T);
+ assert_eq!((42 as $T).gcd(&min), 2 as $T);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_gcd_min_val_min_val() {
+ let min = <$T>::min_value();
+ assert!(min.gcd(&min) >= 0);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_gcd_min_val_0() {
+ let min = <$T>::min_value();
+ assert!(min.gcd(&0) >= 0);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_gcd_0_min_val() {
+ let min = <$T>::min_value();
+ assert!((0 as $T).gcd(&min) >= 0);
+ }
+
+ #[test]
+ fn test_lcm() {
+ assert_eq!((1 as $T).lcm(&0), 0 as $T);
+ assert_eq!((0 as $T).lcm(&1), 0 as $T);
+ assert_eq!((1 as $T).lcm(&1), 1 as $T);
+ assert_eq!((-1 as $T).lcm(&1), 1 as $T);
+ assert_eq!((1 as $T).lcm(&-1), 1 as $T);
+ assert_eq!((-1 as $T).lcm(&-1), 1 as $T);
+ assert_eq!((8 as $T).lcm(&9), 72 as $T);
+ assert_eq!((11 as $T).lcm(&5), 55 as $T);
+ }
+
+ #[test]
+ fn test_gcd_lcm() {
+ use core::iter::once;
+ for i in once(0)
+ .chain((1..).take(127).flat_map(|a| once(a).chain(once(-a))))
+ .chain(once(-128))
+ {
+ for j in once(0)
+ .chain((1..).take(127).flat_map(|a| once(a).chain(once(-a))))
+ .chain(once(-128))
+ {
+ assert_eq!(i.gcd_lcm(&j), (i.gcd(&j), i.lcm(&j)));
+ }
+ }
+ }
+
+ #[test]
+ fn test_extended_gcd_lcm() {
+ use core::fmt::Debug;
+ use traits::NumAssign;
+ use ExtendedGcd;
+
+ fn check<A: Copy + Debug + Integer + NumAssign>(a: A, b: A) {
+ let ExtendedGcd { gcd, x, y, .. } = a.extended_gcd(&b);
+ assert_eq!(gcd, x * a + y * b);
+ }
+
+ use core::iter::once;
+ for i in once(0)
+ .chain((1..).take(127).flat_map(|a| once(a).chain(once(-a))))
+ .chain(once(-128))
+ {
+ for j in once(0)
+ .chain((1..).take(127).flat_map(|a| once(a).chain(once(-a))))
+ .chain(once(-128))
+ {
+ check(i, j);
+ let (ExtendedGcd { gcd, .. }, lcm) = i.extended_gcd_lcm(&j);
+ assert_eq!((gcd, lcm), (i.gcd(&j), i.lcm(&j)));
+ }
+ }
+ }
+
+ #[test]
+ fn test_even() {
+ assert_eq!((-4 as $T).is_even(), true);
+ assert_eq!((-3 as $T).is_even(), false);
+ assert_eq!((-2 as $T).is_even(), true);
+ assert_eq!((-1 as $T).is_even(), false);
+ assert_eq!((0 as $T).is_even(), true);
+ assert_eq!((1 as $T).is_even(), false);
+ assert_eq!((2 as $T).is_even(), true);
+ assert_eq!((3 as $T).is_even(), false);
+ assert_eq!((4 as $T).is_even(), true);
+ }
+
+ #[test]
+ fn test_odd() {
+ assert_eq!((-4 as $T).is_odd(), false);
+ assert_eq!((-3 as $T).is_odd(), true);
+ assert_eq!((-2 as $T).is_odd(), false);
+ assert_eq!((-1 as $T).is_odd(), true);
+ assert_eq!((0 as $T).is_odd(), false);
+ assert_eq!((1 as $T).is_odd(), true);
+ assert_eq!((2 as $T).is_odd(), false);
+ assert_eq!((3 as $T).is_odd(), true);
+ assert_eq!((4 as $T).is_odd(), false);
+ }
+
+ #[test]
+ fn test_multiple_of_one_limits() {
+ for x in &[<$T>::min_value(), <$T>::max_value()] {
+ for one in &[1, -1] {
+ assert_eq!(Integer::next_multiple_of(x, one), *x);
+ assert_eq!(Integer::prev_multiple_of(x, one), *x);
+ }
+ }
+ }
+ }
+ };
+}
+
+impl_integer_for_isize!(i8, test_integer_i8);
+impl_integer_for_isize!(i16, test_integer_i16);
+impl_integer_for_isize!(i32, test_integer_i32);
+impl_integer_for_isize!(i64, test_integer_i64);
+impl_integer_for_isize!(isize, test_integer_isize);
+#[cfg(has_i128)]
+impl_integer_for_isize!(i128, test_integer_i128);
+
+macro_rules! impl_integer_for_usize {
+ ($T:ty, $test_mod:ident) => {
+ impl Integer for $T {
+ /// Unsigned integer division. Returns the same result as `div` (`/`).
+ #[inline]
+ fn div_floor(&self, other: &Self) -> Self {
+ *self / *other
+ }
+
+ /// Unsigned integer modulo operation. Returns the same result as `rem` (`%`).
+ #[inline]
+ fn mod_floor(&self, other: &Self) -> Self {
+ *self % *other
+ }
+
+ #[inline]
+ fn div_ceil(&self, other: &Self) -> Self {
+ *self / *other + (0 != *self % *other) as Self
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and `other`
+ #[inline]
+ fn gcd(&self, other: &Self) -> Self {
+ // Use Stein's algorithm
+ let mut m = *self;
+ let mut n = *other;
+ if m == 0 || n == 0 {
+ return m | n;
+ }
+
+ // find common factors of 2
+ let shift = (m | n).trailing_zeros();
+
+ // divide n and m by 2 until odd
+ m >>= m.trailing_zeros();
+ n >>= n.trailing_zeros();
+
+ while m != n {
+ if m > n {
+ m -= n;
+ m >>= m.trailing_zeros();
+ } else {
+ n -= m;
+ n >>= n.trailing_zeros();
+ }
+ }
+ m << shift
+ }
+
+ #[inline]
+ fn extended_gcd_lcm(&self, other: &Self) -> (ExtendedGcd<Self>, Self) {
+ let egcd = self.extended_gcd(other);
+ // should not have to recalculate abs
+ let lcm = if egcd.gcd.is_zero() {
+ Self::zero()
+ } else {
+ *self * (*other / egcd.gcd)
+ };
+ (egcd, lcm)
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn lcm(&self, other: &Self) -> Self {
+ self.gcd_lcm(other).1
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) and
+ /// Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn gcd_lcm(&self, other: &Self) -> (Self, Self) {
+ if self.is_zero() && other.is_zero() {
+ return (Self::zero(), Self::zero());
+ }
+ let gcd = self.gcd(other);
+ let lcm = *self * (*other / gcd);
+ (gcd, lcm)
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &Self) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &Self) -> bool {
+ if other.is_zero() {
+ return self.is_zero();
+ }
+ *self % *other == 0
+ }
+
+ /// Returns `true` if the number is divisible by `2`.
+ #[inline]
+ fn is_even(&self) -> bool {
+ *self % 2 == 0
+ }
+
+ /// Returns `true` if the number is not divisible by `2`.
+ #[inline]
+ fn is_odd(&self) -> bool {
+ !self.is_even()
+ }
+
+ /// Simultaneous truncated integer division and modulus.
+ #[inline]
+ fn div_rem(&self, other: &Self) -> (Self, Self) {
+ (*self / *other, *self % *other)
+ }
+ }
+
+ #[cfg(test)]
+ mod $test_mod {
+ use core::mem;
+ use Integer;
+
+ #[test]
+ fn test_div_mod_floor() {
+ assert_eq!(<$T as Integer>::div_floor(&10, &3), 3 as $T);
+ assert_eq!(<$T as Integer>::mod_floor(&10, &3), 1 as $T);
+ assert_eq!(<$T as Integer>::div_mod_floor(&10, &3), (3 as $T, 1 as $T));
+ assert_eq!(<$T as Integer>::div_floor(&5, &5), 1 as $T);
+ assert_eq!(<$T as Integer>::mod_floor(&5, &5), 0 as $T);
+ assert_eq!(<$T as Integer>::div_mod_floor(&5, &5), (1 as $T, 0 as $T));
+ assert_eq!(<$T as Integer>::div_floor(&3, &7), 0 as $T);
+ assert_eq!(<$T as Integer>::div_floor(&3, &7), 0 as $T);
+ assert_eq!(<$T as Integer>::mod_floor(&3, &7), 3 as $T);
+ assert_eq!(<$T as Integer>::div_mod_floor(&3, &7), (0 as $T, 3 as $T));
+ }
+
+ #[test]
+ fn test_gcd() {
+ assert_eq!((10 as $T).gcd(&2), 2 as $T);
+ assert_eq!((10 as $T).gcd(&3), 1 as $T);
+ assert_eq!((0 as $T).gcd(&3), 3 as $T);
+ assert_eq!((3 as $T).gcd(&3), 3 as $T);
+ assert_eq!((56 as $T).gcd(&42), 14 as $T);
+ }
+
+ #[test]
+ fn test_gcd_cmp_with_euclidean() {
+ fn euclidean_gcd(mut m: $T, mut n: $T) -> $T {
+ while m != 0 {
+ mem::swap(&mut m, &mut n);
+ m %= n;
+ }
+ n
+ }
+
+ for i in 0..255 {
+ for j in 0..255 {
+ assert_eq!(euclidean_gcd(i, j), i.gcd(&j));
+ }
+ }
+
+ // last value
+ // FIXME: Use inclusive ranges for above loop when implemented
+ let i = 255;
+ for j in 0..255 {
+ assert_eq!(euclidean_gcd(i, j), i.gcd(&j));
+ }
+ assert_eq!(255.gcd(&255), 255);
+ }
+
+ #[test]
+ fn test_lcm() {
+ assert_eq!((1 as $T).lcm(&0), 0 as $T);
+ assert_eq!((0 as $T).lcm(&1), 0 as $T);
+ assert_eq!((1 as $T).lcm(&1), 1 as $T);
+ assert_eq!((8 as $T).lcm(&9), 72 as $T);
+ assert_eq!((11 as $T).lcm(&5), 55 as $T);
+ assert_eq!((15 as $T).lcm(&17), 255 as $T);
+ }
+
+ #[test]
+ fn test_gcd_lcm() {
+ for i in (0..).take(256) {
+ for j in (0..).take(256) {
+ assert_eq!(i.gcd_lcm(&j), (i.gcd(&j), i.lcm(&j)));
+ }
+ }
+ }
+
+ #[test]
+ fn test_is_multiple_of() {
+ assert!((0 as $T).is_multiple_of(&(0 as $T)));
+ assert!((6 as $T).is_multiple_of(&(6 as $T)));
+ assert!((6 as $T).is_multiple_of(&(3 as $T)));
+ assert!((6 as $T).is_multiple_of(&(1 as $T)));
+
+ assert!(!(42 as $T).is_multiple_of(&(5 as $T)));
+ assert!(!(5 as $T).is_multiple_of(&(3 as $T)));
+ assert!(!(42 as $T).is_multiple_of(&(0 as $T)));
+ }
+
+ #[test]
+ fn test_even() {
+ assert_eq!((0 as $T).is_even(), true);
+ assert_eq!((1 as $T).is_even(), false);
+ assert_eq!((2 as $T).is_even(), true);
+ assert_eq!((3 as $T).is_even(), false);
+ assert_eq!((4 as $T).is_even(), true);
+ }
+
+ #[test]
+ fn test_odd() {
+ assert_eq!((0 as $T).is_odd(), false);
+ assert_eq!((1 as $T).is_odd(), true);
+ assert_eq!((2 as $T).is_odd(), false);
+ assert_eq!((3 as $T).is_odd(), true);
+ assert_eq!((4 as $T).is_odd(), false);
+ }
+ }
+ };
+}
+
+impl_integer_for_usize!(u8, test_integer_u8);
+impl_integer_for_usize!(u16, test_integer_u16);
+impl_integer_for_usize!(u32, test_integer_u32);
+impl_integer_for_usize!(u64, test_integer_u64);
+impl_integer_for_usize!(usize, test_integer_usize);
+#[cfg(has_i128)]
+impl_integer_for_usize!(u128, test_integer_u128);
+
+/// An iterator over binomial coefficients.
+pub struct IterBinomial<T> {
+ a: T,
+ n: T,
+ k: T,
+}
+
+impl<T> IterBinomial<T>
+where
+ T: Integer,
+{
+ /// For a given n, iterate over all binomial coefficients binomial(n, k), for k=0...n.
+ ///
+ /// Note that this might overflow, depending on `T`. For the primitive
+ /// integer types, the following n are the largest ones for which there will
+ /// be no overflow:
+ ///
+ /// type | n
+ /// -----|---
+ /// u8 | 10
+ /// i8 | 9
+ /// u16 | 18
+ /// i16 | 17
+ /// u32 | 34
+ /// i32 | 33
+ /// u64 | 67
+ /// i64 | 66
+ ///
+ /// For larger n, `T` should be a bigint type.
+ pub fn new(n: T) -> IterBinomial<T> {
+ IterBinomial {
+ k: T::zero(),
+ a: T::one(),
+ n: n,
+ }
+ }
+}
+
+impl<T> Iterator for IterBinomial<T>
+where
+ T: Integer + Clone,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ if self.k > self.n {
+ return None;
+ }
+ self.a = if !self.k.is_zero() {
+ multiply_and_divide(
+ self.a.clone(),
+ self.n.clone() - self.k.clone() + T::one(),
+ self.k.clone(),
+ )
+ } else {
+ T::one()
+ };
+ self.k = self.k.clone() + T::one();
+ Some(self.a.clone())
+ }
+}
+
+/// Calculate r * a / b, avoiding overflows and fractions.
+///
+/// Assumes that b divides r * a evenly.
+fn multiply_and_divide<T: Integer + Clone>(r: T, a: T, b: T) -> T {
+ // See http://blog.plover.com/math/choose-2.html for the idea.
+ let g = gcd(r.clone(), b.clone());
+ r / g.clone() * (a / (b / g))
+}
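`multiply_and_divide` depends on `b` dividing `r * a` exactly; cancelling `g = gcd(r, b)` before multiplying keeps every intermediate no larger than the final result, which is what lets `binomial` succeed even when `r * a` itself would overflow. A worked instance (standalone sketch with an illustrative Euclidean `gcd` helper) using the last step of `binomial(10, 5)` in `u8`:

```rust
// Illustrative Euclidean GCD helper, only for this sketch.
fn gcd(mut m: u8, mut n: u8) -> u8 {
    while n != 0 {
        let r = m % n;
        m = n;
        n = r;
    }
    m
}

fn main() {
    // Last step of binomial(10, 5): r = C(10, 4) = 210, times a = 6, divided by b = 5.
    let (r, a, b): (u8, u8, u8) = (210, 6, 5);
    // The direct product r * a = 1260 does not fit in a u8...
    assert_eq!(r.checked_mul(a), None);
    // ...but cancelling g = gcd(r, b) first keeps the intermediates in range.
    let g = gcd(r, b); // 5
    let result = r / g * (a / (b / g)); // 42 * 6
    assert_eq!(result, 252); // C(10, 5)
}
```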
+
+/// Calculate the binomial coefficient.
+///
+/// Note that this might overflow, depending on `T`. For the primitive integer
+/// types, the following n are the largest ones possible such that there will
+/// be no overflow for any k:
+///
+/// type | n
+/// -----|---
+/// u8 | 10
+/// i8 | 9
+/// u16 | 18
+/// i16 | 17
+/// u32 | 34
+/// i32 | 33
+/// u64 | 67
+/// i64 | 66
+///
+/// For larger n, consider using a bigint type for `T`.
+pub fn binomial<T: Integer + Clone>(mut n: T, k: T) -> T {
+ // See http://blog.plover.com/math/choose.html for the idea.
+ if k > n {
+ return T::zero();
+ }
+ if k > n.clone() - k.clone() {
+ return binomial(n.clone(), n - k);
+ }
+ let mut r = T::one();
+ let mut d = T::one();
+ loop {
+ if d > k {
+ break;
+ }
+ r = multiply_and_divide(r, n.clone(), d.clone());
+ n = n - T::one();
+ d = d + T::one();
+ }
+ r
+}
+
+/// Calculate the multinomial coefficient.
+pub fn multinomial<T: Integer + Clone>(k: &[T]) -> T
+where
+ for<'a> T: Add<&'a T, Output = T>,
+{
+ let mut r = T::one();
+ let mut p = T::zero();
+ for i in k {
+ p = p + i;
+ r = r * binomial(p.clone(), i.clone());
+ }
+ r
+}
+
+#[test]
+fn test_lcm_overflow() {
+ macro_rules! check {
+ ($t:ty, $x:expr, $y:expr, $r:expr) => {{
+ let x: $t = $x;
+ let y: $t = $y;
+ let o = x.checked_mul(y);
+ assert!(
+ o.is_none(),
+ "sanity checking that {} input {} * {} overflows",
+ stringify!($t),
+ x,
+ y
+ );
+ assert_eq!(x.lcm(&y), $r);
+ assert_eq!(y.lcm(&x), $r);
+ }};
+ }
+
+ // Original bug (Issue #166)
+ check!(i64, 46656000000000000, 600, 46656000000000000);
+
+ check!(i8, 0x40, 0x04, 0x40);
+ check!(u8, 0x80, 0x02, 0x80);
+ check!(i16, 0x40_00, 0x04, 0x40_00);
+ check!(u16, 0x80_00, 0x02, 0x80_00);
+ check!(i32, 0x4000_0000, 0x04, 0x4000_0000);
+ check!(u32, 0x8000_0000, 0x02, 0x8000_0000);
+ check!(i64, 0x4000_0000_0000_0000, 0x04, 0x4000_0000_0000_0000);
+ check!(u64, 0x8000_0000_0000_0000, 0x02, 0x8000_0000_0000_0000);
+}
+
+#[test]
+fn test_iter_binomial() {
+ macro_rules! check_simple {
+ ($t:ty) => {{
+ let n: $t = 3;
+ let expected = [1, 3, 3, 1];
+ for (b, &e) in IterBinomial::new(n).zip(&expected) {
+ assert_eq!(b, e);
+ }
+ }};
+ }
+
+ check_simple!(u8);
+ check_simple!(i8);
+ check_simple!(u16);
+ check_simple!(i16);
+ check_simple!(u32);
+ check_simple!(i32);
+ check_simple!(u64);
+ check_simple!(i64);
+
+ macro_rules! check_binomial {
+ ($t:ty, $n:expr) => {{
+ let n: $t = $n;
+ let mut k: $t = 0;
+ for b in IterBinomial::new(n) {
+ assert_eq!(b, binomial(n, k));
+ k += 1;
+ }
+ }};
+ }
+
+ // Check the largest n for which there is no overflow.
+ check_binomial!(u8, 10);
+ check_binomial!(i8, 9);
+ check_binomial!(u16, 18);
+ check_binomial!(i16, 17);
+ check_binomial!(u32, 34);
+ check_binomial!(i32, 33);
+ check_binomial!(u64, 67);
+ check_binomial!(i64, 66);
+}
+
+#[test]
+fn test_binomial() {
+ macro_rules! check {
+ ($t:ty, $x:expr, $y:expr, $r:expr) => {{
+ let x: $t = $x;
+ let y: $t = $y;
+ let expected: $t = $r;
+ assert_eq!(binomial(x, y), expected);
+ if y <= x {
+ assert_eq!(binomial(x, x - y), expected);
+ }
+ }};
+ }
+ check!(u8, 9, 4, 126);
+ check!(u8, 0, 0, 1);
+ check!(u8, 2, 3, 0);
+
+ check!(i8, 9, 4, 126);
+ check!(i8, 0, 0, 1);
+ check!(i8, 2, 3, 0);
+
+ check!(u16, 100, 2, 4950);
+ check!(u16, 14, 4, 1001);
+ check!(u16, 0, 0, 1);
+ check!(u16, 2, 3, 0);
+
+ check!(i16, 100, 2, 4950);
+ check!(i16, 14, 4, 1001);
+ check!(i16, 0, 0, 1);
+ check!(i16, 2, 3, 0);
+
+ check!(u32, 100, 2, 4950);
+ check!(u32, 35, 11, 417225900);
+ check!(u32, 14, 4, 1001);
+ check!(u32, 0, 0, 1);
+ check!(u32, 2, 3, 0);
+
+ check!(i32, 100, 2, 4950);
+ check!(i32, 35, 11, 417225900);
+ check!(i32, 14, 4, 1001);
+ check!(i32, 0, 0, 1);
+ check!(i32, 2, 3, 0);
+
+ check!(u64, 100, 2, 4950);
+ check!(u64, 35, 11, 417225900);
+ check!(u64, 14, 4, 1001);
+ check!(u64, 0, 0, 1);
+ check!(u64, 2, 3, 0);
+
+ check!(i64, 100, 2, 4950);
+ check!(i64, 35, 11, 417225900);
+ check!(i64, 14, 4, 1001);
+ check!(i64, 0, 0, 1);
+ check!(i64, 2, 3, 0);
+}
+
+#[test]
+fn test_multinomial() {
+ macro_rules! check_binomial {
+ ($t:ty, $k:expr) => {{
+ let n: $t = $k.iter().fold(0, |acc, &x| acc + x);
+ let k: &[$t] = $k;
+ assert_eq!(k.len(), 2);
+ assert_eq!(multinomial(k), binomial(n, k[0]));
+ }};
+ }
+
+ check_binomial!(u8, &[4, 5]);
+
+ check_binomial!(i8, &[4, 5]);
+
+ check_binomial!(u16, &[2, 98]);
+ check_binomial!(u16, &[4, 10]);
+
+ check_binomial!(i16, &[2, 98]);
+ check_binomial!(i16, &[4, 10]);
+
+ check_binomial!(u32, &[2, 98]);
+ check_binomial!(u32, &[11, 24]);
+ check_binomial!(u32, &[4, 10]);
+
+ check_binomial!(i32, &[2, 98]);
+ check_binomial!(i32, &[11, 24]);
+ check_binomial!(i32, &[4, 10]);
+
+ check_binomial!(u64, &[2, 98]);
+ check_binomial!(u64, &[11, 24]);
+ check_binomial!(u64, &[4, 10]);
+
+ check_binomial!(i64, &[2, 98]);
+ check_binomial!(i64, &[11, 24]);
+ check_binomial!(i64, &[4, 10]);
+
+ macro_rules! check_multinomial {
+ ($t:ty, $k:expr, $r:expr) => {{
+ let k: &[$t] = $k;
+ let expected: $t = $r;
+ assert_eq!(multinomial(k), expected);
+ }};
+ }
+
+ check_multinomial!(u8, &[2, 1, 2], 30);
+ check_multinomial!(u8, &[2, 3, 0], 10);
+
+ check_multinomial!(i8, &[2, 1, 2], 30);
+ check_multinomial!(i8, &[2, 3, 0], 10);
+
+ check_multinomial!(u16, &[2, 1, 2], 30);
+ check_multinomial!(u16, &[2, 3, 0], 10);
+
+ check_multinomial!(i16, &[2, 1, 2], 30);
+ check_multinomial!(i16, &[2, 3, 0], 10);
+
+ check_multinomial!(u32, &[2, 1, 2], 30);
+ check_multinomial!(u32, &[2, 3, 0], 10);
+
+ check_multinomial!(i32, &[2, 1, 2], 30);
+ check_multinomial!(i32, &[2, 3, 0], 10);
+
+ check_multinomial!(u64, &[2, 1, 2], 30);
+ check_multinomial!(u64, &[2, 3, 0], 10);
+
+ check_multinomial!(i64, &[2, 1, 2], 30);
+ check_multinomial!(i64, &[2, 3, 0], 10);
+
+ check_multinomial!(u64, &[], 1);
+ check_multinomial!(u64, &[0], 1);
+ check_multinomial!(u64, &[12345], 1);
+}
diff --git a/rust/vendor/num-integer/src/roots.rs b/rust/vendor/num-integer/src/roots.rs
new file mode 100644
index 0000000..a9eec1a
--- /dev/null
+++ b/rust/vendor/num-integer/src/roots.rs
@@ -0,0 +1,391 @@
+use core;
+use core::mem;
+use traits::checked_pow;
+use traits::PrimInt;
+use Integer;
+
+/// Provides methods to compute an integer's square root, cube root,
+/// and arbitrary `n`th root.
+pub trait Roots: Integer {
+ /// Returns the truncated principal `n`th root of an integer
+ /// -- `if x >= 0 { ⌊ⁿ√x⌋ } else { ⌈ⁿ√x⌉ }`
+ ///
+ /// This is solving for `r` in `rⁿ = x`, rounding toward zero.
+ /// If `x` is positive, the result will satisfy `rⁿ ≤ x < (r+1)ⁿ`.
+ /// If `x` is negative and `n` is odd, then `(r-1)ⁿ < x ≤ rⁿ`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `n` is zero:
+ ///
+ /// ```should_panic
+ /// # use num_integer::Roots;
+ /// println!("can't compute ⁰√x : {}", 123.nth_root(0));
+ /// ```
+ ///
+ /// or if `n` is even and `self` is negative:
+ ///
+ /// ```should_panic
+ /// # use num_integer::Roots;
+ /// println!("no imaginary numbers... {}", (-1).nth_root(10));
+ /// ```
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_integer::Roots;
+ ///
+ /// let x: i32 = 12345;
+ /// assert_eq!(x.nth_root(1), x);
+ /// assert_eq!(x.nth_root(2), x.sqrt());
+ /// assert_eq!(x.nth_root(3), x.cbrt());
+ /// assert_eq!(x.nth_root(4), 10);
+ /// assert_eq!(x.nth_root(13), 2);
+ /// assert_eq!(x.nth_root(14), 1);
+ /// assert_eq!(x.nth_root(std::u32::MAX), 1);
+ ///
+ /// assert_eq!(std::i32::MAX.nth_root(30), 2);
+ /// assert_eq!(std::i32::MAX.nth_root(31), 1);
+ /// assert_eq!(std::i32::MIN.nth_root(31), -2);
+ /// assert_eq!((std::i32::MIN + 1).nth_root(31), -1);
+ ///
+ /// assert_eq!(std::u32::MAX.nth_root(31), 2);
+ /// assert_eq!(std::u32::MAX.nth_root(32), 1);
+ /// ```
+ fn nth_root(&self, n: u32) -> Self;
+
+ /// Returns the truncated principal square root of an integer -- `⌊√x⌋`
+ ///
+ /// This is solving for `r` in `r² = x`, rounding toward zero.
+ /// The result will satisfy `r² ≤ x < (r+1)²`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `self` is less than zero:
+ ///
+ /// ```should_panic
+ /// # use num_integer::Roots;
+ /// println!("no imaginary numbers... {}", (-1).sqrt());
+ /// ```
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_integer::Roots;
+ ///
+ /// let x: i32 = 12345;
+ /// assert_eq!((x * x).sqrt(), x);
+ /// assert_eq!((x * x + 1).sqrt(), x);
+ /// assert_eq!((x * x - 1).sqrt(), x - 1);
+ /// ```
+ #[inline]
+ fn sqrt(&self) -> Self {
+ self.nth_root(2)
+ }
+
+ /// Returns the truncated principal cube root of an integer --
+ /// `if x >= 0 { ⌊∛x⌋ } else { ⌈∛x⌉ }`
+ ///
+ /// This is solving for `r` in `r³ = x`, rounding toward zero.
+ /// If `x` is positive, the result will satisfy `r³ ≤ x < (r+1)³`.
+ /// If `x` is negative, then `(r-1)³ < x ≤ r³`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_integer::Roots;
+ ///
+ /// let x: i32 = 1234;
+ /// assert_eq!((x * x * x).cbrt(), x);
+ /// assert_eq!((x * x * x + 1).cbrt(), x);
+ /// assert_eq!((x * x * x - 1).cbrt(), x - 1);
+ ///
+ /// assert_eq!((-(x * x * x)).cbrt(), -x);
+ /// assert_eq!((-(x * x * x + 1)).cbrt(), -x);
+ /// assert_eq!((-(x * x * x - 1)).cbrt(), -(x - 1));
+ /// ```
+ #[inline]
+ fn cbrt(&self) -> Self {
+ self.nth_root(3)
+ }
+}
+
+/// Returns the truncated principal square root of an integer --
+/// see [Roots::sqrt](trait.Roots.html#method.sqrt).
+#[inline]
+pub fn sqrt<T: Roots>(x: T) -> T {
+ x.sqrt()
+}
+
+/// Returns the truncated principal cube root of an integer --
+/// see [Roots::cbrt](trait.Roots.html#method.cbrt).
+#[inline]
+pub fn cbrt<T: Roots>(x: T) -> T {
+ x.cbrt()
+}
+
+/// Returns the truncated principal `n`th root of an integer --
+/// see [Roots::nth_root](trait.Roots.html#tymethod.nth_root).
+#[inline]
+pub fn nth_root<T: Roots>(x: T, n: u32) -> T {
+ x.nth_root(n)
+}
+
+macro_rules! signed_roots {
+ ($T:ty, $U:ty) => {
+ impl Roots for $T {
+ #[inline]
+ fn nth_root(&self, n: u32) -> Self {
+ if *self >= 0 {
+ (*self as $U).nth_root(n) as Self
+ } else {
+ assert!(n.is_odd(), "even roots of a negative are imaginary");
+ -((self.wrapping_neg() as $U).nth_root(n) as Self)
+ }
+ }
+
+ #[inline]
+ fn sqrt(&self) -> Self {
+ assert!(*self >= 0, "the square root of a negative is imaginary");
+ (*self as $U).sqrt() as Self
+ }
+
+ #[inline]
+ fn cbrt(&self) -> Self {
+ if *self >= 0 {
+ (*self as $U).cbrt() as Self
+ } else {
+ -((self.wrapping_neg() as $U).cbrt() as Self)
+ }
+ }
+ }
+ };
+}
+
+signed_roots!(i8, u8);
+signed_roots!(i16, u16);
+signed_roots!(i32, u32);
+signed_roots!(i64, u64);
+#[cfg(has_i128)]
+signed_roots!(i128, u128);
+signed_roots!(isize, usize);
+
+#[inline]
+fn fixpoint<T, F>(mut x: T, f: F) -> T
+where
+ T: Integer + Copy,
+ F: Fn(T) -> T,
+{
+ let mut xn = f(x);
+ while x < xn {
+ x = xn;
+ xn = f(x);
+ }
+ while x > xn {
+ x = xn;
+ xn = f(x);
+ }
+ x
+}
+
+#[inline]
+fn bits<T>() -> u32 {
+ 8 * mem::size_of::<T>() as u32
+}
+
+#[inline]
+fn log2<T: PrimInt>(x: T) -> u32 {
+ debug_assert!(x > T::zero());
+ bits::<T>() - 1 - x.leading_zeros()
+}
+
+macro_rules! unsigned_roots {
+ ($T:ident) => {
+ impl Roots for $T {
+ #[inline]
+ fn nth_root(&self, n: u32) -> Self {
+ fn go(a: $T, n: u32) -> $T {
+ // Specialize small roots
+ match n {
+ 0 => panic!("can't find a root of degree 0!"),
+ 1 => return a,
+ 2 => return a.sqrt(),
+ 3 => return a.cbrt(),
+ _ => (),
+ }
+
+ // The root of values less than 2ⁿ can only be 0 or 1.
+ if bits::<$T>() <= n || a < (1 << n) {
+ return (a > 0) as $T;
+ }
+
+ if bits::<$T>() > 64 {
+ // 128-bit division is slow, so do a bitwise `nth_root` until it's small enough.
+ return if a <= core::u64::MAX as $T {
+ (a as u64).nth_root(n) as $T
+ } else {
+ let lo = (a >> n).nth_root(n) << 1;
+ let hi = lo + 1;
+ // 128-bit `checked_mul` also involves division, but we can't always
+ // compute `hiⁿ` without risking overflow. Try to avoid it though...
+ if hi.next_power_of_two().trailing_zeros() * n >= bits::<$T>() {
+ match checked_pow(hi, n as usize) {
+ Some(x) if x <= a => hi,
+ _ => lo,
+ }
+ } else {
+ if hi.pow(n) <= a {
+ hi
+ } else {
+ lo
+ }
+ }
+ };
+ }
+
+ #[cfg(feature = "std")]
+ #[inline]
+ fn guess(x: $T, n: u32) -> $T {
+ // for smaller inputs, `f64` doesn't justify its cost.
+ if bits::<$T>() <= 32 || x <= core::u32::MAX as $T {
+ 1 << ((log2(x) + n - 1) / n)
+ } else {
+ ((x as f64).ln() / f64::from(n)).exp() as $T
+ }
+ }
+
+ #[cfg(not(feature = "std"))]
+ #[inline]
+ fn guess(x: $T, n: u32) -> $T {
+ 1 << ((log2(x) + n - 1) / n)
+ }
+
+ // https://en.wikipedia.org/wiki/Nth_root_algorithm
+ let n1 = n - 1;
+ let next = |x: $T| {
+ let y = match checked_pow(x, n1 as usize) {
+ Some(ax) => a / ax,
+ None => 0,
+ };
+ (y + x * n1 as $T) / n as $T
+ };
+ fixpoint(guess(a, n), next)
+ }
+ go(*self, n)
+ }
+
+ #[inline]
+ fn sqrt(&self) -> Self {
+ fn go(a: $T) -> $T {
+ if bits::<$T>() > 64 {
+ // 128-bit division is slow, so do a bitwise `sqrt` until it's small enough.
+ return if a <= core::u64::MAX as $T {
+ (a as u64).sqrt() as $T
+ } else {
+ let lo = (a >> 2u32).sqrt() << 1;
+ let hi = lo + 1;
+ if hi * hi <= a {
+ hi
+ } else {
+ lo
+ }
+ };
+ }
+
+ if a < 4 {
+ return (a > 0) as $T;
+ }
+
+ #[cfg(feature = "std")]
+ #[inline]
+ fn guess(x: $T) -> $T {
+ (x as f64).sqrt() as $T
+ }
+
+ #[cfg(not(feature = "std"))]
+ #[inline]
+ fn guess(x: $T) -> $T {
+ 1 << ((log2(x) + 1) / 2)
+ }
+
+ // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
+ let next = |x: $T| (a / x + x) >> 1;
+ fixpoint(guess(a), next)
+ }
+ go(*self)
+ }
+
+ #[inline]
+ fn cbrt(&self) -> Self {
+ fn go(a: $T) -> $T {
+ if bits::<$T>() > 64 {
+ // 128-bit division is slow, so do a bitwise `cbrt` until it's small enough.
+ return if a <= core::u64::MAX as $T {
+ (a as u64).cbrt() as $T
+ } else {
+ let lo = (a >> 3u32).cbrt() << 1;
+ let hi = lo + 1;
+ if hi * hi * hi <= a {
+ hi
+ } else {
+ lo
+ }
+ };
+ }
+
+ if bits::<$T>() <= 32 {
+ // Implementation based on Hacker's Delight `icbrt2`
+ let mut x = a;
+ let mut y2 = 0;
+ let mut y = 0;
+ let smax = bits::<$T>() / 3;
+ for s in (0..smax + 1).rev() {
+ let s = s * 3;
+ y2 *= 4;
+ y *= 2;
+ let b = 3 * (y2 + y) + 1;
+ if x >> s >= b {
+ x -= b << s;
+ y2 += 2 * y + 1;
+ y += 1;
+ }
+ }
+ return y;
+ }
+
+ if a < 8 {
+ return (a > 0) as $T;
+ }
+ if a <= core::u32::MAX as $T {
+ return (a as u32).cbrt() as $T;
+ }
+
+ #[cfg(feature = "std")]
+ #[inline]
+ fn guess(x: $T) -> $T {
+ (x as f64).cbrt() as $T
+ }
+
+ #[cfg(not(feature = "std"))]
+ #[inline]
+ fn guess(x: $T) -> $T {
+ 1 << ((log2(x) + 2) / 3)
+ }
+
+ // https://en.wikipedia.org/wiki/Cube_root#Numerical_methods
+ let next = |x: $T| (a / (x * x) + x * 2) / 3;
+ fixpoint(guess(a), next)
+ }
+ go(*self)
+ }
+ }
+ };
+}
+
+unsigned_roots!(u8);
+unsigned_roots!(u16);
+unsigned_roots!(u32);
+unsigned_roots!(u64);
+#[cfg(has_i128)]
+unsigned_roots!(u128);
+unsigned_roots!(usize);
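Both `sqrt` and `nth_root` above are Newton iterations driven by the `fixpoint` helper: follow the update while it moves in one direction, then the other, and stop at the fixed point. A standalone sketch (an illustrative `isqrt`, not the vendored code itself) of the same scheme for a 64-bit square root, using the no_std power-of-two guess:

```rust
fn isqrt(a: u64) -> u64 {
    if a < 4 {
        return (a > 0) as u64; // 0 -> 0, 1..=3 -> 1
    }
    // Power-of-two starting guess near the root, as in the no_std `guess` above.
    let log2 = 63 - u64::from(a.leading_zeros());
    let mut x = 1u64 << ((log2 + 1) / 2);
    // Babylonian update x -> (a/x + x) / 2.
    let next = |x: u64| (a / x + x) >> 1;
    // Same two-phase fixed-point scheme as the crate's `fixpoint` helper.
    let mut xn = next(x);
    while x < xn {
        x = xn;
        xn = next(x);
    }
    while x > xn {
        x = xn;
        xn = next(x);
    }
    x
}

fn main() {
    for &a in &[4u64, 15, 16, 17, 152_399_025, u64::MAX] {
        let r = isqrt(a);
        assert!(r * r <= a); // r is not too large
        assert!((r + 1).checked_mul(r + 1).map_or(true, |s| s > a)); // and not too small
    }
}
```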
diff --git a/rust/vendor/num-integer/tests/average.rs b/rust/vendor/num-integer/tests/average.rs
new file mode 100644
index 0000000..9fd8cf1
--- /dev/null
+++ b/rust/vendor/num-integer/tests/average.rs
@@ -0,0 +1,100 @@
+extern crate num_integer;
+extern crate num_traits;
+
+macro_rules! test_average {
+ ($I:ident, $U:ident) => {
+ mod $I {
+ mod ceil {
+ use num_integer::Average;
+
+ #[test]
+ fn same_sign() {
+ assert_eq!((14 as $I).average_ceil(&16), 15 as $I);
+ assert_eq!((14 as $I).average_ceil(&17), 16 as $I);
+
+ let max = $crate::std::$I::MAX;
+ assert_eq!((max - 3).average_ceil(&(max - 1)), max - 2);
+ assert_eq!((max - 3).average_ceil(&(max - 2)), max - 2);
+ }
+
+ #[test]
+ fn different_sign() {
+ assert_eq!((14 as $I).average_ceil(&-4), 5 as $I);
+ assert_eq!((14 as $I).average_ceil(&-5), 5 as $I);
+
+ let min = $crate::std::$I::MIN;
+ let max = $crate::std::$I::MAX;
+ assert_eq!(min.average_ceil(&max), 0 as $I);
+ }
+ }
+
+ mod floor {
+ use num_integer::Average;
+
+ #[test]
+ fn same_sign() {
+ assert_eq!((14 as $I).average_floor(&16), 15 as $I);
+ assert_eq!((14 as $I).average_floor(&17), 15 as $I);
+
+ let max = $crate::std::$I::MAX;
+ assert_eq!((max - 3).average_floor(&(max - 1)), max - 2);
+ assert_eq!((max - 3).average_floor(&(max - 2)), max - 3);
+ }
+
+ #[test]
+ fn different_sign() {
+ assert_eq!((14 as $I).average_floor(&-4), 5 as $I);
+ assert_eq!((14 as $I).average_floor(&-5), 4 as $I);
+
+ let min = $crate::std::$I::MIN;
+ let max = $crate::std::$I::MAX;
+ assert_eq!(min.average_floor(&max), -1 as $I);
+ }
+ }
+ }
+
+ mod $U {
+ mod ceil {
+ use num_integer::Average;
+
+ #[test]
+ fn bounded() {
+ assert_eq!((14 as $U).average_ceil(&16), 15 as $U);
+ assert_eq!((14 as $U).average_ceil(&17), 16 as $U);
+ }
+
+ #[test]
+ fn overflow() {
+ let max = $crate::std::$U::MAX;
+ assert_eq!((max - 3).average_ceil(&(max - 1)), max - 2);
+ assert_eq!((max - 3).average_ceil(&(max - 2)), max - 2);
+ }
+ }
+
+ mod floor {
+ use num_integer::Average;
+
+ #[test]
+ fn bounded() {
+ assert_eq!((14 as $U).average_floor(&16), 15 as $U);
+ assert_eq!((14 as $U).average_floor(&17), 15 as $U);
+ }
+
+ #[test]
+ fn overflow() {
+ let max = $crate::std::$U::MAX;
+ assert_eq!((max - 3).average_floor(&(max - 1)), max - 2);
+ assert_eq!((max - 3).average_floor(&(max - 2)), max - 3);
+ }
+ }
+ }
+ };
+}
+
+test_average!(i8, u8);
+test_average!(i16, u16);
+test_average!(i32, u32);
+test_average!(i64, u64);
+#[cfg(has_i128)]
+test_average!(i128, u128);
+test_average!(isize, usize);
diff --git a/rust/vendor/num-integer/tests/roots.rs b/rust/vendor/num-integer/tests/roots.rs
new file mode 100644
index 0000000..f85f9e0
--- /dev/null
+++ b/rust/vendor/num-integer/tests/roots.rs
@@ -0,0 +1,272 @@
+extern crate num_integer;
+extern crate num_traits;
+
+use num_integer::Roots;
+use num_traits::checked_pow;
+use num_traits::{AsPrimitive, PrimInt, Signed};
+use std::f64::MANTISSA_DIGITS;
+use std::fmt::Debug;
+use std::mem;
+
+trait TestInteger: Roots + PrimInt + Debug + AsPrimitive<f64> + 'static {}
+
+impl<T> TestInteger for T where T: Roots + PrimInt + Debug + AsPrimitive<f64> + 'static {}
+
+/// Check that each root is correct
+///
+/// If `x` is positive, check `rⁿ ≤ x < (r+1)ⁿ`.
+/// If `x` is negative, check `(r-1)ⁿ < x ≤ rⁿ`.
+fn check<T>(v: &[T], n: u32)
+where
+ T: TestInteger,
+{
+ for i in v {
+ let rt = i.nth_root(n);
+ // println!("nth_root({:?}, {}) = {:?}", i, n, rt);
+ if n == 2 {
+ assert_eq!(rt, i.sqrt());
+ } else if n == 3 {
+ assert_eq!(rt, i.cbrt());
+ }
+ if *i >= T::zero() {
+ let rt1 = rt + T::one();
+ assert!(rt.pow(n) <= *i);
+ if let Some(x) = checked_pow(rt1, n as usize) {
+ assert!(*i < x);
+ }
+ } else {
+ let rt1 = rt - T::one();
+ assert!(rt < T::zero());
+ assert!(*i <= rt.pow(n));
+ if let Some(x) = checked_pow(rt1, n as usize) {
+ assert!(x < *i);
+ }
+ };
+ }
+}
+
+/// Get the maximum value that will round down as `f64` (if any),
+/// and its successor that will round up.
+///
+/// Important because the `std` implementations cast to `f64` to
+/// get a close approximation of the roots.
+fn mantissa_max<T>() -> Option<(T, T)>
+where
+ T: TestInteger,
+{
+ let bits = if T::min_value().is_zero() {
+ 8 * mem::size_of::<T>()
+ } else {
+ 8 * mem::size_of::<T>() - 1
+ };
+ if bits > MANTISSA_DIGITS as usize {
+ let rounding_bit = T::one() << (bits - MANTISSA_DIGITS as usize - 1);
+ let x = T::max_value() - rounding_bit;
+
+ let x1 = x + T::one();
+ let x2 = x1 + T::one();
+ assert!(x.as_() < x1.as_());
+ assert_eq!(x1.as_(), x2.as_());
+
+ Some((x, x1))
+ } else {
+ None
+ }
+}
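This helper exists because `f64` carries only 53 mantissa bits, so distinct wide integers near the top of the range convert to the same float; the test inputs therefore explicitly include the largest value that still rounds down and its successor, since the std implementation seeds its root guesses from an `f64` conversion. A standalone sketch of the effect for `u64`, mirroring the assertions above:

```rust
fn main() {
    // f64 keeps 53 significant bits, so u64 values this close to u64::MAX
    // are no longer distinguishable after conversion.
    let rounding_bit: u64 = 1 << (64 - f64::MANTISSA_DIGITS - 1); // 1 << 10
    let x = u64::MAX - rounding_bit; // largest value that still rounds down
    let x1 = x + 1;
    let x2 = x1 + 1;
    assert!((x as f64) < (x1 as f64)); // still distinguishable
    assert_eq!(x1 as f64, x2 as f64); // collapsed to the same f64
}
```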
+
+fn extend<T>(v: &mut Vec<T>, start: T, end: T)
+where
+ T: TestInteger,
+{
+ let mut i = start;
+ while i < end {
+ v.push(i);
+ i = i + T::one();
+ }
+ v.push(i);
+}
+
+fn extend_shl<T>(v: &mut Vec<T>, start: T, end: T, mask: T)
+where
+ T: TestInteger,
+{
+ let mut i = start;
+ while i != end {
+ v.push(i);
+ i = (i << 1) & mask;
+ }
+}
+
+fn extend_shr<T>(v: &mut Vec<T>, start: T, end: T)
+where
+ T: TestInteger,
+{
+ let mut i = start;
+ while i != end {
+ v.push(i);
+ i = i >> 1;
+ }
+}
+
+fn pos<T>() -> Vec<T>
+where
+ T: TestInteger,
+ i8: AsPrimitive<T>,
+{
+ let mut v: Vec<T> = vec![];
+ if mem::size_of::<T>() == 1 {
+ extend(&mut v, T::zero(), T::max_value());
+ } else {
+ extend(&mut v, T::zero(), i8::max_value().as_());
+ extend(
+ &mut v,
+ T::max_value() - i8::max_value().as_(),
+ T::max_value(),
+ );
+ if let Some((i, j)) = mantissa_max::<T>() {
+ v.push(i);
+ v.push(j);
+ }
+ extend_shl(&mut v, T::max_value(), T::zero(), !T::min_value());
+ extend_shr(&mut v, T::max_value(), T::zero());
+ }
+ v
+}
+
+fn neg<T>() -> Vec<T>
+where
+ T: TestInteger + Signed,
+ i8: AsPrimitive<T>,
+{
+ let mut v: Vec<T> = vec![];
+ if mem::size_of::<T>() <= 1 {
+ extend(&mut v, T::min_value(), T::zero());
+ } else {
+ extend(&mut v, i8::min_value().as_(), T::zero());
+ extend(
+ &mut v,
+ T::min_value(),
+ T::min_value() - i8::min_value().as_(),
+ );
+ if let Some((i, j)) = mantissa_max::<T>() {
+ v.push(-i);
+ v.push(-j);
+ }
+ extend_shl(&mut v, -T::one(), T::min_value(), !T::zero());
+ extend_shr(&mut v, T::min_value(), -T::one());
+ }
+ v
+}
+
+macro_rules! test_roots {
+ ($I:ident, $U:ident) => {
+ mod $I {
+ use check;
+ use neg;
+ use num_integer::Roots;
+ use pos;
+ use std::mem;
+
+ #[test]
+ #[should_panic]
+ fn zeroth_root() {
+ (123 as $I).nth_root(0);
+ }
+
+ #[test]
+ fn sqrt() {
+ check(&pos::<$I>(), 2);
+ }
+
+ #[test]
+ #[should_panic]
+ fn sqrt_neg() {
+ (-123 as $I).sqrt();
+ }
+
+ #[test]
+ fn cbrt() {
+ check(&pos::<$I>(), 3);
+ }
+
+ #[test]
+ fn cbrt_neg() {
+ check(&neg::<$I>(), 3);
+ }
+
+ #[test]
+ fn nth_root() {
+ let bits = 8 * mem::size_of::<$I>() as u32 - 1;
+ let pos = pos::<$I>();
+ for n in 4..bits {
+ check(&pos, n);
+ }
+ }
+
+ #[test]
+ fn nth_root_neg() {
+ let bits = 8 * mem::size_of::<$I>() as u32 - 1;
+ let neg = neg::<$I>();
+ for n in 2..bits / 2 {
+ check(&neg, 2 * n + 1);
+ }
+ }
+
+ #[test]
+ fn bit_size() {
+ let bits = 8 * mem::size_of::<$I>() as u32 - 1;
+ assert_eq!($I::max_value().nth_root(bits - 1), 2);
+ assert_eq!($I::max_value().nth_root(bits), 1);
+ assert_eq!($I::min_value().nth_root(bits), -2);
+ assert_eq!(($I::min_value() + 1).nth_root(bits), -1);
+ }
+ }
+
+ mod $U {
+ use check;
+ use num_integer::Roots;
+ use pos;
+ use std::mem;
+
+ #[test]
+ #[should_panic]
+ fn zeroth_root() {
+ (123 as $U).nth_root(0);
+ }
+
+ #[test]
+ fn sqrt() {
+ check(&pos::<$U>(), 2);
+ }
+
+ #[test]
+ fn cbrt() {
+ check(&pos::<$U>(), 3);
+ }
+
+ #[test]
+ fn nth_root() {
+                let bits = 8 * mem::size_of::<$U>() as u32;
+                let pos = pos::<$U>();
+ for n in 4..bits {
+ check(&pos, n);
+ }
+ }
+
+ #[test]
+ fn bit_size() {
+ let bits = 8 * mem::size_of::<$U>() as u32;
+ assert_eq!($U::max_value().nth_root(bits - 1), 2);
+ assert_eq!($U::max_value().nth_root(bits), 1);
+ }
+ }
+ };
+}
+
+test_roots!(i8, u8);
+test_roots!(i16, u16);
+test_roots!(i32, u32);
+test_roots!(i64, u64);
+#[cfg(has_i128)]
+test_roots!(i128, u128);
+test_roots!(isize, usize);
diff --git a/rust/vendor/num-iter/.cargo-checksum.json b/rust/vendor/num-iter/.cargo-checksum.json
new file mode 100644
index 0000000..dfc7c4e
--- /dev/null
+++ b/rust/vendor/num-iter/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"66957e317ffad0ea8f4865dd5dc4cbbdfe3254df4632292731c6791189eec2ef","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"3a6e646eef8d12624d3afd96fc7515fa37c2e6572f6f88ea64afaca354fd31f1","RELEASES.md":"9f688e94ed6e0f01ae149c501b4ea65b0193724f85f3da9ee11ca06ea2fe5358","build.rs":"ade351dc146cbd66e25a8fcf8a636400b16d8497a2c1b224c99f70896561329f","src/lib.rs":"66a19ec175770b508e1bd5fa5d5442b0e75676fc7fd605c8a9aad886c7f32998"},"package":"7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"} \ No newline at end of file
diff --git a/rust/vendor/num-iter/Cargo.toml b/rust/vendor/num-iter/Cargo.toml
new file mode 100644
index 0000000..d05c283
--- /dev/null
+++ b/rust/vendor/num-iter/Cargo.toml
@@ -0,0 +1,61 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+name = "num-iter"
+version = "0.1.43"
+authors = ["The Rust Project Developers"]
+build = "build.rs"
+exclude = [
+ "/bors.toml",
+ "/ci/*",
+ "/.github/*",
+]
+description = "External iterators for generic mathematics"
+homepage = "https://github.com/rust-num/num-iter"
+documentation = "https://docs.rs/num-iter"
+readme = "README.md"
+keywords = [
+ "mathematics",
+ "numerics",
+]
+categories = [
+ "algorithms",
+ "science",
+ "no-std",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-num/num-iter"
+
+[package.metadata.docs.rs]
+features = ["std"]
+
+[dependencies.num-integer]
+version = "0.1.42"
+default-features = false
+
+[dependencies.num-traits]
+version = "0.2.11"
+default-features = false
+
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+default = ["std"]
+i128 = [
+ "num-integer/i128",
+ "num-traits/i128",
+]
+std = [
+ "num-integer/std",
+ "num-traits/std",
+]
diff --git a/rust/vendor/num-iter/LICENSE-APACHE b/rust/vendor/num-iter/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num-iter/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num-iter/LICENSE-MIT b/rust/vendor/num-iter/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num-iter/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num-iter/README.md b/rust/vendor/num-iter/README.md
new file mode 100644
index 0000000..b4511c2
--- /dev/null
+++ b/rust/vendor/num-iter/README.md
@@ -0,0 +1,64 @@
+# num-iter
+
+[![crate](https://img.shields.io/crates/v/num-iter.svg)](https://crates.io/crates/num-iter)
+[![documentation](https://docs.rs/num-iter/badge.svg)](https://docs.rs/num-iter)
+[![minimum rustc 1.8](https://img.shields.io/badge/rustc-1.8+-red.svg)](https://rust-lang.github.io/rfcs/2495-min-rust-version.html)
+[![build status](https://github.com/rust-num/num-iter/workflows/master/badge.svg)](https://github.com/rust-num/num-iter/actions)
+
+Generic `Range` iterators for Rust.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-iter = "0.1"
+```
+
+and this to your crate root:
+
+```rust
+extern crate num_iter;
+```
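+
+A minimal usage sketch (the `range` and `range_step` functions shown here are
+the ones defined in this crate's `src/lib.rs`, using the default `std` build):
+
+```rust
+extern crate num_iter;
+
+fn main() {
+    // Iterate the half-open range [0, 5): prints 0 1 2 3 4.
+    for i in num_iter::range(0u32, 5) {
+        print!("{} ", i);
+    }
+
+    // Step by 2 over the half-open range [0, 10).
+    let evens: Vec<u32> = num_iter::range_step(0u32, 10, 2).collect();
+    assert_eq!(evens, vec![0, 2, 4, 6, 8]);
+}
+```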
+
+## Features
+
+This crate can be used without the standard library (`#![no_std]`) by disabling
+the default `std` feature. Use this in `Cargo.toml`:
+
+```toml
+[dependencies.num-iter]
+version = "0.1.35"
+default-features = false
+```
+
+There is currently no functional difference between building with and without
+`std`, but there may be in the future.
+
+Implementations for `i128` and `u128` are only available with Rust 1.26 and
+later. The build script automatically detects this, but you can make it
+mandatory by enabling the `i128` crate feature.
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-iter` crate is tested for rustc 1.8 and greater.
+
+## License
+
+Licensed under either of
+
+ * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+ * [MIT license](http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/rust/vendor/num-iter/RELEASES.md b/rust/vendor/num-iter/RELEASES.md
new file mode 100644
index 0000000..c164e76
--- /dev/null
+++ b/rust/vendor/num-iter/RELEASES.md
@@ -0,0 +1,88 @@
+# Release 0.1.43 (2022-04-26)
+
+- [`Range`, `RangeInclusive`, and `RangeFrom` now implement `RangeBounds`][21]
+ from Rust 1.28 and later.
+
+**Contributors**: @chrismit3s, @cuviper
+
+[21]: https://github.com/rust-num/num-iter/pull/21
+
+# Release 0.1.42 (2020-10-29)
+
+- [The "i128" feature now bypasses compiler probing][20]. The build script
+ used to probe anyway and panic if requested support wasn't found, but
+ sometimes this ran into bad corner cases with `autocfg`.
+
+**Contributors**: @cuviper
+
+[20]: https://github.com/rust-num/num-iter/pull/20
+
+# Release 0.1.41 (2020-06-11)
+
+- [The new `RangeFrom` and `RangeStepFrom` iterators][18] will count from a
+ given starting value, without any terminating value.
+
+**Contributors**: @cuviper, @sollyucko
+
+[18]: https://github.com/rust-num/num-iter/pull/18
+
+# Release 0.1.40 (2020-01-09)
+
+- [Updated the `autocfg` build dependency to 1.0][14].
+
+**Contributors**: @cuviper, @dingelish
+
+[14]: https://github.com/rust-num/num-iter/pull/14
+
+# Release 0.1.39 (2019-05-21)
+
+- [Fixed feature detection on `no_std` targets][11].
+
+**Contributors**: @cuviper
+
+[11]: https://github.com/rust-num/num-iter/pull/11
+
+# Release 0.1.38 (2019-05-20)
+
+- Maintenance update -- no functional changes.
+
+**Contributors**: @cuviper, @ignatenkobrain
+
+# Release 0.1.37 (2018-05-11)
+
+- [Support for 128-bit integers is now automatically detected and enabled.][5]
+ Setting the `i128` crate feature now causes the build script to panic if such
+ support is not detected.
+
+**Contributors**: @cuviper
+
+[5]: https://github.com/rust-num/num-iter/pull/5
+
+# Release 0.1.36 (2018-05-10)
+
+- [The iterators are now implemented for `i128` and `u128`][4] starting with
+ Rust 1.26, enabled by the new `i128` crate feature.
+
+**Contributors**: @cuviper
+
+[4]: https://github.com/rust-num/num-iter/pull/4
+
+# Release 0.1.35 (2018-02-06)
+
+- [num-iter now has its own source repository][num-356] at [rust-num/num-iter][home].
+- [There is now a `std` feature][2], enabled by default, along with the implication
+ that building *without* this feature makes this a `#[no_std]` crate.
+ - There is no difference in the API at this time.
+
+**Contributors**: @cuviper
+
+[home]: https://github.com/rust-num/num-iter
+[num-356]: https://github.com/rust-num/num/pull/356
+[2]: https://github.com/rust-num/num-iter/pull/2
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
+
diff --git a/rust/vendor/num-iter/build.rs b/rust/vendor/num-iter/build.rs
new file mode 100644
index 0000000..4f39b83
--- /dev/null
+++ b/rust/vendor/num-iter/build.rs
@@ -0,0 +1,19 @@
+extern crate autocfg;
+
+use std::env;
+
+fn main() {
+ let autocfg = autocfg::new();
+
+    // If the "i128" feature is explicitly requested, don't bother probing for it.
+ // It will still cause a build error if that was set improperly.
+ if env::var_os("CARGO_FEATURE_I128").is_some() || autocfg.probe_type("i128") {
+ autocfg::emit("has_i128");
+ }
+
+ // The RangeBounds trait was stabilized in 1.28, so from that version onwards we
+ // implement that trait.
+ autocfg.emit_rustc_version(1, 28);
+
+ autocfg::rerun_path("build.rs");
+}
diff --git a/rust/vendor/num-iter/src/lib.rs b/rust/vendor/num-iter/src/lib.rs
new file mode 100644
index 0000000..74700d0
--- /dev/null
+++ b/rust/vendor/num-iter/src/lib.rs
@@ -0,0 +1,734 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! External iterators for generic mathematics
+//!
+//! ## Compatibility
+//!
+//! The `num-iter` crate is tested for rustc 1.8 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num-iter/0.1")]
+#![no_std]
+#[cfg(feature = "std")]
+extern crate std;
+
+extern crate num_integer as integer;
+extern crate num_traits as traits;
+
+use core::ops::{Add, Sub};
+use core::usize;
+use integer::Integer;
+use traits::{CheckedAdd, One, ToPrimitive, Zero};
+
+#[cfg(rustc_1_28)]
+use core::ops::{Bound, RangeBounds};
+
+/// An iterator over the range [start, stop)
+#[derive(Clone)]
+pub struct Range<A> {
+ state: A,
+ stop: A,
+ one: A,
+}
+
+/// Returns an iterator over the given range [start, stop) (that is, starting
+/// at start (inclusive), and ending at stop (exclusive)).
+///
+/// # Example
+///
+/// ```rust
+/// let array = [0, 1, 2, 3, 4];
+///
+/// for i in num_iter::range(0, 5) {
+/// println!("{}", i);
+/// assert_eq!(i, array[i]);
+/// }
+/// ```
+#[inline]
+pub fn range<A>(start: A, stop: A) -> Range<A>
+where
+ A: Add<A, Output = A> + PartialOrd + Clone + One,
+{
+ Range {
+ state: start,
+ stop: stop,
+ one: One::one(),
+ }
+}
+
+#[inline]
+#[cfg(has_i128)]
+fn unsigned<T: ToPrimitive>(x: &T) -> Option<u128> {
+ match x.to_u128() {
+ None => match x.to_i128() {
+ Some(i) => Some(i as u128),
+ None => None,
+ },
+ Some(u) => Some(u),
+ }
+}
+
+#[inline]
+#[cfg(not(has_i128))]
+fn unsigned<T: ToPrimitive>(x: &T) -> Option<u64> {
+ match x.to_u64() {
+ None => match x.to_i64() {
+ Some(i) => Some(i as u64),
+ None => None,
+ },
+ Some(u) => Some(u),
+ }
+}
+
+#[cfg(rustc_1_28)]
+impl<A> RangeBounds<A> for Range<A> {
+ fn start_bound(&self) -> Bound<&A> {
+ Bound::Included(&self.state)
+ }
+
+ fn end_bound(&self) -> Bound<&A> {
+ Bound::Excluded(&self.stop)
+ }
+}
+
+// FIXME: rust-lang/rust#10414: Unfortunate type bound
+impl<A> Iterator for Range<A>
+where
+ A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive,
+{
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ if self.state < self.stop {
+ let result = self.state.clone();
+ self.state = self.state.clone() + self.one.clone();
+ Some(result)
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Check for empty ranges first.
+ if self.state >= self.stop {
+ return (0, Some(0));
+ }
+
+ // Try to cast both ends to the largest unsigned primitive.
+ // Note that negative values will wrap to a large positive.
+ if let Some(a) = unsigned(&self.state) {
+ if let Some(b) = unsigned(&self.stop) {
+ // We've lost signs, but we already know state < stop, so
+ // a `wrapping_sub` will give the correct unsigned delta.
+ return match b.wrapping_sub(a).to_usize() {
+ Some(len) => (len, Some(len)),
+ None => (usize::MAX, None),
+ };
+ }
+ }
+
+ // Standard fallback for unbounded/unrepresentable bounds
+ (0, None)
+ }
+}
+
+/// `Integer` is required to ensure the range will be the same regardless of
+/// the direction it is consumed.
+impl<A> DoubleEndedIterator for Range<A>
+where
+ A: Integer + Clone + ToPrimitive,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ if self.stop > self.state {
+ self.stop = self.stop.clone() - self.one.clone();
+ Some(self.stop.clone())
+ } else {
+ None
+ }
+ }
+}
+
+/// An iterator over the range [start, stop]
+#[derive(Clone)]
+pub struct RangeInclusive<A> {
+ range: Range<A>,
+ done: bool,
+}
+
+/// Return an iterator over the range [start, stop]
+#[inline]
+pub fn range_inclusive<A>(start: A, stop: A) -> RangeInclusive<A>
+where
+ A: Add<A, Output = A> + PartialOrd + Clone + One,
+{
+ RangeInclusive {
+ range: range(start, stop),
+ done: false,
+ }
+}
+
+#[cfg(rustc_1_28)]
+impl<A> RangeBounds<A> for RangeInclusive<A> {
+ fn start_bound(&self) -> Bound<&A> {
+ Bound::Included(&self.range.state)
+ }
+
+ fn end_bound(&self) -> Bound<&A> {
+ Bound::Included(&self.range.stop)
+ }
+}
+
+impl<A> Iterator for RangeInclusive<A>
+where
+ A: Add<A, Output = A> + PartialOrd + Clone + ToPrimitive,
+{
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ match self.range.next() {
+ Some(x) => Some(x),
+ None => {
+ if !self.done && self.range.state == self.range.stop {
+ self.done = true;
+ Some(self.range.stop.clone())
+ } else {
+ None
+ }
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (lo, hi) = self.range.size_hint();
+ if self.done {
+ (lo, hi)
+ } else {
+ let lo = lo.saturating_add(1);
+ let hi = match hi {
+ Some(x) => x.checked_add(1),
+ None => None,
+ };
+ (lo, hi)
+ }
+ }
+}
+
+impl<A> DoubleEndedIterator for RangeInclusive<A>
+where
+ A: Sub<A, Output = A> + Integer + Clone + ToPrimitive,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ if self.range.stop > self.range.state {
+ let result = self.range.stop.clone();
+ self.range.stop = self.range.stop.clone() - self.range.one.clone();
+ Some(result)
+ } else if !self.done && self.range.state == self.range.stop {
+ self.done = true;
+ Some(self.range.stop.clone())
+ } else {
+ None
+ }
+ }
+}
+
+/// An iterator over the range [start, stop) by `step`. It handles overflow by stopping.
+#[derive(Clone)]
+pub struct RangeStep<A> {
+ state: A,
+ stop: A,
+ step: A,
+ rev: bool,
+}
+
+/// Return an iterator over the range [start, stop) by `step`. It handles overflow by stopping.
+#[inline]
+pub fn range_step<A>(start: A, stop: A, step: A) -> RangeStep<A>
+where
+ A: CheckedAdd + PartialOrd + Clone + Zero,
+{
+ let rev = step < Zero::zero();
+ RangeStep {
+ state: start,
+ stop: stop,
+ step: step,
+ rev: rev,
+ }
+}
+
+impl<A> Iterator for RangeStep<A>
+where
+ A: CheckedAdd + PartialOrd + Clone,
+{
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ if (self.rev && self.state > self.stop) || (!self.rev && self.state < self.stop) {
+ let result = self.state.clone();
+ match self.state.checked_add(&self.step) {
+ Some(x) => self.state = x,
+ None => self.state = self.stop.clone(),
+ }
+ Some(result)
+ } else {
+ None
+ }
+ }
+}
+
+/// An iterator over the range [start, stop] by `step`. It handles overflow by stopping.
+#[derive(Clone)]
+pub struct RangeStepInclusive<A> {
+ state: A,
+ stop: A,
+ step: A,
+ rev: bool,
+ done: bool,
+}
+
+/// Return an iterator over the range [start, stop] by `step`. It handles overflow by stopping.
+#[inline]
+pub fn range_step_inclusive<A>(start: A, stop: A, step: A) -> RangeStepInclusive<A>
+where
+ A: CheckedAdd + PartialOrd + Clone + Zero,
+{
+ let rev = step < Zero::zero();
+ RangeStepInclusive {
+ state: start,
+ stop: stop,
+ step: step,
+ rev: rev,
+ done: false,
+ }
+}
+
+impl<A> Iterator for RangeStepInclusive<A>
+where
+ A: CheckedAdd + PartialOrd + Clone + PartialEq,
+{
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ if !self.done
+ && ((self.rev && self.state >= self.stop) || (!self.rev && self.state <= self.stop))
+ {
+ let result = self.state.clone();
+ match self.state.checked_add(&self.step) {
+ Some(x) => self.state = x,
+ None => self.done = true,
+ }
+ Some(result)
+ } else {
+ None
+ }
+ }
+}
+
+/// An iterator over the infinite range starting at `start`
+#[derive(Clone)]
+pub struct RangeFrom<A> {
+ state: A,
+ one: A,
+}
+
+/// Return an iterator over the infinite range starting at `start` and continuing forever.
+///
+/// *Note*: Currently, the `Iterator` implementation is not checked for overflow.
+/// If you use a finite-sized integer type and the integer overflows,
+/// it might panic in debug mode or wrap around in release mode.
+/// **This behavior is not guaranteed and may change at any time.**
+#[inline]
+pub fn range_from<A>(start: A) -> RangeFrom<A>
+where
+ A: Add<A, Output = A> + Clone + One,
+{
+ RangeFrom {
+ state: start,
+ one: One::one(),
+ }
+}
+
+#[cfg(rustc_1_28)]
+impl<A> RangeBounds<A> for RangeFrom<A> {
+ fn start_bound(&self) -> Bound<&A> {
+ Bound::Included(&self.state)
+ }
+
+ fn end_bound(&self) -> Bound<&A> {
+ Bound::Unbounded
+ }
+}
+
+impl<A> Iterator for RangeFrom<A>
+where
+ A: Add<A, Output = A> + Clone,
+{
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ let result = self.state.clone();
+ self.state = self.state.clone() + self.one.clone();
+ Some(result)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::MAX, None)
+ }
+}
+
+/// An iterator over the infinite range starting at `start` by `step`
+#[derive(Clone)]
+pub struct RangeStepFrom<A> {
+ state: A,
+ step: A,
+}
+
+/// Return an iterator over the infinite range starting at `start` and continuing forever by `step`.
+///
+/// *Note*: Currently, the `Iterator` implementation is not checked for overflow.
+/// If you use a finite-sized integer type and the integer overflows,
+/// it might panic in debug mode or wrap around in release mode.
+/// **This behavior is not guaranteed and may change at any time.**
+#[inline]
+pub fn range_step_from<A>(start: A, step: A) -> RangeStepFrom<A>
+where
+ A: Add<A, Output = A> + Clone,
+{
+ RangeStepFrom {
+ state: start,
+ step: step,
+ }
+}
+
+impl<A> Iterator for RangeStepFrom<A>
+where
+ A: Add<A, Output = A> + Clone,
+{
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ let result = self.state.clone();
+ self.state = self.state.clone() + self.step.clone();
+ Some(result)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (usize::MAX, None)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use core::cmp::Ordering;
+ use core::iter;
+ use core::ops::{Add, Mul};
+ use core::{isize, usize};
+ use traits::{One, ToPrimitive};
+
+ #[test]
+ fn test_range() {
+ /// A mock type to check Range when ToPrimitive returns None
+ struct Foo;
+
+ impl ToPrimitive for Foo {
+ fn to_i64(&self) -> Option<i64> {
+ None
+ }
+ fn to_u64(&self) -> Option<u64> {
+ None
+ }
+ }
+
+ impl Add<Foo> for Foo {
+ type Output = Foo;
+
+ fn add(self, _: Foo) -> Foo {
+ Foo
+ }
+ }
+
+ impl PartialEq for Foo {
+ fn eq(&self, _: &Foo) -> bool {
+ true
+ }
+ }
+
+ impl PartialOrd for Foo {
+ fn partial_cmp(&self, _: &Foo) -> Option<Ordering> {
+ None
+ }
+ }
+
+ impl Clone for Foo {
+ fn clone(&self) -> Foo {
+ Foo
+ }
+ }
+
+ impl Mul<Foo> for Foo {
+ type Output = Foo;
+
+ fn mul(self, _: Foo) -> Foo {
+ Foo
+ }
+ }
+
+ impl One for Foo {
+ fn one() -> Foo {
+ Foo
+ }
+ }
+
+ assert!(super::range(0, 5).eq([0, 1, 2, 3, 4].iter().cloned()));
+ assert!(super::range(-10, -1).eq([-10, -9, -8, -7, -6, -5, -4, -3, -2].iter().cloned()));
+ assert!(super::range(0, 5).rev().eq([4, 3, 2, 1, 0].iter().cloned()));
+ assert_eq!(super::range(200, -5).count(), 0);
+ assert_eq!(super::range(200, -5).rev().count(), 0);
+ assert_eq!(super::range(200, 200).count(), 0);
+ assert_eq!(super::range(200, 200).rev().count(), 0);
+
+ assert_eq!(super::range(0, 100).size_hint(), (100, Some(100)));
+ // this test is only meaningful when sizeof usize < sizeof u64
+ assert_eq!(
+ super::range(usize::MAX - 1, usize::MAX).size_hint(),
+ (1, Some(1))
+ );
+ assert_eq!(super::range(-10, -1).size_hint(), (9, Some(9)));
+ assert_eq!(
+ super::range(isize::MIN, isize::MAX).size_hint(),
+ (usize::MAX, Some(usize::MAX))
+ );
+ }
+
+ #[test]
+ #[cfg(has_i128)]
+ fn test_range_128() {
+ use core::{i128, u128};
+
+ assert!(super::range(0i128, 5).eq([0, 1, 2, 3, 4].iter().cloned()));
+ assert!(super::range(-10i128, -1).eq([-10, -9, -8, -7, -6, -5, -4, -3, -2].iter().cloned()));
+ assert!(super::range(0u128, 5)
+ .rev()
+ .eq([4, 3, 2, 1, 0].iter().cloned()));
+
+ assert_eq!(
+ super::range(i128::MIN, i128::MIN + 1).size_hint(),
+ (1, Some(1))
+ );
+ assert_eq!(
+ super::range(i128::MAX - 1, i128::MAX).size_hint(),
+ (1, Some(1))
+ );
+ assert_eq!(
+ super::range(i128::MIN, i128::MAX).size_hint(),
+ (usize::MAX, None)
+ );
+
+ assert_eq!(
+ super::range(u128::MAX - 1, u128::MAX).size_hint(),
+ (1, Some(1))
+ );
+ assert_eq!(
+ super::range(0, usize::MAX as u128).size_hint(),
+ (usize::MAX, Some(usize::MAX))
+ );
+ assert_eq!(
+ super::range(0, usize::MAX as u128 + 1).size_hint(),
+ (usize::MAX, None)
+ );
+ assert_eq!(super::range(0, i128::MAX).size_hint(), (usize::MAX, None));
+ }
+
+ #[test]
+ fn test_range_inclusive() {
+ assert!(super::range_inclusive(0, 5).eq([0, 1, 2, 3, 4, 5].iter().cloned()));
+ assert!(super::range_inclusive(0, 5)
+ .rev()
+ .eq([5, 4, 3, 2, 1, 0].iter().cloned()));
+ assert_eq!(super::range_inclusive(200, -5).count(), 0);
+ assert_eq!(super::range_inclusive(200, -5).rev().count(), 0);
+ assert!(super::range_inclusive(200, 200).eq(iter::once(200)));
+ assert!(super::range_inclusive(200, 200).rev().eq(iter::once(200)));
+ assert_eq!(
+ super::range_inclusive(isize::MIN, isize::MAX - 1).size_hint(),
+ (usize::MAX, Some(usize::MAX))
+ );
+ assert_eq!(
+ super::range_inclusive(isize::MIN, isize::MAX).size_hint(),
+ (usize::MAX, None)
+ );
+ }
+
+ #[test]
+ #[cfg(has_i128)]
+ fn test_range_inclusive_128() {
+ use core::i128;
+
+ assert!(super::range_inclusive(0u128, 5).eq([0, 1, 2, 3, 4, 5].iter().cloned()));
+ assert!(super::range_inclusive(0u128, 5)
+ .rev()
+ .eq([5, 4, 3, 2, 1, 0].iter().cloned()));
+ assert_eq!(super::range_inclusive(200i128, -5).count(), 0);
+ assert_eq!(super::range_inclusive(200i128, -5).rev().count(), 0);
+ assert!(super::range_inclusive(200u128, 200).eq(iter::once(200)));
+ assert!(super::range_inclusive(200u128, 200)
+ .rev()
+ .eq(iter::once(200)));
+ assert_eq!(
+ super::range_inclusive(isize::MIN as i128, isize::MAX as i128 - 1).size_hint(),
+ (usize::MAX, Some(usize::MAX))
+ );
+ assert_eq!(
+ super::range_inclusive(isize::MIN as i128, isize::MAX as i128).size_hint(),
+ (usize::MAX, None)
+ );
+ assert_eq!(
+ super::range_inclusive(isize::MIN as i128, isize::MAX as i128 + 1).size_hint(),
+ (usize::MAX, None)
+ );
+ assert_eq!(
+ super::range_inclusive(i128::MIN, i128::MAX).size_hint(),
+ (usize::MAX, None)
+ );
+ }
+
+ #[test]
+ fn test_range_step() {
+ assert!(super::range_step(0, 20, 5).eq([0, 5, 10, 15].iter().cloned()));
+ assert!(super::range_step(20, 0, -5).eq([20, 15, 10, 5].iter().cloned()));
+ assert!(super::range_step(20, 0, -6).eq([20, 14, 8, 2].iter().cloned()));
+ assert!(super::range_step(200u8, 255, 50).eq([200u8, 250].iter().cloned()));
+ assert!(super::range_step(200, -5, 1).eq(iter::empty()));
+ assert!(super::range_step(200, 200, 1).eq(iter::empty()));
+ }
+
+ #[test]
+ #[cfg(has_i128)]
+ fn test_range_step_128() {
+ use core::u128::MAX as UMAX;
+
+ assert!(super::range_step(0u128, 20, 5).eq([0, 5, 10, 15].iter().cloned()));
+ assert!(super::range_step(20i128, 0, -5).eq([20, 15, 10, 5].iter().cloned()));
+ assert!(super::range_step(20i128, 0, -6).eq([20, 14, 8, 2].iter().cloned()));
+ assert!(super::range_step(UMAX - 55, UMAX, 50).eq([UMAX - 55, UMAX - 5].iter().cloned()));
+ assert!(super::range_step(200i128, -5, 1).eq(iter::empty()));
+ assert!(super::range_step(200i128, 200, 1).eq(iter::empty()));
+ }
+
+ #[test]
+ fn test_range_step_inclusive() {
+ assert!(super::range_step_inclusive(0, 20, 5).eq([0, 5, 10, 15, 20].iter().cloned()));
+ assert!(super::range_step_inclusive(20, 0, -5).eq([20, 15, 10, 5, 0].iter().cloned()));
+ assert!(super::range_step_inclusive(20, 0, -6).eq([20, 14, 8, 2].iter().cloned()));
+ assert!(super::range_step_inclusive(200u8, 255, 50).eq([200u8, 250].iter().cloned()));
+ assert!(super::range_step_inclusive(200, -5, 1).eq(iter::empty()));
+ assert!(super::range_step_inclusive(200, 200, 1).eq(iter::once(200)));
+ }
+
+ #[test]
+ #[cfg(has_i128)]
+ fn test_range_step_inclusive_128() {
+ use core::u128::MAX as UMAX;
+
+ assert!(super::range_step_inclusive(0u128, 20, 5).eq([0, 5, 10, 15, 20].iter().cloned()));
+ assert!(super::range_step_inclusive(20i128, 0, -5).eq([20, 15, 10, 5, 0].iter().cloned()));
+ assert!(super::range_step_inclusive(20i128, 0, -6).eq([20, 14, 8, 2].iter().cloned()));
+ assert!(super::range_step_inclusive(UMAX - 55, UMAX, 50)
+ .eq([UMAX - 55, UMAX - 5].iter().cloned()));
+ assert!(super::range_step_inclusive(200i128, -5, 1).eq(iter::empty()));
+ assert!(super::range_step_inclusive(200i128, 200, 1).eq(iter::once(200)));
+ }
+
+ #[test]
+ fn test_range_from() {
+ assert!(super::range_from(10u8)
+ .take(5)
+ .eq([10, 11, 12, 13, 14].iter().cloned()));
+ assert_eq!(super::range_from(10u8).size_hint(), (usize::MAX, None));
+ }
+
+ #[test]
+ fn test_range_step_from() {
+ assert!(super::range_step_from(10u8, 2u8)
+ .take(5)
+ .eq([10, 12, 14, 16, 18].iter().cloned()));
+ assert_eq!(
+ super::range_step_from(10u8, 2u8).size_hint(),
+ (usize::MAX, None)
+ );
+
+ assert!(super::range_step_from(10u8, 1u8)
+ .take(5)
+ .eq([10, 11, 12, 13, 14].iter().cloned()));
+ assert_eq!(
+ super::range_step_from(10u8, 1u8).size_hint(),
+ (usize::MAX, None)
+ );
+
+ assert!(super::range_step_from(10u8, 0u8)
+ .take(5)
+ .eq([10, 10, 10, 10, 10].iter().cloned()));
+ assert_eq!(
+ super::range_step_from(10u8, 0u8).size_hint(),
+ (usize::MAX, None)
+ );
+
+ assert!(super::range_step_from(10i8, 2i8)
+ .take(5)
+ .eq([10, 12, 14, 16, 18].iter().cloned()));
+ assert_eq!(
+ super::range_step_from(10i8, 2i8).size_hint(),
+ (usize::MAX, None)
+ );
+
+ assert!(super::range_step_from(10i8, 1i8)
+ .take(5)
+ .eq([10, 11, 12, 13, 14].iter().cloned()));
+ assert_eq!(
+ super::range_step_from(10i8, 1i8).size_hint(),
+ (usize::MAX, None)
+ );
+
+ assert!(super::range_step_from(10i8, 0i8)
+ .take(5)
+ .eq([10, 10, 10, 10, 10].iter().cloned()));
+ assert_eq!(
+ super::range_step_from(10i8, 0i8).size_hint(),
+ (usize::MAX, None)
+ );
+
+ assert!(super::range_step_from(10i8, -1i8)
+ .take(5)
+ .eq([10, 9, 8, 7, 6].iter().cloned()));
+ assert_eq!(
+ super::range_step_from(10i8, -1i8).size_hint(),
+ (usize::MAX, None)
+ );
+
+ assert!(super::range_step_from(10i8, -2i8)
+ .take(5)
+ .eq([10, 8, 6, 4, 2].iter().cloned()));
+ assert_eq!(
+ super::range_step_from(10i8, -2i8).size_hint(),
+ (usize::MAX, None)
+ );
+ }
+}
diff --git a/rust/vendor/num-rational/.cargo-checksum.json b/rust/vendor/num-rational/.cargo-checksum.json
new file mode 100644
index 0000000..f3d8f80
--- /dev/null
+++ b/rust/vendor/num-rational/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"15b5b2c51cedb62324d85209347b1f104bc32fa3bb8c16a4a642f2b27e93d077","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"44e4759870c5e7b1b68d3ded60f655cac360e7614ad09d646cc2db35c47a502f","RELEASES.md":"41de3744864404bfbb946e93e425a0615ae87b674ad93d27b191223b75909753","build.rs":"aba9dbc29eff865d95ce39cfe7cb20fde6137c7b7fae441d1b52ebb5087e402f","src/lib.rs":"3d47aadd8b47165ebee680250bfa097cced6b813e4ca06cb1a0e14d28501a531"},"package":"5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef"} \ No newline at end of file
diff --git a/rust/vendor/num-rational/Cargo.toml b/rust/vendor/num-rational/Cargo.toml
new file mode 100644
index 0000000..dd94ea3
--- /dev/null
+++ b/rust/vendor/num-rational/Cargo.toml
@@ -0,0 +1,54 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "num-rational"
+version = "0.2.4"
+authors = ["The Rust Project Developers"]
+build = "build.rs"
+exclude = ["/ci/*", "/.travis.yml", "/bors.toml"]
+description = "Rational numbers implementation for Rust"
+homepage = "https://github.com/rust-num/num-rational"
+documentation = "https://docs.rs/num-rational"
+readme = "README.md"
+keywords = ["mathematics", "numerics", "fractions"]
+categories = ["algorithms", "data-structures", "science", "no-std"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/rust-num/num-rational"
+[package.metadata.docs.rs]
+features = ["std", "bigint-std", "serde"]
+[dependencies.num-bigint]
+version = "0.2.5"
+optional = true
+default-features = false
+
+[dependencies.num-integer]
+version = "0.1.42"
+default-features = false
+
+[dependencies.num-traits]
+version = "0.2.11"
+default-features = false
+
+[dependencies.serde]
+version = "1.0.0"
+optional = true
+default-features = false
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+bigint = ["num-bigint"]
+bigint-std = ["bigint", "num-bigint/std"]
+default = ["bigint-std", "std"]
+i128 = ["num-integer/i128", "num-traits/i128"]
+std = ["num-integer/std", "num-traits/std"]
diff --git a/rust/vendor/num-rational/LICENSE-APACHE b/rust/vendor/num-rational/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num-rational/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num-rational/LICENSE-MIT b/rust/vendor/num-rational/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num-rational/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num-rational/README.md b/rust/vendor/num-rational/README.md
new file mode 100644
index 0000000..476bff7
--- /dev/null
+++ b/rust/vendor/num-rational/README.md
@@ -0,0 +1,46 @@
+# num-rational
+
+[![crate](https://img.shields.io/crates/v/num-rational.svg)](https://crates.io/crates/num-rational)
+[![documentation](https://docs.rs/num-rational/badge.svg)](https://docs.rs/num-rational)
+![minimum rustc 1.15](https://img.shields.io/badge/rustc-1.15+-red.svg)
+[![Travis status](https://travis-ci.org/rust-num/num-rational.svg?branch=master)](https://travis-ci.org/rust-num/num-rational)
+
+Generic `Rational` numbers (aka fractions) for Rust.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-rational = "0.2"
+```
+
+and this to your crate root:
+
+```rust
+extern crate num_rational;
+```
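+
+For a quick feel of the API, a short program along these lines should work (a
+minimal illustrative sketch using the `Rational64` alias):
+
+```rust
+extern crate num_rational;
+
+use num_rational::Rational64;
+
+fn main() {
+    // `Ratio::new` reduces to lowest terms, so 2/4 becomes 1/2.
+    let a = Rational64::new(1, 2);
+    let b = Rational64::new(2, 4);
+    assert_eq!(a, b);
+
+    // The usual arithmetic operators are implemented directly on `Ratio`.
+    assert_eq!(a + b, Rational64::new(1, 1));
+    println!("{}", a * b); // prints "1/4"
+}
+```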
+
+## Features
+
+This crate can be used without the standard library (`#![no_std]`) by disabling
+the default `std` feature. Use this in `Cargo.toml`:
+
+```toml
+[dependencies.num-rational]
+version = "0.2"
+default-features = false
+```
+
+Implementations for `i128` and `u128` are only available with Rust 1.26 and
+later. The build script automatically detects this, but you can make it
+mandatory by enabling the `i128` crate feature.
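+
+For example, to require `i128` support up front (an illustrative sketch of the
+corresponding `Cargo.toml` entry):
+
+```toml
+[dependencies.num-rational]
+version = "0.2"
+features = ["i128"]
+```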
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-rational` crate is tested for rustc 1.15 and greater.
diff --git a/rust/vendor/num-rational/RELEASES.md b/rust/vendor/num-rational/RELEASES.md
new file mode 100644
index 0000000..6094226
--- /dev/null
+++ b/rust/vendor/num-rational/RELEASES.md
@@ -0,0 +1,91 @@
+# Release 0.2.4 (2020-03-17)
+
+- [Fixed `CheckedDiv` when both dividend and divisor are 0][74].
+- [Fixed `CheckedDiv` with `min_value()` numerators][76].
+
+[74]: https://github.com/rust-num/num-rational/pull/74
+[76]: https://github.com/rust-num/num-rational/pull/76
+
+# Release 0.2.3 (2020-01-09)
+
+- [`Ratio` now performs earlier reductions to avoid overflow with `+-*/%` operators][42].
+- [`Ratio::{new_raw, numer, denom}` are now `const fn` for Rust 1.31 and later][48].
+- [Updated the `autocfg` build dependency to 1.0][63].
+
+**Contributors**: @cuviper, @dingelish, @jimbo1qaz, @maxbla
+
+[42]: https://github.com/rust-num/num-rational/pull/42
+[48]: https://github.com/rust-num/num-rational/pull/48
+[63]: https://github.com/rust-num/num-rational/pull/63
+
+# Release 0.2.2 (2019-06-10)
+
+- [`Ratio` now implements `Zero::set_zero` and `One::set_one`][47].
+
+**Contributors**: @cuviper, @ignatenkobrain, @vks
+
+[47]: https://github.com/rust-num/num-rational/pull/47
+
+# Release 0.2.1 (2018-06-22)
+
+- Maintenance release to fix `html_root_url`.
+
+# Release 0.2.0 (2018-06-19)
+
+### Enhancements
+
+- [`Ratio` now implements `One::is_one` and the `Inv` trait][19].
+- [`Ratio` now implements `Sum` and `Product`][25].
+- [`Ratio` now supports `i128` and `u128` components][29] with Rust 1.26+.
+- [`Ratio` now implements the `Pow` trait][21].
+
+### Breaking Changes
+
+- [`num-rational` now requires rustc 1.15 or greater][18].
+- [There is now a `std` feature][23], enabled by default, along with the
+ implication that building *without* this feature makes this a `#![no_std]`
+ crate. A few methods now require `FloatCore` instead of `Float`.
+- [The `serde` dependency has been updated to 1.0][24], and `rustc-serialize`
+ is no longer supported by `num-rational`.
+- The optional `num-bigint` dependency has been updated to 0.2, and should be
+ enabled using the `bigint-std` feature. In the future, it may be possible
+ to use the `bigint` feature with `no_std`.
+
+**Contributors**: @clarcharr, @cuviper, @Emerentius, @robomancer-or, @vks
+
+[18]: https://github.com/rust-num/num-rational/pull/18
+[19]: https://github.com/rust-num/num-rational/pull/19
+[21]: https://github.com/rust-num/num-rational/pull/21
+[23]: https://github.com/rust-num/num-rational/pull/23
+[24]: https://github.com/rust-num/num-rational/pull/24
+[25]: https://github.com/rust-num/num-rational/pull/25
+[29]: https://github.com/rust-num/num-rational/pull/29
+
+
+# Release 0.1.42 (2018-02-08)
+
+- Maintenance release to update dependencies.
+
+
+# Release 0.1.41 (2018-01-26)
+
+- [num-rational now has its own source repository][num-356] at [rust-num/num-rational][home].
+- [`Ratio` now implements `CheckedAdd`, `CheckedSub`, `CheckedMul`, and `CheckedDiv`][11].
+- [`Ratio` now implements `AddAssign`, `SubAssign`, `MulAssign`, `DivAssign`, and `RemAssign`][12]
+ with either `Ratio` or an integer on the right side. The non-assignment operators now also
+ accept integers as an operand.
+- [`Ratio` operators now make fewer `clone()` calls][14].
+
+Thanks to @c410-f3r, @cuviper, and @psimonyi for their contributions!
+
+[home]: https://github.com/rust-num/num-rational
+[num-356]: https://github.com/rust-num/num/pull/356
+[11]: https://github.com/rust-num/num-rational/pull/11
+[12]: https://github.com/rust-num/num-rational/pull/12
+[14]: https://github.com/rust-num/num-rational/pull/14
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors who have made this crate what it is!
diff --git a/rust/vendor/num-rational/build.rs b/rust/vendor/num-rational/build.rs
new file mode 100644
index 0000000..85e88b7
--- /dev/null
+++ b/rust/vendor/num-rational/build.rs
@@ -0,0 +1,20 @@
+extern crate autocfg;
+
+use std::env;
+
+fn main() {
+ let ac = autocfg::new();
+
+ if ac.probe_type("i128") {
+ println!("cargo:rustc-cfg=has_i128");
+ } else if env::var_os("CARGO_FEATURE_I128").is_some() {
+ panic!("i128 support was not detected!");
+ }
+
+ // autocfg doesn't have a direct way to probe for `const fn` yet.
+ if ac.probe_rustc_version(1, 31) {
+ autocfg::emit("has_const_fn");
+ }
+
+ autocfg::rerun_path("build.rs");
+}
diff --git a/rust/vendor/num-rational/src/lib.rs b/rust/vendor/num-rational/src/lib.rs
new file mode 100644
index 0000000..4f21f57
--- /dev/null
+++ b/rust/vendor/num-rational/src/lib.rs
@@ -0,0 +1,2516 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Rational numbers
+//!
+//! ## Compatibility
+//!
+//! The `num-rational` crate is tested for rustc 1.15 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num-rational/0.2")]
+#![no_std]
+
+#[cfg(feature = "bigint")]
+extern crate num_bigint as bigint;
+#[cfg(feature = "serde")]
+extern crate serde;
+
+extern crate num_integer as integer;
+extern crate num_traits as traits;
+
+#[cfg(feature = "std")]
+#[cfg_attr(test, macro_use)]
+extern crate std;
+
+use core::cmp;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::ops::{Add, Div, Mul, Neg, Rem, Sub};
+use core::str::FromStr;
+#[cfg(feature = "std")]
+use std::error::Error;
+
+#[cfg(feature = "bigint")]
+use bigint::{BigInt, BigUint, Sign};
+
+use integer::Integer;
+use traits::float::FloatCore;
+use traits::{
+ Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Inv, Num, NumCast, One,
+ Pow, Signed, Zero,
+};
+
+/// Represents the ratio between two numbers.
+#[derive(Copy, Clone, Debug)]
+#[allow(missing_docs)]
+pub struct Ratio<T> {
+ /// Numerator.
+ numer: T,
+ /// Denominator.
+ denom: T,
+}
+
+/// Alias for a `Ratio` of machine-sized integers.
+pub type Rational = Ratio<isize>;
+/// Alias for a `Ratio` of 32-bit-sized integers.
+pub type Rational32 = Ratio<i32>;
+/// Alias for a `Ratio` of 64-bit-sized integers.
+pub type Rational64 = Ratio<i64>;
+
+#[cfg(feature = "bigint")]
+/// Alias for arbitrary precision rationals.
+pub type BigRational = Ratio<BigInt>;
+
+macro_rules! maybe_const {
+ ($( $(#[$attr:meta])* pub fn $name:ident $args:tt -> $ret:ty $body:block )*) => {$(
+ #[cfg(has_const_fn)]
+ $(#[$attr])* pub const fn $name $args -> $ret $body
+
+ #[cfg(not(has_const_fn))]
+ $(#[$attr])* pub fn $name $args -> $ret $body
+ )*}
+}
+
+/// These methods are `const` for Rust 1.31 and later.
+impl<T> Ratio<T> {
+ maybe_const! {
+ /// Creates a `Ratio` without checking for `denom == 0` or reducing.
+ #[inline]
+ pub fn new_raw(numer: T, denom: T) -> Ratio<T> {
+ Ratio {
+ numer: numer,
+ denom: denom,
+ }
+ }
+
+ /// Gets an immutable reference to the numerator.
+ #[inline]
+ pub fn numer(&self) -> &T {
+ &self.numer
+ }
+
+ /// Gets an immutable reference to the denominator.
+ #[inline]
+ pub fn denom(&self) -> &T {
+ &self.denom
+ }
+ }
+}
+
+impl<T: Clone + Integer> Ratio<T> {
+ /// Creates a new `Ratio`. Panics if `denom` is zero.
+ #[inline]
+ pub fn new(numer: T, denom: T) -> Ratio<T> {
+ let mut ret = Ratio::new_raw(numer, denom);
+ ret.reduce();
+ ret
+ }
+
+ /// Creates a `Ratio` representing the integer `t`.
+ #[inline]
+ pub fn from_integer(t: T) -> Ratio<T> {
+ Ratio::new_raw(t, One::one())
+ }
+
+ /// Converts to an integer, rounding towards zero.
+ #[inline]
+ pub fn to_integer(&self) -> T {
+ self.trunc().numer
+ }
+
+ /// Returns true if the rational number is an integer (denominator is 1).
+ #[inline]
+ pub fn is_integer(&self) -> bool {
+ self.denom.is_one()
+ }
+
+ /// Puts self into lowest terms, with denom > 0.
+ fn reduce(&mut self) {
+ if self.denom.is_zero() {
+ panic!("denominator == 0");
+ }
+ if self.numer.is_zero() {
+ self.denom.set_one();
+ return;
+ }
+ if self.numer == self.denom {
+ self.set_one();
+ return;
+ }
+ let g: T = self.numer.gcd(&self.denom);
+
+ // FIXME(#5992): assignment operator overloads
+ // self.numer /= g;
+ // T: Clone + Integer != T: Clone + NumAssign
+ self.numer = self.numer.clone() / g.clone();
+ // FIXME(#5992): assignment operator overloads
+ // self.denom /= g;
+ // T: Clone + Integer != T: Clone + NumAssign
+ self.denom = self.denom.clone() / g;
+
+ // keep denom positive!
+ if self.denom < T::zero() {
+ self.numer = T::zero() - self.numer.clone();
+ self.denom = T::zero() - self.denom.clone();
+ }
+ }
+
+ /// Returns a reduced copy of self.
+ ///
+ /// In general, it is not necessary to use this method, as the only
+ /// method of procuring a non-reduced fraction is through `new_raw`.
+ pub fn reduced(&self) -> Ratio<T> {
+ let mut ret = self.clone();
+ ret.reduce();
+ ret
+ }
+
+ /// Returns the reciprocal.
+ ///
+ /// Panics if the `Ratio` is zero.
+ #[inline]
+ pub fn recip(&self) -> Ratio<T> {
+ match self.numer.cmp(&T::zero()) {
+ cmp::Ordering::Equal => panic!("numerator == 0"),
+ cmp::Ordering::Greater => Ratio::new_raw(self.denom.clone(), self.numer.clone()),
+ cmp::Ordering::Less => Ratio::new_raw(
+ T::zero() - self.denom.clone(),
+ T::zero() - self.numer.clone(),
+ ),
+ }
+ }
+
+ /// Rounds towards minus infinity.
+ #[inline]
+ pub fn floor(&self) -> Ratio<T> {
+ if *self < Zero::zero() {
+ let one: T = One::one();
+ Ratio::from_integer(
+ (self.numer.clone() - self.denom.clone() + one) / self.denom.clone(),
+ )
+ } else {
+ Ratio::from_integer(self.numer.clone() / self.denom.clone())
+ }
+ }
+
+ /// Rounds towards plus infinity.
+ #[inline]
+ pub fn ceil(&self) -> Ratio<T> {
+ if *self < Zero::zero() {
+ Ratio::from_integer(self.numer.clone() / self.denom.clone())
+ } else {
+ let one: T = One::one();
+ Ratio::from_integer(
+ (self.numer.clone() + self.denom.clone() - one) / self.denom.clone(),
+ )
+ }
+ }
+
+ /// Rounds to the nearest integer. Rounds half-way cases away from zero.
+ #[inline]
+ pub fn round(&self) -> Ratio<T> {
+ let zero: Ratio<T> = Zero::zero();
+ let one: T = One::one();
+ let two: T = one.clone() + one.clone();
+
+ // Find unsigned fractional part of rational number
+ let mut fractional = self.fract();
+ if fractional < zero {
+ fractional = zero - fractional
+ };
+
+ // The algorithm compares the unsigned fractional part with 1/2, that
+ // is, a/b >= 1/2, or a >= b/2. For odd denominators, we use
+ // a >= (b/2)+1. This avoids overflow issues.
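+ // As a worked example (illustrative only): rounding 7/3 gives fract = 1/3;
+ // the denominator 3 is odd, and 1 >= (3/2) + 1 == 2 is false, so the result
+ // is trunc(7/3) == 2.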
+ let half_or_larger = if fractional.denom().is_even() {
+ *fractional.numer() >= fractional.denom().clone() / two.clone()
+ } else {
+ *fractional.numer() >= (fractional.denom().clone() / two.clone()) + one.clone()
+ };
+
+ if half_or_larger {
+ let one: Ratio<T> = One::one();
+ if *self >= Zero::zero() {
+ self.trunc() + one
+ } else {
+ self.trunc() - one
+ }
+ } else {
+ self.trunc()
+ }
+ }
+
+ /// Rounds towards zero.
+ #[inline]
+ pub fn trunc(&self) -> Ratio<T> {
+ Ratio::from_integer(self.numer.clone() / self.denom.clone())
+ }
+
+ /// Returns the fractional part of a number, with division rounded towards zero.
+ ///
+ /// Satisfies `self == self.trunc() + self.fract()`.
+ #[inline]
+ pub fn fract(&self) -> Ratio<T> {
+ Ratio::new_raw(self.numer.clone() % self.denom.clone(), self.denom.clone())
+ }
+}
+
+impl<T: Clone + Integer + Pow<u32, Output = T>> Ratio<T> {
+ /// Raises the `Ratio` to the power of an exponent.
+ #[inline]
+ pub fn pow(&self, expon: i32) -> Ratio<T> {
+ Pow::pow(self, expon)
+ }
+}
+
+macro_rules! pow_impl {
+ ($exp:ty) => {
+ pow_impl!($exp, $exp);
+ };
+ ($exp:ty, $unsigned:ty) => {
+ impl<T: Clone + Integer + Pow<$unsigned, Output = T>> Pow<$exp> for Ratio<T> {
+ type Output = Ratio<T>;
+ #[inline]
+ fn pow(self, expon: $exp) -> Ratio<T> {
+ match expon.cmp(&0) {
+ cmp::Ordering::Equal => One::one(),
+ cmp::Ordering::Less => {
+ let expon = expon.wrapping_abs() as $unsigned;
+ Ratio::new_raw(Pow::pow(self.denom, expon), Pow::pow(self.numer, expon))
+ }
+ cmp::Ordering::Greater => Ratio::new_raw(
+ Pow::pow(self.numer, expon as $unsigned),
+ Pow::pow(self.denom, expon as $unsigned),
+ ),
+ }
+ }
+ }
+ impl<'a, T: Clone + Integer + Pow<$unsigned, Output = T>> Pow<$exp> for &'a Ratio<T> {
+ type Output = Ratio<T>;
+ #[inline]
+ fn pow(self, expon: $exp) -> Ratio<T> {
+ Pow::pow(self.clone(), expon)
+ }
+ }
+ impl<'a, T: Clone + Integer + Pow<$unsigned, Output = T>> Pow<&'a $exp> for Ratio<T> {
+ type Output = Ratio<T>;
+ #[inline]
+ fn pow(self, expon: &'a $exp) -> Ratio<T> {
+ Pow::pow(self, *expon)
+ }
+ }
+ impl<'a, 'b, T: Clone + Integer + Pow<$unsigned, Output = T>> Pow<&'a $exp>
+ for &'b Ratio<T>
+ {
+ type Output = Ratio<T>;
+ #[inline]
+ fn pow(self, expon: &'a $exp) -> Ratio<T> {
+ Pow::pow(self.clone(), *expon)
+ }
+ }
+ };
+}
+
+// this is solely to make `pow_impl!` work
+trait WrappingAbs: Sized {
+ fn wrapping_abs(self) -> Self {
+ self
+ }
+}
+impl WrappingAbs for u8 {}
+impl WrappingAbs for u16 {}
+impl WrappingAbs for u32 {}
+impl WrappingAbs for u64 {}
+impl WrappingAbs for usize {}
+
+pow_impl!(i8, u8);
+pow_impl!(i16, u16);
+pow_impl!(i32, u32);
+pow_impl!(i64, u64);
+pow_impl!(isize, usize);
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+
+// TODO: pow_impl!(BigUint) and pow_impl!(BigInt, BigUint)
+
+#[cfg(feature = "bigint")]
+impl Ratio<BigInt> {
+ /// Converts a float into a rational number.
+ pub fn from_float<T: FloatCore>(f: T) -> Option<BigRational> {
+ if !f.is_finite() {
+ return None;
+ }
+ let (mantissa, exponent, sign) = f.integer_decode();
+ let bigint_sign = if sign == 1 { Sign::Plus } else { Sign::Minus };
+ if exponent < 0 {
+ let one: BigInt = One::one();
+ let denom: BigInt = one << ((-exponent) as usize);
+ let numer: BigUint = FromPrimitive::from_u64(mantissa).unwrap();
+ Some(Ratio::new(BigInt::from_biguint(bigint_sign, numer), denom))
+ } else {
+ let mut numer: BigUint = FromPrimitive::from_u64(mantissa).unwrap();
+ numer = numer << (exponent as usize);
+ Some(Ratio::from_integer(BigInt::from_biguint(
+ bigint_sign,
+ numer,
+ )))
+ }
+ }
+}
+
+// From integer
+impl<T> From<T> for Ratio<T>
+where
+ T: Clone + Integer,
+{
+ fn from(x: T) -> Ratio<T> {
+ Ratio::from_integer(x)
+ }
+}
+
+// From pair (through the `new` constructor)
+impl<T> From<(T, T)> for Ratio<T>
+where
+ T: Clone + Integer,
+{
+ fn from(pair: (T, T)) -> Ratio<T> {
+ Ratio::new(pair.0, pair.1)
+ }
+}
+
+// Comparisons
+
+// Mathematically, comparing a/b and c/d is the same as comparing a*d and b*c, but it's very easy
+// for those multiplications to overflow fixed-size integers, so we need to take care.
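+//
+// For example (illustrative only), with `i8` the ratios 63/64 and 125/127 cannot be
+// compared by cross-multiplying (63 * 127 would overflow), but both have floored
+// quotient 0, so the comparison below recurses on the reciprocals of the remainders.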
+
+impl<T: Clone + Integer> Ord for Ratio<T> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ // With equal denominators, the numerators can be directly compared
+ if self.denom == other.denom {
+ let ord = self.numer.cmp(&other.numer);
+ return if self.denom < T::zero() {
+ ord.reverse()
+ } else {
+ ord
+ };
+ }
+
+ // With equal numerators, the denominators can be inversely compared
+ if self.numer == other.numer {
+ if self.numer.is_zero() {
+ return cmp::Ordering::Equal;
+ }
+ let ord = self.denom.cmp(&other.denom);
+ return if self.numer < T::zero() {
+ ord
+ } else {
+ ord.reverse()
+ };
+ }
+
+ // Unfortunately, we don't have CheckedMul to try. That could sometimes avoid all the
+ // division below, or even always avoid it for BigInt and BigUint.
+ // FIXME- future breaking change to add Checked* to Integer?
+
+ // Compare as floored integers and remainders
+ let (self_int, self_rem) = self.numer.div_mod_floor(&self.denom);
+ let (other_int, other_rem) = other.numer.div_mod_floor(&other.denom);
+ match self_int.cmp(&other_int) {
+ cmp::Ordering::Greater => cmp::Ordering::Greater,
+ cmp::Ordering::Less => cmp::Ordering::Less,
+ cmp::Ordering::Equal => {
+ match (self_rem.is_zero(), other_rem.is_zero()) {
+ (true, true) => cmp::Ordering::Equal,
+ (true, false) => cmp::Ordering::Less,
+ (false, true) => cmp::Ordering::Greater,
+ (false, false) => {
+ // Compare the reciprocals of the remaining fractions in reverse
+ let self_recip = Ratio::new_raw(self.denom.clone(), self_rem);
+ let other_recip = Ratio::new_raw(other.denom.clone(), other_rem);
+ self_recip.cmp(&other_recip).reverse()
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<T: Clone + Integer> PartialOrd for Ratio<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<T: Clone + Integer> PartialEq for Ratio<T> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.cmp(other) == cmp::Ordering::Equal
+ }
+}
+
+impl<T: Clone + Integer> Eq for Ratio<T> {}
+
+// NB: We can't just `#[derive(Hash)]`, because it needs to agree
+// with `Eq` even for non-reduced ratios.
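+//
+// For example, `Ratio::new_raw(1, 2)` and `Ratio::new_raw(2, 4)` compare equal, so they
+// must hash identically; hashing the continued-fraction terms (here [0; 2] for both)
+// achieves that regardless of reduction.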
+impl<T: Clone + Integer + Hash> Hash for Ratio<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ recurse(&self.numer, &self.denom, state);
+
+ fn recurse<T: Integer + Hash, H: Hasher>(numer: &T, denom: &T, state: &mut H) {
+ if !denom.is_zero() {
+ let (int, rem) = numer.div_mod_floor(denom);
+ int.hash(state);
+ recurse(denom, &rem, state);
+ } else {
+ denom.hash(state);
+ }
+ }
+ }
+}
+
+mod iter_sum_product {
+ use core::iter::{Product, Sum};
+ use integer::Integer;
+ use traits::{One, Zero};
+ use Ratio;
+
+ impl<T: Integer + Clone> Sum for Ratio<T> {
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = Ratio<T>>,
+ {
+ iter.fold(Self::zero(), |sum, num| sum + num)
+ }
+ }
+
+ impl<'a, T: Integer + Clone> Sum<&'a Ratio<T>> for Ratio<T> {
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Ratio<T>>,
+ {
+ iter.fold(Self::zero(), |sum, num| sum + num)
+ }
+ }
+
+ impl<T: Integer + Clone> Product for Ratio<T> {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = Ratio<T>>,
+ {
+ iter.fold(Self::one(), |prod, num| prod * num)
+ }
+ }
+
+ impl<'a, T: Integer + Clone> Product<&'a Ratio<T>> for Ratio<T> {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = &'a Ratio<T>>,
+ {
+ iter.fold(Self::one(), |prod, num| prod * num)
+ }
+ }
+}
+
+mod opassign {
+ use core::ops::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign};
+
+ use integer::Integer;
+ use traits::NumAssign;
+ use Ratio;
+
+ impl<T: Clone + Integer + NumAssign> AddAssign for Ratio<T> {
+ fn add_assign(&mut self, other: Ratio<T>) {
+ if self.denom == other.denom {
+ self.numer += other.numer
+ } else {
+ let lcm = self.denom.lcm(&other.denom);
+ let lhs_numer = self.numer.clone() * (lcm.clone() / self.denom.clone());
+ let rhs_numer = other.numer * (lcm.clone() / other.denom);
+ self.numer = lhs_numer + rhs_numer;
+ self.denom = lcm;
+ }
+ self.reduce();
+ }
+ }
+
+ // (a/b) / (c/d) = (a/gcd_ac)*(d/gcd_bd) / ((c/gcd_ac)*(b/gcd_bd))
+ impl<T: Clone + Integer + NumAssign> DivAssign for Ratio<T> {
+ fn div_assign(&mut self, other: Ratio<T>) {
+ let gcd_ac = self.numer.gcd(&other.numer);
+ let gcd_bd = self.denom.gcd(&other.denom);
+ self.numer /= gcd_ac.clone();
+ self.numer *= other.denom / gcd_bd.clone();
+ self.denom /= gcd_bd;
+ self.denom *= other.numer / gcd_ac;
+ self.reduce(); //TODO: remove this line. see #8.
+ }
+ }
+
+ // a/b * c/d = (a/gcd_ad)*(c/gcd_bc) / ((d/gcd_ad)*(b/gcd_bc))
+ impl<T: Clone + Integer + NumAssign> MulAssign for Ratio<T> {
+ fn mul_assign(&mut self, other: Ratio<T>) {
+ let gcd_ad = self.numer.gcd(&other.denom);
+ let gcd_bc = self.denom.gcd(&other.numer);
+ self.numer /= gcd_ad.clone();
+ self.numer *= other.numer / gcd_bc.clone();
+ self.denom /= gcd_bc;
+ self.denom *= other.denom / gcd_ad;
+ self.reduce(); //TODO: remove this line. see #8.
+ }
+ }
+
+ impl<T: Clone + Integer + NumAssign> RemAssign for Ratio<T> {
+ fn rem_assign(&mut self, other: Ratio<T>) {
+ if self.denom == other.denom {
+ self.numer %= other.numer
+ } else {
+ let lcm = self.denom.lcm(&other.denom);
+ let lhs_numer = self.numer.clone() * (lcm.clone() / self.denom.clone());
+ let rhs_numer = other.numer * (lcm.clone() / other.denom);
+ self.numer = lhs_numer % rhs_numer;
+ self.denom = lcm;
+ }
+ self.reduce();
+ }
+ }
+
+ impl<T: Clone + Integer + NumAssign> SubAssign for Ratio<T> {
+ fn sub_assign(&mut self, other: Ratio<T>) {
+ if self.denom == other.denom {
+ self.numer -= other.numer
+ } else {
+ let lcm = self.denom.lcm(&other.denom);
+ let lhs_numer = self.numer.clone() * (lcm.clone() / self.denom.clone());
+ let rhs_numer = other.numer * (lcm.clone() / other.denom);
+ self.numer = lhs_numer - rhs_numer;
+ self.denom = lcm;
+ }
+ self.reduce();
+ }
+ }
+
+ // a/b + c/1 = (a*1 + b*c) / (b*1) = (a + b*c) / b
+ impl<T: Clone + Integer + NumAssign> AddAssign<T> for Ratio<T> {
+ fn add_assign(&mut self, other: T) {
+ self.numer += self.denom.clone() * other;
+ self.reduce();
+ }
+ }
+
+ impl<T: Clone + Integer + NumAssign> DivAssign<T> for Ratio<T> {
+ fn div_assign(&mut self, other: T) {
+ let gcd = self.numer.gcd(&other);
+ self.numer /= gcd.clone();
+ self.denom *= other / gcd;
+ self.reduce(); //TODO: remove this line. see #8.
+ }
+ }
+
+ impl<T: Clone + Integer + NumAssign> MulAssign<T> for Ratio<T> {
+ fn mul_assign(&mut self, other: T) {
+ let gcd = self.denom.gcd(&other);
+ self.denom /= gcd.clone();
+ self.numer *= other / gcd;
+ self.reduce(); //TODO: remove this line. see #8.
+ }
+ }
+
+ // a/b % c/1 = (a*1 % b*c) / (b*1) = (a % b*c) / b
+ impl<T: Clone + Integer + NumAssign> RemAssign<T> for Ratio<T> {
+ fn rem_assign(&mut self, other: T) {
+ self.numer %= self.denom.clone() * other;
+ self.reduce();
+ }
+ }
+
+ // a/b - c/1 = (a*1 - b*c) / (b*1) = (a - b*c) / b
+ impl<T: Clone + Integer + NumAssign> SubAssign<T> for Ratio<T> {
+ fn sub_assign(&mut self, other: T) {
+ self.numer -= self.denom.clone() * other;
+ self.reduce();
+ }
+ }
+
+ macro_rules! forward_op_assign {
+ (impl $imp:ident, $method:ident) => {
+ impl<'a, T: Clone + Integer + NumAssign> $imp<&'a Ratio<T>> for Ratio<T> {
+ #[inline]
+ fn $method(&mut self, other: &Ratio<T>) {
+ self.$method(other.clone())
+ }
+ }
+ impl<'a, T: Clone + Integer + NumAssign> $imp<&'a T> for Ratio<T> {
+ #[inline]
+ fn $method(&mut self, other: &T) {
+ self.$method(other.clone())
+ }
+ }
+ };
+ }
+
+ forward_op_assign!(impl AddAssign, add_assign);
+ forward_op_assign!(impl DivAssign, div_assign);
+ forward_op_assign!(impl MulAssign, mul_assign);
+ forward_op_assign!(impl RemAssign, rem_assign);
+ forward_op_assign!(impl SubAssign, sub_assign);
+}
+
+macro_rules! forward_ref_ref_binop {
+ (impl $imp:ident, $method:ident) => {
+ impl<'a, 'b, T: Clone + Integer> $imp<&'b Ratio<T>> for &'a Ratio<T> {
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn $method(self, other: &'b Ratio<T>) -> Ratio<T> {
+ self.clone().$method(other.clone())
+ }
+ }
+ impl<'a, 'b, T: Clone + Integer> $imp<&'b T> for &'a Ratio<T> {
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn $method(self, other: &'b T) -> Ratio<T> {
+ self.clone().$method(other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop {
+ (impl $imp:ident, $method:ident) => {
+ impl<'a, T> $imp<Ratio<T>> for &'a Ratio<T>
+ where
+ T: Clone + Integer,
+ {
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn $method(self, other: Ratio<T>) -> Ratio<T> {
+ self.clone().$method(other)
+ }
+ }
+ impl<'a, T> $imp<T> for &'a Ratio<T>
+ where
+ T: Clone + Integer,
+ {
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn $method(self, other: T) -> Ratio<T> {
+ self.clone().$method(other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_ref_binop {
+ (impl $imp:ident, $method:ident) => {
+ impl<'a, T> $imp<&'a Ratio<T>> for Ratio<T>
+ where
+ T: Clone + Integer,
+ {
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn $method(self, other: &Ratio<T>) -> Ratio<T> {
+ self.$method(other.clone())
+ }
+ }
+ impl<'a, T> $imp<&'a T> for Ratio<T>
+ where
+ T: Clone + Integer,
+ {
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn $method(self, other: &T) -> Ratio<T> {
+ self.$method(other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! forward_all_binop {
+ (impl $imp:ident, $method:ident) => {
+ forward_ref_ref_binop!(impl $imp, $method);
+ forward_ref_val_binop!(impl $imp, $method);
+ forward_val_ref_binop!(impl $imp, $method);
+ };
+}
+
+// Arithmetic
+forward_all_binop!(impl Mul, mul);
+// a/b * c/d = (a/gcd_ad)*(c/gcd_bc) / ((d/gcd_ad)*(b/gcd_bc))
+impl<T> Mul<Ratio<T>> for Ratio<T>
+where
+ T: Clone + Integer,
+{
+ type Output = Ratio<T>;
+ #[inline]
+ fn mul(self, rhs: Ratio<T>) -> Ratio<T> {
+ let gcd_ad = self.numer.gcd(&rhs.denom);
+ let gcd_bc = self.denom.gcd(&rhs.numer);
+ Ratio::new(
+ self.numer / gcd_ad.clone() * (rhs.numer / gcd_bc.clone()),
+ self.denom / gcd_bc * (rhs.denom / gcd_ad),
+ )
+ }
+}
+// a/b * c/1 = (a*c) / (b*1) = (a*c) / b
+impl<T> Mul<T> for Ratio<T>
+where
+ T: Clone + Integer,
+{
+ type Output = Ratio<T>;
+ #[inline]
+ fn mul(self, rhs: T) -> Ratio<T> {
+ let gcd = self.denom.gcd(&rhs);
+ Ratio::new(self.numer * (rhs / gcd.clone()), self.denom / gcd)
+ }
+}
+
+forward_all_binop!(impl Div, div);
+// (a/b) / (c/d) = (a/gcd_ac)*(d/gcd_bd) / ((c/gcd_ac)*(b/gcd_bd))
+impl<T> Div<Ratio<T>> for Ratio<T>
+where
+ T: Clone + Integer,
+{
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn div(self, rhs: Ratio<T>) -> Ratio<T> {
+ let gcd_ac = self.numer.gcd(&rhs.numer);
+ let gcd_bd = self.denom.gcd(&rhs.denom);
+ Ratio::new(
+ self.numer / gcd_ac.clone() * (rhs.denom / gcd_bd.clone()),
+ self.denom / gcd_bd * (rhs.numer / gcd_ac),
+ )
+ }
+}
+// (a/b) / (c/1) = (a*1) / (b*c) = a / (b*c)
+impl<T> Div<T> for Ratio<T>
+where
+ T: Clone + Integer,
+{
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn div(self, rhs: T) -> Ratio<T> {
+ let gcd = self.numer.gcd(&rhs);
+ Ratio::new(self.numer / gcd.clone(), self.denom * (rhs / gcd))
+ }
+}
+
+macro_rules! arith_impl {
+ (impl $imp:ident, $method:ident) => {
+ forward_all_binop!(impl $imp, $method);
+ // Abstracts a/b `op` c/d = (a*lcm/b `op` c*lcm/d)/lcm where lcm = lcm(b,d)
+ impl<T: Clone + Integer> $imp<Ratio<T>> for Ratio<T> {
+ type Output = Ratio<T>;
+ #[inline]
+ fn $method(self, rhs: Ratio<T>) -> Ratio<T> {
+ if self.denom == rhs.denom {
+ return Ratio::new(self.numer.$method(rhs.numer), rhs.denom);
+ }
+ let lcm = self.denom.lcm(&rhs.denom);
+ let lhs_numer = self.numer * (lcm.clone() / self.denom);
+ let rhs_numer = rhs.numer * (lcm.clone() / rhs.denom);
+ Ratio::new(lhs_numer.$method(rhs_numer), lcm)
+ }
+ }
+ // Abstracts the a/b `op` c/1 = (a*1 `op` b*c) / (b*1) = (a `op` b*c) / b pattern
+ impl<T: Clone + Integer> $imp<T> for Ratio<T> {
+ type Output = Ratio<T>;
+ #[inline]
+ fn $method(self, rhs: T) -> Ratio<T> {
+ Ratio::new(self.numer.$method(self.denom.clone() * rhs), self.denom)
+ }
+ }
+ };
+}
+
+arith_impl!(impl Add, add);
+arith_impl!(impl Sub, sub);
+arith_impl!(impl Rem, rem);
+
+// Like `std::try!` for Option<T>, unwrap the value or early-return None.
+// Since Rust 1.22 this can be replaced by the `?` operator.
+macro_rules! otry {
+ ($expr:expr) => {
+ match $expr {
+ Some(val) => val,
+ None => return None,
+ }
+ };
+}
+
+// a/b * c/d = (a*c)/(b*d)
+impl<T> CheckedMul for Ratio<T>
+where
+ T: Clone + Integer + CheckedMul,
+{
+ #[inline]
+ fn checked_mul(&self, rhs: &Ratio<T>) -> Option<Ratio<T>> {
+ let gcd_ad = self.numer.gcd(&rhs.denom);
+ let gcd_bc = self.denom.gcd(&rhs.numer);
+ Some(Ratio::new(
+ otry!((self.numer.clone() / gcd_ad.clone())
+ .checked_mul(&(rhs.numer.clone() / gcd_bc.clone()))),
+ otry!((self.denom.clone() / gcd_bc).checked_mul(&(rhs.denom.clone() / gcd_ad))),
+ ))
+ }
+}
+
+// (a/b) / (c/d) = (a*d)/(b*c)
+impl<T> CheckedDiv for Ratio<T>
+where
+ T: Clone + Integer + CheckedMul,
+{
+ #[inline]
+ fn checked_div(&self, rhs: &Ratio<T>) -> Option<Ratio<T>> {
+ if rhs.is_zero() {
+ return None;
+ }
+ let (numer, denom) = if self.denom == rhs.denom {
+ (self.numer.clone(), rhs.numer.clone())
+ } else if self.numer == rhs.numer {
+ (rhs.denom.clone(), self.denom.clone())
+ } else {
+ let gcd_ac = self.numer.gcd(&rhs.numer);
+ let gcd_bd = self.denom.gcd(&rhs.denom);
+ let denom = otry!((self.denom.clone() / gcd_bd.clone())
+ .checked_mul(&(rhs.numer.clone() / gcd_ac.clone())));
+ (
+ otry!((self.numer.clone() / gcd_ac).checked_mul(&(rhs.denom.clone() / gcd_bd))),
+ denom,
+ )
+ };
+ // Manual `reduce()`, avoiding sharp edges
+ if denom.is_zero() {
+ None
+ } else if numer.is_zero() {
+ Some(Self::zero())
+ } else if numer == denom {
+ Some(Self::one())
+ } else {
+ let g = numer.gcd(&denom);
+ let numer = numer / g.clone();
+ let denom = denom / g;
+ let raw = if denom < T::zero() {
+ // We need to keep denom positive, but 2's-complement MIN may
+ // overflow negation -- instead we can check multiplying -1.
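+ // (For instance with `i8`, negating -128 would overflow, while
+ // `(-128i8).checked_mul(&-1)` simply returns `None`.)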
+ let n1 = T::zero() - T::one();
+ Ratio::new_raw(otry!(numer.checked_mul(&n1)), otry!(denom.checked_mul(&n1)))
+ } else {
+ Ratio::new_raw(numer, denom)
+ };
+ Some(raw)
+ }
+ }
+}
+
+// As arith_impl! but for Checked{Add,Sub} traits
+macro_rules! checked_arith_impl {
+ (impl $imp:ident, $method:ident) => {
+ impl<T: Clone + Integer + CheckedMul + $imp> $imp for Ratio<T> {
+ #[inline]
+ fn $method(&self, rhs: &Ratio<T>) -> Option<Ratio<T>> {
+ let gcd = self.denom.clone().gcd(&rhs.denom);
+ let lcm = otry!((self.denom.clone() / gcd.clone()).checked_mul(&rhs.denom));
+ let lhs_numer = otry!((lcm.clone() / self.denom.clone()).checked_mul(&self.numer));
+ let rhs_numer = otry!((lcm.clone() / rhs.denom.clone()).checked_mul(&rhs.numer));
+ Some(Ratio::new(otry!(lhs_numer.$method(&rhs_numer)), lcm))
+ }
+ }
+ };
+}
+
+// a/b + c/d = (lcm/b*a + lcm/d*c)/lcm, where lcm = lcm(b,d)
+checked_arith_impl!(impl CheckedAdd, checked_add);
+
+// a/b - c/d = (lcm/b*a - lcm/d*c)/lcm, where lcm = lcm(b,d)
+checked_arith_impl!(impl CheckedSub, checked_sub);
+
+impl<T> Neg for Ratio<T>
+where
+ T: Clone + Integer + Neg<Output = T>,
+{
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn neg(self) -> Ratio<T> {
+ Ratio::new_raw(-self.numer, self.denom)
+ }
+}
+
+impl<'a, T> Neg for &'a Ratio<T>
+where
+ T: Clone + Integer + Neg<Output = T>,
+{
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn neg(self) -> Ratio<T> {
+ -self.clone()
+ }
+}
+
+impl<T> Inv for Ratio<T>
+where
+ T: Clone + Integer,
+{
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn inv(self) -> Ratio<T> {
+ self.recip()
+ }
+}
+
+impl<'a, T> Inv for &'a Ratio<T>
+where
+ T: Clone + Integer,
+{
+ type Output = Ratio<T>;
+
+ #[inline]
+ fn inv(self) -> Ratio<T> {
+ self.recip()
+ }
+}
+
+// Constants
+impl<T: Clone + Integer> Zero for Ratio<T> {
+ #[inline]
+ fn zero() -> Ratio<T> {
+ Ratio::new_raw(Zero::zero(), One::one())
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.numer.is_zero()
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.numer.set_zero();
+ self.denom.set_one();
+ }
+}
+
+impl<T: Clone + Integer> One for Ratio<T> {
+ #[inline]
+ fn one() -> Ratio<T> {
+ Ratio::new_raw(One::one(), One::one())
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.numer == self.denom
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.numer.set_one();
+ self.denom.set_one();
+ }
+}
+
+impl<T: Clone + Integer> Num for Ratio<T> {
+ type FromStrRadixErr = ParseRatioError;
+
+ /// Parses `numer/denom` where the numbers are in base `radix`.
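+ ///
+ /// For example (illustrative), `Ratio::<i32>::from_str_radix("ff/10", 16)` parses to
+ /// `Ratio::new(255, 16)`.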
+ fn from_str_radix(s: &str, radix: u32) -> Result<Ratio<T>, ParseRatioError> {
+ if s.splitn(2, '/').count() == 2 {
+ let mut parts = s.splitn(2, '/').map(|ss| {
+ T::from_str_radix(ss, radix).map_err(|_| ParseRatioError {
+ kind: RatioErrorKind::ParseError,
+ })
+ });
+ let numer: T = parts.next().unwrap()?;
+ let denom: T = parts.next().unwrap()?;
+ if denom.is_zero() {
+ Err(ParseRatioError {
+ kind: RatioErrorKind::ZeroDenominator,
+ })
+ } else {
+ Ok(Ratio::new(numer, denom))
+ }
+ } else {
+ Err(ParseRatioError {
+ kind: RatioErrorKind::ParseError,
+ })
+ }
+ }
+}
+
+impl<T: Clone + Integer + Signed> Signed for Ratio<T> {
+ #[inline]
+ fn abs(&self) -> Ratio<T> {
+ if self.is_negative() {
+ -self.clone()
+ } else {
+ self.clone()
+ }
+ }
+
+ #[inline]
+ fn abs_sub(&self, other: &Ratio<T>) -> Ratio<T> {
+ if *self <= *other {
+ Zero::zero()
+ } else {
+ self - other
+ }
+ }
+
+ #[inline]
+ fn signum(&self) -> Ratio<T> {
+ if self.is_positive() {
+ Self::one()
+ } else if self.is_zero() {
+ Self::zero()
+ } else {
+ -Self::one()
+ }
+ }
+
+ #[inline]
+ fn is_positive(&self) -> bool {
+ (self.numer.is_positive() && self.denom.is_positive())
+ || (self.numer.is_negative() && self.denom.is_negative())
+ }
+
+ #[inline]
+ fn is_negative(&self) -> bool {
+ (self.numer.is_negative() && self.denom.is_positive())
+ || (self.numer.is_positive() && self.denom.is_negative())
+ }
+}
+
+// String conversions
+impl<T> fmt::Display for Ratio<T>
+where
+ T: fmt::Display + Eq + One,
+{
+ /// Renders as `numer/denom`. If denom=1, renders as numer.
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ if self.denom.is_one() {
+ write!(f, "{}", self.numer)
+ } else {
+ write!(f, "{}/{}", self.numer, self.denom)
+ }
+ }
+}
+
+impl<T: FromStr + Clone + Integer> FromStr for Ratio<T> {
+ type Err = ParseRatioError;
+
+ /// Parses `numer/denom` or just `numer`.
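+ ///
+ /// For example (illustrative), `"3/4".parse::<Ratio<i32>>()` parses to `Ratio::new(3, 4)`,
+ /// and `"7".parse::<Ratio<i32>>()` parses to `Ratio::from_integer(7)`.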
+ fn from_str(s: &str) -> Result<Ratio<T>, ParseRatioError> {
+ let mut split = s.splitn(2, '/');
+
+ let n = split.next().ok_or(ParseRatioError {
+ kind: RatioErrorKind::ParseError,
+ })?;
+ let num = FromStr::from_str(n).map_err(|_| ParseRatioError {
+ kind: RatioErrorKind::ParseError,
+ })?;
+
+ let d = split.next().unwrap_or("1");
+ let den = FromStr::from_str(d).map_err(|_| ParseRatioError {
+ kind: RatioErrorKind::ParseError,
+ })?;
+
+ if Zero::is_zero(&den) {
+ Err(ParseRatioError {
+ kind: RatioErrorKind::ZeroDenominator,
+ })
+ } else {
+ Ok(Ratio::new(num, den))
+ }
+ }
+}
+
+impl<T> Into<(T, T)> for Ratio<T> {
+ fn into(self) -> (T, T) {
+ (self.numer, self.denom)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<T> serde::Serialize for Ratio<T>
+where
+ T: serde::Serialize + Clone + Integer + PartialOrd,
+{
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ (self.numer(), self.denom()).serialize(serializer)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, T> serde::Deserialize<'de> for Ratio<T>
+where
+ T: serde::Deserialize<'de> + Clone + Integer + PartialOrd,
+{
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ use serde::de::Error;
+ use serde::de::Unexpected;
+ let (numer, denom): (T, T) = try!(serde::Deserialize::deserialize(deserializer));
+ if denom.is_zero() {
+ Err(Error::invalid_value(
+ Unexpected::Signed(0),
+ &"a ratio with non-zero denominator",
+ ))
+ } else {
+ Ok(Ratio::new_raw(numer, denom))
+ }
+ }
+}
+
+// FIXME: Bubble up specific errors
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub struct ParseRatioError {
+ kind: RatioErrorKind,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum RatioErrorKind {
+ ParseError,
+ ZeroDenominator,
+}
+
+impl fmt::Display for ParseRatioError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.kind.description().fmt(f)
+ }
+}
+
+#[cfg(feature = "std")]
+impl Error for ParseRatioError {
+ fn description(&self) -> &str {
+ self.kind.description()
+ }
+}
+
+impl RatioErrorKind {
+ fn description(&self) -> &'static str {
+ match *self {
+ RatioErrorKind::ParseError => "failed to parse integer",
+ RatioErrorKind::ZeroDenominator => "zero value denominator",
+ }
+ }
+}
+
+#[cfg(feature = "bigint")]
+impl FromPrimitive for Ratio<BigInt> {
+ fn from_i64(n: i64) -> Option<Self> {
+ Some(Ratio::from_integer(n.into()))
+ }
+
+ #[cfg(has_i128)]
+ fn from_i128(n: i128) -> Option<Self> {
+ Some(Ratio::from_integer(n.into()))
+ }
+
+ fn from_u64(n: u64) -> Option<Self> {
+ Some(Ratio::from_integer(n.into()))
+ }
+
+ #[cfg(has_i128)]
+ fn from_u128(n: u128) -> Option<Self> {
+ Some(Ratio::from_integer(n.into()))
+ }
+
+ fn from_f32(n: f32) -> Option<Self> {
+ Ratio::from_float(n)
+ }
+
+ fn from_f64(n: f64) -> Option<Self> {
+ Ratio::from_float(n)
+ }
+}
+
+macro_rules! from_primitive_integer {
+ ($typ:ty, $approx:ident) => {
+ impl FromPrimitive for Ratio<$typ> {
+ fn from_i64(n: i64) -> Option<Self> {
+ <$typ as FromPrimitive>::from_i64(n).map(Ratio::from_integer)
+ }
+
+ #[cfg(has_i128)]
+ fn from_i128(n: i128) -> Option<Self> {
+ <$typ as FromPrimitive>::from_i128(n).map(Ratio::from_integer)
+ }
+
+ fn from_u64(n: u64) -> Option<Self> {
+ <$typ as FromPrimitive>::from_u64(n).map(Ratio::from_integer)
+ }
+
+ #[cfg(has_i128)]
+ fn from_u128(n: u128) -> Option<Self> {
+ <$typ as FromPrimitive>::from_u128(n).map(Ratio::from_integer)
+ }
+
+ fn from_f32(n: f32) -> Option<Self> {
+ $approx(n, 10e-20, 30)
+ }
+
+ fn from_f64(n: f64) -> Option<Self> {
+ $approx(n, 10e-20, 30)
+ }
+ }
+ };
+}
+
+from_primitive_integer!(i8, approximate_float);
+from_primitive_integer!(i16, approximate_float);
+from_primitive_integer!(i32, approximate_float);
+from_primitive_integer!(i64, approximate_float);
+#[cfg(has_i128)]
+from_primitive_integer!(i128, approximate_float);
+from_primitive_integer!(isize, approximate_float);
+
+from_primitive_integer!(u8, approximate_float_unsigned);
+from_primitive_integer!(u16, approximate_float_unsigned);
+from_primitive_integer!(u32, approximate_float_unsigned);
+from_primitive_integer!(u64, approximate_float_unsigned);
+#[cfg(has_i128)]
+from_primitive_integer!(u128, approximate_float_unsigned);
+from_primitive_integer!(usize, approximate_float_unsigned);
+
+impl<T: Integer + Signed + Bounded + NumCast + Clone> Ratio<T> {
+ pub fn approximate_float<F: FloatCore + NumCast>(f: F) -> Option<Ratio<T>> {
+ // An epsilon of 10e-20 (well below 1/2**32) seems like a good default, and 30
+ // iterations seem to work well. Might want to choose something based on the types in the future, e.g.
+ // T::max().recip() and T::bits() or something similar.
+ let epsilon = <F as NumCast>::from(10e-20).expect("Can't convert 10e-20");
+ approximate_float(f, epsilon, 30)
+ }
+}
+
+fn approximate_float<T, F>(val: F, max_error: F, max_iterations: usize) -> Option<Ratio<T>>
+where
+ T: Integer + Signed + Bounded + NumCast + Clone,
+ F: FloatCore + NumCast,
+{
+ let negative = val.is_sign_negative();
+ let abs_val = val.abs();
+
+ let r = approximate_float_unsigned(abs_val, max_error, max_iterations);
+
+ // Make negative again if needed
+ if negative {
+ r.map(|r| r.neg())
+ } else {
+ r
+ }
+}
+
+// No `Unsigned` constraint, because this also works on non-negative values of signed
+// types and is called that way from `approximate_float` above.
+fn approximate_float_unsigned<T, F>(val: F, max_error: F, max_iterations: usize) -> Option<Ratio<T>>
+where
+ T: Integer + Bounded + NumCast + Clone,
+ F: FloatCore + NumCast,
+{
+ // Continued fractions algorithm
+ // http://mathforum.org/dr.math/faq/faq.fractions.html#decfrac
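+ //
+ // For example (illustrative only), 0.75 expands as the continued fraction
+ // [0; 1, 3]: the convergents are 0/1, 1/1 and finally 3/4, at which point the
+ // error drops below `max_error` and the loop stops.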
+
+ if val < F::zero() || val.is_nan() {
+ return None;
+ }
+
+ let mut q = val;
+ let mut n0 = T::zero();
+ let mut d0 = T::one();
+ let mut n1 = T::one();
+ let mut d1 = T::zero();
+
+ let t_max = T::max_value();
+ let t_max_f = match <F as NumCast>::from(t_max.clone()) {
+ None => return None,
+ Some(t_max_f) => t_max_f,
+ };
+
+ // Fractions smaller than this have reciprocals that exceed T::MAX.
+ let epsilon = t_max_f.recip();
+
+ // Overflow
+ if q > t_max_f {
+ return None;
+ }
+
+ for _ in 0..max_iterations {
+ let a = match <T as NumCast>::from(q) {
+ None => break,
+ Some(a) => a,
+ };
+
+ let a_f = match <F as NumCast>::from(a.clone()) {
+ None => break,
+ Some(a_f) => a_f,
+ };
+ let f = q - a_f;
+
+ // Prevent overflow
+ if !a.is_zero()
+ && (n1 > t_max.clone() / a.clone()
+ || d1 > t_max.clone() / a.clone()
+ || a.clone() * n1.clone() > t_max.clone() - n0.clone()
+ || a.clone() * d1.clone() > t_max.clone() - d0.clone())
+ {
+ break;
+ }
+
+ let n = a.clone() * n1.clone() + n0.clone();
+ let d = a.clone() * d1.clone() + d0.clone();
+
+ n0 = n1;
+ d0 = d1;
+ n1 = n.clone();
+ d1 = d.clone();
+
+ // Simplify fraction. Doing so here instead of at the end
+ // allows us to get closer to the target value without overflows
+ let g = Integer::gcd(&n1, &d1);
+ if !g.is_zero() {
+ n1 = n1 / g.clone();
+ d1 = d1 / g.clone();
+ }
+
+ // Close enough?
+ let (n_f, d_f) = match (<F as NumCast>::from(n), <F as NumCast>::from(d)) {
+ (Some(n_f), Some(d_f)) => (n_f, d_f),
+ _ => break,
+ };
+ if (n_f / d_f - val).abs() < max_error {
+ break;
+ }
+
+ // Prevent division by ~0
+ if f < epsilon {
+ break;
+ }
+ q = f.recip();
+ }
+
+ // Overflow
+ if d1.is_zero() {
+ return None;
+ }
+
+ Some(Ratio::new(n1, d1))
+}
+
+#[cfg(test)]
+#[cfg(feature = "std")]
+fn hash<T: Hash>(x: &T) -> u64 {
+ use std::collections::hash_map::RandomState;
+ use std::hash::BuildHasher;
+ let mut hasher = <RandomState as BuildHasher>::Hasher::new();
+ x.hash(&mut hasher);
+ hasher.finish()
+}
+
+#[cfg(test)]
+mod test {
+ #[cfg(feature = "bigint")]
+ use super::BigRational;
+ use super::{Ratio, Rational, Rational64};
+
+ use core::f64;
+ use core::i32;
+ use core::isize;
+ use core::str::FromStr;
+ use integer::Integer;
+ use traits::{FromPrimitive, One, Pow, Signed, Zero};
+
+ pub const _0: Rational = Ratio { numer: 0, denom: 1 };
+ pub const _1: Rational = Ratio { numer: 1, denom: 1 };
+ pub const _2: Rational = Ratio { numer: 2, denom: 1 };
+ pub const _NEG2: Rational = Ratio {
+ numer: -2,
+ denom: 1,
+ };
+ pub const _1_2: Rational = Ratio { numer: 1, denom: 2 };
+ pub const _3_2: Rational = Ratio { numer: 3, denom: 2 };
+ pub const _5_2: Rational = Ratio { numer: 5, denom: 2 };
+ pub const _NEG1_2: Rational = Ratio {
+ numer: -1,
+ denom: 2,
+ };
+ pub const _1_NEG2: Rational = Ratio {
+ numer: 1,
+ denom: -2,
+ };
+ pub const _NEG1_NEG2: Rational = Ratio {
+ numer: -1,
+ denom: -2,
+ };
+ pub const _1_3: Rational = Ratio { numer: 1, denom: 3 };
+ pub const _NEG1_3: Rational = Ratio {
+ numer: -1,
+ denom: 3,
+ };
+ pub const _2_3: Rational = Ratio { numer: 2, denom: 3 };
+ pub const _NEG2_3: Rational = Ratio {
+ numer: -2,
+ denom: 3,
+ };
+ pub const _MIN: Rational = Ratio {
+ numer: isize::MIN,
+ denom: 1,
+ };
+ pub const _MIN_P1: Rational = Ratio {
+ numer: isize::MIN + 1,
+ denom: 1,
+ };
+ pub const _MAX: Rational = Ratio {
+ numer: isize::MAX,
+ denom: 1,
+ };
+ pub const _MAX_M1: Rational = Ratio {
+ numer: isize::MAX - 1,
+ denom: 1,
+ };
+
+ #[cfg(feature = "bigint")]
+ pub fn to_big(n: Rational) -> BigRational {
+ Ratio::new(
+ FromPrimitive::from_isize(n.numer).unwrap(),
+ FromPrimitive::from_isize(n.denom).unwrap(),
+ )
+ }
+ #[cfg(not(feature = "bigint"))]
+ pub fn to_big(n: Rational) -> Rational {
+ Ratio::new(
+ FromPrimitive::from_isize(n.numer).unwrap(),
+ FromPrimitive::from_isize(n.denom).unwrap(),
+ )
+ }
+
+ #[test]
+ fn test_test_constants() {
+ // check our constants are what Ratio::new etc. would make.
+ assert_eq!(_0, Zero::zero());
+ assert_eq!(_1, One::one());
+ assert_eq!(_2, Ratio::from_integer(2));
+ assert_eq!(_1_2, Ratio::new(1, 2));
+ assert_eq!(_3_2, Ratio::new(3, 2));
+ assert_eq!(_NEG1_2, Ratio::new(-1, 2));
+ assert_eq!(_2, From::from(2));
+ }
+
+ #[test]
+ fn test_new_reduce() {
+ assert_eq!(Ratio::new(2, 2), One::one());
+ assert_eq!(Ratio::new(0, i32::MIN), Zero::zero());
+ assert_eq!(Ratio::new(i32::MIN, i32::MIN), One::one());
+ }
+ #[test]
+ #[should_panic]
+ fn test_new_zero() {
+ let _a = Ratio::new(1, 0);
+ }
+
+ #[test]
+ fn test_approximate_float() {
+ assert_eq!(Ratio::from_f32(0.5f32), Some(Ratio::new(1i64, 2)));
+ assert_eq!(Ratio::from_f64(0.5f64), Some(Ratio::new(1i32, 2)));
+ assert_eq!(Ratio::from_f32(5f32), Some(Ratio::new(5i64, 1)));
+ assert_eq!(Ratio::from_f64(5f64), Some(Ratio::new(5i32, 1)));
+ assert_eq!(Ratio::from_f32(29.97f32), Some(Ratio::new(2997i64, 100)));
+ assert_eq!(Ratio::from_f32(-29.97f32), Some(Ratio::new(-2997i64, 100)));
+
+ assert_eq!(Ratio::<i8>::from_f32(63.5f32), Some(Ratio::new(127i8, 2)));
+ assert_eq!(Ratio::<i8>::from_f32(126.5f32), Some(Ratio::new(126i8, 1)));
+ assert_eq!(Ratio::<i8>::from_f32(127.0f32), Some(Ratio::new(127i8, 1)));
+ assert_eq!(Ratio::<i8>::from_f32(127.5f32), None);
+ assert_eq!(Ratio::<i8>::from_f32(-63.5f32), Some(Ratio::new(-127i8, 2)));
+ assert_eq!(
+ Ratio::<i8>::from_f32(-126.5f32),
+ Some(Ratio::new(-126i8, 1))
+ );
+ assert_eq!(
+ Ratio::<i8>::from_f32(-127.0f32),
+ Some(Ratio::new(-127i8, 1))
+ );
+ assert_eq!(Ratio::<i8>::from_f32(-127.5f32), None);
+
+ assert_eq!(Ratio::<u8>::from_f32(-127f32), None);
+ assert_eq!(Ratio::<u8>::from_f32(127f32), Some(Ratio::new(127u8, 1)));
+ assert_eq!(Ratio::<u8>::from_f32(127.5f32), Some(Ratio::new(255u8, 2)));
+ assert_eq!(Ratio::<u8>::from_f32(256f32), None);
+
+ assert_eq!(Ratio::<i64>::from_f64(-10e200), None);
+ assert_eq!(Ratio::<i64>::from_f64(10e200), None);
+ assert_eq!(Ratio::<i64>::from_f64(f64::INFINITY), None);
+ assert_eq!(Ratio::<i64>::from_f64(f64::NEG_INFINITY), None);
+ assert_eq!(Ratio::<i64>::from_f64(f64::NAN), None);
+ assert_eq!(
+ Ratio::<i64>::from_f64(f64::EPSILON),
+ Some(Ratio::new(1, 4503599627370496))
+ );
+ assert_eq!(Ratio::<i64>::from_f64(0.0), Some(Ratio::new(0, 1)));
+ assert_eq!(Ratio::<i64>::from_f64(-0.0), Some(Ratio::new(0, 1)));
+ }
+
+ #[test]
+ fn test_cmp() {
+ assert!(_0 == _0 && _1 == _1);
+ assert!(_0 != _1 && _1 != _0);
+ assert!(_0 < _1 && !(_1 < _0));
+ assert!(_1 > _0 && !(_0 > _1));
+
+ assert!(_0 <= _0 && _1 <= _1);
+ assert!(_0 <= _1 && !(_1 <= _0));
+
+ assert!(_0 >= _0 && _1 >= _1);
+ assert!(_1 >= _0 && !(_0 >= _1));
+
+ let _0_2: Rational = Ratio::new_raw(0, 2);
+ assert_eq!(_0, _0_2);
+ }
+
+ #[test]
+ fn test_cmp_overflow() {
+ use core::cmp::Ordering;
+
+ // issue #7 example:
+ let big = Ratio::new(128u8, 1);
+ let small = big.recip();
+ assert!(big > small);
+
+ // try a few that are closer together
+ // (some matching numer, some matching denom, some neither)
+ let ratios = [
+ Ratio::new(125_i8, 127_i8),
+ Ratio::new(63_i8, 64_i8),
+ Ratio::new(124_i8, 125_i8),
+ Ratio::new(125_i8, 126_i8),
+ Ratio::new(126_i8, 127_i8),
+ Ratio::new(127_i8, 126_i8),
+ ];
+
+ fn check_cmp(a: Ratio<i8>, b: Ratio<i8>, ord: Ordering) {
+ #[cfg(feature = "std")]
+ println!("comparing {} and {}", a, b);
+ assert_eq!(a.cmp(&b), ord);
+ assert_eq!(b.cmp(&a), ord.reverse());
+ }
+
+ for (i, &a) in ratios.iter().enumerate() {
+ check_cmp(a, a, Ordering::Equal);
+ check_cmp(-a, a, Ordering::Less);
+ for &b in &ratios[i + 1..] {
+ check_cmp(a, b, Ordering::Less);
+ check_cmp(-a, -b, Ordering::Greater);
+ check_cmp(a.recip(), b.recip(), Ordering::Greater);
+ check_cmp(-a.recip(), -b.recip(), Ordering::Less);
+ }
+ }
+ }
+
+ #[test]
+ fn test_to_integer() {
+ assert_eq!(_0.to_integer(), 0);
+ assert_eq!(_1.to_integer(), 1);
+ assert_eq!(_2.to_integer(), 2);
+ assert_eq!(_1_2.to_integer(), 0);
+ assert_eq!(_3_2.to_integer(), 1);
+ assert_eq!(_NEG1_2.to_integer(), 0);
+ }
+
+ #[test]
+ fn test_numer() {
+ assert_eq!(_0.numer(), &0);
+ assert_eq!(_1.numer(), &1);
+ assert_eq!(_2.numer(), &2);
+ assert_eq!(_1_2.numer(), &1);
+ assert_eq!(_3_2.numer(), &3);
+ assert_eq!(_NEG1_2.numer(), &(-1));
+ }
+ #[test]
+ fn test_denom() {
+ assert_eq!(_0.denom(), &1);
+ assert_eq!(_1.denom(), &1);
+ assert_eq!(_2.denom(), &1);
+ assert_eq!(_1_2.denom(), &2);
+ assert_eq!(_3_2.denom(), &2);
+ assert_eq!(_NEG1_2.denom(), &2);
+ }
+
+ #[test]
+ fn test_is_integer() {
+ assert!(_0.is_integer());
+ assert!(_1.is_integer());
+ assert!(_2.is_integer());
+ assert!(!_1_2.is_integer());
+ assert!(!_3_2.is_integer());
+ assert!(!_NEG1_2.is_integer());
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn test_show() {
+ use std::string::ToString;
+ assert_eq!(format!("{}", _2), "2".to_string());
+ assert_eq!(format!("{}", _1_2), "1/2".to_string());
+ assert_eq!(format!("{}", _0), "0".to_string());
+ assert_eq!(format!("{}", Ratio::from_integer(-2)), "-2".to_string());
+ }
+
+ mod arith {
+ use super::super::{Ratio, Rational};
+ use super::{to_big, _0, _1, _1_2, _2, _3_2, _5_2, _MAX, _MAX_M1, _MIN, _MIN_P1, _NEG1_2};
+ use core::fmt::Debug;
+ use integer::Integer;
+ use traits::{Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, NumAssign};
+
+ #[test]
+ fn test_add() {
+ fn test(a: Rational, b: Rational, c: Rational) {
+ assert_eq!(a + b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x += b;
+ x
+ },
+ c
+ );
+ assert_eq!(to_big(a) + to_big(b), to_big(c));
+ assert_eq!(a.checked_add(&b), Some(c));
+ assert_eq!(to_big(a).checked_add(&to_big(b)), Some(to_big(c)));
+ }
+ fn test_assign(a: Rational, b: isize, c: Rational) {
+ assert_eq!(a + b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x += b;
+ x
+ },
+ c
+ );
+ }
+
+ test(_1, _1_2, _3_2);
+ test(_1, _1, _2);
+ test(_1_2, _3_2, _2);
+ test(_1_2, _NEG1_2, _0);
+ test_assign(_1_2, 1, _3_2);
+ }
+
+ #[test]
+ fn test_add_overflow() {
+ // compares Ratio(1, T::max_value()) + Ratio(1, T::max_value())
+ // to Ratio(1+1, T::max_value()) for each integer type.
+ // Previously, this calculation would overflow.
+ fn test_add_typed_overflow<T>()
+ where
+ T: Integer + Bounded + Clone + Debug + NumAssign,
+ {
+ let _1_max = Ratio::new(T::one(), T::max_value());
+ let _2_max = Ratio::new(T::one() + T::one(), T::max_value());
+ assert_eq!(_1_max.clone() + _1_max.clone(), _2_max);
+ assert_eq!(
+ {
+ let mut tmp = _1_max.clone();
+ tmp += _1_max.clone();
+ tmp
+ },
+ _2_max.clone()
+ );
+ }
+ test_add_typed_overflow::<u8>();
+ test_add_typed_overflow::<u16>();
+ test_add_typed_overflow::<u32>();
+ test_add_typed_overflow::<u64>();
+ test_add_typed_overflow::<usize>();
+ #[cfg(has_u128)]
+ test_add_typed_overflow::<u128>();
+
+ test_add_typed_overflow::<i8>();
+ test_add_typed_overflow::<i16>();
+ test_add_typed_overflow::<i32>();
+ test_add_typed_overflow::<i64>();
+ test_add_typed_overflow::<isize>();
+ #[cfg(has_i128)]
+ test_add_typed_overflow::<i128>();
+ }
+
+ #[test]
+ fn test_sub() {
+ fn test(a: Rational, b: Rational, c: Rational) {
+ assert_eq!(a - b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x -= b;
+ x
+ },
+ c
+ );
+ assert_eq!(to_big(a) - to_big(b), to_big(c));
+ assert_eq!(a.checked_sub(&b), Some(c));
+ assert_eq!(to_big(a).checked_sub(&to_big(b)), Some(to_big(c)));
+ }
+ fn test_assign(a: Rational, b: isize, c: Rational) {
+ assert_eq!(a - b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x -= b;
+ x
+ },
+ c
+ );
+ }
+
+ test(_1, _1_2, _1_2);
+ test(_3_2, _1_2, _1);
+ test(_1, _NEG1_2, _3_2);
+ test_assign(_1_2, 1, _NEG1_2);
+ }
+
+ #[test]
+ fn test_sub_overflow() {
+ // compares Ratio(1, T::max_value()) - Ratio(1, T::max_value()) to T::zero()
+ // for each integer type. Previously, this calculation would overflow.
+ fn test_sub_typed_overflow<T>()
+ where
+ T: Integer + Bounded + Clone + Debug + NumAssign,
+ {
+ let _1_max: Ratio<T> = Ratio::new(T::one(), T::max_value());
+ assert!(T::is_zero(&(_1_max.clone() - _1_max.clone()).numer));
+ {
+ let mut tmp: Ratio<T> = _1_max.clone();
+ tmp -= _1_max.clone();
+ assert!(T::is_zero(&tmp.numer));
+ }
+ }
+ test_sub_typed_overflow::<u8>();
+ test_sub_typed_overflow::<u16>();
+ test_sub_typed_overflow::<u32>();
+ test_sub_typed_overflow::<u64>();
+ test_sub_typed_overflow::<usize>();
+ #[cfg(has_u128)]
+ test_sub_typed_overflow::<u128>();
+
+ test_sub_typed_overflow::<i8>();
+ test_sub_typed_overflow::<i16>();
+ test_sub_typed_overflow::<i32>();
+ test_sub_typed_overflow::<i64>();
+ test_sub_typed_overflow::<isize>();
+ #[cfg(has_i128)]
+ test_sub_typed_overflow::<i128>();
+ }
+
+ #[test]
+ fn test_mul() {
+ fn test(a: Rational, b: Rational, c: Rational) {
+ assert_eq!(a * b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x *= b;
+ x
+ },
+ c
+ );
+ assert_eq!(to_big(a) * to_big(b), to_big(c));
+ assert_eq!(a.checked_mul(&b), Some(c));
+ assert_eq!(to_big(a).checked_mul(&to_big(b)), Some(to_big(c)));
+ }
+ fn test_assign(a: Rational, b: isize, c: Rational) {
+ assert_eq!(a * b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x *= b;
+ x
+ },
+ c
+ );
+ }
+
+ test(_1, _1_2, _1_2);
+ test(_1_2, _3_2, Ratio::new(3, 4));
+ test(_1_2, _NEG1_2, Ratio::new(-1, 4));
+ test_assign(_1_2, 2, _1);
+ }
+
+ #[test]
+ fn test_mul_overflow() {
+ fn test_mul_typed_overflow<T>()
+ where
+ T: Integer + Bounded + Clone + Debug + NumAssign + CheckedMul,
+ {
+ let two = T::one() + T::one();
+ let _3 = T::one() + T::one() + T::one();
+
+ // 1/big * 2/3 = 1/(max/4*3), where big is max/2
+ // make big = max/2, but also divisible by 2
+ let big = T::max_value() / two.clone() / two.clone() * two.clone();
+ let _1_big: Ratio<T> = Ratio::new(T::one(), big.clone());
+ let _2_3: Ratio<T> = Ratio::new(two.clone(), _3.clone());
+ assert_eq!(None, big.clone().checked_mul(&_3.clone()));
+ let expected = Ratio::new(T::one(), big / two.clone() * _3.clone());
+ assert_eq!(expected.clone(), _1_big.clone() * _2_3.clone());
+ assert_eq!(
+ Some(expected.clone()),
+ _1_big.clone().checked_mul(&_2_3.clone())
+ );
+ assert_eq!(expected, {
+ let mut tmp = _1_big.clone();
+ tmp *= _2_3;
+ tmp
+ });
+
+ // big/3 * 3 = big/1
+ // make big = max/2, but make it indivisible by 3
+ let big = T::max_value() / two.clone() / _3.clone() * _3.clone() + T::one();
+ assert_eq!(None, big.clone().checked_mul(&_3.clone()));
+ let big_3 = Ratio::new(big.clone(), _3.clone());
+ let expected = Ratio::new(big.clone(), T::one());
+ assert_eq!(expected, big_3.clone() * _3.clone());
+ assert_eq!(expected, {
+ let mut tmp = big_3.clone();
+ tmp *= _3.clone();
+ tmp
+ });
+ }
+ test_mul_typed_overflow::<u16>();
+ test_mul_typed_overflow::<u8>();
+ test_mul_typed_overflow::<u32>();
+ test_mul_typed_overflow::<u64>();
+ test_mul_typed_overflow::<usize>();
+ #[cfg(has_u128)]
+ test_mul_typed_overflow::<u128>();
+
+ test_mul_typed_overflow::<i8>();
+ test_mul_typed_overflow::<i16>();
+ test_mul_typed_overflow::<i32>();
+ test_mul_typed_overflow::<i64>();
+ test_mul_typed_overflow::<isize>();
+ #[cfg(has_i128)]
+ test_mul_typed_overflow::<i128>();
+ }
+
+ #[test]
+ fn test_div() {
+ fn test(a: Rational, b: Rational, c: Rational) {
+ assert_eq!(a / b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x /= b;
+ x
+ },
+ c
+ );
+ assert_eq!(to_big(a) / to_big(b), to_big(c));
+ assert_eq!(a.checked_div(&b), Some(c));
+ assert_eq!(to_big(a).checked_div(&to_big(b)), Some(to_big(c)));
+ }
+ fn test_assign(a: Rational, b: isize, c: Rational) {
+ assert_eq!(a / b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x /= b;
+ x
+ },
+ c
+ );
+ }
+
+ test(_1, _1_2, _2);
+ test(_3_2, _1_2, _1 + _2);
+ test(_1, _NEG1_2, _NEG1_2 + _NEG1_2 + _NEG1_2 + _NEG1_2);
+ test_assign(_1, 2, _1_2);
+ }
+
+ #[test]
+ fn test_div_overflow() {
+ fn test_div_typed_overflow<T>()
+ where
+ T: Integer + Bounded + Clone + Debug + NumAssign + CheckedMul,
+ {
+ let two = T::one() + T::one();
+ let _3 = T::one() + T::one() + T::one();
+
+ // 1/big / 3/2 = 1/(max/4*3), where big is max/2
+ // big ~ max/2, and big is divisible by 2
+ let big = T::max_value() / two.clone() / two.clone() * two.clone();
+ assert_eq!(None, big.clone().checked_mul(&_3.clone()));
+ let _1_big: Ratio<T> = Ratio::new(T::one(), big.clone());
+ let _3_two: Ratio<T> = Ratio::new(_3.clone(), two.clone());
+ let expected = Ratio::new(T::one(), big.clone() / two.clone() * _3.clone());
+ assert_eq!(expected.clone(), _1_big.clone() / _3_two.clone());
+ assert_eq!(
+ Some(expected.clone()),
+ _1_big.clone().checked_div(&_3_two.clone())
+ );
+ assert_eq!(expected, {
+ let mut tmp = _1_big.clone();
+ tmp /= _3_two;
+ tmp
+ });
+
+ // 3/big / 3 = 1/big where big is max/2
+ // big ~ max/2, and big is not divisible by 3
+ let big = T::max_value() / two.clone() / _3.clone() * _3.clone() + T::one();
+ assert_eq!(None, big.clone().checked_mul(&_3.clone()));
+ let _3_big = Ratio::new(_3.clone(), big.clone());
+ let expected = Ratio::new(T::one(), big.clone());
+ assert_eq!(expected, _3_big.clone() / _3.clone());
+ assert_eq!(expected, {
+ let mut tmp = _3_big.clone();
+ tmp /= _3.clone();
+ tmp
+ });
+ }
+ test_div_typed_overflow::<u8>();
+ test_div_typed_overflow::<u16>();
+ test_div_typed_overflow::<u32>();
+ test_div_typed_overflow::<u64>();
+ test_div_typed_overflow::<usize>();
+ #[cfg(has_u128)]
+ test_div_typed_overflow::<u128>();
+
+ test_div_typed_overflow::<i8>();
+ test_div_typed_overflow::<i16>();
+ test_div_typed_overflow::<i32>();
+ test_div_typed_overflow::<i64>();
+ test_div_typed_overflow::<isize>();
+ #[cfg(has_i128)]
+ test_div_typed_overflow::<i128>();
+ }
+
+ #[test]
+ fn test_rem() {
+ fn test(a: Rational, b: Rational, c: Rational) {
+ assert_eq!(a % b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x %= b;
+ x
+ },
+ c
+ );
+ assert_eq!(to_big(a) % to_big(b), to_big(c))
+ }
+ fn test_assign(a: Rational, b: isize, c: Rational) {
+ assert_eq!(a % b, c);
+ assert_eq!(
+ {
+ let mut x = a;
+ x %= b;
+ x
+ },
+ c
+ );
+ }
+
+ test(_3_2, _1, _1_2);
+ test(_3_2, _1_2, _0);
+ test(_5_2, _3_2, _1);
+ test(_2, _NEG1_2, _0);
+ test(_1_2, _2, _1_2);
+ test_assign(_3_2, 1, _1_2);
+ }
+
+ #[test]
+ fn test_rem_overflow() {
+ // tests that Ratio(1,2) % Ratio(1, T::max_value()) equals 0
+ // for each integer type. Previously, this calculation would overflow.
+ fn test_rem_typed_overflow<T>()
+ where
+ T: Integer + Bounded + Clone + Debug + NumAssign,
+ {
+ let two = T::one() + T::one();
+ // value near the maximum, but divisible by two
+ let max_div2 = T::max_value() / two.clone() * two.clone();
+ let _1_max: Ratio<T> = Ratio::new(T::one(), max_div2.clone());
+ let _1_two: Ratio<T> = Ratio::new(T::one(), two);
+ assert!(T::is_zero(&(_1_two.clone() % _1_max.clone()).numer));
+ {
+ let mut tmp: Ratio<T> = _1_two.clone();
+ tmp %= _1_max.clone();
+ assert!(T::is_zero(&tmp.numer));
+ }
+ }
+ test_rem_typed_overflow::<u8>();
+ test_rem_typed_overflow::<u16>();
+ test_rem_typed_overflow::<u32>();
+ test_rem_typed_overflow::<u64>();
+ test_rem_typed_overflow::<usize>();
+ #[cfg(has_u128)]
+ test_rem_typed_overflow::<u128>();
+
+ test_rem_typed_overflow::<i8>();
+ test_rem_typed_overflow::<i16>();
+ test_rem_typed_overflow::<i32>();
+ test_rem_typed_overflow::<i64>();
+ test_rem_typed_overflow::<isize>();
+ #[cfg(has_i128)]
+ test_rem_typed_overflow::<i128>();
+ }
+
+ #[test]
+ fn test_neg() {
+ fn test(a: Rational, b: Rational) {
+ assert_eq!(-a, b);
+ assert_eq!(-to_big(a), to_big(b))
+ }
+
+ test(_0, _0);
+ test(_1_2, _NEG1_2);
+ test(-_1, _1);
+ }
+ #[test]
+ fn test_zero() {
+ assert_eq!(_0 + _0, _0);
+ assert_eq!(_0 * _0, _0);
+ assert_eq!(_0 * _1, _0);
+ assert_eq!(_0 / _NEG1_2, _0);
+ assert_eq!(_0 - _0, _0);
+ }
+ #[test]
+ #[should_panic]
+ fn test_div_0() {
+ let _a = _1 / _0;
+ }
+
+ #[test]
+ fn test_checked_failures() {
+ let big = Ratio::new(128u8, 1);
+ let small = Ratio::new(1, 128u8);
+ assert_eq!(big.checked_add(&big), None);
+ assert_eq!(small.checked_sub(&big), None);
+ assert_eq!(big.checked_mul(&big), None);
+ assert_eq!(small.checked_div(&big), None);
+ assert_eq!(_1.checked_div(&_0), None);
+ }
+
+ #[test]
+ fn test_checked_zeros() {
+ assert_eq!(_0.checked_add(&_0), Some(_0));
+ assert_eq!(_0.checked_sub(&_0), Some(_0));
+ assert_eq!(_0.checked_mul(&_0), Some(_0));
+ assert_eq!(_0.checked_div(&_0), None);
+ }
+
+ #[test]
+ fn test_checked_min() {
+ assert_eq!(_MIN.checked_add(&_MIN), None);
+ assert_eq!(_MIN.checked_sub(&_MIN), Some(_0));
+ assert_eq!(_MIN.checked_mul(&_MIN), None);
+ assert_eq!(_MIN.checked_div(&_MIN), Some(_1));
+ assert_eq!(_0.checked_add(&_MIN), Some(_MIN));
+ assert_eq!(_0.checked_sub(&_MIN), None);
+ assert_eq!(_0.checked_mul(&_MIN), Some(_0));
+ assert_eq!(_0.checked_div(&_MIN), Some(_0));
+ assert_eq!(_1.checked_add(&_MIN), Some(_MIN_P1));
+ assert_eq!(_1.checked_sub(&_MIN), None);
+ assert_eq!(_1.checked_mul(&_MIN), Some(_MIN));
+ assert_eq!(_1.checked_div(&_MIN), None);
+ assert_eq!(_MIN.checked_add(&_0), Some(_MIN));
+ assert_eq!(_MIN.checked_sub(&_0), Some(_MIN));
+ assert_eq!(_MIN.checked_mul(&_0), Some(_0));
+ assert_eq!(_MIN.checked_div(&_0), None);
+ assert_eq!(_MIN.checked_add(&_1), Some(_MIN_P1));
+ assert_eq!(_MIN.checked_sub(&_1), None);
+ assert_eq!(_MIN.checked_mul(&_1), Some(_MIN));
+ assert_eq!(_MIN.checked_div(&_1), Some(_MIN));
+ }
+
+ #[test]
+ fn test_checked_max() {
+ assert_eq!(_MAX.checked_add(&_MAX), None);
+ assert_eq!(_MAX.checked_sub(&_MAX), Some(_0));
+ assert_eq!(_MAX.checked_mul(&_MAX), None);
+ assert_eq!(_MAX.checked_div(&_MAX), Some(_1));
+ assert_eq!(_0.checked_add(&_MAX), Some(_MAX));
+ assert_eq!(_0.checked_sub(&_MAX), Some(_MIN_P1));
+ assert_eq!(_0.checked_mul(&_MAX), Some(_0));
+ assert_eq!(_0.checked_div(&_MAX), Some(_0));
+ assert_eq!(_1.checked_add(&_MAX), None);
+ assert_eq!(_1.checked_sub(&_MAX), Some(-_MAX_M1));
+ assert_eq!(_1.checked_mul(&_MAX), Some(_MAX));
+ assert_eq!(_1.checked_div(&_MAX), Some(_MAX.recip()));
+ assert_eq!(_MAX.checked_add(&_0), Some(_MAX));
+ assert_eq!(_MAX.checked_sub(&_0), Some(_MAX));
+ assert_eq!(_MAX.checked_mul(&_0), Some(_0));
+ assert_eq!(_MAX.checked_div(&_0), None);
+ assert_eq!(_MAX.checked_add(&_1), None);
+ assert_eq!(_MAX.checked_sub(&_1), Some(_MAX_M1));
+ assert_eq!(_MAX.checked_mul(&_1), Some(_MAX));
+ assert_eq!(_MAX.checked_div(&_1), Some(_MAX));
+ }
+
+ #[test]
+ fn test_checked_min_max() {
+ assert_eq!(_MIN.checked_add(&_MAX), Some(-_1));
+ assert_eq!(_MIN.checked_sub(&_MAX), None);
+ assert_eq!(_MIN.checked_mul(&_MAX), None);
+ assert_eq!(
+ _MIN.checked_div(&_MAX),
+ Some(Ratio::new(_MIN.numer, _MAX.numer))
+ );
+ assert_eq!(_MAX.checked_add(&_MIN), Some(-_1));
+ assert_eq!(_MAX.checked_sub(&_MIN), None);
+ assert_eq!(_MAX.checked_mul(&_MIN), None);
+ assert_eq!(_MAX.checked_div(&_MIN), None);
+ }
+ }
+
+ #[test]
+ fn test_round() {
+ assert_eq!(_1_3.ceil(), _1);
+ assert_eq!(_1_3.floor(), _0);
+ assert_eq!(_1_3.round(), _0);
+ assert_eq!(_1_3.trunc(), _0);
+
+ assert_eq!(_NEG1_3.ceil(), _0);
+ assert_eq!(_NEG1_3.floor(), -_1);
+ assert_eq!(_NEG1_3.round(), _0);
+ assert_eq!(_NEG1_3.trunc(), _0);
+
+ assert_eq!(_2_3.ceil(), _1);
+ assert_eq!(_2_3.floor(), _0);
+ assert_eq!(_2_3.round(), _1);
+ assert_eq!(_2_3.trunc(), _0);
+
+ assert_eq!(_NEG2_3.ceil(), _0);
+ assert_eq!(_NEG2_3.floor(), -_1);
+ assert_eq!(_NEG2_3.round(), -_1);
+ assert_eq!(_NEG2_3.trunc(), _0);
+
+ assert_eq!(_1_2.ceil(), _1);
+ assert_eq!(_1_2.floor(), _0);
+ assert_eq!(_1_2.round(), _1);
+ assert_eq!(_1_2.trunc(), _0);
+
+ assert_eq!(_NEG1_2.ceil(), _0);
+ assert_eq!(_NEG1_2.floor(), -_1);
+ assert_eq!(_NEG1_2.round(), -_1);
+ assert_eq!(_NEG1_2.trunc(), _0);
+
+ assert_eq!(_1.ceil(), _1);
+ assert_eq!(_1.floor(), _1);
+ assert_eq!(_1.round(), _1);
+ assert_eq!(_1.trunc(), _1);
+
+ // Overflow checks
+
+ let _neg1 = Ratio::from_integer(-1);
+ let _large_rat1 = Ratio::new(i32::MAX, i32::MAX - 1);
+ let _large_rat2 = Ratio::new(i32::MAX - 1, i32::MAX);
+ let _large_rat3 = Ratio::new(i32::MIN + 2, i32::MIN + 1);
+ let _large_rat4 = Ratio::new(i32::MIN + 1, i32::MIN + 2);
+ let _large_rat5 = Ratio::new(i32::MIN + 2, i32::MAX);
+ let _large_rat6 = Ratio::new(i32::MAX, i32::MIN + 2);
+ let _large_rat7 = Ratio::new(1, i32::MIN + 1);
+ let _large_rat8 = Ratio::new(1, i32::MAX);
+
+ assert_eq!(_large_rat1.round(), One::one());
+ assert_eq!(_large_rat2.round(), One::one());
+ assert_eq!(_large_rat3.round(), One::one());
+ assert_eq!(_large_rat4.round(), One::one());
+ assert_eq!(_large_rat5.round(), _neg1);
+ assert_eq!(_large_rat6.round(), _neg1);
+ assert_eq!(_large_rat7.round(), Zero::zero());
+ assert_eq!(_large_rat8.round(), Zero::zero());
+ }
+
+ #[test]
+ fn test_fract() {
+ assert_eq!(_1.fract(), _0);
+ assert_eq!(_NEG1_2.fract(), _NEG1_2);
+ assert_eq!(_1_2.fract(), _1_2);
+ assert_eq!(_3_2.fract(), _1_2);
+ }
+
+ #[test]
+ fn test_recip() {
+ assert_eq!(_1 * _1.recip(), _1);
+ assert_eq!(_2 * _2.recip(), _1);
+ assert_eq!(_1_2 * _1_2.recip(), _1);
+ assert_eq!(_3_2 * _3_2.recip(), _1);
+ assert_eq!(_NEG1_2 * _NEG1_2.recip(), _1);
+
+ assert_eq!(_3_2.recip(), _2_3);
+ assert_eq!(_NEG1_2.recip(), _NEG2);
+ assert_eq!(_NEG1_2.recip().denom(), &1);
+ }
+
+ #[test]
+ #[should_panic(expected = "== 0")]
+ fn test_recip_fail() {
+ let _a = Ratio::new(0, 1).recip();
+ }
+
+ #[test]
+ fn test_pow() {
+ fn test(r: Rational, e: i32, expected: Rational) {
+ assert_eq!(r.pow(e), expected);
+ assert_eq!(Pow::pow(r, e), expected);
+ assert_eq!(Pow::pow(r, &e), expected);
+ assert_eq!(Pow::pow(&r, e), expected);
+ assert_eq!(Pow::pow(&r, &e), expected);
+ }
+
+ test(_1_2, 2, Ratio::new(1, 4));
+ test(_1_2, -2, Ratio::new(4, 1));
+ test(_1, 1, _1);
+ test(_1, i32::MAX, _1);
+ test(_1, i32::MIN, _1);
+ test(_NEG1_2, 2, _1_2.pow(2i32));
+ test(_NEG1_2, 3, -_1_2.pow(3i32));
+ test(_3_2, 0, _1);
+ test(_3_2, -1, _3_2.recip());
+ test(_3_2, 3, Ratio::new(27, 8));
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn test_to_from_str() {
+ use std::string::{String, ToString};
+ fn test(r: Rational, s: String) {
+ assert_eq!(FromStr::from_str(&s), Ok(r));
+ assert_eq!(r.to_string(), s);
+ }
+ test(_1, "1".to_string());
+ test(_0, "0".to_string());
+ test(_1_2, "1/2".to_string());
+ test(_3_2, "3/2".to_string());
+ test(_2, "2".to_string());
+ test(_NEG1_2, "-1/2".to_string());
+ }
+ #[test]
+ fn test_from_str_fail() {
+ fn test(s: &str) {
+ let rational: Result<Rational, _> = FromStr::from_str(s);
+ assert!(rational.is_err());
+ }
+
+ let xs = ["0 /1", "abc", "", "1/", "--1/2", "3/2/1", "1/0"];
+ for &s in xs.iter() {
+ test(s);
+ }
+ }
+
+ #[cfg(feature = "bigint")]
+ #[test]
+ fn test_from_float() {
+ use traits::float::FloatCore;
+ fn test<T: FloatCore>(given: T, (numer, denom): (&str, &str)) {
+ let ratio: BigRational = Ratio::from_float(given).unwrap();
+ assert_eq!(
+ ratio,
+ Ratio::new(
+ FromStr::from_str(numer).unwrap(),
+ FromStr::from_str(denom).unwrap()
+ )
+ );
+ }
+
+ // f32
+ test(3.14159265359f32, ("13176795", "4194304"));
+ test(2f32.powf(100.), ("1267650600228229401496703205376", "1"));
+ test(-2f32.powf(100.), ("-1267650600228229401496703205376", "1"));
+ test(
+ 1.0 / 2f32.powf(100.),
+ ("1", "1267650600228229401496703205376"),
+ );
+ test(684729.48391f32, ("1369459", "2"));
+ test(-8573.5918555f32, ("-4389679", "512"));
+
+ // f64
+ test(3.14159265359f64, ("3537118876014453", "1125899906842624"));
+ test(2f64.powf(100.), ("1267650600228229401496703205376", "1"));
+ test(-2f64.powf(100.), ("-1267650600228229401496703205376", "1"));
+ test(684729.48391f64, ("367611342500051", "536870912"));
+ test(-8573.5918555f64, ("-4713381968463931", "549755813888"));
+ test(
+ 1.0 / 2f64.powf(100.),
+ ("1", "1267650600228229401496703205376"),
+ );
+ }
+
+ #[cfg(feature = "bigint")]
+ #[test]
+ fn test_from_float_fail() {
+ use core::{f32, f64};
+
+ assert_eq!(Ratio::from_float(f32::NAN), None);
+ assert_eq!(Ratio::from_float(f32::INFINITY), None);
+ assert_eq!(Ratio::from_float(f32::NEG_INFINITY), None);
+ assert_eq!(Ratio::from_float(f64::NAN), None);
+ assert_eq!(Ratio::from_float(f64::INFINITY), None);
+ assert_eq!(Ratio::from_float(f64::NEG_INFINITY), None);
+ }
+
+ #[test]
+ fn test_signed() {
+ assert_eq!(_NEG1_2.abs(), _1_2);
+ assert_eq!(_3_2.abs_sub(&_1_2), _1);
+ assert_eq!(_1_2.abs_sub(&_3_2), Zero::zero());
+ assert_eq!(_1_2.signum(), One::one());
+ assert_eq!(_NEG1_2.signum(), -<Ratio<isize>>::one());
+ assert_eq!(_0.signum(), Zero::zero());
+ assert!(_NEG1_2.is_negative());
+ assert!(_1_NEG2.is_negative());
+ assert!(!_NEG1_2.is_positive());
+ assert!(!_1_NEG2.is_positive());
+ assert!(_1_2.is_positive());
+ assert!(_NEG1_NEG2.is_positive());
+ assert!(!_1_2.is_negative());
+ assert!(!_NEG1_NEG2.is_negative());
+ assert!(!_0.is_positive());
+ assert!(!_0.is_negative());
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn test_hash() {
+ assert!(::hash(&_0) != ::hash(&_1));
+ assert!(::hash(&_0) != ::hash(&_3_2));
+
+ // a == b -> hash(a) == hash(b)
+ let a = Rational::new_raw(4, 2);
+ let b = Rational::new_raw(6, 3);
+ assert_eq!(a, b);
+ assert_eq!(::hash(&a), ::hash(&b));
+
+ let a = Rational::new_raw(123456789, 1000);
+ let b = Rational::new_raw(123456789 * 5, 5000);
+ assert_eq!(a, b);
+ assert_eq!(::hash(&a), ::hash(&b));
+ }
+
+ #[test]
+ fn test_into_pair() {
+ assert_eq!((0, 1), _0.into());
+ assert_eq!((-2, 1), _NEG2.into());
+ assert_eq!((1, -2), _1_NEG2.into());
+ }
+
+ #[test]
+ fn test_from_pair() {
+ assert_eq!(_0, Ratio::from((0, 1)));
+ assert_eq!(_1, Ratio::from((1, 1)));
+ assert_eq!(_NEG2, Ratio::from((-2, 1)));
+ assert_eq!(_1_NEG2, Ratio::from((1, -2)));
+ }
+
+ #[test]
+ fn ratio_iter_sum() {
+ // generic function to assure the iter method can be called
+ // for any Iterator with Item = Ratio<impl Integer> or Ratio<&impl Integer>
+ fn iter_sums<T: Integer + Clone>(slice: &[Ratio<T>]) -> [Ratio<T>; 3] {
+ let mut manual_sum = Ratio::new(T::zero(), T::one());
+ for ratio in slice {
+ manual_sum = manual_sum + ratio;
+ }
+ [manual_sum, slice.iter().sum(), slice.iter().cloned().sum()]
+ }
+ // collect into array so test works on no_std
+ let mut nums = [Ratio::new(0, 1); 1000];
+ for (i, r) in (0..1000).map(|n| Ratio::new(n, 500)).enumerate() {
+ nums[i] = r;
+ }
+ let sums = iter_sums(&nums[..]);
+ assert_eq!(sums[0], sums[1]);
+ assert_eq!(sums[0], sums[2]);
+ }
+
+ #[test]
+ fn ratio_iter_product() {
+ // generic function to assure the iter method can be called
+ // for any Iterator with Item = Ratio<impl Integer> or Ratio<&impl Integer>
+ fn iter_products<T: Integer + Clone>(slice: &[Ratio<T>]) -> [Ratio<T>; 3] {
+ let mut manual_prod = Ratio::new(T::one(), T::one());
+ for ratio in slice {
+ manual_prod = manual_prod * ratio;
+ }
+ [
+ manual_prod,
+ slice.iter().product(),
+ slice.iter().cloned().product(),
+ ]
+ }
+
+ // collect into array so test works on no_std
+ let mut nums = [Ratio::new(0, 1); 1000];
+ for (i, r) in (0..1000).map(|n| Ratio::new(n, 500)).enumerate() {
+ nums[i] = r;
+ }
+ let products = iter_products(&nums[..]);
+ assert_eq!(products[0], products[1]);
+ assert_eq!(products[0], products[2]);
+ }
+
+ #[test]
+ fn test_num_zero() {
+ let zero = Rational64::zero();
+ assert!(zero.is_zero());
+
+ let mut r = Rational64::new(123, 456);
+ assert!(!r.is_zero());
+ assert_eq!(&r + &zero, r);
+
+ r.set_zero();
+ assert!(r.is_zero());
+ }
+
+ #[test]
+ fn test_num_one() {
+ let one = Rational64::one();
+ assert!(one.is_one());
+
+ let mut r = Rational64::new(123, 456);
+ assert!(!r.is_one());
+ assert_eq!(&r * &one, r);
+
+ r.set_one();
+ assert!(r.is_one());
+ }
+
+ #[cfg(has_const_fn)]
+ #[test]
+ fn test_const() {
+ const N: Ratio<i32> = Ratio::new_raw(123, 456);
+ const N_NUMER: &i32 = N.numer();
+ const N_DENOM: &i32 = N.denom();
+
+ assert_eq!(N_NUMER, &123);
+ assert_eq!(N_DENOM, &456);
+
+ let r = N.reduced();
+ assert_eq!(r.numer(), &(123 / 3));
+ assert_eq!(r.denom(), &(456 / 3));
+ }
+}
diff --git a/rust/vendor/num-traits-0.1.43/.cargo-checksum.json b/rust/vendor/num-traits-0.1.43/.cargo-checksum.json
new file mode 100644
index 0000000..762feb3
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"b3ca516be2bbfdf5b76fd95da36d75b40161074f5f75f81391737b7c7cdee4c6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"b49a361bd2026fbbe3c778ab9ce1c100bbf459a5c47f6d35f55086c0c4da1285","RELEASES.md":"86c3e6703e6948bfc23c165a8b121930b8da4ffc7c38ab49d7a9e27b1655090e","bors.toml":"1c81ede536a37edd30fe4e622ff0531b25372403ac9475a5d6c50f14156565a2","ci/rustup.sh":"723d546a1ffefcdd5d4db9fb26dbf4128954e3991aff32932284cdc67fa5c85e","ci/test_full.sh":"c66b8c60fa2a643f521cd645aa9338d74dd29ee064c670557ae95b73f919b5cb","src/lib.rs":"3beb14151cfb7fe47f1c4f42869df5c4a347efac34b93b34ec776c158be78fcc"},"package":"92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31"} \ No newline at end of file
diff --git a/rust/vendor/num-traits-0.1.43/Cargo.toml b/rust/vendor/num-traits-0.1.43/Cargo.toml
new file mode 100644
index 0000000..fd68dfc
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/Cargo.toml
@@ -0,0 +1,29 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "num-traits"
+version = "0.1.43"
+authors = ["The Rust Project Developers"]
+description = "Numeric traits for generic mathematics"
+homepage = "https://github.com/rust-num/num-traits"
+documentation = "https://docs.rs/num-traits"
+readme = "README.md"
+keywords = ["mathematics", "numerics"]
+categories = ["algorithms", "science"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/rust-num/num-traits"
+
+[lib]
+doctest = false
+[dependencies.num-traits]
+version = "0.2.0"
diff --git a/rust/vendor/num-traits-0.1.43/LICENSE-APACHE b/rust/vendor/num-traits-0.1.43/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num-traits-0.1.43/LICENSE-MIT b/rust/vendor/num-traits-0.1.43/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num-traits-0.1.43/README.md b/rust/vendor/num-traits-0.1.43/README.md
new file mode 100644
index 0000000..83021f9
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/README.md
@@ -0,0 +1,33 @@
+# num-traits
+
+[![crate](https://img.shields.io/crates/v/num-traits.svg)](https://crates.io/crates/num-traits)
+[![documentation](https://docs.rs/num-traits/badge.svg)](https://docs.rs/num-traits)
+[![Travis status](https://travis-ci.org/rust-num/num-traits.svg?branch=master)](https://travis-ci.org/rust-num/num-traits)
+
+Numeric traits for generic mathematics in Rust.
+
+This version of the crate only exists to re-export compatible
+items from `num-traits` 0.2. Please consider updating!
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-traits = "0.1"
+```
+
+and this to your crate root:
+
+```rust
+extern crate num_traits;
+```
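+
+Because this release is only a re-export shim, a short sketch (illustrative
+only, not part of the upstream docs) compiles the same whether the items
+ultimately come from 0.1 or 0.2:
+
+```rust
+extern crate num_traits;
+
+use num_traits::Zero; // the familiar 0.1 path, re-exported from 0.2
+
+fn main() {
+    let x: u32 = num_traits::zero();
+    let y: u32 = num_traits::one();
+    assert!(x.is_zero());
+    assert_eq!(x + y, 1);
+}
+```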
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-traits` crate is tested for rustc 1.8 and greater.
diff --git a/rust/vendor/num-traits-0.1.43/RELEASES.md b/rust/vendor/num-traits-0.1.43/RELEASES.md
new file mode 100644
index 0000000..d2bc774
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/RELEASES.md
@@ -0,0 +1,41 @@
+# Release 0.2.0
+
+- **breaking change**: There is now a `std` feature, enabled by default, along
+ with the implication that building *without* this feature makes this a
+ `#[no_std]` crate.
+ - The `Float` and `Real` traits are only available when `std` is enabled.
+ - Otherwise, the API is unchanged, and num-traits 0.1.43 now re-exports its
+ items from num-traits 0.2 for compatibility (the [semver-trick]).
+
+**Contributors**: @cuviper, @termoshtt, @vks
+
+[semver-trick]: https://github.com/dtolnay/semver-trick
+
+# Release 0.1.43
+
+- All items are now re-exported from num-traits 0.2 for compatibility.
+
+# Release 0.1.42
+
+- [num-traits now has its own source repository][num-356] at [rust-num/num-traits][home].
+- [`ParseFloatError` now implements `Display`][22].
+- [The new `AsPrimitive` trait][17] implements generic casting with the `as` operator (see the sketch after this list).
+- [The new `CheckedShl` and `CheckedShr` traits][21] implement generic
+ support for the `checked_shl` and `checked_shr` methods on primitive integers.
+- [The new `Real` trait][23] offers a subset of `Float` functionality that may be applicable to more
+ types, with a blanket implementation for all existing `T: Float` types.
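+
+A minimal sketch of the `AsPrimitive` casting mentioned above (the `mean`
+helper is ours, purely for illustration):
+
+```rust
+use num_traits::AsPrimitive;
+
+// Accepts any numeric type that the `as` operator can cast to f64.
+fn mean<T: AsPrimitive<f64>>(values: &[T]) -> f64 {
+    let sum: f64 = values.iter().map(|&v| v.as_()).sum();
+    sum / values.len() as f64
+}
+
+fn main() {
+    assert_eq!(mean(&[1u8, 2, 3]), 2.0);
+}
+```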
+
+Thanks to @cuviper, @Enet4, @fabianschuiki, @svartalf, and @yoanlcq for their contributions!
+
+[home]: https://github.com/rust-num/num-traits
+[num-356]: https://github.com/rust-num/num/pull/356
+[17]: https://github.com/rust-num/num-traits/pull/17
+[21]: https://github.com/rust-num/num-traits/pull/21
+[22]: https://github.com/rust-num/num-traits/pull/22
+[23]: https://github.com/rust-num/num-traits/pull/23
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
diff --git a/rust/vendor/num-traits-0.1.43/bors.toml b/rust/vendor/num-traits-0.1.43/bors.toml
new file mode 100644
index 0000000..ca08e81
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/bors.toml
@@ -0,0 +1,3 @@
+status = [
+ "continuous-integration/travis-ci/push",
+]
diff --git a/rust/vendor/num-traits-0.1.43/ci/rustup.sh b/rust/vendor/num-traits-0.1.43/ci/rustup.sh
new file mode 100755
index 0000000..16483d4
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/ci/rustup.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# Use rustup to locally run the same suite of tests as .travis.yml.
+# (You should first install/update 1.8.0, stable, beta, and nightly.)
+
+set -ex
+
+export TRAVIS_RUST_VERSION
+for TRAVIS_RUST_VERSION in 1.8.0 stable beta nightly; do
+ run="rustup run $TRAVIS_RUST_VERSION"
+ $run cargo build --verbose
+ $run $PWD/ci/test_full.sh
+done
diff --git a/rust/vendor/num-traits-0.1.43/ci/test_full.sh b/rust/vendor/num-traits-0.1.43/ci/test_full.sh
new file mode 100755
index 0000000..c64610f
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/ci/test_full.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -ex
+
+echo Testing num-traits on rustc ${TRAVIS_RUST_VERSION}
+
+# num-traits should build and test everywhere.
+cargo build --verbose
+cargo test --verbose
+
+# We have no features to test...
diff --git a/rust/vendor/num-traits-0.1.43/src/lib.rs b/rust/vendor/num-traits-0.1.43/src/lib.rs
new file mode 100644
index 0000000..80075d7
--- /dev/null
+++ b/rust/vendor/num-traits-0.1.43/src/lib.rs
@@ -0,0 +1,88 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Numeric traits for generic mathematics
+//!
+//! This version of the crate only exists to re-export compatible
+//! items from num-traits 0.2. Please consider updating!
+
+#![doc(html_root_url = "https://docs.rs/num-traits/0.1")]
+
+extern crate num_traits;
+
+pub use bounds::Bounded;
+pub use float::{Float, FloatConst};
+// pub use real::Real; // NOTE: Don't do this, it breaks `use num_traits::*;`.
+pub use identities::{Zero, One, zero, one};
+pub use ops::checked::{CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, CheckedShl, CheckedShr};
+pub use ops::wrapping::{WrappingAdd, WrappingMul, WrappingSub};
+pub use ops::saturating::Saturating;
+pub use sign::{Signed, Unsigned, abs, abs_sub, signum};
+pub use cast::{AsPrimitive, FromPrimitive, ToPrimitive, NumCast, cast};
+pub use int::PrimInt;
+pub use pow::{pow, checked_pow};
+
+
+// Re-exports from num-traits 0.2!
+
+pub use num_traits::{Num, NumOps, NumRef, RefNum};
+pub use num_traits::{NumAssignOps, NumAssign, NumAssignRef};
+pub use num_traits::{FloatErrorKind, ParseFloatError};
+pub use num_traits::clamp;
+
+// Note: the module structure is explicitly re-created, rather than re-exporting en masse,
+// so we won't expose any items that may be added later in the new version.
+
+pub mod identities {
+ pub use num_traits::identities::{Zero, One, zero, one};
+}
+
+pub mod sign {
+ pub use num_traits::sign::{Signed, Unsigned, abs, abs_sub, signum};
+}
+
+pub mod ops {
+ pub mod saturating {
+ pub use num_traits::ops::saturating::Saturating;
+ }
+
+ pub mod checked {
+ pub use num_traits::ops::checked::{CheckedAdd, CheckedSub, CheckedMul, CheckedDiv,
+ CheckedShl, CheckedShr};
+ }
+
+ pub mod wrapping {
+ pub use num_traits::ops::wrapping::{WrappingAdd, WrappingMul, WrappingSub};
+ }
+}
+
+pub mod bounds {
+ pub use num_traits::bounds::Bounded;
+}
+
+pub mod float {
+ pub use num_traits::float::{Float, FloatConst};
+}
+
+pub mod real {
+ pub use num_traits::real::Real;
+}
+
+pub mod cast {
+ pub use num_traits::cast::{AsPrimitive, FromPrimitive, ToPrimitive, NumCast, cast};
+}
+
+pub mod int {
+ pub use num_traits::int::PrimInt;
+}
+
+pub mod pow {
+ pub use num_traits::pow::{pow, checked_pow};
+}
diff --git a/rust/vendor/num-traits/.cargo-checksum.json b/rust/vendor/num-traits/.cargo-checksum.json
new file mode 100644
index 0000000..b7529cc
--- /dev/null
+++ b/rust/vendor/num-traits/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"374f8137b42ea85e9e2510b28a1962956d81cd93deecb043c315f8522612df0f","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"90b7360049619cd5581aa03a7c24f1ccf1373e419f35d0967170724b1ef3c1e1","RELEASES.md":"41b2b610d9185f55688c8e227c1396ca78860f61781597ccec5bbaf2219baddc","build.rs":"d1ad566e26756269ab06f8df62d4e11161efb7ebffb1b3b16c4e1e7bf4636856","src/bounds.rs":"a79325f6a92269ad7df3d11b9ff700d2d13fb1160e28f0c091a644efc4badc2b","src/cast.rs":"d652cce6e7ec4a5e5b6532df1cd65c56a63eb453bed33f5dff02b51c5abd3ca0","src/float.rs":"90b93cfd6dc9ca679b009c0d00fd0bddb03b9dbde164aaa5a4c136e091a39699","src/identities.rs":"e9f490c03a450f0fd9bf01293a922938544515f12d4447c29cf8e9aecf686d24","src/int.rs":"ada847767ef9808a2ab9703060d6001e427bc33b72c04dcd81a9776a407a5c16","src/lib.rs":"833828950fd58e567f0a4bd80264cb65e1b1594eb0ae5dc25e72af28a9f0e9e9","src/macros.rs":"ee96613a2c73a3bef10ec7ae4d359dbf5f0b41f83e8a87c3d62ccc18dd27e498","src/ops/bytes.rs":"303a648f2ebfa37e355a22495710ef1debf3715d422e2e00f52ae311255af1b7","src/ops/checked.rs":"01e6379bf1d8eeca9dcf8bb5397e419e898e4043b57b0e2470e225bc27e81e6a","src/ops/euclid.rs":"82734bc3bff9c1110aac9015129fba5bc9f24cc6bc3be9a946f71d8b0c19586f","src/ops/inv.rs":"dd80b9bd48d815f17855a25842287942317fa49d1fdcdd655b61bd20ef927cda","src/ops/mod.rs":"2b3c396af44cd240205ba8b560625fa00c07cf387139d2c49eeb7869545d976d","src/ops/mul_add.rs":"15bd64d9420c86300c5ea7f57aa736af2ef968e4e5eaaae03f62fd277f124569","src/ops/overflowing.rs":"01f4cd27f8b0e257687170cc537188029e08e5d13e0c552b01153be5d66d5716","src/ops/saturating.rs":"165993c829c10c4f60e32c8cf34434b669ef54284d7f73dc7ec58a22ba65e6fc","src/ops/wrapping.rs":"39d7bc7e074ba7590cd29b40206baed9cb30ae70dca2b7ceb460c6ca7eaad2a8","src/pow.rs":"92c12990d2396b2dabd4ba80e80ad706c0c8fd0f1b967ab3bdd9cb738b150702","src/real.rs":"d97dd6a73704828faf835c62f12ea02362997fa9de08a70cd9b7abdea7cb10ed","src/sign.rs":"7ca11eebee94b553a33a9e53b7663ba5173db297dee523d1a2600fbbc80ef850","tests/cast.rs":"6fcc0d6653253182e979e42542fe971829cd24ab2c3a21a668e935c23d39f7c0"},"package":"39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"} \ No newline at end of file
diff --git a/rust/vendor/num-traits/Cargo.toml b/rust/vendor/num-traits/Cargo.toml
new file mode 100644
index 0000000..ef37eb7
--- /dev/null
+++ b/rust/vendor/num-traits/Cargo.toml
@@ -0,0 +1,54 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.31"
+name = "num-traits"
+version = "0.2.17"
+authors = ["The Rust Project Developers"]
+build = "build.rs"
+exclude = [
+ "/bors.toml",
+ "/ci/*",
+ "/.github/*",
+]
+description = "Numeric traits for generic mathematics"
+homepage = "https://github.com/rust-num/num-traits"
+documentation = "https://docs.rs/num-traits"
+readme = "README.md"
+keywords = [
+ "mathematics",
+ "numerics",
+]
+categories = [
+ "algorithms",
+ "science",
+ "no-std",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-num/num-traits"
+
+[package.metadata.docs.rs]
+features = ["std"]
+rustdoc-args = ["--generate-link-to-definition"]
+
+[dependencies.libm]
+version = "0.2.0"
+optional = true
+
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+default = ["std"]
+i128 = []
+std = []
diff --git a/rust/vendor/num-traits/LICENSE-APACHE b/rust/vendor/num-traits/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num-traits/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num-traits/LICENSE-MIT b/rust/vendor/num-traits/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num-traits/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num-traits/README.md b/rust/vendor/num-traits/README.md
new file mode 100644
index 0000000..fa2f297
--- /dev/null
+++ b/rust/vendor/num-traits/README.md
@@ -0,0 +1,58 @@
+# num-traits
+
+[![crate](https://img.shields.io/crates/v/num-traits.svg)](https://crates.io/crates/num-traits)
+[![documentation](https://docs.rs/num-traits/badge.svg)](https://docs.rs/num-traits)
+[![minimum rustc 1.31](https://img.shields.io/badge/rustc-1.31+-red.svg)](https://rust-lang.github.io/rfcs/2495-min-rust-version.html)
+[![build status](https://github.com/rust-num/num-traits/workflows/master/badge.svg)](https://github.com/rust-num/num-traits/actions)
+
+Numeric traits for generic mathematics in Rust.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num-traits = "0.2"
+```
+
+## Features
+
+This crate can be used without the standard library (`#![no_std]`) by disabling
+the default `std` feature. Use this in `Cargo.toml`:
+
+```toml
+[dependencies.num-traits]
+version = "0.2"
+default-features = false
+# features = ["libm"] # <--- Uncomment if you wish to use `Float` and `Real` without `std`
+```
+
+The `Float` and `Real` traits are only available when either `std` or `libm` is enabled.
+
+The `FloatCore` trait is always available. `MulAdd` and `MulAddAssign` for `f32`
+and `f64` also require `std` or `libm`, as do implementations of signed and
+floating-point exponents in `Pow`.
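+
+As a rough illustration (the helper name is ours), a generic function that
+only needs `FloatCore` builds fine with `default-features = false`:
+
+```rust
+use num_traits::float::FloatCore;
+
+// `fract`, `floor`, `abs`, etc. are provided by `FloatCore` itself, so no
+// `std` or `libm` feature is required for this function.
+fn fractional_part<T: FloatCore>(x: T) -> T {
+    x.fract()
+}
+
+fn main() {
+    assert_eq!(fractional_part(2.75f32), 0.75);
+}
+```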
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num-traits` crate is tested for rustc 1.31 and greater.
+
+## License
+
+Licensed under either of
+
+ * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
+ * [MIT license](http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
diff --git a/rust/vendor/num-traits/RELEASES.md b/rust/vendor/num-traits/RELEASES.md
new file mode 100644
index 0000000..f0c0cf8
--- /dev/null
+++ b/rust/vendor/num-traits/RELEASES.md
@@ -0,0 +1,283 @@
+# Release 0.2.17 (2023-10-07)
+
+- [Fix a doc warning about custom classes with newer rustdoc.][286]
+
+**Contributors**: @robamu
+
+[286]: https://github.com/rust-num/num-traits/pull/286
+
+# Release 0.2.16 (2023-07-20)
+
+- [Upgrade to 2018 edition, **MSRV 1.31**][240]
+- [The new `ToBytes` and `FromBytes` traits][224] convert to and from byte
+ representations of a value, with little, big, and native-endian options (sketched after this list).
+- [The new `Float::is_subnormal` method checks for subnormal values][279], with
+ a non-zero magnitude that is less than the normal minimum positive value.
+- Several other improvements to documentation and testing.
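+
+A rough sketch of the new byte-conversion traits (the helper name is ours,
+and the root re-export of `ToBytes` is assumed):
+
+```rust
+use num_traits::ToBytes;
+
+// Works for any type with a fixed-size byte representation.
+fn le_bytes_of<T: ToBytes>(x: &T) -> T::Bytes {
+    x.to_le_bytes()
+}
+
+fn main() {
+    assert_eq!(le_bytes_of(&0x1234u16), [0x34, 0x12]);
+}
+```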
+
+**Contributors**: @ctrlcctrlv, @cuviper, @flier, @GuillaumeGomez, @kaidokert,
+@rs017991, @vicsn
+
+[224]: https://github.com/rust-num/num-traits/pull/224
+[240]: https://github.com/rust-num/num-traits/pull/240
+[279]: https://github.com/rust-num/num-traits/pull/279
+
+# Release 0.2.15 (2022-05-02)
+
+- [The new `Euclid` trait calculates Euclidean division][195], where the
+ remainder is always positive or zero (sketched after this list).
+- [The new `LowerBounded` and `UpperBounded` traits][210] separately describe
+ types with lower and upper bounds. These traits are automatically implemented
+ for all fully-`Bounded` types.
+- [The new `Float::copysign` method copies the sign of the argument][207] to
+ the magnitude of `self`.
+- [The new `PrimInt::leading_ones` and `trailing_ones` methods][205] are the
+ complement of the existing methods that count zero bits.
+- [The new `PrimInt::reverse_bits` method reverses the order of all bits][202]
+ of a primitive integer.
+- [Improved `Num::from_str_radix` for floats][201], also [ignoring case][214].
+- [`Float` and `FloatCore` use more from `libm`][196] when that is enabled.
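+
+A minimal sketch of the `Euclid` trait from these notes:
+
+```rust
+use num_traits::Euclid;
+
+fn main() {
+    // Trait-qualified calls avoid the identically named inherent methods
+    // on i32; the trait methods take their arguments by reference.
+    let a = -7i32;
+    assert_eq!(Euclid::div_euclid(&a, &4), -2);
+    assert_eq!(Euclid::rem_euclid(&a, &4), 1); // never negative
+    assert_eq!(a % 4, -3); // plain `%` follows the dividend's sign
+}
+```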
+
+**Contributors**: @alion02, @clarfonthey, @cuviper, @ElectronicRU,
+@ibraheemdev, @SparrowLii, @sshilovsky, @tspiteri, @XAMPPRocky, @Xiretza
+
+[195]: https://github.com/rust-num/num-traits/pull/195
+[196]: https://github.com/rust-num/num-traits/pull/196
+[201]: https://github.com/rust-num/num-traits/pull/201
+[202]: https://github.com/rust-num/num-traits/pull/202
+[205]: https://github.com/rust-num/num-traits/pull/205
+[207]: https://github.com/rust-num/num-traits/pull/207
+[210]: https://github.com/rust-num/num-traits/pull/210
+[214]: https://github.com/rust-num/num-traits/pull/214
+
+# Release 0.2.14 (2020-10-29)
+
+- Clarify the license specification as "MIT OR Apache-2.0".
+
+**Contributors**: @cuviper
+
+# Release 0.2.13 (2020-10-29)
+
+- [The new `OverflowingAdd`, `OverflowingSub`, and `OverflowingMul` traits][180]
+ return a tuple with the operation result and a `bool` indicating overflow.
+- [The "i128" feature now overrides compiler probes for that support][185].
+ This may fix scenarios where `autocfg` probing doesn't work properly.
+- [Casts from large `f64` values to `f32` now saturate to infinity][186]. They
+ previously returned `None` because that was once thought to be undefined
+ behavior, but [rust#15536] resolved that such casts are fine.
+- [`Num::from_str_radix` documents requirements for radix support][192], which
+ are now more relaxed than previously implied. It is suggested to accept at
+ least `2..=36` without panicking, but `Err` may be returned otherwise.
+
+**Contributors**: @cuviper, @Enet4, @KaczuH, @martin-t, @newpavlov
+
+[180]: https://github.com/rust-num/num-traits/pull/180
+[185]: https://github.com/rust-num/num-traits/pull/185
+[186]: https://github.com/rust-num/num-traits/pull/186
+[192]: https://github.com/rust-num/num-traits/issues/192
+[rust#15536]: https://github.com/rust-lang/rust/issues/15536
+
+# Release 0.2.12 (2020-06-11)
+
+- [The new `WrappingNeg` trait][153] will wrap the result if it exceeds the
+ boundary of the type, e.g. `i32::MIN.wrapping_neg() == i32::MIN`.
+- [The new `SaturatingAdd`, `SaturatingSub`, and `SaturatingMul` traits][165]
+ will saturate at the numeric bounds if the operation would overflow. These
+ soft-deprecate the existing `Saturating` trait that only has addition and
+ subtraction methods.
+- [Added new constants for logarithms, `FloatConst::{LOG10_2, LOG2_10}`][171].
+
+**Contributors**: @cuviper, @ocstl, @trepetti, @vallentin
+
+[153]: https://github.com/rust-num/num-traits/pull/153
+[165]: https://github.com/rust-num/num-traits/pull/165
+[171]: https://github.com/rust-num/num-traits/pull/171
+
+# Release 0.2.11 (2020-01-09)
+
+- [Added the full circle constant τ as `FloatConst::TAU`][145].
+- [Updated the `autocfg` build dependency to 1.0][148].
+
+**Contributors**: @cuviper, @m-ou-se
+
+[145]: https://github.com/rust-num/num-traits/pull/145
+[148]: https://github.com/rust-num/num-traits/pull/148
+
+# Release 0.2.10 (2019-11-22)
+
+- [Updated the `libm` dependency to 0.2][144].
+
+**Contributors**: @CryZe
+
+[144]: https://github.com/rust-num/num-traits/pull/144
+
+# Release 0.2.9 (2019-11-12)
+
+- [A new optional `libm` dependency][99] enables the `Float` and `Real` traits
+ in `no_std` builds.
+- [The new `clamp_min` and `clamp_max`][122] limit minimum and maximum values
+ while preserving input `NAN`s.
+- [Fixed a panic in floating point `from_str_radix` on invalid signs][126].
+- Miscellaneous documentation updates.
+
+**Contributors**: @cuviper, @dingelish, @HeroicKatora, @jturner314, @ocstl,
+@Shnatsel, @termoshtt, @waywardmonkeys, @yoanlcq
+
+[99]: https://github.com/rust-num/num-traits/pull/99
+[122]: https://github.com/rust-num/num-traits/pull/122
+[126]: https://github.com/rust-num/num-traits/pull/126
+
+# Release 0.2.8 (2019-05-21)
+
+- [Fixed feature detection on `no_std` targets][116].
+
+**Contributors**: @cuviper
+
+[116]: https://github.com/rust-num/num-traits/pull/116
+
+# Release 0.2.7 (2019-05-20)
+
+- [Documented when `CheckedShl` and `CheckedShr` return `None`][90].
+- [The new `Zero::set_zero` and `One::set_one`][104] will set values to their
+ identities in place, possibly optimized better than direct assignment.
+- [Documented general features and intentions of `PrimInt`][108].
+
+**Contributors**: @cuviper, @dvdhrm, @ignatenkobrain, @lcnr, @samueltardieu
+
+[90]: https://github.com/rust-num/num-traits/pull/90
+[104]: https://github.com/rust-num/num-traits/pull/104
+[108]: https://github.com/rust-num/num-traits/pull/108
+
+# Release 0.2.6 (2018-09-13)
+
+- [Documented that `pow(0, 0)` returns `1`][79]. Mathematically, this is not
+ strictly defined, but the current behavior is a pragmatic choice that has
+ precedent in Rust `core` for the primitives and in many other languages.
+- [The new `WrappingShl` and `WrappingShr` traits][81] will wrap the shift count
+ if it exceeds the bit size of the type.
+
+**Contributors**: @cuviper, @edmccard, @meltinglava
+
+[79]: https://github.com/rust-num/num-traits/pull/79
+[81]: https://github.com/rust-num/num-traits/pull/81
+
+# Release 0.2.5 (2018-06-20)
+
+- [Documentation for `mul_add` now clarifies that it's not always faster.][70]
+- [The default methods in `FromPrimitive` and `ToPrimitive` are more robust.][73]
+
+**Contributors**: @cuviper, @frewsxcv
+
+[70]: https://github.com/rust-num/num-traits/pull/70
+[73]: https://github.com/rust-num/num-traits/pull/73
+
+# Release 0.2.4 (2018-05-11)
+
+- [Support for 128-bit integers is now automatically detected and enabled.][69]
+ Setting the `i128` crate feature now causes the build script to panic if such
+ support is not detected.
+
+**Contributors**: @cuviper
+
+[69]: https://github.com/rust-num/num-traits/pull/69
+
+# Release 0.2.3 (2018-05-10)
+
+- [The new `CheckedNeg` and `CheckedRem` traits][63] perform checked `Neg` and
+ `Rem`, returning `Some(output)` or `None` on overflow.
+- [The `no_std` implementation of `FloatCore::to_degrees` for `f32`][61] now
+ uses a constant for greater accuracy, mirroring [rust#47919]. (With `std` it
+ just calls the inherent `f32::to_degrees` in the standard library.)
+- [The new `MulAdd` and `MulAddAssign` traits][59] perform a fused
+  multiply-add. For integer types this is just a convenience, but for floating
+  point types this produces a more accurate result than the separate operations.
+- [All applicable traits are now implemented for 128-bit integers][60] starting
+ with Rust 1.26, enabled by the new `i128` crate feature. The `FromPrimitive`
+ and `ToPrimitive` traits now also have corresponding 128-bit methods, which
+ default to converting via 64-bit integers for compatibility.
+
+**Contributors**: @cuviper, @LEXUGE, @regexident, @vks
+
+[59]: https://github.com/rust-num/num-traits/pull/59
+[60]: https://github.com/rust-num/num-traits/pull/60
+[61]: https://github.com/rust-num/num-traits/pull/61
+[63]: https://github.com/rust-num/num-traits/pull/63
+[rust#47919]: https://github.com/rust-lang/rust/pull/47919
+
+# Release 0.2.2 (2018-03-18)
+
+- [Casting from floating point to integers now returns `None` on overflow][52],
+ avoiding [rustc's undefined behavior][rust-10184]. This applies to the `cast`
+ function and the traits `NumCast`, `FromPrimitive`, and `ToPrimitive`.
+
+**Contributors**: @apopiak, @cuviper, @dbarella
+
+[52]: https://github.com/rust-num/num-traits/pull/52
+[rust-10184]: https://github.com/rust-lang/rust/issues/10184
+
+
+# Release 0.2.1 (2018-03-01)
+
+- [The new `FloatCore` trait][32] offers a subset of `Float` for `#![no_std]` use.
+ [This includes everything][41] except the transcendental functions and FMA.
+- [The new `Inv` trait][37] returns the multiplicative inverse, or reciprocal.
+- [The new `Pow` trait][37] performs exponentiation, much like the existing `pow`
+ function, but with generic exponent types.
+- [The new `One::is_one` method][39] tests if a value equals 1. Implementers
+ should override this method if there's a more efficient way to check for 1,
+ rather than comparing with a temporary `one()`.
+
+**Contributors**: @clarcharr, @cuviper, @vks
+
+[32]: https://github.com/rust-num/num-traits/pull/32
+[37]: https://github.com/rust-num/num-traits/pull/37
+[39]: https://github.com/rust-num/num-traits/pull/39
+[41]: https://github.com/rust-num/num-traits/pull/41
+
+
+# Release 0.2.0 (2018-02-06)
+
+- **breaking change**: [There is now a `std` feature][30], enabled by default, along
+ with the implication that building *without* this feature makes this a
+ `#![no_std]` crate.
+ - The `Float` and `Real` traits are only available when `std` is enabled.
+ - Otherwise, the API is unchanged, and num-traits 0.1.43 now re-exports its
+ items from num-traits 0.2 for compatibility (the [semver-trick]).
+
+**Contributors**: @cuviper, @termoshtt, @vks
+
+[semver-trick]: https://github.com/dtolnay/semver-trick
+[30]: https://github.com/rust-num/num-traits/pull/30
+
+
+# Release 0.1.43 (2018-02-06)
+
+- All items are now [re-exported from num-traits 0.2][31] for compatibility.
+
+[31]: https://github.com/rust-num/num-traits/pull/31
+
+
+# Release 0.1.42 (2018-01-22)
+
+- [num-traits now has its own source repository][num-356] at [rust-num/num-traits][home].
+- [`ParseFloatError` now implements `Display`][22].
+- [The new `AsPrimitive` trait][17] implements generic casting with the `as` operator.
+- [The new `CheckedShl` and `CheckedShr` traits][21] implement generic
+ support for the `checked_shl` and `checked_shr` methods on primitive integers.
+- [The new `Real` trait][23] offers a subset of `Float` functionality that may be applicable to more
+ types, with a blanket implementation for all existing `T: Float` types.
+
+Thanks to @cuviper, @Enet4, @fabianschuiki, @svartalf, and @yoanlcq for their contributions!
+
+[home]: https://github.com/rust-num/num-traits
+[num-356]: https://github.com/rust-num/num/pull/356
+[17]: https://github.com/rust-num/num-traits/pull/17
+[21]: https://github.com/rust-num/num-traits/pull/21
+[22]: https://github.com/rust-num/num-traits/pull/22
+[23]: https://github.com/rust-num/num-traits/pull/23
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
diff --git a/rust/vendor/num-traits/build.rs b/rust/vendor/num-traits/build.rs
new file mode 100644
index 0000000..bb78328
--- /dev/null
+++ b/rust/vendor/num-traits/build.rs
@@ -0,0 +1,24 @@
+use std::env;
+
+fn main() {
+ let ac = autocfg::new();
+
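+    // `autocfg` compiles each probe expression with the active toolchain and,
+    // when the probe succeeds, emits a `cargo:rustc-cfg` flag (e.g.
+    // `has_reverse_bits`) that the library code gates on with `#[cfg(...)]`.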
+ ac.emit_expression_cfg(
+ "unsafe { 1f64.to_int_unchecked::<i32>() }",
+ "has_to_int_unchecked",
+ );
+
+ ac.emit_expression_cfg("1u32.reverse_bits()", "has_reverse_bits");
+ ac.emit_expression_cfg("1u32.trailing_ones()", "has_leading_trailing_ones");
+ ac.emit_expression_cfg("1u32.div_euclid(1u32)", "has_div_euclid");
+
+ if env::var_os("CARGO_FEATURE_STD").is_some() {
+ ac.emit_expression_cfg("1f64.copysign(-1f64)", "has_copysign");
+ }
+ ac.emit_expression_cfg("1f64.is_subnormal()", "has_is_subnormal");
+
+ ac.emit_expression_cfg("1u32.to_ne_bytes()", "has_int_to_from_bytes");
+ ac.emit_expression_cfg("3.14f64.to_ne_bytes()", "has_float_to_from_bytes");
+
+ autocfg::rerun_path("build.rs");
+}
diff --git a/rust/vendor/num-traits/src/bounds.rs b/rust/vendor/num-traits/src/bounds.rs
new file mode 100644
index 0000000..acc990e
--- /dev/null
+++ b/rust/vendor/num-traits/src/bounds.rs
@@ -0,0 +1,148 @@
+use core::num::Wrapping;
+use core::{f32, f64};
+use core::{i128, i16, i32, i64, i8, isize};
+use core::{u128, u16, u32, u64, u8, usize};
+
+/// Numbers which have upper and lower bounds
+pub trait Bounded {
+ // FIXME (#5527): These should be associated constants
+ /// Returns the smallest finite number this type can represent
+ fn min_value() -> Self;
+ /// Returns the largest finite number this type can represent
+ fn max_value() -> Self;
+}
+
+/// Numbers which have lower bounds
+pub trait LowerBounded {
+ /// Returns the smallest finite number this type can represent
+ fn min_value() -> Self;
+}
+
+// FIXME: With a major version bump, this should be a supertrait instead
+impl<T: Bounded> LowerBounded for T {
+ fn min_value() -> T {
+ Bounded::min_value()
+ }
+}
+
+/// Numbers which have upper bounds
+pub trait UpperBounded {
+ /// Returns the largest finite number this type can represent
+ fn max_value() -> Self;
+}
+
+// FIXME: With a major version bump, this should be a supertrait instead
+impl<T: Bounded> UpperBounded for T {
+ fn max_value() -> T {
+ Bounded::max_value()
+ }
+}
+
+macro_rules! bounded_impl {
+ ($t:ty, $min:expr, $max:expr) => {
+ impl Bounded for $t {
+ #[inline]
+ fn min_value() -> $t {
+ $min
+ }
+
+ #[inline]
+ fn max_value() -> $t {
+ $max
+ }
+ }
+ };
+}
+
+bounded_impl!(usize, usize::MIN, usize::MAX);
+bounded_impl!(u8, u8::MIN, u8::MAX);
+bounded_impl!(u16, u16::MIN, u16::MAX);
+bounded_impl!(u32, u32::MIN, u32::MAX);
+bounded_impl!(u64, u64::MIN, u64::MAX);
+bounded_impl!(u128, u128::MIN, u128::MAX);
+
+bounded_impl!(isize, isize::MIN, isize::MAX);
+bounded_impl!(i8, i8::MIN, i8::MAX);
+bounded_impl!(i16, i16::MIN, i16::MAX);
+bounded_impl!(i32, i32::MIN, i32::MAX);
+bounded_impl!(i64, i64::MIN, i64::MAX);
+bounded_impl!(i128, i128::MIN, i128::MAX);
+
+impl<T: Bounded> Bounded for Wrapping<T> {
+ fn min_value() -> Self {
+ Wrapping(T::min_value())
+ }
+ fn max_value() -> Self {
+ Wrapping(T::max_value())
+ }
+}
+
+bounded_impl!(f32, f32::MIN, f32::MAX);
+
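+// `for_each_tuple!` recursively invokes the given macro for every tuple arity
+// from 20 elements down to the empty tuple, so `bounded_tuple!` below provides
+// a `Bounded` impl for each of those tuple sizes.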
+macro_rules! for_each_tuple_ {
+ ( $m:ident !! ) => (
+ $m! { }
+ );
+ ( $m:ident !! $h:ident, $($t:ident,)* ) => (
+ $m! { $h $($t)* }
+ for_each_tuple_! { $m !! $($t,)* }
+ );
+}
+macro_rules! for_each_tuple {
+ ($m:ident) => {
+ for_each_tuple_! { $m !! A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, }
+ };
+}
+
+macro_rules! bounded_tuple {
+ ( $($name:ident)* ) => (
+ impl<$($name: Bounded,)*> Bounded for ($($name,)*) {
+ #[inline]
+ fn min_value() -> Self {
+ ($($name::min_value(),)*)
+ }
+ #[inline]
+ fn max_value() -> Self {
+ ($($name::max_value(),)*)
+ }
+ }
+ );
+}
+
+for_each_tuple!(bounded_tuple);
+bounded_impl!(f64, f64::MIN, f64::MAX);
+
+#[test]
+fn wrapping_bounded() {
+ macro_rules! test_wrapping_bounded {
+ ($($t:ty)+) => {
+ $(
+ assert_eq!(<Wrapping<$t> as Bounded>::min_value().0, <$t>::min_value());
+ assert_eq!(<Wrapping<$t> as Bounded>::max_value().0, <$t>::max_value());
+ )+
+ };
+ }
+
+ test_wrapping_bounded!(usize u8 u16 u32 u64 isize i8 i16 i32 i64);
+}
+
+#[test]
+fn wrapping_bounded_i128() {
+ macro_rules! test_wrapping_bounded {
+ ($($t:ty)+) => {
+ $(
+ assert_eq!(<Wrapping<$t> as Bounded>::min_value().0, <$t>::min_value());
+ assert_eq!(<Wrapping<$t> as Bounded>::max_value().0, <$t>::max_value());
+ )+
+ };
+ }
+
+ test_wrapping_bounded!(u128 i128);
+}
+
+#[test]
+fn wrapping_is_bounded() {
+ fn require_bounded<T: Bounded>(_: &T) {}
+ require_bounded(&Wrapping(42_u32));
+ require_bounded(&Wrapping(-42));
+}
diff --git a/rust/vendor/num-traits/src/cast.rs b/rust/vendor/num-traits/src/cast.rs
new file mode 100644
index 0000000..125e2e3
--- /dev/null
+++ b/rust/vendor/num-traits/src/cast.rs
@@ -0,0 +1,778 @@
+use core::mem::size_of;
+use core::num::Wrapping;
+use core::{f32, f64};
+use core::{i128, i16, i32, i64, i8, isize};
+use core::{u128, u16, u32, u64, u8, usize};
+
+/// A generic trait for converting a value to a number.
+///
+/// A value can be represented by the target type when it lies within
+/// the range of scalars supported by the target type.
+/// For example, a negative integer cannot be represented by an unsigned
+/// integer type, and an `i64` with a very high magnitude might not be
+/// convertible to an `i32`.
+/// On the other hand, conversions with possible precision loss or truncation
+/// are admitted, like an `f32` with a decimal part to an integer type, or
+/// even a large `f64` saturating to `f32` infinity.
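+///
+/// # Examples
+///
+/// A small illustration of the in-range / out-of-range behavior (the specific
+/// values are only examples):
+///
+/// ```
+/// use num_traits::ToPrimitive;
+///
+/// // In-range conversions succeed; out-of-range conversions return `None`.
+/// assert_eq!(42i32.to_u8(), Some(42u8));
+/// assert_eq!(300i32.to_u8(), None);
+/// assert_eq!((-1i32).to_u32(), None);
+/// ```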
+pub trait ToPrimitive {
+ /// Converts the value of `self` to an `isize`. If the value cannot be
+ /// represented by an `isize`, then `None` is returned.
+ #[inline]
+ fn to_isize(&self) -> Option<isize> {
+ self.to_i64().as_ref().and_then(ToPrimitive::to_isize)
+ }
+
+ /// Converts the value of `self` to an `i8`. If the value cannot be
+ /// represented by an `i8`, then `None` is returned.
+ #[inline]
+ fn to_i8(&self) -> Option<i8> {
+ self.to_i64().as_ref().and_then(ToPrimitive::to_i8)
+ }
+
+ /// Converts the value of `self` to an `i16`. If the value cannot be
+ /// represented by an `i16`, then `None` is returned.
+ #[inline]
+ fn to_i16(&self) -> Option<i16> {
+ self.to_i64().as_ref().and_then(ToPrimitive::to_i16)
+ }
+
+ /// Converts the value of `self` to an `i32`. If the value cannot be
+ /// represented by an `i32`, then `None` is returned.
+ #[inline]
+ fn to_i32(&self) -> Option<i32> {
+ self.to_i64().as_ref().and_then(ToPrimitive::to_i32)
+ }
+
+ /// Converts the value of `self` to an `i64`. If the value cannot be
+ /// represented by an `i64`, then `None` is returned.
+ fn to_i64(&self) -> Option<i64>;
+
+ /// Converts the value of `self` to an `i128`. If the value cannot be
+ /// represented by an `i128` (`i64` under the default implementation), then
+ /// `None` is returned.
+ ///
+ /// The default implementation converts through `to_i64()`. Types implementing
+ /// this trait should override this method if they can represent a greater range.
+ #[inline]
+ fn to_i128(&self) -> Option<i128> {
+ self.to_i64().map(From::from)
+ }
+
+ /// Converts the value of `self` to a `usize`. If the value cannot be
+ /// represented by a `usize`, then `None` is returned.
+ #[inline]
+ fn to_usize(&self) -> Option<usize> {
+ self.to_u64().as_ref().and_then(ToPrimitive::to_usize)
+ }
+
+ /// Converts the value of `self` to a `u8`. If the value cannot be
+ /// represented by a `u8`, then `None` is returned.
+ #[inline]
+ fn to_u8(&self) -> Option<u8> {
+ self.to_u64().as_ref().and_then(ToPrimitive::to_u8)
+ }
+
+ /// Converts the value of `self` to a `u16`. If the value cannot be
+ /// represented by a `u16`, then `None` is returned.
+ #[inline]
+ fn to_u16(&self) -> Option<u16> {
+ self.to_u64().as_ref().and_then(ToPrimitive::to_u16)
+ }
+
+ /// Converts the value of `self` to a `u32`. If the value cannot be
+ /// represented by a `u32`, then `None` is returned.
+ #[inline]
+ fn to_u32(&self) -> Option<u32> {
+ self.to_u64().as_ref().and_then(ToPrimitive::to_u32)
+ }
+
+ /// Converts the value of `self` to a `u64`. If the value cannot be
+ /// represented by a `u64`, then `None` is returned.
+ fn to_u64(&self) -> Option<u64>;
+
+ /// Converts the value of `self` to a `u128`. If the value cannot be
+ /// represented by a `u128` (`u64` under the default implementation), then
+ /// `None` is returned.
+ ///
+ /// The default implementation converts through `to_u64()`. Types implementing
+ /// this trait should override this method if they can represent a greater range.
+ #[inline]
+ fn to_u128(&self) -> Option<u128> {
+ self.to_u64().map(From::from)
+ }
+
+ /// Converts the value of `self` to an `f32`. Overflows may map to positive
+    /// or negative infinity, otherwise `None` is returned if the value cannot
+ /// be represented by an `f32`.
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ self.to_f64().as_ref().and_then(ToPrimitive::to_f32)
+ }
+
+ /// Converts the value of `self` to an `f64`. Overflows may map to positive
+    /// or negative infinity, otherwise `None` is returned if the value cannot
+ /// be represented by an `f64`.
+ ///
+ /// The default implementation tries to convert through `to_i64()`, and
+ /// failing that through `to_u64()`. Types implementing this trait should
+ /// override this method if they can represent a greater range.
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ match self.to_i64() {
+ Some(i) => i.to_f64(),
+ None => self.to_u64().as_ref().and_then(ToPrimitive::to_f64),
+ }
+ }
+}
+
+macro_rules! impl_to_primitive_int_to_int {
+ ($SrcT:ident : $( $(#[$cfg:meta])* fn $method:ident -> $DstT:ident ; )*) => {$(
+ #[inline]
+ $(#[$cfg])*
+ fn $method(&self) -> Option<$DstT> {
+ let min = $DstT::MIN as $SrcT;
+ let max = $DstT::MAX as $SrcT;
+ if size_of::<$SrcT>() <= size_of::<$DstT>() || (min <= *self && *self <= max) {
+ Some(*self as $DstT)
+ } else {
+ None
+ }
+ }
+ )*}
+}
+
+macro_rules! impl_to_primitive_int_to_uint {
+ ($SrcT:ident : $( $(#[$cfg:meta])* fn $method:ident -> $DstT:ident ; )*) => {$(
+ #[inline]
+ $(#[$cfg])*
+ fn $method(&self) -> Option<$DstT> {
+ let max = $DstT::MAX as $SrcT;
+ if 0 <= *self && (size_of::<$SrcT>() <= size_of::<$DstT>() || *self <= max) {
+ Some(*self as $DstT)
+ } else {
+ None
+ }
+ }
+ )*}
+}
+
+macro_rules! impl_to_primitive_int {
+ ($T:ident) => {
+ impl ToPrimitive for $T {
+ impl_to_primitive_int_to_int! { $T:
+ fn to_isize -> isize;
+ fn to_i8 -> i8;
+ fn to_i16 -> i16;
+ fn to_i32 -> i32;
+ fn to_i64 -> i64;
+ fn to_i128 -> i128;
+ }
+
+ impl_to_primitive_int_to_uint! { $T:
+ fn to_usize -> usize;
+ fn to_u8 -> u8;
+ fn to_u16 -> u16;
+ fn to_u32 -> u32;
+ fn to_u64 -> u64;
+ fn to_u128 -> u128;
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ Some(*self as f32)
+ }
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ Some(*self as f64)
+ }
+ }
+ };
+}
+
+impl_to_primitive_int!(isize);
+impl_to_primitive_int!(i8);
+impl_to_primitive_int!(i16);
+impl_to_primitive_int!(i32);
+impl_to_primitive_int!(i64);
+impl_to_primitive_int!(i128);
+
+macro_rules! impl_to_primitive_uint_to_int {
+ ($SrcT:ident : $( $(#[$cfg:meta])* fn $method:ident -> $DstT:ident ; )*) => {$(
+ #[inline]
+ $(#[$cfg])*
+ fn $method(&self) -> Option<$DstT> {
+ let max = $DstT::MAX as $SrcT;
+ if size_of::<$SrcT>() < size_of::<$DstT>() || *self <= max {
+ Some(*self as $DstT)
+ } else {
+ None
+ }
+ }
+ )*}
+}
+
+macro_rules! impl_to_primitive_uint_to_uint {
+ ($SrcT:ident : $( $(#[$cfg:meta])* fn $method:ident -> $DstT:ident ; )*) => {$(
+ #[inline]
+ $(#[$cfg])*
+ fn $method(&self) -> Option<$DstT> {
+ let max = $DstT::MAX as $SrcT;
+ if size_of::<$SrcT>() <= size_of::<$DstT>() || *self <= max {
+ Some(*self as $DstT)
+ } else {
+ None
+ }
+ }
+ )*}
+}
+
+macro_rules! impl_to_primitive_uint {
+ ($T:ident) => {
+ impl ToPrimitive for $T {
+ impl_to_primitive_uint_to_int! { $T:
+ fn to_isize -> isize;
+ fn to_i8 -> i8;
+ fn to_i16 -> i16;
+ fn to_i32 -> i32;
+ fn to_i64 -> i64;
+ fn to_i128 -> i128;
+ }
+
+ impl_to_primitive_uint_to_uint! { $T:
+ fn to_usize -> usize;
+ fn to_u8 -> u8;
+ fn to_u16 -> u16;
+ fn to_u32 -> u32;
+ fn to_u64 -> u64;
+ fn to_u128 -> u128;
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ Some(*self as f32)
+ }
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ Some(*self as f64)
+ }
+ }
+ };
+}
+
+impl_to_primitive_uint!(usize);
+impl_to_primitive_uint!(u8);
+impl_to_primitive_uint!(u16);
+impl_to_primitive_uint!(u32);
+impl_to_primitive_uint!(u64);
+impl_to_primitive_uint!(u128);
+
+macro_rules! impl_to_primitive_float_to_float {
+ ($SrcT:ident : $( fn $method:ident -> $DstT:ident ; )*) => {$(
+ #[inline]
+ fn $method(&self) -> Option<$DstT> {
+ // We can safely cast all values, whether NaN, +-inf, or finite.
+ // Finite values that are reducing size may saturate to +-inf.
+ Some(*self as $DstT)
+ }
+ )*}
+}
+
+#[cfg(has_to_int_unchecked)]
+macro_rules! float_to_int_unchecked {
+ // SAFETY: Must not be NaN or infinite; must be representable as the integer after truncating.
+ // We already checked that the float is in the exclusive range `(MIN-1, MAX+1)`.
+ ($float:expr => $int:ty) => {
+ unsafe { $float.to_int_unchecked::<$int>() }
+ };
+}
+
+#[cfg(not(has_to_int_unchecked))]
+macro_rules! float_to_int_unchecked {
+ ($float:expr => $int:ty) => {
+ $float as $int
+ };
+}
+
+macro_rules! impl_to_primitive_float_to_signed_int {
+ ($f:ident : $( $(#[$cfg:meta])* fn $method:ident -> $i:ident ; )*) => {$(
+ #[inline]
+ $(#[$cfg])*
+ fn $method(&self) -> Option<$i> {
+ // Float as int truncates toward zero, so we want to allow values
+ // in the exclusive range `(MIN-1, MAX+1)`.
+ if size_of::<$f>() > size_of::<$i>() {
+ // With a larger size, we can represent the range exactly.
+ const MIN_M1: $f = $i::MIN as $f - 1.0;
+ const MAX_P1: $f = $i::MAX as $f + 1.0;
+ if *self > MIN_M1 && *self < MAX_P1 {
+ return Some(float_to_int_unchecked!(*self => $i));
+ }
+ } else {
+ // We can't represent `MIN-1` exactly, but there's no fractional part
+ // at this magnitude, so we can just use a `MIN` inclusive boundary.
+ const MIN: $f = $i::MIN as $f;
+ // We can't represent `MAX` exactly, but it will round up to exactly
+ // `MAX+1` (a power of two) when we cast it.
+ const MAX_P1: $f = $i::MAX as $f;
+ if *self >= MIN && *self < MAX_P1 {
+ return Some(float_to_int_unchecked!(*self => $i));
+ }
+ }
+ None
+ }
+ )*}
+}
+
+macro_rules! impl_to_primitive_float_to_unsigned_int {
+ ($f:ident : $( $(#[$cfg:meta])* fn $method:ident -> $u:ident ; )*) => {$(
+ #[inline]
+ $(#[$cfg])*
+ fn $method(&self) -> Option<$u> {
+ // Float as int truncates toward zero, so we want to allow values
+ // in the exclusive range `(-1, MAX+1)`.
+ if size_of::<$f>() > size_of::<$u>() {
+ // With a larger size, we can represent the range exactly.
+ const MAX_P1: $f = $u::MAX as $f + 1.0;
+ if *self > -1.0 && *self < MAX_P1 {
+ return Some(float_to_int_unchecked!(*self => $u));
+ }
+ } else {
+ // We can't represent `MAX` exactly, but it will round up to exactly
+ // `MAX+1` (a power of two) when we cast it.
+ // (`u128::MAX as f32` is infinity, but this is still ok.)
+ const MAX_P1: $f = $u::MAX as $f;
+ if *self > -1.0 && *self < MAX_P1 {
+ return Some(float_to_int_unchecked!(*self => $u));
+ }
+ }
+ None
+ }
+ )*}
+}
+
+macro_rules! impl_to_primitive_float {
+ ($T:ident) => {
+ impl ToPrimitive for $T {
+ impl_to_primitive_float_to_signed_int! { $T:
+ fn to_isize -> isize;
+ fn to_i8 -> i8;
+ fn to_i16 -> i16;
+ fn to_i32 -> i32;
+ fn to_i64 -> i64;
+ fn to_i128 -> i128;
+ }
+
+ impl_to_primitive_float_to_unsigned_int! { $T:
+ fn to_usize -> usize;
+ fn to_u8 -> u8;
+ fn to_u16 -> u16;
+ fn to_u32 -> u32;
+ fn to_u64 -> u64;
+ fn to_u128 -> u128;
+ }
+
+ impl_to_primitive_float_to_float! { $T:
+ fn to_f32 -> f32;
+ fn to_f64 -> f64;
+ }
+ }
+ };
+}
+
+impl_to_primitive_float!(f32);
+impl_to_primitive_float!(f64);
+
+/// A generic trait for converting a number to a value.
+///
+/// A value can be represented by the target type when it lies within
+/// the range of scalars supported by the target type.
+/// For example, a negative integer cannot be represented by an unsigned
+/// integer type, and an `i64` with a very high magnitude might not be
+/// convertible to an `i32`.
+/// On the other hand, conversions with possible precision loss or truncation
+/// are admitted, like an `f32` with a decimal part to an integer type, or
+/// even a large `f64` saturating to `f32` infinity.
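+///
+/// # Examples
+///
+/// A small illustration (the specific values are only examples):
+///
+/// ```
+/// use num_traits::FromPrimitive;
+///
+/// // In-range values convert; out-of-range values return `None`.
+/// assert_eq!(u8::from_i32(42), Some(42u8));
+/// assert_eq!(u8::from_i32(300), None);
+/// assert_eq!(i32::from_f64(2.9), Some(2)); // truncates toward zero
+/// ```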
+pub trait FromPrimitive: Sized {
+ /// Converts an `isize` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ #[inline]
+ fn from_isize(n: isize) -> Option<Self> {
+ n.to_i64().and_then(FromPrimitive::from_i64)
+ }
+
+ /// Converts an `i8` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ #[inline]
+ fn from_i8(n: i8) -> Option<Self> {
+ FromPrimitive::from_i64(From::from(n))
+ }
+
+ /// Converts an `i16` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ #[inline]
+ fn from_i16(n: i16) -> Option<Self> {
+ FromPrimitive::from_i64(From::from(n))
+ }
+
+ /// Converts an `i32` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ #[inline]
+ fn from_i32(n: i32) -> Option<Self> {
+ FromPrimitive::from_i64(From::from(n))
+ }
+
+ /// Converts an `i64` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ fn from_i64(n: i64) -> Option<Self>;
+
+ /// Converts an `i128` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ ///
+ /// The default implementation converts through `from_i64()`. Types implementing
+ /// this trait should override this method if they can represent a greater range.
+ #[inline]
+ fn from_i128(n: i128) -> Option<Self> {
+ n.to_i64().and_then(FromPrimitive::from_i64)
+ }
+
+ /// Converts a `usize` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ #[inline]
+ fn from_usize(n: usize) -> Option<Self> {
+ n.to_u64().and_then(FromPrimitive::from_u64)
+ }
+
+ /// Converts an `u8` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ #[inline]
+ fn from_u8(n: u8) -> Option<Self> {
+ FromPrimitive::from_u64(From::from(n))
+ }
+
+ /// Converts an `u16` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ #[inline]
+ fn from_u16(n: u16) -> Option<Self> {
+ FromPrimitive::from_u64(From::from(n))
+ }
+
+ /// Converts an `u32` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ #[inline]
+ fn from_u32(n: u32) -> Option<Self> {
+ FromPrimitive::from_u64(From::from(n))
+ }
+
+ /// Converts an `u64` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ fn from_u64(n: u64) -> Option<Self>;
+
+ /// Converts an `u128` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ ///
+ /// The default implementation converts through `from_u64()`. Types implementing
+ /// this trait should override this method if they can represent a greater range.
+ #[inline]
+ fn from_u128(n: u128) -> Option<Self> {
+ n.to_u64().and_then(FromPrimitive::from_u64)
+ }
+
+ /// Converts a `f32` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ #[inline]
+ fn from_f32(n: f32) -> Option<Self> {
+ FromPrimitive::from_f64(From::from(n))
+ }
+
+ /// Converts a `f64` to return an optional value of this type. If the
+ /// value cannot be represented by this type, then `None` is returned.
+ ///
+ /// The default implementation tries to convert through `from_i64()`, and
+ /// failing that through `from_u64()`. Types implementing this trait should
+ /// override this method if they can represent a greater range.
+ #[inline]
+ fn from_f64(n: f64) -> Option<Self> {
+ match n.to_i64() {
+ Some(i) => FromPrimitive::from_i64(i),
+ None => n.to_u64().and_then(FromPrimitive::from_u64),
+ }
+ }
+}
+
+macro_rules! impl_from_primitive {
+ ($T:ty, $to_ty:ident) => {
+ #[allow(deprecated)]
+ impl FromPrimitive for $T {
+ #[inline]
+ fn from_isize(n: isize) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_i8(n: i8) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_i16(n: i16) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_i32(n: i32) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_i64(n: i64) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_i128(n: i128) -> Option<$T> {
+ n.$to_ty()
+ }
+
+ #[inline]
+ fn from_usize(n: usize) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_u8(n: u8) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_u16(n: u16) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_u32(n: u32) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_u64(n: u64) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_u128(n: u128) -> Option<$T> {
+ n.$to_ty()
+ }
+
+ #[inline]
+ fn from_f32(n: f32) -> Option<$T> {
+ n.$to_ty()
+ }
+ #[inline]
+ fn from_f64(n: f64) -> Option<$T> {
+ n.$to_ty()
+ }
+ }
+ };
+}
+
+impl_from_primitive!(isize, to_isize);
+impl_from_primitive!(i8, to_i8);
+impl_from_primitive!(i16, to_i16);
+impl_from_primitive!(i32, to_i32);
+impl_from_primitive!(i64, to_i64);
+impl_from_primitive!(i128, to_i128);
+impl_from_primitive!(usize, to_usize);
+impl_from_primitive!(u8, to_u8);
+impl_from_primitive!(u16, to_u16);
+impl_from_primitive!(u32, to_u32);
+impl_from_primitive!(u64, to_u64);
+impl_from_primitive!(u128, to_u128);
+impl_from_primitive!(f32, to_f32);
+impl_from_primitive!(f64, to_f64);
+
+macro_rules! impl_to_primitive_wrapping {
+ ($( $(#[$cfg:meta])* fn $method:ident -> $i:ident ; )*) => {$(
+ #[inline]
+ $(#[$cfg])*
+ fn $method(&self) -> Option<$i> {
+ (self.0).$method()
+ }
+ )*}
+}
+
+impl<T: ToPrimitive> ToPrimitive for Wrapping<T> {
+ impl_to_primitive_wrapping! {
+ fn to_isize -> isize;
+ fn to_i8 -> i8;
+ fn to_i16 -> i16;
+ fn to_i32 -> i32;
+ fn to_i64 -> i64;
+ fn to_i128 -> i128;
+
+ fn to_usize -> usize;
+ fn to_u8 -> u8;
+ fn to_u16 -> u16;
+ fn to_u32 -> u32;
+ fn to_u64 -> u64;
+ fn to_u128 -> u128;
+
+ fn to_f32 -> f32;
+ fn to_f64 -> f64;
+ }
+}
+
+macro_rules! impl_from_primitive_wrapping {
+ ($( $(#[$cfg:meta])* fn $method:ident ( $i:ident ); )*) => {$(
+ #[inline]
+ $(#[$cfg])*
+ fn $method(n: $i) -> Option<Self> {
+ T::$method(n).map(Wrapping)
+ }
+ )*}
+}
+
+impl<T: FromPrimitive> FromPrimitive for Wrapping<T> {
+ impl_from_primitive_wrapping! {
+ fn from_isize(isize);
+ fn from_i8(i8);
+ fn from_i16(i16);
+ fn from_i32(i32);
+ fn from_i64(i64);
+ fn from_i128(i128);
+
+ fn from_usize(usize);
+ fn from_u8(u8);
+ fn from_u16(u16);
+ fn from_u32(u32);
+ fn from_u64(u64);
+ fn from_u128(u128);
+
+ fn from_f32(f32);
+ fn from_f64(f64);
+ }
+}
+
+/// Cast from one machine scalar to another.
+///
+/// # Examples
+///
+/// ```
+/// # use num_traits as num;
+/// let twenty: f32 = num::cast(0x14).unwrap();
+/// assert_eq!(twenty, 20f32);
+/// ```
+///
+#[inline]
+pub fn cast<T: NumCast, U: NumCast>(n: T) -> Option<U> {
+ NumCast::from(n)
+}
+
+/// An interface for casting between machine scalars.
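+///
+/// # Examples
+///
+/// A brief sketch of how `NumCast::from` behaves (values chosen only for
+/// illustration):
+///
+/// ```
+/// use num_traits::NumCast;
+///
+/// // Casting goes through `ToPrimitive`, so out-of-range values fail.
+/// let small: Option<u8> = NumCast::from(200i32);
+/// let large: Option<u8> = NumCast::from(1024i32);
+/// assert_eq!(small, Some(200));
+/// assert_eq!(large, None);
+/// ```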
+pub trait NumCast: Sized + ToPrimitive {
+ /// Creates a number from another value that can be converted into
+ /// a primitive via the `ToPrimitive` trait. If the source value cannot be
+ /// represented by the target type, then `None` is returned.
+ ///
+ /// A value can be represented by the target type when it lies within
+ /// the range of scalars supported by the target type.
+ /// For example, a negative integer cannot be represented by an unsigned
+ /// integer type, and an `i64` with a very high magnitude might not be
+ /// convertible to an `i32`.
+ /// On the other hand, conversions with possible precision loss or truncation
+ /// are admitted, like an `f32` with a decimal part to an integer type, or
+ /// even a large `f64` saturating to `f32` infinity.
+ fn from<T: ToPrimitive>(n: T) -> Option<Self>;
+}
+
+macro_rules! impl_num_cast {
+ ($T:ty, $conv:ident) => {
+ impl NumCast for $T {
+ #[inline]
+ #[allow(deprecated)]
+ fn from<N: ToPrimitive>(n: N) -> Option<$T> {
+ // `$conv` could be generated using `concat_idents!`, but that
+ // macro seems to be broken at the moment
+ n.$conv()
+ }
+ }
+ };
+}
+
+impl_num_cast!(u8, to_u8);
+impl_num_cast!(u16, to_u16);
+impl_num_cast!(u32, to_u32);
+impl_num_cast!(u64, to_u64);
+impl_num_cast!(u128, to_u128);
+impl_num_cast!(usize, to_usize);
+impl_num_cast!(i8, to_i8);
+impl_num_cast!(i16, to_i16);
+impl_num_cast!(i32, to_i32);
+impl_num_cast!(i64, to_i64);
+impl_num_cast!(i128, to_i128);
+impl_num_cast!(isize, to_isize);
+impl_num_cast!(f32, to_f32);
+impl_num_cast!(f64, to_f64);
+
+impl<T: NumCast> NumCast for Wrapping<T> {
+ fn from<U: ToPrimitive>(n: U) -> Option<Self> {
+ T::from(n).map(Wrapping)
+ }
+}
+
+/// A generic interface for casting between machine scalars with the
+/// `as` operator, which admits narrowing and precision loss.
+/// Implementers of this trait `AsPrimitive` should behave like a primitive
+/// numeric type (e.g. a newtype around another primitive), and the
+/// intended conversion must never fail.
+///
+/// # Examples
+///
+/// ```
+/// # use num_traits::AsPrimitive;
+/// let three: i32 = (3.14159265f32).as_();
+/// assert_eq!(three, 3);
+/// ```
+///
+/// # Safety
+///
+/// **In Rust versions before 1.45.0**, some uses of the `as` operator were not entirely safe.
+/// In particular, it was undefined behavior if
+/// a truncated floating point value could not fit in the target integer
+/// type ([#10184](https://github.com/rust-lang/rust/issues/10184)).
+///
+/// ```ignore
+/// # use num_traits::AsPrimitive;
+/// let x: u8 = (1.04E+17).as_(); // UB
+/// ```
+///
+pub trait AsPrimitive<T>: 'static + Copy
+where
+ T: 'static + Copy,
+{
+ /// Convert a value to another, using the `as` operator.
+ fn as_(self) -> T;
+}
+
+macro_rules! impl_as_primitive {
+ (@ $T: ty => $(#[$cfg:meta])* impl $U: ty ) => {
+ $(#[$cfg])*
+ impl AsPrimitive<$U> for $T {
+ #[inline] fn as_(self) -> $U { self as $U }
+ }
+ };
+ (@ $T: ty => { $( $U: ty ),* } ) => {$(
+ impl_as_primitive!(@ $T => impl $U);
+ )*};
+ ($T: ty => { $( $U: ty ),* } ) => {
+ impl_as_primitive!(@ $T => { $( $U ),* });
+ impl_as_primitive!(@ $T => { u8, u16, u32, u64, u128, usize });
+ impl_as_primitive!(@ $T => { i8, i16, i32, i64, i128, isize });
+ };
+}
+
+impl_as_primitive!(u8 => { char, f32, f64 });
+impl_as_primitive!(i8 => { f32, f64 });
+impl_as_primitive!(u16 => { f32, f64 });
+impl_as_primitive!(i16 => { f32, f64 });
+impl_as_primitive!(u32 => { f32, f64 });
+impl_as_primitive!(i32 => { f32, f64 });
+impl_as_primitive!(u64 => { f32, f64 });
+impl_as_primitive!(i64 => { f32, f64 });
+impl_as_primitive!(u128 => { f32, f64 });
+impl_as_primitive!(i128 => { f32, f64 });
+impl_as_primitive!(usize => { f32, f64 });
+impl_as_primitive!(isize => { f32, f64 });
+impl_as_primitive!(f32 => { f32, f64 });
+impl_as_primitive!(f64 => { f32, f64 });
+impl_as_primitive!(char => { char });
+impl_as_primitive!(bool => {});
diff --git a/rust/vendor/num-traits/src/float.rs b/rust/vendor/num-traits/src/float.rs
new file mode 100644
index 0000000..87f8387
--- /dev/null
+++ b/rust/vendor/num-traits/src/float.rs
@@ -0,0 +1,2344 @@
+use core::num::FpCategory;
+use core::ops::{Add, Div, Neg};
+
+use core::f32;
+use core::f64;
+
+use crate::{Num, NumCast, ToPrimitive};
+
+/// Generic trait for floating point numbers that works with `no_std`.
+///
+/// This trait implements a subset of the `Float` trait.
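+///
+/// # Examples
+///
+/// A minimal sketch of a generic helper that only needs `FloatCore` (the
+/// function name is illustrative):
+///
+/// ```
+/// use num_traits::float::FloatCore;
+///
+/// // Works for both `f32` and `f64`, with or without `std`.
+/// fn max_abs<T: FloatCore>(a: T, b: T) -> T {
+///     a.abs().max(b.abs())
+/// }
+///
+/// assert_eq!(max_abs(-3.0f32, 2.0), 3.0);
+/// assert_eq!(max_abs(1.5f64, -4.5), 4.5);
+/// ```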
+pub trait FloatCore: Num + NumCast + Neg<Output = Self> + PartialOrd + Copy {
+ /// Returns positive infinity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T) {
+ /// assert!(T::infinity() == x);
+ /// }
+ ///
+ /// check(f32::INFINITY);
+ /// check(f64::INFINITY);
+ /// ```
+ fn infinity() -> Self;
+
+ /// Returns negative infinity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T) {
+ /// assert!(T::neg_infinity() == x);
+ /// }
+ ///
+ /// check(f32::NEG_INFINITY);
+ /// check(f64::NEG_INFINITY);
+ /// ```
+ fn neg_infinity() -> Self;
+
+ /// Returns NaN.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ ///
+ /// fn check<T: FloatCore>() {
+ /// let n = T::nan();
+ /// assert!(n != n);
+ /// }
+ ///
+ /// check::<f32>();
+ /// check::<f64>();
+ /// ```
+ fn nan() -> Self;
+
+ /// Returns `-0.0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(n: T) {
+ /// let z = T::neg_zero();
+ /// assert!(z.is_zero());
+ /// assert!(T::one() / z == n);
+ /// }
+ ///
+ /// check(f32::NEG_INFINITY);
+ /// check(f64::NEG_INFINITY);
+ /// ```
+ fn neg_zero() -> Self;
+
+ /// Returns the smallest finite value that this type can represent.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T) {
+ /// assert!(T::min_value() == x);
+ /// }
+ ///
+ /// check(f32::MIN);
+ /// check(f64::MIN);
+ /// ```
+ fn min_value() -> Self;
+
+ /// Returns the smallest positive, normalized value that this type can represent.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T) {
+ /// assert!(T::min_positive_value() == x);
+ /// }
+ ///
+ /// check(f32::MIN_POSITIVE);
+ /// check(f64::MIN_POSITIVE);
+ /// ```
+ fn min_positive_value() -> Self;
+
+ /// Returns epsilon, a small positive value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T) {
+ /// assert!(T::epsilon() == x);
+ /// }
+ ///
+ /// check(f32::EPSILON);
+ /// check(f64::EPSILON);
+ /// ```
+ fn epsilon() -> Self;
+
+ /// Returns the largest finite value that this type can represent.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T) {
+ /// assert!(T::max_value() == x);
+ /// }
+ ///
+ /// check(f32::MAX);
+ /// check(f64::MAX);
+ /// ```
+ fn max_value() -> Self;
+
+ /// Returns `true` if the number is NaN.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, p: bool) {
+ /// assert!(x.is_nan() == p);
+ /// }
+ ///
+ /// check(f32::NAN, true);
+ /// check(f32::INFINITY, false);
+ /// check(f64::NAN, true);
+ /// check(0.0f64, false);
+ /// ```
+ #[inline]
+ #[allow(clippy::eq_op)]
+ fn is_nan(self) -> bool {
+ self != self
+ }
+
+ /// Returns `true` if the number is infinite.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, p: bool) {
+ /// assert!(x.is_infinite() == p);
+ /// }
+ ///
+ /// check(f32::INFINITY, true);
+ /// check(f32::NEG_INFINITY, true);
+ /// check(f32::NAN, false);
+ /// check(f64::INFINITY, true);
+ /// check(f64::NEG_INFINITY, true);
+ /// check(0.0f64, false);
+ /// ```
+ #[inline]
+ fn is_infinite(self) -> bool {
+ self == Self::infinity() || self == Self::neg_infinity()
+ }
+
+    /// Returns `true` if the number is neither infinite nor NaN.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, p: bool) {
+ /// assert!(x.is_finite() == p);
+ /// }
+ ///
+ /// check(f32::INFINITY, false);
+ /// check(f32::MAX, true);
+ /// check(f64::NEG_INFINITY, false);
+ /// check(f64::MIN_POSITIVE, true);
+ /// check(f64::NAN, false);
+ /// ```
+ #[inline]
+ fn is_finite(self) -> bool {
+ !(self.is_nan() || self.is_infinite())
+ }
+
+    /// Returns `true` if the number is neither zero, infinite, subnormal, nor NaN.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, p: bool) {
+ /// assert!(x.is_normal() == p);
+ /// }
+ ///
+ /// check(f32::INFINITY, false);
+ /// check(f32::MAX, true);
+ /// check(f64::NEG_INFINITY, false);
+ /// check(f64::MIN_POSITIVE, true);
+ /// check(0.0f64, false);
+ /// ```
+ #[inline]
+ fn is_normal(self) -> bool {
+ self.classify() == FpCategory::Normal
+ }
+
+ /// Returns `true` if the number is [subnormal].
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::f64;
+ ///
+ /// let min = f64::MIN_POSITIVE; // 2.2250738585072014e-308_f64
+ /// let max = f64::MAX;
+ /// let lower_than_min = 1.0e-308_f64;
+ /// let zero = 0.0_f64;
+ ///
+ /// assert!(!min.is_subnormal());
+ /// assert!(!max.is_subnormal());
+ ///
+ /// assert!(!zero.is_subnormal());
+ /// assert!(!f64::NAN.is_subnormal());
+ /// assert!(!f64::INFINITY.is_subnormal());
+ /// // Values between `0` and `min` are Subnormal.
+ /// assert!(lower_than_min.is_subnormal());
+ /// ```
+ /// [subnormal]: https://en.wikipedia.org/wiki/Subnormal_number
+ #[inline]
+ fn is_subnormal(self) -> bool {
+ self.classify() == FpCategory::Subnormal
+ }
+
+ /// Returns the floating point category of the number. If only one property
+ /// is going to be tested, it is generally faster to use the specific
+ /// predicate instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ /// use std::num::FpCategory;
+ ///
+ /// fn check<T: FloatCore>(x: T, c: FpCategory) {
+ /// assert!(x.classify() == c);
+ /// }
+ ///
+ /// check(f32::INFINITY, FpCategory::Infinite);
+ /// check(f32::MAX, FpCategory::Normal);
+ /// check(f64::NAN, FpCategory::Nan);
+ /// check(f64::MIN_POSITIVE, FpCategory::Normal);
+ /// check(f64::MIN_POSITIVE / 2.0, FpCategory::Subnormal);
+ /// check(0.0f64, FpCategory::Zero);
+ /// ```
+ fn classify(self) -> FpCategory;
+
+ /// Returns the largest integer less than or equal to a number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T) {
+ /// assert!(x.floor() == y);
+ /// }
+ ///
+ /// check(f32::INFINITY, f32::INFINITY);
+ /// check(0.9f32, 0.0);
+ /// check(1.0f32, 1.0);
+ /// check(1.1f32, 1.0);
+ /// check(-0.0f64, 0.0);
+ /// check(-0.9f64, -1.0);
+ /// check(-1.0f64, -1.0);
+ /// check(-1.1f64, -2.0);
+ /// check(f64::MIN, f64::MIN);
+ /// ```
+ #[inline]
+ fn floor(self) -> Self {
+ let f = self.fract();
+ if f.is_nan() || f.is_zero() {
+ self
+ } else if self < Self::zero() {
+ self - f - Self::one()
+ } else {
+ self - f
+ }
+ }
+
+ /// Returns the smallest integer greater than or equal to a number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T) {
+ /// assert!(x.ceil() == y);
+ /// }
+ ///
+ /// check(f32::INFINITY, f32::INFINITY);
+ /// check(0.9f32, 1.0);
+ /// check(1.0f32, 1.0);
+ /// check(1.1f32, 2.0);
+ /// check(-0.0f64, 0.0);
+ /// check(-0.9f64, -0.0);
+ /// check(-1.0f64, -1.0);
+ /// check(-1.1f64, -1.0);
+ /// check(f64::MIN, f64::MIN);
+ /// ```
+ #[inline]
+ fn ceil(self) -> Self {
+ let f = self.fract();
+ if f.is_nan() || f.is_zero() {
+ self
+ } else if self > Self::zero() {
+ self - f + Self::one()
+ } else {
+ self - f
+ }
+ }
+
+ /// Returns the nearest integer to a number. Round half-way cases away from `0.0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T) {
+ /// assert!(x.round() == y);
+ /// }
+ ///
+ /// check(f32::INFINITY, f32::INFINITY);
+ /// check(0.4f32, 0.0);
+ /// check(0.5f32, 1.0);
+ /// check(0.6f32, 1.0);
+ /// check(-0.4f64, 0.0);
+ /// check(-0.5f64, -1.0);
+ /// check(-0.6f64, -1.0);
+ /// check(f64::MIN, f64::MIN);
+ /// ```
+ #[inline]
+ fn round(self) -> Self {
+ let one = Self::one();
+ let h = Self::from(0.5).expect("Unable to cast from 0.5");
+ let f = self.fract();
+ if f.is_nan() || f.is_zero() {
+ self
+ } else if self > Self::zero() {
+ if f < h {
+ self - f
+ } else {
+ self - f + one
+ }
+ } else if -f < h {
+ self - f
+ } else {
+ self - f - one
+ }
+ }
+
+    /// Returns the integer part of a number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T) {
+ /// assert!(x.trunc() == y);
+ /// }
+ ///
+ /// check(f32::INFINITY, f32::INFINITY);
+ /// check(0.9f32, 0.0);
+ /// check(1.0f32, 1.0);
+ /// check(1.1f32, 1.0);
+ /// check(-0.0f64, 0.0);
+ /// check(-0.9f64, -0.0);
+ /// check(-1.0f64, -1.0);
+ /// check(-1.1f64, -1.0);
+ /// check(f64::MIN, f64::MIN);
+ /// ```
+ #[inline]
+ fn trunc(self) -> Self {
+ let f = self.fract();
+ if f.is_nan() {
+ self
+ } else {
+ self - f
+ }
+ }
+
+ /// Returns the fractional part of a number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T) {
+ /// assert!(x.fract() == y);
+ /// }
+ ///
+ /// check(f32::MAX, 0.0);
+ /// check(0.75f32, 0.75);
+ /// check(1.0f32, 0.0);
+ /// check(1.25f32, 0.25);
+ /// check(-0.0f64, 0.0);
+ /// check(-0.75f64, -0.75);
+ /// check(-1.0f64, 0.0);
+ /// check(-1.25f64, -0.25);
+ /// check(f64::MIN, 0.0);
+ /// ```
+ #[inline]
+ fn fract(self) -> Self {
+ if self.is_zero() {
+ Self::zero()
+ } else {
+ self % Self::one()
+ }
+ }
+
+ /// Computes the absolute value of `self`. Returns `FloatCore::nan()` if the
+ /// number is `FloatCore::nan()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T) {
+ /// assert!(x.abs() == y);
+ /// }
+ ///
+ /// check(f32::INFINITY, f32::INFINITY);
+ /// check(1.0f32, 1.0);
+ /// check(0.0f64, 0.0);
+ /// check(-0.0f64, 0.0);
+ /// check(-1.0f64, 1.0);
+ /// check(f64::MIN, f64::MAX);
+ /// ```
+ #[inline]
+ fn abs(self) -> Self {
+ if self.is_sign_positive() {
+ return self;
+ }
+ if self.is_sign_negative() {
+ return -self;
+ }
+ Self::nan()
+ }
+
+ /// Returns a number that represents the sign of `self`.
+ ///
+ /// - `1.0` if the number is positive, `+0.0` or `FloatCore::infinity()`
+ /// - `-1.0` if the number is negative, `-0.0` or `FloatCore::neg_infinity()`
+ /// - `FloatCore::nan()` if the number is `FloatCore::nan()`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T) {
+ /// assert!(x.signum() == y);
+ /// }
+ ///
+ /// check(f32::INFINITY, 1.0);
+ /// check(3.0f32, 1.0);
+ /// check(0.0f32, 1.0);
+ /// check(-0.0f64, -1.0);
+ /// check(-3.0f64, -1.0);
+ /// check(f64::MIN, -1.0);
+ /// ```
+ #[inline]
+ fn signum(self) -> Self {
+ if self.is_nan() {
+ Self::nan()
+ } else if self.is_sign_negative() {
+ -Self::one()
+ } else {
+ Self::one()
+ }
+ }
+
+    /// Returns `true` if `self` is positive, including `+0.0`,
+    /// `FloatCore::infinity()`, and `FloatCore::nan()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, p: bool) {
+ /// assert!(x.is_sign_positive() == p);
+ /// }
+ ///
+ /// check(f32::INFINITY, true);
+ /// check(f32::MAX, true);
+ /// check(0.0f32, true);
+ /// check(-0.0f64, false);
+ /// check(f64::NEG_INFINITY, false);
+ /// check(f64::MIN_POSITIVE, true);
+ /// check(f64::NAN, true);
+ /// check(-f64::NAN, false);
+ /// ```
+ #[inline]
+ fn is_sign_positive(self) -> bool {
+ !self.is_sign_negative()
+ }
+
+    /// Returns `true` if `self` is negative, including `-0.0`,
+    /// `FloatCore::neg_infinity()`, and `-FloatCore::nan()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, p: bool) {
+ /// assert!(x.is_sign_negative() == p);
+ /// }
+ ///
+ /// check(f32::INFINITY, false);
+ /// check(f32::MAX, false);
+ /// check(0.0f32, false);
+ /// check(-0.0f64, true);
+ /// check(f64::NEG_INFINITY, true);
+ /// check(f64::MIN_POSITIVE, false);
+ /// check(f64::NAN, false);
+ /// check(-f64::NAN, true);
+ /// ```
+ #[inline]
+ fn is_sign_negative(self) -> bool {
+ let (_, _, sign) = self.integer_decode();
+ sign < 0
+ }
+
+ /// Returns the minimum of the two numbers.
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T, min: T) {
+ /// assert!(x.min(y) == min);
+ /// }
+ ///
+ /// check(1.0f32, 2.0, 1.0);
+ /// check(f32::NAN, 2.0, 2.0);
+ /// check(1.0f64, -2.0, -2.0);
+ /// check(1.0f64, f64::NAN, 1.0);
+ /// ```
+ #[inline]
+ fn min(self, other: Self) -> Self {
+ if self.is_nan() {
+ return other;
+ }
+ if other.is_nan() {
+ return self;
+ }
+ if self < other {
+ self
+ } else {
+ other
+ }
+ }
+
+ /// Returns the maximum of the two numbers.
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T, max: T) {
+ /// assert!(x.max(y) == max);
+ /// }
+ ///
+ /// check(1.0f32, 2.0, 2.0);
+ /// check(1.0f32, f32::NAN, 1.0);
+ /// check(-1.0f64, 2.0, 2.0);
+ /// check(-1.0f64, f64::NAN, -1.0);
+ /// ```
+ #[inline]
+ fn max(self, other: Self) -> Self {
+ if self.is_nan() {
+ return other;
+ }
+ if other.is_nan() {
+ return self;
+ }
+ if self > other {
+ self
+ } else {
+ other
+ }
+ }
+
+ /// Returns the reciprocal (multiplicative inverse) of the number.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, y: T) {
+ /// assert!(x.recip() == y);
+ /// assert!(y.recip() == x);
+ /// }
+ ///
+ /// check(f32::INFINITY, 0.0);
+ /// check(2.0f32, 0.5);
+ /// check(-0.25f64, -4.0);
+ /// check(-0.0f64, f64::NEG_INFINITY);
+ /// ```
+ #[inline]
+ fn recip(self) -> Self {
+ Self::one() / self
+ }
+
+ /// Raise a number to an integer power.
+ ///
+    /// Using this function is generally faster than using `powf`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ ///
+ /// fn check<T: FloatCore>(x: T, exp: i32, powi: T) {
+ /// assert!(x.powi(exp) == powi);
+ /// }
+ ///
+ /// check(9.0f32, 2, 81.0);
+ /// check(1.0f32, -2, 1.0);
+ /// check(10.0f64, 20, 1e20);
+ /// check(4.0f64, -2, 0.0625);
+ /// check(-1.0f64, std::i32::MIN, 1.0);
+ /// ```
+ #[inline]
+ fn powi(mut self, mut exp: i32) -> Self {
+ if exp < 0 {
+ exp = exp.wrapping_neg();
+ self = self.recip();
+ }
+ // It should always be possible to convert a positive `i32` to a `usize`.
+ // Note, `i32::MIN` will wrap and still be negative, so we need to convert
+ // to `u32` without sign-extension before growing to `usize`.
+ super::pow(self, (exp as u32).to_usize().unwrap())
+ }
+
+ /// Converts to degrees, assuming the number is in radians.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(rad: T, deg: T) {
+ /// assert!(rad.to_degrees() == deg);
+ /// }
+ ///
+ /// check(0.0f32, 0.0);
+ /// check(f32::consts::PI, 180.0);
+ /// check(f64::consts::FRAC_PI_4, 45.0);
+ /// check(f64::INFINITY, f64::INFINITY);
+ /// ```
+ fn to_degrees(self) -> Self;
+
+ /// Converts to radians, assuming the number is in degrees.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(deg: T, rad: T) {
+ /// assert!(deg.to_radians() == rad);
+ /// }
+ ///
+ /// check(0.0f32, 0.0);
+ /// check(180.0, f32::consts::PI);
+ /// check(45.0, f64::consts::FRAC_PI_4);
+ /// check(f64::INFINITY, f64::INFINITY);
+ /// ```
+ fn to_radians(self) -> Self;
+
+ /// Returns the mantissa, base 2 exponent, and sign as integers, respectively.
+ /// The original number can be recovered by `sign * mantissa * 2 ^ exponent`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::float::FloatCore;
+ /// use std::{f32, f64};
+ ///
+ /// fn check<T: FloatCore>(x: T, m: u64, e: i16, s:i8) {
+ /// let (mantissa, exponent, sign) = x.integer_decode();
+ /// assert_eq!(mantissa, m);
+ /// assert_eq!(exponent, e);
+ /// assert_eq!(sign, s);
+ /// }
+ ///
+ /// check(2.0f32, 1 << 23, -22, 1);
+ /// check(-2.0f32, 1 << 23, -22, -1);
+ /// check(f32::INFINITY, 1 << 23, 105, 1);
+ /// check(f64::NEG_INFINITY, 1 << 52, 972, -1);
+ /// ```
+ fn integer_decode(self) -> (u64, i16, i8);
+}
+
+impl FloatCore for f32 {
+ constant! {
+ infinity() -> f32::INFINITY;
+ neg_infinity() -> f32::NEG_INFINITY;
+ nan() -> f32::NAN;
+ neg_zero() -> -0.0;
+ min_value() -> f32::MIN;
+ min_positive_value() -> f32::MIN_POSITIVE;
+ epsilon() -> f32::EPSILON;
+ max_value() -> f32::MAX;
+ }
+
+ #[inline]
+ fn integer_decode(self) -> (u64, i16, i8) {
+ integer_decode_f32(self)
+ }
+
+ forward! {
+ Self::is_nan(self) -> bool;
+ Self::is_infinite(self) -> bool;
+ Self::is_finite(self) -> bool;
+ Self::is_normal(self) -> bool;
+ Self::classify(self) -> FpCategory;
+ Self::is_sign_positive(self) -> bool;
+ Self::is_sign_negative(self) -> bool;
+ Self::min(self, other: Self) -> Self;
+ Self::max(self, other: Self) -> Self;
+ Self::recip(self) -> Self;
+ Self::to_degrees(self) -> Self;
+ Self::to_radians(self) -> Self;
+ }
+
+ #[cfg(has_is_subnormal)]
+ forward! {
+ Self::is_subnormal(self) -> bool;
+ }
+
+ #[cfg(feature = "std")]
+ forward! {
+ Self::floor(self) -> Self;
+ Self::ceil(self) -> Self;
+ Self::round(self) -> Self;
+ Self::trunc(self) -> Self;
+ Self::fract(self) -> Self;
+ Self::abs(self) -> Self;
+ Self::signum(self) -> Self;
+ Self::powi(self, n: i32) -> Self;
+ }
+
+ #[cfg(all(not(feature = "std"), feature = "libm"))]
+ forward! {
+ libm::floorf as floor(self) -> Self;
+ libm::ceilf as ceil(self) -> Self;
+ libm::roundf as round(self) -> Self;
+ libm::truncf as trunc(self) -> Self;
+ libm::fabsf as abs(self) -> Self;
+ }
+
+ #[cfg(all(not(feature = "std"), feature = "libm"))]
+ #[inline]
+ fn fract(self) -> Self {
+ self - libm::truncf(self)
+ }
+}
+
+impl FloatCore for f64 {
+ constant! {
+ infinity() -> f64::INFINITY;
+ neg_infinity() -> f64::NEG_INFINITY;
+ nan() -> f64::NAN;
+ neg_zero() -> -0.0;
+ min_value() -> f64::MIN;
+ min_positive_value() -> f64::MIN_POSITIVE;
+ epsilon() -> f64::EPSILON;
+ max_value() -> f64::MAX;
+ }
+
+ #[inline]
+ fn integer_decode(self) -> (u64, i16, i8) {
+ integer_decode_f64(self)
+ }
+
+ forward! {
+ Self::is_nan(self) -> bool;
+ Self::is_infinite(self) -> bool;
+ Self::is_finite(self) -> bool;
+ Self::is_normal(self) -> bool;
+ Self::classify(self) -> FpCategory;
+ Self::is_sign_positive(self) -> bool;
+ Self::is_sign_negative(self) -> bool;
+ Self::min(self, other: Self) -> Self;
+ Self::max(self, other: Self) -> Self;
+ Self::recip(self) -> Self;
+ Self::to_degrees(self) -> Self;
+ Self::to_radians(self) -> Self;
+ }
+
+ #[cfg(has_is_subnormal)]
+ forward! {
+ Self::is_subnormal(self) -> bool;
+ }
+
+ #[cfg(feature = "std")]
+ forward! {
+ Self::floor(self) -> Self;
+ Self::ceil(self) -> Self;
+ Self::round(self) -> Self;
+ Self::trunc(self) -> Self;
+ Self::fract(self) -> Self;
+ Self::abs(self) -> Self;
+ Self::signum(self) -> Self;
+ Self::powi(self, n: i32) -> Self;
+ }
+
+ #[cfg(all(not(feature = "std"), feature = "libm"))]
+ forward! {
+ libm::floor as floor(self) -> Self;
+ libm::ceil as ceil(self) -> Self;
+ libm::round as round(self) -> Self;
+ libm::trunc as trunc(self) -> Self;
+ libm::fabs as abs(self) -> Self;
+ }
+
+ #[cfg(all(not(feature = "std"), feature = "libm"))]
+ #[inline]
+ fn fract(self) -> Self {
+ self - libm::trunc(self)
+ }
+}
+
+// FIXME: these doctests aren't actually helpful, because they're using and
+// testing the inherent methods directly, not going through `Float`.
+
+/// Generic trait for floating point numbers
+///
+/// This trait is only available with the `std` feature, or with the `libm` feature when `std` is disabled.
+#[cfg(any(feature = "std", feature = "libm"))]
+pub trait Float: Num + Copy + NumCast + PartialOrd + Neg<Output = Self> {
+ /// Returns the `NaN` value.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let nan: f32 = Float::nan();
+ ///
+ /// assert!(nan.is_nan());
+ /// ```
+ fn nan() -> Self;
+ /// Returns the infinite value.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f32;
+ ///
+ /// let infinity: f32 = Float::infinity();
+ ///
+ /// assert!(infinity.is_infinite());
+ /// assert!(!infinity.is_finite());
+ /// assert!(infinity > f32::MAX);
+ /// ```
+ fn infinity() -> Self;
+ /// Returns the negative infinite value.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f32;
+ ///
+ /// let neg_infinity: f32 = Float::neg_infinity();
+ ///
+ /// assert!(neg_infinity.is_infinite());
+ /// assert!(!neg_infinity.is_finite());
+ /// assert!(neg_infinity < f32::MIN);
+ /// ```
+ fn neg_infinity() -> Self;
+ /// Returns `-0.0`.
+ ///
+ /// ```
+ /// use num_traits::{Zero, Float};
+ ///
+ /// let inf: f32 = Float::infinity();
+ /// let zero: f32 = Zero::zero();
+ /// let neg_zero: f32 = Float::neg_zero();
+ ///
+ /// assert_eq!(zero, neg_zero);
+ /// assert_eq!(7.0f32/inf, zero);
+ /// assert_eq!(zero * 10.0, zero);
+ /// ```
+ fn neg_zero() -> Self;
+
+ /// Returns the smallest finite value that this type can represent.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x: f64 = Float::min_value();
+ ///
+ /// assert_eq!(x, f64::MIN);
+ /// ```
+ fn min_value() -> Self;
+
+ /// Returns the smallest positive, normalized value that this type can represent.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x: f64 = Float::min_positive_value();
+ ///
+ /// assert_eq!(x, f64::MIN_POSITIVE);
+ /// ```
+ fn min_positive_value() -> Self;
+
+ /// Returns epsilon, a small positive value.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x: f64 = Float::epsilon();
+ ///
+ /// assert_eq!(x, f64::EPSILON);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// The default implementation will panic if `f32::EPSILON` cannot
+ /// be cast to `Self`.
+ fn epsilon() -> Self {
+ Self::from(f32::EPSILON).expect("Unable to cast from f32::EPSILON")
+ }
+
+ /// Returns the largest finite value that this type can represent.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x: f64 = Float::max_value();
+ /// assert_eq!(x, f64::MAX);
+ /// ```
+ fn max_value() -> Self;
+
+ /// Returns `true` if this value is `NaN` and false otherwise.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let nan = f64::NAN;
+ /// let f = 7.0;
+ ///
+ /// assert!(nan.is_nan());
+ /// assert!(!f.is_nan());
+ /// ```
+ fn is_nan(self) -> bool;
+
+ /// Returns `true` if this value is positive infinity or negative infinity and
+ /// false otherwise.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f32;
+ ///
+ /// let f = 7.0f32;
+ /// let inf: f32 = Float::infinity();
+ /// let neg_inf: f32 = Float::neg_infinity();
+ /// let nan: f32 = f32::NAN;
+ ///
+ /// assert!(!f.is_infinite());
+ /// assert!(!nan.is_infinite());
+ ///
+ /// assert!(inf.is_infinite());
+ /// assert!(neg_inf.is_infinite());
+ /// ```
+ fn is_infinite(self) -> bool;
+
+ /// Returns `true` if this number is neither infinite nor `NaN`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f32;
+ ///
+ /// let f = 7.0f32;
+ /// let inf: f32 = Float::infinity();
+ /// let neg_inf: f32 = Float::neg_infinity();
+ /// let nan: f32 = f32::NAN;
+ ///
+ /// assert!(f.is_finite());
+ ///
+ /// assert!(!nan.is_finite());
+ /// assert!(!inf.is_finite());
+ /// assert!(!neg_inf.is_finite());
+ /// ```
+ fn is_finite(self) -> bool;
+
+ /// Returns `true` if the number is neither zero, infinite,
+ /// [subnormal][subnormal], or `NaN`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f32;
+ ///
+ /// let min = f32::MIN_POSITIVE; // 1.17549435e-38f32
+ /// let max = f32::MAX;
+ /// let lower_than_min = 1.0e-40_f32;
+ /// let zero = 0.0f32;
+ ///
+ /// assert!(min.is_normal());
+ /// assert!(max.is_normal());
+ ///
+ /// assert!(!zero.is_normal());
+ /// assert!(!f32::NAN.is_normal());
+ /// assert!(!f32::INFINITY.is_normal());
+ /// // Values between `0` and `min` are Subnormal.
+ /// assert!(!lower_than_min.is_normal());
+ /// ```
+ /// [subnormal]: http://en.wikipedia.org/wiki/Subnormal_number
+ fn is_normal(self) -> bool;
+
+ /// Returns `true` if the number is [subnormal].
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let min = f64::MIN_POSITIVE; // 2.2250738585072014e-308_f64
+ /// let max = f64::MAX;
+ /// let lower_than_min = 1.0e-308_f64;
+ /// let zero = 0.0_f64;
+ ///
+ /// assert!(!min.is_subnormal());
+ /// assert!(!max.is_subnormal());
+ ///
+ /// assert!(!zero.is_subnormal());
+ /// assert!(!f64::NAN.is_subnormal());
+ /// assert!(!f64::INFINITY.is_subnormal());
+ /// // Values between `0` and `min` are Subnormal.
+ /// assert!(lower_than_min.is_subnormal());
+ /// ```
+ /// [subnormal]: https://en.wikipedia.org/wiki/Subnormal_number
+ #[inline]
+ fn is_subnormal(self) -> bool {
+ self.classify() == FpCategory::Subnormal
+ }
+
+ /// Returns the floating point category of the number. If only one property
+ /// is going to be tested, it is generally faster to use the specific
+ /// predicate instead.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::num::FpCategory;
+ /// use std::f32;
+ ///
+ /// let num = 12.4f32;
+ /// let inf = f32::INFINITY;
+ ///
+ /// assert_eq!(num.classify(), FpCategory::Normal);
+ /// assert_eq!(inf.classify(), FpCategory::Infinite);
+ /// ```
+ fn classify(self) -> FpCategory;
+
+ /// Returns the largest integer less than or equal to a number.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let f = 3.99;
+ /// let g = 3.0;
+ ///
+ /// assert_eq!(f.floor(), 3.0);
+ /// assert_eq!(g.floor(), 3.0);
+ /// ```
+ fn floor(self) -> Self;
+
+ /// Returns the smallest integer greater than or equal to a number.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let f = 3.01;
+ /// let g = 4.0;
+ ///
+ /// assert_eq!(f.ceil(), 4.0);
+ /// assert_eq!(g.ceil(), 4.0);
+ /// ```
+ fn ceil(self) -> Self;
+
+ /// Returns the nearest integer to a number. Round half-way cases away from
+ /// `0.0`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let f = 3.3;
+ /// let g = -3.3;
+ ///
+ /// assert_eq!(f.round(), 3.0);
+ /// assert_eq!(g.round(), -3.0);
+ /// ```
+ fn round(self) -> Self;
+
+ /// Return the integer part of a number.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let f = 3.3;
+ /// let g = -3.7;
+ ///
+ /// assert_eq!(f.trunc(), 3.0);
+ /// assert_eq!(g.trunc(), -3.0);
+ /// ```
+ fn trunc(self) -> Self;
+
+ /// Returns the fractional part of a number.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 3.5;
+ /// let y = -3.5;
+ /// let abs_difference_x = (x.fract() - 0.5).abs();
+ /// let abs_difference_y = (y.fract() - (-0.5)).abs();
+ ///
+ /// assert!(abs_difference_x < 1e-10);
+ /// assert!(abs_difference_y < 1e-10);
+ /// ```
+ fn fract(self) -> Self;
+
+ /// Computes the absolute value of `self`. Returns `Float::nan()` if the
+ /// number is `Float::nan()`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x = 3.5;
+ /// let y = -3.5;
+ ///
+ /// let abs_difference_x = (x.abs() - x).abs();
+ /// let abs_difference_y = (y.abs() - (-y)).abs();
+ ///
+ /// assert!(abs_difference_x < 1e-10);
+ /// assert!(abs_difference_y < 1e-10);
+ ///
+ /// assert!(f64::NAN.abs().is_nan());
+ /// ```
+ fn abs(self) -> Self;
+
+ /// Returns a number that represents the sign of `self`.
+ ///
+ /// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
+ /// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
+ /// - `Float::nan()` if the number is `Float::nan()`
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let f = 3.5;
+ ///
+ /// assert_eq!(f.signum(), 1.0);
+ /// assert_eq!(f64::NEG_INFINITY.signum(), -1.0);
+ ///
+ /// assert!(f64::NAN.signum().is_nan());
+ /// ```
+ fn signum(self) -> Self;
+
+ /// Returns `true` if `self` is positive, including `+0.0`,
+ /// `Float::infinity()`, and `Float::nan()`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let nan: f64 = f64::NAN;
+ /// let neg_nan: f64 = -f64::NAN;
+ ///
+ /// let f = 7.0;
+ /// let g = -7.0;
+ ///
+ /// assert!(f.is_sign_positive());
+ /// assert!(!g.is_sign_positive());
+ /// assert!(nan.is_sign_positive());
+ /// assert!(!neg_nan.is_sign_positive());
+ /// ```
+ fn is_sign_positive(self) -> bool;
+
+ /// Returns `true` if `self` is negative, including `-0.0`,
+ /// `Float::neg_infinity()`, and `-Float::nan()`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let nan: f64 = f64::NAN;
+ /// let neg_nan: f64 = -f64::NAN;
+ ///
+ /// let f = 7.0;
+ /// let g = -7.0;
+ ///
+ /// assert!(!f.is_sign_negative());
+ /// assert!(g.is_sign_negative());
+ /// assert!(!nan.is_sign_negative());
+ /// assert!(neg_nan.is_sign_negative());
+ /// ```
+ fn is_sign_negative(self) -> bool;
+
+ /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
+ /// error, yielding a more accurate result than an unfused multiply-add.
+ ///
+ /// Using `mul_add` can be more performant than an unfused multiply-add if
+ /// the target architecture has a dedicated `fma` CPU instruction.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let m = 10.0;
+ /// let x = 4.0;
+ /// let b = 60.0;
+ ///
+ /// // 100.0
+ /// let abs_difference = (m.mul_add(x, b) - (m*x + b)).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn mul_add(self, a: Self, b: Self) -> Self;
+ /// Take the reciprocal (inverse) of a number, `1/x`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 2.0;
+ /// let abs_difference = (x.recip() - (1.0/x)).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn recip(self) -> Self;
+
+ /// Raise a number to an integer power.
+ ///
+ /// Using this function is generally faster than using `powf`
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 2.0;
+ /// let abs_difference = (x.powi(2) - x*x).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn powi(self, n: i32) -> Self;
+
+ /// Raise a number to a floating point power.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 2.0;
+ /// let abs_difference = (x.powf(2.0) - x*x).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn powf(self, n: Self) -> Self;
+
+ /// Take the square root of a number.
+ ///
+ /// Returns NaN if `self` is a negative number.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let positive = 4.0;
+ /// let negative = -4.0;
+ ///
+ /// let abs_difference = (positive.sqrt() - 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// assert!(negative.sqrt().is_nan());
+ /// ```
+ fn sqrt(self) -> Self;
+
+ /// Returns `e^(self)`, the exponential function.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let one = 1.0;
+ /// // e^1
+ /// let e = one.exp();
+ ///
+ /// // ln(e) - 1 == 0
+ /// let abs_difference = (e.ln() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn exp(self) -> Self;
+
+ /// Returns `2^(self)`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let f = 2.0;
+ ///
+ /// // 2^2 - 4 == 0
+ /// let abs_difference = (f.exp2() - 4.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn exp2(self) -> Self;
+
+ /// Returns the natural logarithm of the number.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let one = 1.0;
+ /// // e^1
+ /// let e = one.exp();
+ ///
+ /// // ln(e) - 1 == 0
+ /// let abs_difference = (e.ln() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn ln(self) -> Self;
+
+ /// Returns the logarithm of the number with respect to an arbitrary base.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let ten = 10.0;
+ /// let two = 2.0;
+ ///
+ /// // log10(10) - 1 == 0
+ /// let abs_difference_10 = (ten.log(10.0) - 1.0).abs();
+ ///
+ /// // log2(2) - 1 == 0
+ /// let abs_difference_2 = (two.log(2.0) - 1.0).abs();
+ ///
+ /// assert!(abs_difference_10 < 1e-10);
+ /// assert!(abs_difference_2 < 1e-10);
+ /// ```
+ fn log(self, base: Self) -> Self;
+
+ /// Returns the base 2 logarithm of the number.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let two = 2.0;
+ ///
+ /// // log2(2) - 1 == 0
+ /// let abs_difference = (two.log2() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn log2(self) -> Self;
+
+ /// Returns the base 10 logarithm of the number.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let ten = 10.0;
+ ///
+ /// // log10(10) - 1 == 0
+ /// let abs_difference = (ten.log10() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn log10(self) -> Self;
+
+ /// Converts radians to degrees.
+ ///
+ /// ```
+ /// use std::f64::consts;
+ ///
+ /// let angle = consts::PI;
+ ///
+ /// let abs_difference = (angle.to_degrees() - 180.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[inline]
+ fn to_degrees(self) -> Self {
+ let halfpi = Self::zero().acos();
+ let ninety = Self::from(90u8).unwrap();
+ self * ninety / halfpi
+ }
+
+ /// Converts degrees to radians.
+ ///
+ /// ```
+ /// use std::f64::consts;
+ ///
+ /// let angle = 180.0_f64;
+ ///
+ /// let abs_difference = (angle.to_radians() - consts::PI).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[inline]
+ fn to_radians(self) -> Self {
+ let halfpi = Self::zero().acos();
+ let ninety = Self::from(90u8).unwrap();
+ self * halfpi / ninety
+ }
+
+ /// Returns the maximum of the two numbers.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 1.0;
+ /// let y = 2.0;
+ ///
+ /// assert_eq!(x.max(y), y);
+ /// ```
+ fn max(self, other: Self) -> Self;
+
+ /// Returns the minimum of the two numbers.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 1.0;
+ /// let y = 2.0;
+ ///
+ /// assert_eq!(x.min(y), x);
+ /// ```
+ fn min(self, other: Self) -> Self;
+
+ /// The positive difference of two numbers.
+ ///
+ /// * If `self <= other`: `+0.0`
+ /// * Else: `self - other`
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 3.0;
+ /// let y = -3.0;
+ ///
+ /// let abs_difference_x = (x.abs_sub(1.0) - 2.0).abs();
+ /// let abs_difference_y = (y.abs_sub(1.0) - 0.0).abs();
+ ///
+ /// assert!(abs_difference_x < 1e-10);
+ /// assert!(abs_difference_y < 1e-10);
+ /// ```
+ fn abs_sub(self, other: Self) -> Self;
+
+ /// Take the cubic root of a number.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 8.0;
+ ///
+ /// // x^(1/3) - 2 == 0
+ /// let abs_difference = (x.cbrt() - 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn cbrt(self) -> Self;
+
+ /// Calculate the length of the hypotenuse of a right-angle triangle given
+ /// legs of length `x` and `y`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 2.0;
+ /// let y = 3.0;
+ ///
+ /// // sqrt(x^2 + y^2)
+ /// let abs_difference = (x.hypot(y) - (x.powi(2) + y.powi(2)).sqrt()).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn hypot(self, other: Self) -> Self;
+
+ /// Computes the sine of a number (in radians).
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x = f64::consts::PI/2.0;
+ ///
+ /// let abs_difference = (x.sin() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn sin(self) -> Self;
+
+ /// Computes the cosine of a number (in radians).
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x = 2.0*f64::consts::PI;
+ ///
+ /// let abs_difference = (x.cos() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn cos(self) -> Self;
+
+ /// Computes the tangent of a number (in radians).
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x = f64::consts::PI/4.0;
+ /// let abs_difference = (x.tan() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-14);
+ /// ```
+ fn tan(self) -> Self;
+
+ /// Computes the arcsine of a number. Return value is in radians in
+ /// the range [-pi/2, pi/2] or NaN if the number is outside the range
+ /// [-1, 1].
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let f = f64::consts::PI / 2.0;
+ ///
+ /// // asin(sin(pi/2))
+ /// let abs_difference = (f.sin().asin() - f64::consts::PI / 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn asin(self) -> Self;
+
+ /// Computes the arccosine of a number. Return value is in radians in
+ /// the range [0, pi] or NaN if the number is outside the range
+ /// [-1, 1].
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let f = f64::consts::PI / 4.0;
+ ///
+ /// // acos(cos(pi/4))
+ /// let abs_difference = (f.cos().acos() - f64::consts::PI / 4.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn acos(self) -> Self;
+
+ /// Computes the arctangent of a number. Return value is in radians in the
+ /// range [-pi/2, pi/2].
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let f = 1.0;
+ ///
+ /// // atan(tan(1))
+ /// let abs_difference = (f.tan().atan() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn atan(self) -> Self;
+
+ /// Computes the four quadrant arctangent of `self` (`y`) and `other` (`x`).
+ ///
+ /// * `x = 0`, `y = 0`: `0`
+ /// * `x >= 0`: `arctan(y/x)` -> `[-pi/2, pi/2]`
+ /// * `x < 0`, `y >= 0`: `arctan(y/x) + pi` -> `(pi/2, pi]`
+ /// * `x < 0`, `y < 0`: `arctan(y/x) - pi` -> `(-pi, -pi/2)`
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let pi = f64::consts::PI;
+ /// // All angles from horizontal right (+x)
+ /// // -pi/4 radians (45 deg clockwise)
+ /// let x1 = 3.0;
+ /// let y1 = -3.0;
+ ///
+ /// // 3*pi/4 radians (135 deg counter-clockwise)
+ /// let x2 = -3.0;
+ /// let y2 = 3.0;
+ ///
+ /// let abs_difference_1 = (y1.atan2(x1) - (-pi/4.0)).abs();
+ /// let abs_difference_2 = (y2.atan2(x2) - 3.0*pi/4.0).abs();
+ ///
+ /// assert!(abs_difference_1 < 1e-10);
+ /// assert!(abs_difference_2 < 1e-10);
+ /// ```
+ fn atan2(self, other: Self) -> Self;
+
+ /// Simultaneously computes the sine and cosine of the number, `x`. Returns
+ /// `(sin(x), cos(x))`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x = f64::consts::PI/4.0;
+ /// let f = x.sin_cos();
+ ///
+ /// let abs_difference_0 = (f.0 - x.sin()).abs();
+ /// let abs_difference_1 = (f.1 - x.cos()).abs();
+ ///
+ /// assert!(abs_difference_0 < 1e-10);
+ /// assert!(abs_difference_1 < 1e-10);
+ /// ```
+ fn sin_cos(self) -> (Self, Self);
+
+ /// Returns `e^(self) - 1` in a way that is accurate even if the
+ /// number is close to zero.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 7.0;
+ ///
+ /// // e^(ln(7)) - 1
+ /// let abs_difference = (x.ln().exp_m1() - 6.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn exp_m1(self) -> Self;
+
+ /// Returns `ln(1+n)` (natural logarithm) more accurately than if
+ /// the operations were performed separately.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let x = f64::consts::E - 1.0;
+ ///
+ /// // ln(1 + (e - 1)) == ln(e) == 1
+ /// let abs_difference = (x.ln_1p() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn ln_1p(self) -> Self;
+
+ /// Hyperbolic sine function.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let e = f64::consts::E;
+ /// let x = 1.0;
+ ///
+ /// let f = x.sinh();
+ /// // Solving sinh() at 1 gives `(e^2-1)/(2e)`
+ /// let g = (e*e - 1.0)/(2.0*e);
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn sinh(self) -> Self;
+
+ /// Hyperbolic cosine function.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let e = f64::consts::E;
+ /// let x = 1.0;
+ /// let f = x.cosh();
+ /// // Solving cosh() at 1 gives `(e^2+1)/(2e)`
+ /// let g = (e*e + 1.0)/(2.0*e);
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// // Same result
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn cosh(self) -> Self;
+
+ /// Hyperbolic tangent function.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let e = f64::consts::E;
+ /// let x = 1.0;
+ ///
+ /// let f = x.tanh();
+ /// // Solving tanh() at 1 gives `(1 - e^(-2))/(1 + e^(-2))`
+ /// let g = (1.0 - e.powi(-2))/(1.0 + e.powi(-2));
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn tanh(self) -> Self;
+
+ /// Inverse hyperbolic sine function.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 1.0;
+ /// let f = x.sinh().asinh();
+ ///
+ /// let abs_difference = (f - x).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn asinh(self) -> Self;
+
+ /// Inverse hyperbolic cosine function.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let x = 1.0;
+ /// let f = x.cosh().acosh();
+ ///
+ /// let abs_difference = (f - x).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn acosh(self) -> Self;
+
+ /// Inverse hyperbolic tangent function.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ /// use std::f64;
+ ///
+ /// let e = f64::consts::E;
+ /// let f = e.tanh().atanh();
+ ///
+ /// let abs_difference = (f - e).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn atanh(self) -> Self;
+
+ /// Returns the mantissa, base 2 exponent, and sign as integers, respectively.
+ /// The original number can be recovered by `sign * mantissa * 2 ^ exponent`.
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let num = 2.0f32;
+ ///
+ /// // (8388608, -22, 1)
+ /// let (mantissa, exponent, sign) = Float::integer_decode(num);
+ /// let sign_f = sign as f32;
+ /// let mantissa_f = mantissa as f32;
+ /// let exponent_f = num.powf(exponent as f32);
+ ///
+ /// // 1 * 8388608 * 2^(-22) == 2
+ /// let abs_difference = (sign_f * mantissa_f * exponent_f - num).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn integer_decode(self) -> (u64, i16, i8);
+
+ /// Returns a number composed of the magnitude of `self` and the sign of
+ /// `sign`.
+ ///
+ /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise
+ /// equal to `-self`. If `self` is a `NAN`, then a `NAN` with the sign of
+ /// `sign` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::Float;
+ ///
+ /// let f = 3.5_f32;
+ ///
+ /// assert_eq!(f.copysign(0.42), 3.5_f32);
+ /// assert_eq!(f.copysign(-0.42), -3.5_f32);
+ /// assert_eq!((-f).copysign(0.42), 3.5_f32);
+ /// assert_eq!((-f).copysign(-0.42), -3.5_f32);
+ ///
+ /// assert!(f32::nan().copysign(1.0).is_nan());
+ /// ```
+ fn copysign(self, sign: Self) -> Self {
+ if self.is_sign_negative() == sign.is_sign_negative() {
+ self
+ } else {
+ self.neg()
+ }
+ }
+}
+
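+// Implements `Float` for a primitive float type by forwarding to the inherent
+// std methods; `$decode` names the matching `integer_decode_*` helper defined
+// further below.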
+#[cfg(feature = "std")]
+macro_rules! float_impl_std {
+ ($T:ident $decode:ident) => {
+ impl Float for $T {
+ constant! {
+ nan() -> $T::NAN;
+ infinity() -> $T::INFINITY;
+ neg_infinity() -> $T::NEG_INFINITY;
+ neg_zero() -> -0.0;
+ min_value() -> $T::MIN;
+ min_positive_value() -> $T::MIN_POSITIVE;
+ epsilon() -> $T::EPSILON;
+ max_value() -> $T::MAX;
+ }
+
+ #[inline]
+ #[allow(deprecated)]
+ fn abs_sub(self, other: Self) -> Self {
+ <$T>::abs_sub(self, other)
+ }
+
+ #[inline]
+ fn integer_decode(self) -> (u64, i16, i8) {
+ $decode(self)
+ }
+
+ forward! {
+ Self::is_nan(self) -> bool;
+ Self::is_infinite(self) -> bool;
+ Self::is_finite(self) -> bool;
+ Self::is_normal(self) -> bool;
+ Self::classify(self) -> FpCategory;
+ Self::floor(self) -> Self;
+ Self::ceil(self) -> Self;
+ Self::round(self) -> Self;
+ Self::trunc(self) -> Self;
+ Self::fract(self) -> Self;
+ Self::abs(self) -> Self;
+ Self::signum(self) -> Self;
+ Self::is_sign_positive(self) -> bool;
+ Self::is_sign_negative(self) -> bool;
+ Self::mul_add(self, a: Self, b: Self) -> Self;
+ Self::recip(self) -> Self;
+ Self::powi(self, n: i32) -> Self;
+ Self::powf(self, n: Self) -> Self;
+ Self::sqrt(self) -> Self;
+ Self::exp(self) -> Self;
+ Self::exp2(self) -> Self;
+ Self::ln(self) -> Self;
+ Self::log(self, base: Self) -> Self;
+ Self::log2(self) -> Self;
+ Self::log10(self) -> Self;
+ Self::to_degrees(self) -> Self;
+ Self::to_radians(self) -> Self;
+ Self::max(self, other: Self) -> Self;
+ Self::min(self, other: Self) -> Self;
+ Self::cbrt(self) -> Self;
+ Self::hypot(self, other: Self) -> Self;
+ Self::sin(self) -> Self;
+ Self::cos(self) -> Self;
+ Self::tan(self) -> Self;
+ Self::asin(self) -> Self;
+ Self::acos(self) -> Self;
+ Self::atan(self) -> Self;
+ Self::atan2(self, other: Self) -> Self;
+ Self::sin_cos(self) -> (Self, Self);
+ Self::exp_m1(self) -> Self;
+ Self::ln_1p(self) -> Self;
+ Self::sinh(self) -> Self;
+ Self::cosh(self) -> Self;
+ Self::tanh(self) -> Self;
+ Self::asinh(self) -> Self;
+ Self::acosh(self) -> Self;
+ Self::atanh(self) -> Self;
+ }
+
+ #[cfg(has_copysign)]
+ forward! {
+ Self::copysign(self, sign: Self) -> Self;
+ }
+
+ #[cfg(has_is_subnormal)]
+ forward! {
+ Self::is_subnormal(self) -> bool;
+ }
+ }
+ };
+}
+
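+// Shared body for the no-std `Float` impls further below: this macro expands
+// inside an `impl Float for $T` block and supplies the constants and the
+// pieces that do not need libm, while the per-type libm forwards are listed in
+// those impl blocks themselves.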
+#[cfg(all(not(feature = "std"), feature = "libm"))]
+macro_rules! float_impl_libm {
+ ($T:ident $decode:ident) => {
+ constant! {
+ nan() -> $T::NAN;
+ infinity() -> $T::INFINITY;
+ neg_infinity() -> $T::NEG_INFINITY;
+ neg_zero() -> -0.0;
+ min_value() -> $T::MIN;
+ min_positive_value() -> $T::MIN_POSITIVE;
+ epsilon() -> $T::EPSILON;
+ max_value() -> $T::MAX;
+ }
+
+ #[inline]
+ fn integer_decode(self) -> (u64, i16, i8) {
+ $decode(self)
+ }
+
+ #[inline]
+ fn fract(self) -> Self {
+ self - Float::trunc(self)
+ }
+
+ #[inline]
+ fn log(self, base: Self) -> Self {
+ self.ln() / base.ln()
+ }
+
+ forward! {
+ Self::is_nan(self) -> bool;
+ Self::is_infinite(self) -> bool;
+ Self::is_finite(self) -> bool;
+ Self::is_normal(self) -> bool;
+ Self::classify(self) -> FpCategory;
+ Self::is_sign_positive(self) -> bool;
+ Self::is_sign_negative(self) -> bool;
+ Self::min(self, other: Self) -> Self;
+ Self::max(self, other: Self) -> Self;
+ Self::recip(self) -> Self;
+ Self::to_degrees(self) -> Self;
+ Self::to_radians(self) -> Self;
+ }
+
+ #[cfg(has_is_subnormal)]
+ forward! {
+ Self::is_subnormal(self) -> bool;
+ }
+
+ forward! {
+ FloatCore::signum(self) -> Self;
+ FloatCore::powi(self, n: i32) -> Self;
+ }
+ };
+}
+
+fn integer_decode_f32(f: f32) -> (u64, i16, i8) {
+ let bits: u32 = f.to_bits();
+ let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
+ let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
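+ // A zero exponent field marks zero or a subnormal value, which has no
+ // implicit leading mantissa bit; shifting the stored mantissa left once
+ // keeps the single bias adjustment below correct in that case as well.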
+ let mantissa = if exponent == 0 {
+ (bits & 0x7fffff) << 1
+ } else {
+ (bits & 0x7fffff) | 0x800000
+ };
+ // Exponent bias + mantissa shift
+ exponent -= 127 + 23;
+ (mantissa as u64, exponent, sign)
+}
+
+fn integer_decode_f64(f: f64) -> (u64, i16, i8) {
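+ // Same decoding as `integer_decode_f32`, with double-precision field widths
+ // (52-bit mantissa, 11-bit exponent field, bias 1023).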
+ let bits: u64 = f.to_bits();
+ let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 };
+ let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
+ let mantissa = if exponent == 0 {
+ (bits & 0xfffffffffffff) << 1
+ } else {
+ (bits & 0xfffffffffffff) | 0x10000000000000
+ };
+ // Exponent bias + mantissa shift
+ exponent -= 1023 + 52;
+ (mantissa, exponent, sign)
+}
+
+#[cfg(feature = "std")]
+float_impl_std!(f32 integer_decode_f32);
+#[cfg(feature = "std")]
+float_impl_std!(f64 integer_decode_f64);
+
+#[cfg(all(not(feature = "std"), feature = "libm"))]
+impl Float for f32 {
+ float_impl_libm!(f32 integer_decode_f32);
+
+ #[inline]
+ #[allow(deprecated)]
+ fn abs_sub(self, other: Self) -> Self {
+ libm::fdimf(self, other)
+ }
+
+ forward! {
+ libm::floorf as floor(self) -> Self;
+ libm::ceilf as ceil(self) -> Self;
+ libm::roundf as round(self) -> Self;
+ libm::truncf as trunc(self) -> Self;
+ libm::fabsf as abs(self) -> Self;
+ libm::fmaf as mul_add(self, a: Self, b: Self) -> Self;
+ libm::powf as powf(self, n: Self) -> Self;
+ libm::sqrtf as sqrt(self) -> Self;
+ libm::expf as exp(self) -> Self;
+ libm::exp2f as exp2(self) -> Self;
+ libm::logf as ln(self) -> Self;
+ libm::log2f as log2(self) -> Self;
+ libm::log10f as log10(self) -> Self;
+ libm::cbrtf as cbrt(self) -> Self;
+ libm::hypotf as hypot(self, other: Self) -> Self;
+ libm::sinf as sin(self) -> Self;
+ libm::cosf as cos(self) -> Self;
+ libm::tanf as tan(self) -> Self;
+ libm::asinf as asin(self) -> Self;
+ libm::acosf as acos(self) -> Self;
+ libm::atanf as atan(self) -> Self;
+ libm::atan2f as atan2(self, other: Self) -> Self;
+ libm::sincosf as sin_cos(self) -> (Self, Self);
+ libm::expm1f as exp_m1(self) -> Self;
+ libm::log1pf as ln_1p(self) -> Self;
+ libm::sinhf as sinh(self) -> Self;
+ libm::coshf as cosh(self) -> Self;
+ libm::tanhf as tanh(self) -> Self;
+ libm::asinhf as asinh(self) -> Self;
+ libm::acoshf as acosh(self) -> Self;
+ libm::atanhf as atanh(self) -> Self;
+ libm::copysignf as copysign(self, other: Self) -> Self;
+ }
+}
+
+#[cfg(all(not(feature = "std"), feature = "libm"))]
+impl Float for f64 {
+ float_impl_libm!(f64 integer_decode_f64);
+
+ #[inline]
+ #[allow(deprecated)]
+ fn abs_sub(self, other: Self) -> Self {
+ libm::fdim(self, other)
+ }
+
+ forward! {
+ libm::floor as floor(self) -> Self;
+ libm::ceil as ceil(self) -> Self;
+ libm::round as round(self) -> Self;
+ libm::trunc as trunc(self) -> Self;
+ libm::fabs as abs(self) -> Self;
+ libm::fma as mul_add(self, a: Self, b: Self) -> Self;
+ libm::pow as powf(self, n: Self) -> Self;
+ libm::sqrt as sqrt(self) -> Self;
+ libm::exp as exp(self) -> Self;
+ libm::exp2 as exp2(self) -> Self;
+ libm::log as ln(self) -> Self;
+ libm::log2 as log2(self) -> Self;
+ libm::log10 as log10(self) -> Self;
+ libm::cbrt as cbrt(self) -> Self;
+ libm::hypot as hypot(self, other: Self) -> Self;
+ libm::sin as sin(self) -> Self;
+ libm::cos as cos(self) -> Self;
+ libm::tan as tan(self) -> Self;
+ libm::asin as asin(self) -> Self;
+ libm::acos as acos(self) -> Self;
+ libm::atan as atan(self) -> Self;
+ libm::atan2 as atan2(self, other: Self) -> Self;
+ libm::sincos as sin_cos(self) -> (Self, Self);
+ libm::expm1 as exp_m1(self) -> Self;
+ libm::log1p as ln_1p(self) -> Self;
+ libm::sinh as sinh(self) -> Self;
+ libm::cosh as cosh(self) -> Self;
+ libm::tanh as tanh(self) -> Self;
+ libm::asinh as asinh(self) -> Self;
+ libm::acosh as acosh(self) -> Self;
+ libm::atanh as atanh(self) -> Self;
+ libm::copysign as copysign(self, sign: Self) -> Self;
+ }
+}
+
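+// `FloatConst` exposes the standard `consts` values generically. The
+// trait-level defaults derive `TAU`, `LOG10_2`, and `LOG2_10` from the other
+// constants; the per-type impls generated below use full-precision literals
+// instead.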
+macro_rules! float_const_impl {
+ ($(#[$doc:meta] $constant:ident,)+) => (
+ #[allow(non_snake_case)]
+ pub trait FloatConst {
+ $(#[$doc] fn $constant() -> Self;)+
+ #[doc = "Return the full circle constant `τ`."]
+ #[inline]
+ fn TAU() -> Self where Self: Sized + Add<Self, Output = Self> {
+ Self::PI() + Self::PI()
+ }
+ #[doc = "Return `log10(2.0)`."]
+ #[inline]
+ fn LOG10_2() -> Self where Self: Sized + Div<Self, Output = Self> {
+ Self::LN_2() / Self::LN_10()
+ }
+ #[doc = "Return `log2(10.0)`."]
+ #[inline]
+ fn LOG2_10() -> Self where Self: Sized + Div<Self, Output = Self> {
+ Self::LN_10() / Self::LN_2()
+ }
+ }
+ float_const_impl! { @float f32, $($constant,)+ }
+ float_const_impl! { @float f64, $($constant,)+ }
+ );
+ (@float $T:ident, $($constant:ident,)+) => (
+ impl FloatConst for $T {
+ constant! {
+ $( $constant() -> $T::consts::$constant; )+
+ TAU() -> 6.28318530717958647692528676655900577;
+ LOG10_2() -> 0.301029995663981195213738894724493027;
+ LOG2_10() -> 3.32192809488736234787031942948939018;
+ }
+ }
+ );
+}
+
+float_const_impl! {
+ #[doc = "Return Euler’s number."]
+ E,
+ #[doc = "Return `1.0 / π`."]
+ FRAC_1_PI,
+ #[doc = "Return `1.0 / sqrt(2.0)`."]
+ FRAC_1_SQRT_2,
+ #[doc = "Return `2.0 / π`."]
+ FRAC_2_PI,
+ #[doc = "Return `2.0 / sqrt(π)`."]
+ FRAC_2_SQRT_PI,
+ #[doc = "Return `π / 2.0`."]
+ FRAC_PI_2,
+ #[doc = "Return `π / 3.0`."]
+ FRAC_PI_3,
+ #[doc = "Return `π / 4.0`."]
+ FRAC_PI_4,
+ #[doc = "Return `π / 6.0`."]
+ FRAC_PI_6,
+ #[doc = "Return `π / 8.0`."]
+ FRAC_PI_8,
+ #[doc = "Return `ln(10.0)`."]
+ LN_10,
+ #[doc = "Return `ln(2.0)`."]
+ LN_2,
+ #[doc = "Return `log10(e)`."]
+ LOG10_E,
+ #[doc = "Return `log2(e)`."]
+ LOG2_E,
+ #[doc = "Return Archimedes’ constant `π`."]
+ PI,
+ #[doc = "Return `sqrt(2.0)`."]
+ SQRT_2,
+}
+
+#[cfg(test)]
+mod tests {
+ use core::f64::consts;
+
+ const DEG_RAD_PAIRS: [(f64, f64); 7] = [
+ (0.0, 0.),
+ (22.5, consts::FRAC_PI_8),
+ (30.0, consts::FRAC_PI_6),
+ (45.0, consts::FRAC_PI_4),
+ (60.0, consts::FRAC_PI_3),
+ (90.0, consts::FRAC_PI_2),
+ (180.0, consts::PI),
+ ];
+
+ #[test]
+ fn convert_deg_rad() {
+ use crate::float::FloatCore;
+
+ for &(deg, rad) in &DEG_RAD_PAIRS {
+ assert!((FloatCore::to_degrees(rad) - deg).abs() < 1e-6);
+ assert!((FloatCore::to_radians(deg) - rad).abs() < 1e-6);
+
+ let (deg, rad) = (deg as f32, rad as f32);
+ assert!((FloatCore::to_degrees(rad) - deg).abs() < 1e-5);
+ assert!((FloatCore::to_radians(deg) - rad).abs() < 1e-5);
+ }
+ }
+
+ #[cfg(any(feature = "std", feature = "libm"))]
+ #[test]
+ fn convert_deg_rad_std() {
+ for &(deg, rad) in &DEG_RAD_PAIRS {
+ use crate::Float;
+
+ assert!((Float::to_degrees(rad) - deg).abs() < 1e-6);
+ assert!((Float::to_radians(deg) - rad).abs() < 1e-6);
+
+ let (deg, rad) = (deg as f32, rad as f32);
+ assert!((Float::to_degrees(rad) - deg).abs() < 1e-5);
+ assert!((Float::to_radians(deg) - rad).abs() < 1e-5);
+ }
+ }
+
+ #[test]
+ fn to_degrees_rounding() {
+ use crate::float::FloatCore;
+
+ assert_eq!(
+ FloatCore::to_degrees(1_f32),
+ 57.2957795130823208767981548141051703
+ );
+ }
+
+ #[test]
+ #[cfg(any(feature = "std", feature = "libm"))]
+ fn extra_logs() {
+ use crate::float::{Float, FloatConst};
+
+ fn check<F: Float + FloatConst>(diff: F) {
+ let _2 = F::from(2.0).unwrap();
+ assert!((F::LOG10_2() - F::log10(_2)).abs() < diff);
+ assert!((F::LOG10_2() - F::LN_2() / F::LN_10()).abs() < diff);
+
+ let _10 = F::from(10.0).unwrap();
+ assert!((F::LOG2_10() - F::log2(_10)).abs() < diff);
+ assert!((F::LOG2_10() - F::LN_10() / F::LN_2()).abs() < diff);
+ }
+
+ check::<f32>(1e-6);
+ check::<f64>(1e-12);
+ }
+
+ #[test]
+ #[cfg(any(feature = "std", feature = "libm"))]
+ fn copysign() {
+ use crate::float::Float;
+ test_copysign_generic(2.0_f32, -2.0_f32, f32::nan());
+ test_copysign_generic(2.0_f64, -2.0_f64, f64::nan());
+ test_copysignf(2.0_f32, -2.0_f32, f32::nan());
+ }
+
+ #[cfg(any(feature = "std", feature = "libm"))]
+ fn test_copysignf(p: f32, n: f32, nan: f32) {
+ use crate::float::Float;
+ use core::ops::Neg;
+
+ assert!(p.is_sign_positive());
+ assert!(n.is_sign_negative());
+ assert!(nan.is_nan());
+
+ assert_eq!(p, Float::copysign(p, p));
+ assert_eq!(p.neg(), Float::copysign(p, n));
+
+ assert_eq!(n, Float::copysign(n, n));
+ assert_eq!(n.neg(), Float::copysign(n, p));
+
+ assert!(Float::copysign(nan, p).is_sign_positive());
+ assert!(Float::copysign(nan, n).is_sign_negative());
+ }
+
+ #[cfg(any(feature = "std", feature = "libm"))]
+ fn test_copysign_generic<F: crate::float::Float + ::core::fmt::Debug>(p: F, n: F, nan: F) {
+ assert!(p.is_sign_positive());
+ assert!(n.is_sign_negative());
+ assert!(nan.is_nan());
+ assert!(!nan.is_subnormal());
+
+ assert_eq!(p, p.copysign(p));
+ assert_eq!(p.neg(), p.copysign(n));
+
+ assert_eq!(n, n.copysign(n));
+ assert_eq!(n.neg(), n.copysign(p));
+
+ assert!(nan.copysign(p).is_sign_positive());
+ assert!(nan.copysign(n).is_sign_negative());
+ }
+
+ #[cfg(any(feature = "std", feature = "libm"))]
+ fn test_subnormal<F: crate::float::Float + ::core::fmt::Debug>() {
+ let min_positive = F::min_positive_value();
+ let lower_than_min = min_positive / F::from(2.0f32).unwrap();
+ assert!(!min_positive.is_subnormal());
+ assert!(lower_than_min.is_subnormal());
+ }
+
+ #[test]
+ #[cfg(any(feature = "std", feature = "libm"))]
+ fn subnormal() {
+ test_subnormal::<f64>();
+ test_subnormal::<f32>();
+ }
+}
diff --git a/rust/vendor/num-traits/src/identities.rs b/rust/vendor/num-traits/src/identities.rs
new file mode 100644
index 0000000..2486cb1
--- /dev/null
+++ b/rust/vendor/num-traits/src/identities.rs
@@ -0,0 +1,202 @@
+use core::num::Wrapping;
+use core::ops::{Add, Mul};
+
+/// Defines an additive identity element for `Self`.
+///
+/// # Laws
+///
+/// ```text
+/// a + 0 = a ∀ a ∈ Self
+/// 0 + a = a ∀ a ∈ Self
+/// ```
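+///
+/// # Examples
+///
+/// A small usage sketch (illustrative only): a generic sum that starts from
+/// the additive identity.
+///
+/// ```
+/// use num_traits::Zero;
+///
+/// fn sum<T: Zero + Copy>(xs: &[T]) -> T {
+///     xs.iter().fold(T::zero(), |acc, &x| acc + x)
+/// }
+///
+/// assert_eq!(sum(&[1, 2, 3]), 6);
+/// assert!(0i32.is_zero());
+/// ```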
+pub trait Zero: Sized + Add<Self, Output = Self> {
+ /// Returns the additive identity element of `Self`, `0`.
+ ///
+ /// # Purity
+ ///
+ /// This function should return the same result at all times regardless of
+ /// external mutable state, for example values stored in TLS or in
+ /// `static mut`s.
+ // This cannot be an associated constant, because of bignums.
+ fn zero() -> Self;
+
+ /// Sets `self` to the additive identity element of `Self`, `0`.
+ fn set_zero(&mut self) {
+ *self = Zero::zero();
+ }
+
+ /// Returns `true` if `self` is equal to the additive identity.
+ fn is_zero(&self) -> bool;
+}
+
+macro_rules! zero_impl {
+ ($t:ty, $v:expr) => {
+ impl Zero for $t {
+ #[inline]
+ fn zero() -> $t {
+ $v
+ }
+ #[inline]
+ fn is_zero(&self) -> bool {
+ *self == $v
+ }
+ }
+ };
+}
+
+zero_impl!(usize, 0);
+zero_impl!(u8, 0);
+zero_impl!(u16, 0);
+zero_impl!(u32, 0);
+zero_impl!(u64, 0);
+zero_impl!(u128, 0);
+
+zero_impl!(isize, 0);
+zero_impl!(i8, 0);
+zero_impl!(i16, 0);
+zero_impl!(i32, 0);
+zero_impl!(i64, 0);
+zero_impl!(i128, 0);
+
+zero_impl!(f32, 0.0);
+zero_impl!(f64, 0.0);
+
+impl<T: Zero> Zero for Wrapping<T>
+where
+ Wrapping<T>: Add<Output = Wrapping<T>>,
+{
+ fn is_zero(&self) -> bool {
+ self.0.is_zero()
+ }
+
+ fn set_zero(&mut self) {
+ self.0.set_zero();
+ }
+
+ fn zero() -> Self {
+ Wrapping(T::zero())
+ }
+}
+
+/// Defines a multiplicative identity element for `Self`.
+///
+/// # Laws
+///
+/// ```text
+/// a * 1 = a ∀ a ∈ Self
+/// 1 * a = a ∀ a ∈ Self
+/// ```
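+///
+/// # Examples
+///
+/// A small usage sketch (illustrative only): a generic product that starts
+/// from the multiplicative identity.
+///
+/// ```
+/// use num_traits::One;
+///
+/// fn product<T: One + Copy>(xs: &[T]) -> T {
+///     xs.iter().fold(T::one(), |acc, &x| acc * x)
+/// }
+///
+/// assert_eq!(product(&[2, 3, 4]), 24);
+/// ```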
+pub trait One: Sized + Mul<Self, Output = Self> {
+ /// Returns the multiplicative identity element of `Self`, `1`.
+ ///
+ /// # Purity
+ ///
+ /// This function should return the same result at all times regardless of
+ /// external mutable state, for example values stored in TLS or in
+ /// `static mut`s.
+ // This cannot be an associated constant, because of bignums.
+ fn one() -> Self;
+
+ /// Sets `self` to the multiplicative identity element of `Self`, `1`.
+ fn set_one(&mut self) {
+ *self = One::one();
+ }
+
+ /// Returns `true` if `self` is equal to the multiplicative identity.
+ ///
+ /// For performance reasons, it's best to implement this manually.
+ /// After a semver bump, this method will be required, and the
+ /// `where Self: PartialEq` bound will be removed.
+ #[inline]
+ fn is_one(&self) -> bool
+ where
+ Self: PartialEq,
+ {
+ *self == Self::one()
+ }
+}
+
+macro_rules! one_impl {
+ ($t:ty, $v:expr) => {
+ impl One for $t {
+ #[inline]
+ fn one() -> $t {
+ $v
+ }
+ #[inline]
+ fn is_one(&self) -> bool {
+ *self == $v
+ }
+ }
+ };
+}
+
+one_impl!(usize, 1);
+one_impl!(u8, 1);
+one_impl!(u16, 1);
+one_impl!(u32, 1);
+one_impl!(u64, 1);
+one_impl!(u128, 1);
+
+one_impl!(isize, 1);
+one_impl!(i8, 1);
+one_impl!(i16, 1);
+one_impl!(i32, 1);
+one_impl!(i64, 1);
+one_impl!(i128, 1);
+
+one_impl!(f32, 1.0);
+one_impl!(f64, 1.0);
+
+impl<T: One> One for Wrapping<T>
+where
+ Wrapping<T>: Mul<Output = Wrapping<T>>,
+{
+ fn set_one(&mut self) {
+ self.0.set_one();
+ }
+
+ fn one() -> Self {
+ Wrapping(T::one())
+ }
+}
+
+// Some helper functions provided for backwards compatibility.
+
+/// Returns the additive identity, `0`.
+#[inline(always)]
+pub fn zero<T: Zero>() -> T {
+ Zero::zero()
+}
+
+/// Returns the multiplicative identity, `1`.
+#[inline(always)]
+pub fn one<T: One>() -> T {
+ One::one()
+}
+
+#[test]
+fn wrapping_identities() {
+ macro_rules! test_wrapping_identities {
+ ($($t:ty)+) => {
+ $(
+ assert_eq!(zero::<$t>(), zero::<Wrapping<$t>>().0);
+ assert_eq!(one::<$t>(), one::<Wrapping<$t>>().0);
+ assert_eq!((0 as $t).is_zero(), Wrapping(0 as $t).is_zero());
+ assert_eq!((1 as $t).is_zero(), Wrapping(1 as $t).is_zero());
+ )+
+ };
+ }
+
+ test_wrapping_identities!(isize i8 i16 i32 i64 usize u8 u16 u32 u64);
+}
+
+#[test]
+fn wrapping_is_zero() {
+ fn require_zero<T: Zero>(_: &T) {}
+ require_zero(&Wrapping(42));
+}
+#[test]
+fn wrapping_is_one() {
+ fn require_one<T: One>(_: &T) {}
+ require_one(&Wrapping(42));
+}
diff --git a/rust/vendor/num-traits/src/int.rs b/rust/vendor/num-traits/src/int.rs
new file mode 100644
index 0000000..e3ca72c
--- /dev/null
+++ b/rust/vendor/num-traits/src/int.rs
@@ -0,0 +1,565 @@
+use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
+
+use crate::bounds::Bounded;
+use crate::ops::checked::*;
+use crate::ops::saturating::Saturating;
+use crate::{Num, NumCast};
+
+/// Generic trait for primitive integers.
+///
+/// The `PrimInt` trait is an abstraction over the builtin primitive integer types (e.g., `u8`,
+/// `u32`, `isize`, `i128`, ...). It inherits the basic numeric traits and extends them with
+/// bitwise operators and non-wrapping arithmetic.
+///
+/// The trait explicitly inherits `Copy`, `Eq`, `Ord`, and `Sized`. The intention is that all
+/// types implementing this trait behave like primitive types that are passed by value by default
+/// and behave like builtin integers. Furthermore, the types are expected to expose the integer
+/// value in binary representation and support bitwise operators. The standard bitwise operations
+/// (e.g., bitwise-and, bitwise-or, right-shift, left-shift) are inherited and the trait extends
+/// these with introspective queries (e.g., `PrimInt::count_ones()`, `PrimInt::leading_zeros()`),
+/// bitwise combinators (e.g., `PrimInt::rotate_left()`), and endianness converters (e.g.,
+/// `PrimInt::to_be()`).
+///
+/// All `PrimInt` types are expected to be fixed-width binary integers. The width can be queried
+/// via `T::zero().count_zeros()`. The trait currently lacks a way to query the width at
+/// compile-time.
+///
+/// While a default implementation for all builtin primitive integers is provided, the trait is in
+/// no way restricted to these. Other integer types that fulfil the requirements are free to
+/// implement the trait as well.
+///
+/// This trait and many of the method names originate in the unstable `core::num::Int` trait from
+/// the rust standard library. The original trait was never stabilized and thus removed from the
+/// standard library.
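+///
+/// # Examples
+///
+/// An illustrative sketch of the width query mentioned above (the helper
+/// function is not part of the trait):
+///
+/// ```
+/// use num_traits::{PrimInt, Zero};
+///
+/// fn bit_width<T: PrimInt>() -> u32 {
+///     T::zero().count_zeros()
+/// }
+///
+/// assert_eq!(bit_width::<u8>(), 8);
+/// assert_eq!(bit_width::<i64>(), 64);
+/// ```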
+pub trait PrimInt:
+ Sized
+ + Copy
+ + Num
+ + NumCast
+ + Bounded
+ + PartialOrd
+ + Ord
+ + Eq
+ + Not<Output = Self>
+ + BitAnd<Output = Self>
+ + BitOr<Output = Self>
+ + BitXor<Output = Self>
+ + Shl<usize, Output = Self>
+ + Shr<usize, Output = Self>
+ + CheckedAdd<Output = Self>
+ + CheckedSub<Output = Self>
+ + CheckedMul<Output = Self>
+ + CheckedDiv<Output = Self>
+ + Saturating
+{
+ /// Returns the number of ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0b01001100u8;
+ ///
+ /// assert_eq!(n.count_ones(), 3);
+ /// ```
+ fn count_ones(self) -> u32;
+
+ /// Returns the number of zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0b01001100u8;
+ ///
+ /// assert_eq!(n.count_zeros(), 5);
+ /// ```
+ fn count_zeros(self) -> u32;
+
+ /// Returns the number of leading ones in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0xF00Du16;
+ ///
+ /// assert_eq!(n.leading_ones(), 4);
+ /// ```
+ fn leading_ones(self) -> u32 {
+ (!self).leading_zeros()
+ }
+
+ /// Returns the number of leading zeros in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0b0101000u16;
+ ///
+ /// assert_eq!(n.leading_zeros(), 10);
+ /// ```
+ fn leading_zeros(self) -> u32;
+
+ /// Returns the number of trailing ones in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0xBEEFu16;
+ ///
+ /// assert_eq!(n.trailing_ones(), 4);
+ /// ```
+ fn trailing_ones(self) -> u32 {
+ (!self).trailing_zeros()
+ }
+
+ /// Returns the number of trailing zeros in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0b0101000u16;
+ ///
+ /// assert_eq!(n.trailing_zeros(), 3);
+ /// ```
+ fn trailing_zeros(self) -> u32;
+
+ /// Shifts the bits to the left by a specified amount, `n`, wrapping
+ /// the truncated bits to the end of the resulting integer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0x3456789ABCDEF012u64;
+ ///
+ /// assert_eq!(n.rotate_left(12), m);
+ /// ```
+ fn rotate_left(self, n: u32) -> Self;
+
+ /// Shifts the bits to the right by a specified amount, `n`, wrapping
+ /// the truncated bits to the beginning of the resulting integer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0xDEF0123456789ABCu64;
+ ///
+ /// assert_eq!(n.rotate_right(12), m);
+ /// ```
+ fn rotate_right(self, n: u32) -> Self;
+
+ /// Shifts the bits to the left by a specified amount, `n`, filling
+ /// zeros in the least significant bits.
+ ///
+ /// This is bitwise equivalent to signed `Shl`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0x3456789ABCDEF000u64;
+ ///
+ /// assert_eq!(n.signed_shl(12), m);
+ /// ```
+ fn signed_shl(self, n: u32) -> Self;
+
+ /// Shifts the bits to the right by a specified amount, `n`, copying
+ /// the "sign bit" in the most significant bits even for unsigned types.
+ ///
+ /// This is bitwise equivalent to signed `Shr`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0xFEDCBA9876543210u64;
+ /// let m = 0xFFFFEDCBA9876543u64;
+ ///
+ /// assert_eq!(n.signed_shr(12), m);
+ /// ```
+ fn signed_shr(self, n: u32) -> Self;
+
+ /// Shifts the bits to the left by a specified amount, `n`, filling
+ /// zeros in the least significant bits.
+ ///
+ /// This is bitwise equivalent to unsigned `Shl`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x0123456789ABCDEFi64;
+ /// let m = 0x3456789ABCDEF000i64;
+ ///
+ /// assert_eq!(n.unsigned_shl(12), m);
+ /// ```
+ fn unsigned_shl(self, n: u32) -> Self;
+
+ /// Shifts the bits to the right by a specified amount, `n`, filling
+ /// zeros in the most significant bits.
+ ///
+ /// This is bitwise equivalent to unsigned `Shr`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = -8i8; // 0b11111000
+ /// let m = 62i8; // 0b00111110
+ ///
+ /// assert_eq!(n.unsigned_shr(2), m);
+ /// ```
+ fn unsigned_shr(self, n: u32) -> Self;
+
+ /// Reverses the byte order of the integer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0xEFCDAB8967452301u64;
+ ///
+ /// assert_eq!(n.swap_bytes(), m);
+ /// ```
+ fn swap_bytes(self) -> Self;
+
+ /// Reverses the order of bits in the integer.
+ ///
+ /// The least significant bit becomes the most significant bit, second least-significant bit
+ /// becomes second most-significant bit, etc.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x12345678u32;
+ /// let m = 0x1e6a2c48u32;
+ ///
+ /// assert_eq!(n.reverse_bits(), m);
+ /// assert_eq!(0u32.reverse_bits(), 0);
+ /// ```
+ fn reverse_bits(self) -> Self {
+ reverse_bits_fallback(self)
+ }
+
+ /// Convert an integer from big endian to the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are swapped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(u64::from_be(n), n)
+ /// } else {
+ /// assert_eq!(u64::from_be(n), n.swap_bytes())
+ /// }
+ /// ```
+ fn from_be(x: Self) -> Self;
+
+ /// Convert an integer from little endian to the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are swapped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(u64::from_le(n), n)
+ /// } else {
+ /// assert_eq!(u64::from_le(n), n.swap_bytes())
+ /// }
+ /// ```
+ fn from_le(x: Self) -> Self;
+
+ /// Convert `self` to big endian from the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are swapped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(n.to_be(), n)
+ /// } else {
+ /// assert_eq!(n.to_be(), n.swap_bytes())
+ /// }
+ /// ```
+ fn to_be(self) -> Self;
+
+ /// Convert `self` to little endian from the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are swapped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(n.to_le(), n)
+ /// } else {
+ /// assert_eq!(n.to_le(), n.swap_bytes())
+ /// }
+ /// ```
+ fn to_le(self) -> Self;
+
+ /// Raises self to the power of `exp`, using exponentiation by squaring.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::PrimInt;
+ ///
+ /// assert_eq!(2i32.pow(4), 16);
+ /// ```
+ fn pow(self, exp: u32) -> Self;
+}
+
+fn one_per_byte<P: PrimInt>() -> P {
+ // i8, u8: return 0x01
+ // i16, u16: return 0x0101 = (0x01 << 8) | 0x01
+ // i32, u32: return 0x01010101 = (0x0101 << 16) | 0x0101
+ // ...
+ let mut ret = P::one();
+ let mut shift = 8;
+ let mut b = ret.count_zeros() >> 3;
+ while b != 0 {
+ ret = (ret << shift) | ret;
+ shift <<= 1;
+ b >>= 1;
+ }
+ ret
+}
+
+fn reverse_bits_fallback<P: PrimInt>(i: P) -> P {
+ let rep_01: P = one_per_byte();
+ let rep_03 = (rep_01 << 1) | rep_01;
+ let rep_05 = (rep_01 << 2) | rep_01;
+ let rep_0f = (rep_03 << 2) | rep_03;
+ let rep_33 = (rep_03 << 4) | rep_03;
+ let rep_55 = (rep_05 << 4) | rep_05;
+
+ // The code above is only used to determine rep_0f, rep_33, and rep_55;
+ // the optimizer should be able to evaluate it at compile time.
+ let mut ret = i.swap_bytes();
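+ // With the bytes already reversed, swap the nibbles within each byte, then
+ // the bit pairs within each nibble, then the adjacent bits within each pair.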
+ ret = ((ret & rep_0f) << 4) | ((ret >> 4) & rep_0f);
+ ret = ((ret & rep_33) << 2) | ((ret >> 2) & rep_33);
+ ret = ((ret & rep_55) << 1) | ((ret >> 1) & rep_55);
+ ret
+}
+
+macro_rules! prim_int_impl {
+ ($T:ty, $S:ty, $U:ty) => {
+ impl PrimInt for $T {
+ #[inline]
+ fn count_ones(self) -> u32 {
+ <$T>::count_ones(self)
+ }
+
+ #[inline]
+ fn count_zeros(self) -> u32 {
+ <$T>::count_zeros(self)
+ }
+
+ #[cfg(has_leading_trailing_ones)]
+ #[inline]
+ fn leading_ones(self) -> u32 {
+ <$T>::leading_ones(self)
+ }
+
+ #[inline]
+ fn leading_zeros(self) -> u32 {
+ <$T>::leading_zeros(self)
+ }
+
+ #[cfg(has_leading_trailing_ones)]
+ #[inline]
+ fn trailing_ones(self) -> u32 {
+ <$T>::trailing_ones(self)
+ }
+
+ #[inline]
+ fn trailing_zeros(self) -> u32 {
+ <$T>::trailing_zeros(self)
+ }
+
+ #[inline]
+ fn rotate_left(self, n: u32) -> Self {
+ <$T>::rotate_left(self, n)
+ }
+
+ #[inline]
+ fn rotate_right(self, n: u32) -> Self {
+ <$T>::rotate_right(self, n)
+ }
+
+ #[inline]
+ fn signed_shl(self, n: u32) -> Self {
+ ((self as $S) << n) as $T
+ }
+
+ #[inline]
+ fn signed_shr(self, n: u32) -> Self {
+ ((self as $S) >> n) as $T
+ }
+
+ #[inline]
+ fn unsigned_shl(self, n: u32) -> Self {
+ ((self as $U) << n) as $T
+ }
+
+ #[inline]
+ fn unsigned_shr(self, n: u32) -> Self {
+ ((self as $U) >> n) as $T
+ }
+
+ #[inline]
+ fn swap_bytes(self) -> Self {
+ <$T>::swap_bytes(self)
+ }
+
+ #[cfg(has_reverse_bits)]
+ #[inline]
+ fn reverse_bits(self) -> Self {
+ <$T>::reverse_bits(self)
+ }
+
+ #[inline]
+ fn from_be(x: Self) -> Self {
+ <$T>::from_be(x)
+ }
+
+ #[inline]
+ fn from_le(x: Self) -> Self {
+ <$T>::from_le(x)
+ }
+
+ #[inline]
+ fn to_be(self) -> Self {
+ <$T>::to_be(self)
+ }
+
+ #[inline]
+ fn to_le(self) -> Self {
+ <$T>::to_le(self)
+ }
+
+ #[inline]
+ fn pow(self, exp: u32) -> Self {
+ <$T>::pow(self, exp)
+ }
+ }
+ };
+}
+
+// prim_int_impl!(type, signed, unsigned);
+prim_int_impl!(u8, i8, u8);
+prim_int_impl!(u16, i16, u16);
+prim_int_impl!(u32, i32, u32);
+prim_int_impl!(u64, i64, u64);
+prim_int_impl!(u128, i128, u128);
+prim_int_impl!(usize, isize, usize);
+prim_int_impl!(i8, i8, u8);
+prim_int_impl!(i16, i16, u16);
+prim_int_impl!(i32, i32, u32);
+prim_int_impl!(i64, i64, u64);
+prim_int_impl!(i128, i128, u128);
+prim_int_impl!(isize, isize, usize);
+
+#[cfg(test)]
+mod tests {
+ use crate::int::PrimInt;
+
+ #[test]
+ pub fn reverse_bits() {
+ use core::{i16, i32, i64, i8};
+
+ assert_eq!(
+ PrimInt::reverse_bits(0x0123_4567_89ab_cdefu64),
+ 0xf7b3_d591_e6a2_c480
+ );
+
+ assert_eq!(PrimInt::reverse_bits(0i8), 0);
+ assert_eq!(PrimInt::reverse_bits(-1i8), -1);
+ assert_eq!(PrimInt::reverse_bits(1i8), i8::MIN);
+ assert_eq!(PrimInt::reverse_bits(i8::MIN), 1);
+ assert_eq!(PrimInt::reverse_bits(-2i8), i8::MAX);
+ assert_eq!(PrimInt::reverse_bits(i8::MAX), -2);
+
+ assert_eq!(PrimInt::reverse_bits(0i16), 0);
+ assert_eq!(PrimInt::reverse_bits(-1i16), -1);
+ assert_eq!(PrimInt::reverse_bits(1i16), i16::MIN);
+ assert_eq!(PrimInt::reverse_bits(i16::MIN), 1);
+ assert_eq!(PrimInt::reverse_bits(-2i16), i16::MAX);
+ assert_eq!(PrimInt::reverse_bits(i16::MAX), -2);
+
+ assert_eq!(PrimInt::reverse_bits(0i32), 0);
+ assert_eq!(PrimInt::reverse_bits(-1i32), -1);
+ assert_eq!(PrimInt::reverse_bits(1i32), i32::MIN);
+ assert_eq!(PrimInt::reverse_bits(i32::MIN), 1);
+ assert_eq!(PrimInt::reverse_bits(-2i32), i32::MAX);
+ assert_eq!(PrimInt::reverse_bits(i32::MAX), -2);
+
+ assert_eq!(PrimInt::reverse_bits(0i64), 0);
+ assert_eq!(PrimInt::reverse_bits(-1i64), -1);
+ assert_eq!(PrimInt::reverse_bits(1i64), i64::MIN);
+ assert_eq!(PrimInt::reverse_bits(i64::MIN), 1);
+ assert_eq!(PrimInt::reverse_bits(-2i64), i64::MAX);
+ assert_eq!(PrimInt::reverse_bits(i64::MAX), -2);
+ }
+
+ #[test]
+ pub fn reverse_bits_i128() {
+ use core::i128;
+
+ assert_eq!(PrimInt::reverse_bits(0i128), 0);
+ assert_eq!(PrimInt::reverse_bits(-1i128), -1);
+ assert_eq!(PrimInt::reverse_bits(1i128), i128::MIN);
+ assert_eq!(PrimInt::reverse_bits(i128::MIN), 1);
+ assert_eq!(PrimInt::reverse_bits(-2i128), i128::MAX);
+ assert_eq!(PrimInt::reverse_bits(i128::MAX), -2);
+ }
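+
+    // Illustrative addition (not part of the upstream source): a small sanity
+    // check of the signed/unsigned shift helpers defined above. A signed shift
+    // of an unsigned value propagates the reinterpreted sign bit, while an
+    // unsigned shift fills with zeros.
+    #[test]
+    pub fn signed_vs_unsigned_shr_sketch() {
+        assert_eq!(PrimInt::signed_shr(0x80u8, 1), 0xc0);
+        assert_eq!(PrimInt::unsigned_shr(0x80u8, 1), 0x40);
+    }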
+}
diff --git a/rust/vendor/num-traits/src/lib.rs b/rust/vendor/num-traits/src/lib.rs
new file mode 100644
index 0000000..54dab6e
--- /dev/null
+++ b/rust/vendor/num-traits/src/lib.rs
@@ -0,0 +1,635 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Numeric traits for generic mathematics
+//!
+//! ## Compatibility
+//!
+//! The `num-traits` crate is tested for rustc 1.31 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num-traits/0.2")]
+#![deny(unconditional_recursion)]
+#![no_std]
+
+// Need to explicitly bring the crate in for inherent float methods
+#[cfg(feature = "std")]
+extern crate std;
+
+use core::fmt;
+use core::num::Wrapping;
+use core::ops::{Add, Div, Mul, Rem, Sub};
+use core::ops::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign};
+
+pub use crate::bounds::Bounded;
+#[cfg(any(feature = "std", feature = "libm"))]
+pub use crate::float::Float;
+pub use crate::float::FloatConst;
+// pub use real::{FloatCore, Real}; // NOTE: Don't do this, it breaks `use num_traits::*;`.
+pub use crate::cast::{cast, AsPrimitive, FromPrimitive, NumCast, ToPrimitive};
+pub use crate::identities::{one, zero, One, Zero};
+pub use crate::int::PrimInt;
+pub use crate::ops::bytes::{FromBytes, ToBytes};
+pub use crate::ops::checked::{
+ CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedShl, CheckedShr, CheckedSub,
+};
+pub use crate::ops::euclid::{CheckedEuclid, Euclid};
+pub use crate::ops::inv::Inv;
+pub use crate::ops::mul_add::{MulAdd, MulAddAssign};
+pub use crate::ops::saturating::{Saturating, SaturatingAdd, SaturatingMul, SaturatingSub};
+pub use crate::ops::wrapping::{
+ WrappingAdd, WrappingMul, WrappingNeg, WrappingShl, WrappingShr, WrappingSub,
+};
+pub use crate::pow::{checked_pow, pow, Pow};
+pub use crate::sign::{abs, abs_sub, signum, Signed, Unsigned};
+
+#[macro_use]
+mod macros;
+
+pub mod bounds;
+pub mod cast;
+pub mod float;
+pub mod identities;
+pub mod int;
+pub mod ops;
+pub mod pow;
+pub mod real;
+pub mod sign;
+
+/// The base trait for numeric types, covering `0` and `1` values,
+/// comparisons, basic numeric operations, and string conversion.
+pub trait Num: PartialEq + Zero + One + NumOps {
+ type FromStrRadixErr;
+
+ /// Convert from a string and radix (typically `2..=36`).
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use num_traits::Num;
+ ///
+ /// let result = <i32 as Num>::from_str_radix("27", 10);
+ /// assert_eq!(result, Ok(27));
+ ///
+ /// let result = <i32 as Num>::from_str_radix("foo", 10);
+ /// assert!(result.is_err());
+ /// ```
+ ///
+ /// # Supported radices
+ ///
+ /// The exact range of supported radices is at the discretion of each type implementation. For
+ /// primitive integers, this is implemented by the inherent `from_str_radix` methods in the
+ /// standard library, which **panic** if the radix is not in the range from 2 to 36. The
+ /// implementation in this crate for primitive floats is similar.
+ ///
+ /// For third-party types, it is suggested that implementations should follow suit and at least
+ /// accept `2..=36` without panicking, but an `Err` may be returned for any unsupported radix.
+ /// It's possible that a type might not even support the common radix 10, nor any, if string
+ /// parsing doesn't make sense for that type.
+ fn from_str_radix(str: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr>;
+}
+
+/// Generic trait for types implementing basic numeric operations
+///
+/// This is automatically implemented for types which implement the operators.
+pub trait NumOps<Rhs = Self, Output = Self>:
+ Add<Rhs, Output = Output>
+ + Sub<Rhs, Output = Output>
+ + Mul<Rhs, Output = Output>
+ + Div<Rhs, Output = Output>
+ + Rem<Rhs, Output = Output>
+{
+}
+
+impl<T, Rhs, Output> NumOps<Rhs, Output> for T where
+ T: Add<Rhs, Output = Output>
+ + Sub<Rhs, Output = Output>
+ + Mul<Rhs, Output = Output>
+ + Div<Rhs, Output = Output>
+ + Rem<Rhs, Output = Output>
+{
+}
+
+/// The trait for `Num` types which also implement numeric operations taking
+/// the second operand by reference.
+///
+/// This is automatically implemented for types which implement the operators.
+pub trait NumRef: Num + for<'r> NumOps<&'r Self> {}
+impl<T> NumRef for T where T: Num + for<'r> NumOps<&'r T> {}
+
+/// The trait for `Num` references which implement numeric operations, taking the
+/// second operand either by value or by reference.
+///
+/// This is automatically implemented for all types which implement the operators. Note that it
+/// covers every type implementing the operations, regardless of whether it is a reference or
+/// related to `Num`.
+pub trait RefNum<Base>: NumOps<Base, Base> + for<'r> NumOps<&'r Base, Base> {}
+impl<T, Base> RefNum<Base> for T where T: NumOps<Base, Base> + for<'r> NumOps<&'r Base, Base> {}
+
+/// Generic trait for types implementing numeric assignment operators (like `+=`).
+///
+/// This is automatically implemented for types which implement the operators.
+pub trait NumAssignOps<Rhs = Self>:
+ AddAssign<Rhs> + SubAssign<Rhs> + MulAssign<Rhs> + DivAssign<Rhs> + RemAssign<Rhs>
+{
+}
+
+impl<T, Rhs> NumAssignOps<Rhs> for T where
+ T: AddAssign<Rhs> + SubAssign<Rhs> + MulAssign<Rhs> + DivAssign<Rhs> + RemAssign<Rhs>
+{
+}
+
+/// The trait for `Num` types which also implement assignment operators.
+///
+/// This is automatically implemented for types which implement the operators.
+pub trait NumAssign: Num + NumAssignOps {}
+impl<T> NumAssign for T where T: Num + NumAssignOps {}
+
+/// The trait for `NumAssign` types which also implement assignment operations
+/// taking the second operand by reference.
+///
+/// This is automatically implemented for types which implement the operators.
+pub trait NumAssignRef: NumAssign + for<'r> NumAssignOps<&'r Self> {}
+impl<T> NumAssignRef for T where T: NumAssign + for<'r> NumAssignOps<&'r T> {}
+
+macro_rules! int_trait_impl {
+ ($name:ident for $($t:ty)*) => ($(
+ impl $name for $t {
+ type FromStrRadixErr = ::core::num::ParseIntError;
+ #[inline]
+ fn from_str_radix(s: &str, radix: u32)
+ -> Result<Self, ::core::num::ParseIntError>
+ {
+ <$t>::from_str_radix(s, radix)
+ }
+ }
+ )*)
+}
+int_trait_impl!(Num for usize u8 u16 u32 u64 u128);
+int_trait_impl!(Num for isize i8 i16 i32 i64 i128);
+
+impl<T: Num> Num for Wrapping<T>
+where
+ Wrapping<T>: NumOps,
+{
+ type FromStrRadixErr = T::FromStrRadixErr;
+ fn from_str_radix(str: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {
+ T::from_str_radix(str, radix).map(Wrapping)
+ }
+}
+
+#[derive(Debug)]
+pub enum FloatErrorKind {
+ Empty,
+ Invalid,
+}
+// FIXME: core::num::ParseFloatError is stable in 1.0, but opaque to us,
+// so there's not really any way for us to reuse it.
+#[derive(Debug)]
+pub struct ParseFloatError {
+ pub kind: FloatErrorKind,
+}
+
+impl fmt::Display for ParseFloatError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let description = match self.kind {
+ FloatErrorKind::Empty => "cannot parse float from empty string",
+ FloatErrorKind::Invalid => "invalid float literal",
+ };
+
+ description.fmt(f)
+ }
+}
+
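+// Compares `a` against the all-lowercase pattern `b` byte-by-byte: ORing in 0x20
+// only when a byte is an ASCII uppercase letter maps 'A'..='Z' onto 'a'..='z'
+// while leaving every other byte unchanged.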
+fn str_to_ascii_lower_eq_str(a: &str, b: &str) -> bool {
+ a.len() == b.len()
+ && a.bytes().zip(b.bytes()).all(|(a, b)| {
+ let a_to_ascii_lower = a | (((b'A' <= a && a <= b'Z') as u8) << 5);
+ a_to_ascii_lower == b
+ })
+}
+
+// FIXME: The standard library from_str_radix on floats was deprecated, so we're stuck
+// with this implementation ourselves until we want to make a breaking change.
+// (would have to drop it from `Num` though)
+macro_rules! float_trait_impl {
+ ($name:ident for $($t:ident)*) => ($(
+ impl $name for $t {
+ type FromStrRadixErr = ParseFloatError;
+
+ fn from_str_radix(src: &str, radix: u32)
+ -> Result<Self, Self::FromStrRadixErr>
+ {
+ use self::FloatErrorKind::*;
+ use self::ParseFloatError as PFE;
+
+ // Special case radix 10 to use more accurate standard library implementation
+ if radix == 10 {
+ return src.parse().map_err(|_| PFE {
+ kind: if src.is_empty() { Empty } else { Invalid },
+ });
+ }
+
+ // Special values
+ if str_to_ascii_lower_eq_str(src, "inf")
+ || str_to_ascii_lower_eq_str(src, "infinity")
+ {
+ return Ok(core::$t::INFINITY);
+ } else if str_to_ascii_lower_eq_str(src, "-inf")
+ || str_to_ascii_lower_eq_str(src, "-infinity")
+ {
+ return Ok(core::$t::NEG_INFINITY);
+ } else if str_to_ascii_lower_eq_str(src, "nan") {
+ return Ok(core::$t::NAN);
+ } else if str_to_ascii_lower_eq_str(src, "-nan") {
+ return Ok(-core::$t::NAN);
+ }
+
+ fn slice_shift_char(src: &str) -> Option<(char, &str)> {
+ let mut chars = src.chars();
+ Some((chars.next()?, chars.as_str()))
+ }
+
+ let (is_positive, src) = match slice_shift_char(src) {
+ None => return Err(PFE { kind: Empty }),
+ Some(('-', "")) => return Err(PFE { kind: Empty }),
+ Some(('-', src)) => (false, src),
+ Some((_, _)) => (true, src),
+ };
+
+ // The significand to accumulate
+ let mut sig = if is_positive { 0.0 } else { -0.0 };
+ // Necessary to detect overflow
+ let mut prev_sig = sig;
+ let mut cs = src.chars().enumerate();
+ // Exponent prefix and exponent index offset
+ let mut exp_info = None::<(char, usize)>;
+
+ // Parse the integer part of the significand
+ for (i, c) in cs.by_ref() {
+ match c.to_digit(radix) {
+ Some(digit) => {
+ // shift significand one digit left
+ sig *= radix as $t;
+
+ // add/subtract current digit depending on sign
+ if is_positive {
+ sig += (digit as isize) as $t;
+ } else {
+ sig -= (digit as isize) as $t;
+ }
+
+ // Detect overflow by comparing to last value, except
+ // if we've not seen any non-zero digits.
+ if prev_sig != 0.0 {
+ if is_positive && sig <= prev_sig
+ { return Ok(core::$t::INFINITY); }
+ if !is_positive && sig >= prev_sig
+ { return Ok(core::$t::NEG_INFINITY); }
+
+ // Detect overflow by reversing the shift-and-add process
+ if is_positive && (prev_sig != (sig - digit as $t) / radix as $t)
+ { return Ok(core::$t::INFINITY); }
+ if !is_positive && (prev_sig != (sig + digit as $t) / radix as $t)
+ { return Ok(core::$t::NEG_INFINITY); }
+ }
+ prev_sig = sig;
+ },
+ None => match c {
+ 'e' | 'E' | 'p' | 'P' => {
+ exp_info = Some((c, i + 1));
+ break; // start of exponent
+ },
+ '.' => {
+ break; // start of fractional part
+ },
+ _ => {
+ return Err(PFE { kind: Invalid });
+ },
+ },
+ }
+ }
+
+ // If we are not yet at the exponent parse the fractional
+ // part of the significand
+ if exp_info.is_none() {
+ let mut power = 1.0;
+ for (i, c) in cs.by_ref() {
+ match c.to_digit(radix) {
+ Some(digit) => {
+ // Decrease power one order of magnitude
+ power /= radix as $t;
+ // add/subtract current digit depending on sign
+ sig = if is_positive {
+ sig + (digit as $t) * power
+ } else {
+ sig - (digit as $t) * power
+ };
+ // Detect overflow by comparing to last value
+ if is_positive && sig < prev_sig
+ { return Ok(core::$t::INFINITY); }
+ if !is_positive && sig > prev_sig
+ { return Ok(core::$t::NEG_INFINITY); }
+ prev_sig = sig;
+ },
+ None => match c {
+ 'e' | 'E' | 'p' | 'P' => {
+ exp_info = Some((c, i + 1));
+ break; // start of exponent
+ },
+ _ => {
+ return Err(PFE { kind: Invalid });
+ },
+ },
+ }
+ }
+ }
+
+ // Parse and calculate the exponent
+ let exp = match exp_info {
+ Some((c, offset)) => {
+ let base = match c {
+ 'E' | 'e' if radix == 10 => 10.0,
+ 'P' | 'p' if radix == 16 => 2.0,
+ _ => return Err(PFE { kind: Invalid }),
+ };
+
+ // Parse the exponent as decimal integer
+ let src = &src[offset..];
+ let (is_positive, exp) = match slice_shift_char(src) {
+ Some(('-', src)) => (false, src.parse::<usize>()),
+ Some(('+', src)) => (true, src.parse::<usize>()),
+ Some((_, _)) => (true, src.parse::<usize>()),
+ None => return Err(PFE { kind: Invalid }),
+ };
+
+ #[cfg(feature = "std")]
+ fn pow(base: $t, exp: usize) -> $t {
+ Float::powi(base, exp as i32)
+ }
+ // otherwise uses the generic `pow` from the root
+
+ match (is_positive, exp) {
+ (true, Ok(exp)) => pow(base, exp),
+ (false, Ok(exp)) => 1.0 / pow(base, exp),
+ (_, Err(_)) => return Err(PFE { kind: Invalid }),
+ }
+ },
+ None => 1.0, // no exponent
+ };
+
+ Ok(sig * exp)
+ }
+ }
+ )*)
+}
+float_trait_impl!(Num for f32 f64);
+
+/// A value bounded by a minimum and a maximum
+///
+/// If input is less than min then this returns min.
+/// If input is greater than max then this returns max.
+/// Otherwise this returns input.
+///
+/// **Panics** in debug mode if `!(min <= max)`.
+#[inline]
+pub fn clamp<T: PartialOrd>(input: T, min: T, max: T) -> T {
+ debug_assert!(min <= max, "min must be less than or equal to max");
+ if input < min {
+ min
+ } else if input > max {
+ max
+ } else {
+ input
+ }
+}
+
+/// A value bounded by a minimum value
+///
+/// If input is less than min then this returns min.
+/// Otherwise this returns input.
+/// `clamp_min(std::f32::NAN, 1.0)` preserves `NAN`, unlike `f32::min(std::f32::NAN, 1.0)`, which returns `1.0`.
+///
+/// **Panics** in debug mode if `!(min == min)`. (This occurs if `min` is `NAN`.)
+#[inline]
+#[allow(clippy::eq_op)]
+pub fn clamp_min<T: PartialOrd>(input: T, min: T) -> T {
+ debug_assert!(min == min, "min must not be NAN");
+ if input < min {
+ min
+ } else {
+ input
+ }
+}
+
+/// A value bounded by a maximum value
+///
+/// If input is greater than max then this returns max.
+/// Otherwise this returns input.
+/// `clamp_max(std::f32::NAN, 1.0)` preserves `NAN`, unlike `f32::max(std::f32::NAN, 1.0)`, which returns `1.0`.
+///
+/// **Panics** in debug mode if `!(max == max)`. (This occurs if `max` is `NAN`.)
+#[inline]
+#[allow(clippy::eq_op)]
+pub fn clamp_max<T: PartialOrd>(input: T, max: T) -> T {
+ debug_assert!(max == max, "max must not be NAN");
+ if input > max {
+ max
+ } else {
+ input
+ }
+}
+
+#[test]
+fn clamp_test() {
+ // Int test
+ assert_eq!(1, clamp(1, -1, 2));
+ assert_eq!(-1, clamp(-2, -1, 2));
+ assert_eq!(2, clamp(3, -1, 2));
+ assert_eq!(1, clamp_min(1, -1));
+ assert_eq!(-1, clamp_min(-2, -1));
+ assert_eq!(-1, clamp_max(1, -1));
+ assert_eq!(-2, clamp_max(-2, -1));
+
+ // Float test
+ assert_eq!(1.0, clamp(1.0, -1.0, 2.0));
+ assert_eq!(-1.0, clamp(-2.0, -1.0, 2.0));
+ assert_eq!(2.0, clamp(3.0, -1.0, 2.0));
+ assert_eq!(1.0, clamp_min(1.0, -1.0));
+ assert_eq!(-1.0, clamp_min(-2.0, -1.0));
+ assert_eq!(-1.0, clamp_max(1.0, -1.0));
+ assert_eq!(-2.0, clamp_max(-2.0, -1.0));
+ assert!(clamp(::core::f32::NAN, -1.0, 1.0).is_nan());
+ assert!(clamp_min(::core::f32::NAN, 1.0).is_nan());
+ assert!(clamp_max(::core::f32::NAN, 1.0).is_nan());
+}
+
+#[test]
+#[should_panic]
+#[cfg(debug_assertions)]
+fn clamp_nan_min() {
+ clamp(0., ::core::f32::NAN, 1.);
+}
+
+#[test]
+#[should_panic]
+#[cfg(debug_assertions)]
+fn clamp_nan_max() {
+ clamp(0., -1., ::core::f32::NAN);
+}
+
+#[test]
+#[should_panic]
+#[cfg(debug_assertions)]
+fn clamp_nan_min_max() {
+ clamp(0., ::core::f32::NAN, ::core::f32::NAN);
+}
+
+#[test]
+#[should_panic]
+#[cfg(debug_assertions)]
+fn clamp_min_nan_min() {
+ clamp_min(0., ::core::f32::NAN);
+}
+
+#[test]
+#[should_panic]
+#[cfg(debug_assertions)]
+fn clamp_max_nan_max() {
+ clamp_max(0., ::core::f32::NAN);
+}
+
+#[test]
+fn from_str_radix_unwrap() {
+ // The Result error must impl Debug to allow unwrap()
+
+ let i: i32 = Num::from_str_radix("0", 10).unwrap();
+ assert_eq!(i, 0);
+
+ let f: f32 = Num::from_str_radix("0.0", 10).unwrap();
+ assert_eq!(f, 0.0);
+}
+
+#[test]
+fn from_str_radix_multi_byte_fail() {
+ // Ensure parsing doesn't panic, even on invalid sign characters
+ assert!(f32::from_str_radix("™0.2", 10).is_err());
+
+ // Even when parsing the exponent sign
+ assert!(f32::from_str_radix("0.2E™1", 10).is_err());
+}
+
+#[test]
+fn from_str_radix_ignore_case() {
+ assert_eq!(
+ f32::from_str_radix("InF", 16).unwrap(),
+ ::core::f32::INFINITY
+ );
+ assert_eq!(
+ f32::from_str_radix("InfinitY", 16).unwrap(),
+ ::core::f32::INFINITY
+ );
+ assert_eq!(
+ f32::from_str_radix("-InF", 8).unwrap(),
+ ::core::f32::NEG_INFINITY
+ );
+ assert_eq!(
+ f32::from_str_radix("-InfinitY", 8).unwrap(),
+ ::core::f32::NEG_INFINITY
+ );
+ assert!(f32::from_str_radix("nAn", 4).unwrap().is_nan());
+ assert!(f32::from_str_radix("-nAn", 4).unwrap().is_nan());
+}
+
+#[test]
+fn wrapping_is_num() {
+ fn require_num<T: Num>(_: &T) {}
+ require_num(&Wrapping(42_u32));
+ require_num(&Wrapping(-42));
+}
+
+#[test]
+fn wrapping_from_str_radix() {
+ macro_rules! test_wrapping_from_str_radix {
+ ($($t:ty)+) => {
+ $(
+ for &(s, r) in &[("42", 10), ("42", 2), ("-13.0", 10), ("foo", 10)] {
+ let w = Wrapping::<$t>::from_str_radix(s, r).map(|w| w.0);
+ assert_eq!(w, <$t as Num>::from_str_radix(s, r));
+ }
+ )+
+ };
+ }
+
+ test_wrapping_from_str_radix!(usize u8 u16 u32 u64 isize i8 i16 i32 i64);
+}
+
+#[test]
+fn check_num_ops() {
+ fn compute<T: Num + Copy>(x: T, y: T) -> T {
+ x * y / y % y + y - y
+ }
+ assert_eq!(compute(1, 2), 1)
+}
+
+#[test]
+fn check_numref_ops() {
+ fn compute<T: NumRef>(x: T, y: &T) -> T {
+ x * y / y % y + y - y
+ }
+ assert_eq!(compute(1, &2), 1)
+}
+
+#[test]
+fn check_refnum_ops() {
+ fn compute<T: Copy>(x: &T, y: T) -> T
+ where
+ for<'a> &'a T: RefNum<T>,
+ {
+ &(&(&(&(x * y) / y) % y) + y) - y
+ }
+ assert_eq!(compute(&1, 2), 1)
+}
+
+#[test]
+fn check_refref_ops() {
+ fn compute<T>(x: &T, y: &T) -> T
+ where
+ for<'a> &'a T: RefNum<T>,
+ {
+ &(&(&(&(x * y) / y) % y) + y) - y
+ }
+ assert_eq!(compute(&1, &2), 1)
+}
+
+#[test]
+fn check_numassign_ops() {
+ fn compute<T: NumAssign + Copy>(mut x: T, y: T) -> T {
+ x *= y;
+ x /= y;
+ x %= y;
+ x += y;
+ x -= y;
+ x
+ }
+ assert_eq!(compute(1, 2), 1)
+}
+
+#[test]
+fn check_numassignref_ops() {
+ fn compute<T: NumAssignRef + Copy>(mut x: T, y: &T) -> T {
+ x *= y;
+ x /= y;
+ x %= y;
+ x += y;
+ x -= y;
+ x
+ }
+ assert_eq!(compute(1, &2), 1)
+}
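+
+#[test]
+fn from_str_radix_hex_sketch() {
+    // Illustrative addition (not part of the upstream source): for radix 16 the
+    // float parser above accepts hexadecimal significands and a binary `p`
+    // exponent, mirroring the integer-part and exponent branches it implements.
+    assert_eq!(f32::from_str_radix("ff", 16).unwrap(), 255.0);
+    assert_eq!(f32::from_str_radix("1p3", 16).unwrap(), 8.0);
+}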
diff --git a/rust/vendor/num-traits/src/macros.rs b/rust/vendor/num-traits/src/macros.rs
new file mode 100644
index 0000000..b97758e
--- /dev/null
+++ b/rust/vendor/num-traits/src/macros.rs
@@ -0,0 +1,44 @@
+// not all are used in all feature configurations
+#![allow(unused)]
+
+/// Forward a method to an inherent method or a base trait method.
+macro_rules! forward {
+ ($( Self :: $method:ident ( self $( , $arg:ident : $ty:ty )* ) -> $ret:ty ; )*)
+ => {$(
+ #[inline]
+ fn $method(self $( , $arg : $ty )* ) -> $ret {
+ Self::$method(self $( , $arg )* )
+ }
+ )*};
+ ($( $base:ident :: $method:ident ( self $( , $arg:ident : $ty:ty )* ) -> $ret:ty ; )*)
+ => {$(
+ #[inline]
+ fn $method(self $( , $arg : $ty )* ) -> $ret {
+ <Self as $base>::$method(self $( , $arg )* )
+ }
+ )*};
+ ($( $base:ident :: $method:ident ( $( $arg:ident : $ty:ty ),* ) -> $ret:ty ; )*)
+ => {$(
+ #[inline]
+ fn $method( $( $arg : $ty ),* ) -> $ret {
+ <Self as $base>::$method( $( $arg ),* )
+ }
+ )*};
+ ($( $imp:path as $method:ident ( self $( , $arg:ident : $ty:ty )* ) -> $ret:ty ; )*)
+ => {$(
+ #[inline]
+ fn $method(self $( , $arg : $ty )* ) -> $ret {
+ $imp(self $( , $arg )* )
+ }
+ )*};
+}
+
+macro_rules! constant {
+ ($( $method:ident () -> $ret:expr ; )*)
+ => {$(
+ #[inline]
+ fn $method() -> Self {
+ $ret
+ }
+ )*};
+}
diff --git a/rust/vendor/num-traits/src/ops/bytes.rs b/rust/vendor/num-traits/src/ops/bytes.rs
new file mode 100644
index 0000000..4df9ecd
--- /dev/null
+++ b/rust/vendor/num-traits/src/ops/bytes.rs
@@ -0,0 +1,403 @@
+use core::borrow::{Borrow, BorrowMut};
+use core::cmp::{Eq, Ord, PartialEq, PartialOrd};
+use core::fmt::Debug;
+use core::hash::Hash;
+#[cfg(not(has_int_to_from_bytes))]
+use core::mem::transmute;
+
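+// Helper bound: a byte-array-like type (e.g. `[u8; N]`) that the `Bytes`
+// associated types of `ToBytes` and `FromBytes` below must satisfy.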
+pub trait NumBytes:
+ Debug
+ + AsRef<[u8]>
+ + AsMut<[u8]>
+ + PartialEq
+ + Eq
+ + PartialOrd
+ + Ord
+ + Hash
+ + Borrow<[u8]>
+ + BorrowMut<[u8]>
+{
+}
+
+impl<T> NumBytes for T where
+ T: Debug
+ + AsRef<[u8]>
+ + AsMut<[u8]>
+ + PartialEq
+ + Eq
+ + PartialOrd
+ + Ord
+ + Hash
+ + Borrow<[u8]>
+ + BorrowMut<[u8]>
+ + ?Sized
+{
+}
+
+pub trait ToBytes {
+ type Bytes: NumBytes;
+
+ /// Return the memory representation of this number as a byte array in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::ToBytes;
+ ///
+ /// let bytes = ToBytes::to_be_bytes(&0x12345678u32);
+ /// assert_eq!(bytes, [0x12, 0x34, 0x56, 0x78]);
+ /// ```
+ fn to_be_bytes(&self) -> Self::Bytes;
+
+ /// Return the memory representation of this number as a byte array in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::ToBytes;
+ ///
+ /// let bytes = ToBytes::to_le_bytes(&0x12345678u32);
+ /// assert_eq!(bytes, [0x78, 0x56, 0x34, 0x12]);
+ /// ```
+ fn to_le_bytes(&self) -> Self::Bytes;
+
+ /// Return the memory representation of this number as a byte array in native byte order.
+ ///
+ /// As the target platform's native endianness is used,
+ /// portable code should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
+ ///
+ /// [`to_be_bytes`]: #method.to_be_bytes
+ /// [`to_le_bytes`]: #method.to_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::ToBytes;
+ ///
+ /// #[cfg(target_endian = "big")]
+ /// let expected = [0x12, 0x34, 0x56, 0x78];
+ ///
+ /// #[cfg(target_endian = "little")]
+ /// let expected = [0x78, 0x56, 0x34, 0x12];
+ ///
+ /// let bytes = ToBytes::to_ne_bytes(&0x12345678u32);
+ /// assert_eq!(bytes, expected)
+ /// ```
+ fn to_ne_bytes(&self) -> Self::Bytes {
+ #[cfg(target_endian = "big")]
+ let bytes = self.to_be_bytes();
+ #[cfg(target_endian = "little")]
+ let bytes = self.to_le_bytes();
+ bytes
+ }
+}
+
+pub trait FromBytes: Sized {
+ type Bytes: NumBytes + ?Sized;
+
+ /// Create a number from its representation as a byte array in big endian.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::FromBytes;
+ ///
+ /// let value: u32 = FromBytes::from_be_bytes(&[0x12, 0x34, 0x56, 0x78]);
+ /// assert_eq!(value, 0x12345678);
+ /// ```
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self;
+
+ /// Create a number from its representation as a byte array in little endian.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::FromBytes;
+ ///
+ /// let value: u32 = FromBytes::from_le_bytes(&[0x78, 0x56, 0x34, 0x12]);
+ /// assert_eq!(value, 0x12345678);
+ /// ```
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self;
+
+ /// Create a number from its memory representation as a byte array in native endianness.
+ ///
+ /// As the target platform's native endianness is used,
+    /// portable code likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as appropriate, instead.
+ ///
+ /// [`from_be_bytes`]: #method.from_be_bytes
+ /// [`from_le_bytes`]: #method.from_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::FromBytes;
+ ///
+ /// #[cfg(target_endian = "big")]
+ /// let bytes = [0x12, 0x34, 0x56, 0x78];
+ ///
+ /// #[cfg(target_endian = "little")]
+ /// let bytes = [0x78, 0x56, 0x34, 0x12];
+ ///
+ /// let value: u32 = FromBytes::from_ne_bytes(&bytes);
+ /// assert_eq!(value, 0x12345678)
+ /// ```
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
+ #[cfg(target_endian = "big")]
+ let this = Self::from_be_bytes(bytes);
+ #[cfg(target_endian = "little")]
+ let this = Self::from_le_bytes(bytes);
+ this
+ }
+}
+
+macro_rules! float_to_from_bytes_impl {
+ ($T:ty, $L:expr) => {
+ #[cfg(has_float_to_from_bytes)]
+ impl ToBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Bytes {
+ <$T>::to_be_bytes(*self)
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Bytes {
+ <$T>::to_le_bytes(*self)
+ }
+
+ #[inline]
+ fn to_ne_bytes(&self) -> Self::Bytes {
+ <$T>::to_ne_bytes(*self)
+ }
+ }
+
+ #[cfg(has_float_to_from_bytes)]
+ impl FromBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_be_bytes(*bytes)
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_le_bytes(*bytes)
+ }
+
+ #[inline]
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_ne_bytes(*bytes)
+ }
+ }
+
+ #[cfg(not(has_float_to_from_bytes))]
+ impl ToBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Bytes {
+ ToBytes::to_be_bytes(&self.to_bits())
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Bytes {
+ ToBytes::to_le_bytes(&self.to_bits())
+ }
+
+ #[inline]
+ fn to_ne_bytes(&self) -> Self::Bytes {
+ ToBytes::to_ne_bytes(&self.to_bits())
+ }
+ }
+
+ #[cfg(not(has_float_to_from_bytes))]
+ impl FromBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_bits(FromBytes::from_be_bytes(bytes))
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_bits(FromBytes::from_le_bytes(bytes))
+ }
+
+ #[inline]
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_bits(FromBytes::from_ne_bytes(bytes))
+ }
+ }
+ };
+}
+
+macro_rules! int_to_from_bytes_impl {
+ ($T:ty, $L:expr) => {
+ #[cfg(has_int_to_from_bytes)]
+ impl ToBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Bytes {
+ <$T>::to_be_bytes(*self)
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Bytes {
+ <$T>::to_le_bytes(*self)
+ }
+
+ #[inline]
+ fn to_ne_bytes(&self) -> Self::Bytes {
+ <$T>::to_ne_bytes(*self)
+ }
+ }
+
+ #[cfg(has_int_to_from_bytes)]
+ impl FromBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_be_bytes(*bytes)
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_le_bytes(*bytes)
+ }
+
+ #[inline]
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_ne_bytes(*bytes)
+ }
+ }
+
+ #[cfg(not(has_int_to_from_bytes))]
+ impl ToBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Bytes {
+ <$T as ToBytes>::to_ne_bytes(&<$T>::to_be(*self))
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Bytes {
+ <$T as ToBytes>::to_ne_bytes(&<$T>::to_le(*self))
+ }
+
+ #[inline]
+ fn to_ne_bytes(&self) -> Self::Bytes {
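+                // Size-preserving reinterpretation: `$T` and `[u8; $L]` are the same
+                // size, so this is the integer's native-endian byte representation.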
+ unsafe { transmute(*self) }
+ }
+ }
+
+ #[cfg(not(has_int_to_from_bytes))]
+ impl FromBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_be(<Self as FromBytes>::from_ne_bytes(bytes))
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_le(<Self as FromBytes>::from_ne_bytes(bytes))
+ }
+
+ #[inline]
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
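+                // The inverse reinterpretation: rebuild the integer from a byte
+                // array of identical size, in native byte order.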
+ unsafe { transmute(*bytes) }
+ }
+ }
+ };
+}
+
+int_to_from_bytes_impl!(u8, 1);
+int_to_from_bytes_impl!(u16, 2);
+int_to_from_bytes_impl!(u32, 4);
+int_to_from_bytes_impl!(u64, 8);
+int_to_from_bytes_impl!(u128, 16);
+#[cfg(target_pointer_width = "64")]
+int_to_from_bytes_impl!(usize, 8);
+#[cfg(target_pointer_width = "32")]
+int_to_from_bytes_impl!(usize, 4);
+
+int_to_from_bytes_impl!(i8, 1);
+int_to_from_bytes_impl!(i16, 2);
+int_to_from_bytes_impl!(i32, 4);
+int_to_from_bytes_impl!(i64, 8);
+int_to_from_bytes_impl!(i128, 16);
+#[cfg(target_pointer_width = "64")]
+int_to_from_bytes_impl!(isize, 8);
+#[cfg(target_pointer_width = "32")]
+int_to_from_bytes_impl!(isize, 4);
+
+float_to_from_bytes_impl!(f32, 4);
+float_to_from_bytes_impl!(f64, 8);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ macro_rules! check_to_from_bytes {
+ ($( $ty:ty )+) => {$({
+ let n = 1;
+ let be = <$ty as ToBytes>::to_be_bytes(&n);
+ let le = <$ty as ToBytes>::to_le_bytes(&n);
+ let ne = <$ty as ToBytes>::to_ne_bytes(&n);
+
+ assert_eq!(*be.last().unwrap(), 1);
+ assert_eq!(*le.first().unwrap(), 1);
+ if cfg!(target_endian = "big") {
+ assert_eq!(*ne.last().unwrap(), 1);
+ } else {
+ assert_eq!(*ne.first().unwrap(), 1);
+ }
+
+ assert_eq!(<$ty as FromBytes>::from_be_bytes(&be), n);
+ assert_eq!(<$ty as FromBytes>::from_le_bytes(&le), n);
+ if cfg!(target_endian = "big") {
+ assert_eq!(<$ty as FromBytes>::from_ne_bytes(&be), n);
+ } else {
+ assert_eq!(<$ty as FromBytes>::from_ne_bytes(&le), n);
+ }
+ })+}
+ }
+
+ #[test]
+ fn convert_between_int_and_bytes() {
+ check_to_from_bytes!(u8 u16 u32 u64 u128 usize);
+ check_to_from_bytes!(i8 i16 i32 i64 i128 isize);
+ }
+
+ #[test]
+ fn convert_between_float_and_bytes() {
+ macro_rules! check_to_from_bytes {
+ ($( $ty:ty )+) => {$(
+ let n: $ty = 3.14;
+
+ let be = <$ty as ToBytes>::to_be_bytes(&n);
+ let le = <$ty as ToBytes>::to_le_bytes(&n);
+ let ne = <$ty as ToBytes>::to_ne_bytes(&n);
+
+ assert_eq!(<$ty as FromBytes>::from_be_bytes(&be), n);
+ assert_eq!(<$ty as FromBytes>::from_le_bytes(&le), n);
+ if cfg!(target_endian = "big") {
+ assert_eq!(ne, be);
+ assert_eq!(<$ty as FromBytes>::from_ne_bytes(&be), n);
+ } else {
+ assert_eq!(ne, le);
+ assert_eq!(<$ty as FromBytes>::from_ne_bytes(&le), n);
+ }
+ )+}
+ }
+
+ check_to_from_bytes!(f32 f64);
+ }
+}
diff --git a/rust/vendor/num-traits/src/ops/checked.rs b/rust/vendor/num-traits/src/ops/checked.rs
new file mode 100644
index 0000000..da1eb3e
--- /dev/null
+++ b/rust/vendor/num-traits/src/ops/checked.rs
@@ -0,0 +1,261 @@
+use core::ops::{Add, Div, Mul, Rem, Shl, Shr, Sub};
+
+/// Performs addition that returns `None` instead of wrapping around on
+/// overflow.
+pub trait CheckedAdd: Sized + Add<Self, Output = Self> {
+ /// Adds two numbers, checking for overflow. If overflow happens, `None` is
+ /// returned.
+ fn checked_add(&self, v: &Self) -> Option<Self>;
+}
+
+macro_rules! checked_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &$t) -> Option<$t> {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+checked_impl!(CheckedAdd, checked_add, u8);
+checked_impl!(CheckedAdd, checked_add, u16);
+checked_impl!(CheckedAdd, checked_add, u32);
+checked_impl!(CheckedAdd, checked_add, u64);
+checked_impl!(CheckedAdd, checked_add, usize);
+checked_impl!(CheckedAdd, checked_add, u128);
+
+checked_impl!(CheckedAdd, checked_add, i8);
+checked_impl!(CheckedAdd, checked_add, i16);
+checked_impl!(CheckedAdd, checked_add, i32);
+checked_impl!(CheckedAdd, checked_add, i64);
+checked_impl!(CheckedAdd, checked_add, isize);
+checked_impl!(CheckedAdd, checked_add, i128);
+
+/// Performs subtraction that returns `None` instead of wrapping around on underflow.
+pub trait CheckedSub: Sized + Sub<Self, Output = Self> {
+ /// Subtracts two numbers, checking for underflow. If underflow happens,
+ /// `None` is returned.
+ fn checked_sub(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedSub, checked_sub, u8);
+checked_impl!(CheckedSub, checked_sub, u16);
+checked_impl!(CheckedSub, checked_sub, u32);
+checked_impl!(CheckedSub, checked_sub, u64);
+checked_impl!(CheckedSub, checked_sub, usize);
+checked_impl!(CheckedSub, checked_sub, u128);
+
+checked_impl!(CheckedSub, checked_sub, i8);
+checked_impl!(CheckedSub, checked_sub, i16);
+checked_impl!(CheckedSub, checked_sub, i32);
+checked_impl!(CheckedSub, checked_sub, i64);
+checked_impl!(CheckedSub, checked_sub, isize);
+checked_impl!(CheckedSub, checked_sub, i128);
+
+/// Performs multiplication that returns `None` instead of wrapping around on underflow or
+/// overflow.
+pub trait CheckedMul: Sized + Mul<Self, Output = Self> {
+ /// Multiplies two numbers, checking for underflow or overflow. If underflow
+ /// or overflow happens, `None` is returned.
+ fn checked_mul(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedMul, checked_mul, u8);
+checked_impl!(CheckedMul, checked_mul, u16);
+checked_impl!(CheckedMul, checked_mul, u32);
+checked_impl!(CheckedMul, checked_mul, u64);
+checked_impl!(CheckedMul, checked_mul, usize);
+checked_impl!(CheckedMul, checked_mul, u128);
+
+checked_impl!(CheckedMul, checked_mul, i8);
+checked_impl!(CheckedMul, checked_mul, i16);
+checked_impl!(CheckedMul, checked_mul, i32);
+checked_impl!(CheckedMul, checked_mul, i64);
+checked_impl!(CheckedMul, checked_mul, isize);
+checked_impl!(CheckedMul, checked_mul, i128);
+
+/// Performs division that returns `None` instead of panicking on division by zero and instead of
+/// wrapping around on underflow and overflow.
+pub trait CheckedDiv: Sized + Div<Self, Output = Self> {
+ /// Divides two numbers, checking for underflow, overflow and division by
+ /// zero. If any of that happens, `None` is returned.
+ fn checked_div(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedDiv, checked_div, u8);
+checked_impl!(CheckedDiv, checked_div, u16);
+checked_impl!(CheckedDiv, checked_div, u32);
+checked_impl!(CheckedDiv, checked_div, u64);
+checked_impl!(CheckedDiv, checked_div, usize);
+checked_impl!(CheckedDiv, checked_div, u128);
+
+checked_impl!(CheckedDiv, checked_div, i8);
+checked_impl!(CheckedDiv, checked_div, i16);
+checked_impl!(CheckedDiv, checked_div, i32);
+checked_impl!(CheckedDiv, checked_div, i64);
+checked_impl!(CheckedDiv, checked_div, isize);
+checked_impl!(CheckedDiv, checked_div, i128);
+
+/// Performs an integral remainder that returns `None` instead of panicking on division by zero and
+/// instead of wrapping around on underflow and overflow.
+pub trait CheckedRem: Sized + Rem<Self, Output = Self> {
+ /// Finds the remainder of dividing two numbers, checking for underflow, overflow and division
+ /// by zero. If any of that happens, `None` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::CheckedRem;
+ /// use std::i32::MIN;
+ ///
+ /// assert_eq!(CheckedRem::checked_rem(&10, &7), Some(3));
+ /// assert_eq!(CheckedRem::checked_rem(&10, &-7), Some(3));
+ /// assert_eq!(CheckedRem::checked_rem(&-10, &7), Some(-3));
+ /// assert_eq!(CheckedRem::checked_rem(&-10, &-7), Some(-3));
+ ///
+ /// assert_eq!(CheckedRem::checked_rem(&10, &0), None);
+ ///
+ /// assert_eq!(CheckedRem::checked_rem(&MIN, &1), Some(0));
+ /// assert_eq!(CheckedRem::checked_rem(&MIN, &-1), None);
+ /// ```
+ fn checked_rem(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedRem, checked_rem, u8);
+checked_impl!(CheckedRem, checked_rem, u16);
+checked_impl!(CheckedRem, checked_rem, u32);
+checked_impl!(CheckedRem, checked_rem, u64);
+checked_impl!(CheckedRem, checked_rem, usize);
+checked_impl!(CheckedRem, checked_rem, u128);
+
+checked_impl!(CheckedRem, checked_rem, i8);
+checked_impl!(CheckedRem, checked_rem, i16);
+checked_impl!(CheckedRem, checked_rem, i32);
+checked_impl!(CheckedRem, checked_rem, i64);
+checked_impl!(CheckedRem, checked_rem, isize);
+checked_impl!(CheckedRem, checked_rem, i128);
+
+macro_rules! checked_impl_unary {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self) -> Option<$t> {
+ <$t>::$method(*self)
+ }
+ }
+ };
+}
+
+/// Performs negation that returns `None` if the result can't be represented.
+pub trait CheckedNeg: Sized {
+ /// Negates a number, returning `None` for results that can't be represented, like signed `MIN`
+ /// values that can't be positive, or non-zero unsigned values that can't be negative.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::CheckedNeg;
+ /// use std::i32::MIN;
+ ///
+ /// assert_eq!(CheckedNeg::checked_neg(&1_i32), Some(-1));
+ /// assert_eq!(CheckedNeg::checked_neg(&-1_i32), Some(1));
+ /// assert_eq!(CheckedNeg::checked_neg(&MIN), None);
+ ///
+ /// assert_eq!(CheckedNeg::checked_neg(&0_u32), Some(0));
+ /// assert_eq!(CheckedNeg::checked_neg(&1_u32), None);
+ /// ```
+ fn checked_neg(&self) -> Option<Self>;
+}
+
+checked_impl_unary!(CheckedNeg, checked_neg, u8);
+checked_impl_unary!(CheckedNeg, checked_neg, u16);
+checked_impl_unary!(CheckedNeg, checked_neg, u32);
+checked_impl_unary!(CheckedNeg, checked_neg, u64);
+checked_impl_unary!(CheckedNeg, checked_neg, usize);
+checked_impl_unary!(CheckedNeg, checked_neg, u128);
+
+checked_impl_unary!(CheckedNeg, checked_neg, i8);
+checked_impl_unary!(CheckedNeg, checked_neg, i16);
+checked_impl_unary!(CheckedNeg, checked_neg, i32);
+checked_impl_unary!(CheckedNeg, checked_neg, i64);
+checked_impl_unary!(CheckedNeg, checked_neg, isize);
+checked_impl_unary!(CheckedNeg, checked_neg, i128);
+
+/// Performs a left shift that returns `None` on shifts larger than
+/// or equal to the type width.
+pub trait CheckedShl: Sized + Shl<u32, Output = Self> {
+ /// Checked shift left. Computes `self << rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// ```
+ /// use num_traits::CheckedShl;
+ ///
+ /// let x: u16 = 0x0001;
+ ///
+ /// assert_eq!(CheckedShl::checked_shl(&x, 0), Some(0x0001));
+ /// assert_eq!(CheckedShl::checked_shl(&x, 1), Some(0x0002));
+ /// assert_eq!(CheckedShl::checked_shl(&x, 15), Some(0x8000));
+ /// assert_eq!(CheckedShl::checked_shl(&x, 16), None);
+ /// ```
+ fn checked_shl(&self, rhs: u32) -> Option<Self>;
+}
+
+macro_rules! checked_shift_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, rhs: u32) -> Option<$t> {
+ <$t>::$method(*self, rhs)
+ }
+ }
+ };
+}
+
+checked_shift_impl!(CheckedShl, checked_shl, u8);
+checked_shift_impl!(CheckedShl, checked_shl, u16);
+checked_shift_impl!(CheckedShl, checked_shl, u32);
+checked_shift_impl!(CheckedShl, checked_shl, u64);
+checked_shift_impl!(CheckedShl, checked_shl, usize);
+checked_shift_impl!(CheckedShl, checked_shl, u128);
+
+checked_shift_impl!(CheckedShl, checked_shl, i8);
+checked_shift_impl!(CheckedShl, checked_shl, i16);
+checked_shift_impl!(CheckedShl, checked_shl, i32);
+checked_shift_impl!(CheckedShl, checked_shl, i64);
+checked_shift_impl!(CheckedShl, checked_shl, isize);
+checked_shift_impl!(CheckedShl, checked_shl, i128);
+
+/// Performs a right shift that returns `None` on shifts larger than
+/// or equal to the type width.
+pub trait CheckedShr: Sized + Shr<u32, Output = Self> {
+ /// Checked shift right. Computes `self >> rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// ```
+ /// use num_traits::CheckedShr;
+ ///
+ /// let x: u16 = 0x8000;
+ ///
+ /// assert_eq!(CheckedShr::checked_shr(&x, 0), Some(0x8000));
+ /// assert_eq!(CheckedShr::checked_shr(&x, 1), Some(0x4000));
+ /// assert_eq!(CheckedShr::checked_shr(&x, 15), Some(0x0001));
+ /// assert_eq!(CheckedShr::checked_shr(&x, 16), None);
+ /// ```
+ fn checked_shr(&self, rhs: u32) -> Option<Self>;
+}
+
+checked_shift_impl!(CheckedShr, checked_shr, u8);
+checked_shift_impl!(CheckedShr, checked_shr, u16);
+checked_shift_impl!(CheckedShr, checked_shr, u32);
+checked_shift_impl!(CheckedShr, checked_shr, u64);
+checked_shift_impl!(CheckedShr, checked_shr, usize);
+checked_shift_impl!(CheckedShr, checked_shr, u128);
+
+checked_shift_impl!(CheckedShr, checked_shr, i8);
+checked_shift_impl!(CheckedShr, checked_shr, i16);
+checked_shift_impl!(CheckedShr, checked_shr, i32);
+checked_shift_impl!(CheckedShr, checked_shr, i64);
+checked_shift_impl!(CheckedShr, checked_shr, isize);
+checked_shift_impl!(CheckedShr, checked_shr, i128);
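+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Illustrative addition (not part of the upstream source): downstream code
+    // usually consumes the checked traits through generic bounds; the helper
+    // name `halve_sum` is hypothetical and exists only for this sketch.
+    #[test]
+    fn checked_traits_sketch() {
+        fn halve_sum<T: CheckedAdd + CheckedShr>(a: T, b: T) -> Option<T> {
+            a.checked_add(&b)?.checked_shr(1)
+        }
+        assert_eq!(halve_sum(3u8, 5u8), Some(4));
+        assert_eq!(halve_sum(200u8, 100u8), None);
+    }
+}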
diff --git a/rust/vendor/num-traits/src/ops/euclid.rs b/rust/vendor/num-traits/src/ops/euclid.rs
new file mode 100644
index 0000000..4547fee
--- /dev/null
+++ b/rust/vendor/num-traits/src/ops/euclid.rs
@@ -0,0 +1,339 @@
+use core::ops::{Div, Rem};
+
+pub trait Euclid: Sized + Div<Self, Output = Self> + Rem<Self, Output = Self> {
+ /// Calculates Euclidean division, the matching method for `rem_euclid`.
+ ///
+ /// This computes the integer `n` such that
+ /// `self = n * v + self.rem_euclid(v)`.
+ /// In other words, the result is `self / v` rounded to the integer `n`
+ /// such that `self >= n * v`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::Euclid;
+ ///
+ /// let a: i32 = 7;
+ /// let b: i32 = 4;
+ /// assert_eq!(Euclid::div_euclid(&a, &b), 1); // 7 > 4 * 1
+ /// assert_eq!(Euclid::div_euclid(&-a, &b), -2); // -7 >= 4 * -2
+ /// assert_eq!(Euclid::div_euclid(&a, &-b), -1); // 7 >= -4 * -1
+ /// assert_eq!(Euclid::div_euclid(&-a, &-b), 2); // -7 >= -4 * 2
+ /// ```
+ fn div_euclid(&self, v: &Self) -> Self;
+
+ /// Calculates the least nonnegative remainder of `self (mod v)`.
+ ///
+ /// In particular, the return value `r` satisfies `0.0 <= r < v.abs()` in
+ /// most cases. However, due to a floating point round-off error it can
+ /// result in `r == v.abs()`, violating the mathematical definition, if
+ /// `self` is much smaller than `v.abs()` in magnitude and `self < 0.0`.
+ /// This result is not an element of the function's codomain, but it is the
+ /// closest floating point number in the real numbers and thus fulfills the
+ /// property `self == self.div_euclid(v) * v + self.rem_euclid(v)`
+    /// approximately.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::Euclid;
+ ///
+ /// let a: i32 = 7;
+ /// let b: i32 = 4;
+ /// assert_eq!(Euclid::rem_euclid(&a, &b), 3);
+ /// assert_eq!(Euclid::rem_euclid(&-a, &b), 1);
+ /// assert_eq!(Euclid::rem_euclid(&a, &-b), 3);
+ /// assert_eq!(Euclid::rem_euclid(&-a, &-b), 1);
+ /// ```
+ fn rem_euclid(&self, v: &Self) -> Self;
+}
+
+macro_rules! euclid_forward_impl {
+ ($($t:ty)*) => {$(
+ #[cfg(has_div_euclid)]
+ impl Euclid for $t {
+ #[inline]
+ fn div_euclid(&self, v: &$t) -> Self {
+ <$t>::div_euclid(*self, *v)
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &$t) -> Self {
+ <$t>::rem_euclid(*self, *v)
+ }
+ }
+ )*}
+}
+
+macro_rules! euclid_int_impl {
+ ($($t:ty)*) => {$(
+ euclid_forward_impl!($t);
+
+ #[cfg(not(has_div_euclid))]
+ impl Euclid for $t {
+ #[inline]
+ fn div_euclid(&self, v: &$t) -> Self {
+ let q = self / v;
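+                // `/` truncates toward zero; if that leaves a negative remainder,
+                // step the quotient by one so the Euclidean remainder is nonnegative.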
+ if self % v < 0 {
+ return if *v > 0 { q - 1 } else { q + 1 }
+ }
+ q
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &$t) -> Self {
+ let r = self % v;
+ if r < 0 {
+ if *v < 0 {
+ r - v
+ } else {
+ r + v
+ }
+ } else {
+ r
+ }
+ }
+ }
+ )*}
+}
+
+macro_rules! euclid_uint_impl {
+ ($($t:ty)*) => {$(
+ euclid_forward_impl!($t);
+
+ #[cfg(not(has_div_euclid))]
+ impl Euclid for $t {
+ #[inline]
+ fn div_euclid(&self, v: &$t) -> Self {
+ self / v
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &$t) -> Self {
+ self % v
+ }
+ }
+ )*}
+}
+
+euclid_int_impl!(isize i8 i16 i32 i64 i128);
+euclid_uint_impl!(usize u8 u16 u32 u64 u128);
+
+#[cfg(all(has_div_euclid, feature = "std"))]
+euclid_forward_impl!(f32 f64);
+
+#[cfg(not(all(has_div_euclid, feature = "std")))]
+impl Euclid for f32 {
+ #[inline]
+ fn div_euclid(&self, v: &f32) -> f32 {
+ let q = <f32 as crate::float::FloatCore>::trunc(self / v);
+ if self % v < 0.0 {
+ return if *v > 0.0 { q - 1.0 } else { q + 1.0 };
+ }
+ q
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &f32) -> f32 {
+ let r = self % v;
+ if r < 0.0 {
+ r + <f32 as crate::float::FloatCore>::abs(*v)
+ } else {
+ r
+ }
+ }
+}
+
+#[cfg(not(all(has_div_euclid, feature = "std")))]
+impl Euclid for f64 {
+ #[inline]
+ fn div_euclid(&self, v: &f64) -> f64 {
+ let q = <f64 as crate::float::FloatCore>::trunc(self / v);
+ if self % v < 0.0 {
+ return if *v > 0.0 { q - 1.0 } else { q + 1.0 };
+ }
+ q
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &f64) -> f64 {
+ let r = self % v;
+ if r < 0.0 {
+ r + <f64 as crate::float::FloatCore>::abs(*v)
+ } else {
+ r
+ }
+ }
+}
+
+pub trait CheckedEuclid: Euclid {
+    /// Performs Euclidean division that returns `None` instead of panicking on division by zero
+ /// and instead of wrapping around on underflow and overflow.
+ fn checked_div_euclid(&self, v: &Self) -> Option<Self>;
+
+    /// Finds the Euclidean remainder of dividing two numbers, checking for underflow, overflow and
+ /// division by zero. If any of that happens, `None` is returned.
+ fn checked_rem_euclid(&self, v: &Self) -> Option<Self>;
+}
+
+macro_rules! checked_euclid_forward_impl {
+ ($($t:ty)*) => {$(
+ #[cfg(has_div_euclid)]
+ impl CheckedEuclid for $t {
+ #[inline]
+ fn checked_div_euclid(&self, v: &$t) -> Option<Self> {
+ <$t>::checked_div_euclid(*self, *v)
+ }
+
+ #[inline]
+ fn checked_rem_euclid(&self, v: &$t) -> Option<Self> {
+ <$t>::checked_rem_euclid(*self, *v)
+ }
+ }
+ )*}
+}
+
+macro_rules! checked_euclid_int_impl {
+ ($($t:ty)*) => {$(
+ checked_euclid_forward_impl!($t);
+
+ #[cfg(not(has_div_euclid))]
+ impl CheckedEuclid for $t {
+ #[inline]
+ fn checked_div_euclid(&self, v: &$t) -> Option<$t> {
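+                // Division by zero and the one overflowing case (`MIN` divided by
+                // `-1`, whose quotient does not fit) both yield `None`.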
+ if *v == 0 || (*self == Self::min_value() && *v == -1) {
+ None
+ } else {
+ Some(Euclid::div_euclid(self, v))
+ }
+ }
+
+ #[inline]
+ fn checked_rem_euclid(&self, v: &$t) -> Option<$t> {
+ if *v == 0 || (*self == Self::min_value() && *v == -1) {
+ None
+ } else {
+ Some(Euclid::rem_euclid(self, v))
+ }
+ }
+ }
+ )*}
+}
+
+macro_rules! checked_euclid_uint_impl {
+ ($($t:ty)*) => {$(
+ checked_euclid_forward_impl!($t);
+
+ #[cfg(not(has_div_euclid))]
+ impl CheckedEuclid for $t {
+ #[inline]
+ fn checked_div_euclid(&self, v: &$t) -> Option<$t> {
+ if *v == 0 {
+ None
+ } else {
+ Some(Euclid::div_euclid(self, v))
+ }
+ }
+
+ #[inline]
+ fn checked_rem_euclid(&self, v: &$t) -> Option<$t> {
+ if *v == 0 {
+ None
+ } else {
+ Some(Euclid::rem_euclid(self, v))
+ }
+ }
+ }
+ )*}
+}
+
+checked_euclid_int_impl!(isize i8 i16 i32 i64 i128);
+checked_euclid_uint_impl!(usize u8 u16 u32 u64 u128);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn euclid_unsigned() {
+ macro_rules! test_euclid {
+ ($($t:ident)+) => {
+ $(
+ {
+ let x: $t = 10;
+ let y: $t = 3;
+ assert_eq!(Euclid::div_euclid(&x, &y), 3);
+ assert_eq!(Euclid::rem_euclid(&x, &y), 1);
+ }
+ )+
+ };
+ }
+
+ test_euclid!(usize u8 u16 u32 u64);
+ }
+
+ #[test]
+ fn euclid_signed() {
+ macro_rules! test_euclid {
+ ($($t:ident)+) => {
+ $(
+ {
+ let x: $t = 10;
+ let y: $t = -3;
+ assert_eq!(Euclid::div_euclid(&x, &y), -3);
+ assert_eq!(Euclid::div_euclid(&-x, &y), 4);
+ assert_eq!(Euclid::rem_euclid(&x, &y), 1);
+ assert_eq!(Euclid::rem_euclid(&-x, &y), 2);
+ let x: $t = $t::min_value() + 1;
+ let y: $t = -1;
+ assert_eq!(Euclid::div_euclid(&x, &y), $t::max_value());
+ }
+ )+
+ };
+ }
+
+ test_euclid!(isize i8 i16 i32 i64 i128);
+ }
+
+ #[test]
+ fn euclid_float() {
+ macro_rules! test_euclid {
+ ($($t:ident)+) => {
+ $(
+ {
+ let x: $t = 12.1;
+ let y: $t = 3.2;
+ assert!(Euclid::div_euclid(&x, &y) * y + Euclid::rem_euclid(&x, &y) - x
+ <= 46.4 * <$t as crate::float::FloatCore>::epsilon());
+ assert!(Euclid::div_euclid(&x, &-y) * -y + Euclid::rem_euclid(&x, &-y) - x
+ <= 46.4 * <$t as crate::float::FloatCore>::epsilon());
+ assert!(Euclid::div_euclid(&-x, &y) * y + Euclid::rem_euclid(&-x, &y) + x
+ <= 46.4 * <$t as crate::float::FloatCore>::epsilon());
+ assert!(Euclid::div_euclid(&-x, &-y) * -y + Euclid::rem_euclid(&-x, &-y) + x
+ <= 46.4 * <$t as crate::float::FloatCore>::epsilon());
+ }
+ )+
+ };
+ }
+
+ test_euclid!(f32 f64);
+ }
+
+ #[test]
+ fn euclid_checked() {
+ macro_rules! test_euclid_checked {
+ ($($t:ident)+) => {
+ $(
+ {
+ assert_eq!(CheckedEuclid::checked_div_euclid(&$t::min_value(), &-1), None);
+ assert_eq!(CheckedEuclid::checked_rem_euclid(&$t::min_value(), &-1), None);
+ assert_eq!(CheckedEuclid::checked_div_euclid(&1, &0), None);
+ assert_eq!(CheckedEuclid::checked_rem_euclid(&1, &0), None);
+ }
+ )+
+ };
+ }
+
+ test_euclid_checked!(isize i8 i16 i32 i64 i128);
+ }
+}
diff --git a/rust/vendor/num-traits/src/ops/inv.rs b/rust/vendor/num-traits/src/ops/inv.rs
new file mode 100644
index 0000000..7087d09
--- /dev/null
+++ b/rust/vendor/num-traits/src/ops/inv.rs
@@ -0,0 +1,47 @@
+/// Unary operator for retrieving the multiplicative inverse, or reciprocal, of a value.
+pub trait Inv {
+ /// The result after applying the operator.
+ type Output;
+
+ /// Returns the multiplicative inverse of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::f64::INFINITY;
+ /// use num_traits::Inv;
+ ///
+ /// assert_eq!(7.0.inv() * 7.0, 1.0);
+ /// assert_eq!((-0.0).inv(), -INFINITY);
+ /// ```
+ fn inv(self) -> Self::Output;
+}
+
+impl Inv for f32 {
+ type Output = f32;
+ #[inline]
+ fn inv(self) -> f32 {
+ 1.0 / self
+ }
+}
+impl Inv for f64 {
+ type Output = f64;
+ #[inline]
+ fn inv(self) -> f64 {
+ 1.0 / self
+ }
+}
+impl<'a> Inv for &'a f32 {
+ type Output = f32;
+ #[inline]
+ fn inv(self) -> f32 {
+ 1.0 / *self
+ }
+}
+impl<'a> Inv for &'a f64 {
+ type Output = f64;
+ #[inline]
+ fn inv(self) -> f64 {
+ 1.0 / *self
+ }
+}
diff --git a/rust/vendor/num-traits/src/ops/mod.rs b/rust/vendor/num-traits/src/ops/mod.rs
new file mode 100644
index 0000000..2128d86
--- /dev/null
+++ b/rust/vendor/num-traits/src/ops/mod.rs
@@ -0,0 +1,8 @@
+pub mod bytes;
+pub mod checked;
+pub mod euclid;
+pub mod inv;
+pub mod mul_add;
+pub mod overflowing;
+pub mod saturating;
+pub mod wrapping;
diff --git a/rust/vendor/num-traits/src/ops/mul_add.rs b/rust/vendor/num-traits/src/ops/mul_add.rs
new file mode 100644
index 0000000..51beb55
--- /dev/null
+++ b/rust/vendor/num-traits/src/ops/mul_add.rs
@@ -0,0 +1,149 @@
+/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
+/// error, yielding a more accurate result than an unfused multiply-add.
+///
+/// Using `mul_add` can be more performant than an unfused multiply-add if
+/// the target architecture has a dedicated `fma` CPU instruction.
+///
+/// Note that `A` and `B` are `Self` by default, but this is not mandatory.
+///
+/// # Example
+///
+/// ```
+/// use std::f32;
+///
+/// let m = 10.0_f32;
+/// let x = 4.0_f32;
+/// let b = 60.0_f32;
+///
+/// // 100.0
+/// let abs_difference = (m.mul_add(x, b) - (m*x + b)).abs();
+///
+/// assert!(abs_difference <= 100.0 * f32::EPSILON);
+/// ```
+pub trait MulAdd<A = Self, B = Self> {
+ /// The resulting type after applying the fused multiply-add.
+ type Output;
+
+ /// Performs the fused multiply-add operation `(self * a) + b`
+ fn mul_add(self, a: A, b: B) -> Self::Output;
+}
+
+/// The fused multiply-add assignment operation `*self = (*self * a) + b`
+pub trait MulAddAssign<A = Self, B = Self> {
+ /// Performs the fused multiply-add assignment operation `*self = (*self * a) + b`
+ fn mul_add_assign(&mut self, a: A, b: B);
+}
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAdd<f32, f32> for f32 {
+ type Output = Self;
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self::Output {
+ <Self as crate::Float>::mul_add(self, a, b)
+ }
+}
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAdd<f64, f64> for f64 {
+ type Output = Self;
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self::Output {
+ <Self as crate::Float>::mul_add(self, a, b)
+ }
+}
+
+macro_rules! mul_add_impl {
+ ($trait_name:ident for $($t:ty)*) => {$(
+ impl $trait_name for $t {
+ type Output = Self;
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self::Output {
+ (self * a) + b
+ }
+ }
+ )*}
+}
+
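+// Integer arithmetic does not round, so the plain `(self * a) + b` form below is
+// already exact (overflow behavior aside); fusing only matters for floats.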
+mul_add_impl!(MulAdd for isize i8 i16 i32 i64 i128);
+mul_add_impl!(MulAdd for usize u8 u16 u32 u64 u128);
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAddAssign<f32, f32> for f32 {
+ #[inline]
+ fn mul_add_assign(&mut self, a: Self, b: Self) {
+ *self = <Self as crate::Float>::mul_add(*self, a, b)
+ }
+}
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAddAssign<f64, f64> for f64 {
+ #[inline]
+ fn mul_add_assign(&mut self, a: Self, b: Self) {
+ *self = <Self as crate::Float>::mul_add(*self, a, b)
+ }
+}
+
+macro_rules! mul_add_assign_impl {
+ ($trait_name:ident for $($t:ty)*) => {$(
+ impl $trait_name for $t {
+ #[inline]
+ fn mul_add_assign(&mut self, a: Self, b: Self) {
+ *self = (*self * a) + b
+ }
+ }
+ )*}
+}
+
+mul_add_assign_impl!(MulAddAssign for isize i8 i16 i32 i64 i128);
+mul_add_assign_impl!(MulAddAssign for usize u8 u16 u32 u64 u128);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn mul_add_integer() {
+ macro_rules! test_mul_add {
+ ($($t:ident)+) => {
+ $(
+ {
+ let m: $t = 2;
+ let x: $t = 3;
+ let b: $t = 4;
+
+ assert_eq!(MulAdd::mul_add(m, x, b), (m*x + b));
+ }
+ )+
+ };
+ }
+
+ test_mul_add!(usize u8 u16 u32 u64 isize i8 i16 i32 i64);
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn mul_add_float() {
+ macro_rules! test_mul_add {
+ ($($t:ident)+) => {
+ $(
+ {
+ use core::$t;
+
+ let m: $t = 12.0;
+ let x: $t = 3.4;
+ let b: $t = 5.6;
+
+ let abs_difference = (MulAdd::mul_add(m, x, b) - (m*x + b)).abs();
+
+ assert!(abs_difference <= 46.4 * $t::EPSILON);
+ }
+ )+
+ };
+ }
+
+ test_mul_add!(f32 f64);
+ }
+}
diff --git a/rust/vendor/num-traits/src/ops/overflowing.rs b/rust/vendor/num-traits/src/ops/overflowing.rs
new file mode 100644
index 0000000..c7a35a5
--- /dev/null
+++ b/rust/vendor/num-traits/src/ops/overflowing.rs
@@ -0,0 +1,96 @@
+use core::ops::{Add, Mul, Sub};
+use core::{i128, i16, i32, i64, i8, isize};
+use core::{u128, u16, u32, u64, u8, usize};
+
+macro_rules! overflowing_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &Self) -> (Self, bool) {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+/// Performs addition with a flag for overflow.
+pub trait OverflowingAdd: Sized + Add<Self, Output = Self> {
+ /// Returns a tuple of the sum along with a boolean indicating whether an arithmetic overflow would occur.
+ /// If an overflow would have occurred then the wrapped value is returned.
+ fn overflowing_add(&self, v: &Self) -> (Self, bool);
+}
+
+overflowing_impl!(OverflowingAdd, overflowing_add, u8);
+overflowing_impl!(OverflowingAdd, overflowing_add, u16);
+overflowing_impl!(OverflowingAdd, overflowing_add, u32);
+overflowing_impl!(OverflowingAdd, overflowing_add, u64);
+overflowing_impl!(OverflowingAdd, overflowing_add, usize);
+overflowing_impl!(OverflowingAdd, overflowing_add, u128);
+
+overflowing_impl!(OverflowingAdd, overflowing_add, i8);
+overflowing_impl!(OverflowingAdd, overflowing_add, i16);
+overflowing_impl!(OverflowingAdd, overflowing_add, i32);
+overflowing_impl!(OverflowingAdd, overflowing_add, i64);
+overflowing_impl!(OverflowingAdd, overflowing_add, isize);
+overflowing_impl!(OverflowingAdd, overflowing_add, i128);
+
+/// Performs subtraction with a flag for overflow.
+pub trait OverflowingSub: Sized + Sub<Self, Output = Self> {
+ /// Returns a tuple of the difference along with a boolean indicating whether an arithmetic overflow would occur.
+ /// If an overflow would have occurred then the wrapped value is returned.
+ fn overflowing_sub(&self, v: &Self) -> (Self, bool);
+}
+
+overflowing_impl!(OverflowingSub, overflowing_sub, u8);
+overflowing_impl!(OverflowingSub, overflowing_sub, u16);
+overflowing_impl!(OverflowingSub, overflowing_sub, u32);
+overflowing_impl!(OverflowingSub, overflowing_sub, u64);
+overflowing_impl!(OverflowingSub, overflowing_sub, usize);
+overflowing_impl!(OverflowingSub, overflowing_sub, u128);
+
+overflowing_impl!(OverflowingSub, overflowing_sub, i8);
+overflowing_impl!(OverflowingSub, overflowing_sub, i16);
+overflowing_impl!(OverflowingSub, overflowing_sub, i32);
+overflowing_impl!(OverflowingSub, overflowing_sub, i64);
+overflowing_impl!(OverflowingSub, overflowing_sub, isize);
+overflowing_impl!(OverflowingSub, overflowing_sub, i128);
+
+/// Performs multiplication with a flag for overflow.
+pub trait OverflowingMul: Sized + Mul<Self, Output = Self> {
+ /// Returns a tuple of the product along with a boolean indicating whether an arithmetic overflow would occur.
+ /// If an overflow would have occurred then the wrapped value is returned.
+ fn overflowing_mul(&self, v: &Self) -> (Self, bool);
+}
+
+overflowing_impl!(OverflowingMul, overflowing_mul, u8);
+overflowing_impl!(OverflowingMul, overflowing_mul, u16);
+overflowing_impl!(OverflowingMul, overflowing_mul, u32);
+overflowing_impl!(OverflowingMul, overflowing_mul, u64);
+overflowing_impl!(OverflowingMul, overflowing_mul, usize);
+overflowing_impl!(OverflowingMul, overflowing_mul, u128);
+
+overflowing_impl!(OverflowingMul, overflowing_mul, i8);
+overflowing_impl!(OverflowingMul, overflowing_mul, i16);
+overflowing_impl!(OverflowingMul, overflowing_mul, i32);
+overflowing_impl!(OverflowingMul, overflowing_mul, i64);
+overflowing_impl!(OverflowingMul, overflowing_mul, isize);
+overflowing_impl!(OverflowingMul, overflowing_mul, i128);
+
+#[test]
+fn test_overflowing_traits() {
+ fn overflowing_add<T: OverflowingAdd>(a: T, b: T) -> (T, bool) {
+ a.overflowing_add(&b)
+ }
+ fn overflowing_sub<T: OverflowingSub>(a: T, b: T) -> (T, bool) {
+ a.overflowing_sub(&b)
+ }
+ fn overflowing_mul<T: OverflowingMul>(a: T, b: T) -> (T, bool) {
+ a.overflowing_mul(&b)
+ }
+ assert_eq!(overflowing_add(5i16, 2), (7, false));
+ assert_eq!(overflowing_add(i16::MAX, 1), (i16::MIN, true));
+ assert_eq!(overflowing_sub(5i16, 2), (3, false));
+ assert_eq!(overflowing_sub(i16::MIN, 1), (i16::MAX, true));
+ assert_eq!(overflowing_mul(5i16, 2), (10, false));
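+    // 10_000_000_000 wraps modulo 2^32 to 1_410_065_408.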
+ assert_eq!(overflowing_mul(1_000_000_000i32, 10), (1410065408, true));
+}
diff --git a/rust/vendor/num-traits/src/ops/saturating.rs b/rust/vendor/num-traits/src/ops/saturating.rs
new file mode 100644
index 0000000..16a0045
--- /dev/null
+++ b/rust/vendor/num-traits/src/ops/saturating.rs
@@ -0,0 +1,130 @@
+use core::ops::{Add, Mul, Sub};
+
+/// Saturating math operations. Deprecated, use `SaturatingAdd`, `SaturatingSub` and
+/// `SaturatingMul` instead.
+pub trait Saturating {
+ /// Saturating addition operator.
+ /// Returns a+b, saturating at the numeric bounds instead of overflowing.
+ fn saturating_add(self, v: Self) -> Self;
+
+ /// Saturating subtraction operator.
+ /// Returns a-b, saturating at the numeric bounds instead of overflowing.
+ fn saturating_sub(self, v: Self) -> Self;
+}
+
+macro_rules! deprecated_saturating_impl {
+ ($trait_name:ident for $($t:ty)*) => {$(
+ impl $trait_name for $t {
+ #[inline]
+ fn saturating_add(self, v: Self) -> Self {
+ Self::saturating_add(self, v)
+ }
+
+ #[inline]
+ fn saturating_sub(self, v: Self) -> Self {
+ Self::saturating_sub(self, v)
+ }
+ }
+ )*}
+}
+
+deprecated_saturating_impl!(Saturating for isize i8 i16 i32 i64 i128);
+deprecated_saturating_impl!(Saturating for usize u8 u16 u32 u64 u128);
+
+macro_rules! saturating_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &Self) -> Self {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+/// Performs addition that saturates at the numeric bounds instead of overflowing.
+pub trait SaturatingAdd: Sized + Add<Self, Output = Self> {
+ /// Saturating addition. Computes `self + other`, saturating at the relevant high or low boundary of
+ /// the type.
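+ ///
+ /// An illustrative example; the call is written through the trait because the
+ /// primitive types also have an inherent `saturating_add`:
+ ///
+ /// ```
+ /// use num_traits::SaturatingAdd;
+ ///
+ /// assert_eq!(SaturatingAdd::saturating_add(&5u8, &2), 7);
+ /// assert_eq!(SaturatingAdd::saturating_add(&250u8, &10), 255); // clamped at the upper bound
+ /// ```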
+ fn saturating_add(&self, v: &Self) -> Self;
+}
+
+saturating_impl!(SaturatingAdd, saturating_add, u8);
+saturating_impl!(SaturatingAdd, saturating_add, u16);
+saturating_impl!(SaturatingAdd, saturating_add, u32);
+saturating_impl!(SaturatingAdd, saturating_add, u64);
+saturating_impl!(SaturatingAdd, saturating_add, usize);
+saturating_impl!(SaturatingAdd, saturating_add, u128);
+
+saturating_impl!(SaturatingAdd, saturating_add, i8);
+saturating_impl!(SaturatingAdd, saturating_add, i16);
+saturating_impl!(SaturatingAdd, saturating_add, i32);
+saturating_impl!(SaturatingAdd, saturating_add, i64);
+saturating_impl!(SaturatingAdd, saturating_add, isize);
+saturating_impl!(SaturatingAdd, saturating_add, i128);
+
+/// Performs subtraction that saturates at the numeric bounds instead of overflowing.
+pub trait SaturatingSub: Sized + Sub<Self, Output = Self> {
+ /// Saturating subtraction. Computes `self - other`, saturating at the relevant high or low boundary of
+ /// the type.
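+ ///
+ /// For example (trait call, to avoid the inherent method):
+ ///
+ /// ```
+ /// use num_traits::SaturatingSub;
+ ///
+ /// assert_eq!(SaturatingSub::saturating_sub(&5u8, &2), 3);
+ /// assert_eq!(SaturatingSub::saturating_sub(&3u8, &10), 0); // clamped at the lower bound
+ /// ```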
+ fn saturating_sub(&self, v: &Self) -> Self;
+}
+
+saturating_impl!(SaturatingSub, saturating_sub, u8);
+saturating_impl!(SaturatingSub, saturating_sub, u16);
+saturating_impl!(SaturatingSub, saturating_sub, u32);
+saturating_impl!(SaturatingSub, saturating_sub, u64);
+saturating_impl!(SaturatingSub, saturating_sub, usize);
+saturating_impl!(SaturatingSub, saturating_sub, u128);
+
+saturating_impl!(SaturatingSub, saturating_sub, i8);
+saturating_impl!(SaturatingSub, saturating_sub, i16);
+saturating_impl!(SaturatingSub, saturating_sub, i32);
+saturating_impl!(SaturatingSub, saturating_sub, i64);
+saturating_impl!(SaturatingSub, saturating_sub, isize);
+saturating_impl!(SaturatingSub, saturating_sub, i128);
+
+/// Performs multiplication that saturates at the numeric bounds instead of overflowing.
+pub trait SaturatingMul: Sized + Mul<Self, Output = Self> {
+ /// Saturating multiplication. Computes `self * other`, saturating at the relevant high or low boundary of
+ /// the type.
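+ ///
+ /// For example (trait call, to avoid the inherent method):
+ ///
+ /// ```
+ /// use num_traits::SaturatingMul;
+ ///
+ /// assert_eq!(SaturatingMul::saturating_mul(&5u8, &2), 10);
+ /// assert_eq!(SaturatingMul::saturating_mul(&100u8, &3), 255); // clamped at the upper bound
+ /// ```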
+ fn saturating_mul(&self, v: &Self) -> Self;
+}
+
+saturating_impl!(SaturatingMul, saturating_mul, u8);
+saturating_impl!(SaturatingMul, saturating_mul, u16);
+saturating_impl!(SaturatingMul, saturating_mul, u32);
+saturating_impl!(SaturatingMul, saturating_mul, u64);
+saturating_impl!(SaturatingMul, saturating_mul, usize);
+saturating_impl!(SaturatingMul, saturating_mul, u128);
+
+saturating_impl!(SaturatingMul, saturating_mul, i8);
+saturating_impl!(SaturatingMul, saturating_mul, i16);
+saturating_impl!(SaturatingMul, saturating_mul, i32);
+saturating_impl!(SaturatingMul, saturating_mul, i64);
+saturating_impl!(SaturatingMul, saturating_mul, isize);
+saturating_impl!(SaturatingMul, saturating_mul, i128);
+
+// TODO: add SaturatingNeg for signed integer primitives once the saturating_neg() API is stable.
+
+#[test]
+fn test_saturating_traits() {
+ fn saturating_add<T: SaturatingAdd>(a: T, b: T) -> T {
+ a.saturating_add(&b)
+ }
+ fn saturating_sub<T: SaturatingSub>(a: T, b: T) -> T {
+ a.saturating_sub(&b)
+ }
+ fn saturating_mul<T: SaturatingMul>(a: T, b: T) -> T {
+ a.saturating_mul(&b)
+ }
+ assert_eq!(saturating_add(255, 1), 255u8);
+ assert_eq!(saturating_add(127, 1), 127i8);
+ assert_eq!(saturating_add(-128, -1), -128i8);
+ assert_eq!(saturating_sub(0, 1), 0u8);
+ assert_eq!(saturating_sub(-128, 1), -128i8);
+ assert_eq!(saturating_sub(127, -1), 127i8);
+ assert_eq!(saturating_mul(255, 2), 255u8);
+ assert_eq!(saturating_mul(127, 2), 127i8);
+ assert_eq!(saturating_mul(-128, 2), -128i8);
+}
diff --git a/rust/vendor/num-traits/src/ops/wrapping.rs b/rust/vendor/num-traits/src/ops/wrapping.rs
new file mode 100644
index 0000000..3a8b331
--- /dev/null
+++ b/rust/vendor/num-traits/src/ops/wrapping.rs
@@ -0,0 +1,327 @@
+use core::num::Wrapping;
+use core::ops::{Add, Mul, Neg, Shl, Shr, Sub};
+
+macro_rules! wrapping_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &Self) -> Self {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+ ($trait_name:ident, $method:ident, $t:ty, $rhs:ty) => {
+ impl $trait_name<$rhs> for $t {
+ #[inline]
+ fn $method(&self, v: &$rhs) -> Self {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+/// Performs addition that wraps around on overflow.
+pub trait WrappingAdd: Sized + Add<Self, Output = Self> {
+ /// Wrapping (modular) addition. Computes `self + other`, wrapping around at the boundary of
+ /// the type.
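+ ///
+ /// For example (written through the trait, mirroring the inherent `u8::wrapping_add`):
+ ///
+ /// ```
+ /// use num_traits::WrappingAdd;
+ ///
+ /// assert_eq!(WrappingAdd::wrapping_add(&200u8, &55), 255);
+ /// assert_eq!(WrappingAdd::wrapping_add(&200u8, &56), 0); // wrapped
+ /// ```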
+ fn wrapping_add(&self, v: &Self) -> Self;
+}
+
+wrapping_impl!(WrappingAdd, wrapping_add, u8);
+wrapping_impl!(WrappingAdd, wrapping_add, u16);
+wrapping_impl!(WrappingAdd, wrapping_add, u32);
+wrapping_impl!(WrappingAdd, wrapping_add, u64);
+wrapping_impl!(WrappingAdd, wrapping_add, usize);
+wrapping_impl!(WrappingAdd, wrapping_add, u128);
+
+wrapping_impl!(WrappingAdd, wrapping_add, i8);
+wrapping_impl!(WrappingAdd, wrapping_add, i16);
+wrapping_impl!(WrappingAdd, wrapping_add, i32);
+wrapping_impl!(WrappingAdd, wrapping_add, i64);
+wrapping_impl!(WrappingAdd, wrapping_add, isize);
+wrapping_impl!(WrappingAdd, wrapping_add, i128);
+
+/// Performs subtraction that wraps around on overflow.
+pub trait WrappingSub: Sized + Sub<Self, Output = Self> {
+ /// Wrapping (modular) subtraction. Computes `self - other`, wrapping around at the boundary
+ /// of the type.
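+ ///
+ /// For example:
+ ///
+ /// ```
+ /// use num_traits::WrappingSub;
+ ///
+ /// assert_eq!(WrappingSub::wrapping_sub(&100u8, &100), 0);
+ /// assert_eq!(WrappingSub::wrapping_sub(&0u8, &1), 255); // wrapped
+ /// ```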
+ fn wrapping_sub(&self, v: &Self) -> Self;
+}
+
+wrapping_impl!(WrappingSub, wrapping_sub, u8);
+wrapping_impl!(WrappingSub, wrapping_sub, u16);
+wrapping_impl!(WrappingSub, wrapping_sub, u32);
+wrapping_impl!(WrappingSub, wrapping_sub, u64);
+wrapping_impl!(WrappingSub, wrapping_sub, usize);
+wrapping_impl!(WrappingSub, wrapping_sub, u128);
+
+wrapping_impl!(WrappingSub, wrapping_sub, i8);
+wrapping_impl!(WrappingSub, wrapping_sub, i16);
+wrapping_impl!(WrappingSub, wrapping_sub, i32);
+wrapping_impl!(WrappingSub, wrapping_sub, i64);
+wrapping_impl!(WrappingSub, wrapping_sub, isize);
+wrapping_impl!(WrappingSub, wrapping_sub, i128);
+
+/// Performs multiplication that wraps around on overflow.
+pub trait WrappingMul: Sized + Mul<Self, Output = Self> {
+ /// Wrapping (modular) multiplication. Computes `self * other`, wrapping around at the boundary
+ /// of the type.
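+ ///
+ /// For example:
+ ///
+ /// ```
+ /// use num_traits::WrappingMul;
+ ///
+ /// assert_eq!(WrappingMul::wrapping_mul(&10u8, &12), 120);
+ /// assert_eq!(WrappingMul::wrapping_mul(&25u8, &12), 44); // 300 % 256
+ /// ```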
+ fn wrapping_mul(&self, v: &Self) -> Self;
+}
+
+wrapping_impl!(WrappingMul, wrapping_mul, u8);
+wrapping_impl!(WrappingMul, wrapping_mul, u16);
+wrapping_impl!(WrappingMul, wrapping_mul, u32);
+wrapping_impl!(WrappingMul, wrapping_mul, u64);
+wrapping_impl!(WrappingMul, wrapping_mul, usize);
+wrapping_impl!(WrappingMul, wrapping_mul, u128);
+
+wrapping_impl!(WrappingMul, wrapping_mul, i8);
+wrapping_impl!(WrappingMul, wrapping_mul, i16);
+wrapping_impl!(WrappingMul, wrapping_mul, i32);
+wrapping_impl!(WrappingMul, wrapping_mul, i64);
+wrapping_impl!(WrappingMul, wrapping_mul, isize);
+wrapping_impl!(WrappingMul, wrapping_mul, i128);
+
+macro_rules! wrapping_unary_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self) -> $t {
+ <$t>::$method(*self)
+ }
+ }
+ };
+}
+
+/// Performs a negation that does not panic.
+pub trait WrappingNeg: Sized {
+ /// Wrapping (modular) negation. Computes `-self`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// Since unsigned types do not have negative equivalents
+ /// all applications of this function will wrap (except for `-0`).
+ /// For values smaller than the corresponding signed type's maximum
+ /// the result is the same as casting the corresponding signed value.
+ /// Any larger values are equivalent to `MAX + 1 - (val - MAX - 1)` where
+ /// `MAX` is the corresponding signed type's maximum.
+ ///
+ /// ```
+ /// use num_traits::WrappingNeg;
+ ///
+ /// assert_eq!(100i8.wrapping_neg(), -100);
+ /// assert_eq!((-100i8).wrapping_neg(), 100);
+ /// assert_eq!((-128i8).wrapping_neg(), -128); // wrapped!
+ /// ```
+ fn wrapping_neg(&self) -> Self;
+}
+
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u8);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u16);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u32);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u64);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, usize);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u128);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i8);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i16);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i32);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i64);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, isize);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i128);
+
+macro_rules! wrapping_shift_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, rhs: u32) -> $t {
+ <$t>::$method(*self, rhs)
+ }
+ }
+ };
+}
+
+/// Performs a left shift that does not panic.
+pub trait WrappingShl: Sized + Shl<usize, Output = Self> {
+ /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
+ /// where `mask` removes any high order bits of `rhs` that would
+ /// cause the shift to exceed the bitwidth of the type.
+ ///
+ /// ```
+ /// use num_traits::WrappingShl;
+ ///
+ /// let x: u16 = 0x0001;
+ ///
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 0), 0x0001);
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 1), 0x0002);
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 15), 0x8000);
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 16), 0x0001);
+ /// ```
+ fn wrapping_shl(&self, rhs: u32) -> Self;
+}
+
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u8);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u16);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u32);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u64);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, usize);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u128);
+
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i8);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i16);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i32);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i64);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, isize);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i128);
+
+/// Performs a right shift that does not panic.
+pub trait WrappingShr: Sized + Shr<usize, Output = Self> {
+ /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
+ /// where `mask` removes any high order bits of `rhs` that would
+ /// cause the shift to exceed the bitwidth of the type.
+ ///
+ /// ```
+ /// use num_traits::WrappingShr;
+ ///
+ /// let x: u16 = 0x8000;
+ ///
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 0), 0x8000);
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 1), 0x4000);
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 15), 0x0001);
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 16), 0x8000);
+ /// ```
+ fn wrapping_shr(&self, rhs: u32) -> Self;
+}
+
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u8);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u16);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u32);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u64);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, usize);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u128);
+
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i8);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i16);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i32);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i64);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, isize);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i128);
+
+// Well this is a bit funny, but all the more appropriate.
+impl<T: WrappingAdd> WrappingAdd for Wrapping<T>
+where
+ Wrapping<T>: Add<Output = Wrapping<T>>,
+{
+ fn wrapping_add(&self, v: &Self) -> Self {
+ Wrapping(self.0.wrapping_add(&v.0))
+ }
+}
+impl<T: WrappingSub> WrappingSub for Wrapping<T>
+where
+ Wrapping<T>: Sub<Output = Wrapping<T>>,
+{
+ fn wrapping_sub(&self, v: &Self) -> Self {
+ Wrapping(self.0.wrapping_sub(&v.0))
+ }
+}
+impl<T: WrappingMul> WrappingMul for Wrapping<T>
+where
+ Wrapping<T>: Mul<Output = Wrapping<T>>,
+{
+ fn wrapping_mul(&self, v: &Self) -> Self {
+ Wrapping(self.0.wrapping_mul(&v.0))
+ }
+}
+impl<T: WrappingNeg> WrappingNeg for Wrapping<T>
+where
+ Wrapping<T>: Neg<Output = Wrapping<T>>,
+{
+ fn wrapping_neg(&self) -> Self {
+ Wrapping(self.0.wrapping_neg())
+ }
+}
+impl<T: WrappingShl> WrappingShl for Wrapping<T>
+where
+ Wrapping<T>: Shl<usize, Output = Wrapping<T>>,
+{
+ fn wrapping_shl(&self, rhs: u32) -> Self {
+ Wrapping(self.0.wrapping_shl(rhs))
+ }
+}
+impl<T: WrappingShr> WrappingShr for Wrapping<T>
+where
+ Wrapping<T>: Shr<usize, Output = Wrapping<T>>,
+{
+ fn wrapping_shr(&self, rhs: u32) -> Self {
+ Wrapping(self.0.wrapping_shr(rhs))
+ }
+}
+
+#[test]
+fn test_wrapping_traits() {
+ fn wrapping_add<T: WrappingAdd>(a: T, b: T) -> T {
+ a.wrapping_add(&b)
+ }
+ fn wrapping_sub<T: WrappingSub>(a: T, b: T) -> T {
+ a.wrapping_sub(&b)
+ }
+ fn wrapping_mul<T: WrappingMul>(a: T, b: T) -> T {
+ a.wrapping_mul(&b)
+ }
+ fn wrapping_neg<T: WrappingNeg>(a: T) -> T {
+ a.wrapping_neg()
+ }
+ fn wrapping_shl<T: WrappingShl>(a: T, b: u32) -> T {
+ a.wrapping_shl(b)
+ }
+ fn wrapping_shr<T: WrappingShr>(a: T, b: u32) -> T {
+ a.wrapping_shr(b)
+ }
+ assert_eq!(wrapping_add(255, 1), 0u8);
+ assert_eq!(wrapping_sub(0, 1), 255u8);
+ assert_eq!(wrapping_mul(255, 2), 254u8);
+ assert_eq!(wrapping_neg(255), 1u8);
+ assert_eq!(wrapping_shl(255, 8), 255u8);
+ assert_eq!(wrapping_shr(255, 8), 255u8);
+ assert_eq!(wrapping_add(255, 1), (Wrapping(255u8) + Wrapping(1u8)).0);
+ assert_eq!(wrapping_sub(0, 1), (Wrapping(0u8) - Wrapping(1u8)).0);
+ assert_eq!(wrapping_mul(255, 2), (Wrapping(255u8) * Wrapping(2u8)).0);
+ assert_eq!(wrapping_neg(255), (-Wrapping(255u8)).0);
+ assert_eq!(wrapping_shl(255, 8), (Wrapping(255u8) << 8).0);
+ assert_eq!(wrapping_shr(255, 8), (Wrapping(255u8) >> 8).0);
+}
+
+#[test]
+fn wrapping_is_wrappingadd() {
+ fn require_wrappingadd<T: WrappingAdd>(_: &T) {}
+ require_wrappingadd(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingsub() {
+ fn require_wrappingsub<T: WrappingSub>(_: &T) {}
+ require_wrappingsub(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingmul() {
+ fn require_wrappingmul<T: WrappingMul>(_: &T) {}
+ require_wrappingmul(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingneg() {
+ fn require_wrappingneg<T: WrappingNeg>(_: &T) {}
+ require_wrappingneg(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingshl() {
+ fn require_wrappingshl<T: WrappingShl>(_: &T) {}
+ require_wrappingshl(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingshr() {
+ fn require_wrappingshr<T: WrappingShr>(_: &T) {}
+ require_wrappingshr(&Wrapping(42));
+}
diff --git a/rust/vendor/num-traits/src/pow.rs b/rust/vendor/num-traits/src/pow.rs
new file mode 100644
index 0000000..ef51c95
--- /dev/null
+++ b/rust/vendor/num-traits/src/pow.rs
@@ -0,0 +1,242 @@
+use crate::{CheckedMul, One};
+use core::num::Wrapping;
+use core::ops::Mul;
+
+/// Binary operator for raising a value to a power.
+pub trait Pow<RHS> {
+ /// The result after applying the operator.
+ type Output;
+
+ /// Returns `self` to the power `rhs`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::Pow;
+ /// assert_eq!(Pow::pow(10u32, 2u32), 100);
+ /// ```
+ fn pow(self, rhs: RHS) -> Self::Output;
+}
+
+macro_rules! pow_impl {
+ ($t:ty) => {
+ pow_impl!($t, u8);
+ pow_impl!($t, usize);
+
+ // FIXME: these should be possible
+ // pow_impl!($t, u16);
+ // pow_impl!($t, u32);
+ // pow_impl!($t, u64);
+ };
+ ($t:ty, $rhs:ty) => {
+ pow_impl!($t, $rhs, usize, pow);
+ };
+ ($t:ty, $rhs:ty, $desired_rhs:ty, $method:expr) => {
+ impl Pow<$rhs> for $t {
+ type Output = $t;
+ #[inline]
+ fn pow(self, rhs: $rhs) -> $t {
+ ($method)(self, <$desired_rhs>::from(rhs))
+ }
+ }
+
+ impl<'a> Pow<&'a $rhs> for $t {
+ type Output = $t;
+ #[inline]
+ fn pow(self, rhs: &'a $rhs) -> $t {
+ ($method)(self, <$desired_rhs>::from(*rhs))
+ }
+ }
+
+ impl<'a> Pow<$rhs> for &'a $t {
+ type Output = $t;
+ #[inline]
+ fn pow(self, rhs: $rhs) -> $t {
+ ($method)(*self, <$desired_rhs>::from(rhs))
+ }
+ }
+
+ impl<'a, 'b> Pow<&'a $rhs> for &'b $t {
+ type Output = $t;
+ #[inline]
+ fn pow(self, rhs: &'a $rhs) -> $t {
+ ($method)(*self, <$desired_rhs>::from(*rhs))
+ }
+ }
+ };
+}
+
+pow_impl!(u8, u8, u32, u8::pow);
+pow_impl!(u8, u16, u32, u8::pow);
+pow_impl!(u8, u32, u32, u8::pow);
+pow_impl!(u8, usize);
+pow_impl!(i8, u8, u32, i8::pow);
+pow_impl!(i8, u16, u32, i8::pow);
+pow_impl!(i8, u32, u32, i8::pow);
+pow_impl!(i8, usize);
+pow_impl!(u16, u8, u32, u16::pow);
+pow_impl!(u16, u16, u32, u16::pow);
+pow_impl!(u16, u32, u32, u16::pow);
+pow_impl!(u16, usize);
+pow_impl!(i16, u8, u32, i16::pow);
+pow_impl!(i16, u16, u32, i16::pow);
+pow_impl!(i16, u32, u32, i16::pow);
+pow_impl!(i16, usize);
+pow_impl!(u32, u8, u32, u32::pow);
+pow_impl!(u32, u16, u32, u32::pow);
+pow_impl!(u32, u32, u32, u32::pow);
+pow_impl!(u32, usize);
+pow_impl!(i32, u8, u32, i32::pow);
+pow_impl!(i32, u16, u32, i32::pow);
+pow_impl!(i32, u32, u32, i32::pow);
+pow_impl!(i32, usize);
+pow_impl!(u64, u8, u32, u64::pow);
+pow_impl!(u64, u16, u32, u64::pow);
+pow_impl!(u64, u32, u32, u64::pow);
+pow_impl!(u64, usize);
+pow_impl!(i64, u8, u32, i64::pow);
+pow_impl!(i64, u16, u32, i64::pow);
+pow_impl!(i64, u32, u32, i64::pow);
+pow_impl!(i64, usize);
+
+pow_impl!(u128, u8, u32, u128::pow);
+pow_impl!(u128, u16, u32, u128::pow);
+pow_impl!(u128, u32, u32, u128::pow);
+pow_impl!(u128, usize);
+
+pow_impl!(i128, u8, u32, i128::pow);
+pow_impl!(i128, u16, u32, i128::pow);
+pow_impl!(i128, u32, u32, i128::pow);
+pow_impl!(i128, usize);
+
+pow_impl!(usize, u8, u32, usize::pow);
+pow_impl!(usize, u16, u32, usize::pow);
+pow_impl!(usize, u32, u32, usize::pow);
+pow_impl!(usize, usize);
+pow_impl!(isize, u8, u32, isize::pow);
+pow_impl!(isize, u16, u32, isize::pow);
+pow_impl!(isize, u32, u32, isize::pow);
+pow_impl!(isize, usize);
+pow_impl!(Wrapping<u8>);
+pow_impl!(Wrapping<i8>);
+pow_impl!(Wrapping<u16>);
+pow_impl!(Wrapping<i16>);
+pow_impl!(Wrapping<u32>);
+pow_impl!(Wrapping<i32>);
+pow_impl!(Wrapping<u64>);
+pow_impl!(Wrapping<i64>);
+pow_impl!(Wrapping<u128>);
+pow_impl!(Wrapping<i128>);
+pow_impl!(Wrapping<usize>);
+pow_impl!(Wrapping<isize>);
+
+// FIXME: these should be possible
+// pow_impl!(u8, u64);
+// pow_impl!(i16, u64);
+// pow_impl!(i8, u64);
+// pow_impl!(u16, u64);
+// pow_impl!(u32, u64);
+// pow_impl!(i32, u64);
+// pow_impl!(u64, u64);
+// pow_impl!(i64, u64);
+// pow_impl!(usize, u64);
+// pow_impl!(isize, u64);
+
+#[cfg(any(feature = "std", feature = "libm"))]
+mod float_impls {
+ use super::Pow;
+ use crate::Float;
+
+ pow_impl!(f32, i8, i32, <f32 as Float>::powi);
+ pow_impl!(f32, u8, i32, <f32 as Float>::powi);
+ pow_impl!(f32, i16, i32, <f32 as Float>::powi);
+ pow_impl!(f32, u16, i32, <f32 as Float>::powi);
+ pow_impl!(f32, i32, i32, <f32 as Float>::powi);
+ pow_impl!(f64, i8, i32, <f64 as Float>::powi);
+ pow_impl!(f64, u8, i32, <f64 as Float>::powi);
+ pow_impl!(f64, i16, i32, <f64 as Float>::powi);
+ pow_impl!(f64, u16, i32, <f64 as Float>::powi);
+ pow_impl!(f64, i32, i32, <f64 as Float>::powi);
+ pow_impl!(f32, f32, f32, <f32 as Float>::powf);
+ pow_impl!(f64, f32, f64, <f64 as Float>::powf);
+ pow_impl!(f64, f64, f64, <f64 as Float>::powf);
+}
+
+/// Raises a value to the power of exp, using exponentiation by squaring.
+///
+/// Note that `0⁰` (`pow(0, 0)`) returns `1`. Mathematically this is undefined.
+///
+/// # Example
+///
+/// ```rust
+/// use num_traits::pow;
+///
+/// assert_eq!(pow(2i8, 4), 16);
+/// assert_eq!(pow(6u8, 3), 216);
+/// assert_eq!(pow(0u8, 0), 1); // Be aware if this case affects you
+/// ```
+#[inline]
+pub fn pow<T: Clone + One + Mul<T, Output = T>>(mut base: T, mut exp: usize) -> T {
+ if exp == 0 {
+ return T::one();
+ }
+
+ while exp & 1 == 0 {
+ base = base.clone() * base;
+ exp >>= 1;
+ }
+ if exp == 1 {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ while exp > 1 {
+ exp >>= 1;
+ base = base.clone() * base;
+ if exp & 1 == 1 {
+ acc = acc * base.clone();
+ }
+ }
+ acc
+}
+
+/// Raises a value to the power of exp, returning `None` if an overflow occurred.
+///
+/// Note that `0⁰` (`checked_pow(0, 0)`) returns `Some(1)`. Mathematically this is undefined.
+///
+/// Otherwise same as the `pow` function.
+///
+/// # Example
+///
+/// ```rust
+/// use num_traits::checked_pow;
+///
+/// assert_eq!(checked_pow(2i8, 4), Some(16));
+/// assert_eq!(checked_pow(7i8, 8), None);
+/// assert_eq!(checked_pow(7u32, 8), Some(5_764_801));
+/// assert_eq!(checked_pow(0u32, 0), Some(1)); // Be aware if this case affects you
+/// ```
+#[inline]
+pub fn checked_pow<T: Clone + One + CheckedMul>(mut base: T, mut exp: usize) -> Option<T> {
+ if exp == 0 {
+ return Some(T::one());
+ }
+
+ while exp & 1 == 0 {
+ base = base.checked_mul(&base)?;
+ exp >>= 1;
+ }
+ if exp == 1 {
+ return Some(base);
+ }
+
+ let mut acc = base.clone();
+ while exp > 1 {
+ exp >>= 1;
+ base = base.checked_mul(&base)?;
+ if exp & 1 == 1 {
+ acc = acc.checked_mul(&base)?;
+ }
+ }
+ Some(acc)
+}
diff --git a/rust/vendor/num-traits/src/real.rs b/rust/vendor/num-traits/src/real.rs
new file mode 100644
index 0000000..d4feee0
--- /dev/null
+++ b/rust/vendor/num-traits/src/real.rs
@@ -0,0 +1,834 @@
+#![cfg(any(feature = "std", feature = "libm"))]
+
+use core::ops::Neg;
+
+use crate::{Float, Num, NumCast};
+
+// NOTE: These doctests have the same issue as those in src/float.rs.
+// They're testing the inherent methods directly, and not those of `Real`.
+
+/// A trait for real number types that do not necessarily have
+/// floating-point-specific characteristics such as NaN and infinity.
+///
+/// See [this Wikipedia article](https://en.wikipedia.org/wiki/Real_data_type)
+/// for a list of data types that could meaningfully implement this trait.
+///
+/// This trait is only available when either the `std` feature or the `libm` feature is enabled.
+pub trait Real: Num + Copy + NumCast + PartialOrd + Neg<Output = Self> {
+ /// Returns the smallest finite value that this type can represent.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x: f64 = Real::min_value();
+ ///
+ /// assert_eq!(x, f64::MIN);
+ /// ```
+ fn min_value() -> Self;
+
+ /// Returns the smallest positive, normalized value that this type can represent.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x: f64 = Real::min_positive_value();
+ ///
+ /// assert_eq!(x, f64::MIN_POSITIVE);
+ /// ```
+ fn min_positive_value() -> Self;
+
+ /// Returns epsilon, a small positive value.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x: f64 = Real::epsilon();
+ ///
+ /// assert_eq!(x, f64::EPSILON);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// The default implementation will panic if `f32::EPSILON` cannot
+ /// be cast to `Self`.
+ fn epsilon() -> Self;
+
+ /// Returns the largest finite value that this type can represent.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x: f64 = Real::max_value();
+ /// assert_eq!(x, f64::MAX);
+ /// ```
+ fn max_value() -> Self;
+
+ /// Returns the largest integer less than or equal to a number.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let f = 3.99;
+ /// let g = 3.0;
+ ///
+ /// assert_eq!(f.floor(), 3.0);
+ /// assert_eq!(g.floor(), 3.0);
+ /// ```
+ fn floor(self) -> Self;
+
+ /// Returns the smallest integer greater than or equal to a number.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let f = 3.01;
+ /// let g = 4.0;
+ ///
+ /// assert_eq!(f.ceil(), 4.0);
+ /// assert_eq!(g.ceil(), 4.0);
+ /// ```
+ fn ceil(self) -> Self;
+
+ /// Returns the nearest integer to a number. Round half-way cases away from
+ /// `0.0`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let f = 3.3;
+ /// let g = -3.3;
+ ///
+ /// assert_eq!(f.round(), 3.0);
+ /// assert_eq!(g.round(), -3.0);
+ /// ```
+ fn round(self) -> Self;
+
+ /// Returns the integer part of a number.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let f = 3.3;
+ /// let g = -3.7;
+ ///
+ /// assert_eq!(f.trunc(), 3.0);
+ /// assert_eq!(g.trunc(), -3.0);
+ /// ```
+ fn trunc(self) -> Self;
+
+ /// Returns the fractional part of a number.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 3.5;
+ /// let y = -3.5;
+ /// let abs_difference_x = (x.fract() - 0.5).abs();
+ /// let abs_difference_y = (y.fract() - (-0.5)).abs();
+ ///
+ /// assert!(abs_difference_x < 1e-10);
+ /// assert!(abs_difference_y < 1e-10);
+ /// ```
+ fn fract(self) -> Self;
+
+ /// Computes the absolute value of `self`. Returns `Float::nan()` if the
+ /// number is `Float::nan()`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x = 3.5;
+ /// let y = -3.5;
+ ///
+ /// let abs_difference_x = (x.abs() - x).abs();
+ /// let abs_difference_y = (y.abs() - (-y)).abs();
+ ///
+ /// assert!(abs_difference_x < 1e-10);
+ /// assert!(abs_difference_y < 1e-10);
+ ///
+ /// assert!(::num_traits::Float::is_nan(f64::NAN.abs()));
+ /// ```
+ fn abs(self) -> Self;
+
+ /// Returns a number that represents the sign of `self`.
+ ///
+ /// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
+ /// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
+ /// - `Float::nan()` if the number is `Float::nan()`
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let f = 3.5;
+ ///
+ /// assert_eq!(f.signum(), 1.0);
+ /// assert_eq!(f64::NEG_INFINITY.signum(), -1.0);
+ ///
+ /// assert!(f64::NAN.signum().is_nan());
+ /// ```
+ fn signum(self) -> Self;
+
+ /// Returns `true` if `self` is positive, including `+0.0`,
+ /// `Float::infinity()`, and with newer versions of Rust `f64::NAN`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let neg_nan: f64 = -f64::NAN;
+ ///
+ /// let f = 7.0;
+ /// let g = -7.0;
+ ///
+ /// assert!(f.is_sign_positive());
+ /// assert!(!g.is_sign_positive());
+ /// assert!(!neg_nan.is_sign_positive());
+ /// ```
+ fn is_sign_positive(self) -> bool;
+
+ /// Returns `true` if `self` is negative, including `-0.0`,
+ /// `Float::neg_infinity()`, and with newer versions of Rust `-f64::NAN`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let nan: f64 = f64::NAN;
+ ///
+ /// let f = 7.0;
+ /// let g = -7.0;
+ ///
+ /// assert!(!f.is_sign_negative());
+ /// assert!(g.is_sign_negative());
+ /// assert!(!nan.is_sign_negative());
+ /// ```
+ fn is_sign_negative(self) -> bool;
+
+ /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
+ /// error, yielding a more accurate result than an unfused multiply-add.
+ ///
+ /// Using `mul_add` can be more performant than an unfused multiply-add if
+ /// the target architecture has a dedicated `fma` CPU instruction.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let m = 10.0;
+ /// let x = 4.0;
+ /// let b = 60.0;
+ ///
+ /// // 100.0
+ /// let abs_difference = (m.mul_add(x, b) - (m*x + b)).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn mul_add(self, a: Self, b: Self) -> Self;
+
+ /// Take the reciprocal (inverse) of a number, `1/x`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 2.0;
+ /// let abs_difference = (x.recip() - (1.0/x)).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn recip(self) -> Self;
+
+ /// Raise a number to an integer power.
+ ///
+ /// Using this function is generally faster than using `powf`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 2.0;
+ /// let abs_difference = (x.powi(2) - x*x).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn powi(self, n: i32) -> Self;
+
+ /// Raise a number to a real number power.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 2.0;
+ /// let abs_difference = (x.powf(2.0) - x*x).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn powf(self, n: Self) -> Self;
+
+ /// Take the square root of a number.
+ ///
+ /// Returns NaN if `self` is a negative floating-point number.
+ ///
+ /// # Panics
+ ///
+ /// If the implementing type doesn't support NaN, this method should panic if `self < 0`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let positive = 4.0;
+ /// let negative = -4.0;
+ ///
+ /// let abs_difference = (positive.sqrt() - 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// assert!(::num_traits::Float::is_nan(negative.sqrt()));
+ /// ```
+ fn sqrt(self) -> Self;
+
+ /// Returns `e^(self)`, (the exponential function).
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let one = 1.0;
+ /// // e^1
+ /// let e = one.exp();
+ ///
+ /// // ln(e) - 1 == 0
+ /// let abs_difference = (e.ln() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn exp(self) -> Self;
+
+ /// Returns `2^(self)`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let f = 2.0;
+ ///
+ /// // 2^2 - 4 == 0
+ /// let abs_difference = (f.exp2() - 4.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn exp2(self) -> Self;
+
+ /// Returns the natural logarithm of the number.
+ ///
+ /// # Panics
+ ///
+ /// If `self <= 0` and this type does not support a NaN representation, this function should panic.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let one = 1.0;
+ /// // e^1
+ /// let e = one.exp();
+ ///
+ /// // ln(e) - 1 == 0
+ /// let abs_difference = (e.ln() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn ln(self) -> Self;
+
+ /// Returns the logarithm of the number with respect to an arbitrary base.
+ ///
+ /// # Panics
+ ///
+ /// If `self <= 0` and this type does not support a NaN representation, this function should panic.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let ten = 10.0;
+ /// let two = 2.0;
+ ///
+ /// // log10(10) - 1 == 0
+ /// let abs_difference_10 = (ten.log(10.0) - 1.0).abs();
+ ///
+ /// // log2(2) - 1 == 0
+ /// let abs_difference_2 = (two.log(2.0) - 1.0).abs();
+ ///
+ /// assert!(abs_difference_10 < 1e-10);
+ /// assert!(abs_difference_2 < 1e-10);
+ /// ```
+ fn log(self, base: Self) -> Self;
+
+ /// Returns the base 2 logarithm of the number.
+ ///
+ /// # Panics
+ ///
+ /// If `self <= 0` and this type does not support a NaN representation, this function should panic.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let two = 2.0;
+ ///
+ /// // log2(2) - 1 == 0
+ /// let abs_difference = (two.log2() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn log2(self) -> Self;
+
+ /// Returns the base 10 logarithm of the number.
+ ///
+ /// # Panics
+ ///
+ /// If `self <= 0` and this type does not support a NaN representation, this function should panic.
+ ///
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let ten = 10.0;
+ ///
+ /// // log10(10) - 1 == 0
+ /// let abs_difference = (ten.log10() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn log10(self) -> Self;
+
+ /// Converts radians to degrees.
+ ///
+ /// ```
+ /// use std::f64::consts;
+ ///
+ /// let angle = consts::PI;
+ ///
+ /// let abs_difference = (angle.to_degrees() - 180.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn to_degrees(self) -> Self;
+
+ /// Converts degrees to radians.
+ ///
+ /// ```
+ /// use std::f64::consts;
+ ///
+ /// let angle = 180.0_f64;
+ ///
+ /// let abs_difference = (angle.to_radians() - consts::PI).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn to_radians(self) -> Self;
+
+ /// Returns the maximum of the two numbers.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 1.0;
+ /// let y = 2.0;
+ ///
+ /// assert_eq!(x.max(y), y);
+ /// ```
+ fn max(self, other: Self) -> Self;
+
+ /// Returns the minimum of the two numbers.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 1.0;
+ /// let y = 2.0;
+ ///
+ /// assert_eq!(x.min(y), x);
+ /// ```
+ fn min(self, other: Self) -> Self;
+
+ /// The positive difference of two numbers.
+ ///
+ /// * If `self <= other`: `0.0`
+ /// * Else: `self - other`
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 3.0;
+ /// let y = -3.0;
+ ///
+ /// let abs_difference_x = (x.abs_sub(1.0) - 2.0).abs();
+ /// let abs_difference_y = (y.abs_sub(1.0) - 0.0).abs();
+ ///
+ /// assert!(abs_difference_x < 1e-10);
+ /// assert!(abs_difference_y < 1e-10);
+ /// ```
+ fn abs_sub(self, other: Self) -> Self;
+
+ /// Take the cubic root of a number.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 8.0;
+ ///
+ /// // x^(1/3) - 2 == 0
+ /// let abs_difference = (x.cbrt() - 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn cbrt(self) -> Self;
+
+ /// Calculate the length of the hypotenuse of a right-angle triangle given
+ /// legs of length `x` and `y`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 2.0;
+ /// let y = 3.0;
+ ///
+ /// // sqrt(x^2 + y^2)
+ /// let abs_difference = (x.hypot(y) - (x.powi(2) + y.powi(2)).sqrt()).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn hypot(self, other: Self) -> Self;
+
+ /// Computes the sine of a number (in radians).
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x = f64::consts::PI/2.0;
+ ///
+ /// let abs_difference = (x.sin() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn sin(self) -> Self;
+
+ /// Computes the cosine of a number (in radians).
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x = 2.0*f64::consts::PI;
+ ///
+ /// let abs_difference = (x.cos() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn cos(self) -> Self;
+
+ /// Computes the tangent of a number (in radians).
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x = f64::consts::PI/4.0;
+ /// let abs_difference = (x.tan() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-14);
+ /// ```
+ fn tan(self) -> Self;
+
+ /// Computes the arcsine of a number. Return value is in radians in
+ /// the range [-pi/2, pi/2] or NaN if the number is outside the range
+ /// [-1, 1].
+ ///
+ /// # Panics
+ ///
+ /// If this type does not support a NaN representation, this function should panic
+ /// if the number is outside the range [-1, 1].
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let f = f64::consts::PI / 2.0;
+ ///
+ /// // asin(sin(pi/2))
+ /// let abs_difference = (f.sin().asin() - f64::consts::PI / 2.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn asin(self) -> Self;
+
+ /// Computes the arccosine of a number. Return value is in radians in
+ /// the range [0, pi] or NaN if the number is outside the range
+ /// [-1, 1].
+ ///
+ /// # Panics
+ ///
+ /// If this type does not support a NaN representation, this function should panic
+ /// if the number is outside the range [-1, 1].
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let f = f64::consts::PI / 4.0;
+ ///
+ /// // acos(cos(pi/4))
+ /// let abs_difference = (f.cos().acos() - f64::consts::PI / 4.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn acos(self) -> Self;
+
+ /// Computes the arctangent of a number. Return value is in radians in the
+ /// range [-pi/2, pi/2].
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let f = 1.0;
+ ///
+ /// // atan(tan(1))
+ /// let abs_difference = (f.tan().atan() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn atan(self) -> Self;
+
+ /// Computes the four quadrant arctangent of `self` (`y`) and `other` (`x`).
+ ///
+ /// * `x = 0`, `y = 0`: `0`
+ /// * `x >= 0`: `arctan(y/x)` -> `[-pi/2, pi/2]`
+ /// * `x < 0`, `y >= 0`: `arctan(y/x) + pi` -> `(pi/2, pi]`
+ /// * `x < 0`, `y < 0`: `arctan(y/x) - pi` -> `(-pi, -pi/2)`
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let pi = f64::consts::PI;
+ /// // All angles from horizontal right (+x)
+ /// // 45 deg clockwise (-pi/4 radians)
+ /// let x1 = 3.0;
+ /// let y1 = -3.0;
+ ///
+ /// // 135 deg counter-clockwise (3*pi/4 radians)
+ /// let x2 = -3.0;
+ /// let y2 = 3.0;
+ ///
+ /// let abs_difference_1 = (y1.atan2(x1) - (-pi/4.0)).abs();
+ /// let abs_difference_2 = (y2.atan2(x2) - 3.0*pi/4.0).abs();
+ ///
+ /// assert!(abs_difference_1 < 1e-10);
+ /// assert!(abs_difference_2 < 1e-10);
+ /// ```
+ fn atan2(self, other: Self) -> Self;
+
+ /// Simultaneously computes the sine and cosine of the number, `x`. Returns
+ /// `(sin(x), cos(x))`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x = f64::consts::PI/4.0;
+ /// let f = x.sin_cos();
+ ///
+ /// let abs_difference_0 = (f.0 - x.sin()).abs();
+ /// let abs_difference_1 = (f.1 - x.cos()).abs();
+ ///
+ /// assert!(abs_difference_0 < 1e-10);
+ /// assert!(abs_difference_1 < 1e-10);
+ /// ```
+ fn sin_cos(self) -> (Self, Self);
+
+ /// Returns `e^(self) - 1` in a way that is accurate even if the
+ /// number is close to zero.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 7.0;
+ ///
+ /// // e^(ln(7)) - 1
+ /// let abs_difference = (x.ln().exp_m1() - 6.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn exp_m1(self) -> Self;
+
+ /// Returns `ln(1+n)` (natural logarithm) more accurately than if
+ /// the operations were performed separately.
+ ///
+ /// # Panics
+ ///
+ /// If this type does not support a NaN representation, this function should panic
+ /// if `self <= -1`.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let x = f64::consts::E - 1.0;
+ ///
+ /// // ln(1 + (e - 1)) == ln(e) == 1
+ /// let abs_difference = (x.ln_1p() - 1.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn ln_1p(self) -> Self;
+
+ /// Hyperbolic sine function.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let e = f64::consts::E;
+ /// let x = 1.0;
+ ///
+ /// let f = x.sinh();
+ /// // Solving sinh() at 1 gives `(e^2-1)/(2e)`
+ /// let g = (e*e - 1.0)/(2.0*e);
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ fn sinh(self) -> Self;
+
+ /// Hyperbolic cosine function.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let e = f64::consts::E;
+ /// let x = 1.0;
+ /// let f = x.cosh();
+ /// // Solving cosh() at 1 gives `(e^2+1)/(2e)`
+ /// let g = (e*e + 1.0)/(2.0*e);
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// // Same result
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn cosh(self) -> Self;
+
+ /// Hyperbolic tangent function.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let e = f64::consts::E;
+ /// let x = 1.0;
+ ///
+ /// let f = x.tanh();
+ /// // Solving tanh() at 1 gives `(1 - e^(-2))/(1 + e^(-2))`
+ /// let g = (1.0 - e.powi(-2))/(1.0 + e.powi(-2));
+ /// let abs_difference = (f - g).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn tanh(self) -> Self;
+
+ /// Inverse hyperbolic sine function.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 1.0;
+ /// let f = x.sinh().asinh();
+ ///
+ /// let abs_difference = (f - x).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn asinh(self) -> Self;
+
+ /// Inverse hyperbolic cosine function.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ ///
+ /// let x = 1.0;
+ /// let f = x.cosh().acosh();
+ ///
+ /// let abs_difference = (f - x).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn acosh(self) -> Self;
+
+ /// Inverse hyperbolic tangent function.
+ ///
+ /// ```
+ /// use num_traits::real::Real;
+ /// use std::f64;
+ ///
+ /// let e = f64::consts::E;
+ /// let f = e.tanh().atanh();
+ ///
+ /// let abs_difference = (f - e).abs();
+ ///
+ /// assert!(abs_difference < 1.0e-10);
+ /// ```
+ fn atanh(self) -> Self;
+}
+
+impl<T: Float> Real for T {
+ forward! {
+ Float::min_value() -> Self;
+ Float::min_positive_value() -> Self;
+ Float::epsilon() -> Self;
+ Float::max_value() -> Self;
+ }
+ forward! {
+ Float::floor(self) -> Self;
+ Float::ceil(self) -> Self;
+ Float::round(self) -> Self;
+ Float::trunc(self) -> Self;
+ Float::fract(self) -> Self;
+ Float::abs(self) -> Self;
+ Float::signum(self) -> Self;
+ Float::is_sign_positive(self) -> bool;
+ Float::is_sign_negative(self) -> bool;
+ Float::mul_add(self, a: Self, b: Self) -> Self;
+ Float::recip(self) -> Self;
+ Float::powi(self, n: i32) -> Self;
+ Float::powf(self, n: Self) -> Self;
+ Float::sqrt(self) -> Self;
+ Float::exp(self) -> Self;
+ Float::exp2(self) -> Self;
+ Float::ln(self) -> Self;
+ Float::log(self, base: Self) -> Self;
+ Float::log2(self) -> Self;
+ Float::log10(self) -> Self;
+ Float::to_degrees(self) -> Self;
+ Float::to_radians(self) -> Self;
+ Float::max(self, other: Self) -> Self;
+ Float::min(self, other: Self) -> Self;
+ Float::abs_sub(self, other: Self) -> Self;
+ Float::cbrt(self) -> Self;
+ Float::hypot(self, other: Self) -> Self;
+ Float::sin(self) -> Self;
+ Float::cos(self) -> Self;
+ Float::tan(self) -> Self;
+ Float::asin(self) -> Self;
+ Float::acos(self) -> Self;
+ Float::atan(self) -> Self;
+ Float::atan2(self, other: Self) -> Self;
+ Float::sin_cos(self) -> (Self, Self);
+ Float::exp_m1(self) -> Self;
+ Float::ln_1p(self) -> Self;
+ Float::sinh(self) -> Self;
+ Float::cosh(self) -> Self;
+ Float::tanh(self) -> Self;
+ Float::asinh(self) -> Self;
+ Float::acosh(self) -> Self;
+ Float::atanh(self) -> Self;
+ }
+}
diff --git a/rust/vendor/num-traits/src/sign.rs b/rust/vendor/num-traits/src/sign.rs
new file mode 100644
index 0000000..a0d6b0f
--- /dev/null
+++ b/rust/vendor/num-traits/src/sign.rs
@@ -0,0 +1,216 @@
+use core::num::Wrapping;
+use core::ops::Neg;
+
+use crate::float::FloatCore;
+use crate::Num;
+
+/// Useful functions for signed numbers (i.e. numbers that can be negative).
+pub trait Signed: Sized + Num + Neg<Output = Self> {
+ /// Computes the absolute value.
+ ///
+ /// For `f32` and `f64`, `NaN` will be returned if the number is `NaN`.
+ ///
+ /// For signed integers, `::MIN` will be returned if the number is `::MIN`.
+ fn abs(&self) -> Self;
+
+ /// The positive difference of two numbers.
+ ///
+ /// Returns `zero` if the number is less than or equal to `other`, otherwise the difference
+ /// between `self` and `other` is returned.
+ fn abs_sub(&self, other: &Self) -> Self;
+
+ /// Returns the sign of the number.
+ ///
+ /// For `f32` and `f64`:
+ ///
+ /// * `1.0` if the number is positive, `+0.0` or `INFINITY`
+ /// * `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
+ /// * `NaN` if the number is `NaN`
+ ///
+ /// For signed integers:
+ ///
+ /// * `0` if the number is zero
+ /// * `1` if the number is positive
+ /// * `-1` if the number is negative
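+ ///
+ /// For example (values chosen only for illustration; the calls are written
+ /// through the trait to avoid the inherent `signum` methods):
+ ///
+ /// ```
+ /// use num_traits::Signed;
+ ///
+ /// assert_eq!(Signed::signum(&-7i32), -1);
+ /// assert_eq!(Signed::signum(&0i32), 0);
+ /// assert_eq!(Signed::signum(&7i32), 1);
+ /// assert_eq!(Signed::signum(&-0.5f64), -1.0);
+ /// ```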
+ fn signum(&self) -> Self;
+
+ /// Returns true if the number is positive and false if the number is zero or negative.
+ fn is_positive(&self) -> bool;
+
+ /// Returns true if the number is negative and false if the number is zero or positive.
+ fn is_negative(&self) -> bool;
+}
+
+macro_rules! signed_impl {
+ ($($t:ty)*) => ($(
+ impl Signed for $t {
+ #[inline]
+ fn abs(&self) -> $t {
+ if self.is_negative() { -*self } else { *self }
+ }
+
+ #[inline]
+ fn abs_sub(&self, other: &$t) -> $t {
+ if *self <= *other { 0 } else { *self - *other }
+ }
+
+ #[inline]
+ fn signum(&self) -> $t {
+ match *self {
+ n if n > 0 => 1,
+ 0 => 0,
+ _ => -1,
+ }
+ }
+
+ #[inline]
+ fn is_positive(&self) -> bool { *self > 0 }
+
+ #[inline]
+ fn is_negative(&self) -> bool { *self < 0 }
+ }
+ )*)
+}
+
+signed_impl!(isize i8 i16 i32 i64 i128);
+
+impl<T: Signed> Signed for Wrapping<T>
+where
+ Wrapping<T>: Num + Neg<Output = Wrapping<T>>,
+{
+ #[inline]
+ fn abs(&self) -> Self {
+ Wrapping(self.0.abs())
+ }
+
+ #[inline]
+ fn abs_sub(&self, other: &Self) -> Self {
+ Wrapping(self.0.abs_sub(&other.0))
+ }
+
+ #[inline]
+ fn signum(&self) -> Self {
+ Wrapping(self.0.signum())
+ }
+
+ #[inline]
+ fn is_positive(&self) -> bool {
+ self.0.is_positive()
+ }
+
+ #[inline]
+ fn is_negative(&self) -> bool {
+ self.0.is_negative()
+ }
+}
+
+macro_rules! signed_float_impl {
+ ($t:ty) => {
+ impl Signed for $t {
+ /// Computes the absolute value. Returns `NAN` if the number is `NAN`.
+ #[inline]
+ fn abs(&self) -> $t {
+ FloatCore::abs(*self)
+ }
+
+ /// The positive difference of two numbers. Returns `0.0` if the number is
+ /// less than or equal to `other`, otherwise the difference between `self`
+ /// and `other` is returned.
+ #[inline]
+ fn abs_sub(&self, other: &$t) -> $t {
+ if *self <= *other {
+ 0.
+ } else {
+ *self - *other
+ }
+ }
+
+ /// # Returns
+ ///
+ /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
+ /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
+ /// - `NAN` if the number is NaN
+ #[inline]
+ fn signum(&self) -> $t {
+ FloatCore::signum(*self)
+ }
+
+ /// Returns `true` if the number is positive, including `+0.0` and `INFINITY`
+ #[inline]
+ fn is_positive(&self) -> bool {
+ FloatCore::is_sign_positive(*self)
+ }
+
+ /// Returns `true` if the number is negative, including `-0.0` and `NEG_INFINITY`
+ #[inline]
+ fn is_negative(&self) -> bool {
+ FloatCore::is_sign_negative(*self)
+ }
+ }
+ };
+}
+
+signed_float_impl!(f32);
+signed_float_impl!(f64);
+
+/// Computes the absolute value.
+///
+/// For `f32` and `f64`, `NaN` will be returned if the number is `NaN`
+///
+/// For signed integers, `::MIN` will be returned if the number is `::MIN`.
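+///
+/// For example (using the crate-root re-export):
+///
+/// ```
+/// use num_traits::abs;
+///
+/// assert_eq!(abs(-3i32), 3);
+/// assert_eq!(abs(-0.25f64), 0.25);
+/// ```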
+#[inline(always)]
+pub fn abs<T: Signed>(value: T) -> T {
+ value.abs()
+}
+
+/// The positive difference of two numbers.
+///
+/// Returns zero if `x` is less than or equal to `y`, otherwise the difference
+/// between `x` and `y` is returned.
+#[inline(always)]
+pub fn abs_sub<T: Signed>(x: T, y: T) -> T {
+ x.abs_sub(&y)
+}
+
+/// Returns the sign of the number.
+///
+/// For `f32` and `f64`:
+///
+/// * `1.0` if the number is positive, `+0.0` or `INFINITY`
+/// * `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
+/// * `NaN` if the number is `NaN`
+///
+/// For signed integers:
+///
+/// * `0` if the number is zero
+/// * `1` if the number is positive
+/// * `-1` if the number is negative
+#[inline(always)]
+pub fn signum<T: Signed>(value: T) -> T {
+ value.signum()
+}
+
+/// A trait for values which cannot be negative
+pub trait Unsigned: Num {}
+
+macro_rules! empty_trait_impl {
+ ($name:ident for $($t:ty)*) => ($(
+ impl $name for $t {}
+ )*)
+}
+
+empty_trait_impl!(Unsigned for usize u8 u16 u32 u64 u128);
+
+impl<T: Unsigned> Unsigned for Wrapping<T> where Wrapping<T>: Num {}
+
+#[test]
+fn unsigned_wrapping_is_unsigned() {
+ fn require_unsigned<T: Unsigned>(_: &T) {}
+ require_unsigned(&Wrapping(42_u32));
+}
+
+#[test]
+fn signed_wrapping_is_signed() {
+ fn require_signed<T: Signed>(_: &T) {}
+ require_signed(&Wrapping(-42));
+}
diff --git a/rust/vendor/num-traits/tests/cast.rs b/rust/vendor/num-traits/tests/cast.rs
new file mode 100644
index 0000000..4f01d74
--- /dev/null
+++ b/rust/vendor/num-traits/tests/cast.rs
@@ -0,0 +1,387 @@
+//! Tests of `num_traits::cast`.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use num_traits::cast::*;
+use num_traits::Bounded;
+
+use core::{f32, f64};
+use core::{i128, i16, i32, i64, i8, isize};
+use core::{u128, u16, u32, u64, u8, usize};
+
+use core::fmt::Debug;
+use core::mem;
+use core::num::Wrapping;
+
+#[test]
+fn to_primitive_float() {
+ let f32_toolarge = 1e39f64;
+ assert_eq!(f32_toolarge.to_f32(), Some(f32::INFINITY));
+ assert_eq!((-f32_toolarge).to_f32(), Some(f32::NEG_INFINITY));
+ assert_eq!((f32::MAX as f64).to_f32(), Some(f32::MAX));
+ assert_eq!((-f32::MAX as f64).to_f32(), Some(-f32::MAX));
+ assert_eq!(f64::INFINITY.to_f32(), Some(f32::INFINITY));
+ assert_eq!((f64::NEG_INFINITY).to_f32(), Some(f32::NEG_INFINITY));
+ assert!((f64::NAN).to_f32().map_or(false, |f| f.is_nan()));
+}
+
+#[test]
+fn wrapping_to_primitive() {
+ macro_rules! test_wrapping_to_primitive {
+ ($($t:ty)+) => {
+ $({
+ let i: $t = 0;
+ let w = Wrapping(i);
+ assert_eq!(i.to_u8(), w.to_u8());
+ assert_eq!(i.to_u16(), w.to_u16());
+ assert_eq!(i.to_u32(), w.to_u32());
+ assert_eq!(i.to_u64(), w.to_u64());
+ assert_eq!(i.to_usize(), w.to_usize());
+ assert_eq!(i.to_i8(), w.to_i8());
+ assert_eq!(i.to_i16(), w.to_i16());
+ assert_eq!(i.to_i32(), w.to_i32());
+ assert_eq!(i.to_i64(), w.to_i64());
+ assert_eq!(i.to_isize(), w.to_isize());
+ assert_eq!(i.to_f32(), w.to_f32());
+ assert_eq!(i.to_f64(), w.to_f64());
+ })+
+ };
+ }
+
+ test_wrapping_to_primitive!(usize u8 u16 u32 u64 isize i8 i16 i32 i64);
+}
+
+#[test]
+fn wrapping_is_toprimitive() {
+ fn require_toprimitive<T: ToPrimitive>(_: &T) {}
+ require_toprimitive(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_fromprimitive() {
+ fn require_fromprimitive<T: FromPrimitive>(_: &T) {}
+ require_fromprimitive(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_numcast() {
+ fn require_numcast<T: NumCast>(_: &T) {}
+ require_numcast(&Wrapping(42));
+}
+
+#[test]
+fn as_primitive() {
+ let x: f32 = (1.625f64).as_();
+ assert_eq!(x, 1.625f32);
+
+ let x: f32 = (3.14159265358979323846f64).as_();
+ assert_eq!(x, 3.1415927f32);
+
+ let x: u8 = (768i16).as_();
+ assert_eq!(x, 0);
+}
+
+#[test]
+fn float_to_integer_checks_overflow() {
+ // This will overflow an i32
+ let source: f64 = 1.0e+123f64;
+
+ // Expect the overflow to be caught
+ assert_eq!(cast::<f64, i32>(source), None);
+}
+
+#[test]
+fn cast_to_int_checks_overflow() {
+ let big_f: f64 = 1.0e123;
+ let normal_f: f64 = 1.0;
+ let small_f: f64 = -1.0e123;
+ assert_eq!(None, cast::<f64, isize>(big_f));
+ assert_eq!(None, cast::<f64, i8>(big_f));
+ assert_eq!(None, cast::<f64, i16>(big_f));
+ assert_eq!(None, cast::<f64, i32>(big_f));
+ assert_eq!(None, cast::<f64, i64>(big_f));
+
+ assert_eq!(Some(normal_f as isize), cast::<f64, isize>(normal_f));
+ assert_eq!(Some(normal_f as i8), cast::<f64, i8>(normal_f));
+ assert_eq!(Some(normal_f as i16), cast::<f64, i16>(normal_f));
+ assert_eq!(Some(normal_f as i32), cast::<f64, i32>(normal_f));
+ assert_eq!(Some(normal_f as i64), cast::<f64, i64>(normal_f));
+
+ assert_eq!(None, cast::<f64, isize>(small_f));
+ assert_eq!(None, cast::<f64, i8>(small_f));
+ assert_eq!(None, cast::<f64, i16>(small_f));
+ assert_eq!(None, cast::<f64, i32>(small_f));
+ assert_eq!(None, cast::<f64, i64>(small_f));
+}
+
+#[test]
+fn cast_to_unsigned_int_checks_overflow() {
+ let big_f: f64 = 1.0e123;
+ let normal_f: f64 = 1.0;
+ let small_f: f64 = -1.0e123;
+ assert_eq!(None, cast::<f64, usize>(big_f));
+ assert_eq!(None, cast::<f64, u8>(big_f));
+ assert_eq!(None, cast::<f64, u16>(big_f));
+ assert_eq!(None, cast::<f64, u32>(big_f));
+ assert_eq!(None, cast::<f64, u64>(big_f));
+
+ assert_eq!(Some(normal_f as usize), cast::<f64, usize>(normal_f));
+ assert_eq!(Some(normal_f as u8), cast::<f64, u8>(normal_f));
+ assert_eq!(Some(normal_f as u16), cast::<f64, u16>(normal_f));
+ assert_eq!(Some(normal_f as u32), cast::<f64, u32>(normal_f));
+ assert_eq!(Some(normal_f as u64), cast::<f64, u64>(normal_f));
+
+ assert_eq!(None, cast::<f64, usize>(small_f));
+ assert_eq!(None, cast::<f64, u8>(small_f));
+ assert_eq!(None, cast::<f64, u16>(small_f));
+ assert_eq!(None, cast::<f64, u32>(small_f));
+ assert_eq!(None, cast::<f64, u64>(small_f));
+}
+
+#[test]
+fn cast_to_i128_checks_overflow() {
+ let big_f: f64 = 1.0e123;
+ let normal_f: f64 = 1.0;
+ let small_f: f64 = -1.0e123;
+ assert_eq!(None, cast::<f64, i128>(big_f));
+ assert_eq!(None, cast::<f64, u128>(big_f));
+
+ assert_eq!(Some(normal_f as i128), cast::<f64, i128>(normal_f));
+ assert_eq!(Some(normal_f as u128), cast::<f64, u128>(normal_f));
+
+ assert_eq!(None, cast::<f64, i128>(small_f));
+ assert_eq!(None, cast::<f64, u128>(small_f));
+}
+
+#[cfg(feature = "std")]
+fn dbg(args: ::core::fmt::Arguments<'_>) {
+ println!("{}", args);
+}
+
+#[cfg(not(feature = "std"))]
+fn dbg(_: ::core::fmt::Arguments) {}
+
+// Rust 1.8 doesn't handle cfg on macros correctly
+macro_rules! dbg { ($($tok:tt)*) => { dbg(format_args!($($tok)*)) } }
+
+macro_rules! float_test_edge {
+ ($f:ident -> $($t:ident)+) => { $({
+ dbg!("testing cast edge cases for {} -> {}", stringify!($f), stringify!($t));
+
+ let small = if $t::MIN == 0 || mem::size_of::<$t>() < mem::size_of::<$f>() {
+ $t::MIN as $f - 1.0
+ } else {
+ ($t::MIN as $f).raw_inc().floor()
+ };
+ let fmin = small.raw_dec();
+ dbg!(" testing min {}\n\tvs. {:.0}\n\tand {:.0}", $t::MIN, fmin, small);
+ assert_eq!(Some($t::MIN), cast::<$f, $t>($t::MIN as $f));
+ assert_eq!(Some($t::MIN), cast::<$f, $t>(fmin));
+ assert_eq!(None, cast::<$f, $t>(small));
+
+ let (max, large) = if mem::size_of::<$t>() < mem::size_of::<$f>() {
+ ($t::MAX, $t::MAX as $f + 1.0)
+ } else {
+ let large = $t::MAX as $f; // rounds up!
+ let max = large.raw_dec() as $t; // the next smallest possible
+ assert_eq!(max.count_ones(), $f::MANTISSA_DIGITS);
+ (max, large)
+ };
+ let fmax = large.raw_dec();
+ dbg!(" testing max {}\n\tvs. {:.0}\n\tand {:.0}", max, fmax, large);
+ assert_eq!(Some(max), cast::<$f, $t>(max as $f));
+ assert_eq!(Some(max), cast::<$f, $t>(fmax));
+ assert_eq!(None, cast::<$f, $t>(large));
+
+ dbg!(" testing non-finite values");
+ assert_eq!(None, cast::<$f, $t>($f::NAN));
+ assert_eq!(None, cast::<$f, $t>($f::INFINITY));
+ assert_eq!(None, cast::<$f, $t>($f::NEG_INFINITY));
+ })+}
+}
+
+trait RawOffset: Sized {
+ fn raw_inc(self) -> Self;
+ fn raw_dec(self) -> Self;
+}
+
+impl RawOffset for f32 {
+ fn raw_inc(self) -> Self {
+ Self::from_bits(self.to_bits() + 1)
+ }
+
+ fn raw_dec(self) -> Self {
+ Self::from_bits(self.to_bits() - 1)
+ }
+}
+
+impl RawOffset for f64 {
+ fn raw_inc(self) -> Self {
+ Self::from_bits(self.to_bits() + 1)
+ }
+
+ fn raw_dec(self) -> Self {
+ Self::from_bits(self.to_bits() - 1)
+ }
+}
+
+#[test]
+fn cast_float_to_int_edge_cases() {
+ float_test_edge!(f32 -> isize i8 i16 i32 i64);
+ float_test_edge!(f32 -> usize u8 u16 u32 u64);
+ float_test_edge!(f64 -> isize i8 i16 i32 i64);
+ float_test_edge!(f64 -> usize u8 u16 u32 u64);
+}
+
+#[test]
+fn cast_float_to_i128_edge_cases() {
+ float_test_edge!(f32 -> i128 u128);
+ float_test_edge!(f64 -> i128 u128);
+}
+
+macro_rules! int_test_edge {
+ ($f:ident -> { $($t:ident)+ } with $BigS:ident $BigU:ident ) => { $({
+ #[allow(arithmetic_overflow)] // https://github.com/rust-lang/rust/issues/109731
+ fn test_edge() {
+ dbg!("testing cast edge cases for {} -> {}", stringify!($f), stringify!($t));
+
+ match ($f::MIN as $BigS).cmp(&($t::MIN as $BigS)) {
+ Greater => {
+ assert_eq!(Some($f::MIN as $t), cast::<$f, $t>($f::MIN));
+ }
+ Equal => {
+ assert_eq!(Some($t::MIN), cast::<$f, $t>($f::MIN));
+ }
+ Less => {
+ let min = $t::MIN as $f;
+ assert_eq!(Some($t::MIN), cast::<$f, $t>(min));
+ assert_eq!(None, cast::<$f, $t>(min - 1));
+ }
+ }
+
+ match ($f::MAX as $BigU).cmp(&($t::MAX as $BigU)) {
+ Greater => {
+ let max = $t::MAX as $f;
+ assert_eq!(Some($t::MAX), cast::<$f, $t>(max));
+ assert_eq!(None, cast::<$f, $t>(max + 1));
+ }
+ Equal => {
+ assert_eq!(Some($t::MAX), cast::<$f, $t>($f::MAX));
+ }
+ Less => {
+ assert_eq!(Some($f::MAX as $t), cast::<$f, $t>($f::MAX));
+ }
+ }
+ }
+ test_edge();
+ })+}
+}
+
+#[test]
+fn cast_int_to_int_edge_cases() {
+ use core::cmp::Ordering::*;
+
+ macro_rules! test_edge {
+ ($( $from:ident )+) => { $({
+ int_test_edge!($from -> { isize i8 i16 i32 i64 } with i64 u64);
+ int_test_edge!($from -> { usize u8 u16 u32 u64 } with i64 u64);
+ })+}
+ }
+
+ test_edge!(isize i8 i16 i32 i64);
+ test_edge!(usize u8 u16 u32 u64);
+}
+
+#[test]
+fn cast_int_to_128_edge_cases() {
+ use core::cmp::Ordering::*;
+
+ macro_rules! test_edge {
+ ($( $t:ident )+) => {
+ $(
+ int_test_edge!($t -> { i128 u128 } with i128 u128);
+ )+
+ int_test_edge!(i128 -> { $( $t )+ } with i128 u128);
+ int_test_edge!(u128 -> { $( $t )+ } with i128 u128);
+ }
+ }
+
+ test_edge!(isize i8 i16 i32 i64 i128);
+ test_edge!(usize u8 u16 u32 u64 u128);
+}
+
+#[test]
+fn newtype_from_primitive() {
+ #[derive(PartialEq, Debug)]
+ struct New<T>(T);
+
+ // minimal impl
+ impl<T: FromPrimitive> FromPrimitive for New<T> {
+ fn from_i64(n: i64) -> Option<Self> {
+ T::from_i64(n).map(New)
+ }
+
+ fn from_u64(n: u64) -> Option<Self> {
+ T::from_u64(n).map(New)
+ }
+ }
+
+ macro_rules! assert_eq_from {
+ ($( $from:ident )+) => {$(
+ assert_eq!(T::$from(Bounded::min_value()).map(New),
+ New::<T>::$from(Bounded::min_value()));
+ assert_eq!(T::$from(Bounded::max_value()).map(New),
+ New::<T>::$from(Bounded::max_value()));
+ )+}
+ }
+
+ fn check<T: PartialEq + Debug + FromPrimitive>() {
+ assert_eq_from!(from_i8 from_i16 from_i32 from_i64 from_isize);
+ assert_eq_from!(from_u8 from_u16 from_u32 from_u64 from_usize);
+ assert_eq_from!(from_f32 from_f64);
+ }
+
+ macro_rules! check {
+ ($( $ty:ty )+) => {$( check::<$ty>(); )+}
+ }
+ check!(i8 i16 i32 i64 isize);
+ check!(u8 u16 u32 u64 usize);
+}
+
+#[test]
+fn newtype_to_primitive() {
+ #[derive(PartialEq, Debug)]
+ struct New<T>(T);
+
+ // minimal impl
+ impl<T: ToPrimitive> ToPrimitive for New<T> {
+ fn to_i64(&self) -> Option<i64> {
+ self.0.to_i64()
+ }
+
+ fn to_u64(&self) -> Option<u64> {
+ self.0.to_u64()
+ }
+ }
+
+ macro_rules! assert_eq_to {
+ ($( $to:ident )+) => {$(
+ assert_eq!(T::$to(&Bounded::min_value()),
+ New::<T>::$to(&New(Bounded::min_value())));
+ assert_eq!(T::$to(&Bounded::max_value()),
+ New::<T>::$to(&New(Bounded::max_value())));
+ )+}
+ }
+
+ fn check<T: PartialEq + Debug + Bounded + ToPrimitive>() {
+ assert_eq_to!(to_i8 to_i16 to_i32 to_i64 to_isize);
+ assert_eq_to!(to_u8 to_u16 to_u32 to_u64 to_usize);
+ assert_eq_to!(to_f32 to_f64);
+ }
+
+ macro_rules! check {
+ ($( $ty:ty )+) => {$( check::<$ty>(); )+}
+ }
+ check!(i8 i16 i32 i64 isize);
+ check!(u8 u16 u32 u64 usize);
+}
diff --git a/rust/vendor/num/.cargo-checksum.json b/rust/vendor/num/.cargo-checksum.json
new file mode 100644
index 0000000..b9fb13b
--- /dev/null
+++ b/rust/vendor/num/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"4ce08f1624b297eecf05340b4fe1c0ae53d0d0e30287ebf20b677379e7722b98","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"6df9fdf7ad297e5bc448129b55ae76169f89f8d04aada688b6f82a16fe3399b0","RELEASES.md":"40bc428700aa1a6b614ec572ebd74ec05a7419759675163d14d301a2f825a3e8","src/lib.rs":"64843c8ed75f4940e70f45bfd8d67510b565c39dc0455729c13ced11deb71410"},"package":"b8536030f9fea7127f841b45bb6243b27255787fb4eb83958aa1ef9d2fdc0c36"} \ No newline at end of file
diff --git a/rust/vendor/num/Cargo.toml b/rust/vendor/num/Cargo.toml
new file mode 100644
index 0000000..f496f25
--- /dev/null
+++ b/rust/vendor/num/Cargo.toml
@@ -0,0 +1,62 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "num"
+version = "0.2.1"
+authors = ["The Rust Project Developers"]
+exclude = ["/ci/*", "/.travis.yml", "/bors.toml"]
+description = "A collection of numeric types and traits for Rust, including bigint,\ncomplex, rational, range iterators, generic integers, and more!\n"
+homepage = "https://github.com/rust-num/num"
+documentation = "https://docs.rs/num"
+readme = "README.md"
+keywords = ["mathematics", "numerics", "bignum"]
+categories = ["algorithms", "data-structures", "science", "no-std"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/rust-num/num"
+[package.metadata.docs.rs]
+features = ["std", "serde", "rand"]
+[dependencies.num-bigint]
+version = "0.2.5"
+optional = true
+default-features = false
+
+[dependencies.num-complex]
+version = "0.2.4"
+default-features = false
+
+[dependencies.num-integer]
+version = "0.1.42"
+default-features = false
+
+[dependencies.num-iter]
+version = "0.1.40"
+default-features = false
+
+[dependencies.num-rational]
+version = "0.2.3"
+default-features = false
+
+[dependencies.num-traits]
+version = "0.2.11"
+default-features = false
+
+[dev-dependencies]
+
+[features]
+default = ["std"]
+i128 = ["num-bigint/i128", "num-complex/i128", "num-integer/i128", "num-iter/i128", "num-rational/i128", "num-traits/i128"]
+rand = ["num-bigint/rand", "num-complex/rand"]
+serde = ["num-bigint/serde", "num-complex/serde", "num-rational/serde"]
+std = ["num-bigint/std", "num-complex/std", "num-integer/std", "num-iter/std", "num-rational/std", "num-rational/bigint", "num-traits/std"]
+[badges.travis-ci]
+repository = "rust-num/num"
diff --git a/rust/vendor/num/LICENSE-APACHE b/rust/vendor/num/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/rust/vendor/num/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/rust/vendor/num/LICENSE-MIT b/rust/vendor/num/LICENSE-MIT
new file mode 100644
index 0000000..39d4bdb
--- /dev/null
+++ b/rust/vendor/num/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num/README.md b/rust/vendor/num/README.md
new file mode 100644
index 0000000..6051ecc
--- /dev/null
+++ b/rust/vendor/num/README.md
@@ -0,0 +1,124 @@
+# num
+
+[![crate](https://img.shields.io/crates/v/num.svg)](https://crates.io/crates/num)
+[![documentation](https://docs.rs/num/badge.svg)](https://docs.rs/num)
+![minimum rustc 1.15](https://img.shields.io/badge/rustc-1.15+-red.svg)
+[![Travis status](https://travis-ci.org/rust-num/num.svg?branch=master)](https://travis-ci.org/rust-num/num)
+
+A collection of numeric types and traits for Rust.
+
+This includes new types for big integers, rationals (aka fractions), and complex numbers,
+new traits for generic programming on numeric properties like `Integer`,
+and generic range iterators.
+
+`num` is a meta-crate, re-exporting items from these sub-crates:
+
+| Repository | Crate | Documentation |
+| ---------- | ----- | ------------- |
+| [`num-bigint`] | [![crate][bigint-cb]][bigint-c] | [![documentation][bigint-db]][bigint-d]
+| [`num-complex`] | [![crate][complex-cb]][complex-c] | [![documentation][complex-db]][complex-d]
+| [`num-integer`] | [![crate][integer-cb]][integer-c] | [![documentation][integer-db]][integer-d]
+| [`num-iter`] | [![crate][iter-cb]][iter-c] | [![documentation][iter-db]][iter-d]
+| [`num-rational`] | [![crate][rational-cb]][rational-c] | [![documentation][rational-db]][rational-d]
+| [`num-traits`] | [![crate][traits-cb]][traits-c] | [![documentation][traits-db]][traits-d]
+| ([`num-derive`]) | [![crate][derive-cb]][derive-c] | [![documentation][derive-db]][derive-d]
+
+Note: `num-derive` is listed here for reference, but it's not directly included
+in `num`. This is a `proc-macro` crate for deriving some of `num`'s traits.
+
+## Usage
+
+Add this to your `Cargo.toml`:
+
+```toml
+[dependencies]
+num = "0.2"
+```
+
+and this to your crate root:
+
+```rust
+extern crate num;
+```
+
+## Features
+
+This crate can be used without the standard library (`#![no_std]`) by disabling
+the default `std` feature. Use this in `Cargo.toml`:
+
+```toml
+[dependencies.num]
+version = "0.2"
+default-features = false
+```
+
+The `num-bigint` crate is only available when `std` is enabled, and the other
+sub-crates may have limited functionality when used without `std`.
+
+Implementations for `i128` and `u128` are only available with Rust 1.26 and
+later. The build script automatically detects this, but you can make it
+mandatory by enabling the `i128` crate feature.
+
+The `rand` feature enables randomization traits in `num-bigint` and
+`num-complex`.
+
+The `serde` feature enables serialization for types in `num-bigint`,
+`num-complex`, and `num-rational`.
+
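+As a rough sketch, a downstream `Cargo.toml` entry opting into these optional
+features might look like this (the exact feature set you enable is up to you):
+
+```toml
+[dependencies.num]
+version = "0.2"
+features = ["serde", "rand"]
+```
+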
+The `num` meta-crate no longer supports features to toggle the inclusion of
+the individual sub-crates. If you need such control, depend directly on the
+required sub-crates instead.
+
+## Releases
+
+Release notes are available in [RELEASES.md](RELEASES.md).
+
+## Compatibility
+
+The `num` crate as a whole is tested for rustc 1.15 and greater.
+
+The `num-traits`, `num-integer`, and `num-iter` crates are individually tested
+for rustc 1.8 and greater, if you require such older compatibility.
+
+
+[`num-bigint`]: https://github.com/rust-num/num-bigint
+[bigint-c]: https://crates.io/crates/num-bigint
+[bigint-cb]: https://img.shields.io/crates/v/num-bigint.svg
+[bigint-d]: https://docs.rs/num-bigint/
+[bigint-db]: https://docs.rs/num-bigint/badge.svg
+
+[`num-complex`]: https://github.com/rust-num/num-complex
+[complex-c]: https://crates.io/crates/num-complex
+[complex-cb]: https://img.shields.io/crates/v/num-complex.svg
+[complex-d]: https://docs.rs/num-complex/
+[complex-db]: https://docs.rs/num-complex/badge.svg
+
+[`num-derive`]: https://github.com/rust-num/num-derive
+[derive-c]: https://crates.io/crates/num-derive
+[derive-cb]: https://img.shields.io/crates/v/num-derive.svg
+[derive-d]: https://docs.rs/num-derive/
+[derive-db]: https://docs.rs/num-derive/badge.svg
+
+[`num-integer`]: https://github.com/rust-num/num-integer
+[integer-c]: https://crates.io/crates/num-integer
+[integer-cb]: https://img.shields.io/crates/v/num-integer.svg
+[integer-d]: https://docs.rs/num-integer/
+[integer-db]: https://docs.rs/num-integer/badge.svg
+
+[`num-iter`]: https://github.com/rust-num/num-iter
+[iter-c]: https://crates.io/crates/num-iter
+[iter-cb]: https://img.shields.io/crates/v/num-iter.svg
+[iter-d]: https://docs.rs/num-iter/
+[iter-db]: https://docs.rs/num-iter/badge.svg
+
+[`num-rational`]: https://github.com/rust-num/num-rational
+[rational-c]: https://crates.io/crates/num-rational
+[rational-cb]: https://img.shields.io/crates/v/num-rational.svg
+[rational-d]: https://docs.rs/num-rational/
+[rational-db]: https://docs.rs/num-rational/badge.svg
+
+[`num-traits`]: https://github.com/rust-num/num-traits
+[traits-c]: https://crates.io/crates/num-traits
+[traits-cb]: https://img.shields.io/crates/v/num-traits.svg
+[traits-d]: https://docs.rs/num-traits/
+[traits-db]: https://docs.rs/num-traits/badge.svg
diff --git a/rust/vendor/num/RELEASES.md b/rust/vendor/num/RELEASES.md
new file mode 100644
index 0000000..e605ace
--- /dev/null
+++ b/rust/vendor/num/RELEASES.md
@@ -0,0 +1,62 @@
+# Release 0.2.1 (2019-01-09)
+
+- Updated all sub-crates to their latest versions.
+
+**Contributors**: @cuviper, @ignatenkobrain, @jimbo1qaz
+
+# Release 0.2.0 (2018-06-29)
+
+All items exported from `num-integer`, `num-iter`, and `num-traits` are still
+semver-compatible with those exported by `num` 0.1. If you have these as public
+dependencies in your own crates, it is not a breaking change to move to `num`
+0.2. However, this is not true of `num-bigint`, `num-complex`, or
+`num-rational`, as those exported items are distinct in this release.
+
+A few common changes are listed below, but most of the development happens in
+the individual sub-crates. Please consult their release notes for more details
+about recent changes:
+[`num-bigint`](https://github.com/rust-num/num-bigint/blob/master/RELEASES.md),
+[`num-complex`](https://github.com/rust-num/num-complex/blob/master/RELEASES.md),
+[`num-integer`](https://github.com/rust-num/num-integer/blob/master/RELEASES.md),
+[`num-iter`](https://github.com/rust-num/num-iter/blob/master/RELEASES.md),
+[`num-rational`](https://github.com/rust-num/num-rational/blob/master/RELEASES.md),
+and [`num-traits`](https://github.com/rust-num/num-traits/blob/master/RELEASES.md).
+
+### Enhancements
+
+- Updates to `num-integer`, `num-iter`, and `num-traits` are still compatible
+ with `num` 0.1.
+- 128-bit integers are supported with Rust 1.26 and later.
+- `BigInt`, `BigUint`, `Complex`, and `Ratio` all implement `Sum` and `Product`.
+
+### Breaking Changes
+
+- `num` now requires rustc 1.15 or greater.
+- `num-bigint`, `num-complex`, and `num-rational` have all been updated to 0.2.
+- It's no longer possible to toggle individual `num-*` sub-crates using cargo
+ features. If you need that control, please use those crates directly.
+- There is now a `std` feature, enabled by default, along with the implication
+ that building *without* this feature makes this a `#![no_std]` crate.
+ `num::bigint` is not available without `std`, and the other sub-crates may
+ have limited functionality.
+- The `serde` dependency has been updated to 1.0, still disabled by default.
+ The `rustc-serialize` crate is no longer supported by `num`.
+- The `rand` dependency has been updated to 0.5, now disabled by default. This
+ requires rustc 1.22 or greater for `rand`'s own requirement.
+
+**Contributors**: @CAD97, @cuviper, and the many sub-crate contributors!
+
+# Release 0.1.42 (2018-02-08)
+
+- [All of the num sub-crates now have their own source repositories][num-356].
+- Updated num sub-crates to their latest versions.
+
+**Contributors**: @cuviper
+
+[num-356]: https://github.com/rust-num/num/pull/356
+
+
+# Prior releases
+
+No prior release notes were kept. Thanks all the same to the many
+contributors that have made this crate what it is!
diff --git a/rust/vendor/num/src/lib.rs b/rust/vendor/num/src/lib.rs
new file mode 100644
index 0000000..5754139
--- /dev/null
+++ b/rust/vendor/num/src/lib.rs
@@ -0,0 +1,115 @@
+// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A collection of numeric types and traits for Rust.
+//!
+//! This includes new types for big integers, rationals, and complex numbers,
+//! new traits for generic programming on numeric properties like `Integer`,
+//! and generic range iterators.
+//!
+//! ## Example
+//!
+//! This example uses the BigRational type and [Newton's method][newt] to
+//! approximate a square root to arbitrary precision:
+//!
+//! ```
+//! extern crate num;
+//! # #[cfg(feature = "std")]
+//! # mod test {
+//!
+//! use num::FromPrimitive;
+//! use num::bigint::BigInt;
+//! use num::rational::{Ratio, BigRational};
+//!
+//! # pub
+//! fn approx_sqrt(number: u64, iterations: usize) -> BigRational {
+//! let start: Ratio<BigInt> = Ratio::from_integer(FromPrimitive::from_u64(number).unwrap());
+//! let mut approx = start.clone();
+//!
+//! for _ in 0..iterations {
+//! approx = (&approx + (&start / &approx)) /
+//! Ratio::from_integer(FromPrimitive::from_u64(2).unwrap());
+//! }
+//!
+//! approx
+//! }
+//! # }
+//! # #[cfg(not(feature = "std"))]
+//! # mod test { pub fn approx_sqrt(n: u64, _: usize) -> u64 { n } }
+//! # use test::approx_sqrt;
+//!
+//! fn main() {
+//! println!("{}", approx_sqrt(10, 4)); // prints 4057691201/1283082416
+//! }
+//!
+//! ```
+//!
+//! [newt]: https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
+//!
+//! ## Compatibility
+//!
+//! The `num` crate is tested for rustc 1.15 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num/0.2")]
+#![no_std]
+
+#[cfg(feature = "std")]
+extern crate num_bigint;
+extern crate num_complex;
+extern crate num_integer;
+extern crate num_iter;
+extern crate num_rational;
+extern crate num_traits;
+
+#[cfg(feature = "std")]
+pub use num_bigint::{BigInt, BigUint};
+
+pub use num_complex::Complex;
+
+#[cfg(feature = "std")]
+pub use num_rational::BigRational;
+pub use num_rational::Rational;
+
+pub use num_integer::Integer;
+
+pub use num_iter::{range, range_inclusive, range_step, range_step_inclusive};
+
+#[cfg(feature = "std")]
+pub use num_traits::Float;
+pub use num_traits::{
+ abs, abs_sub, cast, checked_pow, clamp, one, pow, signum, zero, Bounded, CheckedAdd,
+ CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, NumCast, One, PrimInt, Saturating,
+ Signed, ToPrimitive, Unsigned, Zero,
+};
+
+#[cfg(feature = "std")]
+pub mod bigint {
+ pub use num_bigint::*;
+}
+
+pub mod complex {
+ pub use num_complex::*;
+}
+
+pub mod integer {
+ pub use num_integer::*;
+}
+
+pub mod iter {
+ pub use num_iter::*;
+}
+
+pub mod traits {
+ pub use num_traits::*;
+}
+
+pub mod rational {
+ pub use num_rational::*;
+}
diff --git a/rust/vendor/num_enum/.cargo-checksum.json b/rust/vendor/num_enum/.cargo-checksum.json
new file mode 100644
index 0000000..e4f33bd
--- /dev/null
+++ b/rust/vendor/num_enum/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"a94ee72ec847b1dc690756e5901db879a2e48aa3e51474145b94753c8b0e184f","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-BSD":"0be96d891d00e0ae0df75d7f3289b12871c000a1f5ac744f3b570768d4bb277c","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"808954bb7af3a919b66c792ccef12e31b98070e78e2a83cde665d0e485cb1a11","src/lib.rs":"945470cdb3af273f9e7c3a7800acd3b704f8e5062e650d380ddcad1f8801c144","tests/default.rs":"b53160eca69d72bde6f0b696545c4b218710c3e83cf3a03b218daf9397c50477","tests/from_primitive.rs":"cc336ff4f0072ddf42622fe2fd2bb31746a9333a5f834be498d781f165e39e9b","tests/into_primitive.rs":"1e1549846724f4c173c35d7541500ec9fbdb887554a4e31a7b1bbc87cd7c5f7a","tests/renamed_num_enum.rs":"048e7d5d2b0aaf27eea8372873aa9bd8576759ae1fb363fac7ea662f86ab24b2","tests/try_build.rs":"404e41d74f3649c81c7186f12fafb85d9f1f358737d19e58ccb77a72004fa41f","tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.rs":"18ef4be2bd64fe96f07495a998275e987b1e2062e1e13ec51037b6e9caecdd88","tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.stderr":"e265d10a002188c21e474a453e4667369922356eed48ec5928dcad12442cfe26","tests/try_build/compile_fail/alternative_clashes_with_variant.rs":"f74b83e4cea5771c5ced58e88d619d78717d8907b85ef5f93b0dc6e8b2ff1d9a","tests/try_build/compile_fail/alternative_clashes_with_variant.stderr":"674d0ec2f56810eb35c93b2d5a1d3dbe7d4bec7bebed246cdb34a422a7257400","tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.rs":"cbadc1ec15ad48f745b3df5dfc7c611b6a0f7cd952bf7b2922b4454e612891a5","tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.stderr":"c3dcd5d4954a63446fa6618af4da5fe1a0b7f4d72de52141e3a2611ad91ff095","tests/try_build/compile_fail/alternative_exprs.rs":"3e43280d93bdafb25153cebea387e1b7c3bbde64a7d5f01ef6faba1a6b1e3a88","tests/try_build/compile_fail/alternative_exprs.stderr":"de738c1101d63a88f6f0641aa051e3e2ef88a09196e6051b9bc9ca985260bd0b","tests/try_build/compile_fail/catch_all_multiple_fields.rs":"2bc1be42a64216ab5a9b3e1e75f70f6c21c566f1608061017431baa7a8809a2e","tests/try_build/compile_fail/catch_all_multiple_fields.stderr":"c91aa50d65874296162d9fedaf6627aad9029fe7d4ce10e6d17ea8224d684966","tests/try_build/compile_fail/catch_all_non_tuple.rs":"69fa21f801ba8ad6ff50456ed521c864d5d863652a16fb68b7aa7f3a55f846ca","tests/try_build/compile_fail/catch_all_non_tuple.stderr":"b848dbe45d0c25b5a724c6baa9ca9489ab1565551fca5957a29464b64cc50f47","tests/try_build/compile_fail/catch_all_type_mismatch.rs":"0a259706b24ab70c9534e54a575c936d51a91d937ebe7f127bedc0930e548ca7","tests/try_build/compile_fail/catch_all_type_mismatch.stderr":"d6c712192bb2c1220bfc9b2a6fd3b7aaf7b482061e9deb3e32f4991f6f472846","tests/try_build/compile_fail/conflicting_default.rs":"22f877dbfbe9d2eca01f02a18f8bd613d9765ff3f67bf5b7639b87df86149614","tests/try_build/compile_fail/conflicting_default.stderr":"de0b7378529a796de31c72d6c055f15679add8684d3cf0be2c4f2b7c07655f73","tests/try_build/compile_fail/conflicting_derive.rs":"f9d8f6e90713e00e8a1188673822d29e919d9ea11c1dde0750223f3c81467fa7","tests/try_build/compile_fail/conflicting_derive.stderr":"62156bf148e699269893e015e6c4fe1947a9f7997f0f7ee78d58c0010fc833a7","tests/try_build/compile_fail/default_and_catch_all.rs":"7b5a419805e268a1e9a78286568d2c73036593aaf7a36bb416cecfc15b0fbd3b","tests/try_build/compile_fail/default_and_catch_all.stderr":"743a1254ffe08555eb822aa8238770d0460b16eb1db988b445163de12710adcc","tes
ts/try_build/compile_fail/default_and_catch_all_alt.rs":"66a8bd540df4007f5eff77cf12127d93f3084aef2b187bf4489dfef36600a7cc","tests/try_build/compile_fail/default_and_catch_all_alt.stderr":"9ea1418d2b5e24d20e202e107a25395693b164d7d9f98d2bf9260711c15527ad","tests/try_build/compile_fail/default_and_catch_all_same_variant.rs":"79891f3d3413daede3bd72eff91dc829d0b26ecfbcb457c0cab45967262d1ab9","tests/try_build/compile_fail/default_and_catch_all_same_variant.stderr":"2cad1a90151db938d72100b00dad9362ddbaf47fd842df8552a5ff1006bfc58e","tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.rs":"9d1f92365434d6f7d783f97bde44468dadf1365587439e756ccc7a37610ba241","tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.stderr":"a62720a7490353b6c3df916e6e067491c7c5009499bfcb2b1f6d762d63e3570a","tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.rs":"c253b836587b653011f9db2d436f3ed80af6e642ddf2e96036eace507b192b46","tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.stderr":"95825a1a562e44255c12afcb71478abecb41ab72eeda7328fff4878dc5c7a4b9","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.rs":"ea3002c67e89103ef67b070fce67ff35c7395f68ebd92d473dc84e082b1b6b7a","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.stderr":"51fc97bb66585eadb27008d4055566fba36a8bd9a43f15be17b968509383e9d5","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.rs":"de43c27ed1b8b65da19c422303e215bf68d27984325b1bdd8b097016982b1f6f","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.stderr":"508df932d796e4f37249819642beecfb28b17570e4f9012dd447e20677d47359","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.rs":"bcfeca85937ecd486a9c833d69ef942250a326ce414e4cf0be011899a20e4b2c","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.stderr":"d3f124f5a3254fd7a4fa74f3ce71d584a71b361cd45672e2e2d20bc6f822d022","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.rs":"47c910706f765e75183e7e9f78fbfff3620af577a7a25e879b533269b11ec401","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.stderr":"ff01351f68e237623b6cdd2bc955ec6087faa9e52c56c35536d8b0d72f9343b0","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.rs":"b046c3e6a6c983e902275a72c5d94c489b3c62c84e54de4b7be6deee38a6ebba","tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.stderr":"72e14558f1655e3181dcc1910ade4ddbf50dc9c501659a10a390e1637bf04625","tests/try_build/compile_fail/garbage_attribute.rs":"a8646a08f39da4446afa4f4bf04824ed3879a58272b3c3101cfcdd9c10e2aff3","tests/try_build/compile_fail/garbage_attribute.stderr":"bd94438fb24277c46670dfd677fe203ce7ed753ed6d8fe22ee6a1d0f238b9ccc","tests/try_build/compile_fail/missing_default.rs":"1553f559db4f546f59fbf372bf1453bc02b95510cb8d29ee4639f7257fffaedc","tests/try_build/compile_fail/missing_default.stderr":"cab89b8f30c413d03c87c639b7770682df727c6cf7c88ab89a8689d87bb388b0","tests/try_build/compile_fail/missing_repr.rs":"98803e70b384b92506aecf7a48d1ecfc881f9e26bf033a4aee21947831350083","tests/try_build/compile_fail/missing_repr.stderr":"5e442ac3a4ff2f58004000b9049d5760ef2c76814424a0f88
4475d6e1f35de41","tests/try_build/compile_fail/multiple_catch_all.rs":"63032bdb3788bd09dcc79105cc8a0f367c79a39c75cca8affaa336b4a00b79b1","tests/try_build/compile_fail/multiple_catch_all.stderr":"3f6219a7a61d2947cbe15b5d567ea516e6a40f18dbbb5b362efa10b06bf381d4","tests/try_build/compile_fail/multiple_catch_all_same_variant.rs":"450217f33c0557e28ee153f97b6d08551127d2e7061b36ff8e438a85c26f7d7e","tests/try_build/compile_fail/multiple_catch_all_same_variant.stderr":"b61e5ecc3010040f09170510c5a41c9afa40938020e38f34c0c35b12605b8412","tests/try_build/compile_fail/multiple_defaults.rs":"9da7df9f33ca260e50ae471a832a95f292c447fa9a49c8e2034fc08f23ae3de2","tests/try_build/compile_fail/multiple_defaults.stderr":"feae6ac0f9554229b7b16dcd0812d05ebbcee30c89ec88a9dc10b1191f8f4453","tests/try_build/compile_fail/multiple_defaults_different_kinds.rs":"c149c8ad4ebf56702282ae5f371f868b6d7ddb3059050ed9753e605b3f4516e7","tests/try_build/compile_fail/multiple_defaults_different_kinds.stderr":"2ca5d7e4ce8a4d9bb1f7fd03348a8fa53e17cee6ff69e5de1b3cc1bfdceb1aeb","tests/try_build/compile_fail/multiple_num_enum_defaults.rs":"6129a341e46f41065eccf204d8d341ace72351cbb8bc2598ad7f395825ec45a7","tests/try_build/compile_fail/multiple_num_enum_defaults.stderr":"a397a6e0b18e925fb72c72c0b5fc913cb925cb1b7a8656bb708ed47c5e21ae8f","tests/try_build/compile_fail/repr_c.rs":"cbab431a6525baa0f70d9ed8a2d58d4f57688a906706dfa5042bad3d80308639","tests/try_build/compile_fail/repr_c.stderr":"9f3a35a6efdbd4bae628361e0dc2400523ee94ea8777867f188ec7294a11372d","tests/try_build/compile_fail/unpexpected_alternatives.rs":"41d461d0026df7bd2aef69764a8af79ab67eef1aa3f52ed1fbcbaaeeded93e49","tests/try_build/compile_fail/unpexpected_alternatives.stderr":"b2837080027d72897981963e2d34948b3ac017ed83fbce9745a7098bf2947ddf","tests/try_build/compile_fail/unpexpected_default.rs":"ed780083c7003702a15aba9370987c194fd773f5bb485eccdef97651e71f911b","tests/try_build/compile_fail/unpexpected_default.stderr":"6d9c3c7b41d53513d2b68d91632740b24b309516cbe50e6e1b4df503bf5996cb","tests/try_build/compile_fail/variants_with_fields.rs":"3caf42df983145f29cf2f9e112038aed1ba5c76f09eb76c52074b5758c7f8f62","tests/try_build/compile_fail/variants_with_fields.stderr":"586b8a3fb6f3cab82acc9e7b966d3c0f5d6aa97bab2bb3bbd570b6074e43d976","tests/try_build/pass/default_and_alternatives.rs":"d554668fff375a84838e6e08642c2ac36090565828e88b69d407a211feb817de","tests/try_build/pass/exhaustive_enum_try_from.rs":"7dde59fae6f1b789c89798c7dc45582cbe8d90ed385cb272d855ec82508d20d3","tests/try_build/pass/exhaustive_enum_via_alternatives.rs":"a62b9271bd4022a69d283cd2fc81197d8f43c42663594493e5b3c11cabbdfbf1","tests/try_build/pass/exhaustive_enum_via_default.rs":"4f9f2f0fd2b7af8c877f54225c623c45f62d8c7c1d7e360bd5f27fb09d8da2ad","tests/try_build/pass/features/complex-expressions/alternate_exprs_exhaustive_with_range.rs":"c253b836587b653011f9db2d436f3ed80af6e642ddf2e96036eace507b192b46","tests/try_build/pass/features/complex-expressions/alternate_exprs_non_exhaustive_with_range.rs":"df29fb93155c457e3beae446124c0f9cbd5b4561a7a34f644b26a2d075926231","tests/try_from_primitive.rs":"34bbe2be0f02fe8ba1d8d59da7ca3f66898e6518a2cb1cf0b09f5fed9000b83c","tests/unsafe_from_primitive.rs":"5dcb3e8b3125b302880d41208b318f8a7f98fec3e0592433f2a84c120b2ed0fc"},"package":"1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9"} \ No newline at end of file
diff --git a/rust/vendor/num_enum/Cargo.toml b/rust/vendor/num_enum/Cargo.toml
new file mode 100644
index 0000000..c8cf973
--- /dev/null
+++ b/rust/vendor/num_enum/Cargo.toml
@@ -0,0 +1,64 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "num_enum"
+version = "0.5.11"
+authors = [
+ "Daniel Wagner-Hall <dawagner@gmail.com>",
+ "Daniel Henry-Mantilla <daniel.henry.mantilla@gmail.com>",
+ "Vincent Esche <regexident@gmail.com>",
+]
+description = "Procedural macros to make inter-operation between primitives and enums easier."
+readme = "README.md"
+keywords = [
+ "enum",
+ "conversion",
+ "safe",
+ "ffi",
+ "derive",
+]
+categories = ["rust-patterns"]
+license = "BSD-3-Clause OR MIT OR Apache-2.0"
+repository = "https://github.com/illicitonion/num_enum"
+
+[package.metadata.docs.rs]
+features = ["external_doc"]
+
+[dependencies.num_enum_derive]
+version = "0.5.11"
+default-features = false
+
+[dev-dependencies.anyhow]
+version = "1.0.14"
+
+[dev-dependencies.rustversion]
+version = "1.0.4"
+
+[dev-dependencies.trybuild]
+version = "1.0.49"
+
+[dev-dependencies.walkdir]
+version = "2"
+
+[features]
+complex-expressions = ["num_enum_derive/complex-expressions"]
+default = ["std"]
+external_doc = []
+std = ["num_enum_derive/std"]
+
+[badges.maintenance]
+status = "passively-maintained"
+
+[badges.travis-ci]
+branch = "master"
+repository = "illicitonion/num_enum"
diff --git a/rust/vendor/num_enum/LICENSE-APACHE b/rust/vendor/num_enum/LICENSE-APACHE
new file mode 100644
index 0000000..1b5ec8b
--- /dev/null
+++ b/rust/vendor/num_enum/LICENSE-APACHE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/rust/vendor/num_enum/LICENSE-BSD b/rust/vendor/num_enum/LICENSE-BSD
new file mode 100644
index 0000000..b742e29
--- /dev/null
+++ b/rust/vendor/num_enum/LICENSE-BSD
@@ -0,0 +1,27 @@
+Copyright (c) 2018, Daniel Wagner-Hall
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of num_enum nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/rust/vendor/num_enum/LICENSE-MIT b/rust/vendor/num_enum/LICENSE-MIT
new file mode 100644
index 0000000..31aa793
--- /dev/null
+++ b/rust/vendor/num_enum/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num_enum/README.md b/rust/vendor/num_enum/README.md
new file mode 100644
index 0000000..902cee7
--- /dev/null
+++ b/rust/vendor/num_enum/README.md
@@ -0,0 +1,277 @@
+num_enum
+========
+
+Procedural macros to make inter-operation between primitives and enums easier.
+This crate is no_std compatible.
+
+[![crates.io](https://img.shields.io/crates/v/num_enum.svg)](https://crates.io/crates/num_enum)
+[![Documentation](https://docs.rs/num_enum/badge.svg)](https://docs.rs/num_enum)
+[![Build Status](https://travis-ci.org/illicitonion/num_enum.svg?branch=master)](https://travis-ci.org/illicitonion/num_enum)
+
+Turning an enum into a primitive
+--------------------------------
+
+```rust
+use num_enum::IntoPrimitive;
+
+#[derive(IntoPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero,
+ One,
+}
+
+fn main() {
+ let zero: u8 = Number::Zero.into();
+ assert_eq!(zero, 0u8);
+}
+```
+
+`num_enum`'s `IntoPrimitive` is more type-safe than using `as`, because `as` will silently truncate - `num_enum` only derives `From` for exactly the discriminant type of the enum.
+
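+For illustration, a minimal sketch of that difference (the `Big` enum and its
+discriminant value here are hypothetical, not taken from the crate's examples):
+
+```rust
+use num_enum::IntoPrimitive;
+
+#[derive(IntoPrimitive)]
+#[repr(u16)]
+enum Big {
+    Large = 300,
+}
+
+fn main() {
+    // `as` silently truncates: the discriminant 300 wraps to 44 as a u8.
+    assert_eq!(Big::Large as u8, 44);
+
+    // `From` is only derived for the exact repr type (`u16`), so a lossy
+    // conversion like `u8::from(Big::Large)` does not compile at all.
+    let exact: u16 = Big::Large.into();
+    assert_eq!(exact, 300);
+}
+```
+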
+Attempting to turn a primitive into an enum with try_from
+----------------------------------------------
+
+```rust
+use num_enum::TryFromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero,
+ One,
+}
+
+fn main() {
+ let zero = Number::try_from(0u8);
+ assert_eq!(zero, Ok(Number::Zero));
+
+ let three = Number::try_from(3u8);
+ assert_eq!(
+ three.unwrap_err().to_string(),
+ "No discriminant in enum `Number` matches the value `3`",
+ );
+}
+```
+
+Variant alternatives
+---------------
+
+Sometimes a single enum variant might be representable by multiple numeric values.
+
+The `#[num_enum(alternatives = [..])]` attribute allows you to define additional value alternatives for individual variants.
+
+(The behavior of `IntoPrimitive` is unaffected by this attribute; it will always return the canonical value.)
+
+```rust
+use num_enum::TryFromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero = 0,
+ #[num_enum(alternatives = [2])]
+ OneOrTwo = 1,
+}
+
+fn main() {
+ let zero = Number::try_from(0u8);
+ assert_eq!(zero, Ok(Number::Zero));
+
+ let one = Number::try_from(1u8);
+ assert_eq!(one, Ok(Number::OneOrTwo));
+
+ let two = Number::try_from(2u8);
+ assert_eq!(two, Ok(Number::OneOrTwo));
+
+ let three = Number::try_from(3u8);
+ assert_eq!(
+ three.unwrap_err().to_string(),
+ "No discriminant in enum `Number` matches the value `3`",
+ );
+}
+```
+
+Range expressions are also supported for alternatives, but this requires enabling the `complex-expressions` feature:
+
+```rust
+use num_enum::TryFromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero = 0,
+ #[num_enum(alternatives = [2..16])]
+ Some = 1,
+ #[num_enum(alternatives = [17, 18..=255])]
+ Many = 16,
+}
+
+fn main() {
+ let zero = Number::try_from(0u8);
+ assert_eq!(zero, Ok(Number::Zero));
+
+ let some = Number::try_from(15u8);
+ assert_eq!(some, Ok(Number::Some));
+
+ let many = Number::try_from(255u8);
+ assert_eq!(many, Ok(Number::Many));
+}
+```
+
+Default variant
+---------------
+
+Sometimes it is desirable to have an `Other` variant in an enum that acts as a kind of wildcard, matching all the values not yet covered by other variants.
+
+The `#[num_enum(default)]` attribute allows you to mark a variant as the default.
+
+(The behavior of `IntoPrimitive` is unaffected by this attribute; it will always return the canonical value.)
+
+```rust
+use num_enum::TryFromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero = 0,
+ #[num_enum(default)]
+ NonZero = 1,
+}
+
+fn main() {
+ let zero = Number::try_from(0u8);
+ assert_eq!(zero, Ok(Number::Zero));
+
+ let one = Number::try_from(1u8);
+ assert_eq!(one, Ok(Number::NonZero));
+
+ let two = Number::try_from(2u8);
+ assert_eq!(two, Ok(Number::NonZero));
+}
+```
+
+Safely turning a primitive into an exhaustive enum with from_primitive
+-------------------------------------------------------------
+
+If your enum has all possible primitive values covered, you can derive `FromPrimitive` for it (which auto-implements stdlib's `From`).
+
+You can cover all possible values by:
+* Having variants for every possible value
+* Having a variant marked `#[num_enum(default)]`
+* Having a variant marked `#[num_enum(catch_all)]`
+* Having `#[num_enum(alternatives = [...])]`s covering values not covered by a variant (a range-based sketch of this appears after the example below).
+
+```rust
+use num_enum::FromPrimitive;
+
+#[derive(Debug, Eq, PartialEq, FromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero,
+ #[num_enum(default)]
+ NonZero,
+}
+
+fn main() {
+ assert_eq!(
+ Number::Zero,
+ Number::from(0_u8),
+ );
+ assert_eq!(
+ Number::NonZero,
+ Number::from(1_u8),
+ );
+}
+```
+
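+The last bullet can also be satisfied on its own; a minimal sketch using a range alternative (this assumes the `complex-expressions` feature is enabled):
+
+```rust
+use num_enum::FromPrimitive;
+
+#[derive(Debug, Eq, PartialEq, FromPrimitive)]
+#[repr(u8)]
+enum Number {
+    Zero = 0,
+    // 1 is this variant's own discriminant; 2..=255 covers every remaining u8 value.
+    #[num_enum(alternatives = [2..=255])]
+    NonZero = 1,
+}
+
+fn main() {
+    assert_eq!(Number::from(0u8), Number::Zero);
+    assert_eq!(Number::from(1u8), Number::NonZero);
+    assert_eq!(Number::from(200u8), Number::NonZero);
+}
+```
+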
+Catch-all variant
+-----------------
+
+Sometimes it is desirable to have an `Other` variant which holds the otherwise unmatched value as a field.
+
+The `#[num_enum(catch_all)]` attribute allows you to mark at most one variant for this purpose. The variant it's applied to must be a tuple variant with exactly one field matching the `repr` type.
+
+```rust
+use num_enum::FromPrimitive;
+
+#[derive(Debug, Eq, PartialEq, FromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ NonZero(u8),
+}
+
+fn main() {
+ let zero = Number::from(0u8);
+ assert_eq!(zero, Number::Zero);
+
+ let one = Number::from(1u8);
+ assert_eq!(one, Number::NonZero(1_u8));
+
+ let two = Number::from(2u8);
+ assert_eq!(two, Number::NonZero(2_u8));
+}
+```
+
+As the catch-all variant makes the conversion naturally exhaustive, this is only supported for `FromPrimitive`, not for `TryFromPrimitive`.
+
+Unsafely turning a primitive into an enum with from_unchecked
+-------------------------------------------------------------
+
+If you're really certain a conversion will succeed (and the enum makes no use of `#[num_enum(default)]` or `#[num_enum(alternatives = [..])]`
+on any of its variants), and you want to avoid a small amount of overhead, you can use unsafe code to do the conversion.
+Unless you have data showing that the match statement generated by `try_from` above is a bottleneck for you,
+you should avoid doing this, as passing an invalid value is undefined behavior and can cause serious memory issues in your program.
+
+```rust
+use num_enum::UnsafeFromPrimitive;
+
+#[derive(Debug, Eq, PartialEq, UnsafeFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero,
+ One,
+}
+
+fn main() {
+ assert_eq!(
+ unsafe { Number::from_unchecked(0_u8) },
+ Number::Zero,
+ );
+ assert_eq!(
+ unsafe { Number::from_unchecked(1_u8) },
+ Number::One,
+ );
+}
+
+unsafe fn undefined_behavior() {
+ let _ = Number::from_unchecked(2); // 2 is not a valid discriminant!
+}
+```
+
+Optional features
+-----------------
+
+Some enum values may be composed of complex expressions, for example:
+
+```rust
+enum Number {
+ Zero = (0, 1).0,
+ One = (0, 1).1,
+}
+```
+
+To cut down on compile time, these are not supported by default, but if you enable the `complex-expressions`
+feature of your dependency on `num_enum`, these should start working.
+
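+With that feature enabled, the derives shown earlier should accept such discriminants; a minimal sketch (assuming `complex-expressions` is enabled for `num_enum`):
+
+```rust
+use num_enum::TryFromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+    // Constant expressions rather than plain literals; parsing these is what
+    // the `complex-expressions` feature enables.
+    Zero = (0, 1).0,
+    One = (0, 1).1,
+}
+
+fn main() {
+    assert_eq!(Number::try_from(1u8), Ok(Number::One));
+}
+```
+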
+License
+-------
+
+num_enum may be used under your choice of the BSD 3-clause, Apache 2, or MIT license.
diff --git a/rust/vendor/num_enum/src/lib.rs b/rust/vendor/num_enum/src/lib.rs
new file mode 100644
index 0000000..e349c52
--- /dev/null
+++ b/rust/vendor/num_enum/src/lib.rs
@@ -0,0 +1,67 @@
+// Wrap this in two cfg_attrs so that it continues to parse pre-1.54.0.
+// See https://github.com/rust-lang/rust/issues/82768
+#![cfg_attr(feature = "external_doc", cfg_attr(all(), doc = include_str!("../README.md")))]
+#![cfg_attr(
+ not(feature = "external_doc"),
+ doc = "See <https://docs.rs/num_enum> for more info about this crate."
+)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+pub use ::num_enum_derive::{
+ Default, FromPrimitive, IntoPrimitive, TryFromPrimitive, UnsafeFromPrimitive,
+};
+
+use ::core::fmt;
+
+pub trait FromPrimitive: Sized {
+ type Primitive: Copy + Eq;
+
+ fn from_primitive(number: Self::Primitive) -> Self;
+}
+
+pub trait TryFromPrimitive: Sized {
+ type Primitive: Copy + Eq + fmt::Debug;
+
+ const NAME: &'static str;
+
+ fn try_from_primitive(number: Self::Primitive) -> Result<Self, TryFromPrimitiveError<Self>>;
+}
+
+pub struct TryFromPrimitiveError<Enum: TryFromPrimitive> {
+ pub number: Enum::Primitive,
+}
+
+impl<Enum: TryFromPrimitive> Copy for TryFromPrimitiveError<Enum> {}
+impl<Enum: TryFromPrimitive> Clone for TryFromPrimitiveError<Enum> {
+ fn clone(&self) -> Self {
+ TryFromPrimitiveError {
+ number: self.number,
+ }
+ }
+}
+impl<Enum: TryFromPrimitive> Eq for TryFromPrimitiveError<Enum> {}
+impl<Enum: TryFromPrimitive> PartialEq for TryFromPrimitiveError<Enum> {
+ fn eq(&self, other: &Self) -> bool {
+ self.number == other.number
+ }
+}
+impl<Enum: TryFromPrimitive> fmt::Debug for TryFromPrimitiveError<Enum> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("TryFromPrimitiveError")
+ .field("number", &self.number)
+ .finish()
+ }
+}
+impl<Enum: TryFromPrimitive> fmt::Display for TryFromPrimitiveError<Enum> {
+ fn fmt(&self, stream: &'_ mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ stream,
+ "No discriminant in enum `{name}` matches the value `{input:?}`",
+ name = Enum::NAME,
+ input = self.number,
+ )
+ }
+}
+
+#[cfg(feature = "std")]
+impl<Enum: TryFromPrimitive> ::std::error::Error for TryFromPrimitiveError<Enum> {}
diff --git a/rust/vendor/num_enum/tests/default.rs b/rust/vendor/num_enum/tests/default.rs
new file mode 100644
index 0000000..80e2d89
--- /dev/null
+++ b/rust/vendor/num_enum/tests/default.rs
@@ -0,0 +1,33 @@
+// Guard against https://github.com/illicitonion/num_enum/issues/27
+mod alloc {}
+mod core {}
+mod num_enum {}
+mod std {}
+
+#[test]
+fn default() {
+ #[derive(Debug, Eq, PartialEq, ::num_enum::Default)]
+ #[repr(u8)]
+ enum Enum {
+ #[allow(unused)]
+ Zero = 0,
+ #[num_enum(default)]
+ NonZero = 1,
+ }
+
+ assert_eq!(Enum::NonZero, <Enum as ::core::default::Default>::default());
+}
+
+#[test]
+fn default_standard_default_attribute() {
+ #[derive(Debug, Eq, PartialEq, ::num_enum::Default)]
+ #[repr(u8)]
+ enum Enum {
+ #[allow(unused)]
+ Zero = 0,
+ #[default]
+ NonZero = 1,
+ }
+
+ assert_eq!(Enum::NonZero, <Enum as ::core::default::Default>::default());
+}
diff --git a/rust/vendor/num_enum/tests/from_primitive.rs b/rust/vendor/num_enum/tests/from_primitive.rs
new file mode 100644
index 0000000..1be33fc
--- /dev/null
+++ b/rust/vendor/num_enum/tests/from_primitive.rs
@@ -0,0 +1,144 @@
+use ::std::convert::TryFrom;
+
+use ::num_enum::{FromPrimitive, TryFromPrimitive};
+
+// Guard against https://github.com/illicitonion/num_enum/issues/27
+mod alloc {}
+mod core {}
+mod num_enum {}
+mod std {}
+
+#[test]
+fn has_from_primitive_number_u64() {
+ #[derive(Debug, Eq, PartialEq, FromPrimitive)]
+ #[repr(u64)]
+ enum Enum {
+ Zero = 0,
+ #[num_enum(default)]
+ NonZero = 1,
+ }
+
+ let zero = Enum::from_primitive(0_u64);
+ assert_eq!(zero, Enum::Zero);
+
+ let one = Enum::from_primitive(1_u64);
+ assert_eq!(one, Enum::NonZero);
+
+ let two = Enum::from_primitive(2_u64);
+ assert_eq!(two, Enum::NonZero);
+}
+
+#[test]
+fn has_from_primitive_number() {
+ #[derive(Debug, Eq, PartialEq, FromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero = 0,
+ #[num_enum(default)]
+ NonZero = 1,
+ }
+
+ let zero = Enum::from_primitive(0_u8);
+ assert_eq!(zero, Enum::Zero);
+
+ let one = Enum::from_primitive(1_u8);
+ assert_eq!(one, Enum::NonZero);
+
+ let two = Enum::from_primitive(2_u8);
+ assert_eq!(two, Enum::NonZero);
+}
+
+#[test]
+fn has_from_primitive_number_standard_default_attribute() {
+ #[derive(Debug, Eq, PartialEq, FromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero = 0,
+ #[default]
+ NonZero = 1,
+ }
+
+ let zero = Enum::from_primitive(0_u8);
+ assert_eq!(zero, Enum::Zero);
+
+ let one = Enum::from_primitive(1_u8);
+ assert_eq!(one, Enum::NonZero);
+
+ let two = Enum::from_primitive(2_u8);
+ assert_eq!(two, Enum::NonZero);
+}
+
+#[test]
+fn from_primitive_number() {
+ #[derive(Debug, Eq, PartialEq, FromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ #[num_enum(default)]
+ Whatever = 0,
+ }
+
+ // #[derive(FromPrimitive)] generates implementations for the following traits:
+ //
+ // - `FromPrimitive<T>`
+ // - `From<T>`
+ // - `TryFromPrimitive<T>`
+ // - `TryFrom<T>`
+ let from_primitive = Enum::from_primitive(0_u8);
+ assert_eq!(from_primitive, Enum::Whatever);
+
+ let from = Enum::from(0_u8);
+ assert_eq!(from, Enum::Whatever);
+
+ let try_from_primitive = Enum::try_from_primitive(0_u8);
+ assert_eq!(try_from_primitive, Ok(Enum::Whatever));
+
+ let try_from = Enum::try_from(0_u8);
+ assert_eq!(try_from, Ok(Enum::Whatever));
+}
+
+#[test]
+fn from_primitive_number_catch_all() {
+ #[derive(Debug, Eq, PartialEq, FromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ NonZero(u8),
+ }
+
+ let zero = Enum::from_primitive(0_u8);
+ assert_eq!(zero, Enum::Zero);
+
+ let one = Enum::from_primitive(1_u8);
+ assert_eq!(one, Enum::NonZero(1_u8));
+
+ let two = Enum::from_primitive(2_u8);
+ assert_eq!(two, Enum::NonZero(2_u8));
+}
+
+#[cfg(feature = "complex-expressions")]
+#[test]
+fn from_primitive_number_with_inclusive_range() {
+ #[derive(Debug, Eq, PartialEq, FromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero = 0,
+ #[num_enum(alternatives = [2..=255])]
+ NonZero,
+ }
+
+ let zero = Enum::from_primitive(0_u8);
+ assert_eq!(zero, Enum::Zero);
+
+ let one = Enum::from_primitive(1_u8);
+ assert_eq!(one, Enum::NonZero);
+
+ let two = Enum::from_primitive(2_u8);
+ assert_eq!(two, Enum::NonZero);
+
+ let three = Enum::from_primitive(3_u8);
+ assert_eq!(three, Enum::NonZero);
+
+ let twofivefive = Enum::from_primitive(255_u8);
+ assert_eq!(twofivefive, Enum::NonZero);
+}
diff --git a/rust/vendor/num_enum/tests/into_primitive.rs b/rust/vendor/num_enum/tests/into_primitive.rs
new file mode 100644
index 0000000..a09bf56
--- /dev/null
+++ b/rust/vendor/num_enum/tests/into_primitive.rs
@@ -0,0 +1,47 @@
+use ::num_enum::IntoPrimitive;
+
+// Guard against https://github.com/illicitonion/num_enum/issues/27
+mod alloc {}
+mod core {}
+mod num_enum {}
+mod std {}
+
+#[derive(IntoPrimitive)]
+#[repr(u8)]
+enum Enum {
+ Zero,
+ One,
+ Two,
+}
+
+#[test]
+fn simple() {
+ let zero: u8 = Enum::Zero.into();
+ assert_eq!(zero, 0u8);
+
+ let one: u8 = Enum::One.into();
+ assert_eq!(one, 1u8);
+
+ let two: u8 = Enum::Two.into();
+ assert_eq!(two, 2u8);
+}
+
+#[test]
+fn catch_all() {
+ #[derive(Debug, Eq, PartialEq, IntoPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ NonZero(u8),
+ }
+
+ let zero: u8 = Enum::Zero.into();
+ assert_eq!(zero, 0u8);
+
+ let one: u8 = Enum::NonZero(1u8).into();
+ assert_eq!(one, 1u8);
+
+ let two: u8 = Enum::NonZero(2u8).into();
+ assert_eq!(two, 2u8);
+}
diff --git a/rust/vendor/num_enum/tests/renamed_num_enum.rs b/rust/vendor/num_enum/tests/renamed_num_enum.rs
new file mode 100644
index 0000000..44784ed
--- /dev/null
+++ b/rust/vendor/num_enum/tests/renamed_num_enum.rs
@@ -0,0 +1,33 @@
+#[test]
+fn no_std() {
+ assert!(::std::process::Command::new("cargo")
+ .args([
+ "run",
+ "--manifest-path",
+ concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/../renamed_num_enum/Cargo.toml",
+ ),
+ ])
+ .status()
+ .unwrap()
+ .success())
+}
+
+#[test]
+fn std() {
+ assert!(::std::process::Command::new("cargo")
+ .args([
+ "run",
+ "--manifest-path",
+ concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/../renamed_num_enum/Cargo.toml",
+ ),
+ "--features",
+ "std",
+ ])
+ .status()
+ .unwrap()
+ .success())
+}
diff --git a/rust/vendor/num_enum/tests/try_build.rs b/rust/vendor/num_enum/tests/try_build.rs
new file mode 100644
index 0000000..3c771d1
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build.rs
@@ -0,0 +1,123 @@
+use std::error::Error;
+use std::path::{Path, PathBuf};
+use walkdir::WalkDir;
+
+#[test]
+fn trybuild() {
+ let directory = PathBuf::from("tests/try_build");
+
+ let mut _renamer = None;
+
+ let compile_fail_dir = directory.join("compile_fail");
+
+ // Sometimes error messages change on beta/nightly - allow alternate errors on those.
+ _renamer = Some(Renamer::rename(compile_fail_dir.clone()).unwrap());
+
+ let fail = trybuild::TestCases::new();
+ fail.compile_fail(compile_fail_dir.join("*.rs"));
+ add_feature_dirs(&compile_fail_dir, &fail, ExpectedResult::Fail);
+
+ let pass = trybuild::TestCases::new();
+ let pass_dir = directory.join("pass");
+ pass.pass(pass_dir.join("*.rs"));
+ add_feature_dirs(&pass_dir, &pass, ExpectedResult::Pass);
+}
+
+enum ExpectedResult {
+ Pass,
+ Fail,
+}
+
+fn add_feature_dirs(
+ parent_dir: &Path,
+ test_cases: &trybuild::TestCases,
+ expected_result: ExpectedResult,
+) {
+ let features_dir = parent_dir.join("features");
+ let feature_specific_dir = if cfg!(feature = "complex-expressions") {
+ features_dir.join("complex-expressions")
+ } else {
+ features_dir.join("!complex-expressions")
+ };
+ let tests = feature_specific_dir.join("*.rs");
+ match expected_result {
+ ExpectedResult::Pass => test_cases.pass(tests),
+ ExpectedResult::Fail => test_cases.compile_fail(tests),
+ }
+}
+
+struct Renamer(Vec<PathBuf>);
+
+impl Renamer {
+ const STDERR_EXTENSION: &'static str = "stderr";
+
+ #[rustversion::all(beta)]
+ const VERSION_SPECIFIC_EXTENSION: &'static str = "stderr_beta";
+
+ #[rustversion::all(nightly)]
+ const VERSION_SPECIFIC_EXTENSION: &'static str = "stderr_nightly";
+
+ #[rustversion::all(not(beta), not(nightly))]
+ const VERSION_SPECIFIC_EXTENSION: &'static str = "stderr_doesnotexist";
+
+ const NON_VERSION_SPECIFIC_BACKUP_EXTENSION: &'static str =
+ "stderr_non_version_specific_backup";
+
+ fn rename(dir: PathBuf) -> anyhow::Result<Self> {
+ let nightly_paths = WalkDir::new(dir)
+ .max_depth(1)
+ .into_iter()
+ .filter_map(|dir_entry| {
+ let dir_entry = match dir_entry {
+ Ok(dir_entry) => dir_entry,
+ Err(err) => return Some(Err(err)),
+ };
+ let path = dir_entry.path();
+ if let Some(file_name) = path.file_name() {
+ if Path::new(file_name).extension()
+ == Some(Renamer::VERSION_SPECIFIC_EXTENSION.as_ref())
+ {
+ return Some(Ok(path.to_path_buf()));
+ }
+ }
+ None
+ })
+ .collect::<Result<Vec<_>, _>>()?;
+ // Create early so that if we end up returning an error this gets dropped and undoes any
+ // already-done renames.
+ let renamer = Renamer(nightly_paths);
+
+ for nightly_path in &renamer.0 {
+ std::fs::rename(
+ nightly_path.with_extension(Renamer::STDERR_EXTENSION),
+ nightly_path.with_extension(Renamer::NON_VERSION_SPECIFIC_BACKUP_EXTENSION),
+ )?;
+ std::fs::rename(
+ nightly_path.with_extension(Renamer::VERSION_SPECIFIC_EXTENSION),
+ nightly_path.with_extension(Renamer::STDERR_EXTENSION),
+ )?;
+ }
+ Ok(renamer)
+ }
+}
+
+impl Drop for Renamer {
+ fn drop(&mut self) {
+ for path in &self.0 {
+ ignore_error(std::fs::rename(
+ path.with_extension(Renamer::STDERR_EXTENSION),
+ path.with_extension(Renamer::VERSION_SPECIFIC_EXTENSION),
+ ));
+ ignore_error(std::fs::rename(
+ path.with_extension(Renamer::NON_VERSION_SPECIFIC_BACKUP_EXTENSION),
+ path.with_extension(Renamer::STDERR_EXTENSION),
+ ));
+ }
+ }
+}
+
+fn ignore_error<T, E: Error>(result: Result<T, E>) {
+ if let Err(err) = result {
+ eprintln!("Ignoring error: {}", err);
+ }
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.rs
new file mode 100644
index 0000000..dd703d4
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.rs
@@ -0,0 +1,10 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [3,1,4])]
+ One = 1,
+ Two = 2,
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.stderr
new file mode 100644
index 0000000..1c6259a
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.stderr
@@ -0,0 +1,5 @@
+error: '1' in the alternative values is already attributed as the discriminant of this variant
+ --> tests/try_build/compile_fail/alternative_clashes_with_its_discriminant.rs:5:34
+ |
+5 | #[num_enum(alternatives = [3,1,4])]
+ | ^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant.rs
new file mode 100644
index 0000000..3146c84
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant.rs
@@ -0,0 +1,12 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [2])]
+ One = 1,
+ Two = 2,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant.stderr
new file mode 100644
index 0000000..d2b1273
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant.stderr
@@ -0,0 +1,5 @@
+error: The discriminant '2' collides with a value attributed to a previous variant
+ --> tests/try_build/compile_fail/alternative_clashes_with_variant.rs:7:5
+ |
+7 | Two = 2,
+ | ^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.rs
new file mode 100644
index 0000000..f680737
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.rs
@@ -0,0 +1,10 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [5,7,0,3])]
+ One = 1,
+ Two = 2,
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.stderr
new file mode 100644
index 0000000..5602a54
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.stderr
@@ -0,0 +1,5 @@
+error: '0' in the alternative values is already attributed to a previous variant
+ --> tests/try_build/compile_fail/alternative_clashes_with_variant_out_of_order.rs:5:36
+ |
+5 | #[num_enum(alternatives = [5,7,0,3])]
+ | ^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_exprs.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_exprs.rs
new file mode 100644
index 0000000..046d9c1
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_exprs.rs
@@ -0,0 +1,13 @@
+const THREE: u8 = 3;
+
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(i8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [-1, 2, THREE])]
+ One = 1,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_exprs.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_exprs.stderr
new file mode 100644
index 0000000..33540c5
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/alternative_exprs.stderr
@@ -0,0 +1,5 @@
+error: Only literals are allowed as num_enum alternate values
+ --> tests/try_build/compile_fail/alternative_exprs.rs:7:39
+ |
+7 | #[num_enum(alternatives = [-1, 2, THREE])]
+ | ^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_multiple_fields.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_multiple_fields.rs
new file mode 100644
index 0000000..5237811
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_multiple_fields.rs
@@ -0,0 +1,9 @@
+#[derive(Debug, Eq, PartialEq, num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ NonZero(u8, u8),
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_multiple_fields.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_multiple_fields.stderr
new file mode 100644
index 0000000..4854f86
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_multiple_fields.stderr
@@ -0,0 +1,5 @@
+error: Variant with `catch_all` must be a tuple with exactly 1 field matching the repr type
+ --> tests/try_build/compile_fail/catch_all_multiple_fields.rs:5:16
+ |
+5 | #[num_enum(catch_all)]
+ | ^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_non_tuple.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_non_tuple.rs
new file mode 100644
index 0000000..08a4e5d
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_non_tuple.rs
@@ -0,0 +1,9 @@
+#[derive(Debug, Eq, PartialEq, num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ NonZero = 1,
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_non_tuple.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_non_tuple.stderr
new file mode 100644
index 0000000..41bf02a
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_non_tuple.stderr
@@ -0,0 +1,5 @@
+error: Variant with `catch_all` must be a tuple with exactly 1 field matching the repr type
+ --> tests/try_build/compile_fail/catch_all_non_tuple.rs:5:16
+ |
+5 | #[num_enum(catch_all)]
+ | ^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_type_mismatch.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_type_mismatch.rs
new file mode 100644
index 0000000..d5b91c1
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_type_mismatch.rs
@@ -0,0 +1,9 @@
+#[derive(Debug, Eq, PartialEq, num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ NonZero(i32),
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_type_mismatch.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_type_mismatch.stderr
new file mode 100644
index 0000000..5d57fcc
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/catch_all_type_mismatch.stderr
@@ -0,0 +1,5 @@
+error: Variant with `catch_all` must be a tuple with exactly 1 field matching the repr type
+ --> tests/try_build/compile_fail/catch_all_type_mismatch.rs:5:16
+ |
+5 | #[num_enum(catch_all)]
+ | ^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_default.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_default.rs
new file mode 100644
index 0000000..79a1aee
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_default.rs
@@ -0,0 +1,9 @@
+#[derive(Default, num_enum::Default)]
+#[repr(u8)]
+enum Number {
+ #[default]
+ Zero,
+}
+
+fn main() {
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_default.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_default.stderr
new file mode 100644
index 0000000..69b8872
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_default.stderr
@@ -0,0 +1,9 @@
+error[E0119]: conflicting implementations of trait `Default` for type `Number`
+ --> tests/try_build/compile_fail/conflicting_default.rs:1:19
+ |
+1 | #[derive(Default, num_enum::Default)]
+ | ------- ^^^^^^^^^^^^^^^^^ conflicting implementation for `Number`
+ | |
+ | first implementation here
+ |
+ = note: this error originates in the derive macro `num_enum::Default` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_derive.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_derive.rs
new file mode 100644
index 0000000..30b9514
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_derive.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::FromPrimitive, num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero,
+ #[num_enum(default)]
+ One,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_derive.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_derive.stderr
new file mode 100644
index 0000000..679eb06
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/conflicting_derive.stderr
@@ -0,0 +1,20 @@
+error[E0119]: conflicting implementations of trait `TryFromPrimitive` for type `Numbers`
+ --> tests/try_build/compile_fail/conflicting_derive.rs:1:35
+ |
+1 | #[derive(num_enum::FromPrimitive, num_enum::TryFromPrimitive)]
+ | ----------------------- ^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `Numbers`
+ | |
+ | first implementation here
+ |
+ = note: this error originates in the derive macro `num_enum::TryFromPrimitive` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+error[E0119]: conflicting implementations of trait `TryFrom<u8>` for type `Numbers`
+ --> tests/try_build/compile_fail/conflicting_derive.rs:1:35
+ |
+1 | #[derive(num_enum::FromPrimitive, num_enum::TryFromPrimitive)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: conflicting implementation in crate `core`:
+ - impl<T, U> TryFrom<U> for T
+ where U: Into<T>;
+ = note: this error originates in the derive macro `num_enum::TryFromPrimitive` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all.rs
new file mode 100644
index 0000000..8034dd0
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all.rs
@@ -0,0 +1,10 @@
+#[derive(Debug, Eq, PartialEq, num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ #[default]
+ Zero = 0,
+ #[num_enum(catch_all)]
+ NonZero(u8),
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all.stderr
new file mode 100644
index 0000000..4427013
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all.stderr
@@ -0,0 +1,5 @@
+error: Attribute `catch_all` is mutually exclusive with `default`
+ --> tests/try_build/compile_fail/default_and_catch_all.rs:6:16
+ |
+6 | #[num_enum(catch_all)]
+ | ^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_alt.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_alt.rs
new file mode 100644
index 0000000..071bbe8
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_alt.rs
@@ -0,0 +1,10 @@
+#[derive(Debug, Eq, PartialEq, num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ #[num_enum(default)]
+ Zero = 0,
+ #[num_enum(catch_all)]
+ NonZero(u8),
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_alt.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_alt.stderr
new file mode 100644
index 0000000..2ef1683
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_alt.stderr
@@ -0,0 +1,5 @@
+error: Attribute `catch_all` is mutually exclusive with `default`
+ --> tests/try_build/compile_fail/default_and_catch_all_alt.rs:6:16
+ |
+6 | #[num_enum(catch_all)]
+ | ^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant.rs
new file mode 100644
index 0000000..251785c
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant.rs
@@ -0,0 +1,10 @@
+#[derive(Debug, Eq, PartialEq, num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ #[default]
+ NonZero(u8),
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant.stderr
new file mode 100644
index 0000000..21d77c9
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant.stderr
@@ -0,0 +1,5 @@
+error: Attribute `default` is mutually exclusive with `catch_all`
+ --> tests/try_build/compile_fail/default_and_catch_all_same_variant.rs:6:5
+ |
+6 | #[default]
+ | ^^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.rs
new file mode 100644
index 0000000..cfa884b
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.rs
@@ -0,0 +1,10 @@
+#[derive(Debug, Eq, PartialEq, num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ #[num_enum(default)]
+ NonZero(u8),
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.stderr
new file mode 100644
index 0000000..556afd3
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.stderr
@@ -0,0 +1,5 @@
+error: Attribute `default` is mutually exclusive with `catch_all`
+ --> tests/try_build/compile_fail/default_and_catch_all_same_variant_alt.rs:6:16
+ |
+6 | #[num_enum(default)]
+ | ^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.rs
new file mode 100644
index 0000000..9de1028
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [2..=255])]
+ NonZero = 1,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.stderr
new file mode 100644
index 0000000..a60cc4d
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.stderr
@@ -0,0 +1,5 @@
+error: Ranges are only supported as num_enum alternate values if the `complex-expressions` feature of the crate `num_enum` is enabled
+ --> tests/try_build/compile_fail/features/!complex-expressions/alternate_exprs_with_range.rs:5:5
+ |
+5 | #[num_enum(alternatives = [2..=255])]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.rs
new file mode 100644
index 0000000..bafbfdb
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [..255])]
+ NonZero = 1,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.stderr
new file mode 100644
index 0000000..7dc3d59
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.stderr
@@ -0,0 +1,5 @@
+error: When ranges are used for alternate values, both bounds most be explicitly specified numeric literals
+ --> tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_lower_bound.rs:5:32
+ |
+5 | #[num_enum(alternatives = [..255])]
+ | ^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.rs
new file mode 100644
index 0000000..2bc2854
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [2..])]
+ NonZero = 1,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.stderr
new file mode 100644
index 0000000..f458858
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.stderr
@@ -0,0 +1,5 @@
+error: When ranges are used for alternate values, both bounds most be explicitly specified numeric literals
+ --> tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_missing_upper_bound.rs:5:32
+ |
+5 | #[num_enum(alternatives = [2..])]
+ | ^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.rs
new file mode 100644
index 0000000..a083acb
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.rs
@@ -0,0 +1,13 @@
+const TWO: u8 = 2;
+
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [TWO..=255])]
+ NonZero = 1,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.stderr
new file mode 100644
index 0000000..40249eb
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.stderr
@@ -0,0 +1,5 @@
+error: When ranges are used for alternate values, both bounds most be explicitly specified numeric literals
+ --> tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_lower_bound.rs:7:32
+ |
+7 | #[num_enum(alternatives = [TWO..=255])]
+ | ^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.rs
new file mode 100644
index 0000000..c7b3edc
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.rs
@@ -0,0 +1,13 @@
+const TWOFIVEFIVE: u8 = 255;
+
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [2..=TWOFIVEFIVE])]
+ NonZero = 1,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.stderr
new file mode 100644
index 0000000..78863b6
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.stderr
@@ -0,0 +1,5 @@
+error: When ranges are used for alternate values, both bounds most be explicitly specified numeric literals
+ --> tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_nonlit_upper_bound.rs:7:32
+ |
+7 | #[num_enum(alternatives = [2..=TWOFIVEFIVE])]
+ | ^^^^^^^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.rs
new file mode 100644
index 0000000..dc17669
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [255..=2])]
+ NonZero = 1,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.stderr
new file mode 100644
index 0000000..b933eda
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.stderr
@@ -0,0 +1,5 @@
+error: When using ranges for alternate values, upper bound must not be less than lower bound
+ --> tests/try_build/compile_fail/features/complex-expressions/alternate_exprs_range_swapped_bounds.rs:5:32
+ |
+5 | #[num_enum(alternatives = [255..=2])]
+ | ^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/garbage_attribute.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/garbage_attribute.rs
new file mode 100644
index 0000000..62b9e59
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/garbage_attribute.rs
@@ -0,0 +1,12 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(garbage)]
+ One = 1,
+ Two = 2,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/garbage_attribute.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/garbage_attribute.stderr
new file mode 100644
index 0000000..fa0a80f
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/garbage_attribute.stderr
@@ -0,0 +1,5 @@
+error: Invalid attribute: expected one of: `default`, `catch_all`, `alternatives`
+ --> $DIR/garbage_attribute.rs:5:5
+ |
+5 | #[num_enum(garbage)]
+ | ^^^^^^^^^^^^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/missing_default.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/missing_default.rs
new file mode 100644
index 0000000..afe21c1
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/missing_default.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero,
+ One,
+ Two,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/missing_default.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/missing_default.stderr
new file mode 100644
index 0000000..12d40e2
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/missing_default.stderr
@@ -0,0 +1,7 @@
+error: #[derive(num_enum::FromPrimitive)] requires enum to be exhaustive, or a variant marked with `#[default]`, `#[num_enum(default)]`, or `#[num_enum(catch_all)`
+ --> $DIR/missing_default.rs:1:10
+ |
+1 | #[derive(num_enum::FromPrimitive)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: this error originates in the derive macro `num_enum::FromPrimitive` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/missing_repr.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/missing_repr.rs
new file mode 100644
index 0000000..e48a4ab
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/missing_repr.rs
@@ -0,0 +1,7 @@
+#[derive(num_enum::IntoPrimitive)]
+enum Numbers {
+ Zero,
+ One,
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/missing_repr.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/missing_repr.stderr
new file mode 100644
index 0000000..6b0c81b
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/missing_repr.stderr
@@ -0,0 +1,7 @@
+error: Missing `#[repr({Integer})]` attribute
+ --> $DIR/missing_repr.rs:1:10
+ |
+1 | #[derive(num_enum::IntoPrimitive)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: this error originates in the derive macro `num_enum::IntoPrimitive` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all.rs
new file mode 100644
index 0000000..40d6cba
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all.rs
@@ -0,0 +1,10 @@
+#[derive(Debug, Eq, PartialEq, num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ #[num_enum(catch_all)]
+ Zero(u8),
+ #[num_enum(catch_all)]
+ NonZero(u8),
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all.stderr
new file mode 100644
index 0000000..a8da5e5
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all.stderr
@@ -0,0 +1,5 @@
+error: Multiple variants marked with `#[num_enum(catch_all)]`
+ --> tests/try_build/compile_fail/multiple_catch_all.rs:6:16
+ |
+6 | #[num_enum(catch_all)]
+ | ^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all_same_variant.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all_same_variant.rs
new file mode 100644
index 0000000..75bbfd7
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all_same_variant.rs
@@ -0,0 +1,10 @@
+#[derive(Debug, Eq, PartialEq, num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ #[num_enum(catch_all)]
+ NonZero(u8),
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all_same_variant.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all_same_variant.stderr
new file mode 100644
index 0000000..7ddf7e6
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_catch_all_same_variant.stderr
@@ -0,0 +1,5 @@
+error: Multiple variants marked with `#[num_enum(catch_all)]`
+ --> tests/try_build/compile_fail/multiple_catch_all_same_variant.rs:6:16
+ |
+6 | #[num_enum(catch_all)]
+ | ^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults.rs
new file mode 100644
index 0000000..9f942d4
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults.rs
@@ -0,0 +1,13 @@
+#[derive(num_enum::FromPrimitive, num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero,
+ #[default]
+ One,
+ #[default]
+ Two,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults.stderr
new file mode 100644
index 0000000..68e6add
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults.stderr
@@ -0,0 +1,5 @@
+error: Multiple variants marked `#[default]` or `#[num_enum(default)]` found
+ --> $DIR/multiple_defaults.rs:7:5
+ |
+7 | #[default]
+ | ^^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults_different_kinds.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults_different_kinds.rs
new file mode 100644
index 0000000..d1ecef2
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults_different_kinds.rs
@@ -0,0 +1,13 @@
+#[derive(num_enum::FromPrimitive, num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero,
+ #[default]
+ One,
+ #[num_enum(default)]
+ Two,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults_different_kinds.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults_different_kinds.stderr
new file mode 100644
index 0000000..4e209ae
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_defaults_different_kinds.stderr
@@ -0,0 +1,5 @@
+error: Multiple variants marked `#[default]` or `#[num_enum(default)]` found
+ --> $DIR/multiple_defaults_different_kinds.rs:7:16
+ |
+7 | #[num_enum(default)]
+ | ^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_num_enum_defaults.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_num_enum_defaults.rs
new file mode 100644
index 0000000..f8a24e6
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_num_enum_defaults.rs
@@ -0,0 +1,13 @@
+#[derive(num_enum::FromPrimitive, num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero,
+ #[num_enum(default)]
+ One,
+ #[num_enum(default)]
+ Two,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_num_enum_defaults.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_num_enum_defaults.stderr
new file mode 100644
index 0000000..f481ee2
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/multiple_num_enum_defaults.stderr
@@ -0,0 +1,5 @@
+error: Multiple variants marked `#[default]` or `#[num_enum(default)]` found
+ --> $DIR/multiple_num_enum_defaults.rs:7:16
+ |
+7 | #[num_enum(default)]
+ | ^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/repr_c.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/repr_c.rs
new file mode 100644
index 0000000..70deb70
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/repr_c.rs
@@ -0,0 +1,8 @@
+#[derive(num_enum::IntoPrimitive)]
+#[repr(C)]
+enum Numbers {
+ Zero,
+ One,
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/repr_c.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/repr_c.stderr
new file mode 100644
index 0000000..fb29b12
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/repr_c.stderr
@@ -0,0 +1,5 @@
+error: repr(C) doesn't have a well defined size
+ --> $DIR/repr_c.rs:2:8
+ |
+2 | #[repr(C)]
+ | ^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_alternatives.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_alternatives.rs
new file mode 100644
index 0000000..4dbdabb
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_alternatives.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::UnsafeFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero,
+ #[num_enum(alternatives = [2])]
+ One,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_alternatives.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_alternatives.stderr
new file mode 100644
index 0000000..7aac45c
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_alternatives.stderr
@@ -0,0 +1,5 @@
+error: #[derive(UnsafeFromPrimitive)] does not support `#[num_enum(alternatives = [..])]`
+ --> $DIR/unpexpected_alternatives.rs:5:16
+ |
+5 | #[num_enum(alternatives = [2])]
+ | ^^^^^^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_default.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_default.rs
new file mode 100644
index 0000000..ffb1a0e
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_default.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::UnsafeFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero,
+ #[num_enum(default)]
+ NoneZero,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_default.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_default.stderr
new file mode 100644
index 0000000..c4127cb
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/unpexpected_default.stderr
@@ -0,0 +1,5 @@
+error: #[derive(UnsafeFromPrimitive)] does not support `#[num_enum(default)]`
+ --> $DIR/unpexpected_default.rs:5:16
+ |
+5 | #[num_enum(default)]
+ | ^^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/variants_with_fields.rs b/rust/vendor/num_enum/tests/try_build/compile_fail/variants_with_fields.rs
new file mode 100644
index 0000000..6dd548e
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/variants_with_fields.rs
@@ -0,0 +1,22 @@
+use num_enum::{FromPrimitive, IntoPrimitive, TryFromPrimitive};
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero,
+ NonZero(u8),
+}
+
+#[derive(Debug, Eq, PartialEq, FromPrimitive)]
+#[repr(u8)]
+enum Colour {
+ Red { intensity: u8 },
+}
+
+#[derive(Debug, Eq, PartialEq, IntoPrimitive)]
+#[repr(u8)]
+enum Meaningless {
+ Beep(),
+}
+
+fn main() {}
diff --git a/rust/vendor/num_enum/tests/try_build/compile_fail/variants_with_fields.stderr b/rust/vendor/num_enum/tests/try_build/compile_fail/variants_with_fields.stderr
new file mode 100644
index 0000000..093cc90
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/compile_fail/variants_with_fields.stderr
@@ -0,0 +1,17 @@
+error: `num_enum` only supports unit variants (with no associated data), but `Number::NonZero` was not a unit variant.
+ --> $DIR/variants_with_fields.rs:7:5
+ |
+7 | NonZero(u8),
+ | ^^^^^^^^^^^
+
+error: `num_enum` only supports unit variants (with no associated data), but `Colour::Red` was not a unit variant.
+ --> $DIR/variants_with_fields.rs:13:5
+ |
+13 | Red { intensity: u8 },
+ | ^^^^^^^^^^^^^^^^^^^^^
+
+error: `num_enum` only supports unit variants (with no associated data), but `Meaningless::Beep` was not a unit variant.
+ --> $DIR/variants_with_fields.rs:19:5
+ |
+19 | Beep(),
+ | ^^^^^^
diff --git a/rust/vendor/num_enum/tests/try_build/pass/default_and_alternatives.rs b/rust/vendor/num_enum/tests/try_build/pass/default_and_alternatives.rs
new file mode 100644
index 0000000..aec5ed3
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/pass/default_and_alternatives.rs
@@ -0,0 +1,66 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Default {
+ #[num_enum(default)]
+ Foo = 0,
+ Bar = 1,
+}
+
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Alternatives {
+ #[num_enum(alternatives = [])]
+ Foo = 0,
+ #[num_enum(alternatives = [3])]
+ Bar = 1,
+ #[num_enum(alternatives = [4, 5])]
+ Baz = 2,
+ #[num_enum(alternatives = [7])]
+ #[num_enum(alternatives = [8])]
+ Blee = 6,
+}
+
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Both {
+ #[num_enum(default)]
+ Foo = 0,
+ #[num_enum(alternatives = [3])]
+ Bar = 1,
+}
+
+mod mixed {
+ #[derive(num_enum::TryFromPrimitive)]
+ #[repr(u8)]
+ enum AlternativesFollowedByDefaultInSingleAttribute {
+ #[num_enum(alternatives = [1, 2], default)]
+ Foo = 0,
+ }
+
+ #[derive(num_enum::TryFromPrimitive)]
+ #[repr(u8)]
+ enum DefaultFollowedByAlternativesInSingleAttribute {
+ #[num_enum(default, alternatives = [1, 2])]
+ Foo = 0,
+ }
+
+ #[derive(num_enum::TryFromPrimitive)]
+ #[repr(u8)]
+ enum AlternativesFollowedByDefaultInMultipleAttributes {
+ #[num_enum(alternatives = [1, 2])]
+ #[num_enum(default)]
+ Foo = 0,
+ }
+
+ #[derive(num_enum::TryFromPrimitive)]
+ #[repr(u8)]
+ enum DefaultFollowedByAlternativesInMultipleAttributes {
+ #[num_enum(default)]
+ #[num_enum(alternatives = [1, 2])]
+ Foo = 0,
+ }
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_try_from.rs b/rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_try_from.rs
new file mode 100644
index 0000000..4e9f892
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_try_from.rs
@@ -0,0 +1,40 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum ExhaustiveTryFrom {
+ #[num_enum(alternatives = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])]
+ A = 0,
+ #[num_enum(alternatives = [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])]
+ B = 16,
+ #[num_enum(alternatives = [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47])]
+ C = 32,
+ #[num_enum(alternatives = [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])]
+ D = 48,
+ #[num_enum(alternatives = [65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])]
+ E = 64,
+ #[num_enum(alternatives = [81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95])]
+ F = 80,
+ #[num_enum(alternatives = [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111])]
+ G = 96,
+ #[num_enum(alternatives = [113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127])]
+ H = 112,
+ #[num_enum(alternatives = [129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143])]
+ I = 128,
+ #[num_enum(alternatives = [145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159])]
+ J = 144,
+ #[num_enum(alternatives = [161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175])]
+ K = 160,
+ #[num_enum(alternatives = [177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191])]
+ L = 176,
+ #[num_enum(alternatives = [193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207])]
+ M = 192,
+ #[num_enum(alternatives = [209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223])]
+ N = 208,
+ #[num_enum(alternatives = [225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239])]
+ O = 224,
+ #[num_enum(alternatives = [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255])]
+ P = 240,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_via_alternatives.rs b/rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_via_alternatives.rs
new file mode 100644
index 0000000..c065411
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_via_alternatives.rs
@@ -0,0 +1,40 @@
+#[derive(num_enum::FromPrimitive)]
+#[repr(u8)]
+enum ExhaustiveFrom {
+ #[num_enum(alternatives = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])]
+ A = 0,
+ #[num_enum(alternatives = [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])]
+ B = 16,
+ #[num_enum(alternatives = [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47])]
+ C = 32,
+ #[num_enum(alternatives = [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])]
+ D = 48,
+ #[num_enum(alternatives = [65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])]
+ E = 64,
+ #[num_enum(alternatives = [81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95])]
+ F = 80,
+ #[num_enum(alternatives = [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111])]
+ G = 96,
+ #[num_enum(alternatives = [113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127])]
+ H = 112,
+ #[num_enum(alternatives = [129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143])]
+ I = 128,
+ #[num_enum(alternatives = [145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159])]
+ J = 144,
+ #[num_enum(alternatives = [161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175])]
+ K = 160,
+ #[num_enum(alternatives = [177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191])]
+ L = 176,
+ #[num_enum(alternatives = [193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207])]
+ M = 192,
+ #[num_enum(alternatives = [209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223])]
+ N = 208,
+ #[num_enum(alternatives = [225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239])]
+ O = 224,
+ #[num_enum(alternatives = [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255])]
+ P = 240,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_via_default.rs b/rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_via_default.rs
new file mode 100644
index 0000000..eb3c357
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/pass/exhaustive_enum_via_default.rs
@@ -0,0 +1,39 @@
+#[derive(num_enum::FromPrimitive)]
+#[repr(u8)]
+enum ExhaustiveFrom {
+ #[num_enum(default)]
+ #[num_enum(alternatives = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])]
+ A = 0,
+ #[num_enum(alternatives = [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])]
+ B = 16,
+ #[num_enum(alternatives = [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47])]
+ C = 32,
+ #[num_enum(alternatives = [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])]
+ D = 48,
+ #[num_enum(alternatives = [65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])]
+ E = 64,
+ #[num_enum(alternatives = [81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95])]
+ F = 80,
+ #[num_enum(alternatives = [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111])]
+ G = 96,
+ #[num_enum(alternatives = [113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127])]
+ H = 112,
+ #[num_enum(alternatives = [129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143])]
+ I = 128,
+ #[num_enum(alternatives = [145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159])]
+ J = 144,
+ #[num_enum(alternatives = [161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175])]
+ K = 160,
+ #[num_enum(alternatives = [177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191])]
+ L = 176,
+ #[num_enum(alternatives = [193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207])]
+ M = 192,
+ #[num_enum(alternatives = [209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223])]
+ N = 208,
+ #[num_enum(alternatives = [225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239])]
+ O = 224,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/pass/features/complex-expressions/alternate_exprs_exhaustive_with_range.rs b/rust/vendor/num_enum/tests/try_build/pass/features/complex-expressions/alternate_exprs_exhaustive_with_range.rs
new file mode 100644
index 0000000..9de1028
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/pass/features/complex-expressions/alternate_exprs_exhaustive_with_range.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::FromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [2..=255])]
+ NonZero = 1,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_build/pass/features/complex-expressions/alternate_exprs_non_exhaustive_with_range.rs b/rust/vendor/num_enum/tests/try_build/pass/features/complex-expressions/alternate_exprs_non_exhaustive_with_range.rs
new file mode 100644
index 0000000..0c7dd52
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_build/pass/features/complex-expressions/alternate_exprs_non_exhaustive_with_range.rs
@@ -0,0 +1,11 @@
+#[derive(num_enum::TryFromPrimitive)]
+#[repr(u8)]
+enum Numbers {
+ Zero = 0,
+ #[num_enum(alternatives = [2..255])]
+ NonZero = 1,
+}
+
+fn main() {
+
+}
diff --git a/rust/vendor/num_enum/tests/try_from_primitive.rs b/rust/vendor/num_enum/tests/try_from_primitive.rs
new file mode 100644
index 0000000..e6d19f6
--- /dev/null
+++ b/rust/vendor/num_enum/tests/try_from_primitive.rs
@@ -0,0 +1,504 @@
+use ::std::convert::{TryFrom, TryInto};
+
+use ::num_enum::TryFromPrimitive;
+
+// Guard against https://github.com/illicitonion/num_enum/issues/27
+mod alloc {}
+mod core {}
+mod num_enum {}
+mod std {}
+
+#[test]
+fn simple() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero,
+ One,
+ Two,
+ }
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let three: Result<Enum, _> = 3u8.try_into();
+ assert_eq!(
+ three.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `3`"
+ );
+}
+
+#[test]
+fn even() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero = 0,
+ Two = 2,
+ Four = 4,
+ }
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(
+ one.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `1`"
+ );
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(two, Ok(Enum::Two));
+
+ let three: Result<Enum, _> = 3u8.try_into();
+ assert_eq!(
+ three.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `3`"
+ );
+
+ let four: Result<Enum, _> = 4u8.try_into();
+ assert_eq!(four, Ok(Enum::Four));
+}
+
+#[test]
+fn skipped_value() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero,
+ One,
+ Three = 3,
+ Four,
+ }
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(
+ two.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `2`"
+ );
+
+ let three: Result<Enum, _> = 3u8.try_into();
+ assert_eq!(three, Ok(Enum::Three));
+
+ let four: Result<Enum, _> = 4u8.try_into();
+ assert_eq!(four, Ok(Enum::Four));
+}
+
+#[test]
+fn wrong_order() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Four = 4,
+ Three = 3,
+ Zero = 0,
+ One, // Zero + 1
+ }
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(
+ two.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `2`"
+ );
+
+ let three: Result<Enum, _> = 3u8.try_into();
+ assert_eq!(three, Ok(Enum::Three));
+
+ let four: Result<Enum, _> = 4u8.try_into();
+ assert_eq!(four, Ok(Enum::Four));
+}
+
+#[test]
+fn negative_values() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(i8)]
+ enum Enum {
+ MinusTwo = -2,
+ MinusOne = -1,
+ Zero = 0,
+ One = 1,
+ Two = 2,
+ }
+
+ let minus_two: Result<Enum, _> = (-2i8).try_into();
+ assert_eq!(minus_two, Ok(Enum::MinusTwo));
+
+ let minus_one: Result<Enum, _> = (-1i8).try_into();
+ assert_eq!(minus_one, Ok(Enum::MinusOne));
+
+ let zero: Result<Enum, _> = 0i8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1i8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2i8.try_into();
+ assert_eq!(two, Ok(Enum::Two));
+}
+
+#[test]
+fn discriminant_expressions() {
+ const ONE: u8 = 1;
+
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero,
+ One = ONE,
+ Two,
+ Four = 4u8,
+ Five,
+ Six = ONE + ONE + 2u8 + 2,
+ }
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(two, Ok(Enum::Two));
+
+ let three: Result<Enum, _> = 3u8.try_into();
+ assert_eq!(
+ three.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `3`",
+ );
+
+ let four: Result<Enum, _> = 4u8.try_into();
+ assert_eq!(four, Ok(Enum::Four));
+
+ let five: Result<Enum, _> = 5u8.try_into();
+ assert_eq!(five, Ok(Enum::Five));
+
+ let six: Result<Enum, _> = 6u8.try_into();
+ assert_eq!(six, Ok(Enum::Six));
+}
+
+#[cfg(feature = "complex-expressions")]
+mod complex {
+ use num_enum::TryFromPrimitive;
+ use std::convert::TryInto;
+
+ const ONE: u8 = 1;
+
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero,
+ One = ONE,
+ Two,
+ Four = 4u8,
+ Five,
+ Six = ONE + ONE + 2u8 + 2,
+ Seven = (7, 2).0,
+ }
+
+ #[test]
+ fn different_values() {
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(two, Ok(Enum::Two));
+
+ let three: Result<Enum, _> = 3u8.try_into();
+ assert_eq!(
+ three.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `3`",
+ );
+
+ let four: Result<Enum, _> = 4u8.try_into();
+ assert_eq!(four, Ok(Enum::Four));
+
+ let five: Result<Enum, _> = 5u8.try_into();
+ assert_eq!(five, Ok(Enum::Five));
+
+ let six: Result<Enum, _> = 6u8.try_into();
+ assert_eq!(six, Ok(Enum::Six));
+
+ let seven: Result<Enum, _> = 7u8.try_into();
+ assert_eq!(seven, Ok(Enum::Seven));
+ }
+
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum EnumWithExclusiveRange {
+ Zero = 0,
+ #[num_enum(alternatives = [2..4])]
+ OneOrTwoOrThree,
+ }
+
+ #[test]
+ fn different_values_with_exclusive_range() {
+ let zero: Result<EnumWithExclusiveRange, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(EnumWithExclusiveRange::Zero));
+
+ let one: Result<EnumWithExclusiveRange, _> = 1u8.try_into();
+ assert_eq!(one, Ok(EnumWithExclusiveRange::OneOrTwoOrThree));
+
+ let two: Result<EnumWithExclusiveRange, _> = 2u8.try_into();
+ assert_eq!(two, Ok(EnumWithExclusiveRange::OneOrTwoOrThree));
+
+ let three: Result<EnumWithExclusiveRange, _> = 3u8.try_into();
+ assert_eq!(three, Ok(EnumWithExclusiveRange::OneOrTwoOrThree));
+
+ let four: Result<EnumWithExclusiveRange, _> = 4u8.try_into();
+ assert_eq!(
+ four.unwrap_err().to_string(),
+ "No discriminant in enum `EnumWithExclusiveRange` matches the value `4`",
+ );
+ }
+
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum EnumWithInclusiveRange {
+ Zero = 0,
+ #[num_enum(alternatives = [2..=3])]
+ OneOrTwoOrThree,
+ }
+
+ #[test]
+ fn different_values_with_inclusive_range() {
+ let zero: Result<EnumWithInclusiveRange, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(EnumWithInclusiveRange::Zero));
+
+ let one: Result<EnumWithInclusiveRange, _> = 1u8.try_into();
+ assert_eq!(one, Ok(EnumWithInclusiveRange::OneOrTwoOrThree));
+
+ let two: Result<EnumWithInclusiveRange, _> = 2u8.try_into();
+ assert_eq!(two, Ok(EnumWithInclusiveRange::OneOrTwoOrThree));
+
+ let three: Result<EnumWithInclusiveRange, _> = 3u8.try_into();
+ assert_eq!(three, Ok(EnumWithInclusiveRange::OneOrTwoOrThree));
+
+ let four: Result<EnumWithInclusiveRange, _> = 4u8.try_into();
+ assert_eq!(
+ four.unwrap_err().to_string(),
+ "No discriminant in enum `EnumWithInclusiveRange` matches the value `4`",
+ );
+ }
+}
+
+#[test]
+fn missing_trailing_comma() {
+ #[rustfmt::skip]
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Enum {
+ Zero,
+ One
+}
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(
+ two.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `2`"
+ );
+}
+
+#[test]
+fn ignores_extra_attributes() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[allow(unused)]
+ #[repr(u8)]
+ enum Enum {
+ Zero,
+ #[allow(unused)]
+ One,
+ }
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(
+ two.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `2`"
+ );
+}
+
+#[test]
+fn visibility_is_fine() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ pub(crate) enum Enum {
+ Zero,
+ One,
+ }
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(
+ two.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `2`"
+ );
+}
+
+#[test]
+fn error_variant_is_allowed() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ pub enum Enum {
+ Ok,
+ Error,
+ }
+
+ let ok: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(ok, Ok(Enum::Ok));
+
+ let err: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(err, Ok(Enum::Error));
+
+ let unknown: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(
+ unknown.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `2`"
+ );
+}
+
+#[test]
+fn alternative_values() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(i8)]
+ enum Enum {
+ Zero = 0,
+ #[num_enum(alternatives = [-1, 2, 3])]
+ OneTwoThreeOrMinusOne = 1,
+ }
+
+ let minus_one: Result<Enum, _> = (-1i8).try_into();
+ assert_eq!(minus_one, Ok(Enum::OneTwoThreeOrMinusOne));
+
+ let zero: Result<Enum, _> = 0i8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1i8.try_into();
+ assert_eq!(one, Ok(Enum::OneTwoThreeOrMinusOne));
+
+ let two: Result<Enum, _> = 2i8.try_into();
+ assert_eq!(two, Ok(Enum::OneTwoThreeOrMinusOne));
+
+ let three: Result<Enum, _> = 3i8.try_into();
+ assert_eq!(three, Ok(Enum::OneTwoThreeOrMinusOne));
+
+ let four: Result<Enum, _> = 4i8.try_into();
+ assert_eq!(
+ four.unwrap_err().to_string(),
+ "No discriminant in enum `Enum` matches the value `4`"
+ );
+}
+
+#[test]
+fn default_value() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero = 0,
+ One = 1,
+ #[num_enum(default)]
+ Other = 2,
+ }
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(two, Ok(Enum::Other));
+
+ let max_value: Result<Enum, _> = u8::max_value().try_into();
+ assert_eq!(max_value, Ok(Enum::Other));
+}
+
+#[test]
+fn alternative_values_and_default_value() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ #[num_enum(default)]
+ Zero = 0,
+ One = 1,
+ #[num_enum(alternatives = [3])]
+ TwoOrThree = 2,
+ Four = 4,
+ }
+
+ let zero: Result<Enum, _> = 0u8.try_into();
+ assert_eq!(zero, Ok(Enum::Zero));
+
+ let one: Result<Enum, _> = 1u8.try_into();
+ assert_eq!(one, Ok(Enum::One));
+
+ let two: Result<Enum, _> = 2u8.try_into();
+ assert_eq!(two, Ok(Enum::TwoOrThree));
+
+ let three: Result<Enum, _> = 3u8.try_into();
+ assert_eq!(three, Ok(Enum::TwoOrThree));
+
+ let four: Result<Enum, _> = 4u8.try_into();
+ assert_eq!(four, Ok(Enum::Four));
+
+ let five: Result<Enum, _> = 5u8.try_into();
+ assert_eq!(five, Ok(Enum::Zero));
+}
+
+#[test]
+fn try_from_primitive_number() {
+ #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ #[num_enum(default)]
+ Whatever = 0,
+ }
+
+    // #[derive(TryFromPrimitive)] generates implementations for the following traits:
+ //
+ // - `TryFromPrimitive<T>`
+ // - `TryFrom<T>`
+
+ let try_from_primitive = Enum::try_from_primitive(0_u8);
+ assert_eq!(try_from_primitive, Ok(Enum::Whatever));
+
+ let try_from = Enum::try_from(0_u8);
+ assert_eq!(try_from, Ok(Enum::Whatever));
+}
+
+// #[derive(FromPrimitive)] generates implementations for the following traits:
+//
+// - `FromPrimitive<T>`
+// - `From<T>`
+// - `TryFromPrimitive<T>`
+// - `TryFrom<T>`
diff --git a/rust/vendor/num_enum/tests/unsafe_from_primitive.rs b/rust/vendor/num_enum/tests/unsafe_from_primitive.rs
new file mode 100644
index 0000000..79fb582
--- /dev/null
+++ b/rust/vendor/num_enum/tests/unsafe_from_primitive.rs
@@ -0,0 +1,22 @@
+use ::num_enum::UnsafeFromPrimitive;
+
+// Guard against https://github.com/illicitonion/num_enum/issues/27
+mod alloc {}
+mod core {}
+mod num_enum {}
+mod std {}
+
+#[test]
+fn has_unsafe_from_primitive_number() {
+ #[derive(Debug, Eq, PartialEq, UnsafeFromPrimitive)]
+ #[repr(u8)]
+ enum Enum {
+ Zero,
+ One,
+ }
+
+ unsafe {
+ assert_eq!(Enum::from_unchecked(0_u8), Enum::Zero);
+ assert_eq!(Enum::from_unchecked(1_u8), Enum::One);
+ }
+}
diff --git a/rust/vendor/num_enum_derive/.cargo-checksum.json b/rust/vendor/num_enum_derive/.cargo-checksum.json
new file mode 100644
index 0000000..4cd36f9
--- /dev/null
+++ b/rust/vendor/num_enum_derive/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"91ca56a63860212f0ad546a431076d9650a190a29a6fe1f42dd9624f44ee05e8","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-BSD":"0be96d891d00e0ae0df75d7f3289b12871c000a1f5ac744f3b570768d4bb277c","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"808954bb7af3a919b66c792ccef12e31b98070e78e2a83cde665d0e485cb1a11","src/lib.rs":"92a94fee79ca9c1ae4769246cd88a1f4af28d0b731d416b2643059fed00ef6da"},"package":"dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799"} \ No newline at end of file
diff --git a/rust/vendor/num_enum_derive/Cargo.toml b/rust/vendor/num_enum_derive/Cargo.toml
new file mode 100644
index 0000000..4397b7d
--- /dev/null
+++ b/rust/vendor/num_enum_derive/Cargo.toml
@@ -0,0 +1,52 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "num_enum_derive"
+version = "0.5.11"
+authors = [
+ "Daniel Wagner-Hall <dawagner@gmail.com>",
+ "Daniel Henry-Mantilla <daniel.henry.mantilla@gmail.com>",
+ "Vincent Esche <regexident@gmail.com>",
+]
+description = "Internal implementation details for ::num_enum (Procedural macros to make inter-operation between primitives and enums easier)"
+readme = "README.md"
+keywords = []
+categories = []
+license = "BSD-3-Clause OR MIT OR Apache-2.0"
+repository = "https://github.com/illicitonion/num_enum"
+
+[package.metadata.docs.rs]
+features = ["external_doc"]
+
+[lib]
+proc-macro = true
+
+[dependencies.proc-macro-crate]
+version = "1"
+optional = true
+
+[dependencies.proc-macro2]
+version = "1"
+
+[dependencies.quote]
+version = "1"
+
+[dependencies.syn]
+version = "1.0.15"
+features = ["parsing"]
+
+[features]
+complex-expressions = ["syn/full"]
+default = ["std"]
+external_doc = []
+std = ["proc-macro-crate"]
diff --git a/rust/vendor/num_enum_derive/LICENSE-APACHE b/rust/vendor/num_enum_derive/LICENSE-APACHE
new file mode 100644
index 0000000..1b5ec8b
--- /dev/null
+++ b/rust/vendor/num_enum_derive/LICENSE-APACHE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/rust/vendor/num_enum_derive/LICENSE-BSD b/rust/vendor/num_enum_derive/LICENSE-BSD
new file mode 100644
index 0000000..b742e29
--- /dev/null
+++ b/rust/vendor/num_enum_derive/LICENSE-BSD
@@ -0,0 +1,27 @@
+Copyright (c) 2018, Daniel Wagner-Hall
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of num_enum nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/rust/vendor/num_enum_derive/LICENSE-MIT b/rust/vendor/num_enum_derive/LICENSE-MIT
new file mode 100644
index 0000000..31aa793
--- /dev/null
+++ b/rust/vendor/num_enum_derive/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/rust/vendor/num_enum_derive/README.md b/rust/vendor/num_enum_derive/README.md
new file mode 100644
index 0000000..902cee7
--- /dev/null
+++ b/rust/vendor/num_enum_derive/README.md
@@ -0,0 +1,277 @@
+num_enum
+========
+
+Procedural macros to make inter-operation between primitives and enums easier.
+This crate is no_std compatible.
+
+[![crates.io](https://img.shields.io/crates/v/num_enum.svg)](https://crates.io/crates/num_enum)
+[![Documentation](https://docs.rs/num_enum/badge.svg)](https://docs.rs/num_enum)
+[![Build Status](https://travis-ci.org/illicitonion/num_enum.svg?branch=master)](https://travis-ci.org/illicitonion/num_enum)
+
+Turning an enum into a primitive
+--------------------------------
+
+```rust
+use num_enum::IntoPrimitive;
+
+#[derive(IntoPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero,
+ One,
+}
+
+fn main() {
+ let zero: u8 = Number::Zero.into();
+ assert_eq!(zero, 0u8);
+}
+```
+
+`num_enum`'s `IntoPrimitive` is more type-safe than using `as`, because `as` will silently truncate - `num_enum` only derives `From` for exactly the discriminant type of the enum.
+
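+For illustration (this snippet is not part of the crate; the `Wide` enum is made up), an `as` cast will happily narrow past the repr type, while the derived `From` impl only exists for the exact repr, so the same mistake becomes a compile error:
+
+```rust
+#[repr(u16)]
+enum Wide {
+    Big = 300,
+}
+
+fn main() {
+    // `as` silently truncates 300 to 44 when narrowing to u8...
+    assert_eq!(Wide::Big as u8, 44);
+    // ...whereas with `#[derive(IntoPrimitive)]` only `From<Wide> for u16` is
+    // generated, so `let x: u8 = Wide::Big.into();` would not compile.
+}
+```
+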
+Attempting to turn a primitive into an enum with try_from
+----------------------------------------------
+
+```rust
+use num_enum::TryFromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero,
+ One,
+}
+
+fn main() {
+ let zero = Number::try_from(0u8);
+ assert_eq!(zero, Ok(Number::Zero));
+
+ let three = Number::try_from(3u8);
+ assert_eq!(
+ three.unwrap_err().to_string(),
+ "No discriminant in enum `Number` matches the value `3`",
+ );
+}
+```
+
+Variant alternatives
+---------------
+
+Sometimes a single enum variant might be representable by multiple numeric values.
+
+The `#[num_enum(alternatives = [..])]` attribute allows you to define additional value alternatives for individual variants.
+
+(The behavior of `IntoPrimitive` is unaffected by this attribute; it will always return the canonical value.)
+
+```rust
+use num_enum::TryFromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero = 0,
+ #[num_enum(alternatives = [2])]
+ OneOrTwo = 1,
+}
+
+fn main() {
+ let zero = Number::try_from(0u8);
+ assert_eq!(zero, Ok(Number::Zero));
+
+ let one = Number::try_from(1u8);
+ assert_eq!(one, Ok(Number::OneOrTwo));
+
+ let two = Number::try_from(2u8);
+ assert_eq!(two, Ok(Number::OneOrTwo));
+
+ let three = Number::try_from(3u8);
+ assert_eq!(
+ three.unwrap_err().to_string(),
+ "No discriminant in enum `Number` matches the value `3`",
+ );
+}
+```
+
+Range expressions are also supported for alternatives, but this requires enabling the `complex-expressions` feature:
+
+```rust
+use num_enum::TryFromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero = 0,
+ #[num_enum(alternatives = [2..16])]
+ Some = 1,
+ #[num_enum(alternatives = [17, 18..=255])]
+ Many = 16,
+}
+
+fn main() {
+ let zero = Number::try_from(0u8);
+ assert_eq!(zero, Ok(Number::Zero));
+
+ let some = Number::try_from(15u8);
+ assert_eq!(some, Ok(Number::Some));
+
+ let many = Number::try_from(255u8);
+ assert_eq!(many, Ok(Number::Many));
+}
+```
+
+Default variant
+---------------
+
+Sometimes it is desirable to have an `Other` variant in an enum that acts as a kind of wildcard, matching all the values not yet covered by other variants.
+
+The `#[num_enum(default)]` attribute allows you to mark a variant as the default.
+
+(The behavior of `IntoPrimitive` is unaffected by this attribute; it will always return the canonical value.)
+
+```rust
+use num_enum::TryFromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero = 0,
+ #[num_enum(default)]
+ NonZero = 1,
+}
+
+fn main() {
+ let zero = Number::try_from(0u8);
+ assert_eq!(zero, Ok(Number::Zero));
+
+ let one = Number::try_from(1u8);
+ assert_eq!(one, Ok(Number::NonZero));
+
+ let two = Number::try_from(2u8);
+ assert_eq!(two, Ok(Number::NonZero));
+}
+```
+
+Safely turning a primitive into an exhaustive enum with from_primitive
+-------------------------------------------------------------
+
+If your enum has all possible primitive values covered, you can derive `FromPrimitive` for it (which also auto-implements stdlib's `From`).
+
+You can cover all possible values by:
+* Having variants for every possible value
+* Having a variant marked `#[num_enum(default)]`
+* Having a variant marked `#[num_enum(catch_all)]`
+* Having `#[num_enum(alternatives = [...])]`s covering values not covered by a variant.
+
+```rust
+use num_enum::FromPrimitive;
+
+#[derive(Debug, Eq, PartialEq, FromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero,
+ #[num_enum(default)]
+ NonZero,
+}
+
+fn main() {
+ assert_eq!(
+ Number::Zero,
+ Number::from(0_u8),
+ );
+ assert_eq!(
+ Number::NonZero,
+ Number::from(1_u8),
+ );
+}
+```
+
+Catch-all variant
+-----------------
+
+Sometimes it is desirable to have an `Other` variant which holds the otherwise unmatched value as a field.
+
+The `#[num_enum(catch_all)]` attribute allows you to mark at most one variant for this purpose. The variant it's applied to must be a tuple variant with exactly one field matching the `repr` type.
+
+```rust
+use num_enum::FromPrimitive;
+use std::convert::TryFrom;
+
+#[derive(Debug, Eq, PartialEq, FromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero = 0,
+ #[num_enum(catch_all)]
+ NonZero(u8),
+}
+
+fn main() {
+ let zero = Number::from(0u8);
+ assert_eq!(zero, Number::Zero);
+
+ let one = Number::from(1u8);
+ assert_eq!(one, Number::NonZero(1_u8));
+
+ let two = Number::from(2u8);
+ assert_eq!(two, Number::NonZero(2_u8));
+}
+```
+
+As this is naturally exhaustive, it is only supported for `FromPrimitive`, not for `TryFromPrimitive`.
+
+Unsafely turning a primitive into an enum with from_unchecked
+-------------------------------------------------------------
+
+If you're really certain a conversion will succeed (and have not made use of `#[num_enum(default)]` or `#[num_enum(alternatives = [..])]`
+for any of its variants), and want to avoid a small amount of overhead, you can use unsafe code to do this conversion.
+Unless you have data showing that the match statement generated in the `try_from` above is a bottleneck for you,
+you should avoid doing this, as the unsafe code has the potential to cause serious memory issues in your program.
+
+```rust
+use num_enum::UnsafeFromPrimitive;
+
+#[derive(Debug, Eq, PartialEq, UnsafeFromPrimitive)]
+#[repr(u8)]
+enum Number {
+ Zero,
+ One,
+}
+
+fn main() {
+ assert_eq!(
+ unsafe { Number::from_unchecked(0_u8) },
+ Number::Zero,
+ );
+ assert_eq!(
+ unsafe { Number::from_unchecked(1_u8) },
+ Number::One,
+ );
+}
+
+unsafe fn undefined_behavior() {
+ let _ = Number::from_unchecked(2); // 2 is not a valid discriminant!
+}
+```
+
+Optional features
+-----------------
+
+Some enum values may be composed of complex expressions, for example:
+
+```rust
+enum Number {
+ Zero = (0, 1).0,
+ One = (0, 1).1,
+}
+```
+
+To cut down on compile time, these are not supported by default, but if you enable the `complex-expressions`
+feature of your dependency on `num_enum`, these should start working.
+
+License
+-------
+
+num_enum may be used under your choice of the BSD 3-clause, Apache 2, or MIT license.
diff --git a/rust/vendor/num_enum_derive/src/lib.rs b/rust/vendor/num_enum_derive/src/lib.rs
new file mode 100644
index 0000000..600730f
--- /dev/null
+++ b/rust/vendor/num_enum_derive/src/lib.rs
@@ -0,0 +1,1066 @@
+// Not supported by MSRV
+#![allow(clippy::uninlined_format_args)]
+
+extern crate proc_macro;
+
+use proc_macro::TokenStream;
+use proc_macro2::Span;
+use quote::{format_ident, quote};
+use std::collections::BTreeSet;
+use syn::{
+ parse::{Parse, ParseStream},
+ parse_macro_input, parse_quote,
+ spanned::Spanned,
+ Attribute, Data, DeriveInput, Error, Expr, ExprLit, ExprUnary, Fields, Ident, Lit, LitInt,
+ LitStr, Meta, Result, UnOp,
+};
+
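+// Helper macro: early-return a compile error from the derive, attached either to a
+// specific spanned token or to the macro call site.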
+macro_rules! die {
+ ($spanned:expr=>
+ $msg:expr
+ ) => {
+ return Err(Error::new_spanned($spanned, $msg))
+ };
+
+ (
+ $msg:expr
+ ) => {
+ return Err(Error::new(Span::call_site(), $msg))
+ };
+}
+
+fn literal(i: i128) -> Expr {
+ Expr::Lit(ExprLit {
+ lit: Lit::Int(LitInt::new(&i.to_string(), Span::call_site())),
+ attrs: vec![],
+ })
+}
+
+enum DiscriminantValue {
+ Literal(i128),
+ Expr(Expr),
+}
+
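+// Parses a discriminant expression: integer literals (including a leading unary minus)
+// are evaluated to an i128; any other expression is kept as-is.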
+fn parse_discriminant(val_exp: &Expr) -> Result<DiscriminantValue> {
+ let mut sign = 1;
+ let mut unsigned_expr = val_exp;
+ if let Expr::Unary(ExprUnary {
+ op: UnOp::Neg(..),
+ expr,
+ ..
+ }) = val_exp
+ {
+ unsigned_expr = expr;
+ sign = -1;
+ }
+ if let Expr::Lit(ExprLit {
+ lit: Lit::Int(ref lit_int),
+ ..
+ }) = unsigned_expr
+ {
+ Ok(DiscriminantValue::Literal(
+ sign * lit_int.base10_parse::<i128>()?,
+ ))
+ } else {
+ Ok(DiscriminantValue::Expr(val_exp.clone()))
+ }
+}
+
+#[cfg(feature = "complex-expressions")]
+fn parse_alternative_values(val_expr: &Expr) -> Result<Vec<DiscriminantValue>> {
+ fn range_expr_value_to_number(
+ parent_range_expr: &Expr,
+ range_bound_value: &Option<Box<Expr>>,
+ ) -> Result<i128> {
+ // Avoid needing to calculate what the lower and upper bound would be - these are type dependent,
+ // and also may not be obvious in context (e.g. an omitted bound could reasonably mean "from the last discriminant" or "from the lower bound of the type").
+ if let Some(range_bound_value) = range_bound_value {
+ let range_bound_value = parse_discriminant(range_bound_value.as_ref())?;
+ // If non-literals are used, we can't expand to the mapped values, so can't write a nice match statement or do exhaustiveness checking.
+ // Require literals instead.
+ if let DiscriminantValue::Literal(value) = range_bound_value {
+ return Ok(value);
+ }
+ }
+        die!(parent_range_expr => "When ranges are used for alternate values, both bounds must be explicitly specified numeric literals")
+ }
+
+ if let Expr::Range(syn::ExprRange {
+ from, to, limits, ..
+ }) = val_expr
+ {
+ let lower = range_expr_value_to_number(val_expr, from)?;
+ let upper = range_expr_value_to_number(val_expr, to)?;
+ // While this is technically allowed in Rust, and results in an empty range, it's almost certainly a mistake in this context.
+ if lower > upper {
+ die!(val_expr => "When using ranges for alternate values, upper bound must not be less than lower bound");
+ }
+ let mut values = Vec::with_capacity((upper - lower) as usize);
+ let mut next = lower;
+ loop {
+ match limits {
+ syn::RangeLimits::HalfOpen(..) => {
+ if next == upper {
+ break;
+ }
+ }
+ syn::RangeLimits::Closed(..) => {
+ if next > upper {
+ break;
+ }
+ }
+ }
+ values.push(DiscriminantValue::Literal(next));
+ next += 1;
+ }
+ return Ok(values);
+ }
+ parse_discriminant(val_expr).map(|v| vec![v])
+}
+
+#[cfg(not(feature = "complex-expressions"))]
+fn parse_alternative_values(val_expr: &Expr) -> Result<Vec<DiscriminantValue>> {
+ parse_discriminant(val_expr).map(|v| vec![v])
+}
+
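+// Custom keywords recognised inside `#[num_enum(...)]` variant attributes.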
+mod kw {
+ syn::custom_keyword!(default);
+ syn::custom_keyword!(catch_all);
+ syn::custom_keyword!(alternatives);
+}
+
+struct NumEnumVariantAttributes {
+ items: syn::punctuated::Punctuated<NumEnumVariantAttributeItem, syn::Token![,]>,
+}
+
+impl Parse for NumEnumVariantAttributes {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ Ok(Self {
+ items: input.parse_terminated(NumEnumVariantAttributeItem::parse)?,
+ })
+ }
+}
+
+enum NumEnumVariantAttributeItem {
+ Default(VariantDefaultAttribute),
+ CatchAll(VariantCatchAllAttribute),
+ Alternatives(VariantAlternativesAttribute),
+}
+
+impl Parse for NumEnumVariantAttributeItem {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ let lookahead = input.lookahead1();
+ if lookahead.peek(kw::default) {
+ input.parse().map(Self::Default)
+ } else if lookahead.peek(kw::catch_all) {
+ input.parse().map(Self::CatchAll)
+ } else if lookahead.peek(kw::alternatives) {
+ input.parse().map(Self::Alternatives)
+ } else {
+ Err(lookahead.error())
+ }
+ }
+}
+
+struct VariantDefaultAttribute {
+ keyword: kw::default,
+}
+
+impl Parse for VariantDefaultAttribute {
+ fn parse(input: ParseStream) -> Result<Self> {
+ Ok(Self {
+ keyword: input.parse()?,
+ })
+ }
+}
+
+impl Spanned for VariantDefaultAttribute {
+ fn span(&self) -> Span {
+ self.keyword.span()
+ }
+}
+
+struct VariantCatchAllAttribute {
+ keyword: kw::catch_all,
+}
+
+impl Parse for VariantCatchAllAttribute {
+ fn parse(input: ParseStream) -> Result<Self> {
+ Ok(Self {
+ keyword: input.parse()?,
+ })
+ }
+}
+
+impl Spanned for VariantCatchAllAttribute {
+ fn span(&self) -> Span {
+ self.keyword.span()
+ }
+}
+
+struct VariantAlternativesAttribute {
+ keyword: kw::alternatives,
+ _eq_token: syn::Token![=],
+ _bracket_token: syn::token::Bracket,
+ expressions: syn::punctuated::Punctuated<Expr, syn::Token![,]>,
+}
+
+impl Parse for VariantAlternativesAttribute {
+ fn parse(input: ParseStream) -> Result<Self> {
+ let content;
+ let keyword = input.parse()?;
+ let _eq_token = input.parse()?;
+ let _bracket_token = syn::bracketed!(content in input);
+ let expressions = content.parse_terminated(Expr::parse)?;
+ Ok(Self {
+ keyword,
+ _eq_token,
+ _bracket_token,
+ expressions,
+ })
+ }
+}
+
+impl Spanned for VariantAlternativesAttribute {
+ fn span(&self) -> Span {
+ self.keyword.span()
+ }
+}
+
+#[derive(::core::default::Default)]
+struct AttributeSpans {
+ default: Vec<Span>,
+ catch_all: Vec<Span>,
+ alternatives: Vec<Span>,
+}
+
+struct VariantInfo {
+ ident: Ident,
+ attr_spans: AttributeSpans,
+ is_default: bool,
+ is_catch_all: bool,
+ canonical_value: Expr,
+ alternative_values: Vec<Expr>,
+}
+
+impl VariantInfo {
+ fn all_values(&self) -> impl Iterator<Item = &Expr> {
+ ::core::iter::once(&self.canonical_value).chain(self.alternative_values.iter())
+ }
+
+ fn is_complex(&self) -> bool {
+ !self.alternative_values.is_empty()
+ }
+}
+
+struct EnumInfo {
+ name: Ident,
+ repr: Ident,
+ variants: Vec<VariantInfo>,
+}
+
+impl EnumInfo {
+    /// Returns whether the variants' discriminants and alternative values alone (that is,
+    /// without relying on a `default` or `catch_all` variant) cover every value of the repr.
+ fn is_naturally_exhaustive(&self) -> Result<bool> {
+ let repr_str = self.repr.to_string();
+ if !repr_str.is_empty() {
+ let suffix = repr_str
+ .strip_prefix('i')
+ .or_else(|| repr_str.strip_prefix('u'));
+ if let Some(suffix) = suffix {
+ if let Ok(bits) = suffix.parse::<u32>() {
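+                    // `checked_shl` yields `None` when the repr is at least as wide as
+                    // `usize`, in which case we conservatively report "not exhaustive".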
+ let variants = 1usize.checked_shl(bits);
+ return Ok(variants.map_or(false, |v| {
+ v == self
+ .variants
+ .iter()
+ .map(|v| v.alternative_values.len() + 1)
+ .sum()
+ }));
+ }
+ }
+ }
+ die!(self.repr.clone() => "Failed to parse repr into bit size");
+ }
+
+ fn has_default_variant(&self) -> bool {
+ self.default().is_some()
+ }
+
+ fn has_complex_variant(&self) -> bool {
+ self.variants.iter().any(|info| info.is_complex())
+ }
+
+ fn default(&self) -> Option<&Ident> {
+ self.variants
+ .iter()
+ .find(|info| info.is_default)
+ .map(|info| &info.ident)
+ }
+
+ fn catch_all(&self) -> Option<&Ident> {
+ self.variants
+ .iter()
+ .find(|info| info.is_catch_all)
+ .map(|info| &info.ident)
+ }
+
+ fn first_default_attr_span(&self) -> Option<&Span> {
+ self.variants
+ .iter()
+ .find_map(|info| info.attr_spans.default.first())
+ }
+
+ fn first_alternatives_attr_span(&self) -> Option<&Span> {
+ self.variants
+ .iter()
+ .find_map(|info| info.attr_spans.alternatives.first())
+ }
+
+ fn variant_idents(&self) -> Vec<Ident> {
+ self.variants
+ .iter()
+ .map(|variant| variant.ident.clone())
+ .collect()
+ }
+
+ fn expression_idents(&self) -> Vec<Vec<Ident>> {
+ self.variants
+ .iter()
+ .filter(|variant| !variant.is_catch_all)
+ .map(|info| {
+ let indices = 0..(info.alternative_values.len() + 1);
+ indices
+ .map(|index| format_ident!("{}__num_enum_{}__", info.ident, index))
+ .collect()
+ })
+ .collect()
+ }
+
+ fn variant_expressions(&self) -> Vec<Vec<Expr>> {
+ self.variants
+ .iter()
+ .map(|variant| variant.all_values().cloned().collect())
+ .collect()
+ }
+}
+
+impl Parse for EnumInfo {
+ fn parse(input: ParseStream) -> Result<Self> {
+ Ok({
+ let input: DeriveInput = input.parse()?;
+ let name = input.ident;
+ let data = match input.data {
+ Data::Enum(data) => data,
+ Data::Union(data) => die!(data.union_token => "Expected enum but found union"),
+ Data::Struct(data) => die!(data.struct_token => "Expected enum but found struct"),
+ };
+
+ let repr: Ident = {
+ let mut attrs = input.attrs.into_iter();
+ loop {
+ if let Some(attr) = attrs.next() {
+ if let Ok(Meta::List(meta_list)) = attr.parse_meta() {
+ if let Some(ident) = meta_list.path.get_ident() {
+ if ident == "repr" {
+ let mut nested = meta_list.nested.iter();
+ if nested.len() != 1 {
+ die!(attr =>
+ "Expected exactly one `repr` argument"
+ );
+ }
+ let repr = nested.next().unwrap();
+ let repr: Ident = parse_quote! {
+ #repr
+ };
+ if repr == "C" {
+ die!(repr =>
+ "repr(C) doesn't have a well defined size"
+ );
+ } else {
+ break repr;
+ }
+ }
+ }
+ }
+ } else {
+ die!("Missing `#[repr({Integer})]` attribute");
+ }
+ }
+ };
+
+ let mut variants: Vec<VariantInfo> = vec![];
+ let mut has_default_variant: bool = false;
+ let mut has_catch_all_variant: bool = false;
+
+            // Set to keep track of the used discriminants and alternative values.
+ let mut discriminant_int_val_set = BTreeSet::new();
+
+ let mut next_discriminant = literal(0);
+ for variant in data.variants.into_iter() {
+ let ident = variant.ident.clone();
+
+ let discriminant = match &variant.discriminant {
+ Some(d) => d.1.clone(),
+ None => next_discriminant.clone(),
+ };
+
+ let mut attr_spans: AttributeSpans = Default::default();
+ let mut raw_alternative_values: Vec<Expr> = vec![];
+ // Keep the attribute around for better error reporting.
+ let mut alt_attr_ref: Vec<&Attribute> = vec![];
+
+ // `#[num_enum(default)]` is required by `#[derive(FromPrimitive)]`
+ // and forbidden by `#[derive(UnsafeFromPrimitive)]`, so we need to
+ // keep track of whether we encountered such an attribute:
+ let mut is_default: bool = false;
+ let mut is_catch_all: bool = false;
+
+ for attribute in &variant.attrs {
+ if attribute.path.is_ident("default") {
+ if has_default_variant {
+ die!(attribute =>
+ "Multiple variants marked `#[default]` or `#[num_enum(default)]` found"
+ );
+ } else if has_catch_all_variant {
+ die!(attribute =>
+ "Attribute `default` is mutually exclusive with `catch_all`"
+ );
+ }
+ attr_spans.default.push(attribute.span());
+ is_default = true;
+ has_default_variant = true;
+ }
+
+ if attribute.path.is_ident("num_enum") {
+ match attribute.parse_args_with(NumEnumVariantAttributes::parse) {
+ Ok(variant_attributes) => {
+ for variant_attribute in variant_attributes.items {
+ match variant_attribute {
+ NumEnumVariantAttributeItem::Default(default) => {
+ if has_default_variant {
+ die!(default.keyword =>
+ "Multiple variants marked `#[default]` or `#[num_enum(default)]` found"
+ );
+ } else if has_catch_all_variant {
+ die!(default.keyword =>
+ "Attribute `default` is mutually exclusive with `catch_all`"
+ );
+ }
+ attr_spans.default.push(default.span());
+ is_default = true;
+ has_default_variant = true;
+ }
+ NumEnumVariantAttributeItem::CatchAll(catch_all) => {
+ if has_catch_all_variant {
+ die!(catch_all.keyword =>
+ "Multiple variants marked with `#[num_enum(catch_all)]`"
+ );
+ } else if has_default_variant {
+ die!(catch_all.keyword =>
+ "Attribute `catch_all` is mutually exclusive with `default`"
+ );
+ }
+
+ match variant
+ .fields
+ .iter()
+ .collect::<Vec<_>>()
+ .as_slice()
+ {
+ [syn::Field {
+ ty: syn::Type::Path(syn::TypePath { path, .. }),
+ ..
+ }] if path.is_ident(&repr) => {
+ attr_spans.catch_all.push(catch_all.span());
+ is_catch_all = true;
+ has_catch_all_variant = true;
+ }
+ _ => {
+ die!(catch_all.keyword =>
+ "Variant with `catch_all` must be a tuple with exactly 1 field matching the repr type"
+ );
+ }
+ }
+ }
+ NumEnumVariantAttributeItem::Alternatives(alternatives) => {
+ attr_spans.alternatives.push(alternatives.span());
+ raw_alternative_values.extend(alternatives.expressions);
+ alt_attr_ref.push(attribute);
+ }
+ }
+ }
+ }
+ Err(err) => {
+ if cfg!(not(feature = "complex-expressions")) {
+ let attribute_str = format!("{}", attribute.tokens);
+ if attribute_str.contains("alternatives")
+ && attribute_str.contains("..")
+ {
+ // Give a nice error message suggesting how to fix the problem.
+ die!(attribute => "Ranges are only supported as num_enum alternate values if the `complex-expressions` feature of the crate `num_enum` is enabled".to_string())
+ }
+ }
+ die!(attribute =>
+ format!("Invalid attribute: {}", err)
+ );
+ }
+ }
+ }
+ }
+
+ if !is_catch_all {
+ match &variant.fields {
+ Fields::Named(_) | Fields::Unnamed(_) => {
+ die!(variant => format!("`{}` only supports unit variants (with no associated data), but `{}::{}` was not a unit variant.", get_crate_name(), name, ident));
+ }
+ Fields::Unit => {}
+ }
+ }
+
+ let discriminant_value = parse_discriminant(&discriminant)?;
+
+ // Check for collision.
+ // We can't do const evaluation, or even compare arbitrary Exprs,
+            // so we can't fully check for duplicates when expressions are involved.
+            // That's not the end of the world; we'll just end up with compile errors for
+            // matches with duplicate branches in generated code instead of nice friendly error messages.
+ if let DiscriminantValue::Literal(canonical_value_int) = discriminant_value {
+ if discriminant_int_val_set.contains(&canonical_value_int) {
+ die!(ident => format!("The discriminant '{}' collides with a value attributed to a previous variant", canonical_value_int))
+ }
+ }
+
+ // Deal with the alternative values.
+ let mut flattened_alternative_values = Vec::new();
+ let mut flattened_raw_alternative_values = Vec::new();
+ for raw_alternative_value in raw_alternative_values {
+ let expanded_values = parse_alternative_values(&raw_alternative_value)?;
+ for expanded_value in expanded_values {
+ flattened_alternative_values.push(expanded_value);
+ flattened_raw_alternative_values.push(raw_alternative_value.clone())
+ }
+ }
+
+ if !flattened_alternative_values.is_empty() {
+ let alternate_int_values = flattened_alternative_values
+ .into_iter()
+ .map(|v| {
+ match v {
+ DiscriminantValue::Literal(value) => Ok(value),
+ DiscriminantValue::Expr(expr) => {
+ if let Expr::Range(_) = expr {
+ if cfg!(not(feature = "complex-expressions")) {
+ // Give a nice error message suggesting how to fix the problem.
+ die!(expr => "Ranges are only supported as num_enum alternate values if the `complex-expressions` feature of the crate `num_enum` is enabled".to_string())
+ }
+ }
+ // We can't do uniqueness checking on non-literals, so we don't allow them as alternate values.
+ // We could probably allow them, but there doesn't seem to be much of a use-case,
+ // and it's easier to give good error messages about duplicate values this way,
+ // rather than rustc errors on conflicting match branches.
+ die!(expr => "Only literals are allowed as num_enum alternate values".to_string())
+ },
+ }
+ })
+ .collect::<Result<Vec<i128>>>()?;
+ let mut sorted_alternate_int_values = alternate_int_values.clone();
+ sorted_alternate_int_values.sort_unstable();
+ let sorted_alternate_int_values = sorted_alternate_int_values;
+
+ // Check if the current discriminant is not in the alternative values.
+ if let DiscriminantValue::Literal(canonical_value_int) = discriminant_value {
+ if let Some(index) = alternate_int_values
+ .iter()
+ .position(|&x| x == canonical_value_int)
+ {
+ die!(&flattened_raw_alternative_values[index] => format!("'{}' in the alternative values is already attributed as the discriminant of this variant", canonical_value_int));
+ }
+ }
+
+                // Search for duplicates; the vec is sorted, so duplicates are adjacent. Error out if any are found.
+ if (1..sorted_alternate_int_values.len()).any(|i| {
+ sorted_alternate_int_values[i] == sorted_alternate_int_values[i - 1]
+ }) {
+ let attr = *alt_attr_ref.last().unwrap();
+ die!(attr => "There is duplication in the alternative values");
+ }
+                // Check whether any of these alternative values were already attributed to a previous variant.
+                // (discriminant_int_val_set is a BTreeSet, so iter().next_back() is the maximum in the set.)
+ if let Some(last_upper_val) = discriminant_int_val_set.iter().next_back() {
+ if sorted_alternate_int_values.first().unwrap() <= last_upper_val {
+ for (index, val) in alternate_int_values.iter().enumerate() {
+ if discriminant_int_val_set.contains(val) {
+ die!(&flattened_raw_alternative_values[index] => format!("'{}' in the alternative values is already attributed to a previous variant", val));
+ }
+ }
+ }
+ }
+
+ // Reconstruct the alternative values as a vec of Exprs, but sorted.
+ flattened_raw_alternative_values = sorted_alternate_int_values
+ .iter()
+ .map(|val| literal(val.to_owned()))
+ .collect();
+
+ // Add the alternative values to the set to keep track of them.
+ discriminant_int_val_set.extend(sorted_alternate_int_values);
+ }
+
+ // Add the current discriminant to the set to keep track of it.
+ if let DiscriminantValue::Literal(canonical_value_int) = discriminant_value {
+ discriminant_int_val_set.insert(canonical_value_int);
+ }
+
+ variants.push(VariantInfo {
+ ident,
+ attr_spans,
+ is_default,
+ is_catch_all,
+ canonical_value: discriminant,
+ alternative_values: flattened_raw_alternative_values,
+ });
+
+ // Get the next value for the discriminant.
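+ // (For example, for a hypothetical `enum E { A = 5, B }` the implicit discriminant of
+ // `B` is 6. When the previous discriminant is a non-literal expression, the `+ 1` is
+ // emitted symbolically via `#repr::wrapping_add` instead.)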
+ next_discriminant = match discriminant_value {
+ DiscriminantValue::Literal(int_value) => literal(int_value.wrapping_add(1)),
+ DiscriminantValue::Expr(expr) => {
+ parse_quote! {
+ #repr::wrapping_add(#expr, 1)
+ }
+ }
+ }
+ }
+
+ EnumInfo {
+ name,
+ repr,
+ variants,
+ }
+ })
+ }
+}
+
+/// Implements `Into<Primitive>` for a `#[repr(Primitive)] enum`.
+///
+/// (It actually implements `From<Enum> for Primitive`)
+///
+/// Allows turning an enum into a primitive.
+/// ----------------------------------------------
+///
+/// ```rust
+/// use num_enum::IntoPrimitive;
+///
+/// #[derive(IntoPrimitive)]
+/// #[repr(u8)]
+/// enum Number {
+/// Zero,
+/// One,
+/// }
+///
+/// let zero: u8 = Number::Zero.into();
+/// assert_eq!(zero, 0u8);
+/// ```
+#[proc_macro_derive(IntoPrimitive, attributes(num_enum, catch_all))]
+pub fn derive_into_primitive(input: TokenStream) -> TokenStream {
+ let enum_info = parse_macro_input!(input as EnumInfo);
+ let catch_all = enum_info.catch_all();
+ let name = &enum_info.name;
+ let repr = &enum_info.repr;
+
+ let body = if let Some(catch_all_ident) = catch_all {
+ quote! {
+ match enum_value {
+ #name::#catch_all_ident(raw) => raw,
+ rest => unsafe { *(&rest as *const #name as *const Self) }
+ }
+ }
+ } else {
+ quote! { enum_value as Self }
+ };
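+ // Illustration (hypothetical enum, not part of this crate): for
+ // `#[derive(IntoPrimitive)] #[repr(u8)] enum Number { Zero, #[num_enum(catch_all)] Other(u8) }`
+ // the derive expands to roughly:
+ //
+ //     impl From<Number> for u8 {
+ //         fn from(enum_value: Number) -> Self {
+ //             match enum_value {
+ //                 Number::Other(raw) => raw,
+ //                 rest => unsafe { *(&rest as *const Number as *const Self) },
+ //             }
+ //         }
+ //     }
+ //
+ // The unsafe read is sound because a `#[repr(u8)]` enum stores its `u8` tag first, so
+ // casting a reference to any remaining (unit) variant reads that variant's discriminant.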
+
+ TokenStream::from(quote! {
+ impl From<#name> for #repr {
+ #[inline]
+ fn from (enum_value: #name) -> Self
+ {
+ #body
+ }
+ }
+ })
+}
+
+/// Implements `From<Primitive>` for a `#[repr(Primitive)] enum`.
+///
+/// Turning a primitive into an enum with `from`.
+/// ----------------------------------------------
+///
+/// ```rust
+/// use num_enum::FromPrimitive;
+///
+/// #[derive(Debug, Eq, PartialEq, FromPrimitive)]
+/// #[repr(u8)]
+/// enum Number {
+/// Zero,
+/// #[num_enum(default)]
+/// NonZero,
+/// }
+///
+/// let zero = Number::from(0u8);
+/// assert_eq!(zero, Number::Zero);
+///
+/// let one = Number::from(1u8);
+/// assert_eq!(one, Number::NonZero);
+///
+/// let two = Number::from(2u8);
+/// assert_eq!(two, Number::NonZero);
+/// ```
+#[proc_macro_derive(FromPrimitive, attributes(num_enum, default, catch_all))]
+pub fn derive_from_primitive(input: TokenStream) -> TokenStream {
+ let enum_info: EnumInfo = parse_macro_input!(input);
+ let krate = Ident::new(&get_crate_name(), Span::call_site());
+
+ let is_naturally_exhaustive = enum_info.is_naturally_exhaustive();
+ let catch_all_body = match is_naturally_exhaustive {
+ Ok(is_naturally_exhaustive) => {
+ if is_naturally_exhaustive {
+ quote! { unreachable!("exhaustive enum") }
+ } else if let Some(default_ident) = enum_info.default() {
+ quote! { Self::#default_ident }
+ } else if let Some(catch_all_ident) = enum_info.catch_all() {
+ quote! { Self::#catch_all_ident(number) }
+ } else {
+ let span = Span::call_site();
+ let message =
+ "#[derive(num_enum::FromPrimitive)] requires enum to be exhaustive, or a variant marked with `#[default]`, `#[num_enum(default)]`, or `#[num_enum(catch_all)`";
+ return syn::Error::new(span, message).to_compile_error().into();
+ }
+ }
+ Err(err) => {
+ return err.to_compile_error().into();
+ }
+ };
+
+ let EnumInfo {
+ ref name, ref repr, ..
+ } = enum_info;
+
+ let variant_idents: Vec<Ident> = enum_info.variant_idents();
+ let expression_idents: Vec<Vec<Ident>> = enum_info.expression_idents();
+ let variant_expressions: Vec<Vec<Expr>> = enum_info.variant_expressions();
+
+ debug_assert_eq!(variant_idents.len(), variant_expressions.len());
+
+ TokenStream::from(quote! {
+ impl ::#krate::FromPrimitive for #name {
+ type Primitive = #repr;
+
+ fn from_primitive(number: Self::Primitive) -> Self {
+ // Use intermediate const(s) so that enums defined like
+ // `Two = ONE + 1u8` work properly.
+ #![allow(non_upper_case_globals)]
+ #(
+ #(
+ const #expression_idents: #repr = #variant_expressions;
+ )*
+ )*
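+ // (The `deny` below turns any accidental overlap between variant values into a hard
+ // compile error in the generated `match`, while the final wildcard arm is explicitly
+ // `allow`ed to be unreachable for enums that already cover every value of the repr.)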
+ #[deny(unreachable_patterns)]
+ match number {
+ #(
+ #( #expression_idents )|*
+ => Self::#variant_idents,
+ )*
+ #[allow(unreachable_patterns)]
+ _ => #catch_all_body,
+ }
+ }
+ }
+
+ impl ::core::convert::From<#repr> for #name {
+ #[inline]
+ fn from (
+ number: #repr,
+ ) -> Self {
+ ::#krate::FromPrimitive::from_primitive(number)
+ }
+ }
+
+ // Since `From<#repr> for #name` is implemented above, the Rust stdlib's blanket
+ // `impl TryFrom<U> for T where U: Into<T>` gives us `#name: TryFrom<#repr>` for free!
+
+ impl ::#krate::TryFromPrimitive for #name {
+ type Primitive = #repr;
+
+ const NAME: &'static str = stringify!(#name);
+
+ #[inline]
+ fn try_from_primitive (
+ number: Self::Primitive,
+ ) -> ::core::result::Result<
+ Self,
+ ::#krate::TryFromPrimitiveError<Self>,
+ >
+ {
+ Ok(::#krate::FromPrimitive::from_primitive(number))
+ }
+ }
+ })
+}
+
+/// Implements `TryFrom<Primitive>` for a `#[repr(Primitive)] enum`.
+///
+/// Attempting to turn a primitive into an enum with `try_from`.
+/// ----------------------------------------------
+///
+/// ```rust
+/// use num_enum::TryFromPrimitive;
+/// use std::convert::TryFrom;
+///
+/// #[derive(Debug, Eq, PartialEq, TryFromPrimitive)]
+/// #[repr(u8)]
+/// enum Number {
+/// Zero,
+/// One,
+/// }
+///
+/// let zero = Number::try_from(0u8);
+/// assert_eq!(zero, Ok(Number::Zero));
+///
+/// let three = Number::try_from(3u8);
+/// assert_eq!(
+/// three.unwrap_err().to_string(),
+/// "No discriminant in enum `Number` matches the value `3`",
+/// );
+/// ```
+#[proc_macro_derive(TryFromPrimitive, attributes(num_enum))]
+pub fn derive_try_from_primitive(input: TokenStream) -> TokenStream {
+ let enum_info: EnumInfo = parse_macro_input!(input);
+ let krate = Ident::new(&get_crate_name(), Span::call_site());
+
+ let EnumInfo {
+ ref name, ref repr, ..
+ } = enum_info;
+
+ let variant_idents: Vec<Ident> = enum_info.variant_idents();
+ let expression_idents: Vec<Vec<Ident>> = enum_info.expression_idents();
+ let variant_expressions: Vec<Vec<Expr>> = enum_info.variant_expressions();
+
+ debug_assert_eq!(variant_idents.len(), variant_expressions.len());
+
+ let default_arm = match enum_info.default() {
+ Some(ident) => {
+ quote! {
+ _ => ::core::result::Result::Ok(
+ #name::#ident
+ )
+ }
+ }
+ None => {
+ quote! {
+ _ => ::core::result::Result::Err(
+ ::#krate::TryFromPrimitiveError { number }
+ )
+ }
+ }
+ };
+
+ TokenStream::from(quote! {
+ impl ::#krate::TryFromPrimitive for #name {
+ type Primitive = #repr;
+
+ const NAME: &'static str = stringify!(#name);
+
+ fn try_from_primitive (
+ number: Self::Primitive,
+ ) -> ::core::result::Result<
+ Self,
+ ::#krate::TryFromPrimitiveError<Self>
+ > {
+ // Use intermediate const(s) so that enums defined like
+ // `Two = ONE + 1u8` work properly.
+ #![allow(non_upper_case_globals)]
+ #(
+ #(
+ const #expression_idents: #repr = #variant_expressions;
+ )*
+ )*
+ #[deny(unreachable_patterns)]
+ match number {
+ #(
+ #( #expression_idents )|*
+ => ::core::result::Result::Ok(Self::#variant_idents),
+ )*
+ #[allow(unreachable_patterns)]
+ #default_arm,
+ }
+ }
+ }
+
+ impl ::core::convert::TryFrom<#repr> for #name {
+ type Error = ::#krate::TryFromPrimitiveError<Self>;
+
+ #[inline]
+ fn try_from (
+ number: #repr,
+ ) -> ::core::result::Result<Self, ::#krate::TryFromPrimitiveError<Self>>
+ {
+ ::#krate::TryFromPrimitive::try_from_primitive(number)
+ }
+ }
+ })
+}
+
+#[cfg(feature = "proc-macro-crate")]
+fn get_crate_name() -> String {
+ let found_crate = proc_macro_crate::crate_name("num_enum").unwrap_or_else(|err| {
+ eprintln!("Warning: {}\n => defaulting to `num_enum`", err,);
+ proc_macro_crate::FoundCrate::Itself
+ });
+
+ match found_crate {
+ proc_macro_crate::FoundCrate::Itself => String::from("num_enum"),
+ proc_macro_crate::FoundCrate::Name(name) => name,
+ }
+}
+
+// Don't depend on proc-macro-crate in no_std environments because it causes an awkward dependency
+// on serde with std.
+//
+// no_std dependents of num_enum cannot rename the num_enum crate when they depend on it. Sorry.
+//
+// See https://github.com/illicitonion/num_enum/issues/18
+#[cfg(not(feature = "proc-macro-crate"))]
+fn get_crate_name() -> String {
+ String::from("num_enum")
+}
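+
+// For illustration (hypothetical manifest, not from the num_enum docs): with the
+// `proc-macro-crate` feature enabled, a dependent crate may rename the dependency,
+//
+//     [dependencies]
+//     renamed_enum = { package = "num_enum", version = "0.5" }
+//
+// and the derives keep working, because `get_crate_name()` resolves the local name and
+// the generated code then refers to `::renamed_enum::FromPrimitive` and friends.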
+
+/// Generates an `unsafe fn from_unchecked(number: Primitive) -> Self`
+/// associated function.
+///
+/// Allows unsafely turning a primitive into an enum with `from_unchecked`.
+/// -------------------------------------------------------------
+///
+/// If you're really certain a conversion will succeed, and want to avoid a small amount of overhead, you can use unsafe
+/// code to do this conversion. Unless you have data showing that the match statement generated in the `try_from` above is a
+/// bottleneck for you, you should avoid doing this, as the unsafe code has the potential to cause serious memory-safety
+/// issues in your program.
+///
+/// ```rust
+/// use num_enum::UnsafeFromPrimitive;
+///
+/// #[derive(Debug, Eq, PartialEq, UnsafeFromPrimitive)]
+/// #[repr(u8)]
+/// enum Number {
+/// Zero,
+/// One,
+/// }
+///
+/// fn main() {
+/// assert_eq!(
+/// Number::Zero,
+/// unsafe { Number::from_unchecked(0_u8) },
+/// );
+/// assert_eq!(
+/// Number::One,
+/// unsafe { Number::from_unchecked(1_u8) },
+/// );
+/// }
+///
+/// unsafe fn undefined_behavior() {
+/// let _ = Number::from_unchecked(2); // 2 is not a valid discriminant!
+/// }
+/// ```
+#[proc_macro_derive(UnsafeFromPrimitive, attributes(num_enum))]
+pub fn derive_unsafe_from_primitive(stream: TokenStream) -> TokenStream {
+ let enum_info = parse_macro_input!(stream as EnumInfo);
+
+ if enum_info.has_default_variant() {
+ let span = enum_info
+ .first_default_attr_span()
+ .cloned()
+ .expect("Expected span");
+ let message = "#[derive(UnsafeFromPrimitive)] does not support `#[num_enum(default)]`";
+ return syn::Error::new(span, message).to_compile_error().into();
+ }
+
+ if enum_info.has_complex_variant() {
+ let span = enum_info
+ .first_alternatives_attr_span()
+ .cloned()
+ .expect("Expected span");
+ let message =
+ "#[derive(UnsafeFromPrimitive)] does not support `#[num_enum(alternatives = [..])]`";
+ return syn::Error::new(span, message).to_compile_error().into();
+ }
+
+ let EnumInfo {
+ ref name, ref repr, ..
+ } = enum_info;
+
+ let doc_string = LitStr::new(
+ &format!(
+ r#"
+Transmutes `number: {repr}` into a [`{name}`].
+
+# Safety
+
+ - `number` must represent a valid discriminant of [`{name}`]
+"#,
+ repr = repr,
+ name = name,
+ ),
+ Span::call_site(),
+ );
+
+ TokenStream::from(quote! {
+ impl #name {
+ #[doc = #doc_string]
+ #[inline]
+ pub unsafe fn from_unchecked(number: #repr) -> Self {
+ ::core::mem::transmute(number)
+ }
+ }
+ })
+}
+
+/// Implements `core::default::Default` for a `#[repr(Primitive)] enum`.
+///
+/// Whichever variant has the `#[default]` or `#[num_enum(default)]` attribute will be returned.
+/// ----------------------------------------------
+///
+/// ```rust
+/// #[derive(Debug, Eq, PartialEq, num_enum::Default)]
+/// #[repr(u8)]
+/// enum Number {
+/// Zero,
+/// #[default]
+/// One,
+/// }
+///
+/// assert_eq!(Number::One, Number::default());
+/// assert_eq!(Number::One, <Number as ::core::default::Default>::default());
+/// ```
+#[proc_macro_derive(Default, attributes(num_enum, default))]
+pub fn derive_default(stream: TokenStream) -> TokenStream {
+ let enum_info = parse_macro_input!(stream as EnumInfo);
+
+ let default_ident = match enum_info.default() {
+ Some(ident) => ident,
+ None => {
+ let span = Span::call_site();
+ let message =
+ "#[derive(num_enum::Default)] requires enum to be exhaustive, or a variant marked with `#[default]` or `#[num_enum(default)]`";
+ return syn::Error::new(span, message).to_compile_error().into();
+ }
+ };
+
+ let EnumInfo { ref name, .. } = enum_info;
+
+ TokenStream::from(quote! {
+ impl ::core::default::Default for #name {
+ #[inline]
+ fn default() -> Self {
+ Self::#default_ident
+ }
+ }
+ })
+}
diff --git a/rust/vendor/num_threads/.cargo-checksum.json b/rust/vendor/num_threads/.cargo-checksum.json
new file mode 100644
index 0000000..603e1c7
--- /dev/null
+++ b/rust/vendor/num_threads/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"ed1dbfb8f9eb836549858fb0ca704eddaa480cbe576e8dd50ebca1529dca21b3","LICENSE-Apache":"c69b72788ec261765e460532016b4bb78372f238e449b5e70e3268ec1e18fd15","LICENSE-MIT":"b4bf94a9fceb8846320fda938ee53fc16b506572609da5cf1d2d289a5597a8f8","src/apple.rs":"018e729ea67e8e17428d2e8d93f28ed0f7e7c506fcf78b56f56113235ce7dfcf","src/freebsd.rs":"683636294a62d6b958a5de800b52ddfea609234921e0583906272aacc71e18e5","src/imp.rs":"8cc9d07f05b0aa70e9997648abce7f79bd758c6fc76040d1c8f7beb7bf551e9d","src/lib.rs":"c53382612069b9552846414d1508cbb1401c500a4f34e21addd336001ebd8b7e","src/linux.rs":"67e02ecd105b8a421227bf72814e8388a4f77df1d8a44b8902bc8926f7e6698c"},"package":"2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44"} \ No newline at end of file
diff --git a/rust/vendor/num_threads/Cargo.toml b/rust/vendor/num_threads/Cargo.toml
new file mode 100644
index 0000000..abfb41e
--- /dev/null
+++ b/rust/vendor/num_threads/Cargo.toml
@@ -0,0 +1,36 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+name = "num_threads"
+version = "0.1.6"
+authors = ["Jacob Pratt <open-source@jhpratt.dev>"]
+include = [
+ "src/**/*",
+ "LICENSE-*",
+]
+description = "A minimal library that determines the number of running threads for the current process."
+categories = [
+ "api-bindings",
+ "hardware-support",
+ "os",
+]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/jhpratt/num_threads"
+
+[package.metadata.docs.rs]
+all-features = true
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+
+[target."cfg(any(target_os = \"macos\", target_os = \"ios\", target_os = \"freebsd\"))".dependencies.libc]
+version = "0.2.107"
diff --git a/rust/vendor/num_threads/LICENSE-Apache b/rust/vendor/num_threads/LICENSE-Apache
new file mode 100644
index 0000000..8119b40
--- /dev/null
+++ b/rust/vendor/num_threads/LICENSE-Apache
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2021 Jacob Pratt
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/rust/vendor/num_threads/LICENSE-MIT b/rust/vendor/num_threads/LICENSE-MIT
new file mode 100644
index 0000000..8daf823
--- /dev/null
+++ b/rust/vendor/num_threads/LICENSE-MIT
@@ -0,0 +1,19 @@
+Copyright (c) 2021 Jacob Pratt
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/rust/vendor/num_threads/src/apple.rs b/rust/vendor/num_threads/src/apple.rs
new file mode 100644
index 0000000..8d71b54
--- /dev/null
+++ b/rust/vendor/num_threads/src/apple.rs
@@ -0,0 +1,45 @@
+extern crate libc;
+
+use std::num::NonZeroUsize;
+
+use self::libc::{kern_return_t, mach_msg_type_number_t, mach_port_t, thread_t};
+
+// This constant is from
+// /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/
+// usr/include/mach/machine/thread_state.h.
+//
+// It has not been updated since Apple devices started to support 64-bit ARM (iOS), so it
+// should be very stable.
+const THREAD_STATE_MAX: i32 = 1296;
+#[allow(non_camel_case_types)]
+// https://github.com/apple/darwin-xnu/blob/a1babec6b135d1f35b2590a1990af3c5c5393479/osfmk/mach/mach_types.defs#L155
+type task_inspect_t = mach_port_t;
+#[allow(non_camel_case_types)]
+// https://github.com/apple/darwin-xnu/blob/a1babec6b135d1f35b2590a1990af3c5c5393479/osfmk/mach/mach_types.defs#L238
+type thread_array_t = [thread_t; THREAD_STATE_MAX as usize];
+
+extern "C" {
+ // https://developer.apple.com/documentation/kernel/1537751-task_threads/
+ fn task_threads(
+ target_task: task_inspect_t,
+ act_list: *mut thread_array_t,
+ act_listCnt: *mut mach_msg_type_number_t,
+ ) -> kern_return_t;
+}
+
+pub(crate) fn num_threads() -> Option<NonZeroUsize> {
+ // http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/task_threads.html
+ let mut thread_state = [0u32; THREAD_STATE_MAX as usize];
+ let mut thread_count = 0;
+
+ // Safety: `mach_task_self` always returns a valid value, `thread_state` is large enough, and
+ // both it and `thread_count` are writable.
+ let result =
+ unsafe { task_threads(libc::mach_task_self(), &mut thread_state, &mut thread_count) };
+
+ if result == libc::KERN_SUCCESS {
+ NonZeroUsize::new(thread_count as usize)
+ } else {
+ None
+ }
+}
diff --git a/rust/vendor/num_threads/src/freebsd.rs b/rust/vendor/num_threads/src/freebsd.rs
new file mode 100644
index 0000000..1c3cbc8
--- /dev/null
+++ b/rust/vendor/num_threads/src/freebsd.rs
@@ -0,0 +1,36 @@
+extern crate libc;
+
+use std::num::NonZeroUsize;
+use std::{mem, ptr};
+
+pub(crate) fn num_threads() -> Option<NonZeroUsize> {
+ // Safety: `sysctl` and `getpid` are both thread-safe.
+ // `kip` is only accessed if sysctl() succeeds and agrees with the expected size,
+ // and the data is only trusted if both its embedded size and pid match expectations.
+ unsafe {
+ let pid = libc::getpid();
+ let mib: [libc::c_int; 4] = [libc::CTL_KERN, libc::KERN_PROC, libc::KERN_PROC_PID, pid];
+ let mut kip: libc::kinfo_proc = mem::zeroed();
+ let expected_kip_len = mem::size_of_val(&kip);
+ let mut kip_len = expected_kip_len;
+
+ let ret = libc::sysctl(
+ mib.as_ptr(),
+ mib.len() as u32,
+ &mut kip as *mut _ as *mut libc::c_void,
+ &mut kip_len,
+ ptr::null(),
+ 0,
+ );
+
+ if ret == 0
+ && kip_len == expected_kip_len
+ && kip.ki_structsize == expected_kip_len as i32
+ && kip.ki_pid == pid
+ {
+ NonZeroUsize::new(kip.ki_numthreads as usize)
+ } else {
+ None
+ }
+ }
+}
diff --git a/rust/vendor/num_threads/src/imp.rs b/rust/vendor/num_threads/src/imp.rs
new file mode 100644
index 0000000..b12465d
--- /dev/null
+++ b/rust/vendor/num_threads/src/imp.rs
@@ -0,0 +1,7 @@
+//! Fallback if no OS matches.
+
+use std::num::NonZeroUsize;
+
+pub(crate) fn num_threads() -> Option<NonZeroUsize> {
+ None
+}
diff --git a/rust/vendor/num_threads/src/lib.rs b/rust/vendor/num_threads/src/lib.rs
new file mode 100644
index 0000000..c213802
--- /dev/null
+++ b/rust/vendor/num_threads/src/lib.rs
@@ -0,0 +1,64 @@
+//! Minimum supported Rust version: 1.28
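+//!
+//! A minimal usage sketch:
+//!
+//! ```rust
+//! fn main() {
+//!     match num_threads::num_threads() {
+//!         Some(count) => println!("this process has {} threads", count),
+//!         None => println!("could not determine the number of threads"),
+//!     }
+//! }
+//! ```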
+
+use std::num::NonZeroUsize;
+
+#[cfg_attr(any(target_os = "linux", target_os = "android"), path = "linux.rs")]
+#[cfg_attr(target_os = "freebsd", path = "freebsd.rs")]
+#[cfg_attr(any(target_os = "macos", target_os = "ios"), path = "apple.rs")]
+mod imp;
+
+/// Obtain the number of threads currently part of the active process. Returns `None` if the number
+/// of threads cannot be determined.
+pub fn num_threads() -> Option<NonZeroUsize> {
+ imp::num_threads()
+}
+
+/// Determine if the current process is single-threaded. Returns `None` if the number of threads
+/// cannot be determined.
+pub fn is_single_threaded() -> Option<bool> {
+ num_threads().map(|n| n.get() == 1)
+}
+
+#[cfg(test)]
+mod test {
+ use std::num::NonZeroUsize;
+
+ // Run each expression in its own thread.
+ macro_rules! threaded {
+ ($first:expr;) => {
+ $first;
+ };
+ ($first:expr; $($rest:expr;)*) => {
+ $first;
+ ::std::thread::spawn(|| {
+ threaded!($($rest;)*);
+ })
+ .join()
+ .unwrap();
+ };
+ }
+
+ #[test]
+ fn num_threads() {
+ threaded! {
+ assert_eq!(super::num_threads().map(NonZeroUsize::get), Some(1));
+ assert_eq!(super::num_threads().map(NonZeroUsize::get), Some(2));
+ assert_eq!(super::num_threads().map(NonZeroUsize::get), Some(3));
+ assert_eq!(super::num_threads().map(NonZeroUsize::get), Some(4));
+ assert_eq!(super::num_threads().map(NonZeroUsize::get), Some(5));
+ assert_eq!(super::num_threads().map(NonZeroUsize::get), Some(6));
+ }
+ }
+
+ #[test]
+ fn is_single_threaded() {
+ threaded! {
+ assert_eq!(super::is_single_threaded(), Some(true));
+ assert_eq!(super::is_single_threaded(), Some(false));
+ assert_eq!(super::is_single_threaded(), Some(false));
+ assert_eq!(super::is_single_threaded(), Some(false));
+ assert_eq!(super::is_single_threaded(), Some(false));
+ assert_eq!(super::is_single_threaded(), Some(false));
+ }
+ }
+}
diff --git a/rust/vendor/num_threads/src/linux.rs b/rust/vendor/num_threads/src/linux.rs
new file mode 100644
index 0000000..641b6b1
--- /dev/null
+++ b/rust/vendor/num_threads/src/linux.rs
@@ -0,0 +1,14 @@
+use std::fs;
+use std::num::NonZeroUsize;
+
+pub(crate) fn num_threads() -> Option<NonZeroUsize> {
+ fs::read_to_string("/proc/self/stat")
+ .ok()
+ .as_ref()
+ // Skip past the pid and (process name) fields
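+ // (rsplit on ')' is used because the process name may itself contain spaces or
+ // parentheses; everything after the last ')' is the space-separated remainder.)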
+ .and_then(|stat| stat.rsplit(')').next())
+ // 20th field, less the two we skipped
+ .and_then(|rstat| rstat.split_whitespace().nth(17))
+ .and_then(|num_threads| num_threads.parse::<usize>().ok())
+ .and_then(NonZeroUsize::new)
+}