From 26a029d407be480d791972afb5975cf62c9360a6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 19 Apr 2024 02:47:55 +0200 Subject: Adding upstream version 124.0.1. Signed-off-by: Daniel Baumann --- .../rust/minimal-lexical/.cargo-checksum.json | 1 + third_party/rust/minimal-lexical/CHANGELOG | 38 + .../rust/minimal-lexical/CODE_OF_CONDUCT.md | 141 +++ third_party/rust/minimal-lexical/Cargo.toml | 33 + third_party/rust/minimal-lexical/LICENSE-APACHE | 201 ++++ third_party/rust/minimal-lexical/LICENSE-MIT | 23 + third_party/rust/minimal-lexical/LICENSE.md | 37 + third_party/rust/minimal-lexical/README.md | 102 ++ third_party/rust/minimal-lexical/clippy.toml | 1 + third_party/rust/minimal-lexical/rustfmt.toml | 16 + .../rust/minimal-lexical/src/bellerophon.rs | 391 +++++++ third_party/rust/minimal-lexical/src/bigint.rs | 788 +++++++++++++ .../rust/minimal-lexical/src/extended_float.rs | 24 + third_party/rust/minimal-lexical/src/fpu.rs | 98 ++ third_party/rust/minimal-lexical/src/heapvec.rs | 190 +++ third_party/rust/minimal-lexical/src/lemire.rs | 225 ++++ third_party/rust/minimal-lexical/src/lib.rs | 68 ++ third_party/rust/minimal-lexical/src/libm.rs | 1238 ++++++++++++++++++++ third_party/rust/minimal-lexical/src/mask.rs | 60 + third_party/rust/minimal-lexical/src/num.rs | 308 +++++ third_party/rust/minimal-lexical/src/number.rs | 83 ++ third_party/rust/minimal-lexical/src/parse.rs | 201 ++++ third_party/rust/minimal-lexical/src/rounding.rs | 131 +++ third_party/rust/minimal-lexical/src/slow.rs | 403 +++++++ third_party/rust/minimal-lexical/src/stackvec.rs | 308 +++++ third_party/rust/minimal-lexical/src/table.rs | 11 + .../rust/minimal-lexical/src/table_bellerophon.rs | 119 ++ .../rust/minimal-lexical/src/table_lemire.rs | 676 +++++++++++ .../rust/minimal-lexical/src/table_small.rs | 90 ++ .../rust/minimal-lexical/tests/bellerophon.rs | 59 + .../minimal-lexical/tests/bellerophon_tests.rs | 231 ++++ .../minimal-lexical/tests/integration_tests.rs | 228 ++++ .../rust/minimal-lexical/tests/lemire_tests.rs | 378 ++++++ .../rust/minimal-lexical/tests/libm_tests.rs | 289 +++++ .../rust/minimal-lexical/tests/mask_tests.rs | 16 + .../rust/minimal-lexical/tests/number_tests.rs | 88 ++ .../rust/minimal-lexical/tests/parse_tests.rs | 189 +++ .../rust/minimal-lexical/tests/rounding_tests.rs | 64 + .../rust/minimal-lexical/tests/slow_tests.rs | 337 ++++++ third_party/rust/minimal-lexical/tests/stackvec.rs | 32 + .../rust/minimal-lexical/tests/vec_tests.rs | 395 +++++++ 41 files changed, 8311 insertions(+) create mode 100644 third_party/rust/minimal-lexical/.cargo-checksum.json create mode 100644 third_party/rust/minimal-lexical/CHANGELOG create mode 100644 third_party/rust/minimal-lexical/CODE_OF_CONDUCT.md create mode 100644 third_party/rust/minimal-lexical/Cargo.toml create mode 100644 third_party/rust/minimal-lexical/LICENSE-APACHE create mode 100644 third_party/rust/minimal-lexical/LICENSE-MIT create mode 100644 third_party/rust/minimal-lexical/LICENSE.md create mode 100644 third_party/rust/minimal-lexical/README.md create mode 100644 third_party/rust/minimal-lexical/clippy.toml create mode 100644 third_party/rust/minimal-lexical/rustfmt.toml create mode 100644 third_party/rust/minimal-lexical/src/bellerophon.rs create mode 100644 third_party/rust/minimal-lexical/src/bigint.rs create mode 100644 third_party/rust/minimal-lexical/src/extended_float.rs create mode 100644 third_party/rust/minimal-lexical/src/fpu.rs create mode 100644 third_party/rust/minimal-lexical/src/heapvec.rs create mode 
100644 third_party/rust/minimal-lexical/src/lemire.rs create mode 100644 third_party/rust/minimal-lexical/src/lib.rs create mode 100644 third_party/rust/minimal-lexical/src/libm.rs create mode 100644 third_party/rust/minimal-lexical/src/mask.rs create mode 100644 third_party/rust/minimal-lexical/src/num.rs create mode 100644 third_party/rust/minimal-lexical/src/number.rs create mode 100644 third_party/rust/minimal-lexical/src/parse.rs create mode 100644 third_party/rust/minimal-lexical/src/rounding.rs create mode 100644 third_party/rust/minimal-lexical/src/slow.rs create mode 100644 third_party/rust/minimal-lexical/src/stackvec.rs create mode 100644 third_party/rust/minimal-lexical/src/table.rs create mode 100644 third_party/rust/minimal-lexical/src/table_bellerophon.rs create mode 100644 third_party/rust/minimal-lexical/src/table_lemire.rs create mode 100644 third_party/rust/minimal-lexical/src/table_small.rs create mode 100644 third_party/rust/minimal-lexical/tests/bellerophon.rs create mode 100644 third_party/rust/minimal-lexical/tests/bellerophon_tests.rs create mode 100644 third_party/rust/minimal-lexical/tests/integration_tests.rs create mode 100644 third_party/rust/minimal-lexical/tests/lemire_tests.rs create mode 100644 third_party/rust/minimal-lexical/tests/libm_tests.rs create mode 100644 third_party/rust/minimal-lexical/tests/mask_tests.rs create mode 100644 third_party/rust/minimal-lexical/tests/number_tests.rs create mode 100644 third_party/rust/minimal-lexical/tests/parse_tests.rs create mode 100644 third_party/rust/minimal-lexical/tests/rounding_tests.rs create mode 100644 third_party/rust/minimal-lexical/tests/slow_tests.rs create mode 100644 third_party/rust/minimal-lexical/tests/stackvec.rs create mode 100644 third_party/rust/minimal-lexical/tests/vec_tests.rs (limited to 'third_party/rust/minimal-lexical') diff --git a/third_party/rust/minimal-lexical/.cargo-checksum.json b/third_party/rust/minimal-lexical/.cargo-checksum.json new file mode 100644 index 0000000000..4fb9f35d66 --- /dev/null +++ b/third_party/rust/minimal-lexical/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG":"55ebcf7ee0fd10987829a98fb9757cbc6f68c62198bc70122384bedc08de9915","CODE_OF_CONDUCT.md":"0bd67c62d204ec67cb29969aaf5aac337a77c84b318937bc2d9dc7e3fcbcdcaa","Cargo.toml":"6c485fa605a0d3de6ec0af125b67c55224515354034f990334b1a1a86988c632","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","LICENSE.md":"dbe1fff0fb1314b6af94f161511406275cf01c5a32441fbf24528a57a051d599","README.md":"00d521d93124e88edf58d42b3114786c0daaf437e73118eed27b3c716b7514ad","clippy.toml":"8d3aafbcf358ccf45cc148cd705f5fe71e777dabc391ed9a2bae32af73291fe8","rustfmt.toml":"ae46c79a84842907e151ca5d07e36b8d1932b61c6989465500c0f706507f42cc","src/bellerophon.rs":"bbabeccfa7a70ec314a894d968b424d1162df1b9e5e659baa1fd3a37d985fe75","src/bigint.rs":"93d3332d01bb7745bc569bc6b242c6c71c75eb78835b6a05bad91952b989daf0","src/extended_float.rs":"6040bdd49c03f11f8607b2da1703e1b7b5f57ddac9b02322c6958f9c1684496f","src/fpu.rs":"72d63bac2bbfc545128aa59dc35cfad3c553bac64bf575880775b3c4ace415bc","src/heapvec.rs":"836a49d40e5da90d195508e963d869afd78aaf9adb9cd60a0cb8f92b4e105c4f","src/lemire.rs":"100f3cb293deed0b0d1e7ca6b23152ba160f92f887f8924620b28c9ab77326db","src/lib.rs":"626dfc61992c42d4996dddcbfff5775ff2ffae44d116d2d70f6564a3209c0a9a","src/libm.rs":"ed5a3856eaa44a8a05aa123e27c2048b92ff42e4af1ef3f9fa1aff2a50190f4a","src/mask.rs":"63bcda92d14169a55ac54798f45365cef64a1aecd9625c3c3bc3deae202b2a07","src/num.rs":"dbcab14a5fe8e40e381829426dd75a7db672882592b5c4a08897a2fb6d2ae7ea","src/number.rs":"49d0880a99816ecf904fb88f607a821d6770ec270825b96e800a297ab1a01d78","src/parse.rs":"19559db67eddd17d331274cf87d6c4beeb0724dcdf859de9b8ab5995c4b8e682","src/rounding.rs":"8ba42d31618db1e6a381f8b60ffe1f9d216aaccb931b8fac5f279d8465e35cb7","src/slow.rs":"f096e7f83e8372e71568ec1724bc1c9d2c67ca39b80290ba062e60ae94b1f8d1","src/stackvec.rs":"0c921eb3adbd42cbe7be0f363e08ac85b6d5f1dabd4a7b077becddeff731da16","src/table.rs":"e4288891e9b1d8ba60dcc73edc639754cc2351d3219df8c625e694f3f0e58c5a","src/table_bellerophon.rs":"a2102292b27223a81e60a8a6607c42587efde3424526156921167742a0d5937c","src/table_lemire.rs":"c101c353c38b594f5b8987263b759927095b5dcd72e65607cc1c6a7de0bfd0c8","src/table_small.rs":"61b00e13eb3945622bf8bed374c88e29c63dfedb7384b31b35dd4e543cbe62c1","tests/bellerophon.rs":"b17b87b8963ebcd71f684e4d48c1ce619964e4fb719a5875b0ce4514ed528674","tests/bellerophon_tests.rs":"76b71efa2f4cec56a79535e2d292788a5e1b443c901ec7a234800782f36ddb68","tests/integration_tests.rs":"ed1a1fc46fc239eb4ea718057ad6e9869f633797ef72fc6f05b1757ab80e1641","tests/lemire_tests.rs":"6213bcd9b44def655b44a6b760cee0c0ad82d3bb494f48c2ff100698da34625b","tests/libm_tests.rs":"6941e74d7d0adf021edc93b9919275e82810687ff33147a10361613073b22669","tests/mask_tests.rs":"8c2a3daf434815389b6bf88837e3f382d74d97250104b925d70779366bd3d537","tests/number_tests.rs":"df4b4f0c65478f2f6193bd918fa4aad7163e40598c58df44547c4559e4a8b0c7","tests/parse_tests.rs":"bc0066b9257368f0365276fcffa2662c4699a033eaf9a4a7d6faa0e9b915094a","tests/rounding_tests.rs":"99f38b768ad15e726559c446825f9f1bad67935cdd28ffcc1cbcd3e031a901ea","tests/slow_tests.rs":"36c4c2538d2f5a1c1af5deb26ec4eba47f19f9a3c280a13d10704267a16d3b3f","tests/stackvec.rs":"f040611995bcd1bd2cb47694e74aa02ff4fabdffe007f712c9bb788d82dfb8a7","tests/vec_tests.rs":"09b561160df3b1385876db452bb5a67ef2c9fd2cc36b5687e1dfaf8c58947782"},"package":"68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"} \ No newline at end of file diff --git 
a/third_party/rust/minimal-lexical/CHANGELOG b/third_party/rust/minimal-lexical/CHANGELOG
new file mode 100644
index 0000000000..b1fd38ac14
--- /dev/null
+++ b/third_party/rust/minimal-lexical/CHANGELOG
@@ -0,0 +1,38 @@
+# Changelog
+
+This file notes significant changes to minimal-lexical.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [0.1.4] 2021-10-02
+### Added
+- Missing license details for `src/bellerophon.rs`.
+
+## [0.2.0] 2021-09-10
+### Changed
+- The `no_alloc` feature flag was replaced with an `alloc` feature flag.
+
+## [0.1.3] 2021-09-04
+### Added
+- Added the `compact` feature, which sacrifices performance for smaller binary sizes.
+- Added the `nightly` feature, which adds inline ASM using FPU instructions to ensure proper rounding on x86 targets that use the x87 FPU without SSE2.
+
+### Changed
+- Removed the stackvec dependency, even on `no_alloc`.
+- Improved the algorithms for parsing.
+- Simplified big-integer arithmetic and the slow-path algorithms.
+- Reduced the binary sizes.
+- Added optimizations for small floats.
+
+## [0.1.2] 2021-05-09
+### Added
+- Removed cached_float and now infer exponents rather than storing them.
+
+## [0.1.1] 2021-05-08
+### Added
+- Added the Eisel-Lemire algorithm.
+
+## [0.1.0] 2021-04-27
+### Added
+- Initial version.
diff --git a/third_party/rust/minimal-lexical/CODE_OF_CONDUCT.md b/third_party/rust/minimal-lexical/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..74fd657be0
--- /dev/null
+++ b/third_party/rust/minimal-lexical/CODE_OF_CONDUCT.md
@@ -0,0 +1,141 @@
+# Code of Conduct
+
+## When Something Happens
+
+If you see a Code of Conduct violation, follow these steps:
+
+1. Let the person know that what they did is not appropriate and ask them to stop and/or edit their message(s) or commits.
+2. That person should immediately stop the behavior and correct the issue.
+3. If this doesn’t happen, or if you're uncomfortable speaking up, [contact the maintainers](#contacting-maintainers).
+4. As soon as available, a maintainer will look into the issue, and take [further action (see below)](#further-enforcement), starting with a warning, then temporary block, then long-term repo or organization ban.
+
+When reporting, please include any relevant details, links, screenshots, context, or other information that may be used to better understand and resolve the situation.
+
+**The maintainer team will prioritize the well-being and comfort of the recipients of the violation over the comfort of the violator.** See [some examples below](#enforcement-examples).
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers of this project pledge to making participation in our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, technical preferences, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+ * Using welcoming and inclusive language.
+ * Being respectful of differing viewpoints and experiences.
+ * Gracefully accepting constructive feedback.
+ * Focusing on what is best for the community.
+ * Showing empathy and kindness towards other community members.
+ * Encouraging and raising up your peers in the project so you can all bask in hacks and glory. + +Examples of unacceptable behavior by participants include: + + * The use of sexualized language or imagery and unwelcome sexual attention or advances, including when simulated online. The only exception to sexual topics is channels/spaces specifically for topics of sexual identity. + * Casual mention of slavery or indentured servitude and/or false comparisons of one's occupation or situation to slavery. Please consider using or asking about alternate terminology when referring to such metaphors in technology. + * Making light of/making mocking comments about trigger warnings and content warnings. + * Trolling, insulting/derogatory comments, and personal or political attacks. + * Public or private harassment, deliberate intimidation, or threats. + * Publishing others' private information, such as a physical or electronic address, without explicit permission. This includes any sort of "outing" of any aspect of someone's identity without their consent. + * Publishing private screenshots or quotes of interactions in the context of this project without all quoted users' *explicit* consent. + * Publishing of private communication that doesn't have to do with reporting harrassment. + * Any of the above even when [presented as "ironic" or "joking"](https://en.wikipedia.org/wiki/Hipster_racism). + * Any attempt to present "reverse-ism" versions of the above as violations. Examples of reverse-isms are "reverse racism", "reverse sexism", "heterophobia", and "cisphobia". + * Unsolicited explanations under the assumption that someone doesn't already know it. Ask before you teach! Don't assume what people's knowledge gaps are. + * [Feigning or exaggerating surprise](https://www.recurse.com/manual#no-feigned-surprise) when someone admits to not knowing something. + * "[Well-actuallies](https://www.recurse.com/manual#no-well-actuallys)" + * Other conduct which could reasonably be considered inappropriate in a professional or community setting. + +## Scope + +This Code of Conduct applies both within spaces involving this project and in other spaces involving community members. This includes the repository, its Pull Requests and Issue tracker, private email communications in the context of the project, and any events where members of the project are participating, as well as adjacent communities and venues affecting the project's members. + +Depending on the violation, the maintainers may decide that violations of this code of conduct that have happened outside of the scope of the community may deem an individual unwelcome, and take appropriate action to maintain the comfort and safety of its members. + +### Other Community Standards + +As a project on GitHub, this project is additionally covered by the [GitHub Community Guidelines](https://help.github.com/articles/github-community-guidelines/). + +Enforcement of those guidelines after violations overlapping with the above are the responsibility of the entities, and enforcement may happen in any or all of the services/communities. + +## Maintainer Enforcement Process + +Once the maintainers get involved, they will follow a documented series of steps and do their best to preserve the well-being of project members. This section covers actual concrete steps. 
+ +### Contacting Maintainers + +You may get in touch with the maintainer team through any of the following methods: + + Through email: + ahuszagh@gmail.com (Alex Huszagh) + +### Further Enforcement + +If you've already followed the [initial enforcement steps](#enforcement), these are the steps maintainers will take for further enforcement, as needed: + + 1. Repeat the request to stop. + 2. If the person doubles down, they will have offending messages removed or edited by a maintainers given an official warning. The PR or Issue may be locked. + 3. If the behavior continues or is repeated later, the person will be blocked from participating for 24 hours. + 4. If the behavior continues or is repeated after the temporary block, a long-term (6-12mo) ban will be used. + +On top of this, maintainers may remove any offending messages, images, contributions, etc, as they deem necessary. + +Maintainers reserve full rights to skip any of these steps, at their discretion, if the violation is considered to be a serious and/or immediate threat to the health and well-being of members of the community. These include any threats, serious physical or verbal attacks, and other such behavior that would be completely unacceptable in any social setting that puts our members at risk. + +Members expelled from events or venues with any sort of paid attendance will not be refunded. + +### Who Watches the Watchers? + +Maintainers and other leaders who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. These may include anything from removal from the maintainer team to a permanent ban from the community. + +Additionally, as a project hosted on both GitHub and npm, [their own Codes of Conducts may be applied against maintainers of this project](#other-community-standards), externally of this project's procedures. + +### Enforcement Examples + +#### The Best Case + +The vast majority of situations work out like this. This interaction is common, and generally positive. + +> Alex: "Yeah I used X and it was really crazy!" + +> Patt (not a maintainer): "Hey, could you not use that word? What about 'ridiculous' instead?" + +> Alex: "oh sorry, sure." -> edits old comment to say "it was really confusing!" + +#### The Maintainer Case + +Sometimes, though, you need to get maintainers involved. Maintainers will do their best to resolve conflicts, but people who were harmed by something **will take priority**. + +> Patt: "Honestly, sometimes I just really hate using $library and anyone who uses it probably sucks at their job." + +> Alex: "Whoa there, could you dial it back a bit? There's a CoC thing about attacking folks' tech use like that." + +> Patt: "I'm not attacking anyone, what's your problem?" + +> Alex: "@maintainers hey uh. Can someone look at this issue? Patt is getting a bit aggro. I tried to nudge them about it, but nope." + +> KeeperOfCommitBits: (on issue) "Hey Patt, maintainer here. Could you tone it down? This sort of attack is really not okay in this space." + +> Patt: "Leave me alone I haven't said anything bad wtf is wrong with you." + +> KeeperOfCommitBits: (deletes user's comment), "@patt I mean it. Please refer to the CoC over at (URL to this CoC) if you have questions, but you can consider this an actual warning. I'd appreciate it if you reworded your messages in this thread, since they made folks there uncomfortable. Let's try and be kind, yeah?" + +> Patt: "@keeperofbits Okay sorry. 
I'm just frustrated and I'm kinda burnt out and I guess I got carried away. I'll DM Alex a note apologizing and edit my messages. Sorry for the trouble." + +> KeeperOfCommitBits: "@patt Thanks for that. I hear you on the stress. Burnout sucks :/. Have a good one!" + +#### The Nope Case + +> PepeTheFrog🐸: "Hi, I am a literal actual nazi and I think white supremacists are quite fashionable." + +> Patt: "NOOOOPE. OH NOPE NOPE." + +> Alex: "JFC NO. NOPE. @keeperofbits NOPE NOPE LOOK HERE" + +> KeeperOfCommitBits: "👀 Nope. NOPE NOPE NOPE. 🔥" + +> PepeTheFrog🐸 has been banned from all organization or user repositories belonging to KeeperOfCommitBits. + +## Attribution + +This Code of Conduct was generated using [WeAllJS Code of Conduct Generator](https://npm.im/weallbehave), which is based on the [WeAllJS Code of Conduct](https://wealljs.org/code-of-conduct), which is itself based on +[Contributor Covenant](http://contributor-covenant.org), version 1.4, available at [http://contributor-covenant.org/version/1/4](http://contributor-covenant.org/version/1/4), and the LGBTQ in Technology Slack [Code of Conduct](http://lgbtq.technology/coc.html). diff --git a/third_party/rust/minimal-lexical/Cargo.toml b/third_party/rust/minimal-lexical/Cargo.toml new file mode 100644 index 0000000000..b9b52cbe3f --- /dev/null +++ b/third_party/rust/minimal-lexical/Cargo.toml @@ -0,0 +1,33 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "minimal-lexical" +version = "0.2.1" +authors = ["Alex Huszagh "] +exclude = ["assets/*", "ci/*", "docs/*", "etc/*", "fuzz/*", "examples/*", "scripts/*"] +autoexamples = false +description = "Fast float parsing conversion routines." +documentation = "https://docs.rs/minimal-lexical" +readme = "README.md" +keywords = ["parsing", "no_std"] +categories = ["parsing", "no-std"] +license = "MIT/Apache-2.0" +repository = "https://github.com/Alexhuszagh/minimal-lexical" + +[features] +alloc = [] +compact = [] +default = ["std"] +lint = [] +nightly = [] +std = [] diff --git a/third_party/rust/minimal-lexical/LICENSE-APACHE b/third_party/rust/minimal-lexical/LICENSE-APACHE new file mode 100644 index 0000000000..11069edd79 --- /dev/null +++ b/third_party/rust/minimal-lexical/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/third_party/rust/minimal-lexical/LICENSE-MIT b/third_party/rust/minimal-lexical/LICENSE-MIT new file mode 100644 index 0000000000..31aa79387f --- /dev/null +++ b/third_party/rust/minimal-lexical/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/minimal-lexical/LICENSE.md b/third_party/rust/minimal-lexical/LICENSE.md new file mode 100644 index 0000000000..2442bbfbf3 --- /dev/null +++ b/third_party/rust/minimal-lexical/LICENSE.md @@ -0,0 +1,37 @@ +Minimal-lexical is dual licensed under the Apache 2.0 license as well as the MIT +license. See the LICENCE-MIT and the LICENCE-APACHE files for the licenses. + +--- + +`src/bellerophon.rs` is loosely based off the Golang implementation, +found [here](https://github.com/golang/go/blob/b10849fbb97a2244c086991b4623ae9f32c212d0/src/strconv/extfloat.go). +That code (used if the `compact` feature is enabled) is subject to a +[3-clause BSD license](https://github.com/golang/go/blob/b10849fbb97a2244c086991b4623ae9f32c212d0/LICENSE): + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/third_party/rust/minimal-lexical/README.md b/third_party/rust/minimal-lexical/README.md
new file mode 100644
index 0000000000..7805296510
--- /dev/null
+++ b/third_party/rust/minimal-lexical/README.md
@@ -0,0 +1,102 @@
+minimal-lexical
+===============
+
+This is a minimal version of [rust-lexical](https://github.com/Alexhuszagh/rust-lexical), meant to allow efficient round-trip float parsing. minimal-lexical implements a correct, fast float parser.
+
+Due to the small, stable nature of minimal-lexical, it is also well-adapted to private forks. If you do privately fork minimal-lexical, I recommend you contact me via [email](mailto:ahuszagh@gmail.com) or [Twitter](https://twitter.com/KardOnIce), so I can notify you of feature updates, bug fixes, or security vulnerabilities, as well as help you implement custom feature requests. I will not use your information for any other purpose, including, but not limited to, disclosing your project or organization's use of minimal-lexical.
+
+minimal-lexical is designed for fast compile times and small binary sizes, at the expense of a minor amount of performance. For improved performance, feel free to fork minimal-lexical with more aggressive inlining.
+
+**Similar Projects**
+
+For high-level, all-in-one number conversion routines, see [rust-lexical](https://github.com/Alexhuszagh/rust-lexical).
+
+**Table Of Contents**
+
+- [Getting Started](#getting-started)
+- [Recipes](#recipes)
+- [Algorithms](#algorithms)
+- [Platform Support](#platform-support)
+- [Minimum Version Support](#minimum-version-support)
+- [Changelog](#changelog)
+- [License](#license)
+- [Contributing](#contributing)
+
+# Getting Started
+
+First, add the following to your `Cargo.toml`.
+
+```toml
+[dependencies]
+minimal-lexical = "0.2"
+```
+
+Next, to parse a simple float, use the following:
+
+```rust
+extern crate minimal_lexical;
+
+// Let's say we want to parse "1.2345".
+// First, we need an external parser to extract the integer digits ("1"),
+// the fraction digits ("2345"), and then parse the exponent to a 32-bit
+// integer (0).
+// Warning:
+// --------
+// Please note that leading zeros must be trimmed from the integer,
+// and trailing zeros must be trimmed from the fraction. This cannot
+// be handled by minimal-lexical, since we accept iterators.
+let integer = b"1";
+let fraction = b"2345";
+let float: f64 = minimal_lexical::parse_float(integer.iter(), fraction.iter(), 0);
+println!("float={:?}", float); // 1.2345
+```
+
+# Recipes
+
+You may be asking: where is the actual parser? Due to the variation in float formats, and the goal of serving as a utility for parsers of various data-interchange languages, such functionality is beyond the scope of this library.
+
+For example, the following float is valid in Rust strings, but is invalid in JSON or TOML:
+```json
+1.e7
+```
+
+Therefore, to use the library, you need functionality that extracts the significant digits to pass to `parse_float`. Please see [simple-example](https://github.com/Alexhuszagh/minimal-lexical/blob/master/examples/simple.rs) for a simple, annotated example on how to use minimal-lexical as a parser.
+
+# Algorithms
+
+For an in-depth explanation on the algorithms minimal-lexical uses, please see [lexical-core#string-to-float](https://github.com/Alexhuszagh/rust-lexical/tree/master/lexical-core#string-to-float).
+
+# Platform Support
+
+minimal-lexical is tested on a wide variety of platforms, including big and small-endian systems, to ensure portable code.
Supported architectures include: +- x86_64 Linux, Windows, macOS, Android, iOS, FreeBSD, and NetBSD. +- x86 Linux, macOS, Android, iOS, and FreeBSD. +- aarch64 (ARM8v8-A) Linux, Android, and iOS. +- armv7 (ARMv7-A) Linux, Android, and iOS. +- arm (ARMv6) Linux, and Android. +- mips (MIPS) Linux. +- mipsel (MIPS LE) Linux. +- mips64 (MIPS64 BE) Linux. +- mips64el (MIPS64 LE) Linux. +- powerpc (PowerPC) Linux. +- powerpc64 (PPC64) Linux. +- powerpc64le (PPC64LE) Linux. +- s390x (IBM Z) Linux. + +minimal-lexical should also work on a wide variety of other architectures and ISAs. If you have any issue compiling minimal-lexical on any architecture, please file a bug report. + +# Minimum Version Support + +Minimal-lexical is tested to support Rustc 1.36+, including stable, beta, and nightly. Please report any errors compiling a supported lexical version on a compatible Rustc version. Please note we may increment the MSRV for compiler versions older than 18 months, to support at least the current Debian stable version, without breaking changes. + +# Changelog + +All changes are documented in [CHANGELOG](CHANGELOG). + +# License + +Minimal-lexical is dual licensed under the Apache 2.0 license as well as the MIT license. See the [LICENSE.md](LICENSE.md) file for full license details. + +# Contributing + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in minimal-lexical by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/minimal-lexical/clippy.toml b/third_party/rust/minimal-lexical/clippy.toml new file mode 100644 index 0000000000..cda8d17eed --- /dev/null +++ b/third_party/rust/minimal-lexical/clippy.toml @@ -0,0 +1 @@ +avoid-breaking-exported-api = false diff --git a/third_party/rust/minimal-lexical/rustfmt.toml b/third_party/rust/minimal-lexical/rustfmt.toml new file mode 100644 index 0000000000..2361c9d479 --- /dev/null +++ b/third_party/rust/minimal-lexical/rustfmt.toml @@ -0,0 +1,16 @@ +# Requires nightly to do proper formatting. +use_small_heuristics = "Off" +use_field_init_shorthand = true +trailing_semicolon = true +newline_style = "Unix" +match_block_trailing_comma = true +empty_item_single_line = false +enum_discrim_align_threshold = 40 +fn_args_layout = "Tall" +fn_single_line = false +format_macro_matchers = true +format_macro_bodies = true +imports_indent = "Block" +imports_layout = "HorizontalVertical" +indent_style = "Block" +match_arm_blocks = true diff --git a/third_party/rust/minimal-lexical/src/bellerophon.rs b/third_party/rust/minimal-lexical/src/bellerophon.rs new file mode 100644 index 0000000000..86a2023d09 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/bellerophon.rs @@ -0,0 +1,391 @@ +//! An implementation of Clinger's Bellerophon algorithm. +//! +//! This is a moderate path algorithm that uses an extended-precision +//! float, represented in 80 bits, by calculating the bits of slop +//! and determining if those bits could prevent unambiguous rounding. +//! +//! This algorithm requires less static storage than the Lemire algorithm, +//! and has decent performance, and is therefore used when non-decimal, +//! non-power-of-two strings need to be parsed. Clinger's algorithm +//! is described in depth in "How to Read Floating Point Numbers Accurately.", +//! available online [here](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.45.4152&rep=rep1&type=pdf). +//! +//! 
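As a rough, self-contained illustration of the "bits of slop" idea above (a simplified sketch modeled on the `error_is_accurate` check further down in this file, hard-coding the f64 mantissa width, ignoring the denormal handling of the real routine, and not itself part of the crate): after the approximate power multiplications, the low bits that will be truncated from the extended-precision mantissa are compared against the round-nearest halfway point, widened by the accumulated error. If the halfway point falls inside that band, the value cannot be rounded unambiguously; in the crate, that case is flagged by biasing the exponent with `F::INVALID_FP` so a slower, arbitrary-precision path can take over.

```rust
/// Simplified sketch (f64 only): can a normalized 64-bit extended mantissa be
/// rounded to a 52-bit mantissa unambiguously, given `errors` units of
/// uncertainty in its low bits?
fn is_unambiguous(mant: u64, errors: u64) -> bool {
    let truncated_bits = 64 - 52 - 1; // bits discarded below the hidden bit
    let mask = (1u64 << truncated_bits) - 1; // the bits that will be thrown away
    let halfway = 1u64 << (truncated_bits - 1); // round-nearest boundary for them
    let extra = mant & mask;
    // Ambiguous only if `extra` lies within `errors` of the halfway point.
    !(halfway.wrapping_sub(errors) < extra && extra < halfway.wrapping_add(errors))
}

fn main() {
    // Far from the halfway point: rounding is safe even with a little slop.
    assert!(is_unambiguous(0x8000_0000_0000_0123, 4));
    // At the halfway point: a few units of error could flip the rounding.
    assert!(!is_unambiguous(0x8000_0000_0000_0400, 4));
}
```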
This implementation is loosely based off the Golang implementation, +//! found [here](https://github.com/golang/go/blob/b10849fbb97a2244c086991b4623ae9f32c212d0/src/strconv/extfloat.go). +//! This code is therefore subject to a 3-clause BSD license. + +#![cfg(feature = "compact")] +#![doc(hidden)] + +use crate::extended_float::ExtendedFloat; +use crate::mask::{lower_n_halfway, lower_n_mask}; +use crate::num::Float; +use crate::number::Number; +use crate::rounding::{round, round_nearest_tie_even}; +use crate::table::BASE10_POWERS; + +// ALGORITHM +// --------- + +/// Core implementation of the Bellerophon algorithm. +/// +/// Create an extended-precision float, scale it to the proper radix power, +/// calculate the bits of slop, and return the representation. The value +/// will always be guaranteed to be within 1 bit, rounded-down, of the real +/// value. If a negative exponent is returned, this represents we were +/// unable to unambiguously round the significant digits. +/// +/// This has been modified to return a biased, rather than unbiased exponent. +pub fn bellerophon(num: &Number) -> ExtendedFloat { + let fp_zero = ExtendedFloat { + mant: 0, + exp: 0, + }; + let fp_inf = ExtendedFloat { + mant: 0, + exp: F::INFINITE_POWER, + }; + + // Early short-circuit, in case of literal 0 or infinity. + // This allows us to avoid narrow casts causing numeric overflow, + // and is a quick check for any radix. + if num.mantissa == 0 || num.exponent <= -0x1000 { + return fp_zero; + } else if num.exponent >= 0x1000 { + return fp_inf; + } + + // Calculate our indexes for our extended-precision multiplication. + // This narrowing cast is safe, since exponent must be in a valid range. + let exponent = num.exponent as i32 + BASE10_POWERS.bias; + let small_index = exponent % BASE10_POWERS.step; + let large_index = exponent / BASE10_POWERS.step; + + if exponent < 0 { + // Guaranteed underflow (assign 0). + return fp_zero; + } + if large_index as usize >= BASE10_POWERS.large.len() { + // Overflow (assign infinity) + return fp_inf; + } + + // Within the valid exponent range, multiply by the large and small + // exponents and return the resulting value. + + // Track errors to as a factor of unit in last-precision. + let mut errors: u32 = 0; + if num.many_digits { + errors += error_halfscale(); + } + + // Multiply by the small power. + // Check if we can directly multiply by an integer, if not, + // use extended-precision multiplication. + let mut fp = ExtendedFloat { + mant: num.mantissa, + exp: 0, + }; + match fp.mant.overflowing_mul(BASE10_POWERS.get_small_int(small_index as usize)) { + // Overflow, multiplication unsuccessful, go slow path. + (_, true) => { + normalize(&mut fp); + fp = mul(&fp, &BASE10_POWERS.get_small(small_index as usize)); + errors += error_halfscale(); + }, + // No overflow, multiplication successful. + (mant, false) => { + fp.mant = mant; + normalize(&mut fp); + }, + } + + // Multiply by the large power. + fp = mul(&fp, &BASE10_POWERS.get_large(large_index as usize)); + if errors > 0 { + errors += 1; + } + errors += error_halfscale(); + + // Normalize the floating point (and the errors). + let shift = normalize(&mut fp); + errors <<= shift; + fp.exp += F::EXPONENT_BIAS; + + // Check for literal overflow, even with halfway cases. + if -fp.exp + 1 > 65 { + return fp_zero; + } + + // Too many errors accumulated, return an error. + if !error_is_accurate::(errors, &fp) { + // Bias the exponent so we know it's invalid. 
+ fp.exp += F::INVALID_FP; + return fp; + } + + // Check if we have a literal 0 or overflow here. + // If we have an exponent of -63, we can still have a valid shift, + // giving a case where we have too many errors and need to round-up. + if -fp.exp + 1 == 65 { + // Have more than 64 bits below the minimum exponent, must be 0. + return fp_zero; + } + + round::(&mut fp, |f, s| { + round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { + is_above || (is_odd && is_halfway) + }); + }); + fp +} + +// ERRORS +// ------ + +// Calculate if the errors in calculating the extended-precision float. +// +// Specifically, we want to know if we are close to a halfway representation, +// or halfway between `b` and `b+1`, or `b+h`. The halfway representation +// has the form: +// SEEEEEEEHMMMMMMMMMMMMMMMMMMMMMMM100... +// where: +// S = Sign Bit +// E = Exponent Bits +// H = Hidden Bit +// M = Mantissa Bits +// +// The halfway representation has a bit set 1-after the mantissa digits, +// and no bits set immediately afterward, making it impossible to +// round between `b` and `b+1` with this representation. + +/// Get the full error scale. +#[inline(always)] +const fn error_scale() -> u32 { + 8 +} + +/// Get the half error scale. +#[inline(always)] +const fn error_halfscale() -> u32 { + error_scale() / 2 +} + +/// Determine if the number of errors is tolerable for float precision. +fn error_is_accurate(errors: u32, fp: &ExtendedFloat) -> bool { + // Check we can't have a literal 0 denormal float. + debug_assert!(fp.exp >= -64); + + // Determine if extended-precision float is a good approximation. + // If the error has affected too many units, the float will be + // inaccurate, or if the representation is too close to halfway + // that any operations could affect this halfway representation. + // See the documentation for dtoa for more information. + + // This is always a valid u32, since `fp.exp >= -64` + // will always be positive and the significand size is {23, 52}. + let mantissa_shift = 64 - F::MANTISSA_SIZE - 1; + + // The unbiased exponent checks is `unbiased_exp <= F::MANTISSA_SIZE + // - F::EXPONENT_BIAS -64 + 1`, or `biased_exp <= F::MANTISSA_SIZE - 63`, + // or `biased_exp <= mantissa_shift`. + let extrabits = match fp.exp <= -mantissa_shift { + // Denormal, since shifting to the hidden bit still has a negative exponent. + // The unbiased check calculation for bits is `1 - F::EXPONENT_BIAS - unbiased_exp`, + // or `1 - biased_exp`. + true => 1 - fp.exp, + false => 64 - F::MANTISSA_SIZE - 1, + }; + + // Our logic is as follows: we want to determine if the actual + // mantissa and the errors during calculation differ significantly + // from the rounding point. The rounding point for round-nearest + // is the halfway point, IE, this when the truncated bits start + // with b1000..., while the rounding point for the round-toward + // is when the truncated bits are equal to 0. + // To do so, we can check whether the rounding point +/- the error + // are >/< the actual lower n bits. + // + // For whether we need to use signed or unsigned types for this + // analysis, see this example, using u8 rather than u64 to simplify + // things. 
+ // + // # Comparisons + // cmp1 = (halfway - errors) < extra + // cmp1 = extra < (halfway + errors) + // + // # Large Extrabits, Low Errors + // + // extrabits = 8 + // halfway = 0b10000000 + // extra = 0b10000010 + // errors = 0b00000100 + // halfway - errors = 0b01111100 + // halfway + errors = 0b10000100 + // + // Unsigned: + // halfway - errors = 124 + // halfway + errors = 132 + // extra = 130 + // cmp1 = true + // cmp2 = true + // Signed: + // halfway - errors = 124 + // halfway + errors = -124 + // extra = -126 + // cmp1 = false + // cmp2 = true + // + // # Conclusion + // + // Since errors will always be small, and since we want to detect + // if the representation is accurate, we need to use an **unsigned** + // type for comparisons. + let maskbits = extrabits as u64; + let errors = errors as u64; + + // Round-to-nearest, need to use the halfway point. + if extrabits > 64 { + // Underflow, we have a shift larger than the mantissa. + // Representation is valid **only** if the value is close enough + // overflow to the next bit within errors. If it overflows, + // the representation is **not** valid. + !fp.mant.overflowing_add(errors).1 + } else { + let mask = lower_n_mask(maskbits); + let extra = fp.mant & mask; + + // Round-to-nearest, need to check if we're close to halfway. + // IE, b10100 | 100000, where `|` signifies the truncation point. + let halfway = lower_n_halfway(maskbits); + let cmp1 = halfway.wrapping_sub(errors) < extra; + let cmp2 = extra < halfway.wrapping_add(errors); + + // If both comparisons are true, we have significant rounding error, + // and the value cannot be exactly represented. Otherwise, the + // representation is valid. + !(cmp1 && cmp2) + } +} + +// MATH +// ---- + +/// Normalize float-point number. +/// +/// Shift the mantissa so the number of leading zeros is 0, or the value +/// itself is 0. +/// +/// Get the number of bytes shifted. +pub fn normalize(fp: &mut ExtendedFloat) -> i32 { + // Note: + // Using the ctlz intrinsic via leading_zeros is way faster (~10x) + // than shifting 1-bit at a time, via while loop, and also way + // faster (~2x) than an unrolled loop that checks at 32, 16, 4, + // 2, and 1 bit. + // + // Using a modulus of pow2 (which will get optimized to a bitwise + // and with 0x3F or faster) is slightly slower than an if/then, + // however, removing the if/then will likely optimize more branched + // code as it removes conditional logic. + + // Calculate the number of leading zeros, and then zero-out + // any overflowing bits, to avoid shl overflow when self.mant == 0. + if fp.mant != 0 { + let shift = fp.mant.leading_zeros() as i32; + fp.mant <<= shift; + fp.exp -= shift; + shift + } else { + 0 + } +} + +/// Multiply two normalized extended-precision floats, as if by `a*b`. +/// +/// The precision is maximal when the numbers are normalized, however, +/// decent precision will occur as long as both values have high bits +/// set. The result is not normalized. +/// +/// Algorithm: +/// 1. Non-signed multiplication of mantissas (requires 2x as many bits as input). +/// 2. Normalization of the result (not done here). +/// 3. Addition of exponents. +pub fn mul(x: &ExtendedFloat, y: &ExtendedFloat) -> ExtendedFloat { + // Logic check, values must be decently normalized prior to multiplication. + debug_assert!(x.mant >> 32 != 0); + debug_assert!(y.mant >> 32 != 0); + + // Extract high-and-low masks. + // Mask is u32::MAX for older Rustc versions. 
+ const LOMASK: u64 = 0xffff_ffff; + let x1 = x.mant >> 32; + let x0 = x.mant & LOMASK; + let y1 = y.mant >> 32; + let y0 = y.mant & LOMASK; + + // Get our products + let x1_y0 = x1 * y0; + let x0_y1 = x0 * y1; + let x0_y0 = x0 * y0; + let x1_y1 = x1 * y1; + + let mut tmp = (x1_y0 & LOMASK) + (x0_y1 & LOMASK) + (x0_y0 >> 32); + // round up + tmp += 1 << (32 - 1); + + ExtendedFloat { + mant: x1_y1 + (x1_y0 >> 32) + (x0_y1 >> 32) + (tmp >> 32), + exp: x.exp + y.exp + 64, + } +} + +// POWERS +// ------ + +/// Precalculated powers of base N for the Bellerophon algorithm. +pub struct BellerophonPowers { + // Pre-calculated small powers. + pub small: &'static [u64], + // Pre-calculated large powers. + pub large: &'static [u64], + /// Pre-calculated small powers as 64-bit integers + pub small_int: &'static [u64], + // Step between large powers and number of small powers. + pub step: i32, + // Exponent bias for the large powers. + pub bias: i32, + /// ceil(log2(radix)) scaled as a multiplier. + pub log2: i64, + /// Bitshift for the log2 multiplier. + pub log2_shift: i32, +} + +/// Allow indexing of values without bounds checking +impl BellerophonPowers { + #[inline] + pub fn get_small(&self, index: usize) -> ExtendedFloat { + let mant = self.small[index]; + let exp = (1 - 64) + ((self.log2 * index as i64) >> self.log2_shift); + ExtendedFloat { + mant, + exp: exp as i32, + } + } + + #[inline] + pub fn get_large(&self, index: usize) -> ExtendedFloat { + let mant = self.large[index]; + let biased_e = index as i64 * self.step as i64 - self.bias as i64; + let exp = (1 - 64) + ((self.log2 * biased_e) >> self.log2_shift); + ExtendedFloat { + mant, + exp: exp as i32, + } + } + + #[inline] + pub fn get_small_int(&self, index: usize) -> u64 { + self.small_int[index] + } +} diff --git a/third_party/rust/minimal-lexical/src/bigint.rs b/third_party/rust/minimal-lexical/src/bigint.rs new file mode 100644 index 0000000000..d1d5027a19 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/bigint.rs @@ -0,0 +1,788 @@ +//! A simple big-integer type for slow path algorithms. +//! +//! This includes minimal stackvector for use in big-integer arithmetic. + +#![doc(hidden)] + +#[cfg(feature = "alloc")] +use crate::heapvec::HeapVec; +use crate::num::Float; +#[cfg(not(feature = "alloc"))] +use crate::stackvec::StackVec; +#[cfg(not(feature = "compact"))] +use crate::table::{LARGE_POW5, LARGE_POW5_STEP}; +use core::{cmp, ops, ptr}; + +/// Number of bits in a Bigint. +/// +/// This needs to be at least the number of bits required to store +/// a Bigint, which is `log2(radix**digits)`. +/// ≅ 3600 for base-10, rounded-up. +pub const BIGINT_BITS: usize = 4000; + +/// The number of limbs for the bigint. +pub const BIGINT_LIMBS: usize = BIGINT_BITS / LIMB_BITS; + +#[cfg(feature = "alloc")] +pub type VecType = HeapVec; + +#[cfg(not(feature = "alloc"))] +pub type VecType = StackVec; + +/// Storage for a big integer type. +/// +/// This is used for algorithms when we have a finite number of digits. +/// Specifically, it stores all the significant digits scaled to the +/// proper exponent, as an integral type, and then directly compares +/// these digits. +/// +/// This requires us to store the number of significant bits, plus the +/// number of exponent bits (required) since we scale everything +/// to the same exponent. +#[derive(Clone, PartialEq, Eq)] +pub struct Bigint { + /// Significant digits for the float, stored in a big integer in LE order. 
+ /// + /// This is pretty much the same number of digits for any radix, since the + /// significant digits balances out the zeros from the exponent: + /// 1. Decimal is 1091 digits, 767 mantissa digits + 324 exponent zeros. + /// 2. Base 6 is 1097 digits, or 680 mantissa digits + 417 exponent zeros. + /// 3. Base 36 is 1086 digits, or 877 mantissa digits + 209 exponent zeros. + /// + /// However, the number of bytes required is larger for large radixes: + /// for decimal, we need `log2(10**1091) ≅ 3600`, while for base 36 + /// we need `log2(36**1086) ≅ 5600`. Since we use uninitialized data, + /// we avoid a major performance hit from the large buffer size. + pub data: VecType, +} + +#[allow(clippy::new_without_default)] +impl Bigint { + /// Construct a bigint representing 0. + #[inline(always)] + pub fn new() -> Self { + Self { + data: VecType::new(), + } + } + + /// Construct a bigint from an integer. + #[inline(always)] + pub fn from_u64(value: u64) -> Self { + Self { + data: VecType::from_u64(value), + } + } + + #[inline(always)] + pub fn hi64(&self) -> (u64, bool) { + self.data.hi64() + } + + /// Multiply and assign as if by exponentiation by a power. + #[inline] + pub fn pow(&mut self, base: u32, exp: u32) -> Option<()> { + debug_assert!(base == 2 || base == 5 || base == 10); + if base % 5 == 0 { + pow(&mut self.data, exp)?; + } + if base % 2 == 0 { + shl(&mut self.data, exp as usize)?; + } + Some(()) + } + + /// Calculate the bit-length of the big-integer. + #[inline] + pub fn bit_length(&self) -> u32 { + bit_length(&self.data) + } +} + +impl ops::MulAssign<&Bigint> for Bigint { + fn mul_assign(&mut self, rhs: &Bigint) { + self.data *= &rhs.data; + } +} + +/// REVERSE VIEW + +/// Reverse, immutable view of a sequence. +pub struct ReverseView<'a, T: 'a> { + inner: &'a [T], +} + +impl<'a, T> ops::Index for ReverseView<'a, T> { + type Output = T; + + #[inline] + fn index(&self, index: usize) -> &T { + let len = self.inner.len(); + &(*self.inner)[len - index - 1] + } +} + +/// Create a reverse view of the vector for indexing. +#[inline] +pub fn rview(x: &[Limb]) -> ReverseView { + ReverseView { + inner: x, + } +} + +// COMPARE +// ------- + +/// Compare `x` to `y`, in little-endian order. +#[inline] +pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering { + match x.len().cmp(&y.len()) { + cmp::Ordering::Equal => { + let iter = x.iter().rev().zip(y.iter().rev()); + for (&xi, yi) in iter { + match xi.cmp(yi) { + cmp::Ordering::Equal => (), + ord => return ord, + } + } + // Equal case. + cmp::Ordering::Equal + }, + ord => ord, + } +} + +// NORMALIZE +// --------- + +/// Normalize the integer, so any leading zero values are removed. +#[inline] +pub fn normalize(x: &mut VecType) { + // We don't care if this wraps: the index is bounds-checked. + while let Some(&value) = x.get(x.len().wrapping_sub(1)) { + if value == 0 { + unsafe { x.set_len(x.len() - 1) }; + } else { + break; + } + } +} + +/// Get if the big integer is normalized. +#[inline] +#[allow(clippy::match_like_matches_macro)] +pub fn is_normalized(x: &[Limb]) -> bool { + // We don't care if this wraps: the index is bounds-checked. + match x.get(x.len().wrapping_sub(1)) { + Some(&0) => false, + _ => true, + } +} + +// FROM +// ---- + +/// Create StackVec from u64 value. 
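[Editor's note] `Bigint::pow` above factors the base into its odd part and its power-of-two part, so only the factor of five needs real big-integer multiplications. A standalone sketch of the same idea using `u128` instead of a limb vector:

```rust
// Sketch of the factoring done by `Bigint::pow` for base 10: 5^exp is computed
// with multiplications, and the 2^exp factor is a single cheap left shift.
fn pow10(exp: u32) -> u128 {
    let mut x: u128 = 1;
    for _ in 0..exp {
        x *= 5; // the odd factor needs real multiplications
    }
    x << exp // the power-of-two factor is just a shift
}

fn main() {
    assert_eq!(pow10(7), 10_u128.pow(7));
}
```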
+#[inline(always)] +#[allow(clippy::branches_sharing_code)] +pub fn from_u64(x: u64) -> VecType { + let mut vec = VecType::new(); + debug_assert!(vec.capacity() >= 2); + if LIMB_BITS == 32 { + vec.try_push(x as Limb).unwrap(); + vec.try_push((x >> 32) as Limb).unwrap(); + } else { + vec.try_push(x as Limb).unwrap(); + } + vec.normalize(); + vec +} + +// HI +// -- + +/// Check if any of the remaining bits are non-zero. +/// +/// # Safety +/// +/// Safe as long as `rindex <= x.len()`. +#[inline] +pub fn nonzero(x: &[Limb], rindex: usize) -> bool { + debug_assert!(rindex <= x.len()); + + let len = x.len(); + let slc = &x[..len - rindex]; + slc.iter().rev().any(|&x| x != 0) +} + +// These return the high X bits and if the bits were truncated. + +/// Shift 32-bit integer to high 64-bits. +#[inline] +pub fn u32_to_hi64_1(r0: u32) -> (u64, bool) { + u64_to_hi64_1(r0 as u64) +} + +/// Shift 2 32-bit integers to high 64-bits. +#[inline] +pub fn u32_to_hi64_2(r0: u32, r1: u32) -> (u64, bool) { + let r0 = (r0 as u64) << 32; + let r1 = r1 as u64; + u64_to_hi64_1(r0 | r1) +} + +/// Shift 3 32-bit integers to high 64-bits. +#[inline] +pub fn u32_to_hi64_3(r0: u32, r1: u32, r2: u32) -> (u64, bool) { + let r0 = r0 as u64; + let r1 = (r1 as u64) << 32; + let r2 = r2 as u64; + u64_to_hi64_2(r0, r1 | r2) +} + +/// Shift 64-bit integer to high 64-bits. +#[inline] +pub fn u64_to_hi64_1(r0: u64) -> (u64, bool) { + let ls = r0.leading_zeros(); + (r0 << ls, false) +} + +/// Shift 2 64-bit integers to high 64-bits. +#[inline] +pub fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) { + let ls = r0.leading_zeros(); + let rs = 64 - ls; + let v = match ls { + 0 => r0, + _ => (r0 << ls) | (r1 >> rs), + }; + let n = r1 << ls != 0; + (v, n) +} + +/// Extract the hi bits from the buffer. +macro_rules! hi { + // # Safety + // + // Safe as long as the `stackvec.len() >= 1`. + (@1 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + $fn($rview[0] as $t) + }}; + + // # Safety + // + // Safe as long as the `stackvec.len() >= 2`. + (@2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + let r0 = $rview[0] as $t; + let r1 = $rview[1] as $t; + $fn(r0, r1) + }}; + + // # Safety + // + // Safe as long as the `stackvec.len() >= 2`. + (@nonzero2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + let (v, n) = hi!(@2 $self, $rview, $t, $fn); + (v, n || nonzero($self, 2 )) + }}; + + // # Safety + // + // Safe as long as the `stackvec.len() >= 3`. + (@3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + let r0 = $rview[0] as $t; + let r1 = $rview[1] as $t; + let r2 = $rview[2] as $t; + $fn(r0, r1, r2) + }}; + + // # Safety + // + // Safe as long as the `stackvec.len() >= 3`. + (@nonzero3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{ + let (v, n) = hi!(@3 $self, $rview, $t, $fn); + (v, n || nonzero($self, 3)) + }}; +} + +/// Get the high 64 bits from the vector. +#[inline(always)] +pub fn hi64(x: &[Limb]) -> (u64, bool) { + let rslc = rview(x); + // SAFETY: the buffer must be at least length bytes long. + match x.len() { + 0 => (0, false), + 1 if LIMB_BITS == 32 => hi!(@1 x, rslc, u32, u32_to_hi64_1), + 1 => hi!(@1 x, rslc, u64, u64_to_hi64_1), + 2 if LIMB_BITS == 32 => hi!(@2 x, rslc, u32, u32_to_hi64_2), + 2 => hi!(@2 x, rslc, u64, u64_to_hi64_2), + _ if LIMB_BITS == 32 => hi!(@nonzero3 x, rslc, u32, u32_to_hi64_3), + _ => hi!(@nonzero2 x, rslc, u64, u64_to_hi64_2), + } +} + +// POWERS +// ------ + +/// MulAssign by a power of 5. +/// +/// Theoretically... 
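[Editor's note] The `hi64` helpers above normalize the top limbs and report whether anything non-zero was shifted away. A minimal sketch of the same contract for 64-bit limbs, assuming a normalized buffer (non-zero most-significant limb):

```rust
// Standalone sketch of what `hi64` returns: the top 64 bits of the
// little-endian big integer, shifted so the leading bit is set, plus a flag
// recording whether any discarded lower bits were non-zero.
fn hi64(limbs: &[u64]) -> (u64, bool) {
    match limbs {
        [] => (0, false),
        [r0] => (r0 << r0.leading_zeros(), false),
        // Little-endian storage: the most significant limb is last.
        [rest @ .., r1, r0] => {
            let ls = r0.leading_zeros();
            let v = if ls == 0 { *r0 } else { (r0 << ls) | (r1 >> (64 - ls)) };
            let truncated = (r1 << ls) != 0 || rest.iter().any(|&x| x != 0);
            (v, truncated)
        },
    }
}

fn main() {
    // 2^64 + 3 needs 65 bits: the top 64 are 2^63 + 1, and a set bit was lost.
    assert_eq!(hi64(&[3, 1]), ((1u64 << 63) | 1, true));
}
```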
+/// +/// Use an exponentiation by squaring method, since it reduces the time +/// complexity of the multiplication to ~`O(log(n))` for the squaring, +/// and `O(n*m)` for the result. Since `m` is typically a lower-order +/// factor, this significantly reduces the number of multiplications +/// we need to do. Iteratively multiplying by small powers follows +/// the nth triangular number series, which scales as `O(p^2)`, but +/// where `p` is `n+m`. In short, it scales very poorly. +/// +/// Practically.... +/// +/// Exponentiation by Squaring: +/// running 2 tests +/// test bigcomp_f32_lexical ... bench: 1,018 ns/iter (+/- 78) +/// test bigcomp_f64_lexical ... bench: 3,639 ns/iter (+/- 1,007) +/// +/// Exponentiation by Iterative Small Powers: +/// running 2 tests +/// test bigcomp_f32_lexical ... bench: 518 ns/iter (+/- 31) +/// test bigcomp_f64_lexical ... bench: 583 ns/iter (+/- 47) +/// +/// Exponentiation by Iterative Large Powers (of 2): +/// running 2 tests +/// test bigcomp_f32_lexical ... bench: 671 ns/iter (+/- 31) +/// test bigcomp_f64_lexical ... bench: 1,394 ns/iter (+/- 47) +/// +/// The following benchmarks were run on `1 * 5^300`, using native `pow`, +/// a version with only small powers, and one with pre-computed powers +/// of `5^(3 * max_exp)`, rather than `5^(5 * max_exp)`. +/// +/// However, using large powers is crucial for good performance for higher +/// powers. +/// pow/default time: [426.20 ns 427.96 ns 429.89 ns] +/// pow/small time: [2.9270 us 2.9411 us 2.9565 us] +/// pow/large:3 time: [838.51 ns 842.21 ns 846.27 ns] +/// +/// Even using worst-case scenarios, exponentiation by squaring is +/// significantly slower for our workloads. Just multiply by small powers, +/// in simple cases, and use precalculated large powers in other cases. +/// +/// Furthermore, using sufficiently big large powers is also crucial for +/// performance. This is a tradeoff of binary size and performance, and +/// using a single value at ~`5^(5 * max_exp)` seems optimal. +pub fn pow(x: &mut VecType, mut exp: u32) -> Option<()> { + // Minimize the number of iterations for large exponents: just + // do a few steps with a large powers. + #[cfg(not(feature = "compact"))] + { + while exp >= LARGE_POW5_STEP { + large_mul(x, &LARGE_POW5)?; + exp -= LARGE_POW5_STEP; + } + } + + // Now use our pre-computed small powers iteratively. + // This is calculated as `⌊log(2^BITS - 1, 5)⌋`. + let small_step = if LIMB_BITS == 32 { + 13 + } else { + 27 + }; + let max_native = (5 as Limb).pow(small_step); + while exp >= small_step { + small_mul(x, max_native)?; + exp -= small_step; + } + if exp != 0 { + // SAFETY: safe, since `exp < small_step`. + let small_power = unsafe { f64::int_pow_fast_path(exp as usize, 5) }; + small_mul(x, small_power as Limb)?; + } + Some(()) +} + +// SCALAR +// ------ + +/// Add two small integers and return the resulting value and if overflow happens. +#[inline(always)] +pub fn scalar_add(x: Limb, y: Limb) -> (Limb, bool) { + x.overflowing_add(y) +} + +/// Multiply two small integers (with carry) (and return the overflow contribution). +/// +/// Returns the (low, high) components. +#[inline(always)] +pub fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) { + // Cannot overflow, as long as wide is 2x as wide. 
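[Editor's note] The benchmark discussion above motivates multiplying by a few large precomputed powers and then by the biggest power of five that fits in a limb. A standalone `u128` sketch of that small-power loop, with the step size for 64-bit limbs taken from the comments in `pow`:

```rust
// Sketch of the small-power loop in `pow`, using u128 so it runs standalone.
// For 64-bit limbs the largest power of five that fits is 5^27, so we multiply
// by that repeatedly and finish with whatever exponent remains.
fn mul_pow5(mut x: u128, mut exp: u32) -> u128 {
    const SMALL_STEP: u32 = 27; // floor(log5(2^64 - 1))
    const MAX_NATIVE: u128 = 7_450_580_596_923_828_125; // 5^27
    while exp >= SMALL_STEP {
        x *= MAX_NATIVE;
        exp -= SMALL_STEP;
    }
    if exp != 0 {
        x *= 5u128.pow(exp);
    }
    x
}

fn main() {
    assert_eq!(mul_pow5(1, 30), 5u128.pow(30));
}
```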
This is because + // the following is always true: + // `Wide::MAX - (Narrow::MAX * Narrow::MAX) >= Narrow::MAX` + let z: Wide = (x as Wide) * (y as Wide) + (carry as Wide); + (z as Limb, (z >> LIMB_BITS) as Limb) +} + +// SMALL +// ----- + +/// Add small integer to bigint starting from offset. +#[inline] +pub fn small_add_from(x: &mut VecType, y: Limb, start: usize) -> Option<()> { + let mut index = start; + let mut carry = y; + while carry != 0 && index < x.len() { + let result = scalar_add(x[index], carry); + x[index] = result.0; + carry = result.1 as Limb; + index += 1; + } + // If we carried past all the elements, add to the end of the buffer. + if carry != 0 { + x.try_push(carry)?; + } + Some(()) +} + +/// Add small integer to bigint. +#[inline(always)] +pub fn small_add(x: &mut VecType, y: Limb) -> Option<()> { + small_add_from(x, y, 0) +} + +/// Multiply bigint by small integer. +#[inline] +pub fn small_mul(x: &mut VecType, y: Limb) -> Option<()> { + let mut carry = 0; + for xi in x.iter_mut() { + let result = scalar_mul(*xi, y, carry); + *xi = result.0; + carry = result.1; + } + // If we carried past all the elements, add to the end of the buffer. + if carry != 0 { + x.try_push(carry)?; + } + Some(()) +} + +// LARGE +// ----- + +/// Add bigint to bigint starting from offset. +pub fn large_add_from(x: &mut VecType, y: &[Limb], start: usize) -> Option<()> { + // The effective x buffer is from `xstart..x.len()`, so we need to treat + // that as the current range. If the effective y buffer is longer, need + // to resize to that, + the start index. + if y.len() > x.len().saturating_sub(start) { + // Ensure we panic if we can't extend the buffer. + // This avoids any unsafe behavior afterwards. + x.try_resize(y.len() + start, 0)?; + } + + // Iteratively add elements from y to x. + let mut carry = false; + for (index, &yi) in y.iter().enumerate() { + // We panicked in `try_resize` if this wasn't true. + let xi = x.get_mut(start + index).unwrap(); + + // Only one op of the two ops can overflow, since we added at max + // Limb::max_value() + Limb::max_value(). Add the previous carry, + // and store the current carry for the next. + let result = scalar_add(*xi, yi); + *xi = result.0; + let mut tmp = result.1; + if carry { + let result = scalar_add(*xi, 1); + *xi = result.0; + tmp |= result.1; + } + carry = tmp; + } + + // Handle overflow. + if carry { + small_add_from(x, 1, y.len() + start)?; + } + Some(()) +} + +/// Add bigint to bigint. +#[inline(always)] +pub fn large_add(x: &mut VecType, y: &[Limb]) -> Option<()> { + large_add_from(x, y, 0) +} + +/// Grade-school multiplication algorithm. +/// +/// Slow, naive algorithm, using limb-bit bases and just shifting left for +/// each iteration. This could be optimized with numerous other algorithms, +/// but it's extremely simple, and works in O(n*m) time, which is fine +/// by me. Each iteration, of which there are `m` iterations, requires +/// `n` multiplications, and `n` additions, or grade-school multiplication. +/// +/// Don't use Karatsuba multiplication, since out implementation seems to +/// be slower asymptotically, which is likely just due to the small sizes +/// we deal with here. 
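[Editor's note] The scalar and small-value routines above carry the high half of each widening multiply into the next limb. A toy version with 8-bit limbs, so the carry chain is easy to see; the real code uses `Limb`/`Wide` (u64/u128 or u32/u64) instead:

```rust
// Toy sketch of `small_mul`: multiply a little-endian big integer in place by
// a single small value, propagating the high half of each product as a carry.
fn small_mul(limbs: &mut Vec<u8>, y: u8) {
    let mut carry: u8 = 0;
    for limb in limbs.iter_mut() {
        let wide = (*limb as u16) * (y as u16) + (carry as u16);
        *limb = wide as u8;        // low half stays in this limb
        carry = (wide >> 8) as u8; // high half carries into the next limb
    }
    if carry != 0 {
        limbs.push(carry); // grow the buffer, like `try_push`
    }
}

fn main() {
    let mut x = vec![0xff, 0x01]; // 0x01ff = 511, little-endian
    small_mul(&mut x, 10);
    assert_eq!(x, vec![0xf6, 0x13]); // 5110 = 0x13f6
}
```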
For example, running on the following data: +/// +/// ```text +/// const SMALL_X: &[u32] = &[ +/// 766857581, 3588187092, 1583923090, 2204542082, 1564708913, 2695310100, 3676050286, +/// 1022770393, 468044626, 446028186 +/// ]; +/// const SMALL_Y: &[u32] = &[ +/// 3945492125, 3250752032, 1282554898, 1708742809, 1131807209, 3171663979, 1353276095, +/// 1678845844, 2373924447, 3640713171 +/// ]; +/// const LARGE_X: &[u32] = &[ +/// 3647536243, 2836434412, 2154401029, 1297917894, 137240595, 790694805, 2260404854, +/// 3872698172, 690585094, 99641546, 3510774932, 1672049983, 2313458559, 2017623719, +/// 638180197, 1140936565, 1787190494, 1797420655, 14113450, 2350476485, 3052941684, +/// 1993594787, 2901001571, 4156930025, 1248016552, 848099908, 2660577483, 4030871206, +/// 692169593, 2835966319, 1781364505, 4266390061, 1813581655, 4210899844, 2137005290, +/// 2346701569, 3715571980, 3386325356, 1251725092, 2267270902, 474686922, 2712200426, +/// 197581715, 3087636290, 1379224439, 1258285015, 3230794403, 2759309199, 1494932094, +/// 326310242 +/// ]; +/// const LARGE_Y: &[u32] = &[ +/// 1574249566, 868970575, 76716509, 3198027972, 1541766986, 1095120699, 3891610505, +/// 2322545818, 1677345138, 865101357, 2650232883, 2831881215, 3985005565, 2294283760, +/// 3468161605, 393539559, 3665153349, 1494067812, 106699483, 2596454134, 797235106, +/// 705031740, 1209732933, 2732145769, 4122429072, 141002534, 790195010, 4014829800, +/// 1303930792, 3649568494, 308065964, 1233648836, 2807326116, 79326486, 1262500691, +/// 621809229, 2258109428, 3819258501, 171115668, 1139491184, 2979680603, 1333372297, +/// 1657496603, 2790845317, 4090236532, 4220374789, 601876604, 1828177209, 2372228171, +/// 2247372529 +/// ]; +/// ``` +/// +/// We get the following results: + +/// ```text +/// mul/small:long time: [220.23 ns 221.47 ns 222.81 ns] +/// Found 4 outliers among 100 measurements (4.00%) +/// 2 (2.00%) high mild +/// 2 (2.00%) high severe +/// mul/small:karatsuba time: [233.88 ns 234.63 ns 235.44 ns] +/// Found 11 outliers among 100 measurements (11.00%) +/// 8 (8.00%) high mild +/// 3 (3.00%) high severe +/// mul/large:long time: [1.9365 us 1.9455 us 1.9558 us] +/// Found 12 outliers among 100 measurements (12.00%) +/// 7 (7.00%) high mild +/// 5 (5.00%) high severe +/// mul/large:karatsuba time: [4.4250 us 4.4515 us 4.4812 us] +/// ``` +/// +/// In short, Karatsuba multiplication is never worthwhile for out use-case. +pub fn long_mul(x: &[Limb], y: &[Limb]) -> Option { + // Using the immutable value, multiply by all the scalars in y, using + // the algorithm defined above. Use a single buffer to avoid + // frequent reallocations. Handle the first case to avoid a redundant + // addition, since we know y.len() >= 1. + let mut z = VecType::try_from(x)?; + if !y.is_empty() { + let y0 = y[0]; + small_mul(&mut z, y0)?; + + for (index, &yi) in y.iter().enumerate().skip(1) { + if yi != 0 { + let mut zi = VecType::try_from(x)?; + small_mul(&mut zi, yi)?; + large_add_from(&mut z, &zi, index)?; + } + } + } + + z.normalize(); + Some(z) +} + +/// Multiply bigint by bigint using grade-school multiplication algorithm. +#[inline(always)] +pub fn large_mul(x: &mut VecType, y: &[Limb]) -> Option<()> { + // Karatsuba multiplication never makes sense, so just use grade school + // multiplication. + if y.len() == 1 { + // SAFETY: safe since `y.len() == 1`. + small_mul(x, y[0])?; + } else { + *x = long_mul(y, x)?; + } + Some(()) +} + +// SHIFT +// ----- + +/// Shift-left `n` bits inside a buffer. 
+#[inline] +pub fn shl_bits(x: &mut VecType, n: usize) -> Option<()> { + debug_assert!(n != 0); + + // Internally, for each item, we shift left by n, and add the previous + // right shifted limb-bits. + // For example, we transform (for u8) shifted left 2, to: + // b10100100 b01000010 + // b10 b10010001 b00001000 + debug_assert!(n < LIMB_BITS); + let rshift = LIMB_BITS - n; + let lshift = n; + let mut prev: Limb = 0; + for xi in x.iter_mut() { + let tmp = *xi; + *xi <<= lshift; + *xi |= prev >> rshift; + prev = tmp; + } + + // Always push the carry, even if it creates a non-normal result. + let carry = prev >> rshift; + if carry != 0 { + x.try_push(carry)?; + } + + Some(()) +} + +/// Shift-left `n` limbs inside a buffer. +#[inline] +pub fn shl_limbs(x: &mut VecType, n: usize) -> Option<()> { + debug_assert!(n != 0); + if n + x.len() > x.capacity() { + None + } else if !x.is_empty() { + let len = n + x.len(); + // SAFE: since x is not empty, and `x.len() + n <= x.capacity()`. + unsafe { + // Move the elements. + let src = x.as_ptr(); + let dst = x.as_mut_ptr().add(n); + ptr::copy(src, dst, x.len()); + // Write our 0s. + ptr::write_bytes(x.as_mut_ptr(), 0, n); + x.set_len(len); + } + Some(()) + } else { + Some(()) + } +} + +/// Shift-left buffer by n bits. +#[inline] +pub fn shl(x: &mut VecType, n: usize) -> Option<()> { + let rem = n % LIMB_BITS; + let div = n / LIMB_BITS; + if rem != 0 { + shl_bits(x, rem)?; + } + if div != 0 { + shl_limbs(x, div)?; + } + Some(()) +} + +/// Get number of leading zero bits in the storage. +#[inline] +pub fn leading_zeros(x: &[Limb]) -> u32 { + let length = x.len(); + // wrapping_sub is fine, since it'll just return None. + if let Some(&value) = x.get(length.wrapping_sub(1)) { + value.leading_zeros() + } else { + 0 + } +} + +/// Calculate the bit-length of the big-integer. +#[inline] +pub fn bit_length(x: &[Limb]) -> u32 { + let nlz = leading_zeros(x); + LIMB_BITS as u32 * x.len() as u32 - nlz +} + +// LIMB +// ---- + +// Type for a single limb of the big integer. +// +// A limb is analogous to a digit in base10, except, it stores 32-bit +// or 64-bit numbers instead. We want types where 64-bit multiplication +// is well-supported by the architecture, rather than emulated in 3 +// instructions. The quickest way to check this support is using a +// cross-compiler for numerous architectures, along with the following +// source file and command: +// +// Compile with `gcc main.c -c -S -O3 -masm=intel` +// +// And the source code is: +// ```text +// #include +// +// struct i128 { +// uint64_t hi; +// uint64_t lo; +// }; +// +// // Type your code here, or load an example. +// struct i128 square(uint64_t x, uint64_t y) { +// __int128 prod = (__int128)x * (__int128)y; +// struct i128 z; +// z.hi = (uint64_t)(prod >> 64); +// z.lo = (uint64_t)prod; +// return z; +// } +// ``` +// +// If the result contains `call __multi3`, then the multiplication +// is emulated by the compiler. Otherwise, it's natively supported. +// +// This should be all-known 64-bit platforms supported by Rust. +// https://forge.rust-lang.org/platform-support.html +// +// # Supported +// +// Platforms where native 128-bit multiplication is explicitly supported: +// - x86_64 (Supported via `MUL`). +// - mips64 (Supported via `DMULTU`, which `HI` and `LO` can be read-from). +// - s390x (Supported via `MLGR`). +// +// # Efficient +// +// Platforms where native 64-bit multiplication is supported and +// you can extract hi-lo for 64-bit multiplications. 
+// - aarch64 (Requires `UMULH` and `MUL` to capture high and low bits). +// - powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits). +// - riscv64 (Requires `MUL` and `MULH` to capture high and low bits). +// +// # Unsupported +// +// Platforms where native 128-bit multiplication is not supported, +// requiring software emulation. +// sparc64 (`UMUL` only supports double-word arguments). +// sparcv9 (Same as sparc64). +// +// These tests are run via `xcross`, my own library for C cross-compiling, +// which supports numerous targets (far in excess of Rust's tier 1 support, +// or rust-embedded/cross's list). xcross may be found here: +// https://github.com/Alexhuszagh/xcross +// +// To compile for the given target, run: +// `xcross gcc main.c -c -S -O3 --target $target` +// +// All 32-bit architectures inherently do not have support. That means +// we can essentially look for 64-bit architectures that are not SPARC. + +#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] +pub type Limb = u64; +#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] +pub type Wide = u128; +#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] +pub const LIMB_BITS: usize = 64; + +#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] +pub type Limb = u32; +#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] +pub type Wide = u64; +#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] +pub const LIMB_BITS: usize = 32; diff --git a/third_party/rust/minimal-lexical/src/extended_float.rs b/third_party/rust/minimal-lexical/src/extended_float.rs new file mode 100644 index 0000000000..7397e199c8 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/extended_float.rs @@ -0,0 +1,24 @@ +// FLOAT TYPE + +#![doc(hidden)] + +use crate::num::Float; + +/// Extended precision floating-point type. +/// +/// Private implementation, exposed only for testing purposes. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct ExtendedFloat { + /// Mantissa for the extended-precision float. + pub mant: u64, + /// Binary exponent for the extended-precision float. + pub exp: i32, +} + +/// Converts an `ExtendedFloat` to the closest machine float type. +#[inline(always)] +pub fn extended_to_float(x: ExtendedFloat) -> F { + let mut word = x.mant; + word |= (x.exp as u64) << F::MANTISSA_SIZE; + F::from_bits(word) +} diff --git a/third_party/rust/minimal-lexical/src/fpu.rs b/third_party/rust/minimal-lexical/src/fpu.rs new file mode 100644 index 0000000000..42059a080a --- /dev/null +++ b/third_party/rust/minimal-lexical/src/fpu.rs @@ -0,0 +1,98 @@ +//! Platform-specific, assembly instructions to avoid +//! intermediate rounding on architectures with FPUs. +//! +//! This is adapted from the implementation in the Rust core library, +//! the original implementation can be [here](https://github.com/rust-lang/rust/blob/master/library/core/src/num/dec2flt/fpu.rs). +//! +//! It is therefore also subject to a Apache2.0/MIT license. + +#![cfg(feature = "nightly")] +#![doc(hidden)] + +pub use fpu_precision::set_precision; + +// On x86, the x87 FPU is used for float operations if the SSE/SSE2 extensions are not available. +// The x87 FPU operates with 80 bits of precision by default, which means that operations will +// round to 80 bits causing double rounding to happen when values are eventually represented as +// 32/64 bit float values. 
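[Editor's note] The `extended_to_float` conversion a few hunks above simply ORs an already rounded mantissa with a biased exponent shifted into place. A small sketch of that packing for `f64`, using the standard IEEE-754 field widths rather than the crate's `Float` trait constants:

```rust
// Sketch of what `extended_to_float::<f64>` does: pack a rounded mantissa and
// an already biased exponent into the IEEE-754 bit layout.
fn extended_to_f64(mant: u64, exp: u64) -> f64 {
    const MANTISSA_SIZE: u64 = 52; // f64 has 52 explicit mantissa bits
    f64::from_bits(mant | (exp << MANTISSA_SIZE))
}

fn main() {
    // 1.5 = (implicit 1 + 2^-1) with biased exponent 1023.
    assert_eq!(extended_to_f64(1 << 51, 1023), 1.5);
}
```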
To overcome this, the FPU control word can be set so that the +// computations are performed in the desired precision. +#[cfg(all(target_arch = "x86", not(target_feature = "sse2")))] +mod fpu_precision { + use core::mem::size_of; + + /// A structure used to preserve the original value of the FPU control word, so that it can be + /// restored when the structure is dropped. + /// + /// The x87 FPU is a 16-bits register whose fields are as follows: + /// + /// | 12-15 | 10-11 | 8-9 | 6-7 | 5 | 4 | 3 | 2 | 1 | 0 | + /// |------:|------:|----:|----:|---:|---:|---:|---:|---:|---:| + /// | | RC | PC | | PM | UM | OM | ZM | DM | IM | + /// + /// The documentation for all of the fields is available in the IA-32 Architectures Software + /// Developer's Manual (Volume 1). + /// + /// The only field which is relevant for the following code is PC, Precision Control. This + /// field determines the precision of the operations performed by the FPU. It can be set to: + /// - 0b00, single precision i.e., 32-bits + /// - 0b10, double precision i.e., 64-bits + /// - 0b11, double extended precision i.e., 80-bits (default state) + /// The 0b01 value is reserved and should not be used. + pub struct FPUControlWord(u16); + + fn set_cw(cw: u16) { + // SAFETY: the `fldcw` instruction has been audited to be able to work correctly with + // any `u16` + unsafe { + asm!( + "fldcw word ptr [{}]", + in(reg) &cw, + options(nostack), + ) + } + } + + /// Sets the precision field of the FPU to `T` and returns a `FPUControlWord`. + pub fn set_precision() -> FPUControlWord { + let mut cw = 0_u16; + + // Compute the value for the Precision Control field that is appropriate for `T`. + let cw_precision = match size_of::() { + 4 => 0x0000, // 32 bits + 8 => 0x0200, // 64 bits + _ => 0x0300, // default, 80 bits + }; + + // Get the original value of the control word to restore it later, when the + // `FPUControlWord` structure is dropped + // SAFETY: the `fnstcw` instruction has been audited to be able to work correctly with + // any `u16` + unsafe { + asm!( + "fnstcw word ptr [{}]", + in(reg) &mut cw, + options(nostack), + ) + } + + // Set the control word to the desired precision. This is achieved by masking away the old + // precision (bits 8 and 9, 0x300) and replacing it with the precision flag computed above. + set_cw((cw & 0xFCFF) | cw_precision); + + FPUControlWord(cw) + } + + impl Drop for FPUControlWord { + fn drop(&mut self) { + set_cw(self.0) + } + } +} + +// In most architectures, floating point operations have an explicit bit size, therefore the +// precision of the computation is determined on a per-operation basis. +#[cfg(any(not(target_arch = "x86"), target_feature = "sse2"))] +mod fpu_precision { + pub fn set_precision() { + } +} diff --git a/third_party/rust/minimal-lexical/src/heapvec.rs b/third_party/rust/minimal-lexical/src/heapvec.rs new file mode 100644 index 0000000000..035926018a --- /dev/null +++ b/third_party/rust/minimal-lexical/src/heapvec.rs @@ -0,0 +1,190 @@ +//! Simple heap-allocated vector. + +#![cfg(feature = "alloc")] +#![doc(hidden)] + +use crate::bigint; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +use core::{cmp, ops}; +#[cfg(feature = "std")] +use std::vec::Vec; + +/// Simple heap vector implementation. +#[derive(Clone)] +pub struct HeapVec { + /// The heap-allocated buffer for the elements. + data: Vec, +} + +#[allow(clippy::new_without_default)] +impl HeapVec { + /// Construct an empty vector. 
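[Editor's note] The control-word handling in `fpu.rs` above clears the Precision Control field with the mask 0xFCFF and ORs in the desired setting. A quick standalone check of that bit arithmetic; the 0x037F starting value is the typical x87 power-on control word and is assumed here, not taken from the patch:

```rust
// Illustrative check of the control-word masking described in fpu.rs.
fn with_precision(cw: u16, pc_bits: u16) -> u16 {
    (cw & 0xFCFF) | pc_bits // clear PC (bits 8-9), then install the new value
}

fn main() {
    let default_cw = 0x037F; // assumed x87 default: 80-bit precision, exceptions masked
    assert_eq!(with_precision(default_cw, 0x0200), 0x027F); // switch to 64-bit
}
```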
+ #[inline] + pub fn new() -> Self { + Self { + data: Vec::with_capacity(bigint::BIGINT_LIMBS), + } + } + + /// Construct a vector from an existing slice. + #[inline] + pub fn try_from(x: &[bigint::Limb]) -> Option { + let mut vec = Self::new(); + vec.try_extend(x)?; + Some(vec) + } + + /// Sets the length of a vector. + /// + /// This will explicitly set the size of the vector, without actually + /// modifying its buffers, so it is up to the caller to ensure that the + /// vector is actually the specified size. + /// + /// # Safety + /// + /// Safe as long as `len` is less than `self.capacity()` and has been initialized. + #[inline] + pub unsafe fn set_len(&mut self, len: usize) { + debug_assert!(len <= bigint::BIGINT_LIMBS); + unsafe { self.data.set_len(len) }; + } + + /// The number of elements stored in the vector. + #[inline] + pub fn len(&self) -> usize { + self.data.len() + } + + /// If the vector is empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// The number of items the vector can hold. + #[inline] + pub fn capacity(&self) -> usize { + self.data.capacity() + } + + /// Append an item to the vector. + #[inline] + pub fn try_push(&mut self, value: bigint::Limb) -> Option<()> { + self.data.push(value); + Some(()) + } + + /// Remove an item from the end of the vector and return it, or None if empty. + #[inline] + pub fn pop(&mut self) -> Option { + self.data.pop() + } + + /// Copy elements from a slice and append them to the vector. + #[inline] + pub fn try_extend(&mut self, slc: &[bigint::Limb]) -> Option<()> { + self.data.extend_from_slice(slc); + Some(()) + } + + /// Try to resize the buffer. + /// + /// If the new length is smaller than the current length, truncate + /// the input. If it's larger, then append elements to the buffer. + #[inline] + pub fn try_resize(&mut self, len: usize, value: bigint::Limb) -> Option<()> { + self.data.resize(len, value); + Some(()) + } + + // HI + + /// Get the high 64 bits from the vector. + #[inline(always)] + pub fn hi64(&self) -> (u64, bool) { + bigint::hi64(&self.data) + } + + // FROM + + /// Create StackVec from u64 value. + #[inline(always)] + pub fn from_u64(x: u64) -> Self { + bigint::from_u64(x) + } + + // MATH + + /// Normalize the integer, so any leading zero values are removed. + #[inline] + pub fn normalize(&mut self) { + bigint::normalize(self) + } + + /// Get if the big integer is normalized. + #[inline] + pub fn is_normalized(&self) -> bool { + bigint::is_normalized(self) + } + + /// AddAssign small integer. + #[inline] + pub fn add_small(&mut self, y: bigint::Limb) -> Option<()> { + bigint::small_add(self, y) + } + + /// MulAssign small integer. 
+ #[inline] + pub fn mul_small(&mut self, y: bigint::Limb) -> Option<()> { + bigint::small_mul(self, y) + } +} + +impl PartialEq for HeapVec { + #[inline] + #[allow(clippy::op_ref)] + fn eq(&self, other: &Self) -> bool { + use core::ops::Deref; + self.len() == other.len() && self.deref() == other.deref() + } +} + +impl Eq for HeapVec { +} + +impl cmp::PartialOrd for HeapVec { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + Some(bigint::compare(self, other)) + } +} + +impl cmp::Ord for HeapVec { + #[inline] + fn cmp(&self, other: &Self) -> cmp::Ordering { + bigint::compare(self, other) + } +} + +impl ops::Deref for HeapVec { + type Target = [bigint::Limb]; + #[inline] + fn deref(&self) -> &[bigint::Limb] { + &self.data + } +} + +impl ops::DerefMut for HeapVec { + #[inline] + fn deref_mut(&mut self) -> &mut [bigint::Limb] { + &mut self.data + } +} + +impl ops::MulAssign<&[bigint::Limb]> for HeapVec { + #[inline] + fn mul_assign(&mut self, rhs: &[bigint::Limb]) { + bigint::large_mul(self, rhs).unwrap(); + } +} diff --git a/third_party/rust/minimal-lexical/src/lemire.rs b/third_party/rust/minimal-lexical/src/lemire.rs new file mode 100644 index 0000000000..99b1ae7059 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/lemire.rs @@ -0,0 +1,225 @@ +//! Implementation of the Eisel-Lemire algorithm. +//! +//! This is adapted from [fast-float-rust](https://github.com/aldanor/fast-float-rust), +//! a port of [fast_float](https://github.com/fastfloat/fast_float) to Rust. + +#![cfg(not(feature = "compact"))] +#![doc(hidden)] + +use crate::extended_float::ExtendedFloat; +use crate::num::Float; +use crate::number::Number; +use crate::table::{LARGEST_POWER_OF_FIVE, POWER_OF_FIVE_128, SMALLEST_POWER_OF_FIVE}; + +/// Ensure truncation of digits doesn't affect our computation, by doing 2 passes. +#[inline] +pub fn lemire(num: &Number) -> ExtendedFloat { + // If significant digits were truncated, then we can have rounding error + // only if `mantissa + 1` produces a different result. We also avoid + // redundantly using the Eisel-Lemire algorithm if it was unable to + // correctly round on the first pass. + let mut fp = compute_float::(num.exponent, num.mantissa); + if num.many_digits && fp.exp >= 0 && fp != compute_float::(num.exponent, num.mantissa + 1) { + // Need to re-calculate, since the previous values are rounded + // when the slow path algorithm expects a normalized extended float. + fp = compute_error::(num.exponent, num.mantissa); + } + fp +} + +/// Compute a float using an extended-precision representation. +/// +/// Fast conversion of a the significant digits and decimal exponent +/// a float to a extended representation with a binary float. This +/// algorithm will accurately parse the vast majority of cases, +/// and uses a 128-bit representation (with a fallback 192-bit +/// representation). +/// +/// This algorithm scales the exponent by the decimal exponent +/// using pre-computed powers-of-5, and calculates if the +/// representation can be unambiguously rounded to the nearest +/// machine float. Near-halfway cases are not handled here, +/// and are represented by a negative, biased binary exponent. +/// +/// The algorithm is described in detail in "Daniel Lemire, Number Parsing +/// at a Gigabyte per Second" in section 5, "Fast Algorithm", and +/// section 6, "Exact Numbers And Ties", available online: +/// . 
+pub fn compute_float(q: i32, mut w: u64) -> ExtendedFloat { + let fp_zero = ExtendedFloat { + mant: 0, + exp: 0, + }; + let fp_inf = ExtendedFloat { + mant: 0, + exp: F::INFINITE_POWER, + }; + + // Short-circuit if the value can only be a literal 0 or infinity. + if w == 0 || q < F::SMALLEST_POWER_OF_TEN { + return fp_zero; + } else if q > F::LARGEST_POWER_OF_TEN { + return fp_inf; + } + // Normalize our significant digits, so the most-significant bit is set. + let lz = w.leading_zeros() as i32; + w <<= lz; + let (lo, hi) = compute_product_approx(q, w, F::MANTISSA_SIZE as usize + 3); + if lo == 0xFFFF_FFFF_FFFF_FFFF { + // If we have failed to approximate w x 5^-q with our 128-bit value. + // Since the addition of 1 could lead to an overflow which could then + // round up over the half-way point, this can lead to improper rounding + // of a float. + // + // However, this can only occur if q ∈ [-27, 55]. The upper bound of q + // is 55 because 5^55 < 2^128, however, this can only happen if 5^q > 2^64, + // since otherwise the product can be represented in 64-bits, producing + // an exact result. For negative exponents, rounding-to-even can + // only occur if 5^-q < 2^64. + // + // For detailed explanations of rounding for negative exponents, see + // . For detailed + // explanations of rounding for positive exponents, see + // . + let inside_safe_exponent = (q >= -27) && (q <= 55); + if !inside_safe_exponent { + return compute_error_scaled::(q, hi, lz); + } + } + let upperbit = (hi >> 63) as i32; + let mut mantissa = hi >> (upperbit + 64 - F::MANTISSA_SIZE - 3); + let mut power2 = power(q) + upperbit - lz - F::MINIMUM_EXPONENT; + if power2 <= 0 { + if -power2 + 1 >= 64 { + // Have more than 64 bits below the minimum exponent, must be 0. + return fp_zero; + } + // Have a subnormal value. + mantissa >>= -power2 + 1; + mantissa += mantissa & 1; + mantissa >>= 1; + power2 = (mantissa >= (1_u64 << F::MANTISSA_SIZE)) as i32; + return ExtendedFloat { + mant: mantissa, + exp: power2, + }; + } + // Need to handle rounding ties. Normally, we need to round up, + // but if we fall right in between and and we have an even basis, we + // need to round down. + // + // This will only occur if: + // 1. The lower 64 bits of the 128-bit representation is 0. + // IE, 5^q fits in single 64-bit word. + // 2. The least-significant bit prior to truncated mantissa is odd. + // 3. All the bits truncated when shifting to mantissa bits + 1 are 0. + // + // Or, we may fall between two floats: we are exactly halfway. + if lo <= 1 + && q >= F::MIN_EXPONENT_ROUND_TO_EVEN + && q <= F::MAX_EXPONENT_ROUND_TO_EVEN + && mantissa & 3 == 1 + && (mantissa << (upperbit + 64 - F::MANTISSA_SIZE - 3)) == hi + { + // Zero the lowest bit, so we don't round up. + mantissa &= !1_u64; + } + // Round-to-even, then shift the significant digits into place. + mantissa += mantissa & 1; + mantissa >>= 1; + if mantissa >= (2_u64 << F::MANTISSA_SIZE) { + // Rounding up overflowed, so the carry bit is set. Set the + // mantissa to 1 (only the implicit, hidden bit is set) and + // increase the exponent. + mantissa = 1_u64 << F::MANTISSA_SIZE; + power2 += 1; + } + // Zero out the hidden bit. + mantissa &= !(1_u64 << F::MANTISSA_SIZE); + if power2 >= F::INFINITE_POWER { + // Exponent is above largest normal value, must be infinite. + return fp_inf; + } + ExtendedFloat { + mant: mantissa, + exp: power2, + } +} + +/// Fallback algorithm to calculate the non-rounded representation. 
+/// This calculates the extended representation, and then normalizes +/// the resulting representation, so the high bit is set. +#[inline] +pub fn compute_error(q: i32, mut w: u64) -> ExtendedFloat { + let lz = w.leading_zeros() as i32; + w <<= lz; + let hi = compute_product_approx(q, w, F::MANTISSA_SIZE as usize + 3).1; + compute_error_scaled::(q, hi, lz) +} + +/// Compute the error from a mantissa scaled to the exponent. +#[inline] +pub fn compute_error_scaled(q: i32, mut w: u64, lz: i32) -> ExtendedFloat { + // Want to normalize the float, but this is faster than ctlz on most architectures. + let hilz = (w >> 63) as i32 ^ 1; + w <<= hilz; + let power2 = power(q as i32) + F::EXPONENT_BIAS - hilz - lz - 62; + + ExtendedFloat { + mant: w, + exp: power2 + F::INVALID_FP, + } +} + +/// Calculate a base 2 exponent from a decimal exponent. +/// This uses a pre-computed integer approximation for +/// log2(10), where 217706 / 2^16 is accurate for the +/// entire range of non-finite decimal exponents. +#[inline] +fn power(q: i32) -> i32 { + (q.wrapping_mul(152_170 + 65536) >> 16) + 63 +} + +#[inline] +fn full_multiplication(a: u64, b: u64) -> (u64, u64) { + let r = (a as u128) * (b as u128); + (r as u64, (r >> 64) as u64) +} + +// This will compute or rather approximate w * 5**q and return a pair of 64-bit words +// approximating the result, with the "high" part corresponding to the most significant +// bits and the low part corresponding to the least significant bits. +fn compute_product_approx(q: i32, w: u64, precision: usize) -> (u64, u64) { + debug_assert!(q >= SMALLEST_POWER_OF_FIVE); + debug_assert!(q <= LARGEST_POWER_OF_FIVE); + debug_assert!(precision <= 64); + + let mask = if precision < 64 { + 0xFFFF_FFFF_FFFF_FFFF_u64 >> precision + } else { + 0xFFFF_FFFF_FFFF_FFFF_u64 + }; + + // 5^q < 2^64, then the multiplication always provides an exact value. + // That means whenever we need to round ties to even, we always have + // an exact value. + let index = (q - SMALLEST_POWER_OF_FIVE) as usize; + let (lo5, hi5) = POWER_OF_FIVE_128[index]; + // Only need one multiplication as long as there is 1 zero but + // in the explicit mantissa bits, +1 for the hidden bit, +1 to + // determine the rounding direction, +1 for if the computed + // product has a leading zero. + let (mut first_lo, mut first_hi) = full_multiplication(w, lo5); + if first_hi & mask == mask { + // Need to do a second multiplication to get better precision + // for the lower product. This will always be exact + // where q is < 55, since 5^55 < 2^128. If this wraps, + // then we need to need to round up the hi product. + let (_, second_hi) = full_multiplication(w, hi5); + first_lo = first_lo.wrapping_add(second_hi); + if second_hi > first_lo { + first_hi += 1; + } + } + (first_lo, first_hi) +} diff --git a/third_party/rust/minimal-lexical/src/lib.rs b/third_party/rust/minimal-lexical/src/lib.rs new file mode 100644 index 0000000000..75f923475f --- /dev/null +++ b/third_party/rust/minimal-lexical/src/lib.rs @@ -0,0 +1,68 @@ +//! Fast, minimal float-parsing algorithm. +//! +//! minimal-lexical has a simple, high-level API with a single +//! exported function: [`parse_float`]. +//! +//! [`parse_float`] expects a forward iterator for the integer +//! and fraction digits, as well as a parsed exponent as an [`i32`]. +//! +//! For more examples, please see [simple-example](https://github.com/Alexhuszagh/minimal-lexical/blob/master/examples/simple.rs). +//! +//! EXAMPLES +//! -------- +//! +//! ``` +//! extern crate minimal_lexical; +//! 
+//! // Let's say we want to parse "1.2345". +//! // First, we need an external parser to extract the integer digits ("1"), +//! // the fraction digits ("2345"), and then parse the exponent to a 32-bit +//! // integer (0). +//! // Warning: +//! // -------- +//! // Please note that leading zeros must be trimmed from the integer, +//! // and trailing zeros must be trimmed from the fraction. This cannot +//! // be handled by minimal-lexical, since we accept iterators. +//! let integer = b"1"; +//! let fraction = b"2345"; +//! let float: f64 = minimal_lexical::parse_float(integer.iter(), fraction.iter(), 0); +//! println!("float={:?}", float); // 1.235 +//! ``` +//! +//! [`parse_float`]: fn.parse_float.html +//! [`i32`]: https://doc.rust-lang.org/stable/std/primitive.i32.html + +// FEATURES + +// We want to have the same safety guarantees as Rust core, +// so we allow unused unsafe to clearly document safety guarantees. +#![allow(unused_unsafe)] +#![cfg_attr(feature = "lint", warn(unsafe_op_in_unsafe_fn))] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(all(feature = "alloc", not(feature = "std")))] +extern crate alloc; + +pub mod bellerophon; +pub mod bigint; +pub mod extended_float; +pub mod fpu; +pub mod heapvec; +pub mod lemire; +pub mod libm; +pub mod mask; +pub mod num; +pub mod number; +pub mod parse; +pub mod rounding; +pub mod slow; +pub mod stackvec; +pub mod table; + +mod table_bellerophon; +mod table_lemire; +mod table_small; + +// API +pub use self::num::Float; +pub use self::parse::parse_float; diff --git a/third_party/rust/minimal-lexical/src/libm.rs b/third_party/rust/minimal-lexical/src/libm.rs new file mode 100644 index 0000000000..c9f93d36ac --- /dev/null +++ b/third_party/rust/minimal-lexical/src/libm.rs @@ -0,0 +1,1238 @@ +//! A small number of math routines for floats and doubles. +//! +//! These are adapted from libm, a port of musl libc's libm to Rust. +//! libm can be found online [here](https://github.com/rust-lang/libm), +//! and is similarly licensed under an Apache2.0/MIT license + +#![cfg(all(not(feature = "std"), feature = "compact"))] +#![doc(hidden)] + +/* origin: FreeBSD /usr/src/lib/msun/src/e_powf.c */ +/* + * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com. + */ +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunPro, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +/// # Safety +/// +/// Safe if `index < array.len()`. +macro_rules! i { + ($array:ident, $index:expr) => { + // SAFETY: safe if `index < array.len()`. 
+ unsafe { *$array.get_unchecked($index) } + }; +} + +pub fn powf(x: f32, y: f32) -> f32 { + const BP: [f32; 2] = [1.0, 1.5]; + const DP_H: [f32; 2] = [0.0, 5.84960938e-01]; /* 0x3f15c000 */ + const DP_L: [f32; 2] = [0.0, 1.56322085e-06]; /* 0x35d1cfdc */ + const TWO24: f32 = 16777216.0; /* 0x4b800000 */ + const HUGE: f32 = 1.0e30; + const TINY: f32 = 1.0e-30; + const L1: f32 = 6.0000002384e-01; /* 0x3f19999a */ + const L2: f32 = 4.2857143283e-01; /* 0x3edb6db7 */ + const L3: f32 = 3.3333334327e-01; /* 0x3eaaaaab */ + const L4: f32 = 2.7272811532e-01; /* 0x3e8ba305 */ + const L5: f32 = 2.3066075146e-01; /* 0x3e6c3255 */ + const L6: f32 = 2.0697501302e-01; /* 0x3e53f142 */ + const P1: f32 = 1.6666667163e-01; /* 0x3e2aaaab */ + const P2: f32 = -2.7777778450e-03; /* 0xbb360b61 */ + const P3: f32 = 6.6137559770e-05; /* 0x388ab355 */ + const P4: f32 = -1.6533901999e-06; /* 0xb5ddea0e */ + const P5: f32 = 4.1381369442e-08; /* 0x3331bb4c */ + const LG2: f32 = 6.9314718246e-01; /* 0x3f317218 */ + const LG2_H: f32 = 6.93145752e-01; /* 0x3f317200 */ + const LG2_L: f32 = 1.42860654e-06; /* 0x35bfbe8c */ + const OVT: f32 = 4.2995665694e-08; /* -(128-log2(ovfl+.5ulp)) */ + const CP: f32 = 9.6179670095e-01; /* 0x3f76384f =2/(3ln2) */ + const CP_H: f32 = 9.6191406250e-01; /* 0x3f764000 =12b cp */ + const CP_L: f32 = -1.1736857402e-04; /* 0xb8f623c6 =tail of cp_h */ + const IVLN2: f32 = 1.4426950216e+00; + const IVLN2_H: f32 = 1.4426879883e+00; + const IVLN2_L: f32 = 7.0526075433e-06; + + let mut z: f32; + let mut ax: f32; + let z_h: f32; + let z_l: f32; + let mut p_h: f32; + let mut p_l: f32; + let y1: f32; + let mut t1: f32; + let t2: f32; + let mut r: f32; + let s: f32; + let mut sn: f32; + let mut t: f32; + let mut u: f32; + let mut v: f32; + let mut w: f32; + let i: i32; + let mut j: i32; + let mut k: i32; + let mut yisint: i32; + let mut n: i32; + let hx: i32; + let hy: i32; + let mut ix: i32; + let iy: i32; + let mut is: i32; + + hx = x.to_bits() as i32; + hy = y.to_bits() as i32; + + ix = hx & 0x7fffffff; + iy = hy & 0x7fffffff; + + /* x**0 = 1, even if x is NaN */ + if iy == 0 { + return 1.0; + } + + /* 1**y = 1, even if y is NaN */ + if hx == 0x3f800000 { + return 1.0; + } + + /* NaN if either arg is NaN */ + if ix > 0x7f800000 || iy > 0x7f800000 { + return x + y; + } + + /* determine if y is an odd int when x < 0 + * yisint = 0 ... y is not an integer + * yisint = 1 ... y is an odd int + * yisint = 2 ... 
y is an even int + */ + yisint = 0; + if hx < 0 { + if iy >= 0x4b800000 { + yisint = 2; /* even integer y */ + } else if iy >= 0x3f800000 { + k = (iy >> 23) - 0x7f; /* exponent */ + j = iy >> (23 - k); + if (j << (23 - k)) == iy { + yisint = 2 - (j & 1); + } + } + } + + /* special value of y */ + if iy == 0x7f800000 { + /* y is +-inf */ + if ix == 0x3f800000 { + /* (-1)**+-inf is 1 */ + return 1.0; + } else if ix > 0x3f800000 { + /* (|x|>1)**+-inf = inf,0 */ + return if hy >= 0 { + y + } else { + 0.0 + }; + } else { + /* (|x|<1)**+-inf = 0,inf */ + return if hy >= 0 { + 0.0 + } else { + -y + }; + } + } + if iy == 0x3f800000 { + /* y is +-1 */ + return if hy >= 0 { + x + } else { + 1.0 / x + }; + } + + if hy == 0x40000000 { + /* y is 2 */ + return x * x; + } + + if hy == 0x3f000000 + /* y is 0.5 */ + && hx >= 0 + { + /* x >= +0 */ + return sqrtf(x); + } + + ax = fabsf(x); + /* special value of x */ + if ix == 0x7f800000 || ix == 0 || ix == 0x3f800000 { + /* x is +-0,+-inf,+-1 */ + z = ax; + if hy < 0 { + /* z = (1/|x|) */ + z = 1.0 / z; + } + + if hx < 0 { + if ((ix - 0x3f800000) | yisint) == 0 { + z = (z - z) / (z - z); /* (-1)**non-int is NaN */ + } else if yisint == 1 { + z = -z; /* (x<0)**odd = -(|x|**odd) */ + } + } + return z; + } + + sn = 1.0; /* sign of result */ + if hx < 0 { + if yisint == 0 { + /* (x<0)**(non-int) is NaN */ + return (x - x) / (x - x); + } + + if yisint == 1 { + /* (x<0)**(odd int) */ + sn = -1.0; + } + } + + /* |y| is HUGE */ + if iy > 0x4d000000 { + /* if |y| > 2**27 */ + /* over/underflow if x is not close to one */ + if ix < 0x3f7ffff8 { + return if hy < 0 { + sn * HUGE * HUGE + } else { + sn * TINY * TINY + }; + } + + if ix > 0x3f800007 { + return if hy > 0 { + sn * HUGE * HUGE + } else { + sn * TINY * TINY + }; + } + + /* now |1-x| is TINY <= 2**-20, suffice to compute + log(x) by x-x^2/2+x^3/3-x^4/4 */ + t = ax - 1.; /* t has 20 trailing zeros */ + w = (t * t) * (0.5 - t * (0.333333333333 - t * 0.25)); + u = IVLN2_H * t; /* IVLN2_H has 16 sig. bits */ + v = t * IVLN2_L - w * IVLN2; + t1 = u + v; + is = t1.to_bits() as i32; + t1 = f32::from_bits(is as u32 & 0xfffff000); + t2 = v - (t1 - u); + } else { + let mut s2: f32; + let mut s_h: f32; + let s_l: f32; + let mut t_h: f32; + let mut t_l: f32; + + n = 0; + /* take care subnormal number */ + if ix < 0x00800000 { + ax *= TWO24; + n -= 24; + ix = ax.to_bits() as i32; + } + n += ((ix) >> 23) - 0x7f; + j = ix & 0x007fffff; + /* determine interval */ + ix = j | 0x3f800000; /* normalize ix */ + if j <= 0x1cc471 { + /* |x|> 1) & 0xfffff000) | 0x20000000) as i32; + t_h = f32::from_bits(is as u32 + 0x00400000 + ((k as u32) << 21)); + t_l = ax - (t_h - i!(BP, k as usize)); + s_l = v * ((u - s_h * t_h) - s_h * t_l); + /* compute log(ax) */ + s2 = s * s; + r = s2 * s2 * (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6))))); + r += s_l * (s_h + s); + s2 = s_h * s_h; + t_h = 3.0 + s2 + r; + is = t_h.to_bits() as i32; + t_h = f32::from_bits(is as u32 & 0xfffff000); + t_l = r - ((t_h - 3.0) - s2); + /* u+v = s*(1+...) */ + u = s_h * t_h; + v = s_l * t_h + t_l * s; + /* 2/(3log2)*(s+...) 
*/ + p_h = u + v; + is = p_h.to_bits() as i32; + p_h = f32::from_bits(is as u32 & 0xfffff000); + p_l = v - (p_h - u); + z_h = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */ + z_l = CP_L * p_h + p_l * CP + i!(DP_L, k as usize); + /* log2(ax) = (s+..)*2/(3*log2) = n + dp_h + z_h + z_l */ + t = n as f32; + t1 = ((z_h + z_l) + i!(DP_H, k as usize)) + t; + is = t1.to_bits() as i32; + t1 = f32::from_bits(is as u32 & 0xfffff000); + t2 = z_l - (((t1 - t) - i!(DP_H, k as usize)) - z_h); + }; + + /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */ + is = y.to_bits() as i32; + y1 = f32::from_bits(is as u32 & 0xfffff000); + p_l = (y - y1) * t1 + y * t2; + p_h = y1 * t1; + z = p_l + p_h; + j = z.to_bits() as i32; + if j > 0x43000000 { + /* if z > 128 */ + return sn * HUGE * HUGE; /* overflow */ + } else if j == 0x43000000 { + /* if z == 128 */ + if p_l + OVT > z - p_h { + return sn * HUGE * HUGE; /* overflow */ + } + } else if (j & 0x7fffffff) > 0x43160000 { + /* z < -150 */ + // FIXME: check should be (uint32_t)j > 0xc3160000 + return sn * TINY * TINY; /* underflow */ + } else if j as u32 == 0xc3160000 + /* z == -150 */ + && p_l <= z - p_h + { + return sn * TINY * TINY; /* underflow */ + } + + /* + * compute 2**(p_h+p_l) + */ + i = j & 0x7fffffff; + k = (i >> 23) - 0x7f; + n = 0; + if i > 0x3f000000 { + /* if |z| > 0.5, set n = [z+0.5] */ + n = j + (0x00800000 >> (k + 1)); + k = ((n & 0x7fffffff) >> 23) - 0x7f; /* new k for n */ + t = f32::from_bits(n as u32 & !(0x007fffff >> k)); + n = ((n & 0x007fffff) | 0x00800000) >> (23 - k); + if j < 0 { + n = -n; + } + p_h -= t; + } + t = p_l + p_h; + is = t.to_bits() as i32; + t = f32::from_bits(is as u32 & 0xffff8000); + u = t * LG2_H; + v = (p_l - (t - p_h)) * LG2 + t * LG2_L; + z = u + v; + w = v - (z - u); + t = z * z; + t1 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5)))); + r = (z * t1) / (t1 - 2.0) - (w + z * w); + z = 1.0 - (r - z); + j = z.to_bits() as i32; + j += n << 23; + if (j >> 23) <= 0 { + /* subnormal output */ + z = scalbnf(z, n); + } else { + z = f32::from_bits(j as u32); + } + sn * z +} + +/* origin: FreeBSD /usr/src/lib/msun/src/e_sqrtf.c */ +/* + * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com. + */ +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunPro, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +pub fn sqrtf(x: f32) -> f32 { + #[cfg(target_feature = "sse")] + { + // Note: This path is unlikely since LLVM will usually have already + // optimized sqrt calls into hardware instructions if sse is available, + // but if someone does end up here they'll apprected the speed increase. + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + // SAFETY: safe, since `_mm_set_ss` takes a 32-bit float, and returns + // a 128-bit type with the lowest 32-bits as `x`, `_mm_sqrt_ss` calculates + // the sqrt of this 128-bit vector, and `_mm_cvtss_f32` extracts the lower + // 32-bits as a 32-bit float. 
+ unsafe { + let m = _mm_set_ss(x); + let m_sqrt = _mm_sqrt_ss(m); + _mm_cvtss_f32(m_sqrt) + } + } + #[cfg(not(target_feature = "sse"))] + { + const TINY: f32 = 1.0e-30; + + let mut z: f32; + let sign: i32 = 0x80000000u32 as i32; + let mut ix: i32; + let mut s: i32; + let mut q: i32; + let mut m: i32; + let mut t: i32; + let mut i: i32; + let mut r: u32; + + ix = x.to_bits() as i32; + + /* take care of Inf and NaN */ + if (ix as u32 & 0x7f800000) == 0x7f800000 { + return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */ + } + + /* take care of zero */ + if ix <= 0 { + if (ix & !sign) == 0 { + return x; /* sqrt(+-0) = +-0 */ + } + if ix < 0 { + return (x - x) / (x - x); /* sqrt(-ve) = sNaN */ + } + } + + /* normalize x */ + m = ix >> 23; + if m == 0 { + /* subnormal x */ + i = 0; + while ix & 0x00800000 == 0 { + ix <<= 1; + i = i + 1; + } + m -= i - 1; + } + m -= 127; /* unbias exponent */ + ix = (ix & 0x007fffff) | 0x00800000; + if m & 1 == 1 { + /* odd m, double x to make it even */ + ix += ix; + } + m >>= 1; /* m = [m/2] */ + + /* generate sqrt(x) bit by bit */ + ix += ix; + q = 0; + s = 0; + r = 0x01000000; /* r = moving bit from right to left */ + + while r != 0 { + t = s + r as i32; + if t <= ix { + s = t + r as i32; + ix -= t; + q += r as i32; + } + ix += ix; + r >>= 1; + } + + /* use floating add to find out rounding direction */ + if ix != 0 { + z = 1.0 - TINY; /* raise inexact flag */ + if z >= 1.0 { + z = 1.0 + TINY; + if z > 1.0 { + q += 2; + } else { + q += q & 1; + } + } + } + + ix = (q >> 1) + 0x3f000000; + ix += m << 23; + f32::from_bits(ix as u32) + } +} + +/// Absolute value (magnitude) (f32) +/// Calculates the absolute value (magnitude) of the argument `x`, +/// by direct manipulation of the bit representation of `x`. +pub fn fabsf(x: f32) -> f32 { + f32::from_bits(x.to_bits() & 0x7fffffff) +} + +pub fn scalbnf(mut x: f32, mut n: i32) -> f32 { + let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127 + let x1p_126 = f32::from_bits(0x800000); // 0x1p-126f === 2 ^ -126 + let x1p24 = f32::from_bits(0x4b800000); // 0x1p24f === 2 ^ 24 + + if n > 127 { + x *= x1p127; + n -= 127; + if n > 127 { + x *= x1p127; + n -= 127; + if n > 127 { + n = 127; + } + } + } else if n < -126 { + x *= x1p_126 * x1p24; + n += 126 - 24; + if n < -126 { + x *= x1p_126 * x1p24; + n += 126 - 24; + if n < -126 { + n = -126; + } + } + } + x * f32::from_bits(((0x7f + n) as u32) << 23) +} + +/* origin: FreeBSD /usr/src/lib/msun/src/e_pow.c */ +/* + * ==================================================== + * Copyright (C) 2004 by Sun Microsystems, Inc. All rights reserved. + * + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +// pow(x,y) return x**y +// +// n +// Method: Let x = 2 * (1+f) +// 1. Compute and return log2(x) in two pieces: +// log2(x) = w1 + w2, +// where w1 has 53-24 = 29 bit trailing zeros. +// 2. Perform y*log2(x) = n+y' by simulating muti-precision +// arithmetic, where |y'|<=0.5. +// 3. Return x**y = 2**n*exp(y'*log2) +// +// Special cases: +// 1. (anything) ** 0 is 1 +// 2. 1 ** (anything) is 1 +// 3. (anything except 1) ** NAN is NAN +// 4. NAN ** (anything except 0) is NAN +// 5. +-(|x| > 1) ** +INF is +INF +// 6. +-(|x| > 1) ** -INF is +0 +// 7. +-(|x| < 1) ** +INF is +0 +// 8. +-(|x| < 1) ** -INF is +INF +// 9. -1 ** +-INF is 1 +// 10. +0 ** (+anything except 0, NAN) is +0 +// 11. 
-0 ** (+anything except 0, NAN, odd integer) is +0 +// 12. +0 ** (-anything except 0, NAN) is +INF, raise divbyzero +// 13. -0 ** (-anything except 0, NAN, odd integer) is +INF, raise divbyzero +// 14. -0 ** (+odd integer) is -0 +// 15. -0 ** (-odd integer) is -INF, raise divbyzero +// 16. +INF ** (+anything except 0,NAN) is +INF +// 17. +INF ** (-anything except 0,NAN) is +0 +// 18. -INF ** (+odd integer) is -INF +// 19. -INF ** (anything) = -0 ** (-anything), (anything except odd integer) +// 20. (anything) ** 1 is (anything) +// 21. (anything) ** -1 is 1/(anything) +// 22. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer) +// 23. (-anything except 0 and inf) ** (non-integer) is NAN +// +// Accuracy: +// pow(x,y) returns x**y nearly rounded. In particular +// pow(integer,integer) +// always returns the correct integer provided it is +// representable. +// +// Constants : +// The hexadecimal values are the intended ones for the following +// constants. The decimal values may be used, provided that the +// compiler will convert from decimal to binary accurately enough +// to produce the hexadecimal values shown. + +pub fn powd(x: f64, y: f64) -> f64 { + const BP: [f64; 2] = [1.0, 1.5]; + const DP_H: [f64; 2] = [0.0, 5.84962487220764160156e-01]; /* 0x3fe2b803_40000000 */ + const DP_L: [f64; 2] = [0.0, 1.35003920212974897128e-08]; /* 0x3E4CFDEB, 0x43CFD006 */ + const TWO53: f64 = 9007199254740992.0; /* 0x43400000_00000000 */ + const HUGE: f64 = 1.0e300; + const TINY: f64 = 1.0e-300; + + // poly coefs for (3/2)*(log(x)-2s-2/3*s**3: + const L1: f64 = 5.99999999999994648725e-01; /* 0x3fe33333_33333303 */ + const L2: f64 = 4.28571428578550184252e-01; /* 0x3fdb6db6_db6fabff */ + const L3: f64 = 3.33333329818377432918e-01; /* 0x3fd55555_518f264d */ + const L4: f64 = 2.72728123808534006489e-01; /* 0x3fd17460_a91d4101 */ + const L5: f64 = 2.30660745775561754067e-01; /* 0x3fcd864a_93c9db65 */ + const L6: f64 = 2.06975017800338417784e-01; /* 0x3fca7e28_4a454eef */ + const P1: f64 = 1.66666666666666019037e-01; /* 0x3fc55555_5555553e */ + const P2: f64 = -2.77777777770155933842e-03; /* 0xbf66c16c_16bebd93 */ + const P3: f64 = 6.61375632143793436117e-05; /* 0x3f11566a_af25de2c */ + const P4: f64 = -1.65339022054652515390e-06; /* 0xbebbbd41_c5d26bf1 */ + const P5: f64 = 4.13813679705723846039e-08; /* 0x3e663769_72bea4d0 */ + const LG2: f64 = 6.93147180559945286227e-01; /* 0x3fe62e42_fefa39ef */ + const LG2_H: f64 = 6.93147182464599609375e-01; /* 0x3fe62e43_00000000 */ + const LG2_L: f64 = -1.90465429995776804525e-09; /* 0xbe205c61_0ca86c39 */ + const OVT: f64 = 8.0085662595372944372e-017; /* -(1024-log2(ovfl+.5ulp)) */ + const CP: f64 = 9.61796693925975554329e-01; /* 0x3feec709_dc3a03fd =2/(3ln2) */ + const CP_H: f64 = 9.61796700954437255859e-01; /* 0x3feec709_e0000000 =(float)cp */ + const CP_L: f64 = -7.02846165095275826516e-09; /* 0xbe3e2fe0_145b01f5 =tail of cp_h*/ + const IVLN2: f64 = 1.44269504088896338700e+00; /* 0x3ff71547_652b82fe =1/ln2 */ + const IVLN2_H: f64 = 1.44269502162933349609e+00; /* 0x3ff71547_60000000 =24b 1/ln2*/ + const IVLN2_L: f64 = 1.92596299112661746887e-08; /* 0x3e54ae0b_f85ddf44 =1/ln2 tail*/ + + let t1: f64; + let t2: f64; + + let (hx, lx): (i32, u32) = ((x.to_bits() >> 32) as i32, x.to_bits() as u32); + let (hy, ly): (i32, u32) = ((y.to_bits() >> 32) as i32, y.to_bits() as u32); + + let mut ix: i32 = (hx & 0x7fffffff) as i32; + let iy: i32 = (hy & 0x7fffffff) as i32; + + /* x**0 = 1, even if x is NaN */ + if ((iy as u32) | ly) == 0 { + return 1.0; + } + + /* 
1**y = 1, even if y is NaN */ + if hx == 0x3ff00000 && lx == 0 { + return 1.0; + } + + /* NaN if either arg is NaN */ + if ix > 0x7ff00000 + || (ix == 0x7ff00000 && lx != 0) + || iy > 0x7ff00000 + || (iy == 0x7ff00000 && ly != 0) + { + return x + y; + } + + /* determine if y is an odd int when x < 0 + * yisint = 0 ... y is not an integer + * yisint = 1 ... y is an odd int + * yisint = 2 ... y is an even int + */ + let mut yisint: i32 = 0; + let mut k: i32; + let mut j: i32; + if hx < 0 { + if iy >= 0x43400000 { + yisint = 2; /* even integer y */ + } else if iy >= 0x3ff00000 { + k = (iy >> 20) - 0x3ff; /* exponent */ + + if k > 20 { + j = (ly >> (52 - k)) as i32; + + if (j << (52 - k)) == (ly as i32) { + yisint = 2 - (j & 1); + } + } else if ly == 0 { + j = iy >> (20 - k); + + if (j << (20 - k)) == iy { + yisint = 2 - (j & 1); + } + } + } + } + + if ly == 0 { + /* special value of y */ + if iy == 0x7ff00000 { + /* y is +-inf */ + + return if ((ix - 0x3ff00000) | (lx as i32)) == 0 { + /* (-1)**+-inf is 1 */ + 1.0 + } else if ix >= 0x3ff00000 { + /* (|x|>1)**+-inf = inf,0 */ + if hy >= 0 { + y + } else { + 0.0 + } + } else { + /* (|x|<1)**+-inf = 0,inf */ + if hy >= 0 { + 0.0 + } else { + -y + } + }; + } + + if iy == 0x3ff00000 { + /* y is +-1 */ + return if hy >= 0 { + x + } else { + 1.0 / x + }; + } + + if hy == 0x40000000 { + /* y is 2 */ + return x * x; + } + + if hy == 0x3fe00000 { + /* y is 0.5 */ + if hx >= 0 { + /* x >= +0 */ + return sqrtd(x); + } + } + } + + let mut ax: f64 = fabsd(x); + if lx == 0 { + /* special value of x */ + if ix == 0x7ff00000 || ix == 0 || ix == 0x3ff00000 { + /* x is +-0,+-inf,+-1 */ + let mut z: f64 = ax; + + if hy < 0 { + /* z = (1/|x|) */ + z = 1.0 / z; + } + + if hx < 0 { + if ((ix - 0x3ff00000) | yisint) == 0 { + z = (z - z) / (z - z); /* (-1)**non-int is NaN */ + } else if yisint == 1 { + z = -z; /* (x<0)**odd = -(|x|**odd) */ + } + } + + return z; + } + } + + let mut s: f64 = 1.0; /* sign of result */ + if hx < 0 { + if yisint == 0 { + /* (x<0)**(non-int) is NaN */ + return (x - x) / (x - x); + } + + if yisint == 1 { + /* (x<0)**(odd int) */ + s = -1.0; + } + } + + /* |y| is HUGE */ + if iy > 0x41e00000 { + /* if |y| > 2**31 */ + if iy > 0x43f00000 { + /* if |y| > 2**64, must o/uflow */ + if ix <= 0x3fefffff { + return if hy < 0 { + HUGE * HUGE + } else { + TINY * TINY + }; + } + + if ix >= 0x3ff00000 { + return if hy > 0 { + HUGE * HUGE + } else { + TINY * TINY + }; + } + } + + /* over/underflow if x is not close to one */ + if ix < 0x3fefffff { + return if hy < 0 { + s * HUGE * HUGE + } else { + s * TINY * TINY + }; + } + if ix > 0x3ff00000 { + return if hy > 0 { + s * HUGE * HUGE + } else { + s * TINY * TINY + }; + } + + /* now |1-x| is TINY <= 2**-20, suffice to compute + log(x) by x-x^2/2+x^3/3-x^4/4 */ + let t: f64 = ax - 1.0; /* t has 20 trailing zeros */ + let w: f64 = (t * t) * (0.5 - t * (0.3333333333333333333333 - t * 0.25)); + let u: f64 = IVLN2_H * t; /* ivln2_h has 21 sig. 
bits */ + let v: f64 = t * IVLN2_L - w * IVLN2; + t1 = with_set_low_word(u + v, 0); + t2 = v - (t1 - u); + } else { + // double ss,s2,s_h,s_l,t_h,t_l; + let mut n: i32 = 0; + + if ix < 0x00100000 { + /* take care subnormal number */ + ax *= TWO53; + n -= 53; + ix = get_high_word(ax) as i32; + } + + n += (ix >> 20) - 0x3ff; + j = ix & 0x000fffff; + + /* determine interval */ + let k: i32; + ix = j | 0x3ff00000; /* normalize ix */ + if j <= 0x3988E { + /* |x|> 1) | 0x20000000) + 0x00080000 + ((k as u32) << 18), + ); + let t_l: f64 = ax - (t_h - i!(BP, k as usize)); + let s_l: f64 = v * ((u - s_h * t_h) - s_h * t_l); + + /* compute log(ax) */ + let s2: f64 = ss * ss; + let mut r: f64 = s2 * s2 * (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6))))); + r += s_l * (s_h + ss); + let s2: f64 = s_h * s_h; + let t_h: f64 = with_set_low_word(3.0 + s2 + r, 0); + let t_l: f64 = r - ((t_h - 3.0) - s2); + + /* u+v = ss*(1+...) */ + let u: f64 = s_h * t_h; + let v: f64 = s_l * t_h + t_l * ss; + + /* 2/(3log2)*(ss+...) */ + let p_h: f64 = with_set_low_word(u + v, 0); + let p_l = v - (p_h - u); + let z_h: f64 = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */ + let z_l: f64 = CP_L * p_h + p_l * CP + i!(DP_L, k as usize); + + /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */ + let t: f64 = n as f64; + t1 = with_set_low_word(((z_h + z_l) + i!(DP_H, k as usize)) + t, 0); + t2 = z_l - (((t1 - t) - i!(DP_H, k as usize)) - z_h); + } + + /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */ + let y1: f64 = with_set_low_word(y, 0); + let p_l: f64 = (y - y1) * t1 + y * t2; + let mut p_h: f64 = y1 * t1; + let z: f64 = p_l + p_h; + let mut j: i32 = (z.to_bits() >> 32) as i32; + let i: i32 = z.to_bits() as i32; + // let (j, i): (i32, i32) = ((z.to_bits() >> 32) as i32, z.to_bits() as i32); + + if j >= 0x40900000 { + /* z >= 1024 */ + if (j - 0x40900000) | i != 0 { + /* if z > 1024 */ + return s * HUGE * HUGE; /* overflow */ + } + + if p_l + OVT > z - p_h { + return s * HUGE * HUGE; /* overflow */ + } + } else if (j & 0x7fffffff) >= 0x4090cc00 { + /* z <= -1075 */ + // FIXME: instead of abs(j) use unsigned j + + if (((j as u32) - 0xc090cc00) | (i as u32)) != 0 { + /* z < -1075 */ + return s * TINY * TINY; /* underflow */ + } + + if p_l <= z - p_h { + return s * TINY * TINY; /* underflow */ + } + } + + /* compute 2**(p_h+p_l) */ + let i: i32 = j & (0x7fffffff as i32); + k = (i >> 20) - 0x3ff; + let mut n: i32 = 0; + + if i > 0x3fe00000 { + /* if |z| > 0.5, set n = [z+0.5] */ + n = j + (0x00100000 >> (k + 1)); + k = ((n & 0x7fffffff) >> 20) - 0x3ff; /* new k for n */ + let t: f64 = with_set_high_word(0.0, (n & !(0x000fffff >> k)) as u32); + n = ((n & 0x000fffff) | 0x00100000) >> (20 - k); + if j < 0 { + n = -n; + } + p_h -= t; + } + + let t: f64 = with_set_low_word(p_l + p_h, 0); + let u: f64 = t * LG2_H; + let v: f64 = (p_l - (t - p_h)) * LG2 + t * LG2_L; + let mut z: f64 = u + v; + let w: f64 = v - (z - u); + let t: f64 = z * z; + let t1: f64 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5)))); + let r: f64 = (z * t1) / (t1 - 2.0) - (w + z * w); + z = 1.0 - (r - z); + j = get_high_word(z) as i32; + j += n << 20; + + if (j >> 20) <= 0 { + /* subnormal output */ + z = scalbnd(z, n); + } else { + z = with_set_high_word(z, j as u32); + } + + s * z +} + +/// Absolute value (magnitude) (f64) +/// Calculates the absolute value (magnitude) of the argument `x`, +/// by direct manipulation of the bit representation of `x`. 
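+///
+/// As an illustrative example (not from the original sources): `fabsd(-2.5)`
+/// returns `2.5`, and `fabsd(f64::NEG_INFINITY)` returns `f64::INFINITY`,
+/// since only the sign bit is cleared and every other bit is preserved.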
+pub fn fabsd(x: f64) -> f64 { + f64::from_bits(x.to_bits() & (u64::MAX / 2)) +} + +pub fn scalbnd(x: f64, mut n: i32) -> f64 { + let x1p1023 = f64::from_bits(0x7fe0000000000000); // 0x1p1023 === 2 ^ 1023 + let x1p53 = f64::from_bits(0x4340000000000000); // 0x1p53 === 2 ^ 53 + let x1p_1022 = f64::from_bits(0x0010000000000000); // 0x1p-1022 === 2 ^ (-1022) + + let mut y = x; + + if n > 1023 { + y *= x1p1023; + n -= 1023; + if n > 1023 { + y *= x1p1023; + n -= 1023; + if n > 1023 { + n = 1023; + } + } + } else if n < -1022 { + /* make sure final n < -53 to avoid double + rounding in the subnormal range */ + y *= x1p_1022 * x1p53; + n += 1022 - 53; + if n < -1022 { + y *= x1p_1022 * x1p53; + n += 1022 - 53; + if n < -1022 { + n = -1022; + } + } + } + y * f64::from_bits(((0x3ff + n) as u64) << 52) +} + +/* origin: FreeBSD /usr/src/lib/msun/src/e_sqrt.c */ +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunSoft, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ +/* sqrt(x) + * Return correctly rounded sqrt. + * ------------------------------------------ + * | Use the hardware sqrt if you have one | + * ------------------------------------------ + * Method: + * Bit by bit method using integer arithmetic. (Slow, but portable) + * 1. Normalization + * Scale x to y in [1,4) with even powers of 2: + * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then + * sqrt(x) = 2^k * sqrt(y) + * 2. Bit by bit computation + * Let q = sqrt(y) truncated to i bit after binary point (q = 1), + * i 0 + * i+1 2 + * s = 2*q , and y = 2 * ( y - q ). (1) + * i i i i + * + * To compute q from q , one checks whether + * i+1 i + * + * -(i+1) 2 + * (q + 2 ) <= y. (2) + * i + * -(i+1) + * If (2) is false, then q = q ; otherwise q = q + 2 . + * i+1 i i+1 i + * + * With some algebraic manipulation, it is not difficult to see + * that (2) is equivalent to + * -(i+1) + * s + 2 <= y (3) + * i i + * + * The advantage of (3) is that s and y can be computed by + * i i + * the following recurrence formula: + * if (3) is false + * + * s = s , y = y ; (4) + * i+1 i i+1 i + * + * otherwise, + * -i -(i+1) + * s = s + 2 , y = y - s - 2 (5) + * i+1 i i+1 i i + * + * One may easily use induction to prove (4) and (5). + * Note. Since the left hand side of (3) contain only i+2 bits, + * it does not necessary to do a full (53-bit) comparison + * in (3). + * 3. Final rounding + * After generating the 53 bits result, we compute one more bit. + * Together with the remainder, we can decide whether the + * result is exact, bigger than 1/2ulp, or less than 1/2ulp + * (it will never equal to 1/2ulp). + * The rounding mode can be detected by checking whether + * huge + tiny is equal to huge, and whether huge - tiny is + * equal to huge for some floating point number "huge" and "tiny". + * + * Special cases: + * sqrt(+-0) = +-0 ... exact + * sqrt(inf) = inf + * sqrt(-ve) = NaN ... with invalid signal + * sqrt(NaN) = NaN ... with invalid signal for signaling NaN + */ + +pub fn sqrtd(x: f64) -> f64 { + #[cfg(target_feature = "sse2")] + { + // Note: This path is unlikely since LLVM will usually have already + // optimized sqrt calls into hardware instructions if sse2 is available, + // but if someone does end up here they'll apprected the speed increase. 
+ #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + // SAFETY: safe, since `_mm_set_sd` takes a 64-bit float, and returns + // a 128-bit type with the lowest 64-bits as `x`, `_mm_sqrt_ss` calculates + // the sqrt of this 128-bit vector, and `_mm_cvtss_f64` extracts the lower + // 64-bits as a 64-bit float. + unsafe { + let m = _mm_set_sd(x); + let m_sqrt = _mm_sqrt_pd(m); + _mm_cvtsd_f64(m_sqrt) + } + } + #[cfg(not(target_feature = "sse2"))] + { + use core::num::Wrapping; + + const TINY: f64 = 1.0e-300; + + let mut z: f64; + let sign: Wrapping = Wrapping(0x80000000); + let mut ix0: i32; + let mut s0: i32; + let mut q: i32; + let mut m: i32; + let mut t: i32; + let mut i: i32; + let mut r: Wrapping; + let mut t1: Wrapping; + let mut s1: Wrapping; + let mut ix1: Wrapping; + let mut q1: Wrapping; + + ix0 = (x.to_bits() >> 32) as i32; + ix1 = Wrapping(x.to_bits() as u32); + + /* take care of Inf and NaN */ + if (ix0 & 0x7ff00000) == 0x7ff00000 { + return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */ + } + /* take care of zero */ + if ix0 <= 0 { + if ((ix0 & !(sign.0 as i32)) | ix1.0 as i32) == 0 { + return x; /* sqrt(+-0) = +-0 */ + } + if ix0 < 0 { + return (x - x) / (x - x); /* sqrt(-ve) = sNaN */ + } + } + /* normalize x */ + m = ix0 >> 20; + if m == 0 { + /* subnormal x */ + while ix0 == 0 { + m -= 21; + ix0 |= (ix1 >> 11).0 as i32; + ix1 <<= 21; + } + i = 0; + while (ix0 & 0x00100000) == 0 { + i += 1; + ix0 <<= 1; + } + m -= i - 1; + ix0 |= (ix1 >> (32 - i) as usize).0 as i32; + ix1 = ix1 << i as usize; + } + m -= 1023; /* unbias exponent */ + ix0 = (ix0 & 0x000fffff) | 0x00100000; + if (m & 1) == 1 { + /* odd m, double x to make it even */ + ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; + ix1 += ix1; + } + m >>= 1; /* m = [m/2] */ + + /* generate sqrt(x) bit by bit */ + ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; + ix1 += ix1; + q = 0; /* [q,q1] = sqrt(x) */ + q1 = Wrapping(0); + s0 = 0; + s1 = Wrapping(0); + r = Wrapping(0x00200000); /* r = moving bit from right to left */ + + while r != Wrapping(0) { + t = s0 + r.0 as i32; + if t <= ix0 { + s0 = t + r.0 as i32; + ix0 -= t; + q += r.0 as i32; + } + ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; + ix1 += ix1; + r >>= 1; + } + + r = sign; + while r != Wrapping(0) { + t1 = s1 + r; + t = s0; + if t < ix0 || (t == ix0 && t1 <= ix1) { + s1 = t1 + r; + if (t1 & sign) == sign && (s1 & sign) == Wrapping(0) { + s0 += 1; + } + ix0 -= t; + if ix1 < t1 { + ix0 -= 1; + } + ix1 -= t1; + q1 += r; + } + ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32; + ix1 += ix1; + r >>= 1; + } + + /* use floating add to find out rounding direction */ + if (ix0 as u32 | ix1.0) != 0 { + z = 1.0 - TINY; /* raise inexact flag */ + if z >= 1.0 { + z = 1.0 + TINY; + if q1.0 == 0xffffffff { + q1 = Wrapping(0); + q += 1; + } else if z > 1.0 { + if q1.0 == 0xfffffffe { + q += 1; + } + q1 += Wrapping(2); + } else { + q1 += q1 & Wrapping(1); + } + } + } + ix0 = (q >> 1) + 0x3fe00000; + ix1 = q1 >> 1; + if (q & 1) == 1 { + ix1 |= sign; + } + ix0 += m << 20; + f64::from_bits((ix0 as u64) << 32 | ix1.0 as u64) + } +} + +#[inline] +fn get_high_word(x: f64) -> u32 { + (x.to_bits() >> 32) as u32 +} + +#[inline] +fn with_set_high_word(f: f64, hi: u32) -> f64 { + let mut tmp = f.to_bits(); + tmp &= 0x00000000_ffffffff; + tmp |= (hi as u64) << 32; + f64::from_bits(tmp) +} + +#[inline] +fn with_set_low_word(f: f64, lo: u32) -> f64 { + let mut tmp = f.to_bits(); + tmp &= 0xffffffff_00000000; + tmp |= lo as u64; + 
f64::from_bits(tmp) +} diff --git a/third_party/rust/minimal-lexical/src/mask.rs b/third_party/rust/minimal-lexical/src/mask.rs new file mode 100644 index 0000000000..1957c8be03 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/mask.rs @@ -0,0 +1,60 @@ +//! Utilities to generate bitmasks. + +#![doc(hidden)] + +/// Generate a bitwise mask for the lower `n` bits. +/// +/// # Examples +/// +/// ```rust +/// # use minimal_lexical::mask::lower_n_mask; +/// # pub fn main() { +/// assert_eq!(lower_n_mask(2), 0b11); +/// # } +/// ``` +#[inline] +pub fn lower_n_mask(n: u64) -> u64 { + debug_assert!(n <= 64, "lower_n_mask() overflow in shl."); + + match n == 64 { + // u64::MAX for older Rustc versions. + true => 0xffff_ffff_ffff_ffff, + false => (1 << n) - 1, + } +} + +/// Calculate the halfway point for the lower `n` bits. +/// +/// # Examples +/// +/// ```rust +/// # use minimal_lexical::mask::lower_n_halfway; +/// # pub fn main() { +/// assert_eq!(lower_n_halfway(2), 0b10); +/// # } +/// ``` +#[inline] +pub fn lower_n_halfway(n: u64) -> u64 { + debug_assert!(n <= 64, "lower_n_halfway() overflow in shl."); + + match n == 0 { + true => 0, + false => nth_bit(n - 1), + } +} + +/// Calculate a scalar factor of 2 above the halfway point. +/// +/// # Examples +/// +/// ```rust +/// # use minimal_lexical::mask::nth_bit; +/// # pub fn main() { +/// assert_eq!(nth_bit(2), 0b100); +/// # } +/// ``` +#[inline] +pub fn nth_bit(n: u64) -> u64 { + debug_assert!(n < 64, "nth_bit() overflow in shl."); + 1 << n +} diff --git a/third_party/rust/minimal-lexical/src/num.rs b/third_party/rust/minimal-lexical/src/num.rs new file mode 100644 index 0000000000..9f682b9cbb --- /dev/null +++ b/third_party/rust/minimal-lexical/src/num.rs @@ -0,0 +1,308 @@ +//! Utilities for Rust numbers. + +#![doc(hidden)] + +#[cfg(all(not(feature = "std"), feature = "compact"))] +use crate::libm::{powd, powf}; +#[cfg(not(feature = "compact"))] +use crate::table::{SMALL_F32_POW10, SMALL_F64_POW10, SMALL_INT_POW10, SMALL_INT_POW5}; +#[cfg(not(feature = "compact"))] +use core::hint; +use core::ops; + +/// Generic floating-point type, to be used in generic code for parsing. +/// +/// Although the trait is part of the public API, the trait provides methods +/// and constants that are effectively non-public: they may be removed +/// at any time without any breaking changes. +pub trait Float: + Sized + + Copy + + PartialEq + + PartialOrd + + Send + + Sync + + ops::Add + + ops::AddAssign + + ops::Div + + ops::DivAssign + + ops::Mul + + ops::MulAssign + + ops::Rem + + ops::RemAssign + + ops::Sub + + ops::SubAssign + + ops::Neg +{ + /// Maximum number of digits that can contribute in the mantissa. + /// + /// We can exactly represent a float in radix `b` from radix 2 if + /// `b` is divisible by 2. This function calculates the exact number of + /// digits required to exactly represent that float. + /// + /// According to the "Handbook of Floating Point Arithmetic", + /// for IEEE754, with emin being the min exponent, p2 being the + /// precision, and b being the radix, the number of digits follows as: + /// + /// `−emin + p2 + ⌊(emin + 1) log(2, b) − log(1 − 2^(−p2), b)⌋` + /// + /// For f32, this follows as: + /// emin = -126 + /// p2 = 24 + /// + /// For f64, this follows as: + /// emin = -1022 + /// p2 = 53 + /// + /// In Python: + /// `-emin + p2 + math.floor((emin+1)*math.log(2, b) - math.log(1-2**(-p2), b))` + /// + /// This was used to calculate the maximum number of digits for [2, 36]. 
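+    ///
+    /// As a rough worked instance (illustration only): for `f64` in base 10,
+    /// `emin = -1022` and `p2 = 53`, so the formula gives
+    /// `1022 + 53 + ⌊-1021 * log10(2) - log10(1 - 2^-53)⌋ = 1022 + 53 - 308 = 767`;
+    /// the constants below are slightly larger, presumably to leave headroom.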
+ const MAX_DIGITS: usize; + + // MASKS + + /// Bitmask for the sign bit. + const SIGN_MASK: u64; + /// Bitmask for the exponent, including the hidden bit. + const EXPONENT_MASK: u64; + /// Bitmask for the hidden bit in exponent, which is an implicit 1 in the fraction. + const HIDDEN_BIT_MASK: u64; + /// Bitmask for the mantissa (fraction), excluding the hidden bit. + const MANTISSA_MASK: u64; + + // PROPERTIES + + /// Size of the significand (mantissa) without hidden bit. + const MANTISSA_SIZE: i32; + /// Bias of the exponet + const EXPONENT_BIAS: i32; + /// Exponent portion of a denormal float. + const DENORMAL_EXPONENT: i32; + /// Maximum exponent value in float. + const MAX_EXPONENT: i32; + + // ROUNDING + + /// Mask to determine if a full-carry occurred (1 in bit above hidden bit). + const CARRY_MASK: u64; + + /// Bias for marking an invalid extended float. + // Value is `i16::MIN`, using hard-coded constants for older Rustc versions. + const INVALID_FP: i32 = -0x8000; + + // Maximum mantissa for the fast-path (`1 << 53` for f64). + const MAX_MANTISSA_FAST_PATH: u64 = 2_u64 << Self::MANTISSA_SIZE; + + // Largest exponent value `(1 << EXP_BITS) - 1`. + const INFINITE_POWER: i32 = Self::MAX_EXPONENT + Self::EXPONENT_BIAS; + + // Round-to-even only happens for negative values of q + // when q ≥ −4 in the 64-bit case and when q ≥ −17 in + // the 32-bitcase. + // + // When q ≥ 0,we have that 5^q ≤ 2m+1. In the 64-bit case,we + // have 5^q ≤ 2m+1 ≤ 2^54 or q ≤ 23. In the 32-bit case,we have + // 5^q ≤ 2m+1 ≤ 2^25 or q ≤ 10. + // + // When q < 0, we have w ≥ (2m+1)×5^−q. We must have that w < 2^64 + // so (2m+1)×5^−q < 2^64. We have that 2m+1 > 2^53 (64-bit case) + // or 2m+1 > 2^24 (32-bit case). Hence,we must have 2^53×5^−q < 2^64 + // (64-bit) and 2^24×5^−q < 2^64 (32-bit). Hence we have 5^−q < 2^11 + // or q ≥ −4 (64-bit case) and 5^−q < 2^40 or q ≥ −17 (32-bitcase). + // + // Thus we have that we only need to round ties to even when + // we have that q ∈ [−4,23](in the 64-bit case) or q∈[−17,10] + // (in the 32-bit case). In both cases,the power of five(5^|q|) + // fits in a 64-bit word. + const MIN_EXPONENT_ROUND_TO_EVEN: i32; + const MAX_EXPONENT_ROUND_TO_EVEN: i32; + + /// Minimum normal exponent value `-(1 << (EXPONENT_SIZE - 1)) + 1`. + const MINIMUM_EXPONENT: i32; + + /// Smallest decimal exponent for a non-zero value. + const SMALLEST_POWER_OF_TEN: i32; + + /// Largest decimal exponent for a non-infinite value. + const LARGEST_POWER_OF_TEN: i32; + + /// Minimum exponent that for a fast path case, or `-⌊(MANTISSA_SIZE+1)/log2(10)⌋` + const MIN_EXPONENT_FAST_PATH: i32; + + /// Maximum exponent that for a fast path case, or `⌊(MANTISSA_SIZE+1)/log2(5)⌋` + const MAX_EXPONENT_FAST_PATH: i32; + + /// Maximum exponent that can be represented for a disguised-fast path case. + /// This is `MAX_EXPONENT_FAST_PATH + ⌊(MANTISSA_SIZE+1)/log2(10)⌋` + const MAX_EXPONENT_DISGUISED_FAST_PATH: i32; + + /// Convert 64-bit integer to float. + fn from_u64(u: u64) -> Self; + + // Re-exported methods from std. + fn from_bits(u: u64) -> Self; + fn to_bits(self) -> u64; + + /// Get a small power-of-radix for fast-path multiplication. + /// + /// # Safety + /// + /// Safe as long as the exponent is smaller than the table size. + unsafe fn pow_fast_path(exponent: usize) -> Self; + + /// Get a small, integral power-of-radix for fast-path multiplication. + /// + /// # Safety + /// + /// Safe as long as the exponent is smaller than the table size. 
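+    ///
+    /// A minimal hypothetical use (assuming the exponent is within the table
+    /// bounds): `unsafe { f64::int_pow_fast_path(3, 10) }` evaluates to `1000`.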
+ #[inline(always)] + unsafe fn int_pow_fast_path(exponent: usize, radix: u32) -> u64 { + // SAFETY: safe as long as the exponent is smaller than the radix table. + #[cfg(not(feature = "compact"))] + return match radix { + 5 => unsafe { *SMALL_INT_POW5.get_unchecked(exponent) }, + 10 => unsafe { *SMALL_INT_POW10.get_unchecked(exponent) }, + _ => unsafe { hint::unreachable_unchecked() }, + }; + + #[cfg(feature = "compact")] + return (radix as u64).pow(exponent as u32); + } + + /// Returns true if the float is a denormal. + #[inline] + fn is_denormal(self) -> bool { + self.to_bits() & Self::EXPONENT_MASK == 0 + } + + /// Get exponent component from the float. + #[inline] + fn exponent(self) -> i32 { + if self.is_denormal() { + return Self::DENORMAL_EXPONENT; + } + + let bits = self.to_bits(); + let biased_e: i32 = ((bits & Self::EXPONENT_MASK) >> Self::MANTISSA_SIZE) as i32; + biased_e - Self::EXPONENT_BIAS + } + + /// Get mantissa (significand) component from float. + #[inline] + fn mantissa(self) -> u64 { + let bits = self.to_bits(); + let s = bits & Self::MANTISSA_MASK; + if !self.is_denormal() { + s + Self::HIDDEN_BIT_MASK + } else { + s + } + } +} + +impl Float for f32 { + const MAX_DIGITS: usize = 114; + const SIGN_MASK: u64 = 0x80000000; + const EXPONENT_MASK: u64 = 0x7F800000; + const HIDDEN_BIT_MASK: u64 = 0x00800000; + const MANTISSA_MASK: u64 = 0x007FFFFF; + const MANTISSA_SIZE: i32 = 23; + const EXPONENT_BIAS: i32 = 127 + Self::MANTISSA_SIZE; + const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; + const MAX_EXPONENT: i32 = 0xFF - Self::EXPONENT_BIAS; + const CARRY_MASK: u64 = 0x1000000; + const MIN_EXPONENT_ROUND_TO_EVEN: i32 = -17; + const MAX_EXPONENT_ROUND_TO_EVEN: i32 = 10; + const MINIMUM_EXPONENT: i32 = -127; + const SMALLEST_POWER_OF_TEN: i32 = -65; + const LARGEST_POWER_OF_TEN: i32 = 38; + const MIN_EXPONENT_FAST_PATH: i32 = -10; + const MAX_EXPONENT_FAST_PATH: i32 = 10; + const MAX_EXPONENT_DISGUISED_FAST_PATH: i32 = 17; + + #[inline(always)] + unsafe fn pow_fast_path(exponent: usize) -> Self { + // SAFETY: safe as long as the exponent is smaller than the radix table. + #[cfg(not(feature = "compact"))] + return unsafe { *SMALL_F32_POW10.get_unchecked(exponent) }; + + #[cfg(feature = "compact")] + return powf(10.0f32, exponent as f32); + } + + #[inline] + fn from_u64(u: u64) -> f32 { + u as _ + } + + #[inline] + fn from_bits(u: u64) -> f32 { + // Constant is `u32::MAX` for older Rustc versions. 
+ debug_assert!(u <= 0xffff_ffff); + f32::from_bits(u as u32) + } + + #[inline] + fn to_bits(self) -> u64 { + f32::to_bits(self) as u64 + } +} + +impl Float for f64 { + const MAX_DIGITS: usize = 769; + const SIGN_MASK: u64 = 0x8000000000000000; + const EXPONENT_MASK: u64 = 0x7FF0000000000000; + const HIDDEN_BIT_MASK: u64 = 0x0010000000000000; + const MANTISSA_MASK: u64 = 0x000FFFFFFFFFFFFF; + const MANTISSA_SIZE: i32 = 52; + const EXPONENT_BIAS: i32 = 1023 + Self::MANTISSA_SIZE; + const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS; + const MAX_EXPONENT: i32 = 0x7FF - Self::EXPONENT_BIAS; + const CARRY_MASK: u64 = 0x20000000000000; + const MIN_EXPONENT_ROUND_TO_EVEN: i32 = -4; + const MAX_EXPONENT_ROUND_TO_EVEN: i32 = 23; + const MINIMUM_EXPONENT: i32 = -1023; + const SMALLEST_POWER_OF_TEN: i32 = -342; + const LARGEST_POWER_OF_TEN: i32 = 308; + const MIN_EXPONENT_FAST_PATH: i32 = -22; + const MAX_EXPONENT_FAST_PATH: i32 = 22; + const MAX_EXPONENT_DISGUISED_FAST_PATH: i32 = 37; + + #[inline(always)] + unsafe fn pow_fast_path(exponent: usize) -> Self { + // SAFETY: safe as long as the exponent is smaller than the radix table. + #[cfg(not(feature = "compact"))] + return unsafe { *SMALL_F64_POW10.get_unchecked(exponent) }; + + #[cfg(feature = "compact")] + return powd(10.0f64, exponent as f64); + } + + #[inline] + fn from_u64(u: u64) -> f64 { + u as _ + } + + #[inline] + fn from_bits(u: u64) -> f64 { + f64::from_bits(u) + } + + #[inline] + fn to_bits(self) -> u64 { + f64::to_bits(self) + } +} + +#[inline(always)] +#[cfg(all(feature = "std", feature = "compact"))] +pub fn powf(x: f32, y: f32) -> f32 { + x.powf(y) +} + +#[inline(always)] +#[cfg(all(feature = "std", feature = "compact"))] +pub fn powd(x: f64, y: f64) -> f64 { + x.powf(y) +} diff --git a/third_party/rust/minimal-lexical/src/number.rs b/third_party/rust/minimal-lexical/src/number.rs new file mode 100644 index 0000000000..5981f9dd79 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/number.rs @@ -0,0 +1,83 @@ +//! Representation of a float as the significant digits and exponent. +//! +//! This is adapted from [fast-float-rust](https://github.com/aldanor/fast-float-rust), +//! a port of [fast_float](https://github.com/fastfloat/fast_float) to Rust. + +#![doc(hidden)] + +#[cfg(feature = "nightly")] +use crate::fpu::set_precision; +use crate::num::Float; + +/// Representation of a number as the significant digits and exponent. +/// +/// This is only used if the exponent base and the significant digit +/// radix are the same, since we need to be able to move powers in and +/// out of the exponent. +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct Number { + /// The exponent of the float, scaled to the mantissa. + pub exponent: i32, + /// The significant digits of the float. + pub mantissa: u64, + /// If the significant digits were truncated. + pub many_digits: bool, +} + +impl Number { + /// Detect if the float can be accurately reconstructed from native floats. + #[inline] + pub fn is_fast_path(&self) -> bool { + F::MIN_EXPONENT_FAST_PATH <= self.exponent + && self.exponent <= F::MAX_EXPONENT_DISGUISED_FAST_PATH + && self.mantissa <= F::MAX_MANTISSA_FAST_PATH + && !self.many_digits + } + + /// The fast path algorithmn using machine-sized integers and floats. + /// + /// This is extracted into a separate function so that it can be attempted before constructing + /// a Decimal. 
This only works if both the mantissa and the exponent + /// can be exactly represented as a machine float, since IEE-754 guarantees + /// no rounding will occur. + /// + /// There is an exception: disguised fast-path cases, where we can shift + /// powers-of-10 from the exponent to the significant digits. + pub fn try_fast_path(&self) -> Option { + // The fast path crucially depends on arithmetic being rounded to the correct number of bits + // without any intermediate rounding. On x86 (without SSE or SSE2) this requires the precision + // of the x87 FPU stack to be changed so that it directly rounds to 64/32 bit. + // The `set_precision` function takes care of setting the precision on architectures which + // require setting it by changing the global state (like the control word of the x87 FPU). + #[cfg(feature = "nightly")] + let _cw = set_precision::(); + + if self.is_fast_path::() { + let max_exponent = F::MAX_EXPONENT_FAST_PATH; + Some(if self.exponent <= max_exponent { + // normal fast path + let value = F::from_u64(self.mantissa); + if self.exponent < 0 { + // SAFETY: safe, since the `exponent <= max_exponent`. + value / unsafe { F::pow_fast_path((-self.exponent) as _) } + } else { + // SAFETY: safe, since the `exponent <= max_exponent`. + value * unsafe { F::pow_fast_path(self.exponent as _) } + } + } else { + // disguised fast path + let shift = self.exponent - max_exponent; + // SAFETY: safe, since `shift <= (max_disguised - max_exponent)`. + let int_power = unsafe { F::int_pow_fast_path(shift as usize, 10) }; + let mantissa = self.mantissa.checked_mul(int_power)?; + if mantissa > F::MAX_MANTISSA_FAST_PATH { + return None; + } + // SAFETY: safe, since the `table.len() - 1 == max_exponent`. + F::from_u64(mantissa) * unsafe { F::pow_fast_path(max_exponent as _) } + }) + } else { + None + } + } +} diff --git a/third_party/rust/minimal-lexical/src/parse.rs b/third_party/rust/minimal-lexical/src/parse.rs new file mode 100644 index 0000000000..9349699eb3 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/parse.rs @@ -0,0 +1,201 @@ +//! Parse byte iterators to float. + +#![doc(hidden)] + +#[cfg(feature = "compact")] +use crate::bellerophon::bellerophon; +use crate::extended_float::{extended_to_float, ExtendedFloat}; +#[cfg(not(feature = "compact"))] +use crate::lemire::lemire; +use crate::num::Float; +use crate::number::Number; +use crate::slow::slow; + +/// Try to parse the significant digits quickly. +/// +/// This attempts a very quick parse, to deal with common cases. +/// +/// * `integer` - Slice containing the integer digits. +/// * `fraction` - Slice containing the fraction digits. +#[inline] +fn parse_number_fast<'a, Iter1, Iter2>( + integer: Iter1, + fraction: Iter2, + exponent: i32, +) -> Option +where + Iter1: Iterator, + Iter2: Iterator, +{ + let mut num = Number::default(); + let mut integer_count: usize = 0; + let mut fraction_count: usize = 0; + for &c in integer { + integer_count += 1; + let digit = c - b'0'; + num.mantissa = num.mantissa.wrapping_mul(10).wrapping_add(digit as u64); + } + for &c in fraction { + fraction_count += 1; + let digit = c - b'0'; + num.mantissa = num.mantissa.wrapping_mul(10).wrapping_add(digit as u64); + } + + if integer_count + fraction_count <= 19 { + // Can't overflow, since must be <= 19. + num.exponent = exponent.saturating_sub(fraction_count as i32); + Some(num) + } else { + None + } +} + +/// Parse the significant digits of the float and adjust the exponent. +/// +/// * `integer` - Slice containing the integer digits. 
+/// * `fraction` - Slice containing the fraction digits. +#[inline] +fn parse_number<'a, Iter1, Iter2>(mut integer: Iter1, mut fraction: Iter2, exponent: i32) -> Number +where + Iter1: Iterator + Clone, + Iter2: Iterator + Clone, +{ + // NOTE: for performance, we do this in 2 passes: + if let Some(num) = parse_number_fast(integer.clone(), fraction.clone(), exponent) { + return num; + } + + // Can only add 19 digits. + let mut num = Number::default(); + let mut count = 0; + while let Some(&c) = integer.next() { + count += 1; + if count == 20 { + // Only the integer digits affect the exponent. + num.many_digits = true; + num.exponent = exponent.saturating_add(into_i32(1 + integer.count())); + return num; + } else { + let digit = c - b'0'; + num.mantissa = num.mantissa * 10 + digit as u64; + } + } + + // Skip leading fraction zeros. + // This is required otherwise we might have a 0 mantissa and many digits. + let mut fraction_count: usize = 0; + if count == 0 { + for &c in &mut fraction { + fraction_count += 1; + if c != b'0' { + count += 1; + let digit = c - b'0'; + num.mantissa = num.mantissa * 10 + digit as u64; + break; + } + } + } + for c in fraction { + fraction_count += 1; + count += 1; + if count == 20 { + num.many_digits = true; + // This can't wrap, since we have at most 20 digits. + // We've adjusted the exponent too high by `fraction_count - 1`. + // Note: -1 is due to incrementing this loop iteration, which we + // didn't use. + num.exponent = exponent.saturating_sub(fraction_count as i32 - 1); + return num; + } else { + let digit = c - b'0'; + num.mantissa = num.mantissa * 10 + digit as u64; + } + } + + // No truncated digits: easy. + // Cannot overflow: <= 20 digits. + num.exponent = exponent.saturating_sub(fraction_count as i32); + num +} + +/// Parse float from extracted float components. +/// +/// * `integer` - Cloneable, forward iterator over integer digits. +/// * `fraction` - Cloneable, forward iterator over integer digits. +/// * `exponent` - Parsed, 32-bit exponent. +/// +/// # Preconditions +/// 1. The integer should not have leading zeros. +/// 2. The fraction should not have trailing zeros. +/// 3. All bytes in `integer` and `fraction` should be valid digits, +/// in the range [`b'0', b'9']. +/// +/// # Panics +/// +/// Although passing garbage input will not cause memory safety issues, +/// it is very likely to cause a panic with a large number of digits, or +/// in debug mode. The big-integer arithmetic without the `alloc` feature +/// assumes a maximum, fixed-width input, which assumes at maximum a +/// value of `10^(769 + 342)`, or ~4000 bits of storage. Passing in +/// nonsensical digits may require up to ~6000 bits of storage, which will +/// panic when attempting to add it to the big integer. It is therefore +/// up to the caller to validate this input. +/// +/// We cannot efficiently remove trailing zeros while only accepting a +/// forward iterator. +pub fn parse_float<'a, F, Iter1, Iter2>(integer: Iter1, fraction: Iter2, exponent: i32) -> F +where + F: Float, + Iter1: Iterator + Clone, + Iter2: Iterator + Clone, +{ + // Parse the mantissa and attempt the fast and moderate-path algorithms. + let num = parse_number(integer.clone(), fraction.clone(), exponent); + // Try the fast-path algorithm. + if let Some(value) = num.try_fast_path() { + return value; + } + + // Now try the moderate path algorithm. + let mut fp = moderate_path::(&num); + if fp.exp < 0 { + // Undo the invalid extended float biasing. 
+ fp.exp -= F::INVALID_FP; + fp = slow::(num, fp, integer, fraction); + } + + // Unable to correctly round the float using the fast or moderate algorithms. + // Fallback to a slower, but always correct algorithm. If we have + // lossy, we can't be here. + extended_to_float::(fp) +} + +/// Wrapper for different moderate-path algorithms. +/// A return exponent of `-1` indicates an invalid value. +#[inline] +pub fn moderate_path(num: &Number) -> ExtendedFloat { + #[cfg(not(feature = "compact"))] + return lemire::(num); + + #[cfg(feature = "compact")] + return bellerophon::(num); +} + +/// Convert usize into i32 without overflow. +/// +/// This is needed to ensure when adjusting the exponent relative to +/// the mantissa we do not overflow for comically-long exponents. +#[inline] +fn into_i32(value: usize) -> i32 { + if value > i32::max_value() as usize { + i32::max_value() + } else { + value as i32 + } +} + +// Add digit to mantissa. +#[inline] +pub fn add_digit(value: u64, digit: u8) -> Option { + value.checked_mul(10)?.checked_add(digit as u64) +} diff --git a/third_party/rust/minimal-lexical/src/rounding.rs b/third_party/rust/minimal-lexical/src/rounding.rs new file mode 100644 index 0000000000..7c466dec4d --- /dev/null +++ b/third_party/rust/minimal-lexical/src/rounding.rs @@ -0,0 +1,131 @@ +//! Defines rounding schemes for floating-point numbers. + +#![doc(hidden)] + +use crate::extended_float::ExtendedFloat; +use crate::mask::{lower_n_halfway, lower_n_mask}; +use crate::num::Float; + +// ROUNDING +// -------- + +/// Round an extended-precision float to the nearest machine float. +/// +/// Shifts the significant digits into place, adjusts the exponent, +/// so it can be easily converted to a native float. +#[cfg_attr(not(feature = "compact"), inline)] +pub fn round(fp: &mut ExtendedFloat, cb: Cb) +where + F: Float, + Cb: Fn(&mut ExtendedFloat, i32), +{ + let fp_inf = ExtendedFloat { + mant: 0, + exp: F::INFINITE_POWER, + }; + + // Calculate our shift in significant digits. + let mantissa_shift = 64 - F::MANTISSA_SIZE - 1; + + // Check for a denormal float, if after the shift the exponent is negative. + if -fp.exp >= mantissa_shift { + // Have a denormal float that isn't a literal 0. + // The extra 1 is to adjust for the denormal float, which is + // `1 - F::EXPONENT_BIAS`. This works as before, because our + // old logic rounded to `F::DENORMAL_EXPONENT` (now 1), and then + // checked if `exp == F::DENORMAL_EXPONENT` and no hidden mask + // bit was set. Here, we handle that here, rather than later. + // + // This might round-down to 0, but shift will be at **max** 65, + // for halfway cases rounding towards 0. + let shift = -fp.exp + 1; + debug_assert!(shift <= 65); + cb(fp, shift.min(64)); + // Check for round-up: if rounding-nearest carried us to the hidden bit. + fp.exp = (fp.mant >= F::HIDDEN_BIT_MASK) as i32; + return; + } + + // The float is normal, round to the hidden bit. + cb(fp, mantissa_shift); + + // Check if we carried, and if so, shift the bit to the hidden bit. + let carry_mask = F::CARRY_MASK; + if fp.mant & carry_mask == carry_mask { + fp.mant >>= 1; + fp.exp += 1; + } + + // Handle if we carried and check for overflow again. + if fp.exp >= F::INFINITE_POWER { + // Exponent is above largest normal value, must be infinite. + *fp = fp_inf; + return; + } + + // Remove the hidden bit. + fp.mant &= F::MANTISSA_MASK; +} + +/// Shift right N-bytes and round towards a direction. +/// +/// Callback should take the following parameters: +/// 1. is_odd +/// 1. is_halfway +/// 1. 
is_above +#[cfg_attr(not(feature = "compact"), inline)] +pub fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32, cb: Cb) +where + // is_odd, is_halfway, is_above + Cb: Fn(bool, bool, bool) -> bool, +{ + // Ensure we've already handled denormal values that underflow. + debug_assert!(shift <= 64); + + // Extract the truncated bits using mask. + // Calculate if the value of the truncated bits are either above + // the mid-way point, or equal to it. + // + // For example, for 4 truncated bytes, the mask would be 0b1111 + // and the midway point would be 0b1000. + let mask = lower_n_mask(shift as u64); + let halfway = lower_n_halfway(shift as u64); + let truncated_bits = fp.mant & mask; + let is_above = truncated_bits > halfway; + let is_halfway = truncated_bits == halfway; + + // Bit shift so the leading bit is in the hidden bit. + // This optimixes pretty well: + // ```text + // mov ecx, esi + // shr rdi, cl + // xor eax, eax + // cmp esi, 64 + // cmovne rax, rdi + // ret + // ``` + fp.mant = match shift == 64 { + true => 0, + false => fp.mant >> shift, + }; + fp.exp += shift; + + // Extract the last bit after shifting (and determine if it is odd). + let is_odd = fp.mant & 1 == 1; + + // Calculate if we need to roundup. + // We need to roundup if we are above halfway, or if we are odd + // and at half-way (need to tie-to-even). Avoid the branch here. + fp.mant += cb(is_odd, is_halfway, is_above) as u64; +} + +/// Round our significant digits into place, truncating them. +#[cfg_attr(not(feature = "compact"), inline)] +pub fn round_down(fp: &mut ExtendedFloat, shift: i32) { + // Might have a shift greater than 64 if we have an error. + fp.mant = match shift == 64 { + true => 0, + false => fp.mant >> shift, + }; + fp.exp += shift; +} diff --git a/third_party/rust/minimal-lexical/src/slow.rs b/third_party/rust/minimal-lexical/src/slow.rs new file mode 100644 index 0000000000..59d526ba42 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/slow.rs @@ -0,0 +1,403 @@ +//! Slow, fallback cases where we cannot unambiguously round a float. +//! +//! This occurs when we cannot determine the exact representation using +//! both the fast path (native) cases nor the Lemire/Bellerophon algorithms, +//! and therefore must fallback to a slow, arbitrary-precision representation. + +#![doc(hidden)] + +use crate::bigint::{Bigint, Limb, LIMB_BITS}; +use crate::extended_float::{extended_to_float, ExtendedFloat}; +use crate::num::Float; +use crate::number::Number; +use crate::rounding::{round, round_down, round_nearest_tie_even}; +use core::cmp; + +// ALGORITHM +// --------- + +/// Parse the significant digits and biased, binary exponent of a float. +/// +/// This is a fallback algorithm that uses a big-integer representation +/// of the float, and therefore is considerably slower than faster +/// approximations. However, it will always determine how to round +/// the significant digits to the nearest machine float, allowing +/// use to handle near half-way cases. +/// +/// Near half-way cases are halfway between two consecutive machine floats. +/// For example, the float `16777217.0` has a bitwise representation of +/// `100000000000000000000000 1`. Rounding to a single-precision float, +/// the trailing `1` is truncated. Using round-nearest, tie-even, any +/// value above `16777217.0` must be rounded up to `16777218.0`, while +/// any value before or equal to `16777217.0` must be rounded down +/// to `16777216.0`. 
These near-halfway conversions therefore may require +/// a large number of digits to unambiguously determine how to round. +#[inline] +pub fn slow<'a, F, Iter1, Iter2>( + num: Number, + fp: ExtendedFloat, + integer: Iter1, + fraction: Iter2, +) -> ExtendedFloat +where + F: Float, + Iter1: Iterator + Clone, + Iter2: Iterator + Clone, +{ + // Ensure our preconditions are valid: + // 1. The significant digits are not shifted into place. + debug_assert!(fp.mant & (1 << 63) != 0); + + // This assumes the sign bit has already been parsed, and we're + // starting with the integer digits, and the float format has been + // correctly validated. + let sci_exp = scientific_exponent(&num); + + // We have 2 major algorithms we use for this: + // 1. An algorithm with a finite number of digits and a positive exponent. + // 2. An algorithm with a finite number of digits and a negative exponent. + let (bigmant, digits) = parse_mantissa(integer, fraction, F::MAX_DIGITS); + let exponent = sci_exp + 1 - digits as i32; + if exponent >= 0 { + positive_digit_comp::(bigmant, exponent) + } else { + negative_digit_comp::(bigmant, fp, exponent) + } +} + +/// Generate the significant digits with a positive exponent relative to mantissa. +pub fn positive_digit_comp(mut bigmant: Bigint, exponent: i32) -> ExtendedFloat { + // Simple, we just need to multiply by the power of the radix. + // Now, we can calculate the mantissa and the exponent from this. + // The binary exponent is the binary exponent for the mantissa + // shifted to the hidden bit. + bigmant.pow(10, exponent as u32).unwrap(); + + // Get the exact representation of the float from the big integer. + // hi64 checks **all** the remaining bits after the mantissa, + // so it will check if **any** truncated digits exist. + let (mant, is_truncated) = bigmant.hi64(); + let exp = bigmant.bit_length() as i32 - 64 + F::EXPONENT_BIAS; + let mut fp = ExtendedFloat { + mant, + exp, + }; + + // Shift the digits into position and determine if we need to round-up. + round::(&mut fp, |f, s| { + round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| { + is_above || (is_halfway && is_truncated) || (is_odd && is_halfway) + }); + }); + fp +} + +/// Generate the significant digits with a negative exponent relative to mantissa. +/// +/// This algorithm is quite simple: we have the significant digits `m1 * b^N1`, +/// where `m1` is the bigint mantissa, `b` is the radix, and `N1` is the radix +/// exponent. We then calculate the theoretical representation of `b+h`, which +/// is `m2 * 2^N2`, where `m2` is the bigint mantissa and `N2` is the binary +/// exponent. If we had infinite, efficient floating precision, this would be +/// equal to `m1 / b^-N1` and then compare it to `m2 * 2^N2`. +/// +/// Since we cannot divide and keep precision, we must multiply the other: +/// if we want to do `m1 / b^-N1 >= m2 * 2^N2`, we can do +/// `m1 >= m2 * b^-N1 * 2^N2` Going to the decimal case, we can show and example +/// and simplify this further: `m1 >= m2 * 2^N2 * 10^-N1`. Since we can remove +/// a power-of-two, this is `m1 >= m2 * 2^(N2 - N1) * 5^-N1`. Therefore, if +/// `N2 - N1 > 0`, we need have `m1 >= m2 * 2^(N2 - N1) * 5^-N1`, otherwise, +/// we have `m1 * 2^(N1 - N2) >= m2 * 5^-N1`, where the resulting exponents +/// are all positive. +/// +/// This allows us to compare both floats using integers efficiently +/// without any loss of precision. 
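+///
+/// As a small worked instance of the identity above: with `N1 = -2` and
+/// `N2 = -5`, comparing `m1 * 10^-2` against `m2 * 2^-5` reduces to
+/// `m1 >= m2 * 2^(N2 - N1) * 5^-N1 = m2 * 2^-3 * 5^2`, i.e.
+/// `m1 * 2^3 >= m2 * 5^2`, so only non-negative integer powers remain.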
+#[allow(clippy::comparison_chain)] +pub fn negative_digit_comp( + bigmant: Bigint, + mut fp: ExtendedFloat, + exponent: i32, +) -> ExtendedFloat { + // Ensure our preconditions are valid: + // 1. The significant digits are not shifted into place. + debug_assert!(fp.mant & (1 << 63) != 0); + + // Get the significant digits and radix exponent for the real digits. + let mut real_digits = bigmant; + let real_exp = exponent; + debug_assert!(real_exp < 0); + + // Round down our extended-precision float and calculate `b`. + let mut b = fp; + round::(&mut b, round_down); + let b = extended_to_float::(b); + + // Get the significant digits and the binary exponent for `b+h`. + let theor = bh(b); + let mut theor_digits = Bigint::from_u64(theor.mant); + let theor_exp = theor.exp; + + // We need to scale the real digits and `b+h` digits to be the same + // order. We currently have `real_exp`, in `radix`, that needs to be + // shifted to `theor_digits` (since it is negative), and `theor_exp` + // to either `theor_digits` or `real_digits` as a power of 2 (since it + // may be positive or negative). Try to remove as many powers of 2 + // as possible. All values are relative to `theor_digits`, that is, + // reflect the power you need to multiply `theor_digits` by. + // + // Both are on opposite-sides of equation, can factor out a + // power of two. + // + // Example: 10^-10, 2^-10 -> ( 0, 10, 0) + // Example: 10^-10, 2^-15 -> (-5, 10, 0) + // Example: 10^-10, 2^-5 -> ( 5, 10, 0) + // Example: 10^-10, 2^5 -> (15, 10, 0) + let binary_exp = theor_exp - real_exp; + let halfradix_exp = -real_exp; + if halfradix_exp != 0 { + theor_digits.pow(5, halfradix_exp as u32).unwrap(); + } + if binary_exp > 0 { + theor_digits.pow(2, binary_exp as u32).unwrap(); + } else if binary_exp < 0 { + real_digits.pow(2, (-binary_exp) as u32).unwrap(); + } + + // Compare our theoretical and real digits and round nearest, tie even. + let ord = real_digits.data.cmp(&theor_digits.data); + round::(&mut fp, |f, s| { + round_nearest_tie_even(f, s, |is_odd, _, _| { + // Can ignore `is_halfway` and `is_above`, since those were + // calculates using less significant digits. + match ord { + cmp::Ordering::Greater => true, + cmp::Ordering::Less => false, + cmp::Ordering::Equal if is_odd => true, + cmp::Ordering::Equal => false, + } + }); + }); + fp +} + +/// Add a digit to the temporary value. +macro_rules! add_digit { + ($c:ident, $value:ident, $counter:ident, $count:ident) => {{ + let digit = $c - b'0'; + $value *= 10 as Limb; + $value += digit as Limb; + + // Increment our counters. + $counter += 1; + $count += 1; + }}; +} + +/// Add a temporary value to our mantissa. +macro_rules! add_temporary { + // Multiply by the small power and add the native value. + (@mul $result:ident, $power:expr, $value:expr) => { + $result.data.mul_small($power).unwrap(); + $result.data.add_small($value).unwrap(); + }; + + // # Safety + // + // Safe is `counter <= step`, or smaller than the table size. + ($format:ident, $result:ident, $counter:ident, $value:ident) => { + if $counter != 0 { + // SAFETY: safe, since `counter <= step`, or smaller than the table size. + let small_power = unsafe { f64::int_pow_fast_path($counter, 10) }; + add_temporary!(@mul $result, small_power as Limb, $value); + $counter = 0; + $value = 0; + } + }; + + // Add a temporary where we won't read the counter results internally. + // + // # Safety + // + // Safe is `counter <= step`, or smaller than the table size. 
+ (@end $format:ident, $result:ident, $counter:ident, $value:ident) => { + if $counter != 0 { + // SAFETY: safe, since `counter <= step`, or smaller than the table size. + let small_power = unsafe { f64::int_pow_fast_path($counter, 10) }; + add_temporary!(@mul $result, small_power as Limb, $value); + } + }; + + // Add the maximum native value. + (@max $format:ident, $result:ident, $counter:ident, $value:ident, $max:ident) => { + add_temporary!(@mul $result, $max, $value); + $counter = 0; + $value = 0; + }; +} + +/// Round-up a truncated value. +macro_rules! round_up_truncated { + ($format:ident, $result:ident, $count:ident) => {{ + // Need to round-up. + // Can't just add 1, since this can accidentally round-up + // values to a halfway point, which can cause invalid results. + add_temporary!(@mul $result, 10, 1); + $count += 1; + }}; +} + +/// Check and round-up the fraction if any non-zero digits exist. +macro_rules! round_up_nonzero { + ($format:ident, $iter:expr, $result:ident, $count:ident) => {{ + for &digit in $iter { + if digit != b'0' { + round_up_truncated!($format, $result, $count); + return ($result, $count); + } + } + }}; +} + +/// Parse the full mantissa into a big integer. +/// +/// Returns the parsed mantissa and the number of digits in the mantissa. +/// The max digits is the maximum number of digits plus one. +pub fn parse_mantissa<'a, Iter1, Iter2>( + mut integer: Iter1, + mut fraction: Iter2, + max_digits: usize, +) -> (Bigint, usize) +where + Iter1: Iterator + Clone, + Iter2: Iterator + Clone, +{ + // Iteratively process all the data in the mantissa. + // We do this via small, intermediate values which once we reach + // the maximum number of digits we can process without overflow, + // we add the temporary to the big integer. + let mut counter: usize = 0; + let mut count: usize = 0; + let mut value: Limb = 0; + let mut result = Bigint::new(); + + // Now use our pre-computed small powers iteratively. + // This is calculated as `⌊log(2^BITS - 1, 10)⌋`. + let step: usize = if LIMB_BITS == 32 { + 9 + } else { + 19 + }; + let max_native = (10 as Limb).pow(step as u32); + + // Process the integer digits. + 'integer: loop { + // Parse a digit at a time, until we reach step. + while counter < step && count < max_digits { + if let Some(&c) = integer.next() { + add_digit!(c, value, counter, count); + } else { + break 'integer; + } + } + + // Check if we've exhausted our max digits. + if count == max_digits { + // Need to check if we're truncated, and round-up accordingly. + // SAFETY: safe since `counter <= step`. + add_temporary!(@end format, result, counter, value); + round_up_nonzero!(format, integer, result, count); + round_up_nonzero!(format, fraction, result, count); + return (result, count); + } else { + // Add our temporary from the loop. + // SAFETY: safe since `counter <= step`. + add_temporary!(@max format, result, counter, value, max_native); + } + } + + // Skip leading fraction zeros. + // Required to get an accurate count. + if count == 0 { + for &c in &mut fraction { + if c != b'0' { + add_digit!(c, value, counter, count); + break; + } + } + } + + // Process the fraction digits. + 'fraction: loop { + // Parse a digit at a time, until we reach step. + while counter < step && count < max_digits { + if let Some(&c) = fraction.next() { + add_digit!(c, value, counter, count); + } else { + break 'fraction; + } + } + + // Check if we've exhausted our max digits. + if count == max_digits { + // SAFETY: safe since `counter <= step`. 
+ add_temporary!(@end format, result, counter, value); + round_up_nonzero!(format, fraction, result, count); + return (result, count); + } else { + // Add our temporary from the loop. + // SAFETY: safe since `counter <= step`. + add_temporary!(@max format, result, counter, value, max_native); + } + } + + // We will always have a remainder, as long as we entered the loop + // once, or counter % step is 0. + // SAFETY: safe since `counter <= step`. + add_temporary!(@end format, result, counter, value); + + (result, count) +} + +// SCALING +// ------- + +/// Calculate the scientific exponent from a `Number` value. +/// Any other attempts would require slowdowns for faster algorithms. +#[inline] +pub fn scientific_exponent(num: &Number) -> i32 { + // Use power reduction to make this faster. + let mut mantissa = num.mantissa; + let mut exponent = num.exponent; + while mantissa >= 10000 { + mantissa /= 10000; + exponent += 4; + } + while mantissa >= 100 { + mantissa /= 100; + exponent += 2; + } + while mantissa >= 10 { + mantissa /= 10; + exponent += 1; + } + exponent as i32 +} + +/// Calculate `b` from a a representation of `b` as a float. +#[inline] +pub fn b(float: F) -> ExtendedFloat { + ExtendedFloat { + mant: float.mantissa(), + exp: float.exponent(), + } +} + +/// Calculate `b+h` from a a representation of `b` as a float. +#[inline] +pub fn bh(float: F) -> ExtendedFloat { + let fp = b(float); + ExtendedFloat { + mant: (fp.mant << 1) + 1, + exp: fp.exp - 1, + } +} diff --git a/third_party/rust/minimal-lexical/src/stackvec.rs b/third_party/rust/minimal-lexical/src/stackvec.rs new file mode 100644 index 0000000000..d9bc259555 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/stackvec.rs @@ -0,0 +1,308 @@ +//! Simple stack-allocated vector. + +#![cfg(not(feature = "alloc"))] +#![doc(hidden)] + +use crate::bigint; +use core::{cmp, mem, ops, ptr, slice}; + +/// Simple stack vector implementation. +#[derive(Clone)] +pub struct StackVec { + /// The raw buffer for the elements. + data: [mem::MaybeUninit; bigint::BIGINT_LIMBS], + /// The number of elements in the array (we never need more than u16::MAX). + length: u16, +} + +#[allow(clippy::new_without_default)] +impl StackVec { + /// Construct an empty vector. + #[inline] + pub const fn new() -> Self { + Self { + length: 0, + data: [mem::MaybeUninit::uninit(); bigint::BIGINT_LIMBS], + } + } + + /// Construct a vector from an existing slice. + #[inline] + pub fn try_from(x: &[bigint::Limb]) -> Option { + let mut vec = Self::new(); + vec.try_extend(x)?; + Some(vec) + } + + /// Sets the length of a vector. + /// + /// This will explicitly set the size of the vector, without actually + /// modifying its buffers, so it is up to the caller to ensure that the + /// vector is actually the specified size. + /// + /// # Safety + /// + /// Safe as long as `len` is less than `BIGINT_LIMBS`. + #[inline] + pub unsafe fn set_len(&mut self, len: usize) { + // Constant is `u16::MAX` for older Rustc versions. + debug_assert!(len <= 0xffff); + debug_assert!(len <= bigint::BIGINT_LIMBS); + self.length = len as u16; + } + + /// The number of elements stored in the vector. + #[inline] + pub const fn len(&self) -> usize { + self.length as usize + } + + /// If the vector is empty. + #[inline] + pub const fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// The number of items the vector can hold. + #[inline] + pub const fn capacity(&self) -> usize { + bigint::BIGINT_LIMBS as usize + } + + /// Append an item to the vector, without bounds checking. 
+ /// + /// # Safety + /// + /// Safe if `self.len() < self.capacity()`. + #[inline] + pub unsafe fn push_unchecked(&mut self, value: bigint::Limb) { + debug_assert!(self.len() < self.capacity()); + // SAFETY: safe, capacity is less than the current size. + unsafe { + ptr::write(self.as_mut_ptr().add(self.len()), value); + self.length += 1; + } + } + + /// Append an item to the vector. + #[inline] + pub fn try_push(&mut self, value: bigint::Limb) -> Option<()> { + if self.len() < self.capacity() { + // SAFETY: safe, capacity is less than the current size. + unsafe { self.push_unchecked(value) }; + Some(()) + } else { + None + } + } + + /// Remove an item from the end of a vector, without bounds checking. + /// + /// # Safety + /// + /// Safe if `self.len() > 0`. + #[inline] + pub unsafe fn pop_unchecked(&mut self) -> bigint::Limb { + debug_assert!(!self.is_empty()); + // SAFETY: safe if `self.length > 0`. + // We have a trivial drop and copy, so this is safe. + self.length -= 1; + unsafe { ptr::read(self.as_mut_ptr().add(self.len())) } + } + + /// Remove an item from the end of the vector and return it, or None if empty. + #[inline] + pub fn pop(&mut self) -> Option { + if self.is_empty() { + None + } else { + // SAFETY: safe, since `self.len() > 0`. + unsafe { Some(self.pop_unchecked()) } + } + } + + /// Add items from a slice to the vector, without bounds checking. + /// + /// # Safety + /// + /// Safe if `self.len() + slc.len() <= self.capacity()`. + #[inline] + pub unsafe fn extend_unchecked(&mut self, slc: &[bigint::Limb]) { + let index = self.len(); + let new_len = index + slc.len(); + debug_assert!(self.len() + slc.len() <= self.capacity()); + let src = slc.as_ptr(); + // SAFETY: safe if `self.len() + slc.len() <= self.capacity()`. + unsafe { + let dst = self.as_mut_ptr().add(index); + ptr::copy_nonoverlapping(src, dst, slc.len()); + self.set_len(new_len); + } + } + + /// Copy elements from a slice and append them to the vector. + #[inline] + pub fn try_extend(&mut self, slc: &[bigint::Limb]) -> Option<()> { + if self.len() + slc.len() <= self.capacity() { + // SAFETY: safe, since `self.len() + slc.len() <= self.capacity()`. + unsafe { self.extend_unchecked(slc) }; + Some(()) + } else { + None + } + } + + /// Truncate vector to new length, dropping any items after `len`. + /// + /// # Safety + /// + /// Safe as long as `len <= self.capacity()`. + unsafe fn truncate_unchecked(&mut self, len: usize) { + debug_assert!(len <= self.capacity()); + self.length = len as u16; + } + + /// Resize the buffer, without bounds checking. + /// + /// # Safety + /// + /// Safe as long as `len <= self.capacity()`. + #[inline] + pub unsafe fn resize_unchecked(&mut self, len: usize, value: bigint::Limb) { + debug_assert!(len <= self.capacity()); + let old_len = self.len(); + if len > old_len { + // We have a trivial drop, so there's no worry here. + // Just, don't set the length until all values have been written, + // so we don't accidentally read uninitialized memory. + + // SAFETY: safe if `len < self.capacity()`. + let count = len - old_len; + for index in 0..count { + unsafe { + let dst = self.as_mut_ptr().add(old_len + index); + ptr::write(dst, value); + } + } + self.length = len as u16; + } else { + // SAFETY: safe since `len < self.len()`. + unsafe { self.truncate_unchecked(len) }; + } + } + + /// Try to resize the buffer. + /// + /// If the new length is smaller than the current length, truncate + /// the input. If it's larger, then append elements to the buffer. 
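+    ///
+    /// Returns `Some(())` on success, or `None` if `len` exceeds the fixed
+    /// capacity, since the stack-allocated buffer cannot grow.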
+ #[inline] + pub fn try_resize(&mut self, len: usize, value: bigint::Limb) -> Option<()> { + if len > self.capacity() { + None + } else { + // SAFETY: safe, since `len <= self.capacity()`. + unsafe { self.resize_unchecked(len, value) }; + Some(()) + } + } + + // HI + + /// Get the high 64 bits from the vector. + #[inline(always)] + pub fn hi64(&self) -> (u64, bool) { + bigint::hi64(self) + } + + // FROM + + /// Create StackVec from u64 value. + #[inline(always)] + pub fn from_u64(x: u64) -> Self { + bigint::from_u64(x) + } + + // MATH + + /// Normalize the integer, so any leading zero values are removed. + #[inline] + pub fn normalize(&mut self) { + bigint::normalize(self) + } + + /// Get if the big integer is normalized. + #[inline] + pub fn is_normalized(&self) -> bool { + bigint::is_normalized(self) + } + + /// AddAssign small integer. + #[inline] + pub fn add_small(&mut self, y: bigint::Limb) -> Option<()> { + bigint::small_add(self, y) + } + + /// MulAssign small integer. + #[inline] + pub fn mul_small(&mut self, y: bigint::Limb) -> Option<()> { + bigint::small_mul(self, y) + } +} + +impl PartialEq for StackVec { + #[inline] + #[allow(clippy::op_ref)] + fn eq(&self, other: &Self) -> bool { + use core::ops::Deref; + self.len() == other.len() && self.deref() == other.deref() + } +} + +impl Eq for StackVec { +} + +impl cmp::PartialOrd for StackVec { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + Some(bigint::compare(self, other)) + } +} + +impl cmp::Ord for StackVec { + #[inline] + fn cmp(&self, other: &Self) -> cmp::Ordering { + bigint::compare(self, other) + } +} + +impl ops::Deref for StackVec { + type Target = [bigint::Limb]; + #[inline] + fn deref(&self) -> &[bigint::Limb] { + // SAFETY: safe since `self.data[..self.len()]` must be initialized + // and `self.len() <= self.capacity()`. + unsafe { + let ptr = self.data.as_ptr() as *const bigint::Limb; + slice::from_raw_parts(ptr, self.len()) + } + } +} + +impl ops::DerefMut for StackVec { + #[inline] + fn deref_mut(&mut self) -> &mut [bigint::Limb] { + // SAFETY: safe since `self.data[..self.len()]` must be initialized + // and `self.len() <= self.capacity()`. + unsafe { + let ptr = self.data.as_mut_ptr() as *mut bigint::Limb; + slice::from_raw_parts_mut(ptr, self.len()) + } + } +} + +impl ops::MulAssign<&[bigint::Limb]> for StackVec { + #[inline] + fn mul_assign(&mut self, rhs: &[bigint::Limb]) { + bigint::large_mul(self, rhs).unwrap(); + } +} diff --git a/third_party/rust/minimal-lexical/src/table.rs b/third_party/rust/minimal-lexical/src/table.rs new file mode 100644 index 0000000000..7b1367e326 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/table.rs @@ -0,0 +1,11 @@ +//! Pre-computed tables for parsing float strings. + +#![doc(hidden)] + +// Re-export all the feature-specific files. +#[cfg(feature = "compact")] +pub use crate::table_bellerophon::*; +#[cfg(not(feature = "compact"))] +pub use crate::table_lemire::*; +#[cfg(not(feature = "compact"))] +pub use crate::table_small::*; diff --git a/third_party/rust/minimal-lexical/src/table_bellerophon.rs b/third_party/rust/minimal-lexical/src/table_bellerophon.rs new file mode 100644 index 0000000000..f85f8e6fb3 --- /dev/null +++ b/third_party/rust/minimal-lexical/src/table_bellerophon.rs @@ -0,0 +1,119 @@ +//! Cached exponents for basen values with 80-bit extended floats. +//! +//! Exact versions of base**n as an extended-precision float, with both +//! large and small powers. Use the large powers to minimize the amount +//! of compounded error. 
This is used in the Bellerophon algorithm. +//! +//! These values were calculated using Python, using the arbitrary-precision +//! integer to calculate exact extended-representation of each value. +//! These values are all normalized. +//! +//! DO NOT MODIFY: Generated by `etc/bellerophon_table.py` + +#![cfg(feature = "compact")] +#![doc(hidden)] + +use crate::bellerophon::BellerophonPowers; + +// HIGH LEVEL +// ---------- + +pub const BASE10_POWERS: BellerophonPowers = BellerophonPowers { + small: &BASE10_SMALL_MANTISSA, + large: &BASE10_LARGE_MANTISSA, + small_int: &BASE10_SMALL_INT_POWERS, + step: BASE10_STEP, + bias: BASE10_BIAS, + log2: BASE10_LOG2_MULT, + log2_shift: BASE10_LOG2_SHIFT, +}; + +// LOW-LEVEL +// --------- + +const BASE10_SMALL_MANTISSA: [u64; 10] = [ + 9223372036854775808, // 10^0 + 11529215046068469760, // 10^1 + 14411518807585587200, // 10^2 + 18014398509481984000, // 10^3 + 11258999068426240000, // 10^4 + 14073748835532800000, // 10^5 + 17592186044416000000, // 10^6 + 10995116277760000000, // 10^7 + 13743895347200000000, // 10^8 + 17179869184000000000, // 10^9 +]; +const BASE10_LARGE_MANTISSA: [u64; 66] = [ + 11555125961253852697, // 10^-350 + 13451937075301367670, // 10^-340 + 15660115838168849784, // 10^-330 + 18230774251475056848, // 10^-320 + 10611707258198326947, // 10^-310 + 12353653155963782858, // 10^-300 + 14381545078898527261, // 10^-290 + 16742321987285426889, // 10^-280 + 9745314011399999080, // 10^-270 + 11345038669416679861, // 10^-260 + 13207363278391631158, // 10^-250 + 15375394465392026070, // 10^-240 + 17899314949046850752, // 10^-230 + 10418772551374772303, // 10^-220 + 12129047596099288555, // 10^-210 + 14120069793541087484, // 10^-200 + 16437924692338667210, // 10^-190 + 9568131466127621947, // 10^-180 + 11138771039116687545, // 10^-170 + 12967236152753102995, // 10^-160 + 15095849699286165408, // 10^-150 + 17573882009934360870, // 10^-140 + 10229345649675443343, // 10^-130 + 11908525658859223294, // 10^-120 + 13863348470604074297, // 10^-110 + 16139061738043178685, // 10^-100 + 9394170331095332911, // 10^-90 + 10936253623915059621, // 10^-80 + 12731474852090538039, // 10^-70 + 14821387422376473014, // 10^-60 + 17254365866976409468, // 10^-50 + 10043362776618689222, // 10^-40 + 11692013098647223345, // 10^-30 + 13611294676837538538, // 10^-20 + 15845632502852867518, // 10^-10 + 9223372036854775808, // 10^0 + 10737418240000000000, // 10^10 + 12500000000000000000, // 10^20 + 14551915228366851806, // 10^30 + 16940658945086006781, // 10^40 + 9860761315262647567, // 10^50 + 11479437019748901445, // 10^60 + 13363823550460978230, // 10^70 + 15557538194652854267, // 10^80 + 18111358157653424735, // 10^90 + 10542197943230523224, // 10^100 + 12272733663244316382, // 10^110 + 14287342391028437277, // 10^120 + 16632655625031838749, // 10^130 + 9681479787123295682, // 10^140 + 11270725851789228247, // 10^150 + 13120851772591970218, // 10^160 + 15274681817498023410, // 10^170 + 17782069995880619867, // 10^180 + 10350527006597618960, // 10^190 + 12049599325514420588, // 10^200 + 14027579833653779454, // 10^210 + 16330252207878254650, // 10^220 + 9505457831475799117, // 10^230 + 11065809325636130661, // 10^240 + 12882297539194266616, // 10^250 + 14996968138956309548, // 10^260 + 17458768723248864463, // 10^270 + 10162340898095201970, // 10^280 + 11830521861667747109, // 10^290 + 13772540099066387756, // 10^300 +]; +const BASE10_SMALL_INT_POWERS: [u64; 10] = + [1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000]; +const BASE10_STEP: i32 = 
10; +const BASE10_BIAS: i32 = 350; +const BASE10_LOG2_MULT: i64 = 217706; +const BASE10_LOG2_SHIFT: i32 = 16; diff --git a/third_party/rust/minimal-lexical/src/table_lemire.rs b/third_party/rust/minimal-lexical/src/table_lemire.rs new file mode 100644 index 0000000000..110e1dab2b --- /dev/null +++ b/third_party/rust/minimal-lexical/src/table_lemire.rs @@ -0,0 +1,676 @@ +//! Pre-computed tables powers-of-5 for extended-precision representations. +//! +//! These tables enable fast scaling of the significant digits +//! of a float to the decimal exponent, with minimal rounding +//! errors, in a 128 or 192-bit representation. +//! +//! DO NOT MODIFY: Generated by `etc/lemire_table.py` +//! +//! This adapted from the Rust implementation, based on the fast-float-rust +//! implementation, and is similarly subject to an Apache2.0/MIT license. + +#![doc(hidden)] +#![cfg(not(feature = "compact"))] + +pub const SMALLEST_POWER_OF_FIVE: i32 = -342; +pub const LARGEST_POWER_OF_FIVE: i32 = 308; +pub const N_POWERS_OF_FIVE: usize = (LARGEST_POWER_OF_FIVE - SMALLEST_POWER_OF_FIVE + 1) as usize; + +// Use static to avoid long compile times: Rust compiler errors +// can have the entire table compiled multiple times, and then +// emit code multiple times, even if it's stripped out in +// the final binary. +#[rustfmt::skip] +pub static POWER_OF_FIVE_128: [(u64, u64); N_POWERS_OF_FIVE] = [ + (0xeef453d6923bd65a, 0x113faa2906a13b3f), // 5^-342 + (0x9558b4661b6565f8, 0x4ac7ca59a424c507), // 5^-341 + (0xbaaee17fa23ebf76, 0x5d79bcf00d2df649), // 5^-340 + (0xe95a99df8ace6f53, 0xf4d82c2c107973dc), // 5^-339 + (0x91d8a02bb6c10594, 0x79071b9b8a4be869), // 5^-338 + (0xb64ec836a47146f9, 0x9748e2826cdee284), // 5^-337 + (0xe3e27a444d8d98b7, 0xfd1b1b2308169b25), // 5^-336 + (0x8e6d8c6ab0787f72, 0xfe30f0f5e50e20f7), // 5^-335 + (0xb208ef855c969f4f, 0xbdbd2d335e51a935), // 5^-334 + (0xde8b2b66b3bc4723, 0xad2c788035e61382), // 5^-333 + (0x8b16fb203055ac76, 0x4c3bcb5021afcc31), // 5^-332 + (0xaddcb9e83c6b1793, 0xdf4abe242a1bbf3d), // 5^-331 + (0xd953e8624b85dd78, 0xd71d6dad34a2af0d), // 5^-330 + (0x87d4713d6f33aa6b, 0x8672648c40e5ad68), // 5^-329 + (0xa9c98d8ccb009506, 0x680efdaf511f18c2), // 5^-328 + (0xd43bf0effdc0ba48, 0x212bd1b2566def2), // 5^-327 + (0x84a57695fe98746d, 0x14bb630f7604b57), // 5^-326 + (0xa5ced43b7e3e9188, 0x419ea3bd35385e2d), // 5^-325 + (0xcf42894a5dce35ea, 0x52064cac828675b9), // 5^-324 + (0x818995ce7aa0e1b2, 0x7343efebd1940993), // 5^-323 + (0xa1ebfb4219491a1f, 0x1014ebe6c5f90bf8), // 5^-322 + (0xca66fa129f9b60a6, 0xd41a26e077774ef6), // 5^-321 + (0xfd00b897478238d0, 0x8920b098955522b4), // 5^-320 + (0x9e20735e8cb16382, 0x55b46e5f5d5535b0), // 5^-319 + (0xc5a890362fddbc62, 0xeb2189f734aa831d), // 5^-318 + (0xf712b443bbd52b7b, 0xa5e9ec7501d523e4), // 5^-317 + (0x9a6bb0aa55653b2d, 0x47b233c92125366e), // 5^-316 + (0xc1069cd4eabe89f8, 0x999ec0bb696e840a), // 5^-315 + (0xf148440a256e2c76, 0xc00670ea43ca250d), // 5^-314 + (0x96cd2a865764dbca, 0x380406926a5e5728), // 5^-313 + (0xbc807527ed3e12bc, 0xc605083704f5ecf2), // 5^-312 + (0xeba09271e88d976b, 0xf7864a44c633682e), // 5^-311 + (0x93445b8731587ea3, 0x7ab3ee6afbe0211d), // 5^-310 + (0xb8157268fdae9e4c, 0x5960ea05bad82964), // 5^-309 + (0xe61acf033d1a45df, 0x6fb92487298e33bd), // 5^-308 + (0x8fd0c16206306bab, 0xa5d3b6d479f8e056), // 5^-307 + (0xb3c4f1ba87bc8696, 0x8f48a4899877186c), // 5^-306 + (0xe0b62e2929aba83c, 0x331acdabfe94de87), // 5^-305 + (0x8c71dcd9ba0b4925, 0x9ff0c08b7f1d0b14), // 5^-304 + (0xaf8e5410288e1b6f, 0x7ecf0ae5ee44dd9), // 5^-303 + 
(0xdb71e91432b1a24a, 0xc9e82cd9f69d6150), // 5^-302 + (0x892731ac9faf056e, 0xbe311c083a225cd2), // 5^-301 + (0xab70fe17c79ac6ca, 0x6dbd630a48aaf406), // 5^-300 + (0xd64d3d9db981787d, 0x92cbbccdad5b108), // 5^-299 + (0x85f0468293f0eb4e, 0x25bbf56008c58ea5), // 5^-298 + (0xa76c582338ed2621, 0xaf2af2b80af6f24e), // 5^-297 + (0xd1476e2c07286faa, 0x1af5af660db4aee1), // 5^-296 + (0x82cca4db847945ca, 0x50d98d9fc890ed4d), // 5^-295 + (0xa37fce126597973c, 0xe50ff107bab528a0), // 5^-294 + (0xcc5fc196fefd7d0c, 0x1e53ed49a96272c8), // 5^-293 + (0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7a), // 5^-292 + (0x9faacf3df73609b1, 0x77b191618c54e9ac), // 5^-291 + (0xc795830d75038c1d, 0xd59df5b9ef6a2417), // 5^-290 + (0xf97ae3d0d2446f25, 0x4b0573286b44ad1d), // 5^-289 + (0x9becce62836ac577, 0x4ee367f9430aec32), // 5^-288 + (0xc2e801fb244576d5, 0x229c41f793cda73f), // 5^-287 + (0xf3a20279ed56d48a, 0x6b43527578c1110f), // 5^-286 + (0x9845418c345644d6, 0x830a13896b78aaa9), // 5^-285 + (0xbe5691ef416bd60c, 0x23cc986bc656d553), // 5^-284 + (0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa8), // 5^-283 + (0x94b3a202eb1c3f39, 0x7bf7d71432f3d6a9), // 5^-282 + (0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc53), // 5^-281 + (0xe858ad248f5c22c9, 0xd1b3400f8f9cff68), // 5^-280 + (0x91376c36d99995be, 0x23100809b9c21fa1), // 5^-279 + (0xb58547448ffffb2d, 0xabd40a0c2832a78a), // 5^-278 + (0xe2e69915b3fff9f9, 0x16c90c8f323f516c), // 5^-277 + (0x8dd01fad907ffc3b, 0xae3da7d97f6792e3), // 5^-276 + (0xb1442798f49ffb4a, 0x99cd11cfdf41779c), // 5^-275 + (0xdd95317f31c7fa1d, 0x40405643d711d583), // 5^-274 + (0x8a7d3eef7f1cfc52, 0x482835ea666b2572), // 5^-273 + (0xad1c8eab5ee43b66, 0xda3243650005eecf), // 5^-272 + (0xd863b256369d4a40, 0x90bed43e40076a82), // 5^-271 + (0x873e4f75e2224e68, 0x5a7744a6e804a291), // 5^-270 + (0xa90de3535aaae202, 0x711515d0a205cb36), // 5^-269 + (0xd3515c2831559a83, 0xd5a5b44ca873e03), // 5^-268 + (0x8412d9991ed58091, 0xe858790afe9486c2), // 5^-267 + (0xa5178fff668ae0b6, 0x626e974dbe39a872), // 5^-266 + (0xce5d73ff402d98e3, 0xfb0a3d212dc8128f), // 5^-265 + (0x80fa687f881c7f8e, 0x7ce66634bc9d0b99), // 5^-264 + (0xa139029f6a239f72, 0x1c1fffc1ebc44e80), // 5^-263 + (0xc987434744ac874e, 0xa327ffb266b56220), // 5^-262 + (0xfbe9141915d7a922, 0x4bf1ff9f0062baa8), // 5^-261 + (0x9d71ac8fada6c9b5, 0x6f773fc3603db4a9), // 5^-260 + (0xc4ce17b399107c22, 0xcb550fb4384d21d3), // 5^-259 + (0xf6019da07f549b2b, 0x7e2a53a146606a48), // 5^-258 + (0x99c102844f94e0fb, 0x2eda7444cbfc426d), // 5^-257 + (0xc0314325637a1939, 0xfa911155fefb5308), // 5^-256 + (0xf03d93eebc589f88, 0x793555ab7eba27ca), // 5^-255 + (0x96267c7535b763b5, 0x4bc1558b2f3458de), // 5^-254 + (0xbbb01b9283253ca2, 0x9eb1aaedfb016f16), // 5^-253 + (0xea9c227723ee8bcb, 0x465e15a979c1cadc), // 5^-252 + (0x92a1958a7675175f, 0xbfacd89ec191ec9), // 5^-251 + (0xb749faed14125d36, 0xcef980ec671f667b), // 5^-250 + (0xe51c79a85916f484, 0x82b7e12780e7401a), // 5^-249 + (0x8f31cc0937ae58d2, 0xd1b2ecb8b0908810), // 5^-248 + (0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa15), // 5^-247 + (0xdfbdcece67006ac9, 0x67a791e093e1d49a), // 5^-246 + (0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e0), // 5^-245 + (0xaecc49914078536d, 0x58fae9f773886e18), // 5^-244 + (0xda7f5bf590966848, 0xaf39a475506a899e), // 5^-243 + (0x888f99797a5e012d, 0x6d8406c952429603), // 5^-242 + (0xaab37fd7d8f58178, 0xc8e5087ba6d33b83), // 5^-241 + (0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a64), // 5^-240 + (0x855c3be0a17fcd26, 0x5cf2eea09a55067f), // 5^-239 + (0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481e), // 5^-238 + (0xd0601d8efc57b08b, 0xf13b94daf124da26), // 
5^-237 + (0x823c12795db6ce57, 0x76c53d08d6b70858), // 5^-236 + (0xa2cb1717b52481ed, 0x54768c4b0c64ca6e), // 5^-235 + (0xcb7ddcdda26da268, 0xa9942f5dcf7dfd09), // 5^-234 + (0xfe5d54150b090b02, 0xd3f93b35435d7c4c), // 5^-233 + (0x9efa548d26e5a6e1, 0xc47bc5014a1a6daf), // 5^-232 + (0xc6b8e9b0709f109a, 0x359ab6419ca1091b), // 5^-231 + (0xf867241c8cc6d4c0, 0xc30163d203c94b62), // 5^-230 + (0x9b407691d7fc44f8, 0x79e0de63425dcf1d), // 5^-229 + (0xc21094364dfb5636, 0x985915fc12f542e4), // 5^-228 + (0xf294b943e17a2bc4, 0x3e6f5b7b17b2939d), // 5^-227 + (0x979cf3ca6cec5b5a, 0xa705992ceecf9c42), // 5^-226 + (0xbd8430bd08277231, 0x50c6ff782a838353), // 5^-225 + (0xece53cec4a314ebd, 0xa4f8bf5635246428), // 5^-224 + (0x940f4613ae5ed136, 0x871b7795e136be99), // 5^-223 + (0xb913179899f68584, 0x28e2557b59846e3f), // 5^-222 + (0xe757dd7ec07426e5, 0x331aeada2fe589cf), // 5^-221 + (0x9096ea6f3848984f, 0x3ff0d2c85def7621), // 5^-220 + (0xb4bca50b065abe63, 0xfed077a756b53a9), // 5^-219 + (0xe1ebce4dc7f16dfb, 0xd3e8495912c62894), // 5^-218 + (0x8d3360f09cf6e4bd, 0x64712dd7abbbd95c), // 5^-217 + (0xb080392cc4349dec, 0xbd8d794d96aacfb3), // 5^-216 + (0xdca04777f541c567, 0xecf0d7a0fc5583a0), // 5^-215 + (0x89e42caaf9491b60, 0xf41686c49db57244), // 5^-214 + (0xac5d37d5b79b6239, 0x311c2875c522ced5), // 5^-213 + (0xd77485cb25823ac7, 0x7d633293366b828b), // 5^-212 + (0x86a8d39ef77164bc, 0xae5dff9c02033197), // 5^-211 + (0xa8530886b54dbdeb, 0xd9f57f830283fdfc), // 5^-210 + (0xd267caa862a12d66, 0xd072df63c324fd7b), // 5^-209 + (0x8380dea93da4bc60, 0x4247cb9e59f71e6d), // 5^-208 + (0xa46116538d0deb78, 0x52d9be85f074e608), // 5^-207 + (0xcd795be870516656, 0x67902e276c921f8b), // 5^-206 + (0x806bd9714632dff6, 0xba1cd8a3db53b6), // 5^-205 + (0xa086cfcd97bf97f3, 0x80e8a40eccd228a4), // 5^-204 + (0xc8a883c0fdaf7df0, 0x6122cd128006b2cd), // 5^-203 + (0xfad2a4b13d1b5d6c, 0x796b805720085f81), // 5^-202 + (0x9cc3a6eec6311a63, 0xcbe3303674053bb0), // 5^-201 + (0xc3f490aa77bd60fc, 0xbedbfc4411068a9c), // 5^-200 + (0xf4f1b4d515acb93b, 0xee92fb5515482d44), // 5^-199 + (0x991711052d8bf3c5, 0x751bdd152d4d1c4a), // 5^-198 + (0xbf5cd54678eef0b6, 0xd262d45a78a0635d), // 5^-197 + (0xef340a98172aace4, 0x86fb897116c87c34), // 5^-196 + (0x9580869f0e7aac0e, 0xd45d35e6ae3d4da0), // 5^-195 + (0xbae0a846d2195712, 0x8974836059cca109), // 5^-194 + (0xe998d258869facd7, 0x2bd1a438703fc94b), // 5^-193 + (0x91ff83775423cc06, 0x7b6306a34627ddcf), // 5^-192 + (0xb67f6455292cbf08, 0x1a3bc84c17b1d542), // 5^-191 + (0xe41f3d6a7377eeca, 0x20caba5f1d9e4a93), // 5^-190 + (0x8e938662882af53e, 0x547eb47b7282ee9c), // 5^-189 + (0xb23867fb2a35b28d, 0xe99e619a4f23aa43), // 5^-188 + (0xdec681f9f4c31f31, 0x6405fa00e2ec94d4), // 5^-187 + (0x8b3c113c38f9f37e, 0xde83bc408dd3dd04), // 5^-186 + (0xae0b158b4738705e, 0x9624ab50b148d445), // 5^-185 + (0xd98ddaee19068c76, 0x3badd624dd9b0957), // 5^-184 + (0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d6), // 5^-183 + (0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4c), // 5^-182 + (0xd47487cc8470652b, 0x7647c3200069671f), // 5^-181 + (0x84c8d4dfd2c63f3b, 0x29ecd9f40041e073), // 5^-180 + (0xa5fb0a17c777cf09, 0xf468107100525890), // 5^-179 + (0xcf79cc9db955c2cc, 0x7182148d4066eeb4), // 5^-178 + (0x81ac1fe293d599bf, 0xc6f14cd848405530), // 5^-177 + (0xa21727db38cb002f, 0xb8ada00e5a506a7c), // 5^-176 + (0xca9cf1d206fdc03b, 0xa6d90811f0e4851c), // 5^-175 + (0xfd442e4688bd304a, 0x908f4a166d1da663), // 5^-174 + (0x9e4a9cec15763e2e, 0x9a598e4e043287fe), // 5^-173 + (0xc5dd44271ad3cdba, 0x40eff1e1853f29fd), // 5^-172 + (0xf7549530e188c128, 
0xd12bee59e68ef47c), // 5^-171 + (0x9a94dd3e8cf578b9, 0x82bb74f8301958ce), // 5^-170 + (0xc13a148e3032d6e7, 0xe36a52363c1faf01), // 5^-169 + (0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac1), // 5^-168 + (0x96f5600f15a7b7e5, 0x29ab103a5ef8c0b9), // 5^-167 + (0xbcb2b812db11a5de, 0x7415d448f6b6f0e7), // 5^-166 + (0xebdf661791d60f56, 0x111b495b3464ad21), // 5^-165 + (0x936b9fcebb25c995, 0xcab10dd900beec34), // 5^-164 + (0xb84687c269ef3bfb, 0x3d5d514f40eea742), // 5^-163 + (0xe65829b3046b0afa, 0xcb4a5a3112a5112), // 5^-162 + (0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ab), // 5^-161 + (0xb3f4e093db73a093, 0x59ed216765690f56), // 5^-160 + (0xe0f218b8d25088b8, 0x306869c13ec3532c), // 5^-159 + (0x8c974f7383725573, 0x1e414218c73a13fb), // 5^-158 + (0xafbd2350644eeacf, 0xe5d1929ef90898fa), // 5^-157 + (0xdbac6c247d62a583, 0xdf45f746b74abf39), // 5^-156 + (0x894bc396ce5da772, 0x6b8bba8c328eb783), // 5^-155 + (0xab9eb47c81f5114f, 0x66ea92f3f326564), // 5^-154 + (0xd686619ba27255a2, 0xc80a537b0efefebd), // 5^-153 + (0x8613fd0145877585, 0xbd06742ce95f5f36), // 5^-152 + (0xa798fc4196e952e7, 0x2c48113823b73704), // 5^-151 + (0xd17f3b51fca3a7a0, 0xf75a15862ca504c5), // 5^-150 + (0x82ef85133de648c4, 0x9a984d73dbe722fb), // 5^-149 + (0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebba), // 5^-148 + (0xcc963fee10b7d1b3, 0x318df905079926a8), // 5^-147 + (0xffbbcfe994e5c61f, 0xfdf17746497f7052), // 5^-146 + (0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa633), // 5^-145 + (0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc0), // 5^-144 + (0xf9bd690a1b68637b, 0x3dfdce7aa3c673b0), // 5^-143 + (0x9c1661a651213e2d, 0x6bea10ca65c084e), // 5^-142 + (0xc31bfa0fe5698db8, 0x486e494fcff30a62), // 5^-141 + (0xf3e2f893dec3f126, 0x5a89dba3c3efccfa), // 5^-140 + (0x986ddb5c6b3a76b7, 0xf89629465a75e01c), // 5^-139 + (0xbe89523386091465, 0xf6bbb397f1135823), // 5^-138 + (0xee2ba6c0678b597f, 0x746aa07ded582e2c), // 5^-137 + (0x94db483840b717ef, 0xa8c2a44eb4571cdc), // 5^-136 + (0xba121a4650e4ddeb, 0x92f34d62616ce413), // 5^-135 + (0xe896a0d7e51e1566, 0x77b020baf9c81d17), // 5^-134 + (0x915e2486ef32cd60, 0xace1474dc1d122e), // 5^-133 + (0xb5b5ada8aaff80b8, 0xd819992132456ba), // 5^-132 + (0xe3231912d5bf60e6, 0x10e1fff697ed6c69), // 5^-131 + (0x8df5efabc5979c8f, 0xca8d3ffa1ef463c1), // 5^-130 + (0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb2), // 5^-129 + (0xddd0467c64bce4a0, 0xac7cb3f6d05ddbde), // 5^-128 + (0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96b), // 5^-127 + (0xad4ab7112eb3929d, 0x86c16c98d2c953c6), // 5^-126 + (0xd89d64d57a607744, 0xe871c7bf077ba8b7), // 5^-125 + (0x87625f056c7c4a8b, 0x11471cd764ad4972), // 5^-124 + (0xa93af6c6c79b5d2d, 0xd598e40d3dd89bcf), // 5^-123 + (0xd389b47879823479, 0x4aff1d108d4ec2c3), // 5^-122 + (0x843610cb4bf160cb, 0xcedf722a585139ba), // 5^-121 + (0xa54394fe1eedb8fe, 0xc2974eb4ee658828), // 5^-120 + (0xce947a3da6a9273e, 0x733d226229feea32), // 5^-119 + (0x811ccc668829b887, 0x806357d5a3f525f), // 5^-118 + (0xa163ff802a3426a8, 0xca07c2dcb0cf26f7), // 5^-117 + (0xc9bcff6034c13052, 0xfc89b393dd02f0b5), // 5^-116 + (0xfc2c3f3841f17c67, 0xbbac2078d443ace2), // 5^-115 + (0x9d9ba7832936edc0, 0xd54b944b84aa4c0d), // 5^-114 + (0xc5029163f384a931, 0xa9e795e65d4df11), // 5^-113 + (0xf64335bcf065d37d, 0x4d4617b5ff4a16d5), // 5^-112 + (0x99ea0196163fa42e, 0x504bced1bf8e4e45), // 5^-111 + (0xc06481fb9bcf8d39, 0xe45ec2862f71e1d6), // 5^-110 + (0xf07da27a82c37088, 0x5d767327bb4e5a4c), // 5^-109 + (0x964e858c91ba2655, 0x3a6a07f8d510f86f), // 5^-108 + (0xbbe226efb628afea, 0x890489f70a55368b), // 5^-107 + (0xeadab0aba3b2dbe5, 0x2b45ac74ccea842e), // 5^-106 + 
(0x92c8ae6b464fc96f, 0x3b0b8bc90012929d), // 5^-105 + (0xb77ada0617e3bbcb, 0x9ce6ebb40173744), // 5^-104 + (0xe55990879ddcaabd, 0xcc420a6a101d0515), // 5^-103 + (0x8f57fa54c2a9eab6, 0x9fa946824a12232d), // 5^-102 + (0xb32df8e9f3546564, 0x47939822dc96abf9), // 5^-101 + (0xdff9772470297ebd, 0x59787e2b93bc56f7), // 5^-100 + (0x8bfbea76c619ef36, 0x57eb4edb3c55b65a), // 5^-99 + (0xaefae51477a06b03, 0xede622920b6b23f1), // 5^-98 + (0xdab99e59958885c4, 0xe95fab368e45eced), // 5^-97 + (0x88b402f7fd75539b, 0x11dbcb0218ebb414), // 5^-96 + (0xaae103b5fcd2a881, 0xd652bdc29f26a119), // 5^-95 + (0xd59944a37c0752a2, 0x4be76d3346f0495f), // 5^-94 + (0x857fcae62d8493a5, 0x6f70a4400c562ddb), // 5^-93 + (0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb952), // 5^-92 + (0xd097ad07a71f26b2, 0x7e2000a41346a7a7), // 5^-91 + (0x825ecc24c873782f, 0x8ed400668c0c28c8), // 5^-90 + (0xa2f67f2dfa90563b, 0x728900802f0f32fa), // 5^-89 + (0xcbb41ef979346bca, 0x4f2b40a03ad2ffb9), // 5^-88 + (0xfea126b7d78186bc, 0xe2f610c84987bfa8), // 5^-87 + (0x9f24b832e6b0f436, 0xdd9ca7d2df4d7c9), // 5^-86 + (0xc6ede63fa05d3143, 0x91503d1c79720dbb), // 5^-85 + (0xf8a95fcf88747d94, 0x75a44c6397ce912a), // 5^-84 + (0x9b69dbe1b548ce7c, 0xc986afbe3ee11aba), // 5^-83 + (0xc24452da229b021b, 0xfbe85badce996168), // 5^-82 + (0xf2d56790ab41c2a2, 0xfae27299423fb9c3), // 5^-81 + (0x97c560ba6b0919a5, 0xdccd879fc967d41a), // 5^-80 + (0xbdb6b8e905cb600f, 0x5400e987bbc1c920), // 5^-79 + (0xed246723473e3813, 0x290123e9aab23b68), // 5^-78 + (0x9436c0760c86e30b, 0xf9a0b6720aaf6521), // 5^-77 + (0xb94470938fa89bce, 0xf808e40e8d5b3e69), // 5^-76 + (0xe7958cb87392c2c2, 0xb60b1d1230b20e04), // 5^-75 + (0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c2), // 5^-74 + (0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af3), // 5^-73 + (0xe2280b6c20dd5232, 0x25c6da63c38de1b0), // 5^-72 + (0x8d590723948a535f, 0x579c487e5a38ad0e), // 5^-71 + (0xb0af48ec79ace837, 0x2d835a9df0c6d851), // 5^-70 + (0xdcdb1b2798182244, 0xf8e431456cf88e65), // 5^-69 + (0x8a08f0f8bf0f156b, 0x1b8e9ecb641b58ff), // 5^-68 + (0xac8b2d36eed2dac5, 0xe272467e3d222f3f), // 5^-67 + (0xd7adf884aa879177, 0x5b0ed81dcc6abb0f), // 5^-66 + (0x86ccbb52ea94baea, 0x98e947129fc2b4e9), // 5^-65 + (0xa87fea27a539e9a5, 0x3f2398d747b36224), // 5^-64 + (0xd29fe4b18e88640e, 0x8eec7f0d19a03aad), // 5^-63 + (0x83a3eeeef9153e89, 0x1953cf68300424ac), // 5^-62 + (0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd7), // 5^-61 + (0xcdb02555653131b6, 0x3792f412cb06794d), // 5^-60 + (0x808e17555f3ebf11, 0xe2bbd88bbee40bd0), // 5^-59 + (0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec4), // 5^-58 + (0xc8de047564d20a8b, 0xf245825a5a445275), // 5^-57 + (0xfb158592be068d2e, 0xeed6e2f0f0d56712), // 5^-56 + (0x9ced737bb6c4183d, 0x55464dd69685606b), // 5^-55 + (0xc428d05aa4751e4c, 0xaa97e14c3c26b886), // 5^-54 + (0xf53304714d9265df, 0xd53dd99f4b3066a8), // 5^-53 + (0x993fe2c6d07b7fab, 0xe546a8038efe4029), // 5^-52 + (0xbf8fdb78849a5f96, 0xde98520472bdd033), // 5^-51 + (0xef73d256a5c0f77c, 0x963e66858f6d4440), // 5^-50 + (0x95a8637627989aad, 0xdde7001379a44aa8), // 5^-49 + (0xbb127c53b17ec159, 0x5560c018580d5d52), // 5^-48 + (0xe9d71b689dde71af, 0xaab8f01e6e10b4a6), // 5^-47 + (0x9226712162ab070d, 0xcab3961304ca70e8), // 5^-46 + (0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d22), // 5^-45 + (0xe45c10c42a2b3b05, 0x8cb89a7db77c506a), // 5^-44 + (0x8eb98a7a9a5b04e3, 0x77f3608e92adb242), // 5^-43 + (0xb267ed1940f1c61c, 0x55f038b237591ed3), // 5^-42 + (0xdf01e85f912e37a3, 0x6b6c46dec52f6688), // 5^-41 + (0x8b61313bbabce2c6, 0x2323ac4b3b3da015), // 5^-40 + (0xae397d8aa96c1b77, 0xabec975e0a0d081a), // 5^-39 + 
(0xd9c7dced53c72255, 0x96e7bd358c904a21), // 5^-38 + (0x881cea14545c7575, 0x7e50d64177da2e54), // 5^-37 + (0xaa242499697392d2, 0xdde50bd1d5d0b9e9), // 5^-36 + (0xd4ad2dbfc3d07787, 0x955e4ec64b44e864), // 5^-35 + (0x84ec3c97da624ab4, 0xbd5af13bef0b113e), // 5^-34 + (0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58e), // 5^-33 + (0xcfb11ead453994ba, 0x67de18eda5814af2), // 5^-32 + (0x81ceb32c4b43fcf4, 0x80eacf948770ced7), // 5^-31 + (0xa2425ff75e14fc31, 0xa1258379a94d028d), // 5^-30 + (0xcad2f7f5359a3b3e, 0x96ee45813a04330), // 5^-29 + (0xfd87b5f28300ca0d, 0x8bca9d6e188853fc), // 5^-28 + (0x9e74d1b791e07e48, 0x775ea264cf55347e), // 5^-27 + (0xc612062576589dda, 0x95364afe032a819e), // 5^-26 + (0xf79687aed3eec551, 0x3a83ddbd83f52205), // 5^-25 + (0x9abe14cd44753b52, 0xc4926a9672793543), // 5^-24 + (0xc16d9a0095928a27, 0x75b7053c0f178294), // 5^-23 + (0xf1c90080baf72cb1, 0x5324c68b12dd6339), // 5^-22 + (0x971da05074da7bee, 0xd3f6fc16ebca5e04), // 5^-21 + (0xbce5086492111aea, 0x88f4bb1ca6bcf585), // 5^-20 + (0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6), // 5^-19 + (0x9392ee8e921d5d07, 0x3aff322e62439fd0), // 5^-18 + (0xb877aa3236a4b449, 0x9befeb9fad487c3), // 5^-17 + (0xe69594bec44de15b, 0x4c2ebe687989a9b4), // 5^-16 + (0x901d7cf73ab0acd9, 0xf9d37014bf60a11), // 5^-15 + (0xb424dc35095cd80f, 0x538484c19ef38c95), // 5^-14 + (0xe12e13424bb40e13, 0x2865a5f206b06fba), // 5^-13 + (0x8cbccc096f5088cb, 0xf93f87b7442e45d4), // 5^-12 + (0xafebff0bcb24aafe, 0xf78f69a51539d749), // 5^-11 + (0xdbe6fecebdedd5be, 0xb573440e5a884d1c), // 5^-10 + (0x89705f4136b4a597, 0x31680a88f8953031), // 5^-9 + (0xabcc77118461cefc, 0xfdc20d2b36ba7c3e), // 5^-8 + (0xd6bf94d5e57a42bc, 0x3d32907604691b4d), // 5^-7 + (0x8637bd05af6c69b5, 0xa63f9a49c2c1b110), // 5^-6 + (0xa7c5ac471b478423, 0xfcf80dc33721d54), // 5^-5 + (0xd1b71758e219652b, 0xd3c36113404ea4a9), // 5^-4 + (0x83126e978d4fdf3b, 0x645a1cac083126ea), // 5^-3 + (0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4), // 5^-2 + (0xcccccccccccccccc, 0xcccccccccccccccd), // 5^-1 + (0x8000000000000000, 0x0), // 5^0 + (0xa000000000000000, 0x0), // 5^1 + (0xc800000000000000, 0x0), // 5^2 + (0xfa00000000000000, 0x0), // 5^3 + (0x9c40000000000000, 0x0), // 5^4 + (0xc350000000000000, 0x0), // 5^5 + (0xf424000000000000, 0x0), // 5^6 + (0x9896800000000000, 0x0), // 5^7 + (0xbebc200000000000, 0x0), // 5^8 + (0xee6b280000000000, 0x0), // 5^9 + (0x9502f90000000000, 0x0), // 5^10 + (0xba43b74000000000, 0x0), // 5^11 + (0xe8d4a51000000000, 0x0), // 5^12 + (0x9184e72a00000000, 0x0), // 5^13 + (0xb5e620f480000000, 0x0), // 5^14 + (0xe35fa931a0000000, 0x0), // 5^15 + (0x8e1bc9bf04000000, 0x0), // 5^16 + (0xb1a2bc2ec5000000, 0x0), // 5^17 + (0xde0b6b3a76400000, 0x0), // 5^18 + (0x8ac7230489e80000, 0x0), // 5^19 + (0xad78ebc5ac620000, 0x0), // 5^20 + (0xd8d726b7177a8000, 0x0), // 5^21 + (0x878678326eac9000, 0x0), // 5^22 + (0xa968163f0a57b400, 0x0), // 5^23 + (0xd3c21bcecceda100, 0x0), // 5^24 + (0x84595161401484a0, 0x0), // 5^25 + (0xa56fa5b99019a5c8, 0x0), // 5^26 + (0xcecb8f27f4200f3a, 0x0), // 5^27 + (0x813f3978f8940984, 0x4000000000000000), // 5^28 + (0xa18f07d736b90be5, 0x5000000000000000), // 5^29 + (0xc9f2c9cd04674ede, 0xa400000000000000), // 5^30 + (0xfc6f7c4045812296, 0x4d00000000000000), // 5^31 + (0x9dc5ada82b70b59d, 0xf020000000000000), // 5^32 + (0xc5371912364ce305, 0x6c28000000000000), // 5^33 + (0xf684df56c3e01bc6, 0xc732000000000000), // 5^34 + (0x9a130b963a6c115c, 0x3c7f400000000000), // 5^35 + (0xc097ce7bc90715b3, 0x4b9f100000000000), // 5^36 + (0xf0bdc21abb48db20, 0x1e86d40000000000), // 5^37 + 
(0x96769950b50d88f4, 0x1314448000000000), // 5^38 + (0xbc143fa4e250eb31, 0x17d955a000000000), // 5^39 + (0xeb194f8e1ae525fd, 0x5dcfab0800000000), // 5^40 + (0x92efd1b8d0cf37be, 0x5aa1cae500000000), // 5^41 + (0xb7abc627050305ad, 0xf14a3d9e40000000), // 5^42 + (0xe596b7b0c643c719, 0x6d9ccd05d0000000), // 5^43 + (0x8f7e32ce7bea5c6f, 0xe4820023a2000000), // 5^44 + (0xb35dbf821ae4f38b, 0xdda2802c8a800000), // 5^45 + (0xe0352f62a19e306e, 0xd50b2037ad200000), // 5^46 + (0x8c213d9da502de45, 0x4526f422cc340000), // 5^47 + (0xaf298d050e4395d6, 0x9670b12b7f410000), // 5^48 + (0xdaf3f04651d47b4c, 0x3c0cdd765f114000), // 5^49 + (0x88d8762bf324cd0f, 0xa5880a69fb6ac800), // 5^50 + (0xab0e93b6efee0053, 0x8eea0d047a457a00), // 5^51 + (0xd5d238a4abe98068, 0x72a4904598d6d880), // 5^52 + (0x85a36366eb71f041, 0x47a6da2b7f864750), // 5^53 + (0xa70c3c40a64e6c51, 0x999090b65f67d924), // 5^54 + (0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d), // 5^55 + (0x82818f1281ed449f, 0xbff8f10e7a8921a4), // 5^56 + (0xa321f2d7226895c7, 0xaff72d52192b6a0d), // 5^57 + (0xcbea6f8ceb02bb39, 0x9bf4f8a69f764490), // 5^58 + (0xfee50b7025c36a08, 0x2f236d04753d5b4), // 5^59 + (0x9f4f2726179a2245, 0x1d762422c946590), // 5^60 + (0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef5), // 5^61 + (0xf8ebad2b84e0d58b, 0xd2e0898765a7deb2), // 5^62 + (0x9b934c3b330c8577, 0x63cc55f49f88eb2f), // 5^63 + (0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fb), // 5^64 + (0xf316271c7fc3908a, 0x8bef464e3945ef7a), // 5^65 + (0x97edd871cfda3a56, 0x97758bf0e3cbb5ac), // 5^66 + (0xbde94e8e43d0c8ec, 0x3d52eeed1cbea317), // 5^67 + (0xed63a231d4c4fb27, 0x4ca7aaa863ee4bdd), // 5^68 + (0x945e455f24fb1cf8, 0x8fe8caa93e74ef6a), // 5^69 + (0xb975d6b6ee39e436, 0xb3e2fd538e122b44), // 5^70 + (0xe7d34c64a9c85d44, 0x60dbbca87196b616), // 5^71 + (0x90e40fbeea1d3a4a, 0xbc8955e946fe31cd), // 5^72 + (0xb51d13aea4a488dd, 0x6babab6398bdbe41), // 5^73 + (0xe264589a4dcdab14, 0xc696963c7eed2dd1), // 5^74 + (0x8d7eb76070a08aec, 0xfc1e1de5cf543ca2), // 5^75 + (0xb0de65388cc8ada8, 0x3b25a55f43294bcb), // 5^76 + (0xdd15fe86affad912, 0x49ef0eb713f39ebe), // 5^77 + (0x8a2dbf142dfcc7ab, 0x6e3569326c784337), // 5^78 + (0xacb92ed9397bf996, 0x49c2c37f07965404), // 5^79 + (0xd7e77a8f87daf7fb, 0xdc33745ec97be906), // 5^80 + (0x86f0ac99b4e8dafd, 0x69a028bb3ded71a3), // 5^81 + (0xa8acd7c0222311bc, 0xc40832ea0d68ce0c), // 5^82 + (0xd2d80db02aabd62b, 0xf50a3fa490c30190), // 5^83 + (0x83c7088e1aab65db, 0x792667c6da79e0fa), // 5^84 + (0xa4b8cab1a1563f52, 0x577001b891185938), // 5^85 + (0xcde6fd5e09abcf26, 0xed4c0226b55e6f86), // 5^86 + (0x80b05e5ac60b6178, 0x544f8158315b05b4), // 5^87 + (0xa0dc75f1778e39d6, 0x696361ae3db1c721), // 5^88 + (0xc913936dd571c84c, 0x3bc3a19cd1e38e9), // 5^89 + (0xfb5878494ace3a5f, 0x4ab48a04065c723), // 5^90 + (0x9d174b2dcec0e47b, 0x62eb0d64283f9c76), // 5^91 + (0xc45d1df942711d9a, 0x3ba5d0bd324f8394), // 5^92 + (0xf5746577930d6500, 0xca8f44ec7ee36479), // 5^93 + (0x9968bf6abbe85f20, 0x7e998b13cf4e1ecb), // 5^94 + (0xbfc2ef456ae276e8, 0x9e3fedd8c321a67e), // 5^95 + (0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101e), // 5^96 + (0x95d04aee3b80ece5, 0xbba1f1d158724a12), // 5^97 + (0xbb445da9ca61281f, 0x2a8a6e45ae8edc97), // 5^98 + (0xea1575143cf97226, 0xf52d09d71a3293bd), // 5^99 + (0x924d692ca61be758, 0x593c2626705f9c56), // 5^100 + (0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836c), // 5^101 + (0xe498f455c38b997a, 0xb6dfb9c0f956447), // 5^102 + (0x8edf98b59a373fec, 0x4724bd4189bd5eac), // 5^103 + (0xb2977ee300c50fe7, 0x58edec91ec2cb657), // 5^104 + (0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ed), // 5^105 + 
(0x8b865b215899f46c, 0xbd79e0d20082ee74), // 5^106 + (0xae67f1e9aec07187, 0xecd8590680a3aa11), // 5^107 + (0xda01ee641a708de9, 0xe80e6f4820cc9495), // 5^108 + (0x884134fe908658b2, 0x3109058d147fdcdd), // 5^109 + (0xaa51823e34a7eede, 0xbd4b46f0599fd415), // 5^110 + (0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91a), // 5^111 + (0x850fadc09923329e, 0x3e2cf6bc604ddb0), // 5^112 + (0xa6539930bf6bff45, 0x84db8346b786151c), // 5^113 + (0xcfe87f7cef46ff16, 0xe612641865679a63), // 5^114 + (0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07e), // 5^115 + (0xa26da3999aef7749, 0xe3be5e330f38f09d), // 5^116 + (0xcb090c8001ab551c, 0x5cadf5bfd3072cc5), // 5^117 + (0xfdcb4fa002162a63, 0x73d9732fc7c8f7f6), // 5^118 + (0x9e9f11c4014dda7e, 0x2867e7fddcdd9afa), // 5^119 + (0xc646d63501a1511d, 0xb281e1fd541501b8), // 5^120 + (0xf7d88bc24209a565, 0x1f225a7ca91a4226), // 5^121 + (0x9ae757596946075f, 0x3375788de9b06958), // 5^122 + (0xc1a12d2fc3978937, 0x52d6b1641c83ae), // 5^123 + (0xf209787bb47d6b84, 0xc0678c5dbd23a49a), // 5^124 + (0x9745eb4d50ce6332, 0xf840b7ba963646e0), // 5^125 + (0xbd176620a501fbff, 0xb650e5a93bc3d898), // 5^126 + (0xec5d3fa8ce427aff, 0xa3e51f138ab4cebe), // 5^127 + (0x93ba47c980e98cdf, 0xc66f336c36b10137), // 5^128 + (0xb8a8d9bbe123f017, 0xb80b0047445d4184), // 5^129 + (0xe6d3102ad96cec1d, 0xa60dc059157491e5), // 5^130 + (0x9043ea1ac7e41392, 0x87c89837ad68db2f), // 5^131 + (0xb454e4a179dd1877, 0x29babe4598c311fb), // 5^132 + (0xe16a1dc9d8545e94, 0xf4296dd6fef3d67a), // 5^133 + (0x8ce2529e2734bb1d, 0x1899e4a65f58660c), // 5^134 + (0xb01ae745b101e9e4, 0x5ec05dcff72e7f8f), // 5^135 + (0xdc21a1171d42645d, 0x76707543f4fa1f73), // 5^136 + (0x899504ae72497eba, 0x6a06494a791c53a8), // 5^137 + (0xabfa45da0edbde69, 0x487db9d17636892), // 5^138 + (0xd6f8d7509292d603, 0x45a9d2845d3c42b6), // 5^139 + (0x865b86925b9bc5c2, 0xb8a2392ba45a9b2), // 5^140 + (0xa7f26836f282b732, 0x8e6cac7768d7141e), // 5^141 + (0xd1ef0244af2364ff, 0x3207d795430cd926), // 5^142 + (0x8335616aed761f1f, 0x7f44e6bd49e807b8), // 5^143 + (0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a6), // 5^144 + (0xcd036837130890a1, 0x36dba887c37a8c0f), // 5^145 + (0x802221226be55a64, 0xc2494954da2c9789), // 5^146 + (0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6c), // 5^147 + (0xc83553c5c8965d3d, 0x6f92829494e5acc7), // 5^148 + (0xfa42a8b73abbf48c, 0xcb772339ba1f17f9), // 5^149 + (0x9c69a97284b578d7, 0xff2a760414536efb), // 5^150 + (0xc38413cf25e2d70d, 0xfef5138519684aba), // 5^151 + (0xf46518c2ef5b8cd1, 0x7eb258665fc25d69), // 5^152 + (0x98bf2f79d5993802, 0xef2f773ffbd97a61), // 5^153 + (0xbeeefb584aff8603, 0xaafb550ffacfd8fa), // 5^154 + (0xeeaaba2e5dbf6784, 0x95ba2a53f983cf38), // 5^155 + (0x952ab45cfa97a0b2, 0xdd945a747bf26183), // 5^156 + (0xba756174393d88df, 0x94f971119aeef9e4), // 5^157 + (0xe912b9d1478ceb17, 0x7a37cd5601aab85d), // 5^158 + (0x91abb422ccb812ee, 0xac62e055c10ab33a), // 5^159 + (0xb616a12b7fe617aa, 0x577b986b314d6009), // 5^160 + (0xe39c49765fdf9d94, 0xed5a7e85fda0b80b), // 5^161 + (0x8e41ade9fbebc27d, 0x14588f13be847307), // 5^162 + (0xb1d219647ae6b31c, 0x596eb2d8ae258fc8), // 5^163 + (0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bb), // 5^164 + (0x8aec23d680043bee, 0x25de7bb9480d5854), // 5^165 + (0xada72ccc20054ae9, 0xaf561aa79a10ae6a), // 5^166 + (0xd910f7ff28069da4, 0x1b2ba1518094da04), // 5^167 + (0x87aa9aff79042286, 0x90fb44d2f05d0842), // 5^168 + (0xa99541bf57452b28, 0x353a1607ac744a53), // 5^169 + (0xd3fa922f2d1675f2, 0x42889b8997915ce8), // 5^170 + (0x847c9b5d7c2e09b7, 0x69956135febada11), // 5^171 + (0xa59bc234db398c25, 0x43fab9837e699095), // 5^172 + 
(0xcf02b2c21207ef2e, 0x94f967e45e03f4bb), // 5^173 + (0x8161afb94b44f57d, 0x1d1be0eebac278f5), // 5^174 + (0xa1ba1ba79e1632dc, 0x6462d92a69731732), // 5^175 + (0xca28a291859bbf93, 0x7d7b8f7503cfdcfe), // 5^176 + (0xfcb2cb35e702af78, 0x5cda735244c3d43e), // 5^177 + (0x9defbf01b061adab, 0x3a0888136afa64a7), // 5^178 + (0xc56baec21c7a1916, 0x88aaa1845b8fdd0), // 5^179 + (0xf6c69a72a3989f5b, 0x8aad549e57273d45), // 5^180 + (0x9a3c2087a63f6399, 0x36ac54e2f678864b), // 5^181 + (0xc0cb28a98fcf3c7f, 0x84576a1bb416a7dd), // 5^182 + (0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d5), // 5^183 + (0x969eb7c47859e743, 0x9f644ae5a4b1b325), // 5^184 + (0xbc4665b596706114, 0x873d5d9f0dde1fee), // 5^185 + (0xeb57ff22fc0c7959, 0xa90cb506d155a7ea), // 5^186 + (0x9316ff75dd87cbd8, 0x9a7f12442d588f2), // 5^187 + (0xb7dcbf5354e9bece, 0xc11ed6d538aeb2f), // 5^188 + (0xe5d3ef282a242e81, 0x8f1668c8a86da5fa), // 5^189 + (0x8fa475791a569d10, 0xf96e017d694487bc), // 5^190 + (0xb38d92d760ec4455, 0x37c981dcc395a9ac), // 5^191 + (0xe070f78d3927556a, 0x85bbe253f47b1417), // 5^192 + (0x8c469ab843b89562, 0x93956d7478ccec8e), // 5^193 + (0xaf58416654a6babb, 0x387ac8d1970027b2), // 5^194 + (0xdb2e51bfe9d0696a, 0x6997b05fcc0319e), // 5^195 + (0x88fcf317f22241e2, 0x441fece3bdf81f03), // 5^196 + (0xab3c2fddeeaad25a, 0xd527e81cad7626c3), // 5^197 + (0xd60b3bd56a5586f1, 0x8a71e223d8d3b074), // 5^198 + (0x85c7056562757456, 0xf6872d5667844e49), // 5^199 + (0xa738c6bebb12d16c, 0xb428f8ac016561db), // 5^200 + (0xd106f86e69d785c7, 0xe13336d701beba52), // 5^201 + (0x82a45b450226b39c, 0xecc0024661173473), // 5^202 + (0xa34d721642b06084, 0x27f002d7f95d0190), // 5^203 + (0xcc20ce9bd35c78a5, 0x31ec038df7b441f4), // 5^204 + (0xff290242c83396ce, 0x7e67047175a15271), // 5^205 + (0x9f79a169bd203e41, 0xf0062c6e984d386), // 5^206 + (0xc75809c42c684dd1, 0x52c07b78a3e60868), // 5^207 + (0xf92e0c3537826145, 0xa7709a56ccdf8a82), // 5^208 + (0x9bbcc7a142b17ccb, 0x88a66076400bb691), // 5^209 + (0xc2abf989935ddbfe, 0x6acff893d00ea435), // 5^210 + (0xf356f7ebf83552fe, 0x583f6b8c4124d43), // 5^211 + (0x98165af37b2153de, 0xc3727a337a8b704a), // 5^212 + (0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5c), // 5^213 + (0xeda2ee1c7064130c, 0x1162def06f79df73), // 5^214 + (0x9485d4d1c63e8be7, 0x8addcb5645ac2ba8), // 5^215 + (0xb9a74a0637ce2ee1, 0x6d953e2bd7173692), // 5^216 + (0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0437), // 5^217 + (0x910ab1d4db9914a0, 0x1d9c9892400a22a2), // 5^218 + (0xb54d5e4a127f59c8, 0x2503beb6d00cab4b), // 5^219 + (0xe2a0b5dc971f303a, 0x2e44ae64840fd61d), // 5^220 + (0x8da471a9de737e24, 0x5ceaecfed289e5d2), // 5^221 + (0xb10d8e1456105dad, 0x7425a83e872c5f47), // 5^222 + (0xdd50f1996b947518, 0xd12f124e28f77719), // 5^223 + (0x8a5296ffe33cc92f, 0x82bd6b70d99aaa6f), // 5^224 + (0xace73cbfdc0bfb7b, 0x636cc64d1001550b), // 5^225 + (0xd8210befd30efa5a, 0x3c47f7e05401aa4e), // 5^226 + (0x8714a775e3e95c78, 0x65acfaec34810a71), // 5^227 + (0xa8d9d1535ce3b396, 0x7f1839a741a14d0d), // 5^228 + (0xd31045a8341ca07c, 0x1ede48111209a050), // 5^229 + (0x83ea2b892091e44d, 0x934aed0aab460432), // 5^230 + (0xa4e4b66b68b65d60, 0xf81da84d5617853f), // 5^231 + (0xce1de40642e3f4b9, 0x36251260ab9d668e), // 5^232 + (0x80d2ae83e9ce78f3, 0xc1d72b7c6b426019), // 5^233 + (0xa1075a24e4421730, 0xb24cf65b8612f81f), // 5^234 + (0xc94930ae1d529cfc, 0xdee033f26797b627), // 5^235 + (0xfb9b7cd9a4a7443c, 0x169840ef017da3b1), // 5^236 + (0x9d412e0806e88aa5, 0x8e1f289560ee864e), // 5^237 + (0xc491798a08a2ad4e, 0xf1a6f2bab92a27e2), // 5^238 + (0xf5b5d7ec8acb58a2, 0xae10af696774b1db), // 5^239 + 
(0x9991a6f3d6bf1765, 0xacca6da1e0a8ef29), // 5^240 + (0xbff610b0cc6edd3f, 0x17fd090a58d32af3), // 5^241 + (0xeff394dcff8a948e, 0xddfc4b4cef07f5b0), // 5^242 + (0x95f83d0a1fb69cd9, 0x4abdaf101564f98e), // 5^243 + (0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f1), // 5^244 + (0xea53df5fd18d5513, 0x84c86189216dc5ed), // 5^245 + (0x92746b9be2f8552c, 0x32fd3cf5b4e49bb4), // 5^246 + (0xb7118682dbb66a77, 0x3fbc8c33221dc2a1), // 5^247 + (0xe4d5e82392a40515, 0xfabaf3feaa5334a), // 5^248 + (0x8f05b1163ba6832d, 0x29cb4d87f2a7400e), // 5^249 + (0xb2c71d5bca9023f8, 0x743e20e9ef511012), // 5^250 + (0xdf78e4b2bd342cf6, 0x914da9246b255416), // 5^251 + (0x8bab8eefb6409c1a, 0x1ad089b6c2f7548e), // 5^252 + (0xae9672aba3d0c320, 0xa184ac2473b529b1), // 5^253 + (0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741e), // 5^254 + (0x8865899617fb1871, 0x7e2fa67c7a658892), // 5^255 + (0xaa7eebfb9df9de8d, 0xddbb901b98feeab7), // 5^256 + (0xd51ea6fa85785631, 0x552a74227f3ea565), // 5^257 + (0x8533285c936b35de, 0xd53a88958f87275f), // 5^258 + (0xa67ff273b8460356, 0x8a892abaf368f137), // 5^259 + (0xd01fef10a657842c, 0x2d2b7569b0432d85), // 5^260 + (0x8213f56a67f6b29b, 0x9c3b29620e29fc73), // 5^261 + (0xa298f2c501f45f42, 0x8349f3ba91b47b8f), // 5^262 + (0xcb3f2f7642717713, 0x241c70a936219a73), // 5^263 + (0xfe0efb53d30dd4d7, 0xed238cd383aa0110), // 5^264 + (0x9ec95d1463e8a506, 0xf4363804324a40aa), // 5^265 + (0xc67bb4597ce2ce48, 0xb143c6053edcd0d5), // 5^266 + (0xf81aa16fdc1b81da, 0xdd94b7868e94050a), // 5^267 + (0x9b10a4e5e9913128, 0xca7cf2b4191c8326), // 5^268 + (0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f0), // 5^269 + (0xf24a01a73cf2dccf, 0xbc633b39673c8cec), // 5^270 + (0x976e41088617ca01, 0xd5be0503e085d813), // 5^271 + (0xbd49d14aa79dbc82, 0x4b2d8644d8a74e18), // 5^272 + (0xec9c459d51852ba2, 0xddf8e7d60ed1219e), // 5^273 + (0x93e1ab8252f33b45, 0xcabb90e5c942b503), // 5^274 + (0xb8da1662e7b00a17, 0x3d6a751f3b936243), // 5^275 + (0xe7109bfba19c0c9d, 0xcc512670a783ad4), // 5^276 + (0x906a617d450187e2, 0x27fb2b80668b24c5), // 5^277 + (0xb484f9dc9641e9da, 0xb1f9f660802dedf6), // 5^278 + (0xe1a63853bbd26451, 0x5e7873f8a0396973), // 5^279 + (0x8d07e33455637eb2, 0xdb0b487b6423e1e8), // 5^280 + (0xb049dc016abc5e5f, 0x91ce1a9a3d2cda62), // 5^281 + (0xdc5c5301c56b75f7, 0x7641a140cc7810fb), // 5^282 + (0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9d), // 5^283 + (0xac2820d9623bf429, 0x546345fa9fbdcd44), // 5^284 + (0xd732290fbacaf133, 0xa97c177947ad4095), // 5^285 + (0x867f59a9d4bed6c0, 0x49ed8eabcccc485d), // 5^286 + (0xa81f301449ee8c70, 0x5c68f256bfff5a74), // 5^287 + (0xd226fc195c6a2f8c, 0x73832eec6fff3111), // 5^288 + (0x83585d8fd9c25db7, 0xc831fd53c5ff7eab), // 5^289 + (0xa42e74f3d032f525, 0xba3e7ca8b77f5e55), // 5^290 + (0xcd3a1230c43fb26f, 0x28ce1bd2e55f35eb), // 5^291 + (0x80444b5e7aa7cf85, 0x7980d163cf5b81b3), // 5^292 + (0xa0555e361951c366, 0xd7e105bcc332621f), // 5^293 + (0xc86ab5c39fa63440, 0x8dd9472bf3fefaa7), // 5^294 + (0xfa856334878fc150, 0xb14f98f6f0feb951), // 5^295 + (0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d3), // 5^296 + (0xc3b8358109e84f07, 0xa862f80ec4700c8), // 5^297 + (0xf4a642e14c6262c8, 0xcd27bb612758c0fa), // 5^298 + (0x98e7e9cccfbd7dbd, 0x8038d51cb897789c), // 5^299 + (0xbf21e44003acdd2c, 0xe0470a63e6bd56c3), // 5^300 + (0xeeea5d5004981478, 0x1858ccfce06cac74), // 5^301 + (0x95527a5202df0ccb, 0xf37801e0c43ebc8), // 5^302 + (0xbaa718e68396cffd, 0xd30560258f54e6ba), // 5^303 + (0xe950df20247c83fd, 0x47c6b82ef32a2069), // 5^304 + (0x91d28b7416cdd27e, 0x4cdc331d57fa5441), // 5^305 + (0xb6472e511c81471d, 0xe0133fe4adf8e952), // 5^306 + 
(0xe3d8f9e563a198e5, 0x58180fddd97723a6), // 5^307 + (0x8e679c2f5e44ff8f, 0x570f09eaa7ea7648), // 5^308 +]; diff --git a/third_party/rust/minimal-lexical/src/table_small.rs b/third_party/rust/minimal-lexical/src/table_small.rs new file mode 100644 index 0000000000..9da69916fb --- /dev/null +++ b/third_party/rust/minimal-lexical/src/table_small.rs @@ -0,0 +1,90 @@ +//! Pre-computed small tables for parsing decimal strings. + +#![doc(hidden)] +#![cfg(not(feature = "compact"))] + +/// Pre-computed, small powers-of-5. +pub const SMALL_INT_POW5: [u64; 28] = [ + 1, + 5, + 25, + 125, + 625, + 3125, + 15625, + 78125, + 390625, + 1953125, + 9765625, + 48828125, + 244140625, + 1220703125, + 6103515625, + 30517578125, + 152587890625, + 762939453125, + 3814697265625, + 19073486328125, + 95367431640625, + 476837158203125, + 2384185791015625, + 11920928955078125, + 59604644775390625, + 298023223876953125, + 1490116119384765625, + 7450580596923828125, +]; + +/// Pre-computed, small powers-of-10. +pub const SMALL_INT_POW10: [u64; 20] = [ + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000, + 10000000000, + 100000000000, + 1000000000000, + 10000000000000, + 100000000000000, + 1000000000000000, + 10000000000000000, + 100000000000000000, + 1000000000000000000, + 10000000000000000000, +]; + +/// Pre-computed, small powers-of-10. +pub const SMALL_F32_POW10: [f32; 16] = + [1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 0., 0., 0., 0., 0.]; + +/// Pre-computed, small powers-of-10. +pub const SMALL_F64_POW10: [f64; 32] = [ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, + 1e17, 1e18, 1e19, 1e20, 1e21, 1e22, 0., 0., 0., 0., 0., 0., 0., 0., 0., +]; + +/// Pre-computed large power-of-5 for 32-bit limbs. +#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] +pub const LARGE_POW5: [u32; 10] = [ + 4279965485, 329373468, 4020270615, 2137533757, 4287402176, 1057042919, 1071430142, 2440757623, + 381945767, 46164893, +]; + +/// Pre-computed large power-of-5 for 64-bit limbs. +#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] +pub const LARGE_POW5: [u64; 5] = [ + 1414648277510068013, + 9180637584431281687, + 4539964771860779200, + 10482974169319127550, + 198276706040285095, +]; + +/// Step for large power-of-5 for 32-bit limbs. +pub const LARGE_POW5_STEP: u32 = 135; diff --git a/third_party/rust/minimal-lexical/tests/bellerophon.rs b/third_party/rust/minimal-lexical/tests/bellerophon.rs new file mode 100644 index 0000000000..99cd89acfc --- /dev/null +++ b/third_party/rust/minimal-lexical/tests/bellerophon.rs @@ -0,0 +1,59 @@ +#![cfg(feature = "compact")] +#![allow(dead_code)] + +use minimal_lexical::bellerophon::bellerophon; +use minimal_lexical::extended_float::{extended_to_float, ExtendedFloat}; +use minimal_lexical::num::Float; +use minimal_lexical::number::Number; + +pub fn bellerophon_test( + xmant: u64, + xexp: i32, + many_digits: bool, + ymant: u64, + yexp: i32, +) { + let num = Number { + exponent: xexp, + mantissa: xmant, + many_digits, + }; + let xfp = bellerophon::(&num); + let yfp = ExtendedFloat { + mant: ymant, + exp: yexp, + }; + // Given us useful error messages if the floats are valid. 
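+ // Editor's note (hedged summary, not part of the vendored source): in this
+ // crate an exponent offset by `F::INVALID_FP` marks a result the compact
+ // Bellerophon path could not round exactly, so such pairs are compared
+ // bit-for-bit in the `else` branch below instead of being converted to floats.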
+ if xfp.exp >= 0 && yfp.exp >= 0 { + assert!( + xfp == yfp, + "x != y, xfp={:?}, yfp={:?}, x={:?}, y={:?}", + xfp, + yfp, + extended_to_float::(xfp), + extended_to_float::(yfp) + ); + } else { + assert_eq!(xfp, yfp); + } +} + +pub fn compute_float32(q: i32, w: u64) -> (i32, u64) { + let num = Number { + exponent: q, + mantissa: w, + many_digits: false, + }; + let fp = bellerophon::(&num); + (fp.exp, fp.mant) +} + +pub fn compute_float64(q: i32, w: u64) -> (i32, u64) { + let num = Number { + exponent: q, + mantissa: w, + many_digits: false, + }; + let fp = bellerophon::(&num); + (fp.exp, fp.mant) +} diff --git a/third_party/rust/minimal-lexical/tests/bellerophon_tests.rs b/third_party/rust/minimal-lexical/tests/bellerophon_tests.rs new file mode 100644 index 0000000000..f5826c615d --- /dev/null +++ b/third_party/rust/minimal-lexical/tests/bellerophon_tests.rs @@ -0,0 +1,231 @@ +#![cfg(feature = "compact")] + +mod bellerophon; + +use bellerophon::{bellerophon_test, compute_float32, compute_float64}; +use minimal_lexical::num::Float; + +#[test] +fn halfway_round_down_test() { + // Halfway, round-down tests + bellerophon_test::(9007199254740992, 0, false, 0, 1076); + bellerophon_test::( + 9007199254740993, + 0, + false, + 9223372036854776832, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::(9007199254740994, 0, false, 1, 1076); + + bellerophon_test::(18014398509481984, 0, false, 0, 1077); + bellerophon_test::( + 18014398509481986, + 0, + false, + 9223372036854776832, + 1066 + f64::INVALID_FP, + ); + bellerophon_test::(18014398509481988, 0, false, 1, 1077); + + bellerophon_test::(9223372036854775808, 0, false, 0, 1086); + bellerophon_test::( + 9223372036854776832, + 0, + false, + 9223372036854776832, + 1075 + f64::INVALID_FP, + ); + bellerophon_test::(9223372036854777856, 0, false, 1, 1086); + + // Add a 0 but say we're truncated. + bellerophon_test::(9007199254740992000, -3, true, 0, 1076); + bellerophon_test::( + 9007199254740993000, + -3, + true, + 9223372036854776832, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::(9007199254740994000, -3, true, 1, 1076); +} + +#[test] +fn halfway_round_up_test() { + // Halfway, round-up tests + bellerophon_test::(9007199254740994, 0, false, 1, 1076); + bellerophon_test::( + 9007199254740995, + 0, + false, + 9223372036854778880, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::(9007199254740996, 0, false, 2, 1076); + + bellerophon_test::(18014398509481988, 0, false, 1, 1077); + bellerophon_test::( + 18014398509481990, + 0, + false, + 9223372036854778880, + 1066 + f64::INVALID_FP, + ); + bellerophon_test::(18014398509481992, 0, false, 2, 1077); + + bellerophon_test::(9223372036854777856, 0, false, 1, 1086); + bellerophon_test::( + 9223372036854778880, + 0, + false, + 9223372036854778880, + 1075 + f64::INVALID_FP, + ); + bellerophon_test::(9223372036854779904, 0, false, 2, 1086); + + // Add a 0 but say we're truncated. 
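+ // Editor's note: these cases scale the mantissa by 1000 and compensate with an
+ // exponent of -3 while setting `many_digits = true`, which exercises the
+ // truncated-digit handling of the same halfway values tested above.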
+ bellerophon_test::(9007199254740994000, -3, true, 1, 1076); + bellerophon_test::( + 9007199254740994990, + -3, + true, + 9223372036854778869, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::( + 9007199254740995000, + -3, + true, + 9223372036854778879, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::( + 9007199254740995010, + -3, + true, + 9223372036854778890, + 1065 + f64::INVALID_FP, + ); + bellerophon_test::(9007199254740995050, -3, true, 2, 1076); + bellerophon_test::(9007199254740996000, -3, true, 2, 1076); +} + +#[test] +fn extremes_test() { + // Need to check we get proper results with rounding for near-infinity + // and near-zero and/or denormal floats. + bellerophon_test::(5, -324, false, 1, 0); + bellerophon_test::(10, -324, false, 2, 0); + // This is very close to 2.4703282292062327206e-342. + bellerophon_test::( + 2470328229206232720, + -342, + false, + 18446744073709551608, + -64 + f64::INVALID_FP, + ); + bellerophon_test::(2470328229206232721, -342, false, 9223372036854775808, -32831); + bellerophon_test::(2470328229206232725, -342, false, 9223372036854775824, -32831); + bellerophon_test::(2470328229206232726, -342, false, 1, 0); + bellerophon_test::(2470328229206232730, -342, false, 1, 0); + // Check very close to literal infinity. + // 17.976931348623155 + // 1.797693134862315508561243283845062402343434371574593359244049e+308 + // 1.797693134862315708145274237317043567980705675258449965989175e+308 + bellerophon_test::(17976931348623155, 292, false, 4503599627370494, 2046); + bellerophon_test::(17976931348623156, 292, false, 4503599627370494, 2046); + bellerophon_test::(1797693134862315605, 290, false, 4503599627370494, 2046); + bellerophon_test::(1797693134862315607, 290, false, 4503599627370494, 2046); + bellerophon_test::(1797693134862315608, 290, false, 18446744073709548540, -30733); + bellerophon_test::(1797693134862315609, 290, false, 18446744073709548550, -30733); + bellerophon_test::(179769313486231561, 291, false, 4503599627370495, 2046); + bellerophon_test::(17976931348623157, 292, false, 4503599627370495, 2046); + + // Check existing issues and underflow. + bellerophon_test::(2470328229206232726, -343, false, 0, 0); + bellerophon_test::(2470328229206232726, -342, false, 1, 0); + bellerophon_test::(1, -250, false, 1945308223406668, 192); + bellerophon_test::(1, -150, false, 2867420733609077, 524); + bellerophon_test::(1, -45, false, 1924152549665465, 873); + bellerophon_test::(1, -40, false, 400386103400348, 890); + bellerophon_test::(1, -20, false, 2142540351554083, 956); + bellerophon_test::(1, 0, false, 0, 1023); + bellerophon_test::(1, 20, false, 1599915997629504, 1089); + bellerophon_test::(1, 40, false, 3768206498159781, 1155); + bellerophon_test::(1, 150, false, 999684479948463, 1521); + bellerophon_test::(1, 250, false, 1786584717939204, 1853); + // Minimum positive normal float. + bellerophon_test::(22250738585072014, -324, false, 0, 1); + // Maximum positive subnormal float. + bellerophon_test::(2225073858507201, -323, false, 4503599627370495, 0); + // Next highest subnormal float. 
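+ // Editor's note: the values around here probe the f64 normal/subnormal
+ // boundary near 2.2250738585072014e-308 (the smallest positive normal double),
+ // where rounding must land on either biased exponent 1 or a top subnormal.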
+ bellerophon_test::(22250738585072004, -324, false, 4503599627370494, 0); + bellerophon_test::(22250738585072006, -324, false, 4503599627370494, 0); + bellerophon_test::(22250738585072007, -324, false, 4503599627370495, 0); + bellerophon_test::(222507385850720062, -325, false, 4503599627370494, 0); + bellerophon_test::(222507385850720063, -325, false, 4503599627370494, 0); + bellerophon_test::(222507385850720064, -325, false, 4503599627370494, 0); + bellerophon_test::(2225073858507200641, -326, false, 18446744073709545462, -32779); + bellerophon_test::(2225073858507200642, -326, false, 18446744073709545472, -32779); + bellerophon_test::(222507385850720065, -325, false, 4503599627370495, 0); +} + +#[test] +fn compute_float_f32_test() { + // These test near-halfway cases for single-precision floats. + assert_eq!(compute_float32(0, 16777216), (151, 0)); + assert_eq!(compute_float32(0, 16777217), (111 + f32::INVALID_FP, 9223372586610589696)); + assert_eq!(compute_float32(0, 16777218), (151, 1)); + assert_eq!(compute_float32(0, 16777219), (111 + f32::INVALID_FP, 9223373686122217472)); + assert_eq!(compute_float32(0, 16777220), (151, 2)); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!(compute_float32(-10, 167772160000000000), (151, 0)); + assert_eq!( + compute_float32(-10, 167772170000000000), + (111 + f32::INVALID_FP, 9223372586610589696) + ); + assert_eq!(compute_float32(-10, 167772180000000000), (151, 1)); + // Let's check the lines to see if anything is different in table... + assert_eq!( + compute_float32(-10, 167772190000000000), + (111 + f32::INVALID_FP, 9223373686122217472) + ); + assert_eq!(compute_float32(-10, 167772200000000000), (151, 2)); +} + +#[test] +fn compute_float_f64_test() { + // These test near-halfway cases for double-precision floats. + assert_eq!(compute_float64(0, 9007199254740992), (1076, 0)); + assert_eq!(compute_float64(0, 9007199254740993), (1065 + f64::INVALID_FP, 9223372036854776832)); + assert_eq!(compute_float64(0, 9007199254740994), (1076, 1)); + assert_eq!(compute_float64(0, 9007199254740995), (1065 + f64::INVALID_FP, 9223372036854778880)); + assert_eq!(compute_float64(0, 9007199254740996), (1076, 2)); + assert_eq!(compute_float64(0, 18014398509481984), (1077, 0)); + assert_eq!( + compute_float64(0, 18014398509481986), + (1066 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!(compute_float64(0, 18014398509481988), (1077, 1)); + assert_eq!( + compute_float64(0, 18014398509481990), + (1066 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!(compute_float64(0, 18014398509481992), (1077, 2)); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!(compute_float64(-3, 9007199254740992000), (1076, 0)); + assert_eq!( + compute_float64(-3, 9007199254740993000), + (1065 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!(compute_float64(-3, 9007199254740994000), (1076, 1)); + assert_eq!( + compute_float64(-3, 9007199254740995000), + (1065 + f64::INVALID_FP, 9223372036854778879) + ); + assert_eq!(compute_float64(-3, 9007199254740996000), (1076, 2)); +} diff --git a/third_party/rust/minimal-lexical/tests/integration_tests.rs b/third_party/rust/minimal-lexical/tests/integration_tests.rs new file mode 100644 index 0000000000..a8f2ff8a0e --- /dev/null +++ b/third_party/rust/minimal-lexical/tests/integration_tests.rs @@ -0,0 +1,228 @@ +/// Find and parse sign and get remaining bytes. 
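+// Editor's note (hedged overview, not part of the vendored source): this file
+// hand-rolls the byte-level scanning that `minimal_lexical` deliberately leaves
+// to the caller; the helpers below split a buffer into sign, integer digits,
+// fraction digits, and exponent before handing the digit slices to
+// `minimal_lexical::parse_float`, roughly:
+//
+//     let (float, _rest) = parse_float::<f64>(b"1.25e2\x00");
+//     assert_eq!(float, 125.0);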
+#[inline] +fn parse_sign<'a>(bytes: &'a [u8]) -> (bool, &'a [u8]) { + match bytes.get(0) { + Some(&b'+') => (true, &bytes[1..]), + Some(&b'-') => (false, &bytes[1..]), + _ => (true, bytes), + } +} + +// Convert u8 to digit. +#[inline] +fn to_digit(c: u8) -> Option { + (c as char).to_digit(10) +} + +// Add digit from exponent. +#[inline] +fn add_digit_i32(value: i32, digit: u32) -> Option { + return value.checked_mul(10)?.checked_add(digit as i32); +} + +// Subtract digit from exponent. +#[inline] +fn sub_digit_i32(value: i32, digit: u32) -> Option { + return value.checked_mul(10)?.checked_sub(digit as i32); +} + +// Convert character to digit. +#[inline] +fn is_digit(c: u8) -> bool { + to_digit(c).is_some() +} + +// Split buffer at index. +#[inline] +fn split_at_index<'a>(digits: &'a [u8], index: usize) -> (&'a [u8], &'a [u8]) { + (&digits[..index], &digits[index..]) +} + +/// Consume until a an invalid digit is found. +/// +/// - `digits` - Slice containing 0 or more digits. +#[inline] +fn consume_digits<'a>(digits: &'a [u8]) -> (&'a [u8], &'a [u8]) { + // Consume all digits. + let mut index = 0; + while index < digits.len() && is_digit(digits[index]) { + index += 1; + } + split_at_index(digits, index) +} + +// Trim leading 0s. +#[inline] +fn ltrim_zero<'a>(bytes: &'a [u8]) -> &'a [u8] { + let count = bytes.iter().take_while(|&&si| si == b'0').count(); + &bytes[count..] +} + +// Trim trailing 0s. +#[inline] +fn rtrim_zero<'a>(bytes: &'a [u8]) -> &'a [u8] { + let count = bytes.iter().rev().take_while(|&&si| si == b'0').count(); + let index = bytes.len() - count; + &bytes[..index] +} + +// PARSERS +// ------- + +/// Parse the exponent of the float. +/// +/// * `exponent` - Slice containing the exponent digits. +/// * `is_positive` - If the exponent sign is positive. +fn parse_exponent(exponent: &[u8], is_positive: bool) -> i32 { + // Parse the sign bit or current data. + let mut value: i32 = 0; + match is_positive { + true => { + for c in exponent { + value = match add_digit_i32(value, to_digit(*c).unwrap()) { + Some(v) => v, + None => return i32::max_value(), + }; + } + }, + false => { + for c in exponent { + value = match sub_digit_i32(value, to_digit(*c).unwrap()) { + Some(v) => v, + None => return i32::min_value(), + }; + } + }, + } + + value +} + +pub fn case_insensitive_starts_with<'a, 'b, Iter1, Iter2>(mut x: Iter1, mut y: Iter2) -> bool +where + Iter1: Iterator, + Iter2: Iterator, +{ + // We use a faster optimization here for ASCII letters, which NaN + // and infinite strings **must** be. [A-Z] is 0x41-0x5A, while + // [a-z] is 0x61-0x7A. Therefore, the xor must be 0 or 32 if they + // are case-insensitive equal, but only if at least 1 of the inputs + // is an ASCII letter. + loop { + let yi = y.next(); + if yi.is_none() { + return true; + } + let yi = *yi.unwrap(); + let is_not_equal = x.next().map_or(true, |&xi| { + let xor = xi ^ yi; + xor != 0 && xor != 0x20 + }); + if is_not_equal { + return false; + } + } +} + +/// Parse float from input bytes, returning the float and the remaining bytes. +/// +/// * `bytes` - Array of bytes leading with float-data. +pub fn parse_float<'a, F>(bytes: &'a [u8]) -> (F, &'a [u8]) +where + F: minimal_lexical::Float, +{ + let start = bytes; + + // Parse the sign. 
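+ // (a leading '+' or '-' is consumed here; any other byte leaves the input
+ // untouched and the value is treated as positive)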
+    let (is_positive, bytes) = parse_sign(bytes);
+
+    // Check NaN, Inf, Infinity
+    if case_insensitive_starts_with(bytes.iter(), b"NaN".iter()) {
+        let mut float = F::from_bits(F::EXPONENT_MASK | (F::HIDDEN_BIT_MASK >> 1));
+        if !is_positive {
+            float = -float;
+        }
+        return (float, &bytes[3..]);
+    } else if case_insensitive_starts_with(bytes.iter(), b"Infinity".iter()) {
+        let mut float = F::from_bits(F::EXPONENT_MASK);
+        if !is_positive {
+            float = -float;
+        }
+        return (float, &bytes[8..]);
+    } else if case_insensitive_starts_with(bytes.iter(), b"inf".iter()) {
+        let mut float = F::from_bits(F::EXPONENT_MASK);
+        if !is_positive {
+            float = -float;
+        }
+        return (float, &bytes[3..]);
+    }
+
+    // Extract and parse the float components:
+    //  1. Integer
+    //  2. Fraction
+    //  3. Exponent
+    let (integer_slc, bytes) = consume_digits(bytes);
+    let (fraction_slc, bytes) = match bytes.first() {
+        Some(&b'.') => consume_digits(&bytes[1..]),
+        _ => (&bytes[..0], bytes),
+    };
+    let (exponent, bytes) = match bytes.first() {
+        Some(&b'e') | Some(&b'E') => {
+            // Extract and parse the exponent.
+            let (is_positive, bytes) = parse_sign(&bytes[1..]);
+            let (exponent, bytes) = consume_digits(bytes);
+            (parse_exponent(exponent, is_positive), bytes)
+        },
+        _ => (0, bytes),
+    };
+
+    if bytes.len() == start.len() {
+        return (F::from_u64(0), bytes);
+    }
+
+    // Note: You may want to check and validate the float data here:
+    //  1). Many floats require integer or fraction digits, if a fraction
+    //      is present.
+    //  2). All floats require either integer or fraction digits.
+    //  3). Some floats do not allow a '+' sign before the significant digits.
+    //  4). Many floats require exponent digits after the exponent symbol.
+    //  5). Some floats do not allow a '+' sign before the exponent.
+
+    // We now need to trim leading and trailing 0s from the integer
+    // and fraction, respectively. This is required to make the
+    // fast and moderate paths more efficient, and for the slow
+    // path.
+    let integer_slc = ltrim_zero(integer_slc);
+    let fraction_slc = rtrim_zero(fraction_slc);
+
+    // Create the float and return our data.
+    let mut float: F =
+        minimal_lexical::parse_float(integer_slc.iter(), fraction_slc.iter(), exponent);
+    if !is_positive {
+        float = -float;
+    }
+
+    (float, bytes)
+}
+
+macro_rules! b {
+    ($x:literal) => {
+        $x.as_bytes()
+    };
+}
+
+#[test]
+fn f32_test() {
+    assert_eq!(
+        (184467440000000000000.0, b!("\x00\x00006")),
+        parse_float::<f32>(b"000184467440737095516150\x00\x00006")
+    );
+}
+
+#[test]
+fn f64_test() {
+    assert_eq!(
+        (184467440737095500000.0, b!("\x00\x00006")),
+        parse_float::<f64>(b"000184467440737095516150\x00\x00006")
+    );
+}
diff --git a/third_party/rust/minimal-lexical/tests/lemire_tests.rs b/third_party/rust/minimal-lexical/tests/lemire_tests.rs
new file mode 100644
index 0000000000..0523ca5b2a
--- /dev/null
+++ b/third_party/rust/minimal-lexical/tests/lemire_tests.rs
@@ -0,0 +1,378 @@
+//! These tests are adapted from the Rust core library's unittests.
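+//!
+//! Each helper below unpacks the `ExtendedFloat` returned by the Lemire
+//! routines into an `(exp, mant)` pair. An exponent offset by
+//! `f32::INVALID_FP` / `f64::INVALID_FP` marks a value that cannot be
+//! rounded correctly from the scaled product alone and is handed to the
+//! slower fallback instead.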
+
+#![cfg(not(feature = "compact"))]
+
+use minimal_lexical::lemire;
+use minimal_lexical::num::Float;
+
+fn compute_error32(q: i32, w: u64) -> (i32, u64) {
+    let fp = lemire::compute_error::<f32>(q, w);
+    (fp.exp, fp.mant)
+}
+
+fn compute_error64(q: i32, w: u64) -> (i32, u64) {
+    let fp = lemire::compute_error::<f64>(q, w);
+    (fp.exp, fp.mant)
+}
+
+fn compute_error_scaled32(q: i32, w: u64, lz: i32) -> (i32, u64) {
+    let fp = lemire::compute_error_scaled::<f32>(q, w, lz);
+    (fp.exp, fp.mant)
+}
+
+fn compute_error_scaled64(q: i32, w: u64, lz: i32) -> (i32, u64) {
+    let fp = lemire::compute_error_scaled::<f64>(q, w, lz);
+    (fp.exp, fp.mant)
+}
+
+fn compute_float32(q: i32, w: u64) -> (i32, u64) {
+    let fp = lemire::compute_float::<f32>(q, w);
+    (fp.exp, fp.mant)
+}
+
+fn compute_float64(q: i32, w: u64) -> (i32, u64) {
+    let fp = lemire::compute_float::<f64>(q, w);
+    (fp.exp, fp.mant)
+}
+
+#[test]
+fn compute_error32_test() {
+    // These test near-halfway cases for single-precision floats.
+    assert_eq!(compute_error32(0, 16777216), (111 + f32::INVALID_FP, 9223372036854775808));
+    assert_eq!(compute_error32(0, 16777217), (111 + f32::INVALID_FP, 9223372586610589696));
+    assert_eq!(compute_error32(0, 16777218), (111 + f32::INVALID_FP, 9223373136366403584));
+    assert_eq!(compute_error32(0, 16777219), (111 + f32::INVALID_FP, 9223373686122217472));
+    assert_eq!(compute_error32(0, 16777220), (111 + f32::INVALID_FP, 9223374235878031360));
+
+    // These are examples of the above tests, with
+    // digits from the exponent shifted to the mantissa.
+    assert_eq!(
+        compute_error32(-10, 167772160000000000),
+        (111 + f32::INVALID_FP, 9223372036854775808)
+    );
+    assert_eq!(
+        compute_error32(-10, 167772170000000000),
+        (111 + f32::INVALID_FP, 9223372586610589696)
+    );
+    assert_eq!(
+        compute_error32(-10, 167772180000000000),
+        (111 + f32::INVALID_FP, 9223373136366403584)
+    );
+    // Let's check the lines to see if anything is different in table...
+    assert_eq!(
+        compute_error32(-10, 167772190000000000),
+        (111 + f32::INVALID_FP, 9223373686122217472)
+    );
+    assert_eq!(
+        compute_error32(-10, 167772200000000000),
+        (111 + f32::INVALID_FP, 9223374235878031360)
+    );
+}
+
+#[test]
+fn compute_error64_test() {
+    // These test near-halfway cases for double-precision floats.
+    assert_eq!(compute_error64(0, 9007199254740992), (1065 + f64::INVALID_FP, 9223372036854775808));
+    assert_eq!(compute_error64(0, 9007199254740993), (1065 + f64::INVALID_FP, 9223372036854776832));
+    assert_eq!(compute_error64(0, 9007199254740994), (1065 + f64::INVALID_FP, 9223372036854777856));
+    assert_eq!(compute_error64(0, 9007199254740995), (1065 + f64::INVALID_FP, 9223372036854778880));
+    assert_eq!(compute_error64(0, 9007199254740996), (1065 + f64::INVALID_FP, 9223372036854779904));
+    assert_eq!(
+        compute_error64(0, 18014398509481984),
+        (1066 + f64::INVALID_FP, 9223372036854775808)
+    );
+    assert_eq!(
+        compute_error64(0, 18014398509481986),
+        (1066 + f64::INVALID_FP, 9223372036854776832)
+    );
+    assert_eq!(
+        compute_error64(0, 18014398509481988),
+        (1066 + f64::INVALID_FP, 9223372036854777856)
+    );
+    assert_eq!(
+        compute_error64(0, 18014398509481990),
+        (1066 + f64::INVALID_FP, 9223372036854778880)
+    );
+    assert_eq!(
+        compute_error64(0, 18014398509481992),
+        (1066 + f64::INVALID_FP, 9223372036854779904)
+    );
+
+    // Test a much closer set of examples.
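+    // (neighbouring inputs here differ by a single unit in the last decimal
+    // digit, so they probe the error window around the scaled product)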
+ assert_eq!( + compute_error64(0, 9007199254740991), + (1064 + f64::INVALID_FP, 18446744073709549568) + ); + assert_eq!( + compute_error64(0, 9223372036854776831), + (1075 + f64::INVALID_FP, 9223372036854776830) + ); + assert_eq!( + compute_error64(0, 9223372036854776832), + (1075 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error64(0, 9223372036854776833), + (1075 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error64(-42, 9123456727292927), + (925 + f64::INVALID_FP, 13021432563531497894) + ); + assert_eq!( + compute_error64(-43, 91234567272929275), + (925 + f64::INVALID_FP, 13021432563531498606) + ); + assert_eq!( + compute_error64(-42, 9123456727292928), + (925 + f64::INVALID_FP, 13021432563531499320) + ); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!( + compute_error64(-3, 9007199254740992000), + (1065 + f64::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error64(-3, 9007199254740993000), + (1065 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error64(-3, 9007199254740994000), + (1065 + f64::INVALID_FP, 9223372036854777856) + ); + assert_eq!( + compute_error64(-3, 9007199254740995000), + (1065 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!( + compute_error64(-3, 9007199254740996000), + (1065 + f64::INVALID_FP, 9223372036854779904) + ); + + // Test from errors in atof. + assert_eq!( + compute_error64(-18, 1000000178813934326), + (1012 + f64::INVALID_FP, 9223373686122217470) + ); + + // Check edge-cases from previous errors. + assert_eq!( + compute_error64(-342, 2470328229206232720), + (-64 + f64::INVALID_FP, 18446744073709551608) + ); +} + +#[test] +fn compute_error_scaled32_test() { + // These are the same examples above, just using pre-computed scaled values. + + // These test near-halfway cases for single-precision floats. + assert_eq!( + compute_error_scaled32(0, 4611686018427387904, 39), + (111 + f32::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled32(0, 4611686293305294848, 39), + (111 + f32::INVALID_FP, 9223372586610589696) + ); + assert_eq!( + compute_error_scaled32(0, 4611686568183201792, 39), + (111 + f32::INVALID_FP, 9223373136366403584) + ); + assert_eq!( + compute_error_scaled32(0, 4611686843061108736, 39), + (111 + f32::INVALID_FP, 9223373686122217472) + ); + assert_eq!( + compute_error_scaled32(0, 4611687117939015680, 39), + (111 + f32::INVALID_FP, 9223374235878031360) + ); + + assert_eq!( + compute_error_scaled32(-10, 9223372036854775808, 6), + (111 + f32::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled32(-10, 9223372586610589696, 6), + (111 + f32::INVALID_FP, 9223372586610589696) + ); + assert_eq!( + compute_error_scaled32(-10, 9223373136366403584, 6), + (111 + f32::INVALID_FP, 9223373136366403584) + ); + assert_eq!( + compute_error_scaled32(-10, 9223373686122217472, 6), + (111 + f32::INVALID_FP, 9223373686122217472) + ); + assert_eq!( + compute_error_scaled32(-10, 9223374235878031360, 6), + (111 + f32::INVALID_FP, 9223374235878031360) + ); +} + +#[test] +fn compute_error_scaled64_test() { + // These are the same examples above, just using pre-computed scaled values. + + // These test near-halfway cases for double-precision floats. 
+ assert_eq!( + compute_error_scaled64(0, 4611686018427387904, 10), + (1065 + f64::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388416, 10), + (1065 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388928, 10), + (1065 + f64::INVALID_FP, 9223372036854777856) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427389440, 10), + (1065 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427389952, 10), + (1065 + f64::INVALID_FP, 9223372036854779904) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427387904, 9), + (1066 + f64::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388416, 9), + (1066 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388928, 9), + (1066 + f64::INVALID_FP, 9223372036854777856) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427389440, 9), + (1066 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427389952, 9), + (1066 + f64::INVALID_FP, 9223372036854779904) + ); + + // Test a much closer set of examples. + assert_eq!( + compute_error_scaled64(0, 9223372036854774784, 11), + (1064 + f64::INVALID_FP, 18446744073709549568) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388415, 0), + (1075 + f64::INVALID_FP, 9223372036854776830) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388416, 0), + (1075 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(0, 4611686018427388416, 0), + (1075 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(-42, 6510716281765748947, 10), + (925 + f64::INVALID_FP, 13021432563531497894) + ); + assert_eq!( + compute_error_scaled64(-43, 6510716281765749303, 7), + (925 + f64::INVALID_FP, 13021432563531498606) + ); + assert_eq!( + compute_error_scaled64(-42, 6510716281765749660, 10), + (925 + f64::INVALID_FP, 13021432563531499320) + ); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!( + compute_error_scaled64(-3, 9223372036854775808, 1), + (1065 + f64::INVALID_FP, 9223372036854775808) + ); + assert_eq!( + compute_error_scaled64(-3, 9223372036854776832, 1), + (1065 + f64::INVALID_FP, 9223372036854776832) + ); + assert_eq!( + compute_error_scaled64(-3, 9223372036854777856, 1), + (1065 + f64::INVALID_FP, 9223372036854777856) + ); + assert_eq!( + compute_error_scaled64(-3, 9223372036854778880, 1), + (1065 + f64::INVALID_FP, 9223372036854778880) + ); + assert_eq!( + compute_error_scaled64(-3, 9223372036854779904, 1), + (1065 + f64::INVALID_FP, 9223372036854779904) + ); + + // Test from errors in atof. + assert_eq!( + compute_error_scaled64(-18, 9223373686122217470, 4), + (1012 + f64::INVALID_FP, 9223373686122217470) + ); + + // Check edge-cases from previous errors. + assert_eq!( + compute_error_scaled64(-342, 9223372036854775804, 2), + (-64 + f64::INVALID_FP, 18446744073709551608) + ); +} + +#[test] +fn compute_float_f32_rounding() { + // These test near-halfway cases for single-precision floats. 
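+    // 16777216 is 2^24, the first integer whose successor cannot be
+    // represented exactly in f32: 16777217 sits exactly halfway between
+    // 16777216 and 16777218, and ties-to-even rounds it down, so both inputs
+    // produce mantissa bits 0 with biased exponent 151 (127 + 24).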
+ assert_eq!(compute_float32(0, 16777216), (151, 0)); + assert_eq!(compute_float32(0, 16777217), (151, 0)); + assert_eq!(compute_float32(0, 16777218), (151, 1)); + assert_eq!(compute_float32(0, 16777219), (151, 2)); + assert_eq!(compute_float32(0, 16777220), (151, 2)); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!(compute_float32(-10, 167772160000000000), (151, 0)); + assert_eq!(compute_float32(-10, 167772170000000000), (151, 0)); + assert_eq!(compute_float32(-10, 167772180000000000), (151, 1)); + // Let's check the lines to see if anything is different in table... + assert_eq!(compute_float32(-10, 167772190000000000), (151, 2)); + assert_eq!(compute_float32(-10, 167772200000000000), (151, 2)); +} + +#[test] +fn compute_float_f64_rounding() { + // Also need to check halfway cases **inside** that exponent range. + + // These test near-halfway cases for double-precision floats. + assert_eq!(compute_float64(0, 9007199254740992), (1076, 0)); + assert_eq!(compute_float64(0, 9007199254740993), (1076, 0)); + assert_eq!(compute_float64(0, 9007199254740994), (1076, 1)); + assert_eq!(compute_float64(0, 9007199254740995), (1076, 2)); + assert_eq!(compute_float64(0, 9007199254740996), (1076, 2)); + assert_eq!(compute_float64(0, 18014398509481984), (1077, 0)); + assert_eq!(compute_float64(0, 18014398509481986), (1077, 0)); + assert_eq!(compute_float64(0, 18014398509481988), (1077, 1)); + assert_eq!(compute_float64(0, 18014398509481990), (1077, 2)); + assert_eq!(compute_float64(0, 18014398509481992), (1077, 2)); + + // Test a much closer set of examples. + assert_eq!(compute_float64(0, 9007199254740991), (1075, 4503599627370495)); + assert_eq!(compute_float64(0, 9223372036854776831), (1086, 0)); + assert_eq!(compute_float64(0, 9223372036854776832), (1086, 0)); + assert_eq!(compute_float64(0, 9223372036854776833), (1086, 1)); + assert_eq!(compute_float64(-42, 9123456727292927), (936, 1854521741541368)); + assert_eq!(compute_float64(-43, 91234567272929275), (936, 1854521741541369)); + assert_eq!(compute_float64(-42, 9123456727292928), (936, 1854521741541369)); + + // These are examples of the above tests, with + // digits from the exponent shifted to the mantissa. + assert_eq!(compute_float64(-3, 9007199254740992000), (1076, 0)); + assert_eq!(compute_float64(-3, 9007199254740993000), (1076, 0)); + assert_eq!(compute_float64(-3, 9007199254740994000), (1076, 1)); + assert_eq!(compute_float64(-3, 9007199254740995000), (1076, 2)); + assert_eq!(compute_float64(-3, 9007199254740996000), (1076, 2)); +} diff --git a/third_party/rust/minimal-lexical/tests/libm_tests.rs b/third_party/rust/minimal-lexical/tests/libm_tests.rs new file mode 100644 index 0000000000..7f5352e193 --- /dev/null +++ b/third_party/rust/minimal-lexical/tests/libm_tests.rs @@ -0,0 +1,289 @@ +#![cfg(all(not(feature = "std"), feature = "compact"))] + +// These are adapted from libm, a port of musl libc's libm to Rust. 
+// libm can be found online [here](https://github.com/rust-lang/libm), +// and is similarly licensed under an Apache2.0/MIT license + +use core::f64; +use minimal_lexical::libm; + +#[test] +fn fabsf_sanity_test() { + assert_eq!(libm::fabsf(-1.0), 1.0); + assert_eq!(libm::fabsf(2.8), 2.8); +} + +/// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs +#[test] +fn fabsf_spec_test() { + assert!(libm::fabsf(f32::NAN).is_nan()); + for f in [0.0, -0.0].iter().copied() { + assert_eq!(libm::fabsf(f), 0.0); + } + for f in [f32::INFINITY, f32::NEG_INFINITY].iter().copied() { + assert_eq!(libm::fabsf(f), f32::INFINITY); + } +} + +#[test] +fn sqrtf_sanity_test() { + assert_eq!(libm::sqrtf(100.0), 10.0); + assert_eq!(libm::sqrtf(4.0), 2.0); +} + +/// The spec: https://en.cppreference.com/w/cpp/numeric/math/sqrt +#[test] +fn sqrtf_spec_test() { + // Not Asserted: FE_INVALID exception is raised if argument is negative. + assert!(libm::sqrtf(-1.0).is_nan()); + assert!(libm::sqrtf(f32::NAN).is_nan()); + for f in [0.0, -0.0, f32::INFINITY].iter().copied() { + assert_eq!(libm::sqrtf(f), f); + } +} + +const POS_ZERO: &[f64] = &[0.0]; +const NEG_ZERO: &[f64] = &[-0.0]; +const POS_ONE: &[f64] = &[1.0]; +const NEG_ONE: &[f64] = &[-1.0]; +const POS_FLOATS: &[f64] = &[99.0 / 70.0, f64::consts::E, f64::consts::PI]; +const NEG_FLOATS: &[f64] = &[-99.0 / 70.0, -f64::consts::E, -f64::consts::PI]; +const POS_SMALL_FLOATS: &[f64] = &[(1.0 / 2.0), f64::MIN_POSITIVE, f64::EPSILON]; +const NEG_SMALL_FLOATS: &[f64] = &[-(1.0 / 2.0), -f64::MIN_POSITIVE, -f64::EPSILON]; +const POS_EVENS: &[f64] = &[2.0, 6.0, 8.0, 10.0, 22.0, 100.0, f64::MAX]; +const NEG_EVENS: &[f64] = &[f64::MIN, -100.0, -22.0, -10.0, -8.0, -6.0, -2.0]; +const POS_ODDS: &[f64] = &[3.0, 7.0]; +const NEG_ODDS: &[f64] = &[-7.0, -3.0]; +const NANS: &[f64] = &[f64::NAN]; +const POS_INF: &[f64] = &[f64::INFINITY]; +const NEG_INF: &[f64] = &[f64::NEG_INFINITY]; + +const ALL: &[&[f64]] = &[ + POS_ZERO, + NEG_ZERO, + NANS, + NEG_SMALL_FLOATS, + POS_SMALL_FLOATS, + NEG_FLOATS, + POS_FLOATS, + NEG_EVENS, + POS_EVENS, + NEG_ODDS, + POS_ODDS, + NEG_INF, + POS_INF, + NEG_ONE, + POS_ONE, +]; +const POS: &[&[f64]] = &[POS_ZERO, POS_ODDS, POS_ONE, POS_FLOATS, POS_EVENS, POS_INF]; +const NEG: &[&[f64]] = &[NEG_ZERO, NEG_ODDS, NEG_ONE, NEG_FLOATS, NEG_EVENS, NEG_INF]; + +fn powd(base: f64, exponent: f64, expected: f64) { + let res = libm::powd(base, exponent); + assert!( + if expected.is_nan() { + res.is_nan() + } else { + libm::powd(base, exponent) == expected + }, + "{} ** {} was {} instead of {}", + base, + exponent, + res, + expected + ); +} + +fn powd_test_sets_as_base(sets: &[&[f64]], exponent: f64, expected: f64) { + sets.iter().for_each(|s| s.iter().for_each(|val| powd(*val, exponent, expected))); +} + +fn powd_test_sets_as_exponent(base: f64, sets: &[&[f64]], expected: f64) { + sets.iter().for_each(|s| s.iter().for_each(|val| powd(base, *val, expected))); +} + +fn powd_test_sets(sets: &[&[f64]], computed: &dyn Fn(f64) -> f64, expected: &dyn Fn(f64) -> f64) { + sets.iter().for_each(|s| { + s.iter().for_each(|val| { + let exp = expected(*val); + let res = computed(*val); + + assert!( + if exp.is_nan() { + res.is_nan() + } else { + exp == res + }, + "test for {} was {} instead of {}", + val, + res, + exp + ); + }) + }); +} + +#[test] +fn powd_zero_as_exponent() { + powd_test_sets_as_base(ALL, 0.0, 1.0); + powd_test_sets_as_base(ALL, -0.0, 1.0); +} + +#[test] +fn powd_one_as_base() { + powd_test_sets_as_exponent(1.0, ALL, 1.0); +} + +#[test] +fn 
powd_nan_inputs() { + // NAN as the base: + // (NAN ^ anything *but 0* should be NAN) + powd_test_sets_as_exponent(f64::NAN, &ALL[2..], f64::NAN); + + // NAN as the exponent: + // (anything *but 1* ^ NAN should be NAN) + powd_test_sets_as_base(&ALL[..(ALL.len() - 2)], f64::NAN, f64::NAN); +} + +#[test] +fn powd_infinity_as_base() { + // Positive Infinity as the base: + // (+Infinity ^ positive anything but 0 and NAN should be +Infinity) + powd_test_sets_as_exponent(f64::INFINITY, &POS[1..], f64::INFINITY); + + // (+Infinity ^ negative anything except 0 and NAN should be 0.0) + powd_test_sets_as_exponent(f64::INFINITY, &NEG[1..], 0.0); + + // Negative Infinity as the base: + // (-Infinity ^ positive odd ints should be -Infinity) + powd_test_sets_as_exponent(f64::NEG_INFINITY, &[POS_ODDS], f64::NEG_INFINITY); + + // (-Infinity ^ anything but odd ints should be == -0 ^ (-anything)) + // We can lump in pos/neg odd ints here because they don't seem to + // cause panics (div by zero) in release mode (I think). + powd_test_sets(ALL, &|v: f64| libm::powd(f64::NEG_INFINITY, v), &|v: f64| libm::powd(-0.0, -v)); +} + +#[test] +fn infinity_as_exponent() { + // Positive/Negative base greater than 1: + // (pos/neg > 1 ^ Infinity should be Infinity - note this excludes NAN as the base) + powd_test_sets_as_base(&ALL[5..(ALL.len() - 2)], f64::INFINITY, f64::INFINITY); + + // (pos/neg > 1 ^ -Infinity should be 0.0) + powd_test_sets_as_base(&ALL[5..ALL.len() - 2], f64::NEG_INFINITY, 0.0); + + // Positive/Negative base less than 1: + let base_below_one = &[POS_ZERO, NEG_ZERO, NEG_SMALL_FLOATS, POS_SMALL_FLOATS]; + + // (pos/neg < 1 ^ Infinity should be 0.0 - this also excludes NAN as the base) + powd_test_sets_as_base(base_below_one, f64::INFINITY, 0.0); + + // (pos/neg < 1 ^ -Infinity should be Infinity) + powd_test_sets_as_base(base_below_one, f64::NEG_INFINITY, f64::INFINITY); + + // Positive/Negative 1 as the base: + // (pos/neg 1 ^ Infinity should be 1) + powd_test_sets_as_base(&[NEG_ONE, POS_ONE], f64::INFINITY, 1.0); + + // (pos/neg 1 ^ -Infinity should be 1) + powd_test_sets_as_base(&[NEG_ONE, POS_ONE], f64::NEG_INFINITY, 1.0); +} + +#[test] +fn powd_zero_as_base() { + // Positive Zero as the base: + // (+0 ^ anything positive but 0 and NAN should be +0) + powd_test_sets_as_exponent(0.0, &POS[1..], 0.0); + + // (+0 ^ anything negative but 0 and NAN should be Infinity) + // (this should panic because we're dividing by zero) + powd_test_sets_as_exponent(0.0, &NEG[1..], f64::INFINITY); + + // Negative Zero as the base: + // (-0 ^ anything positive but 0, NAN, and odd ints should be +0) + powd_test_sets_as_exponent(-0.0, &POS[3..], 0.0); + + // (-0 ^ anything negative but 0, NAN, and odd ints should be Infinity) + // (should panic because of divide by zero) + powd_test_sets_as_exponent(-0.0, &NEG[3..], f64::INFINITY); + + // (-0 ^ positive odd ints should be -0) + powd_test_sets_as_exponent(-0.0, &[POS_ODDS], -0.0); + + // (-0 ^ negative odd ints should be -Infinity) + // (should panic because of divide by zero) + powd_test_sets_as_exponent(-0.0, &[NEG_ODDS], f64::NEG_INFINITY); +} + +#[test] +fn special_cases() { + // One as the exponent: + // (anything ^ 1 should be anything - i.e. 
the base) + powd_test_sets(ALL, &|v: f64| libm::powd(v, 1.0), &|v: f64| v); + + // Negative One as the exponent: + // (anything ^ -1 should be 1/anything) + powd_test_sets(ALL, &|v: f64| libm::powd(v, -1.0), &|v: f64| 1.0 / v); + + // Factoring -1 out: + // (negative anything ^ integer should be (-1 ^ integer) * (positive anything ^ integer)) + [POS_ZERO, NEG_ZERO, POS_ONE, NEG_ONE, POS_EVENS, NEG_EVENS].iter().for_each(|int_set| { + int_set.iter().for_each(|int| { + powd_test_sets(ALL, &|v: f64| libm::powd(-v, *int), &|v: f64| { + libm::powd(-1.0, *int) * libm::powd(v, *int) + }); + }) + }); + + // Negative base (imaginary results): + // (-anything except 0 and Infinity ^ non-integer should be NAN) + NEG[1..(NEG.len() - 1)].iter().for_each(|set| { + set.iter().for_each(|val| { + powd_test_sets(&ALL[3..7], &|v: f64| libm::powd(*val, v), &|_| f64::NAN); + }) + }); +} + +#[test] +fn normal_cases() { + assert_eq!(libm::powd(2.0, 20.0), (1 << 20) as f64); + assert_eq!(libm::powd(-1.0, 9.0), -1.0); + assert!(libm::powd(-1.0, 2.2).is_nan()); + assert!(libm::powd(-1.0, -1.14).is_nan()); +} + +#[test] +fn fabsd_sanity_test() { + assert_eq!(libm::fabsd(-1.0), 1.0); + assert_eq!(libm::fabsd(2.8), 2.8); +} + +/// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs +#[test] +fn fabsd_spec_test() { + assert!(libm::fabsd(f64::NAN).is_nan()); + for f in [0.0, -0.0].iter().copied() { + assert_eq!(libm::fabsd(f), 0.0); + } + for f in [f64::INFINITY, f64::NEG_INFINITY].iter().copied() { + assert_eq!(libm::fabsd(f), f64::INFINITY); + } +} + +#[test] +fn sqrtd_sanity_test() { + assert_eq!(libm::sqrtd(100.0), 10.0); + assert_eq!(libm::sqrtd(4.0), 2.0); +} + +/// The spec: https://en.cppreference.com/w/cpp/numeric/math/sqrt +#[test] +fn sqrtd_spec_test() { + // Not Asserted: FE_INVALID exception is raised if argument is negative. 
+    assert!(libm::sqrtd(-1.0).is_nan());
+    assert!(libm::sqrtd(f64::NAN).is_nan());
+    for f in [0.0, -0.0, f64::INFINITY].iter().copied() {
+        assert_eq!(libm::sqrtd(f), f);
+    }
+}
diff --git a/third_party/rust/minimal-lexical/tests/mask_tests.rs b/third_party/rust/minimal-lexical/tests/mask_tests.rs
new file mode 100644
index 0000000000..97e70a72b8
--- /dev/null
+++ b/third_party/rust/minimal-lexical/tests/mask_tests.rs
@@ -0,0 +1,16 @@
+use minimal_lexical::mask;
+
+#[test]
+fn lower_n_mask_test() {
+    assert_eq!(mask::lower_n_mask(2), 0b11);
+}
+
+#[test]
+fn lower_n_halfway_test() {
+    assert_eq!(mask::lower_n_halfway(2), 0b10);
+}
+
+#[test]
+fn nth_bit_test() {
+    assert_eq!(mask::nth_bit(2), 0b100);
+}
diff --git a/third_party/rust/minimal-lexical/tests/number_tests.rs b/third_party/rust/minimal-lexical/tests/number_tests.rs
new file mode 100644
index 0000000000..947be394c9
--- /dev/null
+++ b/third_party/rust/minimal-lexical/tests/number_tests.rs
@@ -0,0 +1,88 @@
+use minimal_lexical::number::Number;
+
+#[test]
+fn is_fast_path_test() {
+    let mut number = Number {
+        exponent: -4,
+        mantissa: 12345,
+        many_digits: false,
+    };
+    assert_eq!(number.is_fast_path::<f32>(), true);
+    assert_eq!(number.is_fast_path::<f64>(), true);
+
+    number.exponent = -15;
+    assert_eq!(number.is_fast_path::<f32>(), false);
+    assert_eq!(number.is_fast_path::<f64>(), true);
+
+    number.exponent = -25;
+    assert_eq!(number.is_fast_path::<f32>(), false);
+    assert_eq!(number.is_fast_path::<f64>(), false);
+
+    number.exponent = 25;
+    assert_eq!(number.is_fast_path::<f32>(), false);
+    assert_eq!(number.is_fast_path::<f64>(), true);
+
+    number.exponent = 36;
+    assert_eq!(number.is_fast_path::<f32>(), false);
+    assert_eq!(number.is_fast_path::<f64>(), true);
+
+    number.exponent = 38;
+    assert_eq!(number.is_fast_path::<f32>(), false);
+    assert_eq!(number.is_fast_path::<f64>(), false);
+
+    number.mantissa = 1 << 25;
+    number.exponent = 0;
+    assert_eq!(number.is_fast_path::<f32>(), false);
+    assert_eq!(number.is_fast_path::<f64>(), true);
+
+    number.mantissa = 1 << 54;
+    assert_eq!(number.is_fast_path::<f32>(), false);
+    assert_eq!(number.is_fast_path::<f64>(), false);
+
+    number.mantissa = 1 << 52;
+    assert_eq!(number.is_fast_path::<f32>(), false);
+    assert_eq!(number.is_fast_path::<f64>(), true);
+
+    number.many_digits = true;
+    assert_eq!(number.is_fast_path::<f32>(), false);
+    assert_eq!(number.is_fast_path::<f64>(), false);
+}
+
+#[test]
+fn try_fast_path_test() {
+    let mut number = Number {
+        exponent: -4,
+        mantissa: 12345,
+        many_digits: false,
+    };
+    assert_eq!(number.try_fast_path::<f32>(), Some(1.2345));
+    assert_eq!(number.try_fast_path::<f64>(), Some(1.2345));
+
+    number.exponent = -10;
+    assert_eq!(number.try_fast_path::<f32>(), Some(1.2345e-6));
+    assert_eq!(number.try_fast_path::<f64>(), Some(1.2345e-6));
+
+    number.exponent = -20;
+    assert_eq!(number.try_fast_path::<f32>(), None);
+    assert_eq!(number.try_fast_path::<f64>(), Some(1.2345e-16));
+
+    number.exponent = -25;
+    assert_eq!(number.try_fast_path::<f32>(), None);
+    assert_eq!(number.try_fast_path::<f64>(), None);
+
+    number.exponent = 12;
+    assert_eq!(number.try_fast_path::<f32>(), Some(1.2345e16));
+    assert_eq!(number.try_fast_path::<f64>(), Some(1.2345e16));
+
+    number.exponent = 25;
+    assert_eq!(number.try_fast_path::<f32>(), None);
+    assert_eq!(number.try_fast_path::<f64>(), Some(1.2345e29));
+
+    number.exponent = 32;
+    assert_eq!(number.try_fast_path::<f32>(), None);
+    assert_eq!(number.try_fast_path::<f64>(), Some(1.2345e36));
+
+    number.exponent = 36;
+    assert_eq!(number.try_fast_path::<f32>(), None);
+    assert_eq!(number.try_fast_path::<f64>(), None);
+}
diff --git a/third_party/rust/minimal-lexical/tests/parse_tests.rs
b/third_party/rust/minimal-lexical/tests/parse_tests.rs new file mode 100644 index 0000000000..48856fd1cc --- /dev/null +++ b/third_party/rust/minimal-lexical/tests/parse_tests.rs @@ -0,0 +1,189 @@ +use core::f64; +use minimal_lexical::{num, parse}; + +fn check_parse_float(integer: &str, fraction: &str, exponent: i32, expected: F) { + let integer = integer.as_bytes().iter(); + let fraction = fraction.as_bytes().iter(); + assert!(expected == parse::parse_float::(integer, fraction, exponent)); +} + +#[test] +fn parse_f32_test() { + check_parse_float("", "", 0, 0.0_f32); + check_parse_float("1", "2345", 0, 1.2345_f32); + check_parse_float("12", "345", 0, 12.345_f32); + check_parse_float("12345", "6789", 0, 12345.6789_f32); + check_parse_float("1", "2345", 10, 1.2345e10_f32); + check_parse_float("1", "2345", -38, 1.2345e-38_f32); + + // Check expected rounding, using borderline cases. + // Round-down, halfway + check_parse_float("16777216", "", 0, 16777216.0_f32); + check_parse_float("16777217", "", 0, 16777216.0_f32); + check_parse_float("16777218", "", 0, 16777218.0_f32); + check_parse_float("33554432", "", 0, 33554432.0_f32); + check_parse_float("33554434", "", 0, 33554432.0_f32); + check_parse_float("33554436", "", 0, 33554436.0_f32); + check_parse_float("17179869184", "", 0, 17179869184.0_f32); + check_parse_float("17179870208", "", 0, 17179869184.0_f32); + check_parse_float("17179871232", "", 0, 17179871232.0_f32); + + // Round-up, halfway + check_parse_float("16777218", "", 0, 16777218.0_f32); + check_parse_float("16777219", "", 0, 16777220.0_f32); + check_parse_float("16777220", "", 0, 16777220.0_f32); + + check_parse_float("33554436", "", 0, 33554436.0_f32); + check_parse_float("33554438", "", 0, 33554440.0_f32); + check_parse_float("33554440", "", 0, 33554440.0_f32); + + check_parse_float("17179871232", "", 0, 17179871232.0_f32); + check_parse_float("17179872256", "", 0, 17179873280.0_f32); + check_parse_float("17179873280", "", 0, 17179873280.0_f32); + + // Round-up, above halfway + check_parse_float("33554435", "", 0, 33554436.0_f32); + check_parse_float("17179870209", "", 0, 17179871232.0_f32); + + // Check exactly halfway, round-up at halfway + check_parse_float("1", "00000017881393432617187499", 0, 1.0000001_f32); + check_parse_float("1", "000000178813934326171875", 0, 1.0000002_f32); + check_parse_float("1", "00000017881393432617187501", 0, 1.0000002_f32); + + check_parse_float("", "000000000000000000000000000000000000011754943508222875079687365372222456778186655567720875215087517062784172594547271728515625", 0, 1.1754943508222875e-38f32); +} + +#[test] +fn parse_f64_test() { + check_parse_float("", "", 0, 0.0_f64); + check_parse_float("1", "2345", 0, 1.2345_f64); + check_parse_float("12", "345", 0, 12.345_f64); + check_parse_float("12345", "6789", 0, 12345.6789_f64); + check_parse_float("1", "2345", 10, 1.2345e10_f64); + check_parse_float("1", "2345", -308, 1.2345e-308_f64); + + // Check expected rounding, using borderline cases. 
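+    // 9007199254740993 (2^53 + 1) lies exactly halfway between the adjacent
+    // f64 values 9007199254740992 and 9007199254740994; ties round to the
+    // even mantissa, so it must parse as 9007199254740992.0.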
+ // Round-down, halfway + check_parse_float("9007199254740992", "", 0, 9007199254740992.0_f64); + check_parse_float("9007199254740993", "", 0, 9007199254740992.0_f64); + check_parse_float("9007199254740994", "", 0, 9007199254740994.0_f64); + + check_parse_float("18014398509481984", "", 0, 18014398509481984.0_f64); + check_parse_float("18014398509481986", "", 0, 18014398509481984.0_f64); + check_parse_float("18014398509481988", "", 0, 18014398509481988.0_f64); + + check_parse_float("9223372036854775808", "", 0, 9223372036854775808.0_f64); + check_parse_float("9223372036854776832", "", 0, 9223372036854775808.0_f64); + check_parse_float("9223372036854777856", "", 0, 9223372036854777856.0_f64); + + check_parse_float( + "11417981541647679048466287755595961091061972992", + "", + 0, + 11417981541647679048466287755595961091061972992.0_f64, + ); + check_parse_float( + "11417981541647680316116887983825362587765178368", + "", + 0, + 11417981541647679048466287755595961091061972992.0_f64, + ); + check_parse_float( + "11417981541647681583767488212054764084468383744", + "", + 0, + 11417981541647681583767488212054764084468383744.0_f64, + ); + + // Round-up, halfway + check_parse_float("9007199254740994", "", 0, 9007199254740994.0_f64); + check_parse_float("9007199254740995", "", 0, 9007199254740996.0_f64); + check_parse_float("9007199254740996", "", 0, 9007199254740996.0_f64); + + check_parse_float("18014398509481988", "", 0, 18014398509481988.0_f64); + check_parse_float("18014398509481990", "", 0, 18014398509481992.0_f64); + check_parse_float("18014398509481992", "", 0, 18014398509481992.0_f64); + + check_parse_float("9223372036854777856", "", 0, 9223372036854777856.0_f64); + check_parse_float("9223372036854778880", "", 0, 9223372036854779904.0_f64); + check_parse_float("9223372036854779904", "", 0, 9223372036854779904.0_f64); + + check_parse_float( + "11417981541647681583767488212054764084468383744", + "", + 0, + 11417981541647681583767488212054764084468383744.0_f64, + ); + check_parse_float( + "11417981541647682851418088440284165581171589120", + "", + 0, + 11417981541647684119068688668513567077874794496.0_f64, + ); + check_parse_float( + "11417981541647684119068688668513567077874794496", + "", + 0, + 11417981541647684119068688668513567077874794496.0_f64, + ); + + // Round-up, above halfway + check_parse_float("9223372036854776833", "", 0, 9223372036854777856.0_f64); + check_parse_float( + "11417981541647680316116887983825362587765178369", + "", + 0, + 11417981541647681583767488212054764084468383744.0_f64, + ); + + // Rounding error + // Adapted from failures in strtod. 
+ check_parse_float("2", "2250738585072014", -308, 2.2250738585072014e-308_f64); + check_parse_float("2", "2250738585072011360574097967091319759348195463516456480234261097248222220210769455165295239081350879141491589130396211068700864386945946455276572074078206217433799881410632673292535522868813721490129811224514518898490572223072852551331557550159143974763979834118019993239625482890171070818506906306666559949382757725720157630626906633326475653000092458883164330377797918696120494973903778297049050510806099407302629371289589500035837999672072543043602840788957717961509455167482434710307026091446215722898802581825451803257070188608721131280795122334262883686223215037756666225039825343359745688844239002654981983854879482922068947216898310996983658468140228542433306603398508864458040010349339704275671864433837704860378616227717385456230658746790140867233276367187499", -308, 2.225073858507201e-308_f64); + check_parse_float("2", "22507385850720113605740979670913197593481954635164564802342610972482222202107694551652952390813508791414915891303962110687008643869459464552765720740782062174337998814106326732925355228688137214901298112245145188984905722230728525513315575501591439747639798341180199932396254828901710708185069063066665599493827577257201576306269066333264756530000924588831643303777979186961204949739037782970490505108060994073026293712895895000358379996720725430436028407889577179615094551674824347103070260914462157228988025818254518032570701886087211312807951223342628836862232150377566662250398253433597456888442390026549819838548794829220689472168983109969836584681402285424333066033985088644580400103493397042756718644338377048603786162277173854562306587467901408672332763671875", -308, 2.2250738585072014e-308_f64); + check_parse_float("2", "2250738585072011360574097967091319759348195463516456480234261097248222220210769455165295239081350879141491589130396211068700864386945946455276572074078206217433799881410632673292535522868813721490129811224514518898490572223072852551331557550159143974763979834118019993239625482890171070818506906306666559949382757725720157630626906633326475653000092458883164330377797918696120494973903778297049050510806099407302629371289589500035837999672072543043602840788957717961509455167482434710307026091446215722898802581825451803257070188608721131280795122334262883686223215037756666225039825343359745688844239002654981983854879482922068947216898310996983658468140228542433306603398508864458040010349339704275671864433837704860378616227717385456230658746790140867233276367187501", -308, 2.2250738585072014e-308_f64); + check_parse_float("179769313486231580793728971405303415079934132710037826936173778980444968292764750946649017977587207096330286416692887910946555547851940402630657488671505820681908902000708383676273854845817711531764475730270069855571366959622842914819860834936475292719074168444365510704342711559699508093042880177904174497791", "9999999999999999999999999999999999999999999999999999999999999999999999", 0, 1.7976931348623157e+308_f64); + check_parse_float("7", 
"4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984374999", -324, 5.0e-324_f64); + check_parse_float("7", "4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375", -324, 1.0e-323_f64); + check_parse_float("7", "4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375001", -324, 1.0e-323_f64); + check_parse_float("", "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328125", 0, 0.0_f64); + + // Rounding error + // Adapted from: + // 
https://www.exploringbinary.com/how-glibc-strtod-works/ + check_parse_float("", "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000022250738585072008890245868760858598876504231122409594654935248025624400092282356951787758888037591552642309780950434312085877387158357291821993020294379224223559819827501242041788969571311791082261043971979604000454897391938079198936081525613113376149842043271751033627391549782731594143828136275113838604094249464942286316695429105080201815926642134996606517803095075913058719846423906068637102005108723282784678843631944515866135041223479014792369585208321597621066375401613736583044193603714778355306682834535634005074073040135602968046375918583163124224521599262546494300836851861719422417646455137135420132217031370496583210154654068035397417906022589503023501937519773030945763173210852507299305089761582519159720757232455434770912461317493580281734466552734375", 0, 2.2250738585072011e-308_f64); + + // Rounding error + // Adapted from test-parse-random failures. + check_parse_float("1009", "", -31, 1.009e-28_f64); + check_parse_float("18294", "", 304, f64::INFINITY); + + // Rounding error + // Adapted from a @dangrabcad's issue #20. + check_parse_float("7", "689539722041643", 164, 7.689539722041643e164_f64); + check_parse_float("768953972204164300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "", 0, 7.689539722041643e164_f64); + check_parse_float("768953972204164300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 7.689539722041643e164_f64); + + // Check other cases similar to @dangrabcad's issue #20. + check_parse_float("9223372036854776833", "0", 0, 9223372036854777856.0_f64); + check_parse_float( + "11417981541647680316116887983825362587765178369", + "0", + 0, + 11417981541647681583767488212054764084468383744.0_f64, + ); + check_parse_float("9007199254740995", "0", 0, 9007199254740996.0_f64); + check_parse_float("18014398509481990", "0", 0, 18014398509481992.0_f64); + check_parse_float("9223372036854778880", "0", 0, 9223372036854779904.0_f64); + check_parse_float( + "11417981541647682851418088440284165581171589120", + "0", + 0, + 11417981541647684119068688668513567077874794496.0_f64, + ); + + // Check other cases ostensibly identified via proptest. 
+ check_parse_float("71610528364411830000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 71610528364411830000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64); + check_parse_float("126769393745745060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 126769393745745060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64); + check_parse_float("38652960461239320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "0", 0, 38652960461239320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0_f64); +} diff --git a/third_party/rust/minimal-lexical/tests/rounding_tests.rs b/third_party/rust/minimal-lexical/tests/rounding_tests.rs new file mode 100644 index 0000000000..794d696fb8 --- /dev/null +++ b/third_party/rust/minimal-lexical/tests/rounding_tests.rs @@ -0,0 +1,64 @@ +use minimal_lexical::extended_float::ExtendedFloat; +use minimal_lexical::rounding; + +#[test] +fn round_test() { + let mut fp = ExtendedFloat { + mant: 9223372036854776832, + exp: -10, + }; + rounding::round::(&mut fp, |f, s| { + f.mant >>= s; + f.exp += s; + }); + assert_eq!(fp.mant, 0); + assert_eq!(fp.exp, 1); + + let mut fp = ExtendedFloat { + mant: 9223372036854776832, + exp: -10, + }; + rounding::round::(&mut fp, |f, s| { + f.mant >>= s; + f.exp += s; + // Round-up. 
+        f.mant += 1;
+    });
+    assert_eq!(fp.mant, 1);
+    assert_eq!(fp.exp, 1);
+
+    // Round-down
+    let mut fp = ExtendedFloat {
+        mant: 9223372036854776832,
+        exp: -10,
+    };
+    rounding::round::<f64, _>(&mut fp, |f, s| {
+        rounding::round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| {
+            is_above || (is_odd && is_halfway)
+        });
+    });
+    assert_eq!(fp.mant, 0);
+    assert_eq!(fp.exp, 1);
+
+    // Round up
+    let mut fp = ExtendedFloat {
+        mant: 9223372036854778880,
+        exp: -10,
+    };
+    rounding::round::<f64, _>(&mut fp, |f, s| {
+        rounding::round_nearest_tie_even(f, s, |is_odd, is_halfway, is_above| {
+            is_above || (is_odd && is_halfway)
+        });
+    });
+    assert_eq!(fp.mant, 2);
+    assert_eq!(fp.exp, 1);
+
+    // Round down
+    let mut fp = ExtendedFloat {
+        mant: 9223372036854778880,
+        exp: -10,
+    };
+    rounding::round::<f64, _>(&mut fp, rounding::round_down);
+    assert_eq!(fp.mant, 1);
+    assert_eq!(fp.exp, 1);
+}
diff --git a/third_party/rust/minimal-lexical/tests/slow_tests.rs b/third_party/rust/minimal-lexical/tests/slow_tests.rs
new file mode 100644
index 0000000000..2afea69e90
--- /dev/null
+++ b/third_party/rust/minimal-lexical/tests/slow_tests.rs
@@ -0,0 +1,337 @@
+mod stackvec;
+
+use minimal_lexical::bigint::Bigint;
+use minimal_lexical::extended_float::ExtendedFloat;
+use minimal_lexical::num::Float;
+use minimal_lexical::number::Number;
+use minimal_lexical::slow;
+use stackvec::vec_from_u32;
+
+fn b<F: Float>(float: F) -> (u64, i32) {
+    let fp = slow::b(float);
+    (fp.mant, fp.exp)
+}
+
+fn bh<F: Float>(float: F) -> (u64, i32) {
+    let fp = slow::bh(float);
+    (fp.mant, fp.exp)
+}
+
+#[test]
+fn b_test() {
+    assert_eq!(b(1e-45_f32), (1, -149));
+    assert_eq!(b(5e-324_f64), (1, -1074));
+    assert_eq!(b(1e-323_f64), (2, -1074));
+    assert_eq!(b(2e-323_f64), (4, -1074));
+    assert_eq!(b(3e-323_f64), (6, -1074));
+    assert_eq!(b(4e-323_f64), (8, -1074));
+    assert_eq!(b(5e-323_f64), (10, -1074));
+    assert_eq!(b(6e-323_f64), (12, -1074));
+    assert_eq!(b(7e-323_f64), (14, -1074));
+    assert_eq!(b(8e-323_f64), (16, -1074));
+    assert_eq!(b(9e-323_f64), (18, -1074));
+    assert_eq!(b(1_f32), (8388608, -23));
+    assert_eq!(b(1_f64), (4503599627370496, -52));
+    assert_eq!(b(1e38_f32), (9860761, 103));
+    assert_eq!(b(1e308_f64), (5010420900022432, 971));
+}
+
+#[test]
+fn bh_test() {
+    assert_eq!(bh(1e-45_f32), (3, -150));
+    assert_eq!(bh(5e-324_f64), (3, -1075));
+    assert_eq!(bh(1_f32), (16777217, -24));
+    assert_eq!(bh(1_f64), (9007199254740993, -53));
+    assert_eq!(bh(1e38_f32), (19721523, 102));
+    assert_eq!(bh(1e308_f64), (10020841800044865, 970));
+}
+
+#[test]
+fn slow_test() {
+    // 5e-324, round-down.
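+    // The digits below spell out a value just under the halfway point between
+    // 0.0 and 5e-324 (the smallest subnormal); the slow path compares the full
+    // decimal digits against that halfway value, so the result here is the
+    // (mantissa bits, biased exponent) pair for 0.0, i.e. (0, 0), while the
+    // round-up variant that follows lands on 5e-324, i.e. (1, 0).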
+ let integer = b"2"; + let fraction = b"4703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328124999"; + let num = Number { + mantissa: 2470328229206232720, + exponent: -342, + many_digits: true, + }; + let fp = ExtendedFloat { + mant: 1 << 63, + exp: -63, + }; + let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 0); + + // 5e-324, round-up. + let fraction = b"47032822920623272088284396434110686182529901307162382212792841250337753635104375932649918180817996189898282347722858865463328355177969898199387398005390939063150356595155702263922908583924491051844359318028499365361525003193704576782492193656236698636584807570015857692699037063119282795585513329278343384093519780155312465972635795746227664652728272200563740064854999770965994704540208281662262378573934507363390079677619305775067401763246736009689513405355374585166611342237666786041621596804619144672918403005300575308490487653917113865916462395249126236538818796362393732804238910186723484976682350898633885879256283027559956575244555072551893136908362547791869486679949683240497058210285131854513962138377228261454376934125320985913276672363281251"; + let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 0); + + // 8.98846567431158e+307 + let integer = b"8"; + let fraction = b"9884656743115805365666807213050294962762414131308158973971342756154045415486693752413698006024096935349884403114202125541629105369684531108613657287705365884742938136589844238179474556051429647415148697857438797685859063890851407391008830874765563025951597582513936655578157348020066364210154316532161708032"; + let num = Number { + mantissa: 8988465674311580536, + exponent: 289, + many_digits: true, + }; + let fp = ExtendedFloat { + mant: 9223372036854776832, + exp: 2035, + }; + let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 2046); + + // 8.988465674311582e+307 + let fraction = b"98846567431158053656668072130502949627624141313081589739713427561540454154866937524136980060240969353498844031142021255416291053696845311086136572877053658847429381365898442381794745560514296474151486978574387976858590638908514073910088308747655630259515975825139366555781573480200663642101543165321617080321"; + let result = slow::slow::(num.clone(), fp, integer.iter(), fraction.iter()); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 2046); +} + +#[test] +fn positive_digit_comp_test() { + // 8.98846567431158e+307 + let bigmant = Bigint { + data: vec_from_u32(&[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1024, 2147483648, + ]), + }; + let exponent = 307 + 1 - 308; + let result = slow::positive_digit_comp::(bigmant, 
exponent); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 2046); + + // 8.988465674311582e+307 + let bigmant = Bigint { + data: vec_from_u32(&[ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1024, 2147483648, + ]), + }; + let exponent = 307 + 1 - 308; + let result = slow::positive_digit_comp::(bigmant, exponent); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 2046); +} + +#[test] +fn negative_digit_comp_test() { + // 5e-324, below halfway, round-down to 0.0. + let bigmant = Bigint { + data: vec_from_u32(&[ + 1727738439, 330069557, 3509095598, 686205316, 156923684, 750687444, 2688855918, + 28211928, 1887482096, 3222998811, 913348873, 1652282845, 1600735541, 1664240266, + 84454144, 1487769792, 1855966778, 2832488299, 507030148, 1410055467, 2513359584, + 3453963205, 779237894, 3456088326, 3671009895, 3094451696, 1250165638, 2682979794, + 357925323, 1713890438, 3271046672, 3485897285, 3934710962, 1813530592, 199705026, + 976390839, 2805488572, 2194288220, 2094065006, 2592523639, 3798974617, 586957244, + 1409218821, 3442050171, 3789534764, 1380190380, 2055222457, 3535299831, 429482276, + 389342206, 133558576, 721875297, 3013586570, 540178306, 2389746866, 2313334501, + 422440635, 1288499129, 864978311, 842263325, 3016323856, 2282442263, 1440906063, + 3931458696, 3511314276, 1884879882, 946366824, 4260548261, 1073379659, 1732329252, + 3828972211, 1915607049, 3665440937, 1844358779, 3735281178, 2646335050, 1457460927, + 2940016422, 1051, + ]), + }; + let fp = ExtendedFloat { + mant: 1 << 63, + exp: -63, + }; + let exponent = -324 + 1 - 755; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 0); + + // 5e-324, halfway, round-down to 0.0. + let bigmant = Bigint { + data: vec_from_u32(&[ + 2084786877, 507136210, 2666388819, 3110242527, 3178432722, 541916566, 208847286, + 3092404665, 83491860, 2893735989, 3973758097, 2600107496, 147629623, 1754010897, + 4226332273, 2587058081, 942453804, 88731834, 1319061990, 173208747, 1982493283, + 3808794987, 3874839738, 1854586992, 3508364323, 2021729080, 1899625710, 2420749567, + 816401711, 3059730605, 1570934109, 3138812023, 1756281367, 3205859133, 2985201975, + 1014588672, 3799556578, 577719905, 4052248225, 3649019757, 398935965, 56421532, + 976366795, 1876047791, 3147705595, 4025764546, 1097271882, 1910500779, 2397021233, + 1340419138, 2753207595, 3067328524, 2210626776, 1280440432, 3940874757, 4172726578, + 1035509558, 1062145421, 1465448826, 2990139501, 1785427751, 2093931515, 4055890033, + 3388365687, 2245484242, 3609657408, 3527114516, 1013577862, 2389075196, 426934091, + 3237939346, 1071362463, 4070999470, 250952461, 2280067948, 1097862995, 2226250520, + 221983348, 1, + ]), + }; + let exponent = -324 + 1 - 752; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 0); + assert_eq!(result.exp, 0); + + // 5e-324, above halfway, round-up to 5e-324. 
+ let bigmant = Bigint { + data: vec_from_u32(&[ + 3667999587, 776394808, 894084415, 1037654204, 1719556155, 1124198371, 2088472861, + 859275578, 834918607, 3167556114, 1082875312, 231271193, 1476296236, 360239786, + 3608617070, 100777043, 834603454, 887318342, 305718012, 1732087473, 2645063646, + 3728211506, 93691724, 1366000745, 723904866, 3037421624, 1816387920, 2732659194, + 3869049819, 532534979, 2824439209, 1323349161, 382944493, 1993820262, 4082215981, + 1555952134, 3635827414, 1482231762, 1867776587, 2130459211, 3989359658, 564215320, + 1173733358, 1580608728, 1412284882, 1602939803, 2382784237, 1925138608, 2495375854, + 519289497, 1762272177, 608514174, 631431287, 4214469733, 754041908, 3072560125, + 1765160997, 2031519620, 1769586374, 4131591237, 674408332, 3759445970, 1904194670, + 3818885807, 980005947, 1736835717, 911406800, 1545844036, 2415915482, 4269340915, + 2314622388, 2123690045, 2055289038, 2509524619, 1325843000, 2388695363, 787668722, + 2219833485, 10, + ]), + }; + let exponent = -324 + 1 - 753; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 0); + + // 1e-323, below halfway, round-down to 5e-324. + let bigmant = Bigint { + data: vec_from_u32(&[ + 888248023, 990208672, 1937352202, 2058615950, 470771052, 2252062332, 3771600458, + 84635785, 1367478992, 1079061842, 2740046621, 661881239, 507239328, 697753503, + 253362433, 168342080, 1272933039, 4202497602, 1521090445, 4230166401, 3245111456, + 1771955024, 2337713684, 1778330386, 2423095095, 693420498, 3750496916, 3753972086, + 1073775970, 846704018, 1223205425, 1867757265, 3214198296, 1145624482, 599115079, + 2929172517, 4121498420, 2287897365, 1987227723, 3482603622, 2806989260, 1760871734, + 4227656463, 1736215921, 2778669702, 4140571142, 1870700075, 2015964902, 1288446830, + 1168026618, 400675728, 2165625891, 450825118, 1620534920, 2874273302, 2645036208, + 1267321906, 3865497387, 2594934933, 2526789975, 459036976, 2552359495, 27750894, + 3204441497, 1944008238, 1359672352, 2839100473, 4191710191, 3220138979, 902020460, + 2896982042, 1451853853, 2406388220, 1238109043, 2615908943, 3644037856, 77415486, + 230114675, 3155, + ]), + }; + let fp = ExtendedFloat { + mant: 1 << 63, + exp: -62, + }; + let exponent = -324 + 1 - 755; + let result = slow::negative_digit_comp::(bigmant, fp, exponent); + assert_eq!(result.mant, 1); + assert_eq!(result.exp, 0); + + // 1e-323, halfway, round-up to 1e-323. 
+ let bigmant = Bigint { + data: vec_from_u32(&[ + 1959393335, 1521408631, 3704199161, 740792990, 945363576, 1625749700, 626541858, + 687279403, 250475582, 91273375, 3331339701, 3505355194, 442888870, 967065395, + 4089062228, 3466206949, 2827361413, 266195502, 3957185970, 519626241, 1652512553, + 2836450370, 3034584624, 1268793682, 1935158378, 1770219946, 1403909835, 2967281406, + 2449205134, 589257223, 417835033, 826501478, 973876807, 1027642808, 365671335, + 3043766018, 2808735142, 1733159717, 3566810083, 2357124681, 1196807897, 169264596, + 2929100385, 1333176077, 853182194, 3487359048, 3291815648, 1436535041, 2896096404, + 4021257415, 3964655489, 612050981, 2336913034, 3841321297, 3232689679, 3928245144, + 3106528676, 3186436263, 101379182, 380483912, 1061315959, 1986827250, 3577735508, + 1575162471, 2441485432, 2239037633, 1991408958, 3040733588, 2872258292, 1280802274, + 1123883446, 3214087391, 3623063818, 752857385, 2545236548, 3293588986, 2383784264, + 665950045, 3, + ]), + }; + let exponent = -324 + 1 - 752; + let result = slow::negative_digit_comp::<f64>(bigmant, fp, exponent); + assert_eq!(result.mant, 2); + assert_eq!(result.exp, 0); + + // 1e-323, above halfway, round-up to 1e-323. + let bigmant = Bigint { + data: vec_from_u32(&[ + 2414064167, 2329184426, 2682253245, 3112962612, 863701169, 3372595114, 1970451287, + 2577826735, 2504755821, 912733750, 3248625938, 693813579, 133921412, 1080719359, + 2235916618, 302331131, 2503810362, 2661955026, 917154036, 901295123, 3640223643, + 2594699927, 281075174, 4098002235, 2171714598, 522330280, 1154196466, 3903010287, + 3017214866, 1597604939, 4178350331, 3970047484, 1148833479, 1686493490, 3656713352, + 372889108, 2317547651, 151727992, 1308362466, 2096410338, 3378144383, 1692645962, + 3521200074, 446858888, 4236854647, 513852113, 2853385416, 1480448529, 3191160267, + 1557868492, 991849235, 1825542523, 1894293861, 4053474607, 2262125726, 627745783, + 1000515697, 1799591565, 1013791827, 3804839120, 2023224998, 2688403318, 1417616716, + 2866722830, 2940017843, 915539855, 2734220401, 342564812, 2952779151, 4218088154, + 2648899870, 2076102840, 1870899819, 3233606562, 3977529001, 2871118793, 2363006167, + 2364533159, 31, + ]), + }; + let exponent = -324 + 1 - 753; + let result = slow::negative_digit_comp::<f64>(bigmant, fp, exponent); + assert_eq!(result.mant, 2); + assert_eq!(result.exp, 0); +} + +#[test] +fn parse_mantissa_test() { + let max_digits = f64::MAX_DIGITS; + + // Large number of digits.
+ let integer = b"2"; + let fraction = b"4703282292062327208828439643411068618252990130716238221279284125033775363510437593264991818081799618989828234772285886546332835517796989819938739800539093906315035659515570226392290858392449105184435931802849936536152500319370457678249219365623669863658480757001585769269903706311928279558551332927834338409351978015531246597263579574622766465272827220056374006485499977096599470454020828166226237857393450736339007967761930577506740176324673600968951340535537458516661134223766678604162159680461914467291840300530057530849048765391711386591646239524912623653881879636239373280423891018672348497668235089863388587925628302755995657524455507255189313690836254779186948667994968324049705821028513185451396213837722826145437693412532098591327667236328124999"; + let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); + let expected = vec_from_u32(&[ + 1727738439, 330069557, 3509095598, 686205316, 156923684, 750687444, 2688855918, 28211928, + 1887482096, 3222998811, 913348873, 1652282845, 1600735541, 1664240266, 84454144, + 1487769792, 1855966778, 2832488299, 507030148, 1410055467, 2513359584, 3453963205, + 779237894, 3456088326, 3671009895, 3094451696, 1250165638, 2682979794, 357925323, + 1713890438, 3271046672, 3485897285, 3934710962, 1813530592, 199705026, 976390839, + 2805488572, 2194288220, 2094065006, 2592523639, 3798974617, 586957244, 1409218821, + 3442050171, 3789534764, 1380190380, 2055222457, 3535299831, 429482276, 389342206, + 133558576, 721875297, 3013586570, 540178306, 2389746866, 2313334501, 422440635, 1288499129, + 864978311, 842263325, 3016323856, 2282442263, 1440906063, 3931458696, 3511314276, + 1884879882, 946366824, 4260548261, 1073379659, 1732329252, 3828972211, 1915607049, + 3665440937, 1844358779, 3735281178, 2646335050, 1457460927, 2940016422, 1051, + ]); + assert_eq!(&*bigmant.data, &*expected); + assert_eq!(count, 755); + + // Truncation. 
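+ // The next input has more than `max_digits` significant digits, so the returned `count` is + // `max_digits + 1` (asserted below), signalling that the parsed mantissa was truncated.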
+ let integer = b"7"; + let fraction = b"4109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375332669816033062329967789262837"; + let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); + let expected = vec_from_u32(&[ + 983641521, 2202462645, 4170685875, 1591772364, 529830014, 803977727, 126733331, 1695971390, + 4089590927, 1532849076, 2705586665, 4046282448, 4076195232, 3230469892, 3059053929, + 79035789, 744229654, 2026438108, 3570486781, 2818088662, 3485839733, 3653138023, + 2857937689, 602717004, 3689362390, 283607819, 1783392475, 2053068939, 1888214698, + 550023429, 296880187, 1046779059, 1285361259, 84614934, 1627922685, 2023868765, 1987523901, + 743493573, 3897769089, 2210613570, 2261081349, 3015057659, 3949711644, 3346092916, + 2433639051, 36411806, 1050442, 269209477, 2649742673, 1494221829, 2763524503, 2514491481, + 2325312415, 1741242814, 2479923579, 1098250122, 2416211509, 3612906464, 403420662, + 3663250314, 1993722098, 365907183, 4270226312, 3962131185, 432952495, 2963635838, + 2996289227, 3200289391, 2753231690, 2780286109, 884373163, 1418533204, 3382415762, + 499541562, 3369625401, 3421327641, 3526770155, 3109983188, 1157439767, 734593155, + ]); + assert_eq!(&*bigmant.data, &*expected); + assert_eq!(count, max_digits + 1); + + // No fraction digits. + let integer = b"74109846876186981626485318930233205854758970392148714663837852375101326090531312779794975454245398856969484704316857659638998506553390969459816219401617281718945106978546710679176872575177347315553307795408549809608457500958111373034747658096871009590975442271004757307809711118935784838675653998783503015228055934046593739791790738723868299395818481660169122019456499931289798411362062484498678713572180352209017023903285791732520220528974020802906854021606612375549983402671300035812486479041385743401875520901590172592547146296175134159774938718574737870961645638908718119841271673056017045493004705269590165763776884908267986972573366521765567941072508764337560846003984904972149117463085539556354188641513168478436313080237596295773983001708984375332669816033062329967789262837"; + let fraction = b""; + let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); + assert_eq!(&*bigmant.data, &*expected); + assert_eq!(count, max_digits + 1); + + // Multiple of step (check we add our temporary correctly). 
+ let integer = b"7410984687618698162648531893023320585475897039214871466383785237510132609053131277979497545424539885696948470431685765963899850655339096945981621940161728171894510697854671067917687257517734731555330779540854980960845750095811137303474765809687100959097544227100475730780971111893578483867565399878350301522805593404659373979179073872386829939581848166016912201945649993128979841136206248449867871357218035220901702390328579173252022052897402080290685402160661237554998340267130003581248647904138574340187552090159017259254714629617513415977493871857473787096164563890871811984127167305601704549300470526959016576377688490826798697257336652176556794107250876433756084600398490497214911746308553955635418864151316847843631308023759629577398300170898437533266981"; + let fraction = b""; + let (bigmant, count) = slow::parse_mantissa(integer.iter(), fraction.iter(), max_digits); + let expected = vec_from_u32(&[ + 617018405, 396211401, 2130402383, 3812547827, 4263683770, 3918012496, 1787721490, + 2493014694, 435464626, 3720854431, 2928509507, 2677932436, 369049650, 3606588290, + 231237141, 2231172875, 3358152367, 95217925, 2777810007, 1016185079, 596681915, 2331711780, + 593487272, 4212730845, 339602972, 4097829793, 262427536, 4182115035, 3414687403, + 3711518952, 4168896929, 483727327, 1657080031, 2785588628, 1009114769, 482126749, + 485376744, 1123705337, 3225501941, 2939050108, 1338451005, 2104263947, 3425461126, + 1834224928, 4061025704, 792093815, 2707019125, 3610271203, 4254101529, 1026215278, + 4117890107, 1748110416, 2535111606, 80965120, 3823822115, 2354910057, 590658512, + 2682089507, 159300272, 1776569442, 3382166479, 3222978591, 540586210, 934713382, + 2014123057, 1455555790, 4119131465, 3685912982, 3019947291, 3437891678, 2660105801, + 2605860762, 394373515, 4177081532, 1616198650, 1580399082, 2017617452, 3327697130, + 315505357, + ]); + assert_eq!(&*bigmant.data, &*expected); + assert_eq!(count, 760); +} diff --git a/third_party/rust/minimal-lexical/tests/stackvec.rs b/third_party/rust/minimal-lexical/tests/stackvec.rs new file mode 100644 index 0000000000..d5587f23f8 --- /dev/null +++ b/third_party/rust/minimal-lexical/tests/stackvec.rs @@ -0,0 +1,32 @@ +use minimal_lexical::bigint; +#[cfg(feature = "alloc")] +pub use minimal_lexical::heapvec::HeapVec as VecType; +#[cfg(not(feature = "alloc"))] +pub use minimal_lexical::stackvec::StackVec as VecType; + +pub fn vec_from_u32(x: &[u32]) -> VecType { + let mut vec = VecType::new(); + #[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] + { + for &xi in x { + vec.try_push(xi as bigint::Limb).unwrap(); + } + } + + #[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] + { + for xi in x.chunks(2) { + match xi.len() { + 1 => vec.try_push(xi[0] as bigint::Limb).unwrap(), + 2 => { + let xi0 = xi[0] as bigint::Limb; + let xi1 = xi[1] as bigint::Limb; + vec.try_push((xi1 << 32) | xi0).unwrap() + }, + _ => unreachable!(), + } + } + } + + vec +} diff --git a/third_party/rust/minimal-lexical/tests/vec_tests.rs b/third_party/rust/minimal-lexical/tests/vec_tests.rs new file mode 100644 index 0000000000..3a5f5886ad --- /dev/null +++ b/third_party/rust/minimal-lexical/tests/vec_tests.rs @@ -0,0 +1,395 @@ +mod stackvec; + +use core::cmp; +use minimal_lexical::bigint; +use stackvec::{vec_from_u32, VecType}; + +// u64::MAX and Limb::MAX for older Rustc versions. 
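+// Only the constant matching the active limb width is used: 64-bit limbs on 64-bit targets +// (except SPARC), 32-bit limbs otherwise, mirroring the `cfg` attributes below.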
+const U64_MAX: u64 = 0xffff_ffff_ffff_ffff; +// LIMB_MAX +#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))] +const LIMB_MAX: u64 = U64_MAX; +#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))] +const LIMB_MAX: u32 = 0xffff_ffff; + +#[test] +fn simple_test() { + // Test the simple properties of the stack vector. + let mut x = VecType::from_u64(1); + assert_eq!(x.len(), 1); + assert_eq!(x.is_empty(), false); + assert_eq!(x.capacity(), bigint::BIGINT_LIMBS); + x.try_push(5).unwrap(); + assert_eq!(x.len(), 2); + assert_eq!(x.pop(), Some(5)); + assert_eq!(x.len(), 1); + assert_eq!(&*x, &[1]); + x.try_extend(&[2, 3, 4]).unwrap(); + assert_eq!(x.len(), 4); + assert_eq!(&*x, &[1, 2, 3, 4]); + x.try_resize(6, 0).unwrap(); + assert_eq!(x.len(), 6); + assert_eq!(&*x, &[1, 2, 3, 4, 0, 0]); + x.try_resize(0, 0).unwrap(); + assert_eq!(x.len(), 0); + assert_eq!(x.is_empty(), true); + + let x = VecType::try_from(&[5, 1]).unwrap(); + assert_eq!(x.len(), 2); + assert_eq!(x.is_empty(), false); + if bigint::LIMB_BITS == 32 { + assert_eq!(x.hi64(), (0x8000000280000000, false)); + } else { + assert_eq!(x.hi64(), (0x8000000000000002, true)); + } + let rview = bigint::rview(&x); + assert_eq!(x[0], 5); + assert_eq!(x[1], 1); + assert_eq!(rview[0], 1); + assert_eq!(rview[1], 5); + assert_eq!(x.len(), 2); + + assert_eq!(VecType::from_u64(U64_MAX).hi64(), (U64_MAX, false)); +} + +#[test] +fn hi64_test() { + assert_eq!(VecType::from_u64(0xA).hi64(), (0xA000000000000000, false)); + assert_eq!(VecType::from_u64(0xAB).hi64(), (0xAB00000000000000, false)); + assert_eq!(VecType::from_u64(0xAB00000000).hi64(), (0xAB00000000000000, false)); + assert_eq!(VecType::from_u64(0xA23456789A).hi64(), (0xA23456789A000000, false)); +} + +#[test] +fn cmp_test() { + // Simple + let x = VecType::from_u64(1); + let y = VecType::from_u64(2); + assert_eq!(x.partial_cmp(&x), Some(cmp::Ordering::Equal)); + assert_eq!(x.cmp(&x), cmp::Ordering::Equal); + assert_eq!(x.cmp(&y), cmp::Ordering::Less); + + // Check asymmetric + let x = VecType::try_from(&[5, 1]).unwrap(); + let y = VecType::from_u64(2); + assert_eq!(x.cmp(&x), cmp::Ordering::Equal); + assert_eq!(x.cmp(&y), cmp::Ordering::Greater); + + // Check when we use reverse ordering properly. + let x = VecType::try_from(&[5, 1, 9]).unwrap(); + let y = VecType::try_from(&[6, 2, 8]).unwrap(); + assert_eq!(x.cmp(&x), cmp::Ordering::Equal); + assert_eq!(x.cmp(&y), cmp::Ordering::Greater); + + // Complex scenario, check it properly uses reverse ordering. 
+ let x = VecType::try_from(&[0, 1, 9]).unwrap(); + let y = VecType::try_from(&[4294967295, 0, 9]).unwrap(); + assert_eq!(x.cmp(&x), cmp::Ordering::Equal); + assert_eq!(x.cmp(&y), cmp::Ordering::Greater); +} + +#[test] +fn math_test() { + let mut x = VecType::try_from(&[0, 1, 9]).unwrap(); + assert_eq!(x.is_normalized(), true); + x.try_push(0).unwrap(); + assert_eq!(&*x, &[0, 1, 9, 0]); + assert_eq!(x.is_normalized(), false); + x.normalize(); + assert_eq!(&*x, &[0, 1, 9]); + assert_eq!(x.is_normalized(), true); + + x.add_small(1); + assert_eq!(&*x, &[1, 1, 9]); + x.add_small(LIMB_MAX); + assert_eq!(&*x, &[0, 2, 9]); + + x.mul_small(3); + assert_eq!(&*x, &[0, 6, 27]); + x.mul_small(LIMB_MAX); + let expected: VecType = if bigint::LIMB_BITS == 32 { + vec_from_u32(&[0, 4294967290, 4294967274, 26]) + } else { + vec_from_u32(&[0, 0, 4294967290, 4294967295, 4294967274, 4294967295, 26]) + }; + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(0xFFFFFFFF); + let y = VecType::from_u64(5); + x *= &y; + let expected: VecType = vec_from_u32(&[0xFFFFFFFB, 0x4]); + assert_eq!(&*x, &*expected); + + // Test with carry + let mut x = VecType::from_u64(1); + assert_eq!(&*x, &[1]); + x.add_small(LIMB_MAX); + assert_eq!(&*x, &[0, 1]); +} + +#[test] +fn scalar_add_test() { + assert_eq!(bigint::scalar_add(5, 5), (10, false)); + assert_eq!(bigint::scalar_add(LIMB_MAX, 1), (0, true)); +} + +#[test] +fn scalar_mul_test() { + assert_eq!(bigint::scalar_mul(5, 5, 0), (25, 0)); + assert_eq!(bigint::scalar_mul(5, 5, 1), (26, 0)); + assert_eq!(bigint::scalar_mul(LIMB_MAX, 2, 0), (LIMB_MAX - 1, 1)); +} + +#[test] +fn small_add_test() { + let mut x = VecType::from_u64(4294967295); + bigint::small_add(&mut x, 5); + let expected: VecType = vec_from_u32(&[4, 1]); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(5); + bigint::small_add(&mut x, 7); + let expected = VecType::from_u64(12); + assert_eq!(&*x, &*expected); + + // Single carry, internal overflow + let mut x = VecType::from_u64(0x80000000FFFFFFFF); + bigint::small_add(&mut x, 7); + let expected: VecType = vec_from_u32(&[6, 0x80000001]); + assert_eq!(&*x, &*expected); + + // Double carry, overflow + let mut x = VecType::from_u64(0xFFFFFFFFFFFFFFFF); + bigint::small_add(&mut x, 7); + let expected: VecType = vec_from_u32(&[6, 0, 1]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn small_mul_test() { + // No overflow check, 1-int. + let mut x = VecType::from_u64(5); + bigint::small_mul(&mut x, 7); + let expected = VecType::from_u64(35); + assert_eq!(&*x, &*expected); + + // No overflow check, 2-ints. + let mut x = VecType::from_u64(0x4000000040000); + bigint::small_mul(&mut x, 5); + let expected: VecType = vec_from_u32(&[0x00140000, 0x140000]); + assert_eq!(&*x, &*expected); + + // Overflow, 1 carry. + let mut x = VecType::from_u64(0x33333334); + bigint::small_mul(&mut x, 5); + let expected: VecType = vec_from_u32(&[4, 1]); + assert_eq!(&*x, &*expected); + + // Overflow, 1 carry, internal. + let mut x = VecType::from_u64(0x133333334); + bigint::small_mul(&mut x, 5); + let expected: VecType = vec_from_u32(&[4, 6]); + assert_eq!(&*x, &*expected); + + // Overflow, 2 carries. 
+ let mut x = VecType::from_u64(0x3333333333333334); + bigint::small_mul(&mut x, 5); + let expected: VecType = vec_from_u32(&[4, 0, 1]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn pow_test() { + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 2); + let expected = VecType::from_u64(25); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 15); + let expected: VecType = vec_from_u32(&[452807053, 7]); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 16); + let expected: VecType = vec_from_u32(&[2264035265, 35]); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 17); + let expected: VecType = vec_from_u32(&[2730241733, 177]); + assert_eq!(&*x, &*expected); + + let mut x = VecType::from_u64(1); + bigint::pow(&mut x, 302); + let expected: VecType = vec_from_u32(&[ + 2443090281, 2149694430, 2297493928, 1584384001, 1279504719, 1930002239, 3312868939, + 3735173465, 3523274756, 2025818732, 1641675015, 2431239749, 4292780461, 3719612855, + 4174476133, 3296847770, 2677357556, 638848153, 2198928114, 3285049351, 2159526706, + 626302612, + ]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn large_add_test() { + // Overflow, both single values + let mut x = VecType::from_u64(4294967295); + let y = VecType::from_u64(5); + bigint::large_add(&mut x, &y); + let expected: VecType = vec_from_u32(&[4, 1]); + assert_eq!(&*x, &*expected); + + // No overflow, single value + let mut x = VecType::from_u64(5); + let y = VecType::from_u64(7); + bigint::large_add(&mut x, &y); + let expected = VecType::from_u64(12); + assert_eq!(&*x, &*expected); + + // Single carry, internal overflow + let mut x = VecType::from_u64(0x80000000FFFFFFFF); + let y = VecType::from_u64(7); + bigint::large_add(&mut x, &y); + let expected: VecType = vec_from_u32(&[6, 0x80000001]); + assert_eq!(&*x, &*expected); + + // 1st overflows, 2nd doesn't. + let mut x = VecType::from_u64(0x7FFFFFFFFFFFFFFF); + let y = VecType::from_u64(0x7FFFFFFFFFFFFFFF); + bigint::large_add(&mut x, &y); + let expected: VecType = vec_from_u32(&[0xFFFFFFFE, 0xFFFFFFFF]); + assert_eq!(&*x, &*expected); + + // Both overflow. + let mut x = VecType::from_u64(0x8FFFFFFFFFFFFFFF); + let y = VecType::from_u64(0x7FFFFFFFFFFFFFFF); + bigint::large_add(&mut x, &y); + let expected: VecType = vec_from_u32(&[0xFFFFFFFE, 0x0FFFFFFF, 1]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn large_mul_test() { + // Test by empty + let mut x = VecType::from_u64(0xFFFFFFFF); + let y = VecType::new(); + bigint::large_mul(&mut x, &y); + let expected = VecType::new(); + assert_eq!(&*x, &*expected); + + // Simple case + let mut x = VecType::from_u64(0xFFFFFFFF); + let y = VecType::from_u64(5); + bigint::large_mul(&mut x, &y); + let expected: VecType = vec_from_u32(&[0xFFFFFFFB, 0x4]); + assert_eq!(&*x, &*expected); + + // Large u32, but still just as easy. + let mut x = VecType::from_u64(0xFFFFFFFF); + let y = VecType::from_u64(0xFFFFFFFE); + bigint::large_mul(&mut x, &y); + let expected: VecType = vec_from_u32(&[0x2, 0xFFFFFFFD]); + assert_eq!(&*x, &*expected); + + // Let's multiply two large values together. 
+ let mut x: VecType = vec_from_u32(&[0xFFFFFFFE, 0x0FFFFFFF, 1]); + let y: VecType = vec_from_u32(&[0x99999999, 0x99999999, 0xCCCD9999, 0xCCCC]); + bigint::large_mul(&mut x, &y); + let expected: VecType = + vec_from_u32(&[0xCCCCCCCE, 0x5CCCCCCC, 0x9997FFFF, 0x33319999, 0x999A7333, 0xD999]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn very_large_mul_test() { + // Test cases that would normally trigger `karatsuba_mul`. + // Karatsuba multiplication was ripped out, but these remain useful + // test cases. + let mut x: VecType = vec_from_u32(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let y: VecType = vec_from_u32(&[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]); + bigint::large_mul(&mut x, &y); + let expected: VecType = vec_from_u32(&[ + 4, 13, 28, 50, 80, 119, 168, 228, 300, 385, 484, 598, 728, 875, 1040, 1224, 1340, 1435, + 1508, 1558, 1584, 1585, 1560, 1508, 1428, 1319, 1180, 1010, 808, 573, 304, + ]); + assert_eq!(&*x, &*expected); + + // Test cases that would normally trigger `karatsuba_uneven_mul`. + let mut x: VecType = vec_from_u32(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); + let y: VecType = vec_from_u32(&[ + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + ]); + bigint::large_mul(&mut x, &y); + let expected: VecType = vec_from_u32(&[ + 4, 13, 28, 50, 80, 119, 168, 228, 300, 385, 484, 598, 728, 875, 1040, 1224, 1360, 1496, + 1632, 1768, 1904, 2040, 2176, 2312, 2448, 2584, 2720, 2856, 2992, 3128, 3264, 3400, 3536, + 3672, 3770, 3829, 3848, 3826, 3762, 3655, 3504, 3308, 3066, 2777, 2440, 2054, 1618, 1131, + 592, + ]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn bit_length_test() { + let x: VecType = vec_from_u32(&[0, 0, 0, 1]); + assert_eq!(bigint::bit_length(&x), 97); + + let x: VecType = vec_from_u32(&[0, 0, 0, 3]); + assert_eq!(bigint::bit_length(&x), 98); + + let x = VecType::from_u64(1 << 31); + assert_eq!(bigint::bit_length(&x), 32); +} + +#[test] +fn shl_bits_test() { + let mut x = VecType::from_u64(0xD2210408); + bigint::shl_bits(&mut x, 5); + let expected: VecType = vec_from_u32(&[0x44208100, 0x1A]); + assert_eq!(&*x, &*expected); +} + +#[test] +fn shl_limbs_test() { + let mut x = VecType::from_u64(0xD2210408); + bigint::shl_limbs(&mut x, 2); + let expected: VecType = if bigint::LIMB_BITS == 32 { + vec_from_u32(&[0, 0, 0xD2210408]) + } else { + vec_from_u32(&[0, 0, 0, 0, 0xD2210408]) + }; + assert_eq!(&*x, &*expected); +} + +#[test] +fn shl_test() { + // Pattern generated via `''.join(["1" +"0"*i for i in range(20)])` + let mut x = VecType::from_u64(0xD2210408); + bigint::shl(&mut x, 5); + let expected: VecType = vec_from_u32(&[0x44208100, 0x1A]); + assert_eq!(&*x, &*expected); + + bigint::shl(&mut x, 32); + let expected: VecType = vec_from_u32(&[0, 0x44208100, 0x1A]); + assert_eq!(&*x, &*expected); + + bigint::shl(&mut x, 27); + let expected: VecType = vec_from_u32(&[0, 0, 0xD2210408]); + assert_eq!(&*x, &*expected); + + // 96-bits of previous pattern + let mut x: VecType = vec_from_u32(&[0x20020010, 0x8040100, 0xD2210408]); + bigint::shl(&mut x, 5); + let expected: VecType = vec_from_u32(&[0x400200, 0x802004, 0x44208101, 0x1A]); + assert_eq!(&*x, &*expected); + + bigint::shl(&mut x, 32); + let expected: VecType = vec_from_u32(&[0, 0x400200, 0x802004, 0x44208101, 0x1A]); + assert_eq!(&*x, &*expected); + + bigint::shl(&mut x, 27); + let expected: VecType = vec_from_u32(&[0, 0, 0x20020010, 0x8040100, 0xD2210408]); + 
assert_eq!(&*x, &*expected); +} -- cgit v1.2.3
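A minimal usage sketch of the helpers exercised above, assuming the same `minimal_lexical::bigint` API and the `vec_from_u32` helper from tests/stackvec.rs shown in this patch; the file placement (another file under tests/) and the test name `small_mul_example` are illustrative, not part of the patch:

mod stackvec;

use minimal_lexical::bigint;
use stackvec::{vec_from_u32, VecType};

#[test]
fn small_mul_example() {
    // 0xFFFFFFFF * 5 = 0x4_FFFF_FFFB, i.e. the little-endian 32-bit words [0xFFFFFFFB, 0x4];
    // vec_from_u32 packs those words into a single 64-bit limb on 64-bit targets.
    let mut x = VecType::from_u64(0xFFFF_FFFF);
    bigint::small_mul(&mut x, 5);
    let expected: VecType = vec_from_u32(&[0xFFFF_FFFB, 0x4]);
    assert_eq!(&*x, &*expected);
}

The same product appears in `large_mul_test` above, so the expected limbs here simply mirror an assertion the suite already makes.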