author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
commit    dc0db358abe19481e475e10c32149b53370f1a1c (patch)
tree      ab8ce99c4b255ce46f99ef402c27916055b899ee /vendor/compiler_builtins
parent    Releasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/compiler_builtins')
-rw-r--r--  vendor/compiler_builtins/.cargo-checksum.json    |   2
-rw-r--r--  vendor/compiler_builtins/Cargo.lock              |   2
-rw-r--r--  vendor/compiler_builtins/Cargo.toml              |   2
-rw-r--r--  vendor/compiler_builtins/README.md               |   4
-rw-r--r--  vendor/compiler_builtins/build.rs                |  61
-rw-r--r--  vendor/compiler_builtins/examples/intrinsics.rs  |   1
-rw-r--r--  vendor/compiler_builtins/src/aarch64_linux.rs    | 277
-rw-r--r--  vendor/compiler_builtins/src/float/cmp.rs        |  14
-rw-r--r--  vendor/compiler_builtins/src/float/conv.rs       |   2
-rw-r--r--  vendor/compiler_builtins/src/float/div.rs        | 857
-rw-r--r--  vendor/compiler_builtins/src/lib.rs              |   4
-rw-r--r--  vendor/compiler_builtins/src/macros.rs           |  20
-rw-r--r--  vendor/compiler_builtins/src/math.rs             |   5
13 files changed, 1032 insertions, 219 deletions
diff --git a/vendor/compiler_builtins/.cargo-checksum.json b/vendor/compiler_builtins/.cargo-checksum.json
index 7f15b6576..82891782c 100644
--- a/vendor/compiler_builtins/.cargo-checksum.json
+++ b/vendor/compiler_builtins/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.lock":"9ecf79f50992d034b2d83daf32ae512b7175916fe9a4b02d0e9b230543cd263c","Cargo.toml":"a5ba02ac8045d9d01708808f8a1d5a74ee82927d2f1c26ada47af280a05da88d","LICENSE.txt":"0e13fed90654e0bc677d624a2d770833a09541fe0c0bdb3d051b3d081207393a","README.md":"5eb36fbab30693dbbe9f0de54749c95bd06fd6e42013b5b9eff3c062b9fdd34f","build.rs":"45ab03c8f5369df7579abb8adc2554a892f415187a384c85832e75abe898d357","examples/intrinsics.rs":"a7aa69c17af3aa8f6edff32c214e80827d3cbe3aea386a2be42244444752d253","libm/src/math/acos.rs":"fb066ba84aba1372d706425ec14f35ff8d971756d15eeebd22ecf42a716493bb","libm/src/math/acosf.rs":"a112b82309bba1d35c4e3d6ad4d6c21ef305343d9ab601ddf4bc61d43bc9f1af","libm/src/math/acosh.rs":"99de01ded7922bb93a882ad5ad8b472b5cae0059dea0bdca2077f65e94483150","libm/src/math/acoshf.rs":"10750c4d39ef6717b20a15ef1ce43e15eb851682d2f820f7e94501adec98b9a5","libm/src/math/asin.rs":"095a1e98996daff45df0b154ca0ec35bbf31db964ee9fdda0207308cb20df441","libm/src/math/asinf.rs":"49cccb4db2881982643a4a7d5453f4f8daf527711bbb67313607a3c178856d61","libm/src/math/asinh.rs":"4dd51affa71cce34a192ad66154e248f8d1c4b40fb497f29052333e425bb740f","libm/src/math/asinhf.rs":"914bfecf449f5e2bce786aa12c056d419073c6011d41c1bab7c39ba765fa4c53","libm/src/math/atan.rs":"d4fe46e1c5739dd09997869dcfbc3c85f03c534af52e700d6c6bcf9c3fedda07","libm/src/math/atan2.rs":"2623bc8ca707d13a7092ce49adf68e9cbf4452ad1bf4a861dc40ca858606a747","libm/src/math/atan2f.rs":"dd01943e0e1f1955912e5c3ffc9467529cf64bd02ac0a6ad5ab31dbe6657f05d","libm/src/math/atanf.rs":"e41b41569474a59c970ede3538e00bda4072cf4d90040017101cc79d7dc28caa","libm/src/math/atanh.rs":"57a8fb3f0f116fa4a966ac6bc2abd5f80236ead8e79013f468bd3786921f7110","libm/src/math/atanhf.rs":"6f2e57aaec1b5fc7609cb3938b3d155f51b4237dbda530739c34a0448cd9beb9","libm/src/math/cbrt.rs":"f2c45612d2eecd93cfcdd9ebf824c754fc8f8dfd6d16862c0b9c4ccea78c2a0f","libm/src/math/cbrtf.rs":"ad0b483854aa9f17a44d36c049bf0e8ebab34c27e90b787c05f45cc230ec7d19","libm/src/math/ceil.rs":"57ba5b6e207a0ccbd34190d1aa544389ca12126be23821dfb5746497f620ce03","libm/src/math/ceilf.rs":"c922a0475a599b9ea5473e615f74700b99707cebd6927f24ea59cb2a3cb3bbc3","libm/src/math/copysign.rs":"8b6440a251f0f1509d87f18122f74d0d5c03d0b60517e89e441434a3c5d84591","libm/src/math/copysignf.rs":"87d35436d224852ada93a2e93f6730cf1a727b808dd10e7d49ab4585866e336b","libm/src/math/cos.rs":"74babdc13ede78e400c5ca1854c3e22d2e08cbdc5618aefa5bba6f9303ef65b6","libm/src/math/cosf.rs":"09c40f93c445b741e22477ceedf163ca33b6a47f973f7c9876cfba2692edb29c","libm/src/math/cosh.rs":"0d0a7cef18577f321996b8b87561963139f754ad7f2ea0a3b3883811f3f0693a","libm/src/math/coshf.rs":"be8ca8739e4cf1978425b349f941cb4838bba8c10cb559c7940b9fd4fdde21ad","libm/src/math/erf.rs":"de69e6669ce1014e5b5086a7a6d01c4755f2f0590e204d2a77bea455764114f7","libm/src/math/erff.rs":"6acdbb07f74296067bb0380b850918cfb5806a89f9ff04352a7a0b921d728944","libm/src/math/exp.rs":"ca7405ad0d1993fffcf9aae96f9256307bed3c4916545aaebd1cf1d2df1807fa","libm/src/math/exp10.rs":"2e136c6ecedd8e57a6c31796f57fae4546fcfd8bc6be66c836f553df9c74b907","libm/src/math/exp10f.rs":"9a3ce506ec587066a355ab74e0eb69a03a214ac405718087ae9772365050b20b","libm/src/math/exp2.rs":"94a9304a2ce3bc81f6d2aefd3cde6faa30f13260d46cb13692863cdea1c9a3a1","libm/src/math/exp2f.rs":"785f2630accd35118ec07bf60273e219ed91a215b956b1552eeea5bc2a708cc8","libm/src/math/expf.rs":"ec14c18f891a9e37735ec39e6fc2e9bf674a2c2e083f22e2533b481177359c98","libm/src/math/expm1.rs":"124069f456c8ad331f265c7509d9e223b2a300e461bbfd3d6adfdcdd2ee5b8ac","libm/src/math/expm1f.
rs":"18e2116d31ea8410051cc709b9d04b754b0e3ba6758ee1bf0b48749f4999b840","libm/src/math/expo2.rs":"4f4f9fecfccb43f30c2784aa7c0bb656754a52b8ab431f7d1b551c673ab133f1","libm/src/math/fabs.rs":"e6c7db39f98508098cdf64ac0c2f53866c466149a7490afb9fe22b44c4dd81b3","libm/src/math/fabsf.rs":"83a1f5f4d9ca899ba2b701d7332e18b40258b83e111db4c5d8fab2cc1be58aa3","libm/src/math/fdim.rs":"8ec091996005207297c2389ae563e1b18dbc6a9eac951de29a976c5cd7bc32a7","libm/src/math/fdimf.rs":"c7f3f2269834d55be26b6580ddc07c42531577955fa4de35bad1e2a361085614","libm/src/math/fenv.rs":"916ae11e4763588518d64dee82afb41be9d1ee38ecc0679c821d4e7e22cd3dc5","libm/src/math/floor.rs":"5050804cae173af6775c0678d6c1aafb5ca2b744bc8a2f50d9d03b95dcee1fb0","libm/src/math/floorf.rs":"c903e0c57bc60a888c513eb7a873a87a4759ba68fc791b6b931652f8ee74cc03","libm/src/math/fma.rs":"d87963472cd5bfcb83eb4010c67f3653857cf28f11378e06d63abae14c723e5d","libm/src/math/fmaf.rs":"1db6ee0d47ddbdb441cfe167edf89b431239f5805708fd0376cf5c01349a4bd6","libm/src/math/fmax.rs":"f6c8e96a8b1a170648d2fa3513e7b6b459085d708c839869f82e305fe58fac37","libm/src/math/fmaxf.rs":"dff0025433232e8a5ec7bd54d847ccf596d762ea4e35f5c54fbaac9404d732fd","libm/src/math/fmin.rs":"95b6cb66ca0e0e22276f0bf88dbe8fb69796a69a196a7491bd4802efbcf2e298","libm/src/math/fminf.rs":"304bc839b15ea3d84e68d2af9f40524ec120d30a36a667b22fcb98a6c258f4c7","libm/src/math/fmod.rs":"a1c0550fc7df8164733d914e222ff0966a2ab886d6e75a1098f24fe0283ae227","libm/src/math/fmodf.rs":"ee51ed092c0eeb8195f35735ff725cfd46612e0d689a7c483538bd92fbe61828","libm/src/math/frexp.rs":"28af70026922a8ab979744c7ad4d8faba6079c4743b7eeb6d14c983a982fbbcc","libm/src/math/frexpf.rs":"2e2593ae8002ba420809ebfaf737ef001cdc912354be3d978a8c0cb930350d4d","libm/src/math/hypot.rs":"841131c4a0cea75bc8a86e29f3f6d0815a61fc99731c9984651ce83d3050d218","libm/src/math/hypotf.rs":"5f317323edc2eb699580fe54b074b7e570a7734d51a0a149c0b49b54470a836c","libm/src/math/ilogb.rs":"d178ad7ca3439f82d565962b143f20448e45b2e2c51357b127abaec683297e32","libm/src/math/ilogbf.rs":"00f2b1b0496e21c6a42d68aea74d7156fa2ff0a735741b9051f3ca1cf0f57586","libm/src/math/j0.rs":"9572b6396c489927d332d0e717920e61ec0618e5e9c31f7eeeec70f5e4abab06","libm/src/math/j0f.rs":"802c8254bded9b3afb6eea8b9af240038a5a4a5d811396729f69ca509e3e7d87","libm/src/math/j1.rs":"97b1af1611fa3d110c2b349ee8e4176100132ea1391b619086b47ac063b81803","libm/src/math/j1f.rs":"9c9b128752e8ea2e7d81b637ba84907ab54a545e7602c49167b313743927930b","libm/src/math/jn.rs":"847d122334e5707ad9627146cddccc082a1f2f5bcd3e5ef54399013a7007ce88","libm/src/math/jnf.rs":"4045076f7d1a1b89882ed60d4dd60a4cbbc66b85cfb90491378c8015effcc476","libm/src/math/k_cos.rs":"f34a69e44d6b8901b03b578a75972f438ab20a7b98a0903fc1903d6fde3899be","libm/src/math/k_cosf.rs":"8f7117ff21cebf8e890a5bcfd7ea858a94172f4172b79a66d53824c2cb0888b1","libm/src/math/k_expo2.rs":"eb4ca9e6a525b7ea6da868c3cb136896682cc46f8396ba2a2ebc3ae9e9ba54b0","libm/src/math/k_expo2f.rs":"d51ad5df61cb5d1258bdb90c52bfed4572bb446a9337de9c04411ed9454ae0cb","libm/src/math/k_sin.rs":"14b2aba6ca07150c92768b5a72acaf5cde6a11d6619e14896512a7ba242e289a","libm/src/math/k_sinf.rs":"2775fcc710807164e6f37a4f8da3c8143cd5f16e19ce7c31c5591522151d7a96","libm/src/math/k_tan.rs":"a72beae4ccd9631eeeb61d6365bbeecae81c8411f3120a999c515cca0d5ea5c5","libm/src/math/k_tanf.rs":"6a794be56fa4b2f60452b9bab19af01c388f174560acbf829a351378ea39495d","libm/src/math/ldexp.rs":"b647f0096e80e4d926d8dd18d294c892ee2cb1778effe2c5e1b2664ae5cb1a4e","libm/src/math/ldexpf.rs":"98743fad2cd97a7be496f40ba3157ac1438fce0d0c25d5ab90c3b8c71c3fd0ed"
,"libm/src/math/lgamma.rs":"0edd18e4f96bfcbe8b1b5af3eeca5208cd6d2d479dfa5ad117c9dfeccecf614f","libm/src/math/lgamma_r.rs":"f44a37aeccd56559ef784ae8edf217d14ad5cc2d910f0a65e70ffc86d7dc23dd","libm/src/math/lgammaf.rs":"967845357758b868a571857ec001f9f9154001110b8e97c08b6d10586bed9c49","libm/src/math/lgammaf_r.rs":"7143016d60e11fa235d53968125e57231b1104ce52149b5e1eed39629e0d1ff0","libm/src/math/log.rs":"b5e0c5f30d9e94351488732801be3107c12b854c3f95ad37e256dd88eeca408f","libm/src/math/log10.rs":"3425ff8be001fd1646ba15e254eb6ef4bdc6ccaf0cbee27ddf1fa84e04178b90","libm/src/math/log10f.rs":"fee4f71879bc4c99259e68c0c641364901629fb29a8ebddfcc0d090102cceddd","libm/src/math/log1p.rs":"9cf400852f165e6be19b97036ae9521fb9ca857d0a9a91c117d9123221622185","libm/src/math/log1pf.rs":"2716e6d2afa271996b7c8f47fd9e4952c88f4c1fd8c07c3e8ce8c62794bf71d8","libm/src/math/log2.rs":"dbbbfbaaa8aa6a4dbefea554ea3983090a9691228b011910c751f6adca912c40","libm/src/math/log2f.rs":"92a90350d8edce21c31c285c3e620fca7c62a2366008921715945c2c73b5b79f","libm/src/math/logf.rs":"845342cffc34d3db1f5ec12d8e5b773cd5a79056e28662fcb9bcd80207596f50","libm/src/math/mod.rs":"d694260529d51d0bc17f88ad557d852b9bb0bc3f7466cf7f62b679dc95ebba42","libm/src/math/modf.rs":"d012ed5a708ef52b6d1313c22a46cadaf5764dde1220816e3df2f03a0fcc60ae","libm/src/math/modff.rs":"f8f1e4c27a85d2cdb3c8e74439d59ef64aa543b948f22c23227d02d8388d61c2","libm/src/math/nextafter.rs":"3282e7eef214a32736fb6928d490198ad394b26b402b45495115b104839eebfe","libm/src/math/nextafterf.rs":"0937dc8a8155c19842c12181e741cec1f7df1f7a00cee81fcb2475e2842761b7","libm/src/math/pow.rs":"17c38297c5bf99accd915f292b777f8716ecf328916297c8bb9dfde6fd8ce522","libm/src/math/powf.rs":"2c423a0ea57fdc4e20f3533f744c6e6288c998b4de8f2914fafaa0e78be81b04","libm/src/math/rem_pio2.rs":"3e53234977daf61c89c29c940791714aad2f676a6f38188c7d17543a2aa8806f","libm/src/math/rem_pio2_large.rs":"482f31ff4e4eacf885f6130ae26a1d59f76b382059d6c742f30e5036811d3ca8","libm/src/math/rem_pio2f.rs":"07fb48f6d5cbadfd32ce4124b2b74af98b8391a2a6f36ce2a7d32e4500cb65ac","libm/src/math/remainder.rs":"63865f4370853c476b45bb27a5c54a4072146aa4a626835ae5263871a4e7e5dc","libm/src/math/remainderf.rs":"dd3fa432dbda8f2135428198be7bd69c57f8d13df3f365b12f52bf6a82352ac4","libm/src/math/remquo.rs":"3cc0bf55069f165c4843f2c358b3a27279c01e8cdd99f9057a3f7f31f45408f2","libm/src/math/remquof.rs":"cc749e18ecb7e766b8b8eeabdbf89ac99087d3d587e71e30f690676a3d2c1f9b","libm/src/math/rint.rs":"2c17047bcfd0ccdca8669f7cf70c628154ae4abc142660f30e37f9c073928706","libm/src/math/rintf.rs":"3b54af9eaa1bb6808159ca435246acf6a4e7aebbc344e3f4a4c5636345155897","libm/src/math/round.rs":"f10797ef15dd34a74e912ba8621d60bc0200c87b94308c9de3cc88d7aec4feb4","libm/src/math/roundf.rs":"27e37cfcf82373709e7debf9c0c18f7ed00ae0f5d97a214c388041f7a6996d35","libm/src/math/scalbn.rs":"b5c9d6d4177fe393cbfe1c634d75ce14b754f6cbce87c5bf979a9661491748a2","libm/src/math/scalbnf.rs":"4f198d06db1896386256fb9a5ac5b805b16b836226c18780a475cf18d7c1449c","libm/src/math/sin.rs":"bb483a2138ca779e03a191222636f0c60fd75a77a2a12f263bda4b6aa9136317","libm/src/math/sincos.rs":"1cf62a16c215e367f51078a3ba23a3f257682032a8f3c657293029a886b18d82","libm/src/math/sincosf.rs":"b0f589e6ada8215944d7784f420c6721c90387d799e349ce7676674f3c475e75","libm/src/math/sinf.rs":"dcddac1d56b084cbb8d0e019433c9c5fe2201d9b257a7dcf2f85c9a8f14b79cf","libm/src/math/sinh.rs":"d8ee4c7af883a526f36c1a6da13bb81fba9181b477e2f2538161a2bee97edc35","libm/src/math/sinhf.rs":"d06eb030ba9dbf7094df127262bfe99f149b4db49fa8ab8c15499660f1e46b26","libm/src/math/sq
rt.rs":"5f3a0a582b174fcfccb9c5274899cb05b664ccb92bf1d42caa58890947b68256","libm/src/math/sqrtf.rs":"da926ac27af6eecdf8b62d8baeefcfe1627110592e44298f6b7f05b7ac12fe7e","libm/src/math/tan.rs":"930ecedaadc60f704c2dfa4e15186f59713c1ba7d948529d215223b424827db5","libm/src/math/tanf.rs":"894156a3b107aee08461eb4e7e412fc049aa237d176ae705c6e3e2d7060d94e3","libm/src/math/tanh.rs":"f1f08eb98ed959a17370a7aaf0177be36e3764543424e78feb033ed3f5e8ec98","libm/src/math/tanhf.rs":"74027b0c672a4e64bdef6d7a3069b90caec50e1e7dbb2c12d2828f310502f41e","libm/src/math/tgamma.rs":"c889cfa49bbeb4dbb0941fe9fac3b4da7d5879dcf04a3b9bb6e56de529baf374","libm/src/math/tgammaf.rs":"0737b34777095d0e4d07fe533e8f105082dd4e8ece411bba6ae5993b45b9388c","libm/src/math/trunc.rs":"642264897cc1505e720c8cf313be81aa9fd53aae866644a2e988d01dbc77fd8a","libm/src/math/truncf.rs":"dee3607baf1af0f01deae46e429e097234c50b268eaefebbe716f19f38597900","src/arm.rs":"3a7a2e21de7f475fbc2765109a18a1301bfb5c7be361c8344dde5ec23e8d7ace","src/arm_linux.rs":"35a4cb7b75015543feb15b0c692da0faf0e6037d3b97a4a18067ba416eae1a70","src/float/add.rs":"3ec32ceaf470a89777b54f9cde61832fdadeade0f4894f268a949e968520bc57","src/float/cmp.rs":"79b1fdc8d5f943c4ad5ea4ad32623b18f63e17ac3852fbc64a4942228007e1fc","src/float/conv.rs":"d95b386e483d2bc77b2d5c41b62d01a8cc791fb3fb18ce97317947ecd5a3c02b","src/float/div.rs":"fe21115ecb1b3330569fd85cb51c650bf80683f152333db988d8e0d564a9ae11","src/float/extend.rs":"180b2e791c58e0526de0a798845c580ce3222c8a15c8665e6e6a4bf5cf1a34aa","src/float/mod.rs":"a91cf65abb6e715c5559e3e4bd87a69cd99a9552d54804d0b7137c02c513f158","src/float/mul.rs":"0d0c1f0c28c149ecadeafd459d3c4c9327e4cfcae2cba479957bb8010ef51a01","src/float/pow.rs":"2ada190738731eb6f24104f8fb8c4d6f03cfb16451536dbee32f2b33db0c4b19","src/float/sub.rs":"c2a87f4628f51d5d908d0f25b5d51ce0599dc559d5a72b20e131261f484d5848","src/float/trunc.rs":"d21d2a2f9a1918b4bbb594691e397972a7c04b74b2acf04016c55693abf6d24b","src/int/addsub.rs":"7ec45ce1ba15b56a5b7129d3e5722c4db764c6545306d3fa9090983bcabd6f17","src/int/leading_zeros.rs":"ccf5e9d098c80034dcf6e38437c9a2eb670fa8043558bbfb574f2293164729a6","src/int/mod.rs":"bab1b77535ceebdebb89fd4e59e7105f8c45347bb638351a626615b24544a0b1","src/int/mul.rs":"bb48d8fd42d8f9f5fe9271d8d0f7a92dbae320bf4346e19d1071eb2093cb8ed9","src/int/sdiv.rs":"ace4cb0ec388a38834e01cab2c5bc87182d31588dfc0b1ae117c11ed0c4781cf","src/int/shift.rs":"ff1e0dab608f2b9b3c68c7dfabd1e9bf6500518f5926a9ce4c1d84178bfe19f4","src/int/specialized_div_rem/asymmetric.rs":"27f5bf70a35109f9d4e4e1ad1e8003aa17da5a1e436bf3e63a493d7528a3a566","src/int/specialized_div_rem/binary_long.rs":"9f1ced81a394f000a21a329683144d68ee431a954136a3634eb55b1ee2cf6d51","src/int/specialized_div_rem/delegate.rs":"9df141af98e391361e25d71ae38d5e845a91d896edd2c041132fd46af8268e85","src/int/specialized_div_rem/mod.rs":"73c98b9f69cc9b101ae4c9081e82d66af1df4a58cf0c9bb2a8c8659265687f12","src/int/specialized_div_rem/norm_shift.rs":"3be7ee0dea545c1f702d9daf67dad2b624bf7b17b075c8b90d3d3f7b53df4c21","src/int/specialized_div_rem/trifecta.rs":"87eef69da255b809fd710b14f2eb3f9f59e3dac625f8564ebc8ba78f9763523b","src/int/udiv.rs":"3732b490a472505411577f008b92f489287745968ce6791665201201377d3475","src/lib.rs":"bbdb98c20ac43df24dcde957d8cfb066abdf09b7d62eb893f4d978fb8ab5332a","src/macros.rs":"85a9c368671803a7cd46365173cf4999b00f8f2b2da4a8834015e98cd3429eed","src/math.rs":"840f609cd634b8ffc05c0ad4d8f0b3ce35d7d4c7e7f009bbadeaa6e0d46c873d","src/mem/impls.rs":"8b389b9aeb43dd55351a86abd4b5fc311f7161e1a4023ca3c5a4c57b49650881","src/mem/mod.rs":"714763d045a2
0e0a68c04f929d14fb3d7b28662dda4a2622970416642af833dc","src/mem/x86_64.rs":"2f29fb392086b3f7e2e78fcfcbf0f0e205822eb4599f1bdf93e41833e1bd2766","src/probestack.rs":"ef5c07e9b95de7b2b77a937789fcfefd9846274317489ad6d623e377c9888601","src/riscv.rs":"50ddd6c732a9f810ab6e15a97b22fdc94cfc1dea09c45d87c833937f9206bee0","src/x86.rs":"117b50d6725ee0af0a7b3d197ea580655561f66a870ebc450d96af22bf7f39f6","src/x86_64.rs":"4f16bc9fad7757d48a6da3a078c715dd3a22154aadb4f1998d4c1b5d91396f9e"},"package":"64518f1ae689f74db058bbfb3238dfe6eb53f59f4ae712f1ff4348628522e190"} \ No newline at end of file
+{"files":{"Cargo.lock":"a93bdfef36f65551ed4fd8a99679c6c9a7661dd3b9236f90a2a1a38f65d93e44","Cargo.toml":"14821c5a0ba22dfb7450dbcccd28d6a864120db28bad293845da3d8c4bab8899","LICENSE.txt":"0e13fed90654e0bc677d624a2d770833a09541fe0c0bdb3d051b3d081207393a","README.md":"693b529db2e5dd13069c05ef2e76e26c1fe1b2a590b07cd8e26dfb2df087ba62","build.rs":"3f2f9589f896c5dc7e7ce39f5809f4f63d1199e5f80ae23b343a7f1d889d0206","examples/intrinsics.rs":"cdf17d36ed38e703e954ccb0d6626037afcef9b036b4a12b1f61b729ca21079a","libm/src/math/acos.rs":"fb066ba84aba1372d706425ec14f35ff8d971756d15eeebd22ecf42a716493bb","libm/src/math/acosf.rs":"a112b82309bba1d35c4e3d6ad4d6c21ef305343d9ab601ddf4bc61d43bc9f1af","libm/src/math/acosh.rs":"99de01ded7922bb93a882ad5ad8b472b5cae0059dea0bdca2077f65e94483150","libm/src/math/acoshf.rs":"10750c4d39ef6717b20a15ef1ce43e15eb851682d2f820f7e94501adec98b9a5","libm/src/math/asin.rs":"095a1e98996daff45df0b154ca0ec35bbf31db964ee9fdda0207308cb20df441","libm/src/math/asinf.rs":"49cccb4db2881982643a4a7d5453f4f8daf527711bbb67313607a3c178856d61","libm/src/math/asinh.rs":"4dd51affa71cce34a192ad66154e248f8d1c4b40fb497f29052333e425bb740f","libm/src/math/asinhf.rs":"914bfecf449f5e2bce786aa12c056d419073c6011d41c1bab7c39ba765fa4c53","libm/src/math/atan.rs":"d4fe46e1c5739dd09997869dcfbc3c85f03c534af52e700d6c6bcf9c3fedda07","libm/src/math/atan2.rs":"2623bc8ca707d13a7092ce49adf68e9cbf4452ad1bf4a861dc40ca858606a747","libm/src/math/atan2f.rs":"dd01943e0e1f1955912e5c3ffc9467529cf64bd02ac0a6ad5ab31dbe6657f05d","libm/src/math/atanf.rs":"e41b41569474a59c970ede3538e00bda4072cf4d90040017101cc79d7dc28caa","libm/src/math/atanh.rs":"57a8fb3f0f116fa4a966ac6bc2abd5f80236ead8e79013f468bd3786921f7110","libm/src/math/atanhf.rs":"6f2e57aaec1b5fc7609cb3938b3d155f51b4237dbda530739c34a0448cd9beb9","libm/src/math/cbrt.rs":"f2c45612d2eecd93cfcdd9ebf824c754fc8f8dfd6d16862c0b9c4ccea78c2a0f","libm/src/math/cbrtf.rs":"ad0b483854aa9f17a44d36c049bf0e8ebab34c27e90b787c05f45cc230ec7d19","libm/src/math/ceil.rs":"57ba5b6e207a0ccbd34190d1aa544389ca12126be23821dfb5746497f620ce03","libm/src/math/ceilf.rs":"c922a0475a599b9ea5473e615f74700b99707cebd6927f24ea59cb2a3cb3bbc3","libm/src/math/copysign.rs":"8b6440a251f0f1509d87f18122f74d0d5c03d0b60517e89e441434a3c5d84591","libm/src/math/copysignf.rs":"87d35436d224852ada93a2e93f6730cf1a727b808dd10e7d49ab4585866e336b","libm/src/math/cos.rs":"74babdc13ede78e400c5ca1854c3e22d2e08cbdc5618aefa5bba6f9303ef65b6","libm/src/math/cosf.rs":"09c40f93c445b741e22477ceedf163ca33b6a47f973f7c9876cfba2692edb29c","libm/src/math/cosh.rs":"0d0a7cef18577f321996b8b87561963139f754ad7f2ea0a3b3883811f3f0693a","libm/src/math/coshf.rs":"be8ca8739e4cf1978425b349f941cb4838bba8c10cb559c7940b9fd4fdde21ad","libm/src/math/erf.rs":"de69e6669ce1014e5b5086a7a6d01c4755f2f0590e204d2a77bea455764114f7","libm/src/math/erff.rs":"6acdbb07f74296067bb0380b850918cfb5806a89f9ff04352a7a0b921d728944","libm/src/math/exp.rs":"ca7405ad0d1993fffcf9aae96f9256307bed3c4916545aaebd1cf1d2df1807fa","libm/src/math/exp10.rs":"2e136c6ecedd8e57a6c31796f57fae4546fcfd8bc6be66c836f553df9c74b907","libm/src/math/exp10f.rs":"9a3ce506ec587066a355ab74e0eb69a03a214ac405718087ae9772365050b20b","libm/src/math/exp2.rs":"94a9304a2ce3bc81f6d2aefd3cde6faa30f13260d46cb13692863cdea1c9a3a1","libm/src/math/exp2f.rs":"785f2630accd35118ec07bf60273e219ed91a215b956b1552eeea5bc2a708cc8","libm/src/math/expf.rs":"ec14c18f891a9e37735ec39e6fc2e9bf674a2c2e083f22e2533b481177359c98","libm/src/math/expm1.rs":"124069f456c8ad331f265c7509d9e223b2a300e461bbfd3d6adfdcdd2ee5b8ac","libm/src/math/expm1f.
rs":"18e2116d31ea8410051cc709b9d04b754b0e3ba6758ee1bf0b48749f4999b840","libm/src/math/expo2.rs":"4f4f9fecfccb43f30c2784aa7c0bb656754a52b8ab431f7d1b551c673ab133f1","libm/src/math/fabs.rs":"e6c7db39f98508098cdf64ac0c2f53866c466149a7490afb9fe22b44c4dd81b3","libm/src/math/fabsf.rs":"83a1f5f4d9ca899ba2b701d7332e18b40258b83e111db4c5d8fab2cc1be58aa3","libm/src/math/fdim.rs":"8ec091996005207297c2389ae563e1b18dbc6a9eac951de29a976c5cd7bc32a7","libm/src/math/fdimf.rs":"c7f3f2269834d55be26b6580ddc07c42531577955fa4de35bad1e2a361085614","libm/src/math/fenv.rs":"916ae11e4763588518d64dee82afb41be9d1ee38ecc0679c821d4e7e22cd3dc5","libm/src/math/floor.rs":"5050804cae173af6775c0678d6c1aafb5ca2b744bc8a2f50d9d03b95dcee1fb0","libm/src/math/floorf.rs":"c903e0c57bc60a888c513eb7a873a87a4759ba68fc791b6b931652f8ee74cc03","libm/src/math/fma.rs":"d87963472cd5bfcb83eb4010c67f3653857cf28f11378e06d63abae14c723e5d","libm/src/math/fmaf.rs":"1db6ee0d47ddbdb441cfe167edf89b431239f5805708fd0376cf5c01349a4bd6","libm/src/math/fmax.rs":"f6c8e96a8b1a170648d2fa3513e7b6b459085d708c839869f82e305fe58fac37","libm/src/math/fmaxf.rs":"dff0025433232e8a5ec7bd54d847ccf596d762ea4e35f5c54fbaac9404d732fd","libm/src/math/fmin.rs":"95b6cb66ca0e0e22276f0bf88dbe8fb69796a69a196a7491bd4802efbcf2e298","libm/src/math/fminf.rs":"304bc839b15ea3d84e68d2af9f40524ec120d30a36a667b22fcb98a6c258f4c7","libm/src/math/fmod.rs":"a1c0550fc7df8164733d914e222ff0966a2ab886d6e75a1098f24fe0283ae227","libm/src/math/fmodf.rs":"ee51ed092c0eeb8195f35735ff725cfd46612e0d689a7c483538bd92fbe61828","libm/src/math/frexp.rs":"28af70026922a8ab979744c7ad4d8faba6079c4743b7eeb6d14c983a982fbbcc","libm/src/math/frexpf.rs":"2e2593ae8002ba420809ebfaf737ef001cdc912354be3d978a8c0cb930350d4d","libm/src/math/hypot.rs":"841131c4a0cea75bc8a86e29f3f6d0815a61fc99731c9984651ce83d3050d218","libm/src/math/hypotf.rs":"5f317323edc2eb699580fe54b074b7e570a7734d51a0a149c0b49b54470a836c","libm/src/math/ilogb.rs":"d178ad7ca3439f82d565962b143f20448e45b2e2c51357b127abaec683297e32","libm/src/math/ilogbf.rs":"00f2b1b0496e21c6a42d68aea74d7156fa2ff0a735741b9051f3ca1cf0f57586","libm/src/math/j0.rs":"9572b6396c489927d332d0e717920e61ec0618e5e9c31f7eeeec70f5e4abab06","libm/src/math/j0f.rs":"802c8254bded9b3afb6eea8b9af240038a5a4a5d811396729f69ca509e3e7d87","libm/src/math/j1.rs":"97b1af1611fa3d110c2b349ee8e4176100132ea1391b619086b47ac063b81803","libm/src/math/j1f.rs":"9c9b128752e8ea2e7d81b637ba84907ab54a545e7602c49167b313743927930b","libm/src/math/jn.rs":"847d122334e5707ad9627146cddccc082a1f2f5bcd3e5ef54399013a7007ce88","libm/src/math/jnf.rs":"4045076f7d1a1b89882ed60d4dd60a4cbbc66b85cfb90491378c8015effcc476","libm/src/math/k_cos.rs":"f34a69e44d6b8901b03b578a75972f438ab20a7b98a0903fc1903d6fde3899be","libm/src/math/k_cosf.rs":"8f7117ff21cebf8e890a5bcfd7ea858a94172f4172b79a66d53824c2cb0888b1","libm/src/math/k_expo2.rs":"eb4ca9e6a525b7ea6da868c3cb136896682cc46f8396ba2a2ebc3ae9e9ba54b0","libm/src/math/k_expo2f.rs":"d51ad5df61cb5d1258bdb90c52bfed4572bb446a9337de9c04411ed9454ae0cb","libm/src/math/k_sin.rs":"14b2aba6ca07150c92768b5a72acaf5cde6a11d6619e14896512a7ba242e289a","libm/src/math/k_sinf.rs":"2775fcc710807164e6f37a4f8da3c8143cd5f16e19ce7c31c5591522151d7a96","libm/src/math/k_tan.rs":"a72beae4ccd9631eeeb61d6365bbeecae81c8411f3120a999c515cca0d5ea5c5","libm/src/math/k_tanf.rs":"6a794be56fa4b2f60452b9bab19af01c388f174560acbf829a351378ea39495d","libm/src/math/ldexp.rs":"b647f0096e80e4d926d8dd18d294c892ee2cb1778effe2c5e1b2664ae5cb1a4e","libm/src/math/ldexpf.rs":"98743fad2cd97a7be496f40ba3157ac1438fce0d0c25d5ab90c3b8c71c3fd0ed"
,"libm/src/math/lgamma.rs":"0edd18e4f96bfcbe8b1b5af3eeca5208cd6d2d479dfa5ad117c9dfeccecf614f","libm/src/math/lgamma_r.rs":"f44a37aeccd56559ef784ae8edf217d14ad5cc2d910f0a65e70ffc86d7dc23dd","libm/src/math/lgammaf.rs":"967845357758b868a571857ec001f9f9154001110b8e97c08b6d10586bed9c49","libm/src/math/lgammaf_r.rs":"7143016d60e11fa235d53968125e57231b1104ce52149b5e1eed39629e0d1ff0","libm/src/math/log.rs":"b5e0c5f30d9e94351488732801be3107c12b854c3f95ad37e256dd88eeca408f","libm/src/math/log10.rs":"3425ff8be001fd1646ba15e254eb6ef4bdc6ccaf0cbee27ddf1fa84e04178b90","libm/src/math/log10f.rs":"fee4f71879bc4c99259e68c0c641364901629fb29a8ebddfcc0d090102cceddd","libm/src/math/log1p.rs":"9cf400852f165e6be19b97036ae9521fb9ca857d0a9a91c117d9123221622185","libm/src/math/log1pf.rs":"2716e6d2afa271996b7c8f47fd9e4952c88f4c1fd8c07c3e8ce8c62794bf71d8","libm/src/math/log2.rs":"dbbbfbaaa8aa6a4dbefea554ea3983090a9691228b011910c751f6adca912c40","libm/src/math/log2f.rs":"92a90350d8edce21c31c285c3e620fca7c62a2366008921715945c2c73b5b79f","libm/src/math/logf.rs":"845342cffc34d3db1f5ec12d8e5b773cd5a79056e28662fcb9bcd80207596f50","libm/src/math/mod.rs":"d694260529d51d0bc17f88ad557d852b9bb0bc3f7466cf7f62b679dc95ebba42","libm/src/math/modf.rs":"d012ed5a708ef52b6d1313c22a46cadaf5764dde1220816e3df2f03a0fcc60ae","libm/src/math/modff.rs":"f8f1e4c27a85d2cdb3c8e74439d59ef64aa543b948f22c23227d02d8388d61c2","libm/src/math/nextafter.rs":"3282e7eef214a32736fb6928d490198ad394b26b402b45495115b104839eebfe","libm/src/math/nextafterf.rs":"0937dc8a8155c19842c12181e741cec1f7df1f7a00cee81fcb2475e2842761b7","libm/src/math/pow.rs":"17c38297c5bf99accd915f292b777f8716ecf328916297c8bb9dfde6fd8ce522","libm/src/math/powf.rs":"2c423a0ea57fdc4e20f3533f744c6e6288c998b4de8f2914fafaa0e78be81b04","libm/src/math/rem_pio2.rs":"3e53234977daf61c89c29c940791714aad2f676a6f38188c7d17543a2aa8806f","libm/src/math/rem_pio2_large.rs":"482f31ff4e4eacf885f6130ae26a1d59f76b382059d6c742f30e5036811d3ca8","libm/src/math/rem_pio2f.rs":"07fb48f6d5cbadfd32ce4124b2b74af98b8391a2a6f36ce2a7d32e4500cb65ac","libm/src/math/remainder.rs":"63865f4370853c476b45bb27a5c54a4072146aa4a626835ae5263871a4e7e5dc","libm/src/math/remainderf.rs":"dd3fa432dbda8f2135428198be7bd69c57f8d13df3f365b12f52bf6a82352ac4","libm/src/math/remquo.rs":"3cc0bf55069f165c4843f2c358b3a27279c01e8cdd99f9057a3f7f31f45408f2","libm/src/math/remquof.rs":"cc749e18ecb7e766b8b8eeabdbf89ac99087d3d587e71e30f690676a3d2c1f9b","libm/src/math/rint.rs":"2c17047bcfd0ccdca8669f7cf70c628154ae4abc142660f30e37f9c073928706","libm/src/math/rintf.rs":"3b54af9eaa1bb6808159ca435246acf6a4e7aebbc344e3f4a4c5636345155897","libm/src/math/round.rs":"f10797ef15dd34a74e912ba8621d60bc0200c87b94308c9de3cc88d7aec4feb4","libm/src/math/roundf.rs":"27e37cfcf82373709e7debf9c0c18f7ed00ae0f5d97a214c388041f7a6996d35","libm/src/math/scalbn.rs":"b5c9d6d4177fe393cbfe1c634d75ce14b754f6cbce87c5bf979a9661491748a2","libm/src/math/scalbnf.rs":"4f198d06db1896386256fb9a5ac5b805b16b836226c18780a475cf18d7c1449c","libm/src/math/sin.rs":"bb483a2138ca779e03a191222636f0c60fd75a77a2a12f263bda4b6aa9136317","libm/src/math/sincos.rs":"1cf62a16c215e367f51078a3ba23a3f257682032a8f3c657293029a886b18d82","libm/src/math/sincosf.rs":"b0f589e6ada8215944d7784f420c6721c90387d799e349ce7676674f3c475e75","libm/src/math/sinf.rs":"dcddac1d56b084cbb8d0e019433c9c5fe2201d9b257a7dcf2f85c9a8f14b79cf","libm/src/math/sinh.rs":"d8ee4c7af883a526f36c1a6da13bb81fba9181b477e2f2538161a2bee97edc35","libm/src/math/sinhf.rs":"d06eb030ba9dbf7094df127262bfe99f149b4db49fa8ab8c15499660f1e46b26","libm/src/math/sq
rt.rs":"5f3a0a582b174fcfccb9c5274899cb05b664ccb92bf1d42caa58890947b68256","libm/src/math/sqrtf.rs":"da926ac27af6eecdf8b62d8baeefcfe1627110592e44298f6b7f05b7ac12fe7e","libm/src/math/tan.rs":"930ecedaadc60f704c2dfa4e15186f59713c1ba7d948529d215223b424827db5","libm/src/math/tanf.rs":"894156a3b107aee08461eb4e7e412fc049aa237d176ae705c6e3e2d7060d94e3","libm/src/math/tanh.rs":"f1f08eb98ed959a17370a7aaf0177be36e3764543424e78feb033ed3f5e8ec98","libm/src/math/tanhf.rs":"74027b0c672a4e64bdef6d7a3069b90caec50e1e7dbb2c12d2828f310502f41e","libm/src/math/tgamma.rs":"c889cfa49bbeb4dbb0941fe9fac3b4da7d5879dcf04a3b9bb6e56de529baf374","libm/src/math/tgammaf.rs":"0737b34777095d0e4d07fe533e8f105082dd4e8ece411bba6ae5993b45b9388c","libm/src/math/trunc.rs":"642264897cc1505e720c8cf313be81aa9fd53aae866644a2e988d01dbc77fd8a","libm/src/math/truncf.rs":"dee3607baf1af0f01deae46e429e097234c50b268eaefebbe716f19f38597900","src/aarch64_linux.rs":"a4bf136ba1624f253132a8588aac438ce23244a16655149320338ac980ad48cc","src/arm.rs":"3a7a2e21de7f475fbc2765109a18a1301bfb5c7be361c8344dde5ec23e8d7ace","src/arm_linux.rs":"35a4cb7b75015543feb15b0c692da0faf0e6037d3b97a4a18067ba416eae1a70","src/float/add.rs":"3ec32ceaf470a89777b54f9cde61832fdadeade0f4894f268a949e968520bc57","src/float/cmp.rs":"5a9d28640f76b3009f0829421f15898932d7d38da123b3e6215415d11221f91a","src/float/conv.rs":"8bf710288f88cfbf67e510f68abbb5a4f7173d2ea9ef32f98d594935fc051641","src/float/div.rs":"9b569e3f40135d4a3bfb59a53b1b65991c1b3cde38c7f9bb0f5597d2a6222151","src/float/extend.rs":"180b2e791c58e0526de0a798845c580ce3222c8a15c8665e6e6a4bf5cf1a34aa","src/float/mod.rs":"a91cf65abb6e715c5559e3e4bd87a69cd99a9552d54804d0b7137c02c513f158","src/float/mul.rs":"0d0c1f0c28c149ecadeafd459d3c4c9327e4cfcae2cba479957bb8010ef51a01","src/float/pow.rs":"2ada190738731eb6f24104f8fb8c4d6f03cfb16451536dbee32f2b33db0c4b19","src/float/sub.rs":"c2a87f4628f51d5d908d0f25b5d51ce0599dc559d5a72b20e131261f484d5848","src/float/trunc.rs":"d21d2a2f9a1918b4bbb594691e397972a7c04b74b2acf04016c55693abf6d24b","src/int/addsub.rs":"7ec45ce1ba15b56a5b7129d3e5722c4db764c6545306d3fa9090983bcabd6f17","src/int/leading_zeros.rs":"ccf5e9d098c80034dcf6e38437c9a2eb670fa8043558bbfb574f2293164729a6","src/int/mod.rs":"bab1b77535ceebdebb89fd4e59e7105f8c45347bb638351a626615b24544a0b1","src/int/mul.rs":"bb48d8fd42d8f9f5fe9271d8d0f7a92dbae320bf4346e19d1071eb2093cb8ed9","src/int/sdiv.rs":"ace4cb0ec388a38834e01cab2c5bc87182d31588dfc0b1ae117c11ed0c4781cf","src/int/shift.rs":"ff1e0dab608f2b9b3c68c7dfabd1e9bf6500518f5926a9ce4c1d84178bfe19f4","src/int/specialized_div_rem/asymmetric.rs":"27f5bf70a35109f9d4e4e1ad1e8003aa17da5a1e436bf3e63a493d7528a3a566","src/int/specialized_div_rem/binary_long.rs":"9f1ced81a394f000a21a329683144d68ee431a954136a3634eb55b1ee2cf6d51","src/int/specialized_div_rem/delegate.rs":"9df141af98e391361e25d71ae38d5e845a91d896edd2c041132fd46af8268e85","src/int/specialized_div_rem/mod.rs":"73c98b9f69cc9b101ae4c9081e82d66af1df4a58cf0c9bb2a8c8659265687f12","src/int/specialized_div_rem/norm_shift.rs":"3be7ee0dea545c1f702d9daf67dad2b624bf7b17b075c8b90d3d3f7b53df4c21","src/int/specialized_div_rem/trifecta.rs":"87eef69da255b809fd710b14f2eb3f9f59e3dac625f8564ebc8ba78f9763523b","src/int/udiv.rs":"3732b490a472505411577f008b92f489287745968ce6791665201201377d3475","src/lib.rs":"0a8a36f51f093f3063aebcbd7a5dc3d7d67efab598f246ba1fe5ac323c0c08ed","src/macros.rs":"a1872f50851bfcb822c1e90a98baa20f38cf1f470124b8213eebd37a92de7c37","src/math.rs":"84c616b3a8504594266a758719074e4b5406337b696b534495c123d56ec0326e","src/mem/impls.rs":"8b389b
9aeb43dd55351a86abd4b5fc311f7161e1a4023ca3c5a4c57b49650881","src/mem/mod.rs":"714763d045a20e0a68c04f929d14fb3d7b28662dda4a2622970416642af833dc","src/mem/x86_64.rs":"2f29fb392086b3f7e2e78fcfcbf0f0e205822eb4599f1bdf93e41833e1bd2766","src/probestack.rs":"ef5c07e9b95de7b2b77a937789fcfefd9846274317489ad6d623e377c9888601","src/riscv.rs":"50ddd6c732a9f810ab6e15a97b22fdc94cfc1dea09c45d87c833937f9206bee0","src/x86.rs":"117b50d6725ee0af0a7b3d197ea580655561f66a870ebc450d96af22bf7f39f6","src/x86_64.rs":"4f16bc9fad7757d48a6da3a078c715dd3a22154aadb4f1998d4c1b5d91396f9e"},"package":"6866e0f3638013234db3c89ead7a14d278354338e7237257407500009012b23f"} \ No newline at end of file
diff --git a/vendor/compiler_builtins/Cargo.lock b/vendor/compiler_builtins/Cargo.lock
index 295e480ec..14025dbbf 100644
--- a/vendor/compiler_builtins/Cargo.lock
+++ b/vendor/compiler_builtins/Cargo.lock
@@ -10,7 +10,7 @@ checksum = "7db2f146208d7e0fbee761b09cd65a7f51ccc38705d4e7262dad4d73b12a76b1"
[[package]]
name = "compiler_builtins"
-version = "0.1.92"
+version = "0.1.95"
dependencies = [
"cc",
"rustc-std-workspace-core",
diff --git a/vendor/compiler_builtins/Cargo.toml b/vendor/compiler_builtins/Cargo.toml
index c0ea1fd6e..ff4c0328d 100644
--- a/vendor/compiler_builtins/Cargo.toml
+++ b/vendor/compiler_builtins/Cargo.toml
@@ -11,7 +11,7 @@
[package]
name = "compiler_builtins"
-version = "0.1.92"
+version = "0.1.95"
authors = ["Jorge Aparicio <japaricious@gmail.com>"]
links = "compiler-rt"
include = [
diff --git a/vendor/compiler_builtins/README.md b/vendor/compiler_builtins/README.md
index 8b25558a8..da0adbce7 100644
--- a/vendor/compiler_builtins/README.md
+++ b/vendor/compiler_builtins/README.md
@@ -59,8 +59,8 @@ features = ["c"]
5. Once the PR passes our extensive [testing infrastructure][4], we'll merge it!
6. Celebrate :tada:
-[1]: https://github.com/rust-lang/compiler-rt/tree/8598065bd965d9713bfafb6c1e766d63a7b17b89/test/builtins/Unit
-[2]: https://github.com/rust-lang/compiler-rt/tree/8598065bd965d9713bfafb6c1e766d63a7b17b89/lib/builtins
+[1]: https://github.com/rust-lang/llvm-project/tree/9e3de9490ff580cd484fbfa2908292b4838d56e7/compiler-rt/test/builtins/Unit
+[2]: https://github.com/rust-lang/llvm-project/tree/9e3de9490ff580cd484fbfa2908292b4838d56e7/compiler-rt/lib/builtins
[3]: https://github.com/rust-lang/compiler-builtins/blob/0ba07e49264a54cb5bbd4856fcea083bb3fbec15/build.rs#L180-L265
[4]: https://travis-ci.org/rust-lang/compiler-builtins
diff --git a/vendor/compiler_builtins/build.rs b/vendor/compiler_builtins/build.rs
index 766dec05d..4549d0b4f 100644
--- a/vendor/compiler_builtins/build.rs
+++ b/vendor/compiler_builtins/build.rs
@@ -1,4 +1,4 @@
-use std::env;
+use std::{collections::HashMap, env, sync::atomic::Ordering};
fn main() {
println!("cargo:rerun-if-changed=build.rs");
@@ -90,6 +90,65 @@ fn main() {
{
println!("cargo:rustc-cfg=kernel_user_helpers")
}
+
+ if llvm_target[0] == "aarch64" {
+ generate_aarch64_outlined_atomics();
+ }
+}
+
+fn aarch64_symbol(ordering: Ordering) -> &'static str {
+ match ordering {
+ Ordering::Relaxed => "relax",
+ Ordering::Acquire => "acq",
+ Ordering::Release => "rel",
+ Ordering::AcqRel => "acq_rel",
+ _ => panic!("unknown symbol for {:?}", ordering),
+ }
+}
+
+/// The `concat_idents` macro is extremely annoying and doesn't allow us to define new items.
+/// Define them from the build script instead.
+/// Note that the majority of the code is still defined in `aarch64.rs` through inline macros.
+fn generate_aarch64_outlined_atomics() {
+ use std::fmt::Write;
+ // #[macro_export] so that we can use this in tests
+ let gen_macro =
+ |name| format!("#[macro_export] macro_rules! foreach_{name} {{ ($macro:path) => {{\n");
+
+ // Generate different macros for add/clr/eor/set so that we can test them separately.
+ let sym_names = ["cas", "ldadd", "ldclr", "ldeor", "ldset", "swp"];
+ let mut macros = HashMap::new();
+ for sym in sym_names {
+ macros.insert(sym, gen_macro(sym));
+ }
+
+ // Only CAS supports 16 bytes, and it has a separate implementation driven by its own macro.
+ let mut cas16 = gen_macro("cas16");
+
+ for ordering in [
+ Ordering::Relaxed,
+ Ordering::Acquire,
+ Ordering::Release,
+ Ordering::AcqRel,
+ ] {
+ let sym_ordering = aarch64_symbol(ordering);
+ for size in [1, 2, 4, 8] {
+ for (sym, macro_) in &mut macros {
+ let name = format!("__aarch64_{sym}{size}_{sym_ordering}");
+ writeln!(macro_, "$macro!( {ordering:?}, {size}, {name} );").unwrap();
+ }
+ }
+ let name = format!("__aarch64_cas16_{sym_ordering}");
+ writeln!(cas16, "$macro!( {ordering:?}, {name} );").unwrap();
+ }
+
+ let mut buf = String::new();
+ for macro_def in macros.values().chain(std::iter::once(&cas16)) {
+ buf += macro_def;
+ buf += "}; }";
+ }
+ let dst = std::env::var("OUT_DIR").unwrap() + "/outlined_atomics.rs";
+ std::fs::write(dst, buf).unwrap();
}
#[cfg(feature = "c")]
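For reference, the `OUT_DIR/outlined_atomics.rs` file produced by `generate_aarch64_outlined_atomics` should look roughly like this for the `cas` family (a sketch inferred from the format strings above, not the literal generated file; `cas16` gets its own macro whose invocations omit the size argument):

    // Sketch, assuming the __aarch64_<sym><size>_<ordering> naming scheme above.
    #[macro_export] macro_rules! foreach_cas { ($macro:path) => {
        $macro!( Relaxed, 1, __aarch64_cas1_relax );
        $macro!( Relaxed, 2, __aarch64_cas2_relax );
        $macro!( Relaxed, 4, __aarch64_cas4_relax );
        $macro!( Relaxed, 8, __aarch64_cas8_relax );
        $macro!( Acquire, 1, __aarch64_cas1_acq );
        // ...sizes 2-8 again for Acquire, Release and AcqRel...
    }; }
    #[macro_export] macro_rules! foreach_cas16 { ($macro:path) => {
        $macro!( Relaxed, __aarch64_cas16_relax );
        // ...acq, rel, acq_rel...
    }; }

`src/aarch64_linux.rs` below includes this generated file and feeds each `foreach_*` macro a definition macro such as `compare_and_swap`.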
diff --git a/vendor/compiler_builtins/examples/intrinsics.rs b/vendor/compiler_builtins/examples/intrinsics.rs
index 0ca30c215..19bb569b5 100644
--- a/vendor/compiler_builtins/examples/intrinsics.rs
+++ b/vendor/compiler_builtins/examples/intrinsics.rs
@@ -4,6 +4,7 @@
// to link due to the missing intrinsic (symbol).
#![allow(unused_features)]
+#![allow(stable_features)] // bench_black_box feature is stable, leaving for backcompat
#![cfg_attr(thumb, no_main)]
#![deny(dead_code)]
#![feature(bench_black_box)]
diff --git a/vendor/compiler_builtins/src/aarch64_linux.rs b/vendor/compiler_builtins/src/aarch64_linux.rs
new file mode 100644
index 000000000..62144e531
--- /dev/null
+++ b/vendor/compiler_builtins/src/aarch64_linux.rs
@@ -0,0 +1,277 @@
+//! Aarch64 targets have two possible implementations for atomics:
+//! 1. Load-Locked, Store-Conditional (LL/SC), older and slower.
+//! 2. Large System Extensions (LSE), newer and faster.
+//! To avoid breaking backwards compat, C toolchains introduced a concept of "outlined atomics",
+//! where atomic operations call into the compiler runtime to dispatch between the two depending on
+//! which is supported on the current CPU.
+//! See https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10#:~:text=out%20of%20line%20atomics for more discussion.
+//!
+//! Currently we only support LL/SC, because LSE requires `getauxval` from libc in order to do runtime detection.
+//! Use the `compiler-rt` intrinsics if you want LSE support.
+//!
+//! Ported from `aarch64/lse.S` in LLVM's compiler-rt.
+//!
+//! Generate functions for each of the following symbols:
+//! __aarch64_casM_ORDER
+//! __aarch64_swpN_ORDER
+//! __aarch64_ldaddN_ORDER
+//! __aarch64_ldclrN_ORDER
+//! __aarch64_ldeorN_ORDER
+//! __aarch64_ldsetN_ORDER
+//! for N = {1, 2, 4, 8}, M = {1, 2, 4, 8, 16}, ORDER = { relax, acq, rel, acq_rel }
+//!
+//! The original `lse.S` has some truly horrifying code that expects to be compiled multiple times with different constants.
+//! We do something similar, but with macro arguments.
+#![cfg_attr(feature = "c", allow(unused_macros))] // avoid putting the macros into a submodule
+
+// We don't do runtime dispatch so we don't have to worry about the `__aarch64_have_lse_atomics` global ctor.
+
+/// Translate a byte size to a Rust type.
+#[rustfmt::skip]
+macro_rules! int_ty {
+ (1) => { i8 };
+ (2) => { i16 };
+ (4) => { i32 };
+ (8) => { i64 };
+ (16) => { i128 };
+}
+
+/// Given a byte size and a register number, return a register of the appropriate size.
+///
+/// See <https://developer.arm.com/documentation/102374/0101/Registers-in-AArch64---general-purpose-registers>.
+#[rustfmt::skip]
+macro_rules! reg {
+ (1, $num:literal) => { concat!("w", $num) };
+ (2, $num:literal) => { concat!("w", $num) };
+ (4, $num:literal) => { concat!("w", $num) };
+ (8, $num:literal) => { concat!("x", $num) };
+}
+
+/// Given an atomic ordering, translate it to the acquire suffix for the ldxr aarch64 ASM instruction.
+#[rustfmt::skip]
+macro_rules! acquire {
+ (Relaxed) => { "" };
+ (Acquire) => { "a" };
+ (Release) => { "" };
+ (AcqRel) => { "a" };
+}
+
+/// Given an atomic ordering, translate it to the release suffix for the stxr aarch64 ASM instruction.
+#[rustfmt::skip]
+macro_rules! release {
+ (Relaxed) => { "" };
+ (Acquire) => { "" };
+ (Release) => { "l" };
+ (AcqRel) => { "l" };
+}
+
+/// Given a size in bytes, translate it to the byte suffix for an aarch64 ASM instruction.
+#[rustfmt::skip]
+macro_rules! size {
+ (1) => { "b" };
+ (2) => { "h" };
+ (4) => { "" };
+ (8) => { "" };
+ (16) => { "" };
+}
+
+/// Given a byte size, translate it to an Unsigned eXTend instruction
+/// with the correct semantics.
+///
+/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/UXTB--Unsigned-Extend-Byte--an-alias-of-UBFM->
+#[rustfmt::skip]
+macro_rules! uxt {
+ (1) => { "uxtb" };
+ (2) => { "uxth" };
+ ($_:tt) => { "mov" };
+}
+
+/// Given an atomic ordering and byte size, translate it to a LoaD eXclusive Register instruction
+/// with the correct semantics.
+///
+/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/LDXR--Load-Exclusive-Register->.
+macro_rules! ldxr {
+ ($ordering:ident, $bytes:tt) => {
+ concat!("ld", acquire!($ordering), "xr", size!($bytes))
+ };
+}
+
+/// Given an atomic ordering and byte size, translate it to a STore eXclusive Register instruction
+/// with the correct semantics.
+///
+/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/STXR--Store-Exclusive-Register->.
+macro_rules! stxr {
+ ($ordering:ident, $bytes:tt) => {
+ concat!("st", release!($ordering), "xr", size!($bytes))
+ };
+}
+
+/// Given an atomic ordering, translate it to a LoaD eXclusive Pair of registers instruction
+/// with the correct semantics.
+///
+/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/LDXP--Load-Exclusive-Pair-of-Registers->
+macro_rules! ldxp {
+ ($ordering:ident) => {
+ concat!("ld", acquire!($ordering), "xp")
+ };
+}
+
+/// Given an atomic ordering, translate it to a STore eXclusive Pair of registers instruction
+/// with the correct semantics.
+///
+/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/STXP--Store-Exclusive-Pair-of-registers->.
+macro_rules! stxp {
+ ($ordering:ident) => {
+ concat!("st", release!($ordering), "xp")
+ };
+}
+
+/// See <https://doc.rust-lang.org/stable/std/sync/atomic/struct.AtomicI8.html#method.compare_and_swap>.
+macro_rules! compare_and_swap {
+ ($ordering:ident, $bytes:tt, $name:ident) => {
+ intrinsics! {
+ #[maybe_use_optimized_c_shim]
+ #[naked]
+ pub unsafe extern "C" fn $name (
+ expected: int_ty!($bytes), desired: int_ty!($bytes), ptr: *mut int_ty!($bytes)
+ ) -> int_ty!($bytes) {
+ // We can't use `AtomicI8::compare_and_swap`; we *are* compare_and_swap.
+ unsafe { core::arch::asm! {
+ // UXT s(tmp0), s(0)
+ concat!(uxt!($bytes), " ", reg!($bytes, 16), ", ", reg!($bytes, 0)),
+ "0:",
+ // LDXR s(0), [x2]
+ concat!(ldxr!($ordering, $bytes), " ", reg!($bytes, 0), ", [x2]"),
+ // cmp s(0), s(tmp0)
+ concat!("cmp ", reg!($bytes, 0), ", ", reg!($bytes, 16)),
+ "bne 1f",
+ // STXR w(tmp1), s(1), [x2]
+ concat!(stxr!($ordering, $bytes), " w17, ", reg!($bytes, 1), ", [x2]"),
+ "cbnz w17, 0b",
+ "1:",
+ "ret",
+ options(noreturn)
+ } }
+ }
+ }
+ };
+}
+
+// i128 uses a completely different impl, so it has its own macro.
+macro_rules! compare_and_swap_i128 {
+ ($ordering:ident, $name:ident) => {
+ intrinsics! {
+ #[maybe_use_optimized_c_shim]
+ #[naked]
+ pub unsafe extern "C" fn $name (
+ expected: i128, desired: i128, ptr: *mut i128
+ ) -> i128 {
+ unsafe { core::arch::asm! {
+ "mov x16, x0",
+ "mov x17, x1",
+ "0:",
+ // LDXP x0, x1, [x4]
+ concat!(ldxp!($ordering), " x0, x1, [x4]"),
+ "cmp x0, x16",
+ "ccmp x1, x17, #0, eq",
+ "bne 1f",
+ // STXP w(tmp2), x2, x3, [x4]
+ concat!(stxp!($ordering), " w15, x2, x3, [x4]"),
+ "cbnz w15, 0b",
+ "1:",
+ "ret",
+ options(noreturn)
+ } }
+ }
+ }
+ };
+}
+
+/// See <https://doc.rust-lang.org/stable/std/sync/atomic/struct.AtomicI8.html#method.swap>.
+macro_rules! swap {
+ ($ordering:ident, $bytes:tt, $name:ident) => {
+ intrinsics! {
+ #[maybe_use_optimized_c_shim]
+ #[naked]
+ pub unsafe extern "C" fn $name (
+ left: int_ty!($bytes), right_ptr: *mut int_ty!($bytes)
+ ) -> int_ty!($bytes) {
+ unsafe { core::arch::asm! {
+ // mov s(tmp0), s(0)
+ concat!("mov ", reg!($bytes, 16), ", ", reg!($bytes, 0)),
+ "0:",
+ // LDXR s(0), [x1]
+ concat!(ldxr!($ordering, $bytes), " ", reg!($bytes, 0), ", [x1]"),
+ // STXR w(tmp1), s(tmp0), [x1]
+ concat!(stxr!($ordering, $bytes), " w17, ", reg!($bytes, 16), ", [x1]"),
+ "cbnz w17, 0b",
+ "ret",
+ options(noreturn)
+ } }
+ }
+ }
+ };
+}
+
+/// See (e.g.) <https://doc.rust-lang.org/stable/std/sync/atomic/struct.AtomicI8.html#method.fetch_add>.
+macro_rules! fetch_op {
+ ($ordering:ident, $bytes:tt, $name:ident, $op:literal) => {
+ intrinsics! {
+ #[maybe_use_optimized_c_shim]
+ #[naked]
+ pub unsafe extern "C" fn $name (
+ val: int_ty!($bytes), ptr: *mut int_ty!($bytes)
+ ) -> int_ty!($bytes) {
+ unsafe { core::arch::asm! {
+ // mov s(tmp0), s(0)
+ concat!("mov ", reg!($bytes, 16), ", ", reg!($bytes, 0)),
+ "0:",
+ // LDXR s(0), [x1]
+ concat!(ldxr!($ordering, $bytes), " ", reg!($bytes, 0), ", [x1]"),
+ // OP s(tmp1), s(0), s(tmp0)
+ concat!($op, " ", reg!($bytes, 17), ", ", reg!($bytes, 0), ", ", reg!($bytes, 16)),
+ // STXR w(tmp2), s(tmp1), [x1]
+ concat!(stxr!($ordering, $bytes), " w15, ", reg!($bytes, 17), ", [x1]"),
+ "cbnz w15, 0b",
+ "ret",
+ options(noreturn)
+ } }
+ }
+ }
+ }
+}
+
+// We need a single macro to pass to `foreach_ldadd`.
+macro_rules! add {
+ ($ordering:ident, $bytes:tt, $name:ident) => {
+ fetch_op! { $ordering, $bytes, $name, "add" }
+ };
+}
+
+macro_rules! and {
+ ($ordering:ident, $bytes:tt, $name:ident) => {
+ fetch_op! { $ordering, $bytes, $name, "bic" }
+ };
+}
+
+macro_rules! xor {
+ ($ordering:ident, $bytes:tt, $name:ident) => {
+ fetch_op! { $ordering, $bytes, $name, "eor" }
+ };
+}
+
+macro_rules! or {
+ ($ordering:ident, $bytes:tt, $name:ident) => {
+ fetch_op! { $ordering, $bytes, $name, "orr" }
+ };
+}
+
+// See `generate_aarch64_outlined_atomics` in build.rs.
+include!(concat!(env!("OUT_DIR"), "/outlined_atomics.rs"));
+foreach_cas!(compare_and_swap);
+foreach_cas16!(compare_and_swap_i128);
+foreach_swp!(swap);
+foreach_ldadd!(add);
+foreach_ldclr!(and);
+foreach_ldeor!(xor);
+foreach_ldset!(or);
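To make the macro plumbing concrete: expanding `compare_and_swap!(Acquire, 4, __aarch64_cas4_acq)` by hand, using the `uxt!`/`reg!`/`ldxr!`/`stxr!` tables above (uxt!(4) = "mov", reg!(4, n) = "wn", ldxr!(Acquire, 4) = "ldaxr", stxr!(Acquire, 4) = "stxr", since only the load carries the acquire suffix), gives approximately the naked function below. This is an illustration of the expansion, not extra patch content; the real expansion also goes through `intrinsics!` and `#[maybe_use_optimized_c_shim]`.

    #[naked]
    pub unsafe extern "C" fn __aarch64_cas4_acq(
        expected: i32, desired: i32, ptr: *mut i32,
    ) -> i32 {
        core::arch::asm!(
            "mov w16, w0",        // tmp0 = expected
            "0:",
            "ldaxr w0, [x2]",     // load-acquire exclusive: w0 = *ptr
            "cmp w0, w16",
            "bne 1f",             // mismatch: return the value we observed
            "stxr w17, w1, [x2]", // try to store desired
            "cbnz w17, 0b",       // exclusive store failed, retry
            "1:",
            "ret",
            options(noreturn)
        )
    }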
diff --git a/vendor/compiler_builtins/src/float/cmp.rs b/vendor/compiler_builtins/src/float/cmp.rs
index 1d4e38433..1bd7aa284 100644
--- a/vendor/compiler_builtins/src/float/cmp.rs
+++ b/vendor/compiler_builtins/src/float/cmp.rs
@@ -99,60 +99,74 @@ fn unord<F: Float>(a: F, b: F) -> bool {
}
intrinsics! {
+ #[avr_skip]
pub extern "C" fn __lesf2(a: f32, b: f32) -> i32 {
cmp(a, b).to_le_abi()
}
+ #[avr_skip]
pub extern "C" fn __gesf2(a: f32, b: f32) -> i32 {
cmp(a, b).to_ge_abi()
}
+ #[avr_skip]
#[arm_aeabi_alias = __aeabi_fcmpun]
pub extern "C" fn __unordsf2(a: f32, b: f32) -> i32 {
unord(a, b) as i32
}
+ #[avr_skip]
pub extern "C" fn __eqsf2(a: f32, b: f32) -> i32 {
cmp(a, b).to_le_abi()
}
+ #[avr_skip]
pub extern "C" fn __ltsf2(a: f32, b: f32) -> i32 {
cmp(a, b).to_le_abi()
}
+ #[avr_skip]
pub extern "C" fn __nesf2(a: f32, b: f32) -> i32 {
cmp(a, b).to_le_abi()
}
+ #[avr_skip]
pub extern "C" fn __gtsf2(a: f32, b: f32) -> i32 {
cmp(a, b).to_ge_abi()
}
+ #[avr_skip]
pub extern "C" fn __ledf2(a: f64, b: f64) -> i32 {
cmp(a, b).to_le_abi()
}
+ #[avr_skip]
pub extern "C" fn __gedf2(a: f64, b: f64) -> i32 {
cmp(a, b).to_ge_abi()
}
+ #[avr_skip]
#[arm_aeabi_alias = __aeabi_dcmpun]
pub extern "C" fn __unorddf2(a: f64, b: f64) -> i32 {
unord(a, b) as i32
}
+ #[avr_skip]
pub extern "C" fn __eqdf2(a: f64, b: f64) -> i32 {
cmp(a, b).to_le_abi()
}
+ #[avr_skip]
pub extern "C" fn __ltdf2(a: f64, b: f64) -> i32 {
cmp(a, b).to_le_abi()
}
+ #[avr_skip]
pub extern "C" fn __nedf2(a: f64, b: f64) -> i32 {
cmp(a, b).to_le_abi()
}
+ #[avr_skip]
pub extern "C" fn __gtdf2(a: f64, b: f64) -> i32 {
cmp(a, b).to_ge_abi()
}
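These `__*sf2`/`__*df2` symbols follow the standard compiler-rt soft-float comparison ABI: for the "le"-class entry points the result is less than or equal to zero exactly when a <= b, and positive when a > b or either operand is NaN. A caller-side sketch of how `a <= b` is lowered on targets without hardware float comparisons (illustrative, with the semantics assumed as just stated):

    extern "C" {
        fn __lesf2(a: f32, b: f32) -> i32;
    }
    // `a <= b` compiles to a libcall plus an integer comparison.
    fn le(a: f32, b: f32) -> bool {
        unsafe { __lesf2(a, b) <= 0 }
    }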
diff --git a/vendor/compiler_builtins/src/float/conv.rs b/vendor/compiler_builtins/src/float/conv.rs
index a27d542fa..790c0ab9f 100644
--- a/vendor/compiler_builtins/src/float/conv.rs
+++ b/vendor/compiler_builtins/src/float/conv.rs
@@ -3,7 +3,7 @@
/// This is hand-optimized bit-twiddling code,
/// which unfortunately isn't the easiest kind of code to read.
///
-/// The algorithm is explained here: https://blog.m-ou.se/floats/
+/// The algorithm is explained here: <https://blog.m-ou.se/floats/>
mod int_to_float {
pub fn u32_to_f32_bits(i: u32) -> u32 {
if i == 0 {
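The linked post covers the algorithm in depth; as a quick anchor for what `u32_to_f32_bits` computes, its output is the IEEE 754 bit pattern of the correctly rounded conversion, so it must agree bit-for-bit with a native cast (an illustrative check, assuming access to the private `int_to_float` module):

    fn check(i: u32) {
        // Round-to-nearest-even kicks in for inputs above 2^24.
        assert_eq!(int_to_float::u32_to_f32_bits(i), (i as f32).to_bits());
    }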
diff --git a/vendor/compiler_builtins/src/float/div.rs b/vendor/compiler_builtins/src/float/div.rs
index c2d6c07e7..c0aae34fb 100644
--- a/vendor/compiler_builtins/src/float/div.rs
+++ b/vendor/compiler_builtins/src/float/div.rs
@@ -12,11 +12,17 @@ where
i32: CastInto<F::Int>,
F::Int: CastInto<i32>,
F::Int: HInt,
+ <F as Float>::Int: core::ops::Mul,
{
+ const NUMBER_OF_HALF_ITERATIONS: usize = 0;
+ const NUMBER_OF_FULL_ITERATIONS: usize = 3;
+ const USE_NATIVE_FULL_ITERATIONS: bool = true;
+
let one = F::Int::ONE;
let zero = F::Int::ZERO;
+ let hw = F::BITS / 2;
+ let lo_mask = u32::MAX >> hw;
- // let bits = F::BITS;
let significand_bits = F::SIGNIFICAND_BITS;
let max_exponent = F::EXPONENT_MAX;
@@ -109,101 +115,341 @@ where
}
}
- // Or in the implicit significand bit. (If we fell through from the
+ // Set the implicit significand bit. If we fell through from the
// denormal path it was already set by normalize( ), but setting it twice
- // won't hurt anything.)
+ // won't hurt anything.
a_significand |= implicit_bit;
b_significand |= implicit_bit;
- let mut quotient_exponent: i32 = CastInto::<i32>::cast(a_exponent)
- .wrapping_sub(CastInto::<i32>::cast(b_exponent))
- .wrapping_add(scale);
-
- // Align the significand of b as a Q31 fixed-point number in the range
- // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
- // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
- // is accurate to about 3.5 binary digits.
- let q31b = CastInto::<u32>::cast(b_significand << 8.cast());
- let mut reciprocal = (0x7504f333u32).wrapping_sub(q31b);
-
- // Now refine the reciprocal estimate using a Newton-Raphson iteration:
- //
- // x1 = x0 * (2 - x0 * b)
- //
- // This doubles the number of correct binary digits in the approximation
- // with each iteration, so after three iterations, we have about 28 binary
- // digits of accuracy.
-
- let mut correction: u32 =
- negate_u32(((reciprocal as u64).wrapping_mul(q31b as u64) >> 32) as u32);
- reciprocal = ((reciprocal as u64).wrapping_mul(correction as u64) >> 31) as u32;
- correction = negate_u32(((reciprocal as u64).wrapping_mul(q31b as u64) >> 32) as u32);
- reciprocal = ((reciprocal as u64).wrapping_mul(correction as u64) >> 31) as u32;
- correction = negate_u32(((reciprocal as u64).wrapping_mul(q31b as u64) >> 32) as u32);
- reciprocal = ((reciprocal as u64).wrapping_mul(correction as u64) >> 31) as u32;
-
- // Exhaustive testing shows that the error in reciprocal after three steps
- // is in the interval [-0x1.f58108p-31, 0x1.d0e48cp-29], in line with our
- // expectations. We bump the reciprocal by a tiny value to force the error
- // to be strictly positive (in the range [0x1.4fdfp-37,0x1.287246p-29], to
- // be specific). This also causes 1/1 to give a sensible approximation
- // instead of zero (due to overflow).
- reciprocal = reciprocal.wrapping_sub(2);
-
- // The numerical reciprocal is accurate to within 2^-28, lies in the
- // interval [0x1.000000eep-1, 0x1.fffffffcp-1], and is strictly smaller
- // than the true reciprocal of b. Multiplying a by this reciprocal thus
- // gives a numerical q = a/b in Q24 with the following properties:
- //
- // 1. q < a/b
- // 2. q is in the interval [0x1.000000eep-1, 0x1.fffffffcp0)
- // 3. the error in q is at most 2^-24 + 2^-27 -- the 2^24 term comes
- // from the fact that we truncate the product, and the 2^27 term
- // is the error in the reciprocal of b scaled by the maximum
- // possible value of a. As a consequence of this error bound,
- // either q or nextafter(q) is the correctly rounded
- let mut quotient = (a_significand << 1).widen_mul(reciprocal.cast()).hi();
-
- // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
- // In either case, we are going to compute a residual of the form
- //
- // r = a - q*b
+
+ let written_exponent: i32 = CastInto::<u32>::cast(
+ a_exponent
+ .wrapping_sub(b_exponent)
+ .wrapping_add(scale.cast()),
+ )
+ .wrapping_add(exponent_bias) as i32;
+ let b_uq1 = b_significand << (F::BITS - significand_bits - 1);
+
+ // Align the significand of b as a UQ1.(n-1) fixed-point number in the range
+ // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax
+ // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2.
+ // The max error for this approximation is achieved at endpoints, so
+ // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289...,
+ // which is about 4.5 bits.
+ // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571...
+
+ // Then, refine the reciprocal estimate using a quadratically converging
+ // Newton-Raphson iteration:
+ // x_{n+1} = x_n * (2 - x_n * b)
//
- // We know from the construction of q that r satisfies:
+ // Let b be the original divisor considered "in infinite precision" and
+ // obtained from IEEE754 representation of function argument (with the
+ // implicit bit set). Corresponds to rep_t-sized b_UQ1 represented in
+ // UQ1.(W-1).
//
- // 0 <= r < ulp(q)*b
+ // Let b_hw be an infinitely precise number obtained from the highest (HW-1)
+ // bits of divisor significand (with the implicit bit set). Corresponds to
+ // half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1) that is a **truncated**
+ // version of b_UQ1.
//
- // if r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
- // already have the correct result. The exact halfway case cannot occur.
- // We also take this time to right shift quotient if it falls in the [1,2)
- // range and adjust the exponent accordingly.
- let residual = if quotient < (implicit_bit << 1) {
- quotient_exponent = quotient_exponent.wrapping_sub(1);
- (a_significand << (significand_bits + 1)).wrapping_sub(quotient.wrapping_mul(b_significand))
+ // Let e_n := x_n - 1/b_hw
+ // E_n := x_n - 1/b
+ // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b)
+ // = abs(e_n) + (b - b_hw) / (b*b_hw)
+ // <= abs(e_n) + 2 * 2^-HW
+
+ // rep_t-sized iterations may be slower than the corresponding half-width
+ // variant depending on the hardware and whether single/double/quad precision
+ // is selected.
+ // NB: Using half-width iterations increases computation errors due to
+ // rounding, so error estimations have to be computed taking the selected
+ // mode into account!
+
+ #[allow(clippy::absurd_extreme_comparisons)]
+ let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 {
+ // Starting with (n-1) half-width iterations
+ let b_uq1_hw: u16 =
+ (CastInto::<u32>::cast(b_significand) >> (significand_bits + 1 - hw)) as u16;
+
+ // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW
+ // with W0 being either 16 or 32 and W0 <= HW.
+ // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which
+ // b/2 is subtracted to obtain x0) wrapped to [0, 1) range.
+
+ // HW is at least 32. Shifting into the highest bits if needed.
+ let c_hw = (0x7504_u32 as u16).wrapping_shl(hw.wrapping_sub(32));
+
+ // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572,
+ // so x0 fits to UQ0.HW without wrapping.
+ let x_uq0_hw: u16 = {
+ let mut x_uq0_hw: u16 = c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */);
+ // An e_0 error is comprised of errors due to
+ // * x0 being an inherently imprecise first approximation of 1/b_hw
+ // * C_hw being some (irrational) number **truncated** to W0 bits
+ // Please note that e_0 is calculated against the infinitely precise
+ // reciprocal of b_hw (that is, **truncated** version of b).
+ //
+ // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0
+
+ // By construction, 1 <= b < 2
+ // f(x) = x * (2 - b*x) = 2*x - b*x^2
+ // f'(x) = 2 * (1 - b*x)
+ //
+ // On the [0, 1] interval, f(0) = 0,
+ // then it increases until f(1/b) = 1 / b, maximum on (0, 1),
+ // then it decreases to f(1) = 2 - b
+ //
+ // Let g(x) = x - f(x) = b*x^2 - x.
+ // On (0, 1/b), g(x) < 0 <=> f(x) > x
+ // On (1/b, 1], g(x) > 0 <=> f(x) < x
+ //
+ // For half-width iterations, b_hw is used instead of b.
+ #[allow(clippy::reversed_empty_ranges)]
+ for _ in 0..NUMBER_OF_HALF_ITERATIONS {
+ // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp
+ // of corr_UQ1_hw.
+ // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1).
+ // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided
+ // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is
+ // expected to be strictly positive because b_UQ1_hw has its highest bit set
+ // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1).
+ let corr_uq1_hw: u16 =
+ 0.wrapping_sub((x_uq0_hw as u32).wrapping_mul(b_uq1_hw.cast()) >> hw) as u16;
+
+ // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally
+ // obtaining an UQ1.(HW-1) number and proving its highest bit could be
+ // considered to be 0 to be able to represent it in UQ0.HW.
+ // From the above analysis of f(x), if corr_UQ1_hw would be represented
+ // without any intermediate loss of precision (that is, in twice_rep_t)
+ // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly
+ // less otherwise. On the other hand, to obtain [1.]000..., one has to pass
+ // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due
+ // to 1.0 being not representable as UQ0.HW).
+ // The fact corr_UQ1_hw was virtually rounded up (due to the result of the
+ // multiplication being **first** truncated, then negated, to improve
+ // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw.
+ x_uq0_hw = ((x_uq0_hw as u32).wrapping_mul(corr_uq1_hw as u32) >> (hw - 1)) as u16;
+ // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t
+ // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after
+ // any number of iterations, so just subtract 2 from the reciprocal
+ // approximation after last iteration.
+
+ // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW:
+ // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1
+ // = 1 - e_n * b_hw + 2*eps1
+ // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2
+ // = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2
+ // = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2
+ // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2
+ // = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw
+ // \------ >0 -------/ \-- >0 ---/
+ // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U)
+ }
+ // For initial half-width iterations, U = 2^-HW
+ // Let abs(e_n) <= u_n * U,
+ // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U)
+ // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2)
+
+ // Account for possible overflow (see above). For an overflow to occur for the
+ // first time, for "ideal" corr_UQ1_hw (that is, without intermediate
+ // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum
+ // value representable in UQ0.HW or less by 1. This means that 1/b_hw must
+ // not be below that value (see g(x) above), so it is safe to decrement just
+ // once after the final iteration. On the other hand, the effective value of
+ // the divisor changes after this point (from b_hw to b), so adjust here.
+ x_uq0_hw.wrapping_sub(1_u16)
+ };
+
+ // Error estimations for full-precision iterations are calculated just
+ // as above, but with U := 2^-W and taking extra decrementing into account.
+ // We need at least one such iteration.
+
+ // Simulating operations on a twice_rep_t to perform a single final full-width
+ // iteration. Ad-hoc multiplication implementations take advantage of the
+ // particular structure of the operands.
+
+ let blo: u32 = (CastInto::<u32>::cast(b_uq1)) & lo_mask;
+ // x_UQ0 = x_UQ0_hw * 2^HW - 1
+ // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1
+ //
+ // <--- higher half ---><--- lower half --->
+ // [x_UQ0_hw * b_UQ1_hw]
+ // + [ x_UQ0_hw * blo ]
+ // - [ b_UQ1 ]
+ // = [ result ][.... discarded ...]
+ let corr_uq1 = negate_u32(
+ (x_uq0_hw as u32) * (b_uq1_hw as u32) + (((x_uq0_hw as u32) * (blo)) >> hw) - 1,
+ ); // account for *possible* carry
+ let lo_corr = corr_uq1 & lo_mask;
+ let hi_corr = corr_uq1 >> hw;
+ // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1
+ let mut x_uq0: <F as Float>::Int = ((((x_uq0_hw as u32) * hi_corr) << 1)
+ .wrapping_add(((x_uq0_hw as u32) * lo_corr) >> (hw - 1))
+ .wrapping_sub(2))
+ .cast(); // 1 to account for the highest bit of corr_UQ1 can be 1
+ // 1 to account for possible carry
+ // Just like in the half-width iterations, but with the possibility
+ // of overflowing by one extra Ulp of x_UQ0.
+ x_uq0 -= one;
+ // ... and then traditional fixup by 2 should work
+
+ // On error estimation:
+ // abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW
+ // + (2^-HW + 2^-W)
+ // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW
+
+ // Then like for the half-width iterations:
+ // With 0 <= eps1, eps2 < 2^-W
+ // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b
+ // abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8) ]
+ // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ]
+ x_uq0
} else {
- quotient >>= 1;
- (a_significand << significand_bits).wrapping_sub(quotient.wrapping_mul(b_significand))
+ // C is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits as UQ0.n
+ let c: <F as Float>::Int = (0x7504F333 << (F::BITS - 32)).cast();
+ let x_uq0: <F as Float>::Int = c.wrapping_sub(b_uq1);
+ // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-32
+ x_uq0
+ };
+
+ let mut x_uq0 = if USE_NATIVE_FULL_ITERATIONS {
+ for _ in 0..NUMBER_OF_FULL_ITERATIONS {
+ let corr_uq1: u32 = 0.wrapping_sub(
+ ((CastInto::<u32>::cast(x_uq0) as u64) * (CastInto::<u32>::cast(b_uq1) as u64))
+ >> F::BITS,
+ ) as u32;
+ x_uq0 = ((((CastInto::<u32>::cast(x_uq0) as u64) * (corr_uq1 as u64)) >> (F::BITS - 1))
+ as u32)
+ .cast();
+ }
+ x_uq0
+ } else {
+ // not using native full iterations
+ x_uq0
};
- let written_exponent = quotient_exponent.wrapping_add(exponent_bias as i32);
+ // Finally, account for possible overflow, as explained above.
+ x_uq0 = x_uq0.wrapping_sub(2.cast());
+
+ // u_n for different precisions (with N-1 half-width iterations):
+ // W0 is the precision of C
+ // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW
+
+ // Estimated with bc:
+ // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; }
+ // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; }
+ // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; }
+ // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; }
+
+ // | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1)
+ // u_0 | < 184224974 | < 2812.1 | < 184224974 | < 791240234244348797
+ // u_1 | < 15804007 | < 242.7 | < 15804007 | < 67877681371350440
+ // u_2 | < 116308 | < 2.81 | < 116308 | < 499533100252317
+ // u_3 | < 7.31 | | < 7.31 | < 27054456580
+ // u_4 | | | | < 80.4
+ // Final (U_N) | same as u_3 | < 72 | < 218 | < 13920
+
+ // Add 2 to U_N due to final decrement.
+
+ let reciprocal_precision: <F as Float>::Int = 10.cast();
+
+ // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W
+ let x_uq0 = x_uq0 - reciprocal_precision;
+ // Now 1/b - (2*P) * 2^-W < x < 1/b
+ // FIXME Is x_UQ0 still >= 0.5?
+
+ let mut quotient: <F as Float>::Int = x_uq0.widen_mul(a_significand << 1).hi();
+ // Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W).
+
+ // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1),
+ // adjust it to be in [1.0, 2.0) as UQ1.SB.
+ let (mut residual, written_exponent) = if quotient < (implicit_bit << 1) {
+ // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB,
+ // effectively doubling its value as well as its error estimation.
+ let residual_lo = (a_significand << (significand_bits + 1)).wrapping_sub(
+ (CastInto::<u32>::cast(quotient).wrapping_mul(CastInto::<u32>::cast(b_significand)))
+ .cast(),
+ );
+ a_significand <<= 1;
+ (residual_lo, written_exponent.wrapping_sub(1))
+ } else {
+ // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it
+ // to UQ1.SB by right shifting by 1. Least significant bit is omitted.
+ quotient >>= 1;
+ let residual_lo = (a_significand << significand_bits).wrapping_sub(
+ (CastInto::<u32>::cast(quotient).wrapping_mul(CastInto::<u32>::cast(b_significand)))
+ .cast(),
+ );
+ (residual_lo, written_exponent)
+ };
+ // drop mutability
+ let quotient = quotient;
+
+ // NB: residualLo is calculated above for the normal result case.
+ // It is re-computed on the denormal path, which is not expected to be
+ // performance-sensitive.
+
+ // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB
+ // Each NextAfter() increments the floating point value by at least 2^-SB
+ // (more, if exponent was incremented).
+ // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint):
+ // q
+ // | | * | | | | |
+ // <---> 2^t
+ // | | | | | * | |
+ // q
+ // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB.
+ // (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB
+ // (8*P) * 2^-W < 0.5 * 2^-SB
+ // P < 2^(W-4-SB)
+ // Generally, for at most R NextAfter() to be enough,
+ // P < (2*R - 1) * 2^(W-4-SB)
+ // For f32 (0+3): 10 < 32 (OK)
+ // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required
+ // For f64: 220 < 256 (OK)
+ // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required)
+
+ // If we have overflowed the exponent, return infinity
if written_exponent >= max_exponent as i32 {
- // If we have overflowed the exponent, return infinity.
return F::from_repr(inf_rep | quotient_sign);
- } else if written_exponent < 1 {
- // Flush denormals to zero. In the future, it would be nice to add
- // code to round them correctly.
- return F::from_repr(quotient_sign);
- } else {
- let round = ((residual << 1) > b_significand) as u32;
- // Clear the implicit bits
- let mut abs_result = quotient & significand_mask;
- // Insert the exponent
- abs_result |= written_exponent.cast() << significand_bits;
- // Round
- abs_result = abs_result.wrapping_add(round.cast());
- // Insert the sign and return
- return F::from_repr(abs_result | quotient_sign);
}
+
+ // Now, quotient <= the correctly-rounded result
+ // and may need up to 3 NextAfter() steps (see the error estimates above)
+ // r = a - b * q
+ let abs_result = if written_exponent > 0 {
+ let mut ret = quotient & significand_mask;
+ ret |= ((written_exponent as u32) << significand_bits).cast();
+ residual <<= 1;
+ ret
+ } else {
+ if (significand_bits as i32 + written_exponent) < 0 {
+ return F::from_repr(quotient_sign);
+ }
+ let ret = quotient.wrapping_shr(negate_u32(CastInto::<u32>::cast(written_exponent)) + 1);
+ residual = (CastInto::<u32>::cast(
+ a_significand.wrapping_shl(
+ significand_bits.wrapping_add(CastInto::<u32>::cast(written_exponent)),
+ ),
+ )
+ .wrapping_sub(
+ (CastInto::<u32>::cast(ret).wrapping_mul(CastInto::<u32>::cast(b_significand))) << 1,
+ ))
+ .cast();
+ ret
+ };
+ // Round
+ let abs_result = {
+ residual += abs_result & one; // tie to even
+ // The above line conditionally turns the below LT comparison into LTE
+
+ if residual > b_significand {
+ abs_result + one
+ } else {
+ abs_result
+ }
+ };
+ F::from_repr(abs_result | quotient_sign)
}
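To make the fixed-point Newton-Raphson scheme that both div32 and div64 implement easier to follow, here is a minimal standalone sketch of the full-width UQ0.32 variant. It is illustrative only: the function name is hypothetical, the iteration count is fixed at three, and the final bias mirrors the `wrapping_sub(2)` fixup above.

// Sketch: approximate 1/b from below in UQ0.32 fixed point, where b_uq1
// encodes b in [1.0, 2.0) as UQ1.31, via x_{n+1} = x_n * (2 - b * x_n).
fn recip_uq0_32(b_uq1: u32) -> u32 {
    // x0 = 3/4 + 1/sqrt(2) - b/2, wrapped to [0, 1); the magic constant is
    // the same truncation of (3/4 + 1/sqrt(2)) - 1 used by the real code.
    let mut x_uq0 = 0x7504_F333_u32.wrapping_sub(b_uq1);
    for _ in 0..3 {
        // corr = 2 - b*x as UQ1.31; "0 - t" equals "2.0 - t" modulo 2.
        let corr_uq1 = 0_u32.wrapping_sub(((x_uq0 as u64 * b_uq1 as u64) >> 32) as u32);
        x_uq0 = ((x_uq0 as u64 * corr_uq1 as u64) >> 31) as u32;
    }
    // Truncation may leave the estimate at or above 1/b; bias it strictly below.
    x_uq0.wrapping_sub(2)
}

fn main() {
    let x = recip_uq0_32(0xC000_0000); // b = 1.5 as UQ1.31
    println!("{x:#010x}"); // just below 0xAAAA_AAAB, i.e. about 2/3 as UQ0.32
}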
fn div64<F: Float>(a: F, b: F) -> F
@@ -218,10 +464,15 @@ where
F::Int: CastInto<i64>,
F::Int: HInt,
{
+ const NUMBER_OF_HALF_ITERATIONS: usize = 3;
+ const NUMBER_OF_FULL_ITERATIONS: usize = 1;
+ const USE_NATIVE_FULL_ITERATIONS: bool = false;
+
let one = F::Int::ONE;
let zero = F::Int::ZERO;
+ let hw = F::BITS / 2;
+ let lo_mask = u64::MAX >> hw;
- // let bits = F::BITS;
let significand_bits = F::SIGNIFICAND_BITS;
let max_exponent = F::EXPONENT_MAX;
@@ -235,12 +486,6 @@ where
let inf_rep = exponent_mask;
let quiet_bit = implicit_bit >> 1;
let qnan_rep = exponent_mask | quiet_bit;
- // let exponent_bits = F::EXPONENT_BITS;
-
- #[inline(always)]
- fn negate_u32(a: u32) -> u32 {
- (<i32>::wrapping_neg(a as i32)) as u32
- }
#[inline(always)]
fn negate_u64(a: u64) -> u64 {
@@ -320,128 +565,340 @@ where
}
}
- // Or in the implicit significand bit. (If we fell through from the
+ // Set the implicit significand bit. If we fell through from the
// denormal path it was already set by normalize( ), but setting it twice
- // won't hurt anything.)
+ // won't hurt anything.
a_significand |= implicit_bit;
b_significand |= implicit_bit;
- let mut quotient_exponent: i32 = CastInto::<i32>::cast(a_exponent)
- .wrapping_sub(CastInto::<i32>::cast(b_exponent))
- .wrapping_add(scale);
-
- // Align the significand of b as a Q31 fixed-point number in the range
- // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
- // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
- // is accurate to about 3.5 binary digits.
- let q31b = CastInto::<u32>::cast(b_significand >> 21.cast());
- let mut recip32 = (0x7504f333u32).wrapping_sub(q31b);
-
- // Now refine the reciprocal estimate using a Newton-Raphson iteration:
- //
- // x1 = x0 * (2 - x0 * b)
- //
- // This doubles the number of correct binary digits in the approximation
- // with each iteration, so after three iterations, we have about 28 binary
- // digits of accuracy.
-
- let mut correction32: u32 =
- negate_u32(((recip32 as u64).wrapping_mul(q31b as u64) >> 32) as u32);
- recip32 = ((recip32 as u64).wrapping_mul(correction32 as u64) >> 31) as u32;
- correction32 = negate_u32(((recip32 as u64).wrapping_mul(q31b as u64) >> 32) as u32);
- recip32 = ((recip32 as u64).wrapping_mul(correction32 as u64) >> 31) as u32;
- correction32 = negate_u32(((recip32 as u64).wrapping_mul(q31b as u64) >> 32) as u32);
- recip32 = ((recip32 as u64).wrapping_mul(correction32 as u64) >> 31) as u32;
-
- // recip32 might have overflowed to exactly zero in the preceeding
- // computation if the high word of b is exactly 1.0. This would sabotage
- // the full-width final stage of the computation that follows, so we adjust
- // recip32 downward by one bit.
- recip32 = recip32.wrapping_sub(1);
-
- // We need to perform one more iteration to get us to 56 binary digits;
- // The last iteration needs to happen with extra precision.
- let q63blo = CastInto::<u32>::cast(b_significand << 11.cast());
-
- let correction: u64 = negate_u64(
- (recip32 as u64)
- .wrapping_mul(q31b as u64)
- .wrapping_add((recip32 as u64).wrapping_mul(q63blo as u64) >> 32),
- );
- let c_hi = (correction >> 32) as u32;
- let c_lo = correction as u32;
- let mut reciprocal: u64 = (recip32 as u64)
- .wrapping_mul(c_hi as u64)
- .wrapping_add((recip32 as u64).wrapping_mul(c_lo as u64) >> 32);
-
- // We already adjusted the 32-bit estimate, now we need to adjust the final
- // 64-bit reciprocal estimate downward to ensure that it is strictly smaller
- // than the infinitely precise exact reciprocal. Because the computation
- // of the Newton-Raphson step is truncating at every step, this adjustment
- // is small; most of the work is already done.
- reciprocal = reciprocal.wrapping_sub(2);
-
- // The numerical reciprocal is accurate to within 2^-56, lies in the
- // interval [0.5, 1.0), and is strictly smaller than the true reciprocal
- // of b. Multiplying a by this reciprocal thus gives a numerical q = a/b
- // in Q53 with the following properties:
- //
- // 1. q < a/b
- // 2. q is in the interval [0.5, 2.0)
- // 3. the error in q is bounded away from 2^-53 (actually, we have a
- // couple of bits to spare, but this is all we need).
-
- // We need a 64 x 64 multiply high to compute q, which isn't a basic
- // operation in C, so we need to be a little bit fussy.
- // let mut quotient: F::Int = ((((reciprocal as u64)
- // .wrapping_mul(CastInto::<u32>::cast(a_significand << 1) as u64))
- // >> 32) as u32)
- // .cast();
-
- // We need a 64 x 64 multiply high to compute q, which isn't a basic
- // operation in C, so we need to be a little bit fussy.
- let mut quotient = (a_significand << 2).widen_mul(reciprocal.cast()).hi();
-
- // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
- // In either case, we are going to compute a residual of the form
- //
- // r = a - q*b
+
+ let written_exponent: i64 = CastInto::<u64>::cast(
+ a_exponent
+ .wrapping_sub(b_exponent)
+ .wrapping_add(scale.cast()),
+ )
+ .wrapping_add(exponent_bias as u64) as i64;
+ let b_uq1 = b_significand << (F::BITS - significand_bits - 1);
+
+ // Align the significand of b as a UQ1.(n-1) fixed-point number in the range
+ // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax
+ // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2.
+ // The max error for this approximation is achieved at endpoints, so
+ // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289...,
+ // which is about 4.5 bits.
+ // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571...
+
+ // Then, refine the reciprocal estimate using a quadratically converging
+ // Newton-Raphson iteration:
+ // x_{n+1} = x_n * (2 - x_n * b)
//
- // We know from the construction of q that r satisfies:
+ // Let b be the original divisor considered "in infinite precision" and
+ // obtained from the IEEE754 representation of the function argument (with
+ // the implicit bit set). It corresponds to the rep_t-sized b_UQ1 represented
+ // in UQ1.(W-1).
//
- // 0 <= r < ulp(q)*b
+ // Let b_hw be an infinitely precise number obtained from the highest (HW-1)
+ // bits of the divisor significand (with the implicit bit set). It corresponds
+ // to the half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1), which is a
+ // **truncated** version of b_UQ1.
//
- // if r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
- // already have the correct result. The exact halfway case cannot occur.
- // We also take this time to right shift quotient if it falls in the [1,2)
- // range and adjust the exponent accordingly.
- let residual = if quotient < (implicit_bit << 1) {
- quotient_exponent = quotient_exponent.wrapping_sub(1);
- (a_significand << (significand_bits + 1)).wrapping_sub(quotient.wrapping_mul(b_significand))
+ // Let e_n := x_n - 1/b_hw
+ // E_n := x_n - 1/b
+ // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b)
+ // = abs(e_n) + (b - b_hw) / (b*b_hw)
+ // <= abs(e_n) + 2 * 2^-HW
+
+ // rep_t-sized iterations may be slower than the corresponding half-width
+ // variant depending on the hardware and whether single/double/quad precision
+ // is selected.
+ // NB: Using half-width iterations increases computation errors due to
+ // rounding, so error estimations have to be computed taking the selected
+ // mode into account!
+
+ let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 {
+ // Starting with (n-1) half-width iterations
+ let b_uq1_hw: u32 =
+ (CastInto::<u64>::cast(b_significand) >> (significand_bits + 1 - hw)) as u32;
+
+ // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW
+ // with W0 being either 16 or 32 and W0 <= HW.
+ // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which
+ // b/2 is subtracted to obtain x0) wrapped to [0, 1) range.
+
+ // HW is at least 32. Shifting into the highest bits if needed.
+ let c_hw = (0x7504F333_u64 as u32).wrapping_shl(hw.wrapping_sub(32));
+
+ // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572,
+ // so x0 fits to UQ0.HW without wrapping.
+ let x_uq0_hw: u32 = {
+ let mut x_uq0_hw: u32 = c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */);
+ // The e_0 error consists of errors due to
+ // * x0 being an inherently imprecise first approximation of 1/b_hw
+ // * C_hw being some (irrational) number **truncated** to W0 bits
+ // Please note that e_0 is calculated against the infinitely precise
+ // reciprocal of b_hw (that is, of the **truncated** version of b).
+ //
+ // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0
+
+ // By construction, 1 <= b < 2
+ // f(x) = x * (2 - b*x) = 2*x - b*x^2
+ // f'(x) = 2 * (1 - b*x)
+ //
+ // On the [0, 1] interval, f(0) = 0,
+ // then it increases until f(1/b) = 1 / b, its maximum on (0, 1),
+ // then it decreases to f(1) = 2 - b
+ //
+ // Let g(x) = x - f(x) = b*x^2 - x.
+ // On (0, 1/b), g(x) < 0 <=> f(x) > x
+ // On (1/b, 1], g(x) > 0 <=> f(x) < x
+ //
+ // For half-width iterations, b_hw is used instead of b.
+ for _ in 0..NUMBER_OF_HALF_ITERATIONS {
+ // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp
+ // of corr_UQ1_hw.
+ // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1).
+ // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided
+ // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is
+ // expected to be strictly positive because b_UQ1_hw has its highest bit set
+ // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1).
+ let corr_uq1_hw: u32 =
+ 0.wrapping_sub(((x_uq0_hw as u64).wrapping_mul(b_uq1_hw as u64)) >> hw) as u32;
+
+ // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally
+ // obtaining a UQ1.(HW-1) number whose highest bit can be shown to be 0,
+ // so that it can be represented in UQ0.HW.
+ // From the above analysis of f(x), if corr_UQ1_hw were represented
+ // without any intermediate loss of precision (that is, in twice_rep_t),
+ // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0, and strictly
+ // less otherwise. On the other hand, to obtain [1.]000..., one has to pass
+ // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due
+ // to 1.0 not being representable as UQ0.HW).
+ // The fact that corr_UQ1_hw was effectively rounded up (due to the result
+ // of the multiplication being **first** truncated, then negated - to improve
+ // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw.
+ x_uq0_hw = ((x_uq0_hw as u64).wrapping_mul(corr_uq1_hw as u64) >> (hw - 1)) as u32;
+ // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t
+ // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after
+ // any number of iterations, so just subtract 2 from the reciprocal
+ // approximation after last iteration.
+
+ // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW:
+ // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1
+ // = 1 - e_n * b_hw + 2*eps1
+ // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2
+ // = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2
+ // = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2
+ // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2
+ // = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw
+ // \------ >0 -------/ \-- >0 ---/
+ // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U)
+ }
+ // For initial half-width iterations, U = 2^-HW
+ // Let abs(e_n) <= u_n * U,
+ // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U)
+ // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2)
+
+ // Account for possible overflow (see above). For an overflow to occur for the
+ // first time, for "ideal" corr_UQ1_hw (that is, without intermediate
+ // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum
+ // value representable in UQ0.HW or less by 1. This means that 1/b_hw must
+ // not be below that value (see g(x) above), so it is safe to decrement just
+ // once after the final iteration. On the other hand, the effective value of
+ // the divisor changes after this point (from b_hw to b), so adjust here.
+ x_uq0_hw.wrapping_sub(1_u32)
+ };
+
+ // Error estimations for full-precision iterations are calculated just
+ // as above, but with U := 2^-W and taking extra decrementing into account.
+ // We need at least one such iteration.
+
+ // Simulating operations on a twice_rep_t to perform a single final full-width
+ // iteration. Ad-hoc multiplication implementations take advantage of the
+ // particular structure of the operands.
+ let blo: u64 = (CastInto::<u64>::cast(b_uq1)) & lo_mask;
+ // x_UQ0 = x_UQ0_hw * 2^HW - 1
+ // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1
+ //
+ // <--- higher half ---><--- lower half --->
+ // [x_UQ0_hw * b_UQ1_hw]
+ // + [ x_UQ0_hw * blo ]
+ // - [ b_UQ1 ]
+ // = [ result ][.... discarded ...]
+ let corr_uq1 = negate_u64(
+ (x_uq0_hw as u64) * (b_uq1_hw as u64) + (((x_uq0_hw as u64) * (blo)) >> hw) - 1,
+ ); // account for *possible* carry
+ let lo_corr = corr_uq1 & lo_mask;
+ let hi_corr = corr_uq1 >> hw;
+ // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1
+ let mut x_uq0: <F as Float>::Int = ((((x_uq0_hw as u64) * hi_corr) << 1)
+ .wrapping_add(((x_uq0_hw as u64) * lo_corr) >> (hw - 1))
+ .wrapping_sub(2))
+ .cast(); // 1 to account for the highest bit of corr_UQ1 can be 1
+ // 1 to account for possible carry
+ // Just like in the half-width iterations, but with the possibility
+ // of overflowing by one extra Ulp of x_UQ0.
+ x_uq0 -= one;
+ // ... and then traditional fixup by 2 should work
+
+ // On error estimation:
+ // abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW
+ // + (2^-HW + 2^-W)
+ // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW
+
+ // Then like for the half-width iterations:
+ // With 0 <= eps1, eps2 < 2^-W
+ // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b
+ // abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8) ]
+ // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ]
+ x_uq0
} else {
- quotient >>= 1;
- (a_significand << significand_bits).wrapping_sub(quotient.wrapping_mul(b_significand))
+ // C is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits (in the high half) as UQ0.n
+ let c: <F as Float>::Int = (0x7504F333 << (F::BITS - 32)).cast();
+ let x_uq0: <F as Float>::Int = c.wrapping_sub(b_uq1);
+ // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-64
+ x_uq0
+ };
+
+ let mut x_uq0 = if USE_NATIVE_FULL_ITERATIONS {
+ for _ in 0..NUMBER_OF_FULL_ITERATIONS {
+ // Widen to u128: the product does not fit in u64, and `>> F::BITS` would
+ // be a full-width shift.
+ let corr_uq1: u64 = 0.wrapping_sub(
+ ((CastInto::<u64>::cast(x_uq0) as u128) * (CastInto::<u64>::cast(b_uq1) as u128))
+ >> F::BITS,
+ ) as u64;
+ x_uq0 = ((((CastInto::<u64>::cast(x_uq0) as u128) * (corr_uq1 as u128))
+ >> (F::BITS - 1)) as u64)
+ .cast();
+ }
+ x_uq0
+ } else {
+ // not using native full iterations
+ x_uq0
};
- let written_exponent = quotient_exponent.wrapping_add(exponent_bias as i32);
+ // Finally, account for possible overflow, as explained above.
+ x_uq0 = x_uq0.wrapping_sub(2.cast());
+
+ // u_n for different precisions (with N-1 half-width iterations):
+ // W0 is the precision of C
+ // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW
+
+ // Estimated with bc:
+ // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; }
+ // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; }
+ // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; }
+ // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; }
+
+ // | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1)
+ // u_0 | < 184224974 | < 2812.1 | < 184224974 | < 791240234244348797
+ // u_1 | < 15804007 | < 242.7 | < 15804007 | < 67877681371350440
+ // u_2 | < 116308 | < 2.81 | < 116308 | < 499533100252317
+ // u_3 | < 7.31 | | < 7.31 | < 27054456580
+ // u_4 | | | | < 80.4
+ // Final (U_N) | same as u_3 | < 72 | < 218 | < 13920
+
+ // Add 2 to U_N due to final decrement.
+
+ let reciprocal_precision: <F as Float>::Int = 220.cast();
+
+ // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W
+ let x_uq0 = x_uq0 - reciprocal_precision;
+ // Now 1/b - (2*P) * 2^-W < x < 1/b
+ // FIXME Is x_UQ0 still >= 0.5?
+
+ let mut quotient: <F as Float>::Int = x_uq0.widen_mul(a_significand << 1).hi();
+ // Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W).
+
+ // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1),
+ // adjust it to be in [1.0, 2.0) as UQ1.SB.
+ let (mut residual, written_exponent) = if quotient < (implicit_bit << 1) {
+ // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB,
+ // effectively doubling its value as well as its error estimation.
+ let residual_lo = (a_significand << (significand_bits + 1)).wrapping_sub(
+ (CastInto::<u64>::cast(quotient).wrapping_mul(CastInto::<u64>::cast(b_significand)))
+ .cast(),
+ );
+ a_significand <<= 1;
+ (residual_lo, written_exponent.wrapping_sub(1))
+ } else {
+ // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it
+ // to UQ1.SB by right shifting by 1. Least significant bit is omitted.
+ quotient >>= 1;
+ let residual_lo = (a_significand << significand_bits).wrapping_sub(
+ (CastInto::<u64>::cast(quotient).wrapping_mul(CastInto::<u64>::cast(b_significand)))
+ .cast(),
+ );
+ (residual_lo, written_exponent)
+ };
- if written_exponent >= max_exponent as i32 {
- // If we have overflowed the exponent, return infinity.
+ // drop mutability
+ let quotient = quotient;
+
+ // NB: residualLo is calculated above for the normal result case.
+ // It is re-computed on the denormal path, which is not expected to be
+ // performance-sensitive.
+
+ // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB
+ // Each NextAfter() increments the floating point value by at least 2^-SB
+ // (more, if exponent was incremented).
+ // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint):
+ // q
+ // | | * | | | | |
+ // <---> 2^t
+ // | | | | | * | |
+ // q
+ // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB.
+ // (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB
+ // (8*P) * 2^-W < 0.5 * 2^-SB
+ // P < 2^(W-4-SB)
+ // Generally, for at most R NextAfter() to be enough,
+ // P < (2*R - 1) * 2^(W-4-SB)
+ // For f32 (0+3): 10 < 32 (OK)
+ // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required
+ // For f64: 220 < 256 (OK)
+ // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required)
+
+ // If we have overflowed the exponent, return infinity
+ if written_exponent >= max_exponent as i64 {
return F::from_repr(inf_rep | quotient_sign);
- } else if written_exponent < 1 {
- // Flush denormals to zero. In the future, it would be nice to add
- // code to round them correctly.
- return F::from_repr(quotient_sign);
- } else {
- let round = ((residual << 1) > b_significand) as u32;
- // Clear the implicit bits
- let mut abs_result = quotient & significand_mask;
- // Insert the exponent
- abs_result |= written_exponent.cast() << significand_bits;
- // Round
- abs_result = abs_result.wrapping_add(round.cast());
- // Insert the sign and return
- return F::from_repr(abs_result | quotient_sign);
}
+
+ // Now, quotient <= the correctly-rounded result
+ // and may need up to 3 NextAfter() steps (see the error estimates above)
+ // r = a - b * q
+ let abs_result = if written_exponent > 0 {
+ let mut ret = quotient & significand_mask;
+ ret |= ((written_exponent as u64) << significand_bits).cast();
+ residual <<= 1;
+ ret
+ } else {
+ if (significand_bits as i64 + written_exponent) < 0 {
+ return F::from_repr(quotient_sign);
+ }
+ let ret =
+ quotient.wrapping_shr((negate_u64(CastInto::<u64>::cast(written_exponent)) + 1) as u32);
+ residual = (CastInto::<u64>::cast(
+ a_significand.wrapping_shl(
+ significand_bits.wrapping_add(CastInto::<u32>::cast(written_exponent)),
+ ),
+ )
+ .wrapping_sub(
+ (CastInto::<u64>::cast(ret).wrapping_mul(CastInto::<u64>::cast(b_significand))) << 1,
+ ))
+ .cast();
+ ret
+ };
+ // Round
+ let abs_result = {
+ residual += abs_result & one; // tie to even
+ // conditionally turns the below LT comparison into LTE
+ if residual > b_significand {
+ abs_result + one
+ } else {
+ abs_result
+ }
+ };
+ F::from_repr(abs_result | quotient_sign)
}
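One detail of the shared rounding epilogue above deserves a standalone look: the `residual += abs_result & one` line. Adding the quotient's low bit before the strict comparison makes a tie round up exactly when the quotient is odd, which is round-to-nearest, ties-to-even. A toy model (hypothetical names; units chosen so that a tie is `residual2 == b_sig`):

// q is the truncated quotient (an underestimate of a/b); residual2 is the
// doubled remainder, scaled so that the halfway case equals b_sig exactly.
fn round_nearest_even(q: u64, residual2: u64, b_sig: u64) -> u64 {
    // For odd q, a tie now passes the `>` test and q is bumped to its even
    // neighbour; for even q, ties are left alone.
    let residual2 = residual2 + (q & 1);
    if residual2 > b_sig { q + 1 } else { q }
}

fn main() {
    assert_eq!(round_nearest_even(10, 7, 8), 10); // below half an ulp: keep
    assert_eq!(round_nearest_even(10, 9, 8), 11); // above half an ulp: bump
    assert_eq!(round_nearest_even(10, 8, 8), 10); // tie with even q: keep
    assert_eq!(round_nearest_even(11, 8, 8), 12); // tie with odd q: bump to even
}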
intrinsics! {
diff --git a/vendor/compiler_builtins/src/lib.rs b/vendor/compiler_builtins/src/lib.rs
index 71f249c8e..a6b61bdf5 100644
--- a/vendor/compiler_builtins/src/lib.rs
+++ b/vendor/compiler_builtins/src/lib.rs
@@ -48,6 +48,7 @@ pub mod int;
all(target_arch = "x86_64", target_os = "uefi"),
all(target_arch = "arm", target_os = "none"),
all(target_arch = "xtensa", target_os = "none"),
+ all(target_arch = "mips", target_os = "none"),
target_os = "xous",
all(target_vendor = "fortanix", target_env = "sgx")
))]
@@ -57,6 +58,9 @@ pub mod mem;
#[cfg(target_arch = "arm")]
pub mod arm;
+#[cfg(all(target_arch = "aarch64", target_os = "linux", not(feature = "no-asm"),))]
+pub mod aarch64_linux;
+
#[cfg(all(
kernel_user_helpers,
any(target_os = "linux", target_os = "android"),
diff --git a/vendor/compiler_builtins/src/macros.rs b/vendor/compiler_builtins/src/macros.rs
index 59f25317e..b11114f12 100644
--- a/vendor/compiler_builtins/src/macros.rs
+++ b/vendor/compiler_builtins/src/macros.rs
@@ -33,7 +33,7 @@ macro_rules! public_test_dep {
///
/// This macro is structured to be invoked with a bunch of functions that looks
/// like:
-///
+/// ```ignore
/// intrinsics! {
/// pub extern "C" fn foo(a: i32) -> u32 {
/// // ...
@@ -44,6 +44,7 @@ macro_rules! public_test_dep {
/// // ...
/// }
/// }
+/// ```
///
/// Each function is defined in a manner that looks like a normal Rust function.
/// The macro then accepts a few nonstandard attributes that can decorate
@@ -203,7 +204,7 @@ macro_rules! intrinsics {
(
#[maybe_use_optimized_c_shim]
$(#[$($attr:tt)*])*
- pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? {
+ pub $(unsafe $(@ $empty:tt)? )? extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? {
$($body:tt)*
}
@@ -211,7 +212,7 @@ macro_rules! intrinsics {
) => (
#[cfg($name = "optimized-c")]
#[cfg_attr(feature = "weak-intrinsics", linkage = "weak")]
- pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
+ pub $(unsafe $($empty)? )? extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
extern $abi {
fn $name($($argname: $ty),*) $(-> $ret)?;
}
@@ -223,7 +224,7 @@ macro_rules! intrinsics {
#[cfg(not($name = "optimized-c"))]
intrinsics! {
$(#[$($attr)*])*
- pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
+ pub $(unsafe $($empty)? )? extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
$($body)*
}
}
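The new `$(unsafe $(@ $empty:tt)?)?` matcher is a standard macro_rules idiom worth spelling out: a repetition in the transcriber must contain at least one metavariable, and the bare keyword `unsafe` contains none, so the inner `$(@ $empty:tt)?` (which never matches real input here) exists solely to give the outer repetition a variable to key off. A minimal self-contained sketch with a hypothetical macro name:

// `shims!` forwards an optional `unsafe` from the input signature to the
// generated function, using the same empty-repetition trick as above.
macro_rules! shims {
    (
        pub $(unsafe $(@ $empty:tt)?)? fn $name:ident() -> $ret:ty { $($body:tt)* }
    ) => {
        pub $(unsafe $($empty)?)? fn $name() -> $ret { $($body)* }
    };
}

shims! { pub fn safe_one() -> u32 { 1 } }
shims! { pub unsafe fn unsafe_one() -> u32 { 1 } }

fn main() {
    assert_eq!(safe_one(), 1);
    assert_eq!(unsafe { unsafe_one() }, 1);
}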
@@ -437,12 +438,11 @@ macro_rules! intrinsics {
intrinsics!($($rest)*);
);
- // For division and modulo, AVR uses a custom calling convention¹ that does
- // not match our definitions here. Ideally we would just use hand-written
- // naked functions, but that's quite a lot of code to port² - so for the
- // time being we are just ignoring the problematic functions, letting
- // avr-gcc (which is required to compile to AVR anyway) link them from
- // libgcc.
+ // For some intrinsics, AVR uses a custom calling convention¹ that does not
+ // match our definitions here. Ideally we would just use hand-written naked
+ // functions, but that's quite a lot of code to port² - so for the time
+ // being we are just ignoring the problematic functions, letting avr-gcc
+ // (which is required to compile to AVR anyway) link them from libgcc.
//
// ¹ https://gcc.gnu.org/wiki/avr-gcc (see "Exceptions to the Calling
// Convention")
diff --git a/vendor/compiler_builtins/src/math.rs b/vendor/compiler_builtins/src/math.rs
index 498e4d85f..b4e5fc113 100644
--- a/vendor/compiler_builtins/src/math.rs
+++ b/vendor/compiler_builtins/src/math.rs
@@ -136,11 +136,12 @@ no_mangle! {
fn truncf(x: f32) -> f32;
}
-// only for the thumb*-none-eabi*, riscv32*-none-elf and x86_64-unknown-none targets that lack the floating point instruction set
+// only for the thumb*-none-eabi*, riscv32*-none-elf, x86_64-unknown-none and mips*-unknown-none targets that lack the floating point instruction set
#[cfg(any(
all(target_arch = "arm", target_os = "none"),
all(target_arch = "riscv32", not(target_feature = "f"), target_os = "none"),
- all(target_arch = "x86_64", target_os = "none")
+ all(target_arch = "x86_64", target_os = "none"),
+ all(target_arch = "mips", target_os = "none"),
))]
no_mangle! {
fn fmin(x: f64, y: f64) -> f64;
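For context, each function listed in this `no_mangle!` block ends up as an unmangled C-ABI symbol that forwards to the pure-Rust libm port, so soft-float targets without a system libm can still resolve calls such as `fmin`. Roughly (this expansion is an assumption for illustration, not the vendored macro; `libm` here stands for the bundled implementation):

// Hypothetical expansion for one entry of the list:
#[no_mangle]
pub extern "C" fn fmin(x: f64, y: f64) -> f64 {
    libm::fmin(x, y)
}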