From ccd992355df7192993c666236047820244914598 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Tue, 16 Apr 2024 21:19:13 +0200
Subject: Adding upstream version 1.21.8.

Signed-off-by: Daniel Baumann
---
 src/runtime/softfloat64.go | 627 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 627 insertions(+)
 create mode 100644 src/runtime/softfloat64.go

(limited to 'src/runtime/softfloat64.go')

diff --git a/src/runtime/softfloat64.go b/src/runtime/softfloat64.go
new file mode 100644
index 0000000..42ef009
--- /dev/null
+++ b/src/runtime/softfloat64.go
@@ -0,0 +1,627 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Software IEEE754 64-bit floating point.
+// Only referred to (and thus linked in) by softfloat targets
+// and by tests in this directory.
+
+package runtime
+
+const (
+	mantbits64 uint = 52
+	expbits64  uint = 11
+	bias64          = -1<<(expbits64-1) + 1
+
+	nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1<<(mantbits64-1) // quiet NaN, 0 payload
+	inf64 uint64 = (1<<expbits64 - 1) << mantbits64
+
+	mantbits32 uint = 23
+	expbits32  uint = 8
+	bias32          = -1<<(expbits32-1) + 1
+
+	nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1<<(mantbits32-1) // quiet NaN, 0 payload
+	inf32 uint32 = (1<<expbits32 - 1) << mantbits32
+)
+
+func funpack64(f uint64) (sign, mant uint64, exp int, inf, nan bool) {
+	sign = f & (1 << (mantbits64 + expbits64))
+	mant = f & (1<<mantbits64 - 1)
+	exp = int(f>>mantbits64) & (1<<expbits64 - 1)
+
+	switch exp {
+	case 1<<expbits64 - 1:
+		if mant != 0 {
+			nan = true
+			return
+		}
+		inf = true
+		return
+
+	case 0:
+		// denormalized
+		if mant != 0 {
+			exp += bias64 + 1
+			for mant < 1<<mantbits64 {
+				mant <<= 1
+				exp--
+			}
+		}
+
+	default:
+		// add implicit top bit
+		mant |= 1 << mantbits64
+		exp += bias64
+	}
+	return
+}
+
+func funpack32(f uint32) (sign, mant uint32, exp int, inf, nan bool) {
+	sign = f & (1 << (mantbits32 + expbits32))
+	mant = f & (1<<mantbits32 - 1)
+	exp = int(f>>mantbits32) & (1<<expbits32 - 1)
+
+	switch exp {
+	case 1<<expbits32 - 1:
+		if mant != 0 {
+			nan = true
+			return
+		}
+		inf = true
+		return
+
+	case 0:
+		// denormalized
+		if mant != 0 {
+			exp += bias32 + 1
+			for mant < 1<<mantbits32 {
+				mant <<= 1
+				exp--
+			}
+		}
+
+	default:
+		// add implicit top bit
+		mant |= 1 << mantbits32
+		exp += bias32
+	}
+	return
+}
+
+func fpack64(sign, mant uint64, exp int, trunc uint64) uint64 {
+	mant0, exp0, trunc0 := mant, exp, trunc
+	if mant == 0 {
+		return sign
+	}
+	for mant < 1<<mantbits64 {
+		mant <<= 1
+		exp--
+	}
+	for mant >= 4<<mantbits64 {
+		trunc |= mant & 1
+		mant >>= 1
+		exp++
+	}
+	if mant >= 2<<mantbits64 {
+		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+			mant++
+			if mant >= 4<<mantbits64 {
+				mant >>= 1
+				exp++
+			}
+		}
+		mant >>= 1
+		exp++
+	}
+	if exp >= 1<<expbits64-1+bias64 {
+		return sign ^ inf64
+	}
+	if exp < bias64+1 {
+		if exp < bias64-int(mantbits64) {
+			return sign | 0
+		}
+		// repeat expecting denormal
+		mant, exp, trunc = mant0, exp0, trunc0
+		for exp < bias64 {
+			trunc |= mant & 1
+			mant >>= 1
+			exp++
+		}
+		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+			mant++
+		}
+		mant >>= 1
+		exp++
+		if mant < 1<<mantbits64 {
+			return sign | mant
+		}
+	}
+	return sign | uint64(exp-bias64)<<mantbits64 | mant&(1<<mantbits64-1)
+}
+
+func fpack32(sign, mant uint32, exp int, trunc uint32) uint32 {
+	mant0, exp0, trunc0 := mant, exp, trunc
+	if mant == 0 {
+		return sign
+	}
+	for mant < 1<<mantbits32 {
+		mant <<= 1
+		exp--
+	}
+	for mant >= 4<<mantbits32 {
+		trunc |= mant & 1
+		mant >>= 1
+		exp++
+	}
+	if mant >= 2<<mantbits32 {
+		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+			mant++
+			if mant >= 4<<mantbits32 {
+				mant >>= 1
+				exp++
+			}
+		}
+		mant >>= 1
+		exp++
+	}
+	if exp >= 1<<expbits32-1+bias32 {
+		return sign ^ inf32
+	}
+	if exp < bias32+1 {
+		if exp < bias32-int(mantbits32) {
+			return sign | 0
+		}
+		// repeat expecting denormal
+		mant, exp, trunc = mant0, exp0, trunc0
+		for exp < bias32 {
+			trunc |= mant & 1
+			mant >>= 1
+			exp++
+		}
+		if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
+			mant++
+		}
+		mant >>= 1
+		exp++
+		if mant < 1<<mantbits32 {
+			return sign | mant
+		}
+	}
+	return sign | uint32(exp-bias32)<<mantbits32 | mant&(1<<mantbits32-1)
+}
+
+func fadd64(f, g uint64) uint64 {
+	fs, fm, fe, fi, fn := funpack64(f)
+	gs, gm, ge, gi, gn := funpack64(g)
+
+	// Special cases.
+	switch {
+	case fn || gn: // NaN + x or x + NaN = NaN
+		return nan64
+
+	case fi && gi && fs != gs: // +Inf + -Inf or -Inf + +Inf = NaN
+		return nan64
+
+	case fi: // ±Inf + g = ±Inf
+		return f
+
+	case gi: // f + ±Inf = ±Inf
+		return g
+
+	case fm == 0 && gm == 0 && fs != 0 && gs != 0: // -0 + -0 = -0
+		return f
+
+	case fm == 0: // 0 + g = g but 0 + -0 = +0
+		if gm == 0 {
+			g ^= gs
+		}
+		return g
+
+	case gm == 0: // f + 0 = f
+		return f
+
+	}
+
+	if fe < ge || fe == ge && fm < gm {
+		f, g, fs, fm, fe, gs, gm, ge = g, f, gs, gm, ge, fs, fm, fe
+	}
+
+	shift := uint(fe - ge)
+	fm <<= 2
+	gm <<= 2
+	trunc := gm & (1<<shift - 1)
+	gm >>= shift
+	if fs == gs {
+		fm += gm
+	} else {
+		fm -= gm
+		if trunc != 0 {
+			fm--
+		}
+	}
+	if fm == 0 {
+		fs = 0
+	}
+	return fpack64(fs, fm, fe-2, trunc)
+}
+
+func fsub64(f, g uint64) uint64 {
+	return fadd64(f, fneg64(g))
+}
+
+func fneg64(f uint64) uint64 {
+	return f ^ (1 << (mantbits64 + expbits64))
+}
+
+func fmul64(f, g uint64) uint64 {
+	fs, fm, fe, fi, fn := funpack64(f)
+	gs, gm, ge, gi, gn := funpack64(g)
+
+	// Special cases.
+	switch {
+	case fn || gn: // NaN * g or f * NaN = NaN
+		return nan64
+
+	case fi && gi: // Inf * Inf = Inf (with sign adjusted)
+		return f ^ gs
+
+	case fi && gm == 0, fm == 0 && gi: // 0 * Inf = Inf * 0 = NaN
+		return nan64
+
+	case fm == 0: // 0 * x = 0 (with sign adjusted)
+		return f ^ gs
+
+	case gm == 0: // x * 0 = 0 (with sign adjusted)
+		return g ^ fs
+	}
+
+	// 53-bit * 53-bit = 107- or 108-bit
+	lo, hi := mullu(fm, gm)
+	shift := mantbits64 - 1
+	trunc := lo & (1<<shift - 1)
+	mant := hi<<(64-shift) | lo>>shift
+	return fpack64(fs^gs, mant, fe+ge-1, trunc)
+}
+
+func fdiv64(f, g uint64) uint64 {
+	fs, fm, fe, fi, fn := funpack64(f)
+	gs, gm, ge, gi, gn := funpack64(g)
+
+	// Special cases.
+	switch {
+	case fn || gn: // NaN / g = f / NaN = NaN
+		return nan64
+
+	case fi && gi: // ±Inf / ±Inf = NaN
+		return nan64
+
+	case !fi && !gi && fm == 0 && gm == 0: // 0 / 0 = NaN
+		return nan64
+
+	case fi, !gi && gm == 0: // Inf / g = f / 0 = Inf
+		return fs ^ gs ^ inf64
+
+	case gi, fm == 0: // f / Inf = 0 / g = Inf
+		return fs ^ gs ^ 0
+	}
+	_, _, _, _ = fi, fn, gi, gn
+
+	// 53-bit<<54 / 53-bit = 53- or 54-bit.
+	shift := mantbits64 + 2
+	q, r := divlu(fm>>(64-shift), fm<<shift, gm)
+	return fpack64(fs^gs, q, fe-ge-2, r)
+}
+
+func f64to32(f uint64) uint32 {
+	fs, fm, fe, fi, fn := funpack64(f)
+	if fn {
+		return nan32
+	}
+	fs32 := uint32(fs >> 32)
+	if fi {
+		return fs32 ^ inf32
+	}
+	const d = mantbits64 - mantbits32 - 1
+	return fpack32(fs32, uint32(fm>>d), fe-1, uint32(fm&(1<<d-1)))
+}
+
+func f32to64(f uint32) uint64 {
+	const d = mantbits64 - mantbits32
+	fs, fm, fe, fi, fn := funpack32(f)
+	if fn {
+		return nan64
+	}
+	fs64 := uint64(fs) << 32
+	if fi {
+		return fs64 ^ inf64
+	}
+	return fpack64(fs64, uint64(fm)<<d, fe, 0)
+}
+
+func fcmp64(f, g uint64) (cmp int32, isnan bool) {
+	fs, fm, _, fi, fn := funpack64(f)
+	gs, gm, _, gi, gn := funpack64(g)
+
+	switch {
+	case fn, gn: // flag NaN
+		return 0, true
+
+	case !fi && !gi && fm == 0 && gm == 0: // ±0 == ±0
+		return 0, false
+
+	case fs > gs: // f < 0, g > 0
+		return -1, false
+
+	case fs < gs: // f > 0, g < 0
+		return +1, false
+
+	// Same sign, not NaN.
+	// Can compare encodings directly now.
+	// Reverse for sign.
+	case fs == 0 && f < g, fs != 0 && f > g:
+		return -1, false
+
+	case fs == 0 && f > g, fs != 0 && f < g:
+		return +1, false
+	}
+
+	// f == g
+	return 0, false
+}
+
+func f64toint(f uint64) (val int64, ok bool) {
+	fs, fm, fe, fi, fn := funpack64(f)
+
+	switch {
+	case fi, fn: // NaN
+		return 0, false
+
+	case fe < -1: // f < 0.5
+		return 0, false
+
+	case fe > 63: // f >= 2^63
+		if fs != 0 && fm == 0 { // f == -2^63
+			return -1 << 63, true
+		}
+		if fs != 0 {
+			return 0, false
+		}
+		return 0, false
+	}
+
+	for fe > int(mantbits64) {
+		fe--
+		fm <<= 1
+	}
+	for fe < int(mantbits64) {
+		fe++
+		fm >>= 1
+	}
+	val = int64(fm)
+	if fs != 0 {
+		val = -val
+	}
+	return val, true
+}
+
+func fintto64(val int64) (f uint64) {
+	fs := uint64(val) & (1 << 63)
+	mant := uint64(val)
+	if fs != 0 {
+		mant = -mant
+	}
+	return fpack64(fs, mant, int(mantbits64), 0)
+}
+func fintto32(val int64) (f uint32) {
+	fs := uint64(val) & (1 << 63)
+	mant := uint64(val)
+	if fs != 0 {
+		mant = -mant
+	}
+	// Reduce mantissa size until it fits into a uint32.
+	// Keep track of the bits we throw away, and if any are
+	// nonzero or them into the lowest bit.
+	exp := int(mantbits32)
+	var trunc uint32
+	for mant >= 1<<32 {
+		trunc |= uint32(mant) & 1
+		mant >>= 1
+		exp++
+	}
+
+	return fpack32(uint32(fs>>32), uint32(mant), exp, trunc)
+}
+
+// 64x64 -> 128 multiply.
+// adapted from hacker's delight.
+func mullu(u, v uint64) (lo, hi uint64) {
+	const (
+		s    = 32
+		mask = 1<<s - 1
+	)
+	u0 := u & mask
+	u1 := u >> s
+	v0 := v & mask
+	v1 := v >> s
+	w0 := u0 * v0
+	t := u1*v0 + w0>>s
+	w1 := t & mask
+	w2 := t >> s
+	w1 += u0 * v1
+	return u * v, u1*v1 + w2 + w1>>s
+}
+
+// 128/64 -> 64 quotient, 64 remainder.
+// adapted from hacker's delight
+func divlu(u1, u0, v uint64) (q, r uint64) {
+	const b = 1 << 32
+
+	if u1 >= v {
+		return 1<<64 - 1, 1<<64 - 1
+	}
+
+	// s = nlz(v); v <<= s
+	s := uint(0)
+	for v&(1<<63) == 0 {
+		s++
+		v <<= 1
+	}
+
+	vn1 := v >> 32
+	vn0 := v & (1<<32 - 1)
+	un32 := u1<<s | u0>>(64-s)
+	un10 := u0 << s
+	un1 := un10 >> 32
+	un0 := un10 & (1<<32 - 1)
+	q1 := un32 / vn1
+	rhat := un32 - q1*vn1
+
+again1:
+	if q1 >= b || q1*vn0 > b*rhat+un1 {
+		q1--
+		rhat += vn1
+		if rhat < b {
+			goto again1
+		}
+	}
+
+	un21 := un32*b + un1 - q1*v
+	q0 := un21 / vn1
+	rhat = un21 - q0*vn1
+
+again2:
+	if q0 >= b || q0*vn0 > b*rhat+un0 {
+		q0--
+		rhat += vn1
+		if rhat < b {
+			goto again2
+		}
+	}
+
+	return q1*b + q0, (un21*b + un0 - q0*v) >> s
+}
+
+func fadd32(x, y uint32) uint32 {
+	return f64to32(fadd64(f32to64(x), f32to64(y)))
+}
+
+func fmul32(x, y uint32) uint32 {
+	return f64to32(fmul64(f32to64(x), f32to64(y)))
+}
+
+func fdiv32(x, y uint32) uint32 {
+	// TODO: are there double-rounding problems here? See issue 48807.
+	return f64to32(fdiv64(f32to64(x), f32to64(y)))
+}
+
+func feq32(x, y uint32) bool {
+	cmp, nan := fcmp64(f32to64(x), f32to64(y))
+	return cmp == 0 && !nan
+}
+
+func fgt32(x, y uint32) bool {
+	cmp, nan := fcmp64(f32to64(x), f32to64(y))
+	return cmp >= 1 && !nan
+}
+
+func fge32(x, y uint32) bool {
+	cmp, nan := fcmp64(f32to64(x), f32to64(y))
+	return cmp >= 0 && !nan
+}
+
+func feq64(x, y uint64) bool {
+	cmp, nan := fcmp64(x, y)
+	return cmp == 0 && !nan
+}
+
+func fgt64(x, y uint64) bool {
+	cmp, nan := fcmp64(x, y)
+	return cmp >= 1 && !nan
+}
+
+func fge64(x, y uint64) bool {
+	cmp, nan := fcmp64(x, y)
+	return cmp >= 0 && !nan
+}
+
+func fint32to32(x int32) uint32 {
+	return fintto32(int64(x))
+}
+
+func fint32to64(x int32) uint64 {
+	return fintto64(int64(x))
+}
+
+func fint64to32(x int64) uint32 {
+	return fintto32(x)
+}
+
+func fint64to64(x int64) uint64 {
+	return fintto64(x)
+}
+
+func f32toint32(x uint32) int32 {
+	val, _ := f64toint(f32to64(x))
+	return int32(val)
+}
+
+func f32toint64(x uint32) int64 {
+	val, _ := f64toint(f32to64(x))
+	return val
+}
+
+func f64toint32(x uint64) int32 {
+	val, _ := f64toint(x)
+	return int32(val)
+}
+
+func f64toint64(x uint64) int64 {
+	val, _ := f64toint(x)
+	return val
+}
+
+func f64touint64(x uint64) uint64 {
+	var m uint64 = 0x43e0000000000000 // float64 1<<63
+	if fgt64(m, x) {
+		return uint64(f64toint64(x))
+	}
+	y := fadd64(x, -m)
+	z := uint64(f64toint64(y))
+	return z | (1 << 63)
+}
+
+func f32touint64(x uint32) uint64 {
+	var m uint32 = 0x5f000000 // float32 1<<63
+	if fgt32(m, x) {
+		return uint64(f32toint64(x))
+	}
+	y := fadd32(x, -m)
+	z := uint64(f32toint64(y))
+	return z | (1 << 63)
+}
+
+func fuint64to64(x uint64) uint64 {
+	if int64(x) >= 0 {
+		return fint64to64(int64(x))
+	}
+	// See ../cmd/compile/internal/ssagen/ssa.go:uint64Tofloat
+	y := x & 1
+	z := x >> 1
+	z = z | y
+	r := fint64to64(int64(z))
+	return fadd64(r, r)
+}
+
+func fuint64to32(x uint64) uint32 {
+	if int64(x) >= 0 {
+		return fint64to32(int64(x))
+	}
+	// See ../cmd/compile/internal/ssagen/ssa.go:uint64Tofloat
+	y := x & 1
+	z := x >> 1
+	z = z | y
+	r := fint64to32(int64(z))
+	return fadd32(r, r)
+}
--
cgit v1.2.3
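
As a quick illustration of the encoding conventions the patched file relies on, here is a minimal standalone sketch of the raw IEEE 754 field split that funpack64 performs on a float64 bit pattern. The helper name unpack64, the use of math.Float64bits, and the example value are illustrative assumptions, not code from the patch; funpack64 itself additionally normalizes denormals, ORs in the implicit mantissa bit, and keeps the sign in place as a mask rather than shifting it down.

	package main

	import (
		"fmt"
		"math"
	)

	// Same field widths and bias as the constants in softfloat64.go.
	const (
		mantbits64 = 52
		expbits64  = 11
		bias64     = -1<<(expbits64-1) + 1
	)

	// unpack64 splits a float64 into sign bit, unbiased exponent,
	// and mantissa without the implicit leading 1 (normals only).
	func unpack64(f float64) (sign uint64, exp int, mant uint64) {
		bits := math.Float64bits(f)
		sign = bits >> (mantbits64 + expbits64)
		exp = int(bits>>mantbits64)&(1<<expbits64-1) + bias64
		mant = bits & (1<<mantbits64 - 1)
		return
	}

	func main() {
		// 6.5 = 1.101 (binary) * 2^2, so sign 0, exponent 2,
		// and the fraction bits 101 at the top of the mantissa.
		s, e, m := unpack64(6.5)
		fmt.Println(s, e, m) // 0 2 2814749767106560
	}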