author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-16 19:23:18 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-16 19:23:18 +0000
commit     43a123c1ae6613b3efeed291fa552ecd909d3acf (patch)
tree       fd92518b7024bc74031f78a1cf9e454b65e73665 /src/math/bits
parent     Initial commit. (diff)
download   golang-1.20-43a123c1ae6613b3efeed291fa552ecd909d3acf.tar.xz
           golang-1.20-43a123c1ae6613b3efeed291fa552ecd909d3acf.zip

Adding upstream version 1.20.14. (tags: upstream/1.20.14, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/math/bits')
-rw-r--r--  src/math/bits/bits.go                     599
-rw-r--r--  src/math/bits/bits_errors.go               16
-rw-r--r--  src/math/bits/bits_errors_bootstrap.go     23
-rw-r--r--  src/math/bits/bits_tables.go               79
-rw-r--r--  src/math/bits/bits_test.go               1347
-rw-r--r--  src/math/bits/example_math_test.go        202
-rw-r--r--  src/math/bits/example_test.go             210
-rw-r--r--  src/math/bits/export_test.go                7
-rw-r--r--  src/math/bits/make_examples.go            113
-rw-r--r--  src/math/bits/make_tables.go               92
10 files changed, 2688 insertions, 0 deletions
diff --git a/src/math/bits/bits.go b/src/math/bits/bits.go
new file mode 100644
index 0000000..c1c7b79
--- /dev/null
+++ b/src/math/bits/bits.go
@@ -0,0 +1,599 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run make_tables.go
+
+// Package bits implements bit counting and manipulation
+// functions for the predeclared unsigned integer types.
+//
+// Functions in this package may be implemented directly by
+// the compiler, for better performance. For those functions
+// the code in this package will not be used. Which
+// functions are implemented by the compiler depends on the
+// architecture and the Go release.
+package bits
+
+const uintSize = 32 << (^uint(0) >> 63) // 32 or 64
+
+// UintSize is the size of a uint in bits.
+const UintSize = uintSize
+
+// --- LeadingZeros ---
+
+// LeadingZeros returns the number of leading zero bits in x; the result is UintSize for x == 0.
+func LeadingZeros(x uint) int { return UintSize - Len(x) }
+
+// LeadingZeros8 returns the number of leading zero bits in x; the result is 8 for x == 0.
+func LeadingZeros8(x uint8) int { return 8 - Len8(x) }
+
+// LeadingZeros16 returns the number of leading zero bits in x; the result is 16 for x == 0.
+func LeadingZeros16(x uint16) int { return 16 - Len16(x) }
+
+// LeadingZeros32 returns the number of leading zero bits in x; the result is 32 for x == 0.
+func LeadingZeros32(x uint32) int { return 32 - Len32(x) }
+
+// LeadingZeros64 returns the number of leading zero bits in x; the result is 64 for x == 0.
+func LeadingZeros64(x uint64) int { return 64 - Len64(x) }
+
+// --- TrailingZeros ---
+
+// See http://supertech.csail.mit.edu/papers/debruijn.pdf
+const deBruijn32 = 0x077CB531
+
+var deBruijn32tab = [32]byte{
+	0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+	31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
+}
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+
+var deBruijn64tab = [64]byte{
+	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+// TrailingZeros returns the number of trailing zero bits in x; the result is UintSize for x == 0.
+func TrailingZeros(x uint) int {
+	if UintSize == 32 {
+		return TrailingZeros32(uint32(x))
+	}
+	return TrailingZeros64(uint64(x))
+}
+
+// TrailingZeros8 returns the number of trailing zero bits in x; the result is 8 for x == 0.
+func TrailingZeros8(x uint8) int {
+	return int(ntz8tab[x])
+}
+
+// TrailingZeros16 returns the number of trailing zero bits in x; the result is 16 for x == 0.
+func TrailingZeros16(x uint16) int {
+	if x == 0 {
+		return 16
+	}
+	// see comment in TrailingZeros64
+	return int(deBruijn32tab[uint32(x&-x)*deBruijn32>>(32-5)])
+}
+
+// TrailingZeros32 returns the number of trailing zero bits in x; the result is 32 for x == 0.
+func TrailingZeros32(x uint32) int {
+	if x == 0 {
+		return 32
+	}
+	// see comment in TrailingZeros64
+	return int(deBruijn32tab[(x&-x)*deBruijn32>>(32-5)])
+}
+
+// TrailingZeros64 returns the number of trailing zero bits in x; the result is 64 for x == 0.
+func TrailingZeros64(x uint64) int {
+	if x == 0 {
+		return 64
+	}
+	// If popcount is fast, replace code below with return popcount(^x & (x - 1)).
+	//
+	// x & -x leaves only the right-most bit set in the word.
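+	// (Editorial illustration, not upstream text: for x = 12 = 0b1100,
+	// x & -x = 0b0100, so only bit 2 survives, and TrailingZeros64(12) = 2.)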
Let k be the + // index of that bit. Since only a single bit is set, the value is two + // to the power of k. Multiplying by a power of two is equivalent to + // left shifting, in this case by k bits. The de Bruijn (64 bit) constant + // is such that all six bit, consecutive substrings are distinct. + // Therefore, if we have a left shifted version of this constant we can + // find by how many bits it was shifted by looking at which six bit + // substring ended up at the top of the word. + // (Knuth, volume 4, section 7.3.1) + return int(deBruijn64tab[(x&-x)*deBruijn64>>(64-6)]) +} + +// --- OnesCount --- + +const m0 = 0x5555555555555555 // 01010101 ... +const m1 = 0x3333333333333333 // 00110011 ... +const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... +const m3 = 0x00ff00ff00ff00ff // etc. +const m4 = 0x0000ffff0000ffff + +// OnesCount returns the number of one bits ("population count") in x. +func OnesCount(x uint) int { + if UintSize == 32 { + return OnesCount32(uint32(x)) + } + return OnesCount64(uint64(x)) +} + +// OnesCount8 returns the number of one bits ("population count") in x. +func OnesCount8(x uint8) int { + return int(pop8tab[x]) +} + +// OnesCount16 returns the number of one bits ("population count") in x. +func OnesCount16(x uint16) int { + return int(pop8tab[x>>8] + pop8tab[x&0xff]) +} + +// OnesCount32 returns the number of one bits ("population count") in x. +func OnesCount32(x uint32) int { + return int(pop8tab[x>>24] + pop8tab[x>>16&0xff] + pop8tab[x>>8&0xff] + pop8tab[x&0xff]) +} + +// OnesCount64 returns the number of one bits ("population count") in x. +func OnesCount64(x uint64) int { + // Implementation: Parallel summing of adjacent bits. + // See "Hacker's Delight", Chap. 5: Counting Bits. + // The following pattern shows the general approach: + // + // x = x>>1&(m0&m) + x&(m0&m) + // x = x>>2&(m1&m) + x&(m1&m) + // x = x>>4&(m2&m) + x&(m2&m) + // x = x>>8&(m3&m) + x&(m3&m) + // x = x>>16&(m4&m) + x&(m4&m) + // x = x>>32&(m5&m) + x&(m5&m) + // return int(x) + // + // Masking (& operations) can be left away when there's no + // danger that a field's sum will carry over into the next + // field: Since the result cannot be > 64, 8 bits is enough + // and we can ignore the masks for the shifts by 8 and up. + // Per "Hacker's Delight", the first line can be simplified + // more, but it saves at best one instruction, so we leave + // it alone for clarity. + const m = 1<<64 - 1 + x = x>>1&(m0&m) + x&(m0&m) + x = x>>2&(m1&m) + x&(m1&m) + x = (x>>4 + x) & (m2 & m) + x += x >> 8 + x += x >> 16 + x += x >> 32 + return int(x) & (1<<7 - 1) +} + +// --- RotateLeft --- + +// RotateLeft returns the value of x rotated left by (k mod UintSize) bits. +// To rotate x right by k bits, call RotateLeft(x, -k). +// +// This function's execution time does not depend on the inputs. +func RotateLeft(x uint, k int) uint { + if UintSize == 32 { + return uint(RotateLeft32(uint32(x), k)) + } + return uint(RotateLeft64(uint64(x), k)) +} + +// RotateLeft8 returns the value of x rotated left by (k mod 8) bits. +// To rotate x right by k bits, call RotateLeft8(x, -k). +// +// This function's execution time does not depend on the inputs. +func RotateLeft8(x uint8, k int) uint8 { + const n = 8 + s := uint(k) & (n - 1) + return x<<s | x>>(n-s) +} + +// RotateLeft16 returns the value of x rotated left by (k mod 16) bits. +// To rotate x right by k bits, call RotateLeft16(x, -k). +// +// This function's execution time does not depend on the inputs. 
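+// For instance, RotateLeft16(0x1234, 4) == 0x2341, while a negative
+// rotation count rotates right: RotateLeft16(0x1234, -4) == 0x4123.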
+func RotateLeft16(x uint16, k int) uint16 { + const n = 16 + s := uint(k) & (n - 1) + return x<<s | x>>(n-s) +} + +// RotateLeft32 returns the value of x rotated left by (k mod 32) bits. +// To rotate x right by k bits, call RotateLeft32(x, -k). +// +// This function's execution time does not depend on the inputs. +func RotateLeft32(x uint32, k int) uint32 { + const n = 32 + s := uint(k) & (n - 1) + return x<<s | x>>(n-s) +} + +// RotateLeft64 returns the value of x rotated left by (k mod 64) bits. +// To rotate x right by k bits, call RotateLeft64(x, -k). +// +// This function's execution time does not depend on the inputs. +func RotateLeft64(x uint64, k int) uint64 { + const n = 64 + s := uint(k) & (n - 1) + return x<<s | x>>(n-s) +} + +// --- Reverse --- + +// Reverse returns the value of x with its bits in reversed order. +func Reverse(x uint) uint { + if UintSize == 32 { + return uint(Reverse32(uint32(x))) + } + return uint(Reverse64(uint64(x))) +} + +// Reverse8 returns the value of x with its bits in reversed order. +func Reverse8(x uint8) uint8 { + return rev8tab[x] +} + +// Reverse16 returns the value of x with its bits in reversed order. +func Reverse16(x uint16) uint16 { + return uint16(rev8tab[x>>8]) | uint16(rev8tab[x&0xff])<<8 +} + +// Reverse32 returns the value of x with its bits in reversed order. +func Reverse32(x uint32) uint32 { + const m = 1<<32 - 1 + x = x>>1&(m0&m) | x&(m0&m)<<1 + x = x>>2&(m1&m) | x&(m1&m)<<2 + x = x>>4&(m2&m) | x&(m2&m)<<4 + return ReverseBytes32(x) +} + +// Reverse64 returns the value of x with its bits in reversed order. +func Reverse64(x uint64) uint64 { + const m = 1<<64 - 1 + x = x>>1&(m0&m) | x&(m0&m)<<1 + x = x>>2&(m1&m) | x&(m1&m)<<2 + x = x>>4&(m2&m) | x&(m2&m)<<4 + return ReverseBytes64(x) +} + +// --- ReverseBytes --- + +// ReverseBytes returns the value of x with its bytes in reversed order. +// +// This function's execution time does not depend on the inputs. +func ReverseBytes(x uint) uint { + if UintSize == 32 { + return uint(ReverseBytes32(uint32(x))) + } + return uint(ReverseBytes64(uint64(x))) +} + +// ReverseBytes16 returns the value of x with its bytes in reversed order. +// +// This function's execution time does not depend on the inputs. +func ReverseBytes16(x uint16) uint16 { + return x>>8 | x<<8 +} + +// ReverseBytes32 returns the value of x with its bytes in reversed order. +// +// This function's execution time does not depend on the inputs. +func ReverseBytes32(x uint32) uint32 { + const m = 1<<32 - 1 + x = x>>8&(m3&m) | x&(m3&m)<<8 + return x>>16 | x<<16 +} + +// ReverseBytes64 returns the value of x with its bytes in reversed order. +// +// This function's execution time does not depend on the inputs. +func ReverseBytes64(x uint64) uint64 { + const m = 1<<64 - 1 + x = x>>8&(m3&m) | x&(m3&m)<<8 + x = x>>16&(m4&m) | x&(m4&m)<<16 + return x>>32 | x<<32 +} + +// --- Len --- + +// Len returns the minimum number of bits required to represent x; the result is 0 for x == 0. +func Len(x uint) int { + if UintSize == 32 { + return Len32(uint32(x)) + } + return Len64(uint64(x)) +} + +// Len8 returns the minimum number of bits required to represent x; the result is 0 for x == 0. +func Len8(x uint8) int { + return int(len8tab[x]) +} + +// Len16 returns the minimum number of bits required to represent x; the result is 0 for x == 0. +func Len16(x uint16) (n int) { + if x >= 1<<8 { + x >>= 8 + n = 8 + } + return n + int(len8tab[x]) +} + +// Len32 returns the minimum number of bits required to represent x; the result is 0 for x == 0. 
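+// For instance, Len32(1) == 1, Len32(0xffff) == 16, and Len32(0x10000) == 17.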
+func Len32(x uint32) (n int) { + if x >= 1<<16 { + x >>= 16 + n = 16 + } + if x >= 1<<8 { + x >>= 8 + n += 8 + } + return n + int(len8tab[x]) +} + +// Len64 returns the minimum number of bits required to represent x; the result is 0 for x == 0. +func Len64(x uint64) (n int) { + if x >= 1<<32 { + x >>= 32 + n = 32 + } + if x >= 1<<16 { + x >>= 16 + n += 16 + } + if x >= 1<<8 { + x >>= 8 + n += 8 + } + return n + int(len8tab[x]) +} + +// --- Add with carry --- + +// Add returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +// +// This function's execution time does not depend on the inputs. +func Add(x, y, carry uint) (sum, carryOut uint) { + if UintSize == 32 { + s32, c32 := Add32(uint32(x), uint32(y), uint32(carry)) + return uint(s32), uint(c32) + } + s64, c64 := Add64(uint64(x), uint64(y), uint64(carry)) + return uint(s64), uint(c64) +} + +// Add32 returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +// +// This function's execution time does not depend on the inputs. +func Add32(x, y, carry uint32) (sum, carryOut uint32) { + sum64 := uint64(x) + uint64(y) + uint64(carry) + sum = uint32(sum64) + carryOut = uint32(sum64 >> 32) + return +} + +// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +// +// This function's execution time does not depend on the inputs. +func Add64(x, y, carry uint64) (sum, carryOut uint64) { + sum = x + y + carry + // The sum will overflow if both top bits are set (x & y) or if one of them + // is (x | y), and a carry from the lower place happened. If such a carry + // happens, the top bit will be 1 + 0 + 1 = 0 (&^ sum). + carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 + return +} + +// --- Subtract with borrow --- + +// Sub returns the difference of x, y and borrow: diff = x - y - borrow. +// The borrow input must be 0 or 1; otherwise the behavior is undefined. +// The borrowOut output is guaranteed to be 0 or 1. +// +// This function's execution time does not depend on the inputs. +func Sub(x, y, borrow uint) (diff, borrowOut uint) { + if UintSize == 32 { + d32, b32 := Sub32(uint32(x), uint32(y), uint32(borrow)) + return uint(d32), uint(b32) + } + d64, b64 := Sub64(uint64(x), uint64(y), uint64(borrow)) + return uint(d64), uint(b64) +} + +// Sub32 returns the difference of x, y and borrow, diff = x - y - borrow. +// The borrow input must be 0 or 1; otherwise the behavior is undefined. +// The borrowOut output is guaranteed to be 0 or 1. +// +// This function's execution time does not depend on the inputs. +func Sub32(x, y, borrow uint32) (diff, borrowOut uint32) { + diff = x - y - borrow + // The difference will underflow if the top bit of x is not set and the top + // bit of y is set (^x & y) or if they are the same (^(x ^ y)) and a borrow + // from the lower place happens. If that borrow happens, the result will be + // 1 - 1 - 1 = 0 - 0 - 1 = 1 (& diff). + borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 31 + return +} + +// Sub64 returns the difference of x, y and borrow: diff = x - y - borrow. +// The borrow input must be 0 or 1; otherwise the behavior is undefined. +// The borrowOut output is guaranteed to be 0 or 1. 
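+// For instance, Sub64(5, 3, 1) returns (1, 0), while Sub64(3, 5, 0)
+// wraps around and returns (1<<64 - 2, 1): borrowOut reports the underflow.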
+// +// This function's execution time does not depend on the inputs. +func Sub64(x, y, borrow uint64) (diff, borrowOut uint64) { + diff = x - y - borrow + // See Sub32 for the bit logic. + borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 + return +} + +// --- Full-width multiply --- + +// Mul returns the full-width product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +// +// This function's execution time does not depend on the inputs. +func Mul(x, y uint) (hi, lo uint) { + if UintSize == 32 { + h, l := Mul32(uint32(x), uint32(y)) + return uint(h), uint(l) + } + h, l := Mul64(uint64(x), uint64(y)) + return uint(h), uint(l) +} + +// Mul32 returns the 64-bit product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +// +// This function's execution time does not depend on the inputs. +func Mul32(x, y uint32) (hi, lo uint32) { + tmp := uint64(x) * uint64(y) + hi, lo = uint32(tmp>>32), uint32(tmp) + return +} + +// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +// +// This function's execution time does not depend on the inputs. +func Mul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} + +// --- Full-width divide --- + +// Div returns the quotient and remainder of (hi, lo) divided by y: +// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper +// half in parameter hi and the lower half in parameter lo. +// Div panics for y == 0 (division by zero) or y <= hi (quotient overflow). +func Div(hi, lo, y uint) (quo, rem uint) { + if UintSize == 32 { + q, r := Div32(uint32(hi), uint32(lo), uint32(y)) + return uint(q), uint(r) + } + q, r := Div64(uint64(hi), uint64(lo), uint64(y)) + return uint(q), uint(r) +} + +// Div32 returns the quotient and remainder of (hi, lo) divided by y: +// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper +// half in parameter hi and the lower half in parameter lo. +// Div32 panics for y == 0 (division by zero) or y <= hi (quotient overflow). +func Div32(hi, lo, y uint32) (quo, rem uint32) { + if y != 0 && y <= hi { + panic(overflowError) + } + z := uint64(hi)<<32 | uint64(lo) + quo, rem = uint32(z/uint64(y)), uint32(z%uint64(y)) + return +} + +// Div64 returns the quotient and remainder of (hi, lo) divided by y: +// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper +// half in parameter hi and the lower half in parameter lo. +// Div64 panics for y == 0 (division by zero) or y <= hi (quotient overflow). +func Div64(hi, lo, y uint64) (quo, rem uint64) { + if y == 0 { + panic(divideError) + } + if y <= hi { + panic(overflowError) + } + + // If high part is zero, we can directly return the results. 
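+	// (After the easy hi == 0 case below, the general path is, in essence,
+	// schoolbook long division (Knuth's Algorithm D) in base 2^32: normalize
+	// y so that its top bit is set, estimate each 32-bit quotient digit from
+	// the leading bits of the dividend, and correct any estimate that is too
+	// large; each correction loop adjusts its digit at most twice.)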
+ if hi == 0 { + return lo / y, lo % y + } + + s := uint(LeadingZeros64(y)) + y <<= s + + const ( + two32 = 1 << 32 + mask32 = two32 - 1 + ) + yn1 := y >> 32 + yn0 := y & mask32 + un32 := hi<<s | lo>>(64-s) + un10 := lo << s + un1 := un10 >> 32 + un0 := un10 & mask32 + q1 := un32 / yn1 + rhat := un32 - q1*yn1 + + for q1 >= two32 || q1*yn0 > two32*rhat+un1 { + q1-- + rhat += yn1 + if rhat >= two32 { + break + } + } + + un21 := un32*two32 + un1 - q1*y + q0 := un21 / yn1 + rhat = un21 - q0*yn1 + + for q0 >= two32 || q0*yn0 > two32*rhat+un0 { + q0-- + rhat += yn1 + if rhat >= two32 { + break + } + } + + return q1*two32 + q0, (un21*two32 + un0 - q0*y) >> s +} + +// Rem returns the remainder of (hi, lo) divided by y. Rem panics for +// y == 0 (division by zero) but, unlike Div, it doesn't panic on a +// quotient overflow. +func Rem(hi, lo, y uint) uint { + if UintSize == 32 { + return uint(Rem32(uint32(hi), uint32(lo), uint32(y))) + } + return uint(Rem64(uint64(hi), uint64(lo), uint64(y))) +} + +// Rem32 returns the remainder of (hi, lo) divided by y. Rem32 panics +// for y == 0 (division by zero) but, unlike Div32, it doesn't panic +// on a quotient overflow. +func Rem32(hi, lo, y uint32) uint32 { + return uint32((uint64(hi)<<32 | uint64(lo)) % uint64(y)) +} + +// Rem64 returns the remainder of (hi, lo) divided by y. Rem64 panics +// for y == 0 (division by zero) but, unlike Div64, it doesn't panic +// on a quotient overflow. +func Rem64(hi, lo, y uint64) uint64 { + // We scale down hi so that hi < y, then use Div64 to compute the + // rem with the guarantee that it won't panic on quotient overflow. + // Given that + // hi ≡ hi%y (mod y) + // we have + // hi<<64 + lo ≡ (hi%y)<<64 + lo (mod y) + _, rem := Div64(hi%y, lo, y) + return rem +} diff --git a/src/math/bits/bits_errors.go b/src/math/bits/bits_errors.go new file mode 100644 index 0000000..61cb5c9 --- /dev/null +++ b/src/math/bits/bits_errors.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !compiler_bootstrap +// +build !compiler_bootstrap + +package bits + +import _ "unsafe" + +//go:linkname overflowError runtime.overflowError +var overflowError error + +//go:linkname divideError runtime.divideError +var divideError error diff --git a/src/math/bits/bits_errors_bootstrap.go b/src/math/bits/bits_errors_bootstrap.go new file mode 100644 index 0000000..4d610d3 --- /dev/null +++ b/src/math/bits/bits_errors_bootstrap.go @@ -0,0 +1,23 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build compiler_bootstrap +// +build compiler_bootstrap + +// This version used only for bootstrap (on this path we want +// to avoid use of go:linkname as applied to variables). + +package bits + +type errorString string + +func (e errorString) RuntimeError() {} + +func (e errorString) Error() string { + return "runtime error: " + string(e) +} + +var overflowError = error(errorString("integer overflow")) + +var divideError = error(errorString("integer divide by zero")) diff --git a/src/math/bits/bits_tables.go b/src/math/bits/bits_tables.go new file mode 100644 index 0000000..f869b8d --- /dev/null +++ b/src/math/bits/bits_tables.go @@ -0,0 +1,79 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by go run make_tables.go. DO NOT EDIT. + +package bits + +const ntz8tab = "" + + "\x08\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x05\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x06\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x05\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x07\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x05\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x06\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x05\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + +const pop8tab = "" + + "\x00\x01\x01\x02\x01\x02\x02\x03\x01\x02\x02\x03\x02\x03\x03\x04" + + "\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05" + + "\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05" + + "\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06" + + "\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05" + + "\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06" + + "\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06" + + "\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07" + + "\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05" + + "\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06" + + "\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06" + + "\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07" + + "\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06" + + "\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07" + + "\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07" + + "\x04\x05\x05\x06\x05\x06\x06\x07\x05\x06\x06\x07\x06\x07\x07\x08" + +const rev8tab = "" + + "\x00\x80\x40\xc0\x20\xa0\x60\xe0\x10\x90\x50\xd0\x30\xb0\x70\xf0" + + "\x08\x88\x48\xc8\x28\xa8\x68\xe8\x18\x98\x58\xd8\x38\xb8\x78\xf8" + + "\x04\x84\x44\xc4\x24\xa4\x64\xe4\x14\x94\x54\xd4\x34\xb4\x74\xf4" + + "\x0c\x8c\x4c\xcc\x2c\xac\x6c\xec\x1c\x9c\x5c\xdc\x3c\xbc\x7c\xfc" + + "\x02\x82\x42\xc2\x22\xa2\x62\xe2\x12\x92\x52\xd2\x32\xb2\x72\xf2" + + "\x0a\x8a\x4a\xca\x2a\xaa\x6a\xea\x1a\x9a\x5a\xda\x3a\xba\x7a\xfa" + + "\x06\x86\x46\xc6\x26\xa6\x66\xe6\x16\x96\x56\xd6\x36\xb6\x76\xf6" + + "\x0e\x8e\x4e\xce\x2e\xae\x6e\xee\x1e\x9e\x5e\xde\x3e\xbe\x7e\xfe" + + "\x01\x81\x41\xc1\x21\xa1\x61\xe1\x11\x91\x51\xd1\x31\xb1\x71\xf1" + + "\x09\x89\x49\xc9\x29\xa9\x69\xe9\x19\x99\x59\xd9\x39\xb9\x79\xf9" + + "\x05\x85\x45\xc5\x25\xa5\x65\xe5\x15\x95\x55\xd5\x35\xb5\x75\xf5" + + "\x0d\x8d\x4d\xcd\x2d\xad\x6d\xed\x1d\x9d\x5d\xdd\x3d\xbd\x7d\xfd" + + "\x03\x83\x43\xc3\x23\xa3\x63\xe3\x13\x93\x53\xd3\x33\xb3\x73\xf3" + + "\x0b\x8b\x4b\xcb\x2b\xab\x6b\xeb\x1b\x9b\x5b\xdb\x3b\xbb\x7b\xfb" + + 
"\x07\x87\x47\xc7\x27\xa7\x67\xe7\x17\x97\x57\xd7\x37\xb7\x77\xf7" + + "\x0f\x8f\x4f\xcf\x2f\xaf\x6f\xef\x1f\x9f\x5f\xdf\x3f\xbf\x7f\xff" + +const len8tab = "" + + "\x00\x01\x02\x02\x03\x03\x03\x03\x04\x04\x04\x04\x04\x04\x04\x04" + + "\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05" + + "\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06" + + "\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06" + + "\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07" + + "\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07" + + "\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07" + + "\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" diff --git a/src/math/bits/bits_test.go b/src/math/bits/bits_test.go new file mode 100644 index 0000000..23b4539 --- /dev/null +++ b/src/math/bits/bits_test.go @@ -0,0 +1,1347 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bits_test + +import ( + . "math/bits" + "runtime" + "testing" + "unsafe" +) + +func TestUintSize(t *testing.T) { + var x uint + if want := unsafe.Sizeof(x) * 8; UintSize != want { + t.Fatalf("UintSize = %d; want %d", UintSize, want) + } +} + +func TestLeadingZeros(t *testing.T) { + for i := 0; i < 256; i++ { + nlz := tab[i].nlz + for k := 0; k < 64-8; k++ { + x := uint64(i) << uint(k) + if x <= 1<<8-1 { + got := LeadingZeros8(uint8(x)) + want := nlz - k + (8 - 8) + if x == 0 { + want = 8 + } + if got != want { + t.Fatalf("LeadingZeros8(%#02x) == %d; want %d", x, got, want) + } + } + + if x <= 1<<16-1 { + got := LeadingZeros16(uint16(x)) + want := nlz - k + (16 - 8) + if x == 0 { + want = 16 + } + if got != want { + t.Fatalf("LeadingZeros16(%#04x) == %d; want %d", x, got, want) + } + } + + if x <= 1<<32-1 { + got := LeadingZeros32(uint32(x)) + want := nlz - k + (32 - 8) + if x == 0 { + want = 32 + } + if got != want { + t.Fatalf("LeadingZeros32(%#08x) == %d; want %d", x, got, want) + } + if UintSize == 32 { + got = LeadingZeros(uint(x)) + if got != want { + t.Fatalf("LeadingZeros(%#08x) == %d; want %d", x, got, want) + } + } + } + + if x <= 1<<64-1 { + got := LeadingZeros64(uint64(x)) + want := nlz - k + (64 - 8) + if x == 0 { + want = 64 + } + if got != want { + t.Fatalf("LeadingZeros64(%#016x) == %d; want %d", x, got, want) + } + if UintSize == 64 { + got = LeadingZeros(uint(x)) + if got != want { + t.Fatalf("LeadingZeros(%#016x) == %d; want %d", x, got, want) + } + } + } + } + } +} + +// Exported (global) variable serving as input for some +// of the benchmarks to ensure side-effect free calls +// are not optimized away. +var Input uint64 = DeBruijn64 + +// Exported (global) variable to store function results +// during benchmarking to ensure side-effect free calls +// are not optimized away. 
+var Output int + +func BenchmarkLeadingZeros(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += LeadingZeros(uint(Input) >> (uint(i) % UintSize)) + } + Output = s +} + +func BenchmarkLeadingZeros8(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += LeadingZeros8(uint8(Input) >> (uint(i) % 8)) + } + Output = s +} + +func BenchmarkLeadingZeros16(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += LeadingZeros16(uint16(Input) >> (uint(i) % 16)) + } + Output = s +} + +func BenchmarkLeadingZeros32(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += LeadingZeros32(uint32(Input) >> (uint(i) % 32)) + } + Output = s +} + +func BenchmarkLeadingZeros64(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += LeadingZeros64(uint64(Input) >> (uint(i) % 64)) + } + Output = s +} + +func TestTrailingZeros(t *testing.T) { + for i := 0; i < 256; i++ { + ntz := tab[i].ntz + for k := 0; k < 64-8; k++ { + x := uint64(i) << uint(k) + want := ntz + k + if x <= 1<<8-1 { + got := TrailingZeros8(uint8(x)) + if x == 0 { + want = 8 + } + if got != want { + t.Fatalf("TrailingZeros8(%#02x) == %d; want %d", x, got, want) + } + } + + if x <= 1<<16-1 { + got := TrailingZeros16(uint16(x)) + if x == 0 { + want = 16 + } + if got != want { + t.Fatalf("TrailingZeros16(%#04x) == %d; want %d", x, got, want) + } + } + + if x <= 1<<32-1 { + got := TrailingZeros32(uint32(x)) + if x == 0 { + want = 32 + } + if got != want { + t.Fatalf("TrailingZeros32(%#08x) == %d; want %d", x, got, want) + } + if UintSize == 32 { + got = TrailingZeros(uint(x)) + if got != want { + t.Fatalf("TrailingZeros(%#08x) == %d; want %d", x, got, want) + } + } + } + + if x <= 1<<64-1 { + got := TrailingZeros64(uint64(x)) + if x == 0 { + want = 64 + } + if got != want { + t.Fatalf("TrailingZeros64(%#016x) == %d; want %d", x, got, want) + } + if UintSize == 64 { + got = TrailingZeros(uint(x)) + if got != want { + t.Fatalf("TrailingZeros(%#016x) == %d; want %d", x, got, want) + } + } + } + } + } +} + +func BenchmarkTrailingZeros(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += TrailingZeros(uint(Input) << (uint(i) % UintSize)) + } + Output = s +} + +func BenchmarkTrailingZeros8(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += TrailingZeros8(uint8(Input) << (uint(i) % 8)) + } + Output = s +} + +func BenchmarkTrailingZeros16(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += TrailingZeros16(uint16(Input) << (uint(i) % 16)) + } + Output = s +} + +func BenchmarkTrailingZeros32(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += TrailingZeros32(uint32(Input) << (uint(i) % 32)) + } + Output = s +} + +func BenchmarkTrailingZeros64(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += TrailingZeros64(uint64(Input) << (uint(i) % 64)) + } + Output = s +} + +func TestOnesCount(t *testing.T) { + var x uint64 + for i := 0; i <= 64; i++ { + testOnesCount(t, x, i) + x = x<<1 | 1 + } + + for i := 64; i >= 0; i-- { + testOnesCount(t, x, i) + x = x << 1 + } + + for i := 0; i < 256; i++ { + for k := 0; k < 64-8; k++ { + testOnesCount(t, uint64(i)<<uint(k), tab[i].pop) + } + } +} + +func testOnesCount(t *testing.T, x uint64, want int) { + if x <= 1<<8-1 { + got := OnesCount8(uint8(x)) + if got != want { + t.Fatalf("OnesCount8(%#02x) == %d; want %d", uint8(x), got, want) + } + } + + if x <= 1<<16-1 { + got := OnesCount16(uint16(x)) + if got != want { + t.Fatalf("OnesCount16(%#04x) == %d; want %d", uint16(x), got, want) + } + } + + if x <= 1<<32-1 { + got := 
OnesCount32(uint32(x)) + if got != want { + t.Fatalf("OnesCount32(%#08x) == %d; want %d", uint32(x), got, want) + } + if UintSize == 32 { + got = OnesCount(uint(x)) + if got != want { + t.Fatalf("OnesCount(%#08x) == %d; want %d", uint32(x), got, want) + } + } + } + + if x <= 1<<64-1 { + got := OnesCount64(uint64(x)) + if got != want { + t.Fatalf("OnesCount64(%#016x) == %d; want %d", x, got, want) + } + if UintSize == 64 { + got = OnesCount(uint(x)) + if got != want { + t.Fatalf("OnesCount(%#016x) == %d; want %d", x, got, want) + } + } + } +} + +func BenchmarkOnesCount(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += OnesCount(uint(Input)) + } + Output = s +} + +func BenchmarkOnesCount8(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += OnesCount8(uint8(Input)) + } + Output = s +} + +func BenchmarkOnesCount16(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += OnesCount16(uint16(Input)) + } + Output = s +} + +func BenchmarkOnesCount32(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += OnesCount32(uint32(Input)) + } + Output = s +} + +func BenchmarkOnesCount64(b *testing.B) { + var s int + for i := 0; i < b.N; i++ { + s += OnesCount64(uint64(Input)) + } + Output = s +} + +func TestRotateLeft(t *testing.T) { + var m uint64 = DeBruijn64 + + for k := uint(0); k < 128; k++ { + x8 := uint8(m) + got8 := RotateLeft8(x8, int(k)) + want8 := x8<<(k&0x7) | x8>>(8-k&0x7) + if got8 != want8 { + t.Fatalf("RotateLeft8(%#02x, %d) == %#02x; want %#02x", x8, k, got8, want8) + } + got8 = RotateLeft8(want8, -int(k)) + if got8 != x8 { + t.Fatalf("RotateLeft8(%#02x, -%d) == %#02x; want %#02x", want8, k, got8, x8) + } + + x16 := uint16(m) + got16 := RotateLeft16(x16, int(k)) + want16 := x16<<(k&0xf) | x16>>(16-k&0xf) + if got16 != want16 { + t.Fatalf("RotateLeft16(%#04x, %d) == %#04x; want %#04x", x16, k, got16, want16) + } + got16 = RotateLeft16(want16, -int(k)) + if got16 != x16 { + t.Fatalf("RotateLeft16(%#04x, -%d) == %#04x; want %#04x", want16, k, got16, x16) + } + + x32 := uint32(m) + got32 := RotateLeft32(x32, int(k)) + want32 := x32<<(k&0x1f) | x32>>(32-k&0x1f) + if got32 != want32 { + t.Fatalf("RotateLeft32(%#08x, %d) == %#08x; want %#08x", x32, k, got32, want32) + } + got32 = RotateLeft32(want32, -int(k)) + if got32 != x32 { + t.Fatalf("RotateLeft32(%#08x, -%d) == %#08x; want %#08x", want32, k, got32, x32) + } + if UintSize == 32 { + x := uint(m) + got := RotateLeft(x, int(k)) + want := x<<(k&0x1f) | x>>(32-k&0x1f) + if got != want { + t.Fatalf("RotateLeft(%#08x, %d) == %#08x; want %#08x", x, k, got, want) + } + got = RotateLeft(want, -int(k)) + if got != x { + t.Fatalf("RotateLeft(%#08x, -%d) == %#08x; want %#08x", want, k, got, x) + } + } + + x64 := uint64(m) + got64 := RotateLeft64(x64, int(k)) + want64 := x64<<(k&0x3f) | x64>>(64-k&0x3f) + if got64 != want64 { + t.Fatalf("RotateLeft64(%#016x, %d) == %#016x; want %#016x", x64, k, got64, want64) + } + got64 = RotateLeft64(want64, -int(k)) + if got64 != x64 { + t.Fatalf("RotateLeft64(%#016x, -%d) == %#016x; want %#016x", want64, k, got64, x64) + } + if UintSize == 64 { + x := uint(m) + got := RotateLeft(x, int(k)) + want := x<<(k&0x3f) | x>>(64-k&0x3f) + if got != want { + t.Fatalf("RotateLeft(%#016x, %d) == %#016x; want %#016x", x, k, got, want) + } + got = RotateLeft(want, -int(k)) + if got != x { + t.Fatalf("RotateLeft(%#08x, -%d) == %#08x; want %#08x", want, k, got, x) + } + } + } +} + +func BenchmarkRotateLeft(b *testing.B) { + var s uint + for i := 0; i < b.N; i++ { + s += 
RotateLeft(uint(Input), i) + } + Output = int(s) +} + +func BenchmarkRotateLeft8(b *testing.B) { + var s uint8 + for i := 0; i < b.N; i++ { + s += RotateLeft8(uint8(Input), i) + } + Output = int(s) +} + +func BenchmarkRotateLeft16(b *testing.B) { + var s uint16 + for i := 0; i < b.N; i++ { + s += RotateLeft16(uint16(Input), i) + } + Output = int(s) +} + +func BenchmarkRotateLeft32(b *testing.B) { + var s uint32 + for i := 0; i < b.N; i++ { + s += RotateLeft32(uint32(Input), i) + } + Output = int(s) +} + +func BenchmarkRotateLeft64(b *testing.B) { + var s uint64 + for i := 0; i < b.N; i++ { + s += RotateLeft64(uint64(Input), i) + } + Output = int(s) +} + +func TestReverse(t *testing.T) { + // test each bit + for i := uint(0); i < 64; i++ { + testReverse(t, uint64(1)<<i, uint64(1)<<(63-i)) + } + + // test a few patterns + for _, test := range []struct { + x, r uint64 + }{ + {0, 0}, + {0x1, 0x8 << 60}, + {0x2, 0x4 << 60}, + {0x3, 0xc << 60}, + {0x4, 0x2 << 60}, + {0x5, 0xa << 60}, + {0x6, 0x6 << 60}, + {0x7, 0xe << 60}, + {0x8, 0x1 << 60}, + {0x9, 0x9 << 60}, + {0xa, 0x5 << 60}, + {0xb, 0xd << 60}, + {0xc, 0x3 << 60}, + {0xd, 0xb << 60}, + {0xe, 0x7 << 60}, + {0xf, 0xf << 60}, + {0x5686487, 0xe12616a000000000}, + {0x0123456789abcdef, 0xf7b3d591e6a2c480}, + } { + testReverse(t, test.x, test.r) + testReverse(t, test.r, test.x) + } +} + +func testReverse(t *testing.T, x64, want64 uint64) { + x8 := uint8(x64) + got8 := Reverse8(x8) + want8 := uint8(want64 >> (64 - 8)) + if got8 != want8 { + t.Fatalf("Reverse8(%#02x) == %#02x; want %#02x", x8, got8, want8) + } + + x16 := uint16(x64) + got16 := Reverse16(x16) + want16 := uint16(want64 >> (64 - 16)) + if got16 != want16 { + t.Fatalf("Reverse16(%#04x) == %#04x; want %#04x", x16, got16, want16) + } + + x32 := uint32(x64) + got32 := Reverse32(x32) + want32 := uint32(want64 >> (64 - 32)) + if got32 != want32 { + t.Fatalf("Reverse32(%#08x) == %#08x; want %#08x", x32, got32, want32) + } + if UintSize == 32 { + x := uint(x32) + got := Reverse(x) + want := uint(want32) + if got != want { + t.Fatalf("Reverse(%#08x) == %#08x; want %#08x", x, got, want) + } + } + + got64 := Reverse64(x64) + if got64 != want64 { + t.Fatalf("Reverse64(%#016x) == %#016x; want %#016x", x64, got64, want64) + } + if UintSize == 64 { + x := uint(x64) + got := Reverse(x) + want := uint(want64) + if got != want { + t.Fatalf("Reverse(%#08x) == %#016x; want %#016x", x, got, want) + } + } +} + +func BenchmarkReverse(b *testing.B) { + var s uint + for i := 0; i < b.N; i++ { + s += Reverse(uint(i)) + } + Output = int(s) +} + +func BenchmarkReverse8(b *testing.B) { + var s uint8 + for i := 0; i < b.N; i++ { + s += Reverse8(uint8(i)) + } + Output = int(s) +} + +func BenchmarkReverse16(b *testing.B) { + var s uint16 + for i := 0; i < b.N; i++ { + s += Reverse16(uint16(i)) + } + Output = int(s) +} + +func BenchmarkReverse32(b *testing.B) { + var s uint32 + for i := 0; i < b.N; i++ { + s += Reverse32(uint32(i)) + } + Output = int(s) +} + +func BenchmarkReverse64(b *testing.B) { + var s uint64 + for i := 0; i < b.N; i++ { + s += Reverse64(uint64(i)) + } + Output = int(s) +} + +func TestReverseBytes(t *testing.T) { + for _, test := range []struct { + x, r uint64 + }{ + {0, 0}, + {0x01, 0x01 << 56}, + {0x0123, 0x2301 << 48}, + {0x012345, 0x452301 << 40}, + {0x01234567, 0x67452301 << 32}, + {0x0123456789, 0x8967452301 << 24}, + {0x0123456789ab, 0xab8967452301 << 16}, + {0x0123456789abcd, 0xcdab8967452301 << 8}, + {0x0123456789abcdef, 0xefcdab8967452301 << 0}, + } { + testReverseBytes(t, test.x, 
test.r) + testReverseBytes(t, test.r, test.x) + } +} + +func testReverseBytes(t *testing.T, x64, want64 uint64) { + x16 := uint16(x64) + got16 := ReverseBytes16(x16) + want16 := uint16(want64 >> (64 - 16)) + if got16 != want16 { + t.Fatalf("ReverseBytes16(%#04x) == %#04x; want %#04x", x16, got16, want16) + } + + x32 := uint32(x64) + got32 := ReverseBytes32(x32) + want32 := uint32(want64 >> (64 - 32)) + if got32 != want32 { + t.Fatalf("ReverseBytes32(%#08x) == %#08x; want %#08x", x32, got32, want32) + } + if UintSize == 32 { + x := uint(x32) + got := ReverseBytes(x) + want := uint(want32) + if got != want { + t.Fatalf("ReverseBytes(%#08x) == %#08x; want %#08x", x, got, want) + } + } + + got64 := ReverseBytes64(x64) + if got64 != want64 { + t.Fatalf("ReverseBytes64(%#016x) == %#016x; want %#016x", x64, got64, want64) + } + if UintSize == 64 { + x := uint(x64) + got := ReverseBytes(x) + want := uint(want64) + if got != want { + t.Fatalf("ReverseBytes(%#016x) == %#016x; want %#016x", x, got, want) + } + } +} + +func BenchmarkReverseBytes(b *testing.B) { + var s uint + for i := 0; i < b.N; i++ { + s += ReverseBytes(uint(i)) + } + Output = int(s) +} + +func BenchmarkReverseBytes16(b *testing.B) { + var s uint16 + for i := 0; i < b.N; i++ { + s += ReverseBytes16(uint16(i)) + } + Output = int(s) +} + +func BenchmarkReverseBytes32(b *testing.B) { + var s uint32 + for i := 0; i < b.N; i++ { + s += ReverseBytes32(uint32(i)) + } + Output = int(s) +} + +func BenchmarkReverseBytes64(b *testing.B) { + var s uint64 + for i := 0; i < b.N; i++ { + s += ReverseBytes64(uint64(i)) + } + Output = int(s) +} + +func TestLen(t *testing.T) { + for i := 0; i < 256; i++ { + len := 8 - tab[i].nlz + for k := 0; k < 64-8; k++ { + x := uint64(i) << uint(k) + want := 0 + if x != 0 { + want = len + k + } + if x <= 1<<8-1 { + got := Len8(uint8(x)) + if got != want { + t.Fatalf("Len8(%#02x) == %d; want %d", x, got, want) + } + } + + if x <= 1<<16-1 { + got := Len16(uint16(x)) + if got != want { + t.Fatalf("Len16(%#04x) == %d; want %d", x, got, want) + } + } + + if x <= 1<<32-1 { + got := Len32(uint32(x)) + if got != want { + t.Fatalf("Len32(%#08x) == %d; want %d", x, got, want) + } + if UintSize == 32 { + got := Len(uint(x)) + if got != want { + t.Fatalf("Len(%#08x) == %d; want %d", x, got, want) + } + } + } + + if x <= 1<<64-1 { + got := Len64(uint64(x)) + if got != want { + t.Fatalf("Len64(%#016x) == %d; want %d", x, got, want) + } + if UintSize == 64 { + got := Len(uint(x)) + if got != want { + t.Fatalf("Len(%#016x) == %d; want %d", x, got, want) + } + } + } + } + } +} + +const ( + _M = 1<<UintSize - 1 + _M32 = 1<<32 - 1 + _M64 = 1<<64 - 1 +) + +func TestAddSubUint(t *testing.T) { + test := func(msg string, f func(x, y, c uint) (z, cout uint), x, y, c, z, cout uint) { + z1, cout1 := f(x, y, c) + if z1 != z || cout1 != cout { + t.Errorf("%s: got z:cout = %#x:%#x; want %#x:%#x", msg, z1, cout1, z, cout) + } + } + for _, a := range []struct{ x, y, c, z, cout uint }{ + {0, 0, 0, 0, 0}, + {0, 1, 0, 1, 0}, + {0, 0, 1, 1, 0}, + {0, 1, 1, 2, 0}, + {12345, 67890, 0, 80235, 0}, + {12345, 67890, 1, 80236, 0}, + {_M, 1, 0, 0, 1}, + {_M, 0, 1, 0, 1}, + {_M, 1, 1, 1, 1}, + {_M, _M, 0, _M - 1, 1}, + {_M, _M, 1, _M, 1}, + } { + test("Add", Add, a.x, a.y, a.c, a.z, a.cout) + test("Add symmetric", Add, a.y, a.x, a.c, a.z, a.cout) + test("Sub", Sub, a.z, a.x, a.c, a.y, a.cout) + test("Sub symmetric", Sub, a.z, a.y, a.c, a.x, a.cout) + // The above code can't test intrinsic implementation, because the passed function is not called directly. 
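+		// (A func value passed as an argument is always called through its
+		// generic out-of-line body, whereas a direct call written inside a
+		// closure can still be lowered to the compiler intrinsic.)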
+ // The following code uses a closure to test the intrinsic version in case the function is intrinsified. + test("Add intrinsic", func(x, y, c uint) (uint, uint) { return Add(x, y, c) }, a.x, a.y, a.c, a.z, a.cout) + test("Add intrinsic symmetric", func(x, y, c uint) (uint, uint) { return Add(x, y, c) }, a.y, a.x, a.c, a.z, a.cout) + test("Sub intrinsic", func(x, y, c uint) (uint, uint) { return Sub(x, y, c) }, a.z, a.x, a.c, a.y, a.cout) + test("Sub intrinsic symmetric", func(x, y, c uint) (uint, uint) { return Sub(x, y, c) }, a.z, a.y, a.c, a.x, a.cout) + + } +} + +func TestAddSubUint32(t *testing.T) { + test := func(msg string, f func(x, y, c uint32) (z, cout uint32), x, y, c, z, cout uint32) { + z1, cout1 := f(x, y, c) + if z1 != z || cout1 != cout { + t.Errorf("%s: got z:cout = %#x:%#x; want %#x:%#x", msg, z1, cout1, z, cout) + } + } + for _, a := range []struct{ x, y, c, z, cout uint32 }{ + {0, 0, 0, 0, 0}, + {0, 1, 0, 1, 0}, + {0, 0, 1, 1, 0}, + {0, 1, 1, 2, 0}, + {12345, 67890, 0, 80235, 0}, + {12345, 67890, 1, 80236, 0}, + {_M32, 1, 0, 0, 1}, + {_M32, 0, 1, 0, 1}, + {_M32, 1, 1, 1, 1}, + {_M32, _M32, 0, _M32 - 1, 1}, + {_M32, _M32, 1, _M32, 1}, + } { + test("Add32", Add32, a.x, a.y, a.c, a.z, a.cout) + test("Add32 symmetric", Add32, a.y, a.x, a.c, a.z, a.cout) + test("Sub32", Sub32, a.z, a.x, a.c, a.y, a.cout) + test("Sub32 symmetric", Sub32, a.z, a.y, a.c, a.x, a.cout) + } +} + +func TestAddSubUint64(t *testing.T) { + test := func(msg string, f func(x, y, c uint64) (z, cout uint64), x, y, c, z, cout uint64) { + z1, cout1 := f(x, y, c) + if z1 != z || cout1 != cout { + t.Errorf("%s: got z:cout = %#x:%#x; want %#x:%#x", msg, z1, cout1, z, cout) + } + } + for _, a := range []struct{ x, y, c, z, cout uint64 }{ + {0, 0, 0, 0, 0}, + {0, 1, 0, 1, 0}, + {0, 0, 1, 1, 0}, + {0, 1, 1, 2, 0}, + {12345, 67890, 0, 80235, 0}, + {12345, 67890, 1, 80236, 0}, + {_M64, 1, 0, 0, 1}, + {_M64, 0, 1, 0, 1}, + {_M64, 1, 1, 1, 1}, + {_M64, _M64, 0, _M64 - 1, 1}, + {_M64, _M64, 1, _M64, 1}, + } { + test("Add64", Add64, a.x, a.y, a.c, a.z, a.cout) + test("Add64 symmetric", Add64, a.y, a.x, a.c, a.z, a.cout) + test("Sub64", Sub64, a.z, a.x, a.c, a.y, a.cout) + test("Sub64 symmetric", Sub64, a.z, a.y, a.c, a.x, a.cout) + // The above code can't test intrinsic implementation, because the passed function is not called directly. + // The following code uses a closure to test the intrinsic version in case the function is intrinsified. + test("Add64 intrinsic", func(x, y, c uint64) (uint64, uint64) { return Add64(x, y, c) }, a.x, a.y, a.c, a.z, a.cout) + test("Add64 intrinsic symmetric", func(x, y, c uint64) (uint64, uint64) { return Add64(x, y, c) }, a.y, a.x, a.c, a.z, a.cout) + test("Sub64 intrinsic", func(x, y, c uint64) (uint64, uint64) { return Sub64(x, y, c) }, a.z, a.x, a.c, a.y, a.cout) + test("Sub64 intrinsic symmetric", func(x, y, c uint64) (uint64, uint64) { return Sub64(x, y, c) }, a.z, a.y, a.c, a.x, a.cout) + } +} + +func TestAdd64OverflowPanic(t *testing.T) { + // Test that 64-bit overflow panics fire correctly. + // These are designed to improve coverage of compiler intrinsics. 
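+	// (Each variant below writes the same carry check in a different shape
+	// (c > 0, c != 0, c == 1, and the inverted forms) so that, presumably,
+	// the different branch patterns the compiler may generate are exercised.)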
+ tests := []func(uint64, uint64) uint64{ + func(a, b uint64) uint64 { + x, c := Add64(a, b, 0) + if c > 0 { + panic("overflow") + } + return x + }, + func(a, b uint64) uint64 { + x, c := Add64(a, b, 0) + if c != 0 { + panic("overflow") + } + return x + }, + func(a, b uint64) uint64 { + x, c := Add64(a, b, 0) + if c == 1 { + panic("overflow") + } + return x + }, + func(a, b uint64) uint64 { + x, c := Add64(a, b, 0) + if c != 1 { + return x + } + panic("overflow") + }, + func(a, b uint64) uint64 { + x, c := Add64(a, b, 0) + if c == 0 { + return x + } + panic("overflow") + }, + } + for _, test := range tests { + shouldPanic := func(f func()) { + defer func() { + if err := recover(); err == nil { + t.Fatalf("expected panic") + } + }() + f() + } + + // overflow + shouldPanic(func() { test(_M64, 1) }) + shouldPanic(func() { test(1, _M64) }) + shouldPanic(func() { test(_M64, _M64) }) + + // no overflow + test(_M64, 0) + test(0, 0) + test(1, 1) + } +} + +func TestSub64OverflowPanic(t *testing.T) { + // Test that 64-bit overflow panics fire correctly. + // These are designed to improve coverage of compiler intrinsics. + tests := []func(uint64, uint64) uint64{ + func(a, b uint64) uint64 { + x, c := Sub64(a, b, 0) + if c > 0 { + panic("overflow") + } + return x + }, + func(a, b uint64) uint64 { + x, c := Sub64(a, b, 0) + if c != 0 { + panic("overflow") + } + return x + }, + func(a, b uint64) uint64 { + x, c := Sub64(a, b, 0) + if c == 1 { + panic("overflow") + } + return x + }, + func(a, b uint64) uint64 { + x, c := Sub64(a, b, 0) + if c != 1 { + return x + } + panic("overflow") + }, + func(a, b uint64) uint64 { + x, c := Sub64(a, b, 0) + if c == 0 { + return x + } + panic("overflow") + }, + } + for _, test := range tests { + shouldPanic := func(f func()) { + defer func() { + if err := recover(); err == nil { + t.Fatalf("expected panic") + } + }() + f() + } + + // overflow + shouldPanic(func() { test(0, 1) }) + shouldPanic(func() { test(1, _M64) }) + shouldPanic(func() { test(_M64-1, _M64) }) + + // no overflow + test(_M64, 0) + test(0, 0) + test(1, 1) + } +} + +func TestMulDiv(t *testing.T) { + testMul := func(msg string, f func(x, y uint) (hi, lo uint), x, y, hi, lo uint) { + hi1, lo1 := f(x, y) + if hi1 != hi || lo1 != lo { + t.Errorf("%s: got hi:lo = %#x:%#x; want %#x:%#x", msg, hi1, lo1, hi, lo) + } + } + testDiv := func(msg string, f func(hi, lo, y uint) (q, r uint), hi, lo, y, q, r uint) { + q1, r1 := f(hi, lo, y) + if q1 != q || r1 != r { + t.Errorf("%s: got q:r = %#x:%#x; want %#x:%#x", msg, q1, r1, q, r) + } + } + for _, a := range []struct { + x, y uint + hi, lo, r uint + }{ + {1 << (UintSize - 1), 2, 1, 0, 1}, + {_M, _M, _M - 1, 1, 42}, + } { + testMul("Mul", Mul, a.x, a.y, a.hi, a.lo) + testMul("Mul symmetric", Mul, a.y, a.x, a.hi, a.lo) + testDiv("Div", Div, a.hi, a.lo+a.r, a.y, a.x, a.r) + testDiv("Div symmetric", Div, a.hi, a.lo+a.r, a.x, a.y, a.r) + // The above code can't test intrinsic implementation, because the passed function is not called directly. + // The following code uses a closure to test the intrinsic version in case the function is intrinsified. 
+ testMul("Mul intrinsic", func(x, y uint) (uint, uint) { return Mul(x, y) }, a.x, a.y, a.hi, a.lo) + testMul("Mul intrinsic symmetric", func(x, y uint) (uint, uint) { return Mul(x, y) }, a.y, a.x, a.hi, a.lo) + testDiv("Div intrinsic", func(hi, lo, y uint) (uint, uint) { return Div(hi, lo, y) }, a.hi, a.lo+a.r, a.y, a.x, a.r) + testDiv("Div intrinsic symmetric", func(hi, lo, y uint) (uint, uint) { return Div(hi, lo, y) }, a.hi, a.lo+a.r, a.x, a.y, a.r) + } +} + +func TestMulDiv32(t *testing.T) { + testMul := func(msg string, f func(x, y uint32) (hi, lo uint32), x, y, hi, lo uint32) { + hi1, lo1 := f(x, y) + if hi1 != hi || lo1 != lo { + t.Errorf("%s: got hi:lo = %#x:%#x; want %#x:%#x", msg, hi1, lo1, hi, lo) + } + } + testDiv := func(msg string, f func(hi, lo, y uint32) (q, r uint32), hi, lo, y, q, r uint32) { + q1, r1 := f(hi, lo, y) + if q1 != q || r1 != r { + t.Errorf("%s: got q:r = %#x:%#x; want %#x:%#x", msg, q1, r1, q, r) + } + } + for _, a := range []struct { + x, y uint32 + hi, lo, r uint32 + }{ + {1 << 31, 2, 1, 0, 1}, + {0xc47dfa8c, 50911, 0x98a4, 0x998587f4, 13}, + {_M32, _M32, _M32 - 1, 1, 42}, + } { + testMul("Mul32", Mul32, a.x, a.y, a.hi, a.lo) + testMul("Mul32 symmetric", Mul32, a.y, a.x, a.hi, a.lo) + testDiv("Div32", Div32, a.hi, a.lo+a.r, a.y, a.x, a.r) + testDiv("Div32 symmetric", Div32, a.hi, a.lo+a.r, a.x, a.y, a.r) + } +} + +func TestMulDiv64(t *testing.T) { + testMul := func(msg string, f func(x, y uint64) (hi, lo uint64), x, y, hi, lo uint64) { + hi1, lo1 := f(x, y) + if hi1 != hi || lo1 != lo { + t.Errorf("%s: got hi:lo = %#x:%#x; want %#x:%#x", msg, hi1, lo1, hi, lo) + } + } + testDiv := func(msg string, f func(hi, lo, y uint64) (q, r uint64), hi, lo, y, q, r uint64) { + q1, r1 := f(hi, lo, y) + if q1 != q || r1 != r { + t.Errorf("%s: got q:r = %#x:%#x; want %#x:%#x", msg, q1, r1, q, r) + } + } + for _, a := range []struct { + x, y uint64 + hi, lo, r uint64 + }{ + {1 << 63, 2, 1, 0, 1}, + {0x3626229738a3b9, 0xd8988a9f1cc4a61, 0x2dd0712657fe8, 0x9dd6a3364c358319, 13}, + {_M64, _M64, _M64 - 1, 1, 42}, + } { + testMul("Mul64", Mul64, a.x, a.y, a.hi, a.lo) + testMul("Mul64 symmetric", Mul64, a.y, a.x, a.hi, a.lo) + testDiv("Div64", Div64, a.hi, a.lo+a.r, a.y, a.x, a.r) + testDiv("Div64 symmetric", Div64, a.hi, a.lo+a.r, a.x, a.y, a.r) + // The above code can't test intrinsic implementation, because the passed function is not called directly. + // The following code uses a closure to test the intrinsic version in case the function is intrinsified. 
+ testMul("Mul64 intrinsic", func(x, y uint64) (uint64, uint64) { return Mul64(x, y) }, a.x, a.y, a.hi, a.lo) + testMul("Mul64 intrinsic symmetric", func(x, y uint64) (uint64, uint64) { return Mul64(x, y) }, a.y, a.x, a.hi, a.lo) + testDiv("Div64 intrinsic", func(hi, lo, y uint64) (uint64, uint64) { return Div64(hi, lo, y) }, a.hi, a.lo+a.r, a.y, a.x, a.r) + testDiv("Div64 intrinsic symmetric", func(hi, lo, y uint64) (uint64, uint64) { return Div64(hi, lo, y) }, a.hi, a.lo+a.r, a.x, a.y, a.r) + } +} + +const ( + divZeroError = "runtime error: integer divide by zero" + overflowError = "runtime error: integer overflow" +) + +func TestDivPanicOverflow(t *testing.T) { + // Expect a panic + defer func() { + if err := recover(); err == nil { + t.Error("Div should have panicked when y<=hi") + } else if e, ok := err.(runtime.Error); !ok || e.Error() != overflowError { + t.Errorf("Div expected panic: %q, got: %q ", overflowError, e.Error()) + } + }() + q, r := Div(1, 0, 1) + t.Errorf("undefined q, r = %v, %v calculated when Div should have panicked", q, r) +} + +func TestDiv32PanicOverflow(t *testing.T) { + // Expect a panic + defer func() { + if err := recover(); err == nil { + t.Error("Div32 should have panicked when y<=hi") + } else if e, ok := err.(runtime.Error); !ok || e.Error() != overflowError { + t.Errorf("Div32 expected panic: %q, got: %q ", overflowError, e.Error()) + } + }() + q, r := Div32(1, 0, 1) + t.Errorf("undefined q, r = %v, %v calculated when Div32 should have panicked", q, r) +} + +func TestDiv64PanicOverflow(t *testing.T) { + // Expect a panic + defer func() { + if err := recover(); err == nil { + t.Error("Div64 should have panicked when y<=hi") + } else if e, ok := err.(runtime.Error); !ok || e.Error() != overflowError { + t.Errorf("Div64 expected panic: %q, got: %q ", overflowError, e.Error()) + } + }() + q, r := Div64(1, 0, 1) + t.Errorf("undefined q, r = %v, %v calculated when Div64 should have panicked", q, r) +} + +func TestDivPanicZero(t *testing.T) { + // Expect a panic + defer func() { + if err := recover(); err == nil { + t.Error("Div should have panicked when y==0") + } else if e, ok := err.(runtime.Error); !ok || e.Error() != divZeroError { + t.Errorf("Div expected panic: %q, got: %q ", divZeroError, e.Error()) + } + }() + q, r := Div(1, 1, 0) + t.Errorf("undefined q, r = %v, %v calculated when Div should have panicked", q, r) +} + +func TestDiv32PanicZero(t *testing.T) { + // Expect a panic + defer func() { + if err := recover(); err == nil { + t.Error("Div32 should have panicked when y==0") + } else if e, ok := err.(runtime.Error); !ok || e.Error() != divZeroError { + t.Errorf("Div32 expected panic: %q, got: %q ", divZeroError, e.Error()) + } + }() + q, r := Div32(1, 1, 0) + t.Errorf("undefined q, r = %v, %v calculated when Div32 should have panicked", q, r) +} + +func TestDiv64PanicZero(t *testing.T) { + // Expect a panic + defer func() { + if err := recover(); err == nil { + t.Error("Div64 should have panicked when y==0") + } else if e, ok := err.(runtime.Error); !ok || e.Error() != divZeroError { + t.Errorf("Div64 expected panic: %q, got: %q ", divZeroError, e.Error()) + } + }() + q, r := Div64(1, 1, 0) + t.Errorf("undefined q, r = %v, %v calculated when Div64 should have panicked", q, r) +} + +func TestRem32(t *testing.T) { + // Sanity check: for non-oveflowing dividends, the result is the + // same as the rem returned by Div32 + hi, lo, y := uint32(510510), uint32(9699690), uint32(510510+1) // ensure hi < y + for i := 0; i < 1000; i++ { + r := Rem32(hi, lo, 
y) + _, r2 := Div32(hi, lo, y) + if r != r2 { + t.Errorf("Rem32(%v, %v, %v) returned %v, but Div32 returned rem %v", hi, lo, y, r, r2) + } + y += 13 + } +} + +func TestRem32Overflow(t *testing.T) { + // To trigger a quotient overflow, we need y <= hi + hi, lo, y := uint32(510510), uint32(9699690), uint32(7) + for i := 0; i < 1000; i++ { + r := Rem32(hi, lo, y) + _, r2 := Div64(0, uint64(hi)<<32|uint64(lo), uint64(y)) + if r != uint32(r2) { + t.Errorf("Rem32(%v, %v, %v) returned %v, but Div64 returned rem %v", hi, lo, y, r, r2) + } + y += 13 + } +} + +func TestRem64(t *testing.T) { + // Sanity check: for non-oveflowing dividends, the result is the + // same as the rem returned by Div64 + hi, lo, y := uint64(510510), uint64(9699690), uint64(510510+1) // ensure hi < y + for i := 0; i < 1000; i++ { + r := Rem64(hi, lo, y) + _, r2 := Div64(hi, lo, y) + if r != r2 { + t.Errorf("Rem64(%v, %v, %v) returned %v, but Div64 returned rem %v", hi, lo, y, r, r2) + } + y += 13 + } +} + +func TestRem64Overflow(t *testing.T) { + Rem64Tests := []struct { + hi, lo, y uint64 + rem uint64 + }{ + // Testcases computed using Python 3, as: + // >>> hi = 42; lo = 1119; y = 42 + // >>> ((hi<<64)+lo) % y + {42, 1119, 42, 27}, + {42, 1119, 38, 9}, + {42, 1119, 26, 23}, + {469, 0, 467, 271}, + {469, 0, 113, 58}, + {111111, 111111, 1171, 803}, + {3968194946088682615, 3192705705065114702, 1000037, 56067}, + } + + for _, rt := range Rem64Tests { + if rt.hi < rt.y { + t.Fatalf("Rem64(%v, %v, %v) is not a test with quo overflow", rt.hi, rt.lo, rt.y) + } + rem := Rem64(rt.hi, rt.lo, rt.y) + if rem != rt.rem { + t.Errorf("Rem64(%v, %v, %v) returned %v, wanted %v", + rt.hi, rt.lo, rt.y, rem, rt.rem) + } + } +} + +func BenchmarkAdd(b *testing.B) { + var z, c uint + for i := 0; i < b.N; i++ { + z, c = Add(uint(Input), uint(i), c) + } + Output = int(z + c) +} + +func BenchmarkAdd32(b *testing.B) { + var z, c uint32 + for i := 0; i < b.N; i++ { + z, c = Add32(uint32(Input), uint32(i), c) + } + Output = int(z + c) +} + +func BenchmarkAdd64(b *testing.B) { + var z, c uint64 + for i := 0; i < b.N; i++ { + z, c = Add64(uint64(Input), uint64(i), c) + } + Output = int(z + c) +} + +func BenchmarkAdd64multiple(b *testing.B) { + var z0 = uint64(Input) + var z1 = uint64(Input) + var z2 = uint64(Input) + var z3 = uint64(Input) + for i := 0; i < b.N; i++ { + var c uint64 + z0, c = Add64(z0, uint64(i), c) + z1, c = Add64(z1, uint64(i), c) + z2, c = Add64(z2, uint64(i), c) + z3, _ = Add64(z3, uint64(i), c) + } + Output = int(z0 + z1 + z2 + z3) +} + +func BenchmarkSub(b *testing.B) { + var z, c uint + for i := 0; i < b.N; i++ { + z, c = Sub(uint(Input), uint(i), c) + } + Output = int(z + c) +} + +func BenchmarkSub32(b *testing.B) { + var z, c uint32 + for i := 0; i < b.N; i++ { + z, c = Sub32(uint32(Input), uint32(i), c) + } + Output = int(z + c) +} + +func BenchmarkSub64(b *testing.B) { + var z, c uint64 + for i := 0; i < b.N; i++ { + z, c = Sub64(uint64(Input), uint64(i), c) + } + Output = int(z + c) +} + +func BenchmarkSub64multiple(b *testing.B) { + var z0 = uint64(Input) + var z1 = uint64(Input) + var z2 = uint64(Input) + var z3 = uint64(Input) + for i := 0; i < b.N; i++ { + var c uint64 + z0, c = Sub64(z0, uint64(i), c) + z1, c = Sub64(z1, uint64(i), c) + z2, c = Sub64(z2, uint64(i), c) + z3, _ = Sub64(z3, uint64(i), c) + } + Output = int(z0 + z1 + z2 + z3) +} + +func BenchmarkMul(b *testing.B) { + var hi, lo uint + for i := 0; i < b.N; i++ { + hi, lo = Mul(uint(Input), uint(i)) + } + Output = int(hi + lo) +} + +func BenchmarkMul32(b 
*testing.B) { + var hi, lo uint32 + for i := 0; i < b.N; i++ { + hi, lo = Mul32(uint32(Input), uint32(i)) + } + Output = int(hi + lo) +} + +func BenchmarkMul64(b *testing.B) { + var hi, lo uint64 + for i := 0; i < b.N; i++ { + hi, lo = Mul64(uint64(Input), uint64(i)) + } + Output = int(hi + lo) +} + +func BenchmarkDiv(b *testing.B) { + var q, r uint + for i := 0; i < b.N; i++ { + q, r = Div(1, uint(i), uint(Input)) + } + Output = int(q + r) +} + +func BenchmarkDiv32(b *testing.B) { + var q, r uint32 + for i := 0; i < b.N; i++ { + q, r = Div32(1, uint32(i), uint32(Input)) + } + Output = int(q + r) +} + +func BenchmarkDiv64(b *testing.B) { + var q, r uint64 + for i := 0; i < b.N; i++ { + q, r = Div64(1, uint64(i), uint64(Input)) + } + Output = int(q + r) +} + +// ---------------------------------------------------------------------------- +// Testing support + +type entry = struct { + nlz, ntz, pop int +} + +// tab contains results for all uint8 values +var tab [256]entry + +func init() { + tab[0] = entry{8, 8, 0} + for i := 1; i < len(tab); i++ { + // nlz + x := i // x != 0 + n := 0 + for x&0x80 == 0 { + n++ + x <<= 1 + } + tab[i].nlz = n + + // ntz + x = i // x != 0 + n = 0 + for x&1 == 0 { + n++ + x >>= 1 + } + tab[i].ntz = n + + // pop + x = i // x != 0 + n = 0 + for x != 0 { + n += int(x & 1) + x >>= 1 + } + tab[i].pop = n + } +} diff --git a/src/math/bits/example_math_test.go b/src/math/bits/example_math_test.go new file mode 100644 index 0000000..4bb466f --- /dev/null +++ b/src/math/bits/example_math_test.go @@ -0,0 +1,202 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bits_test + +import ( + "fmt" + "math/bits" +) + +func ExampleAdd32() { + // First number is 33<<32 + 12 + n1 := []uint32{33, 12} + // Second number is 21<<32 + 23 + n2 := []uint32{21, 23} + // Add them together without producing carry. + d1, carry := bits.Add32(n1[1], n2[1], 0) + d0, _ := bits.Add32(n1[0], n2[0], carry) + nsum := []uint32{d0, d1} + fmt.Printf("%v + %v = %v (carry bit was %v)\n", n1, n2, nsum, carry) + + // First number is 1<<32 + 2147483648 + n1 = []uint32{1, 0x80000000} + // Second number is 1<<32 + 2147483648 + n2 = []uint32{1, 0x80000000} + // Add them together producing carry. + d1, carry = bits.Add32(n1[1], n2[1], 0) + d0, _ = bits.Add32(n1[0], n2[0], carry) + nsum = []uint32{d0, d1} + fmt.Printf("%v + %v = %v (carry bit was %v)\n", n1, n2, nsum, carry) + // Output: + // [33 12] + [21 23] = [54 35] (carry bit was 0) + // [1 2147483648] + [1 2147483648] = [3 0] (carry bit was 1) +} + +func ExampleAdd64() { + // First number is 33<<64 + 12 + n1 := []uint64{33, 12} + // Second number is 21<<64 + 23 + n2 := []uint64{21, 23} + // Add them together without producing carry. + d1, carry := bits.Add64(n1[1], n2[1], 0) + d0, _ := bits.Add64(n1[0], n2[0], carry) + nsum := []uint64{d0, d1} + fmt.Printf("%v + %v = %v (carry bit was %v)\n", n1, n2, nsum, carry) + + // First number is 1<<64 + 9223372036854775808 + n1 = []uint64{1, 0x8000000000000000} + // Second number is 1<<64 + 9223372036854775808 + n2 = []uint64{1, 0x8000000000000000} + // Add them together producing carry. 
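+ // The low words are each 1<<63, so their sum wraps to 0 and sets the
+ // carry bit; Add64 then adds that carry into the high words: 1 + 1 + 1 = 3.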
+ d1, carry = bits.Add64(n1[1], n2[1], 0) + d0, _ = bits.Add64(n1[0], n2[0], carry) + nsum = []uint64{d0, d1} + fmt.Printf("%v + %v = %v (carry bit was %v)\n", n1, n2, nsum, carry) + // Output: + // [33 12] + [21 23] = [54 35] (carry bit was 0) + // [1 9223372036854775808] + [1 9223372036854775808] = [3 0] (carry bit was 1) +} + +func ExampleSub32() { + // First number is 33<<32 + 23 + n1 := []uint32{33, 23} + // Second number is 21<<32 + 12 + n2 := []uint32{21, 12} + // Subtract them without producing a carry. + d1, carry := bits.Sub32(n1[1], n2[1], 0) + d0, _ := bits.Sub32(n1[0], n2[0], carry) + nsum := []uint32{d0, d1} + fmt.Printf("%v - %v = %v (carry bit was %v)\n", n1, n2, nsum, carry) + + // First number is 3<<32 + 2147483647 + n1 = []uint32{3, 0x7fffffff} + // Second number is 1<<32 + 2147483648 + n2 = []uint32{1, 0x80000000} + // Subtract them, producing a carry. + d1, carry = bits.Sub32(n1[1], n2[1], 0) + d0, _ = bits.Sub32(n1[0], n2[0], carry) + nsum = []uint32{d0, d1} + fmt.Printf("%v - %v = %v (carry bit was %v)\n", n1, n2, nsum, carry) + // Output: + // [33 23] - [21 12] = [12 11] (carry bit was 0) + // [3 2147483647] - [1 2147483648] = [1 4294967295] (carry bit was 1) +} + +func ExampleSub64() { + // First number is 33<<64 + 23 + n1 := []uint64{33, 23} + // Second number is 21<<64 + 12 + n2 := []uint64{21, 12} + // Subtract them without producing a carry. + d1, carry := bits.Sub64(n1[1], n2[1], 0) + d0, _ := bits.Sub64(n1[0], n2[0], carry) + nsum := []uint64{d0, d1} + fmt.Printf("%v - %v = %v (carry bit was %v)\n", n1, n2, nsum, carry) + + // First number is 3<<64 + 9223372036854775807 + n1 = []uint64{3, 0x7fffffffffffffff} + // Second number is 1<<64 + 9223372036854775808 + n2 = []uint64{1, 0x8000000000000000} + // Subtract them, producing a carry. + d1, carry = bits.Sub64(n1[1], n2[1], 0) + d0, _ = bits.Sub64(n1[0], n2[0], carry) + nsum = []uint64{d0, d1} + fmt.Printf("%v - %v = %v (carry bit was %v)\n", n1, n2, nsum, carry) + // Output: + // [33 23] - [21 12] = [12 11] (carry bit was 0) + // [3 9223372036854775807] - [1 9223372036854775808] = [1 18446744073709551615] (carry bit was 1) +} + +func ExampleMul32() { + // First number is 0<<32 + 12 + n1 := []uint32{0, 12} + // Second number is 0<<32 + 12 + n2 := []uint32{0, 12} + // Multiply them together without producing overflow. + hi, lo := bits.Mul32(n1[1], n2[1]) + nsum := []uint32{hi, lo} + fmt.Printf("%v * %v = %v\n", n1[1], n2[1], nsum) + + // First number is 0<<32 + 2147483648 + n1 = []uint32{0, 0x80000000} + // Second number is 0<<32 + 2 + n2 = []uint32{0, 2} + // Multiply them together producing overflow. + hi, lo = bits.Mul32(n1[1], n2[1]) + nsum = []uint32{hi, lo} + fmt.Printf("%v * %v = %v\n", n1[1], n2[1], nsum) + // Output: + // 12 * 12 = [0 144] + // 2147483648 * 2 = [1 0] +} + +func ExampleMul64() { + // First number is 0<<64 + 12 + n1 := []uint64{0, 12} + // Second number is 0<<64 + 12 + n2 := []uint64{0, 12} + // Multiply them together without producing overflow. + hi, lo := bits.Mul64(n1[1], n2[1]) + nsum := []uint64{hi, lo} + fmt.Printf("%v * %v = %v\n", n1[1], n2[1], nsum) + + // First number is 0<<64 + 9223372036854775808 + n1 = []uint64{0, 0x8000000000000000} + // Second number is 0<<64 + 2 + n2 = []uint64{0, 2} + // Multiply them together producing overflow. 
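+ // 9223372036854775808 is 1<<63, so doubling it yields exactly 1<<64: the
+ // product needs 65 bits, and Mul64 reports the overflowed bit as hi = 1 with lo = 0.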
+ hi, lo = bits.Mul64(n1[1], n2[1]) + nsum = []uint64{hi, lo} + fmt.Printf("%v * %v = %v\n", n1[1], n2[1], nsum) + // Output: + // 12 * 12 = [0 144] + // 9223372036854775808 * 2 = [1 0] +} + +func ExampleDiv32() { + // First number is 0<<32 + 6 + n1 := []uint32{0, 6} + // Second number is 0<<32 + 3 + n2 := []uint32{0, 3} + // Divide the first number by the second. + quo, rem := bits.Div32(n1[0], n1[1], n2[1]) + nsum := []uint32{quo, rem} + fmt.Printf("[%v %v] / %v = %v\n", n1[0], n1[1], n2[1], nsum) + + // First number is 2<<32 + 2147483648 + n1 = []uint32{2, 0x80000000} + // Second number is 0<<32 + 2147483648 + n2 = []uint32{0, 0x80000000} + // Divide the first number by the second. + quo, rem = bits.Div32(n1[0], n1[1], n2[1]) + nsum = []uint32{quo, rem} + fmt.Printf("[%v %v] / %v = %v\n", n1[0], n1[1], n2[1], nsum) + // Output: + // [0 6] / 3 = [2 0] + // [2 2147483648] / 2147483648 = [5 0] +} + +func ExampleDiv64() { + // First number is 0<<64 + 6 + n1 := []uint64{0, 6} + // Second number is 0<<64 + 3 + n2 := []uint64{0, 3} + // Divide the first number by the second. + quo, rem := bits.Div64(n1[0], n1[1], n2[1]) + nsum := []uint64{quo, rem} + fmt.Printf("[%v %v] / %v = %v\n", n1[0], n1[1], n2[1], nsum) + + // First number is 2<<64 + 9223372036854775808 + n1 = []uint64{2, 0x8000000000000000} + // Second number is 0<<64 + 9223372036854775808 + n2 = []uint64{0, 0x8000000000000000} + // Divide the first number by the second. + quo, rem = bits.Div64(n1[0], n1[1], n2[1]) + nsum = []uint64{quo, rem} + fmt.Printf("[%v %v] / %v = %v\n", n1[0], n1[1], n2[1], nsum) + // Output: + // [0 6] / 3 = [2 0] + // [2 9223372036854775808] / 9223372036854775808 = [5 0] +} diff --git a/src/math/bits/example_test.go b/src/math/bits/example_test.go new file mode 100644 index 0000000..b2ed2cb --- /dev/null +++ b/src/math/bits/example_test.go @@ -0,0 +1,210 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by go run make_examples.go. DO NOT EDIT. 
+ +package bits_test + +import ( + "fmt" + "math/bits" +) + +func ExampleLeadingZeros8() { + fmt.Printf("LeadingZeros8(%08b) = %d\n", 1, bits.LeadingZeros8(1)) + // Output: + // LeadingZeros8(00000001) = 7 +} + +func ExampleLeadingZeros16() { + fmt.Printf("LeadingZeros16(%016b) = %d\n", 1, bits.LeadingZeros16(1)) + // Output: + // LeadingZeros16(0000000000000001) = 15 +} + +func ExampleLeadingZeros32() { + fmt.Printf("LeadingZeros32(%032b) = %d\n", 1, bits.LeadingZeros32(1)) + // Output: + // LeadingZeros32(00000000000000000000000000000001) = 31 +} + +func ExampleLeadingZeros64() { + fmt.Printf("LeadingZeros64(%064b) = %d\n", 1, bits.LeadingZeros64(1)) + // Output: + // LeadingZeros64(0000000000000000000000000000000000000000000000000000000000000001) = 63 +} + +func ExampleTrailingZeros8() { + fmt.Printf("TrailingZeros8(%08b) = %d\n", 14, bits.TrailingZeros8(14)) + // Output: + // TrailingZeros8(00001110) = 1 +} + +func ExampleTrailingZeros16() { + fmt.Printf("TrailingZeros16(%016b) = %d\n", 14, bits.TrailingZeros16(14)) + // Output: + // TrailingZeros16(0000000000001110) = 1 +} + +func ExampleTrailingZeros32() { + fmt.Printf("TrailingZeros32(%032b) = %d\n", 14, bits.TrailingZeros32(14)) + // Output: + // TrailingZeros32(00000000000000000000000000001110) = 1 +} + +func ExampleTrailingZeros64() { + fmt.Printf("TrailingZeros64(%064b) = %d\n", 14, bits.TrailingZeros64(14)) + // Output: + // TrailingZeros64(0000000000000000000000000000000000000000000000000000000000001110) = 1 +} + +func ExampleOnesCount() { + fmt.Printf("OnesCount(%b) = %d\n", 14, bits.OnesCount(14)) + // Output: + // OnesCount(1110) = 3 +} + +func ExampleOnesCount8() { + fmt.Printf("OnesCount8(%08b) = %d\n", 14, bits.OnesCount8(14)) + // Output: + // OnesCount8(00001110) = 3 +} + +func ExampleOnesCount16() { + fmt.Printf("OnesCount16(%016b) = %d\n", 14, bits.OnesCount16(14)) + // Output: + // OnesCount16(0000000000001110) = 3 +} + +func ExampleOnesCount32() { + fmt.Printf("OnesCount32(%032b) = %d\n", 14, bits.OnesCount32(14)) + // Output: + // OnesCount32(00000000000000000000000000001110) = 3 +} + +func ExampleOnesCount64() { + fmt.Printf("OnesCount64(%064b) = %d\n", 14, bits.OnesCount64(14)) + // Output: + // OnesCount64(0000000000000000000000000000000000000000000000000000000000001110) = 3 +} + +func ExampleRotateLeft8() { + fmt.Printf("%08b\n", 15) + fmt.Printf("%08b\n", bits.RotateLeft8(15, 2)) + fmt.Printf("%08b\n", bits.RotateLeft8(15, -2)) + // Output: + // 00001111 + // 00111100 + // 11000011 +} + +func ExampleRotateLeft16() { + fmt.Printf("%016b\n", 15) + fmt.Printf("%016b\n", bits.RotateLeft16(15, 2)) + fmt.Printf("%016b\n", bits.RotateLeft16(15, -2)) + // Output: + // 0000000000001111 + // 0000000000111100 + // 1100000000000011 +} + +func ExampleRotateLeft32() { + fmt.Printf("%032b\n", 15) + fmt.Printf("%032b\n", bits.RotateLeft32(15, 2)) + fmt.Printf("%032b\n", bits.RotateLeft32(15, -2)) + // Output: + // 00000000000000000000000000001111 + // 00000000000000000000000000111100 + // 11000000000000000000000000000011 +} + +func ExampleRotateLeft64() { + fmt.Printf("%064b\n", 15) + fmt.Printf("%064b\n", bits.RotateLeft64(15, 2)) + fmt.Printf("%064b\n", bits.RotateLeft64(15, -2)) + // Output: + // 0000000000000000000000000000000000000000000000000000000000001111 + // 0000000000000000000000000000000000000000000000000000000000111100 + // 1100000000000000000000000000000000000000000000000000000000000011 +} + +func ExampleReverse8() { + fmt.Printf("%08b\n", 19) + fmt.Printf("%08b\n", bits.Reverse8(19)) + // Output: + // 00010011 
+ // 11001000 +} + +func ExampleReverse16() { + fmt.Printf("%016b\n", 19) + fmt.Printf("%016b\n", bits.Reverse16(19)) + // Output: + // 0000000000010011 + // 1100100000000000 +} + +func ExampleReverse32() { + fmt.Printf("%032b\n", 19) + fmt.Printf("%032b\n", bits.Reverse32(19)) + // Output: + // 00000000000000000000000000010011 + // 11001000000000000000000000000000 +} + +func ExampleReverse64() { + fmt.Printf("%064b\n", 19) + fmt.Printf("%064b\n", bits.Reverse64(19)) + // Output: + // 0000000000000000000000000000000000000000000000000000000000010011 + // 1100100000000000000000000000000000000000000000000000000000000000 +} + +func ExampleReverseBytes16() { + fmt.Printf("%016b\n", 15) + fmt.Printf("%016b\n", bits.ReverseBytes16(15)) + // Output: + // 0000000000001111 + // 0000111100000000 +} + +func ExampleReverseBytes32() { + fmt.Printf("%032b\n", 15) + fmt.Printf("%032b\n", bits.ReverseBytes32(15)) + // Output: + // 00000000000000000000000000001111 + // 00001111000000000000000000000000 +} + +func ExampleReverseBytes64() { + fmt.Printf("%064b\n", 15) + fmt.Printf("%064b\n", bits.ReverseBytes64(15)) + // Output: + // 0000000000000000000000000000000000000000000000000000000000001111 + // 0000111100000000000000000000000000000000000000000000000000000000 +} + +func ExampleLen8() { + fmt.Printf("Len8(%08b) = %d\n", 8, bits.Len8(8)) + // Output: + // Len8(00001000) = 4 +} + +func ExampleLen16() { + fmt.Printf("Len16(%016b) = %d\n", 8, bits.Len16(8)) + // Output: + // Len16(0000000000001000) = 4 +} + +func ExampleLen32() { + fmt.Printf("Len32(%032b) = %d\n", 8, bits.Len32(8)) + // Output: + // Len32(00000000000000000000000000001000) = 4 +} + +func ExampleLen64() { + fmt.Printf("Len64(%064b) = %d\n", 8, bits.Len64(8)) + // Output: + // Len64(0000000000000000000000000000000000000000000000000000000000001000) = 4 +} diff --git a/src/math/bits/export_test.go b/src/math/bits/export_test.go new file mode 100644 index 0000000..8c6f933 --- /dev/null +++ b/src/math/bits/export_test.go @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bits + +const DeBruijn64 = deBruijn64 diff --git a/src/math/bits/make_examples.go b/src/math/bits/make_examples.go new file mode 100644 index 0000000..92e9aab --- /dev/null +++ b/src/math/bits/make_examples.go @@ -0,0 +1,113 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore +// +build ignore + +// This program generates example_test.go. + +package main + +import ( + "bytes" + "fmt" + "log" + "math/bits" + "os" +) + +const header = `// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by go run make_examples.go. DO NOT EDIT. 
+ +package bits_test + +import ( + "fmt" + "math/bits" +) +` + +func main() { + w := bytes.NewBuffer([]byte(header)) + + for _, e := range []struct { + name string + in int + out [4]any + out2 [4]any + }{ + { + name: "LeadingZeros", + in: 1, + out: [4]any{bits.LeadingZeros8(1), bits.LeadingZeros16(1), bits.LeadingZeros32(1), bits.LeadingZeros64(1)}, + }, + { + name: "TrailingZeros", + in: 14, + out: [4]any{bits.TrailingZeros8(14), bits.TrailingZeros16(14), bits.TrailingZeros32(14), bits.TrailingZeros64(14)}, + }, + { + name: "OnesCount", + in: 14, + out: [4]any{bits.OnesCount8(14), bits.OnesCount16(14), bits.OnesCount32(14), bits.OnesCount64(14)}, + }, + { + name: "RotateLeft", + in: 15, + out: [4]any{bits.RotateLeft8(15, 2), bits.RotateLeft16(15, 2), bits.RotateLeft32(15, 2), bits.RotateLeft64(15, 2)}, + out2: [4]any{bits.RotateLeft8(15, -2), bits.RotateLeft16(15, -2), bits.RotateLeft32(15, -2), bits.RotateLeft64(15, -2)}, + }, + { + name: "Reverse", + in: 19, + out: [4]any{bits.Reverse8(19), bits.Reverse16(19), bits.Reverse32(19), bits.Reverse64(19)}, + }, + { + name: "ReverseBytes", + in: 15, + out: [4]any{nil, bits.ReverseBytes16(15), bits.ReverseBytes32(15), bits.ReverseBytes64(15)}, + }, + { + name: "Len", + in: 8, + out: [4]any{bits.Len8(8), bits.Len16(8), bits.Len32(8), bits.Len64(8)}, + }, + } { + for i, size := range []int{8, 16, 32, 64} { + if e.out[i] == nil { + continue // function doesn't exist + } + f := fmt.Sprintf("%s%d", e.name, size) + fmt.Fprintf(w, "\nfunc Example%s() {\n", f) + switch e.name { + case "RotateLeft", "Reverse", "ReverseBytes": + fmt.Fprintf(w, "\tfmt.Printf(\"%%0%db\\n\", %d)\n", size, e.in) + if e.name == "RotateLeft" { + fmt.Fprintf(w, "\tfmt.Printf(\"%%0%db\\n\", bits.%s(%d, 2))\n", size, f, e.in) + fmt.Fprintf(w, "\tfmt.Printf(\"%%0%db\\n\", bits.%s(%d, -2))\n", size, f, e.in) + } else { + fmt.Fprintf(w, "\tfmt.Printf(\"%%0%db\\n\", bits.%s(%d))\n", size, f, e.in) + } + fmt.Fprintf(w, "\t// Output:\n") + fmt.Fprintf(w, "\t// %0*b\n", size, e.in) + fmt.Fprintf(w, "\t// %0*b\n", size, e.out[i]) + if e.name == "RotateLeft" && e.out2[i] != nil { + fmt.Fprintf(w, "\t// %0*b\n", size, e.out2[i]) + } + default: + fmt.Fprintf(w, "\tfmt.Printf(\"%s(%%0%db) = %%d\\n\", %d, bits.%s(%d))\n", f, size, e.in, f, e.in) + fmt.Fprintf(w, "\t// Output:\n") + fmt.Fprintf(w, "\t// %s(%0*b) = %d\n", f, size, e.in, e.out[i]) + } + fmt.Fprintf(w, "}\n") + } + } + + if err := os.WriteFile("example_test.go", w.Bytes(), 0666); err != nil { + log.Fatal(err) + } +} diff --git a/src/math/bits/make_tables.go b/src/math/bits/make_tables.go new file mode 100644 index 0000000..867025e --- /dev/null +++ b/src/math/bits/make_tables.go @@ -0,0 +1,92 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore +// +build ignore + +// This program generates bits_tables.go. + +package main + +import ( + "bytes" + "fmt" + "go/format" + "io" + "log" + "os" +) + +var header = []byte(`// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by go run make_tables.go. DO NOT EDIT. 
+ +package bits + +`) + +func main() { + buf := bytes.NewBuffer(header) + + gen(buf, "ntz8tab", ntz8) + gen(buf, "pop8tab", pop8) + gen(buf, "rev8tab", rev8) + gen(buf, "len8tab", len8) + + out, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + + err = os.WriteFile("bits_tables.go", out, 0666) + if err != nil { + log.Fatal(err) + } +} + +func gen(w io.Writer, name string, f func(uint8) uint8) { + // Use a const string to allow the compiler to constant-evaluate lookups at constant index. + fmt.Fprintf(w, "const %s = \"\"+\n\"", name) + for i := 0; i < 256; i++ { + fmt.Fprintf(w, "\\x%02x", f(uint8(i))) + if i%16 == 15 && i != 255 { + fmt.Fprint(w, "\"+\n\"") + } + } + fmt.Fprint(w, "\"\n\n") +} + +func ntz8(x uint8) (n uint8) { + for x&1 == 0 && n < 8 { + x >>= 1 + n++ + } + return +} + +func pop8(x uint8) (n uint8) { + for x != 0 { + x &= x - 1 + n++ + } + return +} + +func rev8(x uint8) (r uint8) { + for i := 8; i > 0; i-- { + r = r<<1 | x&1 + x >>= 1 + } + return +} + +func len8(x uint8) (n uint8) { + for x != 0 { + x >>= 1 + n++ + } + return +}
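As a minimal sketch of how the Mul64/Div64 pair exercised in this patch composes in practice (the helper name mulDiv64 and the sample values are illustrative, not part of the patch), the product can be carried through a 128-bit intermediate so x*num/den stays exact whenever the quotient fits in 64 bits; the den <= hi guard mirrors the quotient-overflow condition that the panic tests in bits_test.go exercise:

	package main

	import (
		"fmt"
		"math/bits"
	)

	// mulDiv64 computes x*num/den and the remainder through a 128-bit
	// intermediate, so the product may exceed 64 bits as long as the
	// quotient fits. bits.Div64 panics when den <= hi (quotient
	// overflow) or den == 0; the den <= hi check below subsumes both.
	func mulDiv64(x, num, den uint64) (quo, rem uint64) {
		hi, lo := bits.Mul64(x, num)
		if den <= hi {
			panic("mulDiv64: quotient does not fit in 64 bits")
		}
		return bits.Div64(hi, lo, den)
	}

	func main() {
		// (1<<63)*3 needs 65 bits, but the quotient 3<<61 fits in 64.
		q, r := mulDiv64(1<<63, 3, 4)
		fmt.Println(q, r) // 6917529027641081856 0
	}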