author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:14:23 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:14:23 +0000
commit     73df946d56c74384511a194dd01dbe099584fd1a (patch)
tree       fd0bcea490dd81327ddfbb31e215439672c9a068 /src/math/bits
parent     Initial commit. (diff)
Adding upstream version 1.16.10. (upstream/1.16.10, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  src/math/bits.go                          62
-rw-r--r--  src/math/bits/bits.go                    588
-rw-r--r--  src/math/bits/bits_errors.go              15
-rw-r--r--  src/math/bits/bits_errors_bootstrap.go    22
-rw-r--r--  src/math/bits/bits_tables.go              83
-rw-r--r--  src/math/bits/bits_test.go              1347
-rw-r--r--  src/math/bits/example_test.go            210
-rw-r--r--  src/math/bits/export_test.go               7
-rw-r--r--  src/math/bits/make_examples.go           112
-rw-r--r--  src/math/bits/make_tables.go              92
10 files changed, 2538 insertions, 0 deletions
diff --git a/src/math/bits.go b/src/math/bits.go
new file mode 100644
index 0000000..77bcdbe
--- /dev/null
+++ b/src/math/bits.go
@@ -0,0 +1,62 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package math
+
+const (
+ uvnan = 0x7FF8000000000001
+ uvinf = 0x7FF0000000000000
+ uvneginf = 0xFFF0000000000000
+ uvone = 0x3FF0000000000000
+ mask = 0x7FF
+ shift = 64 - 11 - 1
+ bias = 1023
+ signMask = 1 << 63
+ fracMask = 1<<shift - 1
+)
+
+// Inf returns positive infinity if sign >= 0, negative infinity if sign < 0.
+func Inf(sign int) float64 {
+ var v uint64
+ if sign >= 0 {
+ v = uvinf
+ } else {
+ v = uvneginf
+ }
+ return Float64frombits(v)
+}
+
+// NaN returns an IEEE 754 "not-a-number" value.
+func NaN() float64 { return Float64frombits(uvnan) }
+
+// IsNaN reports whether f is an IEEE 754 "not-a-number" value.
+func IsNaN(f float64) (is bool) {
+ // IEEE 754 says that only NaNs satisfy f != f.
+ // To avoid the floating-point hardware, could use:
+ // x := Float64bits(f);
+ // return uint32(x>>shift)&mask == mask && x != uvinf && x != uvneginf
+ return f != f
+}
+
+// IsInf reports whether f is an infinity, according to sign.
+// If sign > 0, IsInf reports whether f is positive infinity.
+// If sign < 0, IsInf reports whether f is negative infinity.
+// If sign == 0, IsInf reports whether f is either infinity.
+func IsInf(f float64, sign int) bool {
+ // Test for infinity by comparing against maximum float.
+ // To avoid the floating-point hardware, could use:
+ // x := Float64bits(f);
+ // return sign >= 0 && x == uvinf || sign <= 0 && x == uvneginf;
+ return sign >= 0 && f > MaxFloat64 || sign <= 0 && f < -MaxFloat64
+}
+
+// normalize returns a normal number y and exponent exp
+// satisfying x == y × 2**exp. It assumes x is finite and non-zero.
+func normalize(x float64) (y float64, exp int) {
+ const SmallestNormal = 2.2250738585072014e-308 // 2**-1022
+ if Abs(x) < SmallestNormal {
+ return x * (1 << 52), -52
+ }
+ return x, 0
+}
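
To make the bit-level alternatives sketched in the IsNaN and IsInf comments concrete, here is a minimal standalone sketch; the classify helper is hypothetical and simply re-derives the exponent-mask logic from the constants defined in this file:

package main

import (
	"fmt"
	"math"
)

// classify inspects the raw IEEE 754 bits of f: an all-ones exponent
// field means infinity (zero fraction) or NaN (nonzero fraction).
func classify(f float64) string {
	const (
		mask  = 0x7FF
		shift = 64 - 11 - 1
	)
	x := math.Float64bits(f)
	if uint32(x>>shift)&mask != mask {
		return "finite"
	}
	switch x {
	case 0x7FF0000000000000: // uvinf
		return "+inf"
	case 0xFFF0000000000000: // uvneginf
		return "-inf"
	}
	return "nan"
}

func main() {
	fmt.Println(classify(math.NaN()), classify(math.Inf(1)), classify(1.5)) // nan +inf finite
}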
diff --git a/src/math/bits/bits.go b/src/math/bits/bits.go
new file mode 100644
index 0000000..879ef2d
--- /dev/null
+++ b/src/math/bits/bits.go
@@ -0,0 +1,588 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run make_tables.go
+
+// Package bits implements bit counting and manipulation
+// functions for the predeclared unsigned integer types.
+package bits
+
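+// How the expression below works: ^uint(0) is a word of all one bits;
+// shifting it right by 32 leaves 0 on a 32-bit platform and an odd value
+// on a 64-bit one, so "& 1" selects 0 or 1, and 32 shifted by that is
+// 32 or 64, all evaluated at compile time.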
+const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64
+
+// UintSize is the size of a uint in bits.
+const UintSize = uintSize
+
+// --- LeadingZeros ---
+
+// LeadingZeros returns the number of leading zero bits in x; the result is UintSize for x == 0.
+func LeadingZeros(x uint) int { return UintSize - Len(x) }
+
+// LeadingZeros8 returns the number of leading zero bits in x; the result is 8 for x == 0.
+func LeadingZeros8(x uint8) int { return 8 - Len8(x) }
+
+// LeadingZeros16 returns the number of leading zero bits in x; the result is 16 for x == 0.
+func LeadingZeros16(x uint16) int { return 16 - Len16(x) }
+
+// LeadingZeros32 returns the number of leading zero bits in x; the result is 32 for x == 0.
+func LeadingZeros32(x uint32) int { return 32 - Len32(x) }
+
+// LeadingZeros64 returns the number of leading zero bits in x; the result is 64 for x == 0.
+func LeadingZeros64(x uint64) int { return 64 - Len64(x) }
+
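
One common use of the leading-zero count is the integer base-2 logarithm; a minimal sketch (log2 is an illustrative helper, not part of the package):

import "math/bits"

// log2 returns floor(log2(x)) for x > 0. For x == 0 the expression
// would yield -1, since LeadingZeros64(0) is 64.
func log2(x uint64) int {
	return 63 - bits.LeadingZeros64(x)
}

// log2(1) == 0, log2(8) == 3, log2(255) == 7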
+// --- TrailingZeros ---
+
+// See http://supertech.csail.mit.edu/papers/debruijn.pdf
+const deBruijn32 = 0x077CB531
+
+var deBruijn32tab = [32]byte{
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
+}
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+
+var deBruijn64tab = [64]byte{
+ 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+ 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+ 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+ 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+// TrailingZeros returns the number of trailing zero bits in x; the result is UintSize for x == 0.
+func TrailingZeros(x uint) int {
+ if UintSize == 32 {
+ return TrailingZeros32(uint32(x))
+ }
+ return TrailingZeros64(uint64(x))
+}
+
+// TrailingZeros8 returns the number of trailing zero bits in x; the result is 8 for x == 0.
+func TrailingZeros8(x uint8) int {
+ return int(ntz8tab[x])
+}
+
+// TrailingZeros16 returns the number of trailing zero bits in x; the result is 16 for x == 0.
+func TrailingZeros16(x uint16) int {
+ if x == 0 {
+ return 16
+ }
+ // see comment in TrailingZeros64
+ return int(deBruijn32tab[uint32(x&-x)*deBruijn32>>(32-5)])
+}
+
+// TrailingZeros32 returns the number of trailing zero bits in x; the result is 32 for x == 0.
+func TrailingZeros32(x uint32) int {
+ if x == 0 {
+ return 32
+ }
+ // see comment in TrailingZeros64
+ return int(deBruijn32tab[(x&-x)*deBruijn32>>(32-5)])
+}
+
+// TrailingZeros64 returns the number of trailing zero bits in x; the result is 64 for x == 0.
+func TrailingZeros64(x uint64) int {
+ if x == 0 {
+ return 64
+ }
+ // If popcount is fast, replace code below with return popcount(^x & (x - 1)).
+ //
+ // x & -x leaves only the right-most bit set in the word. Let k be the
+ // index of that bit. Since only a single bit is set, the value is two
+ // to the power of k. Multiplying by a power of two is equivalent to
+ // left shifting, in this case by k bits. The de Bruijn (64 bit) constant
+	// is such that all consecutive six-bit substrings are distinct.
+ // Therefore, if we have a left shifted version of this constant we can
+ // find by how many bits it was shifted by looking at which six bit
+ // substring ended up at the top of the word.
+ // (Knuth, volume 4, section 7.3.1)
+ return int(deBruijn64tab[(x&-x)*deBruijn64>>(64-6)])
+}
+
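
The de Bruijn lookup is easiest to follow on the 32-bit variant; a small sketch tracing one input through the same steps (constant and table copied from above):

package main

import "fmt"

const deBruijn32 = 0x077CB531

var deBruijn32tab = [32]byte{
	0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
	31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
}

func main() {
	x := uint32(0b10100) // two trailing zeros
	low := x & -x        // isolate the lowest set bit: 1<<2
	// The multiply shifts deBruijn32 left by 2; the top five bits of
	// the product index the table, which maps back to the shift amount.
	fmt.Println(deBruijn32tab[low*deBruijn32>>(32-5)]) // 2
}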
+// --- OnesCount ---
+
+const m0 = 0x5555555555555555 // 01010101 ...
+const m1 = 0x3333333333333333 // 00110011 ...
+const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
+const m3 = 0x00ff00ff00ff00ff // etc.
+const m4 = 0x0000ffff0000ffff
+
+// OnesCount returns the number of one bits ("population count") in x.
+func OnesCount(x uint) int {
+ if UintSize == 32 {
+ return OnesCount32(uint32(x))
+ }
+ return OnesCount64(uint64(x))
+}
+
+// OnesCount8 returns the number of one bits ("population count") in x.
+func OnesCount8(x uint8) int {
+ return int(pop8tab[x])
+}
+
+// OnesCount16 returns the number of one bits ("population count") in x.
+func OnesCount16(x uint16) int {
+ return int(pop8tab[x>>8] + pop8tab[x&0xff])
+}
+
+// OnesCount32 returns the number of one bits ("population count") in x.
+func OnesCount32(x uint32) int {
+ return int(pop8tab[x>>24] + pop8tab[x>>16&0xff] + pop8tab[x>>8&0xff] + pop8tab[x&0xff])
+}
+
+// OnesCount64 returns the number of one bits ("population count") in x.
+func OnesCount64(x uint64) int {
+ // Implementation: Parallel summing of adjacent bits.
+ // See "Hacker's Delight", Chap. 5: Counting Bits.
+ // The following pattern shows the general approach:
+ //
+ // x = x>>1&(m0&m) + x&(m0&m)
+ // x = x>>2&(m1&m) + x&(m1&m)
+ // x = x>>4&(m2&m) + x&(m2&m)
+ // x = x>>8&(m3&m) + x&(m3&m)
+ // x = x>>16&(m4&m) + x&(m4&m)
+ // x = x>>32&(m5&m) + x&(m5&m)
+ // return int(x)
+ //
+	// Masking (& operations) can be omitted when there's no
+ // danger that a field's sum will carry over into the next
+ // field: Since the result cannot be > 64, 8 bits is enough
+ // and we can ignore the masks for the shifts by 8 and up.
+ // Per "Hacker's Delight", the first line can be simplified
+ // more, but it saves at best one instruction, so we leave
+ // it alone for clarity.
+ const m = 1<<64 - 1
+ x = x>>1&(m0&m) + x&(m0&m)
+ x = x>>2&(m1&m) + x&(m1&m)
+ x = (x>>4 + x) & (m2 & m)
+ x += x >> 8
+ x += x >> 16
+ x += x >> 32
+ return int(x) & (1<<7 - 1)
+}
+
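
The parallel-summing pattern is easiest to trace at byte width; a sketch of the same idea on uint8 (illustrative, not the library's code path):

// popcount8 folds adjacent fields: 8 one-bit fields into 4 two-bit
// sums, then 2 four-bit sums, then one byte holding the final count.
func popcount8(x uint8) int {
	x = x>>1&0x55 + x&0x55 // 01010101: sum bit pairs
	x = x>>2&0x33 + x&0x33 // 00110011: sum nibbles
	x = x>>4&0x0f + x&0x0f // 00001111: sum the two halves
	return int(x)
}

// popcount8(0xff) == 8, popcount8(0b1011) == 3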
+// --- RotateLeft ---
+
+// RotateLeft returns the value of x rotated left by (k mod UintSize) bits.
+// To rotate x right by k bits, call RotateLeft(x, -k).
+//
+// This function's execution time does not depend on the inputs.
+func RotateLeft(x uint, k int) uint {
+ if UintSize == 32 {
+ return uint(RotateLeft32(uint32(x), k))
+ }
+ return uint(RotateLeft64(uint64(x), k))
+}
+
+// RotateLeft8 returns the value of x rotated left by (k mod 8) bits.
+// To rotate x right by k bits, call RotateLeft8(x, -k).
+//
+// This function's execution time does not depend on the inputs.
+func RotateLeft8(x uint8, k int) uint8 {
+ const n = 8
+ s := uint(k) & (n - 1)
+ return x<<s | x>>(n-s)
+}
+
+// RotateLeft16 returns the value of x rotated left by (k mod 16) bits.
+// To rotate x right by k bits, call RotateLeft16(x, -k).
+//
+// This function's execution time does not depend on the inputs.
+func RotateLeft16(x uint16, k int) uint16 {
+ const n = 16
+ s := uint(k) & (n - 1)
+ return x<<s | x>>(n-s)
+}
+
+// RotateLeft32 returns the value of x rotated left by (k mod 32) bits.
+// To rotate x right by k bits, call RotateLeft32(x, -k).
+//
+// This function's execution time does not depend on the inputs.
+func RotateLeft32(x uint32, k int) uint32 {
+ const n = 32
+ s := uint(k) & (n - 1)
+ return x<<s | x>>(n-s)
+}
+
+// RotateLeft64 returns the value of x rotated left by (k mod 64) bits.
+// To rotate x right by k bits, call RotateLeft64(x, -k).
+//
+// This function's execution time does not depend on the inputs.
+func RotateLeft64(x uint64, k int) uint64 {
+ const n = 64
+ s := uint(k) & (n - 1)
+ return x<<s | x>>(n-s)
+}
+
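
A quick property check of the rotate family, mirroring what the tests below verify (a sketch, assuming the usual fmt and math/bits imports):

x := uint64(0x0123456789abcdef)
r := bits.RotateLeft64(x, 13)
fmt.Println(bits.RotateLeft64(r, -13) == x) // true: rotating by -k undoes k
fmt.Println(bits.RotateLeft64(x, 64) == x)  // true: k is taken mod 64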
+// --- Reverse ---
+
+// Reverse returns the value of x with its bits in reversed order.
+func Reverse(x uint) uint {
+ if UintSize == 32 {
+ return uint(Reverse32(uint32(x)))
+ }
+ return uint(Reverse64(uint64(x)))
+}
+
+// Reverse8 returns the value of x with its bits in reversed order.
+func Reverse8(x uint8) uint8 {
+ return rev8tab[x]
+}
+
+// Reverse16 returns the value of x with its bits in reversed order.
+func Reverse16(x uint16) uint16 {
+ return uint16(rev8tab[x>>8]) | uint16(rev8tab[x&0xff])<<8
+}
+
+// Reverse32 returns the value of x with its bits in reversed order.
+func Reverse32(x uint32) uint32 {
+ const m = 1<<32 - 1
+ x = x>>1&(m0&m) | x&(m0&m)<<1
+ x = x>>2&(m1&m) | x&(m1&m)<<2
+ x = x>>4&(m2&m) | x&(m2&m)<<4
+ return ReverseBytes32(x)
+}
+
+// Reverse64 returns the value of x with its bits in reversed order.
+func Reverse64(x uint64) uint64 {
+ const m = 1<<64 - 1
+ x = x>>1&(m0&m) | x&(m0&m)<<1
+ x = x>>2&(m1&m) | x&(m1&m)<<2
+ x = x>>4&(m2&m) | x&(m2&m)<<4
+ return ReverseBytes64(x)
+}
+
+// --- ReverseBytes ---
+
+// ReverseBytes returns the value of x with its bytes in reversed order.
+//
+// This function's execution time does not depend on the inputs.
+func ReverseBytes(x uint) uint {
+ if UintSize == 32 {
+ return uint(ReverseBytes32(uint32(x)))
+ }
+ return uint(ReverseBytes64(uint64(x)))
+}
+
+// ReverseBytes16 returns the value of x with its bytes in reversed order.
+//
+// This function's execution time does not depend on the inputs.
+func ReverseBytes16(x uint16) uint16 {
+ return x>>8 | x<<8
+}
+
+// ReverseBytes32 returns the value of x with its bytes in reversed order.
+//
+// This function's execution time does not depend on the inputs.
+func ReverseBytes32(x uint32) uint32 {
+ const m = 1<<32 - 1
+ x = x>>8&(m3&m) | x&(m3&m)<<8
+ return x>>16 | x<<16
+}
+
+// ReverseBytes64 returns the value of x with its bytes in reversed order.
+//
+// This function's execution time does not depend on the inputs.
+func ReverseBytes64(x uint64) uint64 {
+ const m = 1<<64 - 1
+ x = x>>8&(m3&m) | x&(m3&m)<<8
+ x = x>>16&(m4&m) | x&(m4&m)<<16
+ return x>>32 | x<<32
+}
+
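
ReverseBytes32 is the usual building block for byte-order (endianness) swaps, e.g. converting a host-order word for a big-endian wire format (sketch, assuming fmt and math/bits are imported):

le := uint32(0x0A0B0C0D)
fmt.Printf("%#x\n", bits.ReverseBytes32(le)) // 0xd0c0b0a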
+// --- Len ---
+
+// Len returns the minimum number of bits required to represent x; the result is 0 for x == 0.
+func Len(x uint) int {
+ if UintSize == 32 {
+ return Len32(uint32(x))
+ }
+ return Len64(uint64(x))
+}
+
+// Len8 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
+func Len8(x uint8) int {
+ return int(len8tab[x])
+}
+
+// Len16 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
+func Len16(x uint16) (n int) {
+ if x >= 1<<8 {
+ x >>= 8
+ n = 8
+ }
+ return n + int(len8tab[x])
+}
+
+// Len32 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
+func Len32(x uint32) (n int) {
+ if x >= 1<<16 {
+ x >>= 16
+ n = 16
+ }
+ if x >= 1<<8 {
+ x >>= 8
+ n += 8
+ }
+ return n + int(len8tab[x])
+}
+
+// Len64 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
+func Len64(x uint64) (n int) {
+ if x >= 1<<32 {
+ x >>= 32
+ n = 32
+ }
+ if x >= 1<<16 {
+ x >>= 16
+ n += 16
+ }
+ if x >= 1<<8 {
+ x >>= 8
+ n += 8
+ }
+ return n + int(len8tab[x])
+}
+
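
Len is the bit length, so for x > 0 it equals floor(log2(x)) + 1; a tiny sketch (assuming fmt and math/bits):

for _, x := range []uint64{1, 2, 255, 256} {
	fmt.Println(x, bits.Len64(x)) // 1 1, 2 2, 255 8, 256 9
}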
+// --- Add with carry ---
+
+// Add returns the sum with carry of x, y and carry: sum = x + y + carry.
+// The carry input must be 0 or 1; otherwise the behavior is undefined.
+// The carryOut output is guaranteed to be 0 or 1.
+//
+// This function's execution time does not depend on the inputs.
+func Add(x, y, carry uint) (sum, carryOut uint) {
+ if UintSize == 32 {
+ s32, c32 := Add32(uint32(x), uint32(y), uint32(carry))
+ return uint(s32), uint(c32)
+ }
+ s64, c64 := Add64(uint64(x), uint64(y), uint64(carry))
+ return uint(s64), uint(c64)
+}
+
+// Add32 returns the sum with carry of x, y and carry: sum = x + y + carry.
+// The carry input must be 0 or 1; otherwise the behavior is undefined.
+// The carryOut output is guaranteed to be 0 or 1.
+//
+// This function's execution time does not depend on the inputs.
+func Add32(x, y, carry uint32) (sum, carryOut uint32) {
+ sum64 := uint64(x) + uint64(y) + uint64(carry)
+ sum = uint32(sum64)
+ carryOut = uint32(sum64 >> 32)
+ return
+}
+
+// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry.
+// The carry input must be 0 or 1; otherwise the behavior is undefined.
+// The carryOut output is guaranteed to be 0 or 1.
+//
+// This function's execution time does not depend on the inputs.
+func Add64(x, y, carry uint64) (sum, carryOut uint64) {
+ sum = x + y + carry
+ // The sum will overflow if both top bits are set (x & y) or if one of them
+ // is (x | y), and a carry from the lower place happened. If such a carry
+ // happens, the top bit will be 1 + 0 + 1 = 0 (&^ sum).
+ carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
+ return
+}
+
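
Feeding carryOut into the next limb's carry input is how multi-word arithmetic is built from Add64; a minimal 128-bit addition sketch (add128 is an illustrative helper):

// add128 adds two 128-bit values, each held as a (hi, lo) limb pair.
func add128(xhi, xlo, yhi, ylo uint64) (hi, lo uint64) {
	var carry uint64
	lo, carry = bits.Add64(xlo, ylo, 0) // low limbs first
	hi, _ = bits.Add64(xhi, yhi, carry) // chain the carry upward
	return
}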
+// --- Subtract with borrow ---
+
+// Sub returns the difference of x, y and borrow: diff = x - y - borrow.
+// The borrow input must be 0 or 1; otherwise the behavior is undefined.
+// The borrowOut output is guaranteed to be 0 or 1.
+//
+// This function's execution time does not depend on the inputs.
+func Sub(x, y, borrow uint) (diff, borrowOut uint) {
+ if UintSize == 32 {
+ d32, b32 := Sub32(uint32(x), uint32(y), uint32(borrow))
+ return uint(d32), uint(b32)
+ }
+ d64, b64 := Sub64(uint64(x), uint64(y), uint64(borrow))
+ return uint(d64), uint(b64)
+}
+
+// Sub32 returns the difference of x, y and borrow, diff = x - y - borrow.
+// The borrow input must be 0 or 1; otherwise the behavior is undefined.
+// The borrowOut output is guaranteed to be 0 or 1.
+//
+// This function's execution time does not depend on the inputs.
+func Sub32(x, y, borrow uint32) (diff, borrowOut uint32) {
+ diff = x - y - borrow
+ // The difference will underflow if the top bit of x is not set and the top
+ // bit of y is set (^x & y) or if they are the same (^(x ^ y)) and a borrow
+ // from the lower place happens. If that borrow happens, the result will be
+ // 1 - 1 - 1 = 0 - 0 - 1 = 1 (& diff).
+ borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 31
+ return
+}
+
+// Sub64 returns the difference of x, y and borrow: diff = x - y - borrow.
+// The borrow input must be 0 or 1; otherwise the behavior is undefined.
+// The borrowOut output is guaranteed to be 0 or 1.
+//
+// This function's execution time does not depend on the inputs.
+func Sub64(x, y, borrow uint64) (diff, borrowOut uint64) {
+ diff = x - y - borrow
+ // See Sub32 for the bit logic.
+ borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
+ return
+}
+
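
Sub64 chains the same way, with borrowOut in place of the carry; a matching 128-bit subtraction sketch (sub128 is illustrative):

// sub128 computes (xhi, xlo) - (yhi, ylo) over 128 bits.
func sub128(xhi, xlo, yhi, ylo uint64) (hi, lo uint64) {
	var borrow uint64
	lo, borrow = bits.Sub64(xlo, ylo, 0)
	hi, _ = bits.Sub64(xhi, yhi, borrow)
	return
}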
+// --- Full-width multiply ---
+
+// Mul returns the full-width product of x and y: (hi, lo) = x * y
+// with the product bits' upper half returned in hi and the lower
+// half returned in lo.
+//
+// This function's execution time does not depend on the inputs.
+func Mul(x, y uint) (hi, lo uint) {
+ if UintSize == 32 {
+ h, l := Mul32(uint32(x), uint32(y))
+ return uint(h), uint(l)
+ }
+ h, l := Mul64(uint64(x), uint64(y))
+ return uint(h), uint(l)
+}
+
+// Mul32 returns the 64-bit product of x and y: (hi, lo) = x * y
+// with the product bits' upper half returned in hi and the lower
+// half returned in lo.
+//
+// This function's execution time does not depend on the inputs.
+func Mul32(x, y uint32) (hi, lo uint32) {
+ tmp := uint64(x) * uint64(y)
+ hi, lo = uint32(tmp>>32), uint32(tmp)
+ return
+}
+
+// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y
+// with the product bits' upper half returned in hi and the lower
+// half returned in lo.
+//
+// This function's execution time does not depend on the inputs.
+func Mul64(x, y uint64) (hi, lo uint64) {
+ const mask32 = 1<<32 - 1
+ x0 := x & mask32
+ x1 := x >> 32
+ y0 := y & mask32
+ y1 := y >> 32
+ w0 := x0 * y0
+ t := x1*y0 + w0>>32
+ w1 := t & mask32
+ w2 := t >> 32
+ w1 += x0 * y1
+ hi = x1*y1 + w2 + w1>>32
+ lo = x * y
+ return
+}
+
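
A cross-check of the 32-bit-limb schoolbook decomposition against math/big confirms that (hi, lo) really is the full 128-bit product (sketch):

package main

import (
	"fmt"
	"math/big"
	"math/bits"
)

func main() {
	x, y := uint64(0xdeadbeefcafebabe), uint64(0x0123456789abcdef)
	hi, lo := bits.Mul64(x, y)
	got := new(big.Int).Lsh(new(big.Int).SetUint64(hi), 64)
	got.Add(got, new(big.Int).SetUint64(lo))
	want := new(big.Int).Mul(new(big.Int).SetUint64(x), new(big.Int).SetUint64(y))
	fmt.Println(got.Cmp(want) == 0) // true
}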
+// --- Full-width divide ---
+
+// Div returns the quotient and remainder of (hi, lo) divided by y:
+// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper
+// half in parameter hi and the lower half in parameter lo.
+// Div panics for y == 0 (division by zero) or y <= hi (quotient overflow).
+func Div(hi, lo, y uint) (quo, rem uint) {
+ if UintSize == 32 {
+ q, r := Div32(uint32(hi), uint32(lo), uint32(y))
+ return uint(q), uint(r)
+ }
+ q, r := Div64(uint64(hi), uint64(lo), uint64(y))
+ return uint(q), uint(r)
+}
+
+// Div32 returns the quotient and remainder of (hi, lo) divided by y:
+// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper
+// half in parameter hi and the lower half in parameter lo.
+// Div32 panics for y == 0 (division by zero) or y <= hi (quotient overflow).
+func Div32(hi, lo, y uint32) (quo, rem uint32) {
+ if y != 0 && y <= hi {
+ panic(overflowError)
+ }
+ z := uint64(hi)<<32 | uint64(lo)
+ quo, rem = uint32(z/uint64(y)), uint32(z%uint64(y))
+ return
+}
+
+// Div64 returns the quotient and remainder of (hi, lo) divided by y:
+// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper
+// half in parameter hi and the lower half in parameter lo.
+// Div64 panics for y == 0 (division by zero) or y <= hi (quotient overflow).
+func Div64(hi, lo, y uint64) (quo, rem uint64) {
+ const (
+ two32 = 1 << 32
+ mask32 = two32 - 1
+ )
+ if y == 0 {
+ panic(divideError)
+ }
+ if y <= hi {
+ panic(overflowError)
+ }
+
+ s := uint(LeadingZeros64(y))
+ y <<= s
+
+ yn1 := y >> 32
+ yn0 := y & mask32
+ un32 := hi<<s | lo>>(64-s)
+ un10 := lo << s
+ un1 := un10 >> 32
+ un0 := un10 & mask32
+ q1 := un32 / yn1
+ rhat := un32 - q1*yn1
+
+ for q1 >= two32 || q1*yn0 > two32*rhat+un1 {
+ q1--
+ rhat += yn1
+ if rhat >= two32 {
+ break
+ }
+ }
+
+ un21 := un32*two32 + un1 - q1*y
+ q0 := un21 / yn1
+ rhat = un21 - q0*yn1
+
+ for q0 >= two32 || q0*yn0 > two32*rhat+un0 {
+ q0--
+ rhat += yn1
+ if rhat >= two32 {
+ break
+ }
+ }
+
+ return q1*two32 + q0, (un21*two32 + un0 - q0*y) >> s
+}
+
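
In caller terms, Div64 divides a 128-bit dividend by a 64-bit divisor (the body is the two-digit-by-one-digit step of Knuth's Algorithm D on 32-bit halves). A sketch that also reconstructs the dividend to check the result, respecting the hi < y precondition (assuming fmt and math/bits):

q, r := bits.Div64(10, 5, 1_000_000_000) // (10<<64 + 5) / 1e9
phi, plo := bits.Mul64(q, 1_000_000_000)
lo, carry := bits.Add64(plo, r, 0)
fmt.Println(phi+carry == 10 && lo == 5) // true: q*y + r == dividend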
+// Rem returns the remainder of (hi, lo) divided by y. Rem panics for
+// y == 0 (division by zero) but, unlike Div, it doesn't panic on a
+// quotient overflow.
+func Rem(hi, lo, y uint) uint {
+ if UintSize == 32 {
+ return uint(Rem32(uint32(hi), uint32(lo), uint32(y)))
+ }
+ return uint(Rem64(uint64(hi), uint64(lo), uint64(y)))
+}
+
+// Rem32 returns the remainder of (hi, lo) divided by y. Rem32 panics
+// for y == 0 (division by zero) but, unlike Div32, it doesn't panic
+// on a quotient overflow.
+func Rem32(hi, lo, y uint32) uint32 {
+ return uint32((uint64(hi)<<32 | uint64(lo)) % uint64(y))
+}
+
+// Rem64 returns the remainder of (hi, lo) divided by y. Rem64 panics
+// for y == 0 (division by zero) but, unlike Div64, it doesn't panic
+// on a quotient overflow.
+func Rem64(hi, lo, y uint64) uint64 {
+ // We scale down hi so that hi < y, then use Div64 to compute the
+ // rem with the guarantee that it won't panic on quotient overflow.
+ // Given that
+ // hi ≡ hi%y (mod y)
+ // we have
+ // hi<<64 + lo ≡ (hi%y)<<64 + lo (mod y)
+ _, rem := Div64(hi%y, lo, y)
+ return rem
+}
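
The hi%y pre-reduction is exactly what lets Rem64 survive cases where the full quotient would not fit in 64 bits; a one-line sketch using a value from the test table below (Div64 would panic here because y <= hi):

fmt.Println(bits.Rem64(42, 1119, 42)) // 27, i.e. (42<<64 + 1119) % 42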
diff --git a/src/math/bits/bits_errors.go b/src/math/bits/bits_errors.go
new file mode 100644
index 0000000..192b4be
--- /dev/null
+++ b/src/math/bits/bits_errors.go
@@ -0,0 +1,15 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !compiler_bootstrap
+
+package bits
+
+import _ "unsafe"
+
+//go:linkname overflowError runtime.overflowError
+var overflowError error
+
+//go:linkname divideError runtime.divideError
+var divideError error
diff --git a/src/math/bits/bits_errors_bootstrap.go b/src/math/bits/bits_errors_bootstrap.go
new file mode 100644
index 0000000..5df5738
--- /dev/null
+++ b/src/math/bits/bits_errors_bootstrap.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build compiler_bootstrap
+
+// This version is used only for bootstrap (on this path we want
+// to avoid use of go:linkname as applied to variables).
+
+package bits
+
+type errorString string
+
+func (e errorString) RuntimeError() {}
+
+func (e errorString) Error() string {
+ return "runtime error: " + string(e)
+}
+
+var overflowError = error(errorString("integer overflow"))
+
+var divideError = error(errorString("integer divide by zero"))
diff --git a/src/math/bits/bits_tables.go b/src/math/bits/bits_tables.go
new file mode 100644
index 0000000..f1e15a0
--- /dev/null
+++ b/src/math/bits/bits_tables.go
@@ -0,0 +1,83 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by go run make_tables.go. DO NOT EDIT.
+
+package bits
+
+var ntz8tab = [256]uint8{
+ 0x08, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x06, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x07, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x06, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+}
+
+var pop8tab = [256]uint8{
+ 0x00, 0x01, 0x01, 0x02, 0x01, 0x02, 0x02, 0x03, 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04,
+ 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05,
+ 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05,
+ 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06,
+ 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05,
+ 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06,
+ 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06,
+ 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07,
+ 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05,
+ 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06,
+ 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06,
+ 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07,
+ 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06,
+ 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07,
+ 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07,
+ 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07, 0x05, 0x06, 0x06, 0x07, 0x06, 0x07, 0x07, 0x08,
+}
+
+var rev8tab = [256]uint8{
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+}
+
+var len8tab = [256]uint8{
+ 0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+}
diff --git a/src/math/bits/bits_test.go b/src/math/bits/bits_test.go
new file mode 100644
index 0000000..23b4539
--- /dev/null
+++ b/src/math/bits/bits_test.go
@@ -0,0 +1,1347 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bits_test
+
+import (
+ . "math/bits"
+ "runtime"
+ "testing"
+ "unsafe"
+)
+
+func TestUintSize(t *testing.T) {
+ var x uint
+ if want := unsafe.Sizeof(x) * 8; UintSize != want {
+ t.Fatalf("UintSize = %d; want %d", UintSize, want)
+ }
+}
+
+func TestLeadingZeros(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ nlz := tab[i].nlz
+ for k := 0; k < 64-8; k++ {
+ x := uint64(i) << uint(k)
+ if x <= 1<<8-1 {
+ got := LeadingZeros8(uint8(x))
+ want := nlz - k + (8 - 8)
+ if x == 0 {
+ want = 8
+ }
+ if got != want {
+ t.Fatalf("LeadingZeros8(%#02x) == %d; want %d", x, got, want)
+ }
+ }
+
+ if x <= 1<<16-1 {
+ got := LeadingZeros16(uint16(x))
+ want := nlz - k + (16 - 8)
+ if x == 0 {
+ want = 16
+ }
+ if got != want {
+ t.Fatalf("LeadingZeros16(%#04x) == %d; want %d", x, got, want)
+ }
+ }
+
+ if x <= 1<<32-1 {
+ got := LeadingZeros32(uint32(x))
+ want := nlz - k + (32 - 8)
+ if x == 0 {
+ want = 32
+ }
+ if got != want {
+ t.Fatalf("LeadingZeros32(%#08x) == %d; want %d", x, got, want)
+ }
+ if UintSize == 32 {
+ got = LeadingZeros(uint(x))
+ if got != want {
+ t.Fatalf("LeadingZeros(%#08x) == %d; want %d", x, got, want)
+ }
+ }
+ }
+
+ if x <= 1<<64-1 {
+ got := LeadingZeros64(uint64(x))
+ want := nlz - k + (64 - 8)
+ if x == 0 {
+ want = 64
+ }
+ if got != want {
+ t.Fatalf("LeadingZeros64(%#016x) == %d; want %d", x, got, want)
+ }
+ if UintSize == 64 {
+ got = LeadingZeros(uint(x))
+ if got != want {
+ t.Fatalf("LeadingZeros(%#016x) == %d; want %d", x, got, want)
+ }
+ }
+ }
+ }
+ }
+}
+
+// Exported (global) variable serving as input for some
+// of the benchmarks to ensure side-effect free calls
+// are not optimized away.
+var Input uint64 = DeBruijn64
+
+// Exported (global) variable to store function results
+// during benchmarking to ensure side-effect free calls
+// are not optimized away.
+var Output int
+
+func BenchmarkLeadingZeros(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += LeadingZeros(uint(Input) >> (uint(i) % UintSize))
+ }
+ Output = s
+}
+
+func BenchmarkLeadingZeros8(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += LeadingZeros8(uint8(Input) >> (uint(i) % 8))
+ }
+ Output = s
+}
+
+func BenchmarkLeadingZeros16(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += LeadingZeros16(uint16(Input) >> (uint(i) % 16))
+ }
+ Output = s
+}
+
+func BenchmarkLeadingZeros32(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += LeadingZeros32(uint32(Input) >> (uint(i) % 32))
+ }
+ Output = s
+}
+
+func BenchmarkLeadingZeros64(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += LeadingZeros64(uint64(Input) >> (uint(i) % 64))
+ }
+ Output = s
+}
+
+func TestTrailingZeros(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ ntz := tab[i].ntz
+ for k := 0; k < 64-8; k++ {
+ x := uint64(i) << uint(k)
+ want := ntz + k
+ if x <= 1<<8-1 {
+ got := TrailingZeros8(uint8(x))
+ if x == 0 {
+ want = 8
+ }
+ if got != want {
+ t.Fatalf("TrailingZeros8(%#02x) == %d; want %d", x, got, want)
+ }
+ }
+
+ if x <= 1<<16-1 {
+ got := TrailingZeros16(uint16(x))
+ if x == 0 {
+ want = 16
+ }
+ if got != want {
+ t.Fatalf("TrailingZeros16(%#04x) == %d; want %d", x, got, want)
+ }
+ }
+
+ if x <= 1<<32-1 {
+ got := TrailingZeros32(uint32(x))
+ if x == 0 {
+ want = 32
+ }
+ if got != want {
+ t.Fatalf("TrailingZeros32(%#08x) == %d; want %d", x, got, want)
+ }
+ if UintSize == 32 {
+ got = TrailingZeros(uint(x))
+ if got != want {
+ t.Fatalf("TrailingZeros(%#08x) == %d; want %d", x, got, want)
+ }
+ }
+ }
+
+ if x <= 1<<64-1 {
+ got := TrailingZeros64(uint64(x))
+ if x == 0 {
+ want = 64
+ }
+ if got != want {
+ t.Fatalf("TrailingZeros64(%#016x) == %d; want %d", x, got, want)
+ }
+ if UintSize == 64 {
+ got = TrailingZeros(uint(x))
+ if got != want {
+ t.Fatalf("TrailingZeros(%#016x) == %d; want %d", x, got, want)
+ }
+ }
+ }
+ }
+ }
+}
+
+func BenchmarkTrailingZeros(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += TrailingZeros(uint(Input) << (uint(i) % UintSize))
+ }
+ Output = s
+}
+
+func BenchmarkTrailingZeros8(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += TrailingZeros8(uint8(Input) << (uint(i) % 8))
+ }
+ Output = s
+}
+
+func BenchmarkTrailingZeros16(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += TrailingZeros16(uint16(Input) << (uint(i) % 16))
+ }
+ Output = s
+}
+
+func BenchmarkTrailingZeros32(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += TrailingZeros32(uint32(Input) << (uint(i) % 32))
+ }
+ Output = s
+}
+
+func BenchmarkTrailingZeros64(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += TrailingZeros64(uint64(Input) << (uint(i) % 64))
+ }
+ Output = s
+}
+
+func TestOnesCount(t *testing.T) {
+ var x uint64
+ for i := 0; i <= 64; i++ {
+ testOnesCount(t, x, i)
+ x = x<<1 | 1
+ }
+
+ for i := 64; i >= 0; i-- {
+ testOnesCount(t, x, i)
+ x = x << 1
+ }
+
+ for i := 0; i < 256; i++ {
+ for k := 0; k < 64-8; k++ {
+ testOnesCount(t, uint64(i)<<uint(k), tab[i].pop)
+ }
+ }
+}
+
+func testOnesCount(t *testing.T, x uint64, want int) {
+ if x <= 1<<8-1 {
+ got := OnesCount8(uint8(x))
+ if got != want {
+ t.Fatalf("OnesCount8(%#02x) == %d; want %d", uint8(x), got, want)
+ }
+ }
+
+ if x <= 1<<16-1 {
+ got := OnesCount16(uint16(x))
+ if got != want {
+ t.Fatalf("OnesCount16(%#04x) == %d; want %d", uint16(x), got, want)
+ }
+ }
+
+ if x <= 1<<32-1 {
+ got := OnesCount32(uint32(x))
+ if got != want {
+ t.Fatalf("OnesCount32(%#08x) == %d; want %d", uint32(x), got, want)
+ }
+ if UintSize == 32 {
+ got = OnesCount(uint(x))
+ if got != want {
+ t.Fatalf("OnesCount(%#08x) == %d; want %d", uint32(x), got, want)
+ }
+ }
+ }
+
+ if x <= 1<<64-1 {
+ got := OnesCount64(uint64(x))
+ if got != want {
+ t.Fatalf("OnesCount64(%#016x) == %d; want %d", x, got, want)
+ }
+ if UintSize == 64 {
+ got = OnesCount(uint(x))
+ if got != want {
+ t.Fatalf("OnesCount(%#016x) == %d; want %d", x, got, want)
+ }
+ }
+ }
+}
+
+func BenchmarkOnesCount(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += OnesCount(uint(Input))
+ }
+ Output = s
+}
+
+func BenchmarkOnesCount8(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += OnesCount8(uint8(Input))
+ }
+ Output = s
+}
+
+func BenchmarkOnesCount16(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += OnesCount16(uint16(Input))
+ }
+ Output = s
+}
+
+func BenchmarkOnesCount32(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += OnesCount32(uint32(Input))
+ }
+ Output = s
+}
+
+func BenchmarkOnesCount64(b *testing.B) {
+ var s int
+ for i := 0; i < b.N; i++ {
+ s += OnesCount64(uint64(Input))
+ }
+ Output = s
+}
+
+func TestRotateLeft(t *testing.T) {
+ var m uint64 = DeBruijn64
+
+ for k := uint(0); k < 128; k++ {
+ x8 := uint8(m)
+ got8 := RotateLeft8(x8, int(k))
+ want8 := x8<<(k&0x7) | x8>>(8-k&0x7)
+ if got8 != want8 {
+ t.Fatalf("RotateLeft8(%#02x, %d) == %#02x; want %#02x", x8, k, got8, want8)
+ }
+ got8 = RotateLeft8(want8, -int(k))
+ if got8 != x8 {
+ t.Fatalf("RotateLeft8(%#02x, -%d) == %#02x; want %#02x", want8, k, got8, x8)
+ }
+
+ x16 := uint16(m)
+ got16 := RotateLeft16(x16, int(k))
+ want16 := x16<<(k&0xf) | x16>>(16-k&0xf)
+ if got16 != want16 {
+ t.Fatalf("RotateLeft16(%#04x, %d) == %#04x; want %#04x", x16, k, got16, want16)
+ }
+ got16 = RotateLeft16(want16, -int(k))
+ if got16 != x16 {
+ t.Fatalf("RotateLeft16(%#04x, -%d) == %#04x; want %#04x", want16, k, got16, x16)
+ }
+
+ x32 := uint32(m)
+ got32 := RotateLeft32(x32, int(k))
+ want32 := x32<<(k&0x1f) | x32>>(32-k&0x1f)
+ if got32 != want32 {
+ t.Fatalf("RotateLeft32(%#08x, %d) == %#08x; want %#08x", x32, k, got32, want32)
+ }
+ got32 = RotateLeft32(want32, -int(k))
+ if got32 != x32 {
+ t.Fatalf("RotateLeft32(%#08x, -%d) == %#08x; want %#08x", want32, k, got32, x32)
+ }
+ if UintSize == 32 {
+ x := uint(m)
+ got := RotateLeft(x, int(k))
+ want := x<<(k&0x1f) | x>>(32-k&0x1f)
+ if got != want {
+ t.Fatalf("RotateLeft(%#08x, %d) == %#08x; want %#08x", x, k, got, want)
+ }
+ got = RotateLeft(want, -int(k))
+ if got != x {
+ t.Fatalf("RotateLeft(%#08x, -%d) == %#08x; want %#08x", want, k, got, x)
+ }
+ }
+
+ x64 := uint64(m)
+ got64 := RotateLeft64(x64, int(k))
+ want64 := x64<<(k&0x3f) | x64>>(64-k&0x3f)
+ if got64 != want64 {
+ t.Fatalf("RotateLeft64(%#016x, %d) == %#016x; want %#016x", x64, k, got64, want64)
+ }
+ got64 = RotateLeft64(want64, -int(k))
+ if got64 != x64 {
+ t.Fatalf("RotateLeft64(%#016x, -%d) == %#016x; want %#016x", want64, k, got64, x64)
+ }
+ if UintSize == 64 {
+ x := uint(m)
+ got := RotateLeft(x, int(k))
+ want := x<<(k&0x3f) | x>>(64-k&0x3f)
+ if got != want {
+ t.Fatalf("RotateLeft(%#016x, %d) == %#016x; want %#016x", x, k, got, want)
+ }
+ got = RotateLeft(want, -int(k))
+ if got != x {
+ t.Fatalf("RotateLeft(%#08x, -%d) == %#08x; want %#08x", want, k, got, x)
+ }
+ }
+ }
+}
+
+func BenchmarkRotateLeft(b *testing.B) {
+ var s uint
+ for i := 0; i < b.N; i++ {
+ s += RotateLeft(uint(Input), i)
+ }
+ Output = int(s)
+}
+
+func BenchmarkRotateLeft8(b *testing.B) {
+ var s uint8
+ for i := 0; i < b.N; i++ {
+ s += RotateLeft8(uint8(Input), i)
+ }
+ Output = int(s)
+}
+
+func BenchmarkRotateLeft16(b *testing.B) {
+ var s uint16
+ for i := 0; i < b.N; i++ {
+ s += RotateLeft16(uint16(Input), i)
+ }
+ Output = int(s)
+}
+
+func BenchmarkRotateLeft32(b *testing.B) {
+ var s uint32
+ for i := 0; i < b.N; i++ {
+ s += RotateLeft32(uint32(Input), i)
+ }
+ Output = int(s)
+}
+
+func BenchmarkRotateLeft64(b *testing.B) {
+ var s uint64
+ for i := 0; i < b.N; i++ {
+ s += RotateLeft64(uint64(Input), i)
+ }
+ Output = int(s)
+}
+
+func TestReverse(t *testing.T) {
+ // test each bit
+ for i := uint(0); i < 64; i++ {
+ testReverse(t, uint64(1)<<i, uint64(1)<<(63-i))
+ }
+
+ // test a few patterns
+ for _, test := range []struct {
+ x, r uint64
+ }{
+ {0, 0},
+ {0x1, 0x8 << 60},
+ {0x2, 0x4 << 60},
+ {0x3, 0xc << 60},
+ {0x4, 0x2 << 60},
+ {0x5, 0xa << 60},
+ {0x6, 0x6 << 60},
+ {0x7, 0xe << 60},
+ {0x8, 0x1 << 60},
+ {0x9, 0x9 << 60},
+ {0xa, 0x5 << 60},
+ {0xb, 0xd << 60},
+ {0xc, 0x3 << 60},
+ {0xd, 0xb << 60},
+ {0xe, 0x7 << 60},
+ {0xf, 0xf << 60},
+ {0x5686487, 0xe12616a000000000},
+ {0x0123456789abcdef, 0xf7b3d591e6a2c480},
+ } {
+ testReverse(t, test.x, test.r)
+ testReverse(t, test.r, test.x)
+ }
+}
+
+func testReverse(t *testing.T, x64, want64 uint64) {
+ x8 := uint8(x64)
+ got8 := Reverse8(x8)
+ want8 := uint8(want64 >> (64 - 8))
+ if got8 != want8 {
+ t.Fatalf("Reverse8(%#02x) == %#02x; want %#02x", x8, got8, want8)
+ }
+
+ x16 := uint16(x64)
+ got16 := Reverse16(x16)
+ want16 := uint16(want64 >> (64 - 16))
+ if got16 != want16 {
+ t.Fatalf("Reverse16(%#04x) == %#04x; want %#04x", x16, got16, want16)
+ }
+
+ x32 := uint32(x64)
+ got32 := Reverse32(x32)
+ want32 := uint32(want64 >> (64 - 32))
+ if got32 != want32 {
+ t.Fatalf("Reverse32(%#08x) == %#08x; want %#08x", x32, got32, want32)
+ }
+ if UintSize == 32 {
+ x := uint(x32)
+ got := Reverse(x)
+ want := uint(want32)
+ if got != want {
+ t.Fatalf("Reverse(%#08x) == %#08x; want %#08x", x, got, want)
+ }
+ }
+
+ got64 := Reverse64(x64)
+ if got64 != want64 {
+ t.Fatalf("Reverse64(%#016x) == %#016x; want %#016x", x64, got64, want64)
+ }
+ if UintSize == 64 {
+ x := uint(x64)
+ got := Reverse(x)
+ want := uint(want64)
+ if got != want {
+ t.Fatalf("Reverse(%#08x) == %#016x; want %#016x", x, got, want)
+ }
+ }
+}
+
+func BenchmarkReverse(b *testing.B) {
+ var s uint
+ for i := 0; i < b.N; i++ {
+ s += Reverse(uint(i))
+ }
+ Output = int(s)
+}
+
+func BenchmarkReverse8(b *testing.B) {
+ var s uint8
+ for i := 0; i < b.N; i++ {
+ s += Reverse8(uint8(i))
+ }
+ Output = int(s)
+}
+
+func BenchmarkReverse16(b *testing.B) {
+ var s uint16
+ for i := 0; i < b.N; i++ {
+ s += Reverse16(uint16(i))
+ }
+ Output = int(s)
+}
+
+func BenchmarkReverse32(b *testing.B) {
+ var s uint32
+ for i := 0; i < b.N; i++ {
+ s += Reverse32(uint32(i))
+ }
+ Output = int(s)
+}
+
+func BenchmarkReverse64(b *testing.B) {
+ var s uint64
+ for i := 0; i < b.N; i++ {
+ s += Reverse64(uint64(i))
+ }
+ Output = int(s)
+}
+
+func TestReverseBytes(t *testing.T) {
+ for _, test := range []struct {
+ x, r uint64
+ }{
+ {0, 0},
+ {0x01, 0x01 << 56},
+ {0x0123, 0x2301 << 48},
+ {0x012345, 0x452301 << 40},
+ {0x01234567, 0x67452301 << 32},
+ {0x0123456789, 0x8967452301 << 24},
+ {0x0123456789ab, 0xab8967452301 << 16},
+ {0x0123456789abcd, 0xcdab8967452301 << 8},
+ {0x0123456789abcdef, 0xefcdab8967452301 << 0},
+ } {
+ testReverseBytes(t, test.x, test.r)
+ testReverseBytes(t, test.r, test.x)
+ }
+}
+
+func testReverseBytes(t *testing.T, x64, want64 uint64) {
+ x16 := uint16(x64)
+ got16 := ReverseBytes16(x16)
+ want16 := uint16(want64 >> (64 - 16))
+ if got16 != want16 {
+ t.Fatalf("ReverseBytes16(%#04x) == %#04x; want %#04x", x16, got16, want16)
+ }
+
+ x32 := uint32(x64)
+ got32 := ReverseBytes32(x32)
+ want32 := uint32(want64 >> (64 - 32))
+ if got32 != want32 {
+ t.Fatalf("ReverseBytes32(%#08x) == %#08x; want %#08x", x32, got32, want32)
+ }
+ if UintSize == 32 {
+ x := uint(x32)
+ got := ReverseBytes(x)
+ want := uint(want32)
+ if got != want {
+ t.Fatalf("ReverseBytes(%#08x) == %#08x; want %#08x", x, got, want)
+ }
+ }
+
+ got64 := ReverseBytes64(x64)
+ if got64 != want64 {
+ t.Fatalf("ReverseBytes64(%#016x) == %#016x; want %#016x", x64, got64, want64)
+ }
+ if UintSize == 64 {
+ x := uint(x64)
+ got := ReverseBytes(x)
+ want := uint(want64)
+ if got != want {
+ t.Fatalf("ReverseBytes(%#016x) == %#016x; want %#016x", x, got, want)
+ }
+ }
+}
+
+func BenchmarkReverseBytes(b *testing.B) {
+ var s uint
+ for i := 0; i < b.N; i++ {
+ s += ReverseBytes(uint(i))
+ }
+ Output = int(s)
+}
+
+func BenchmarkReverseBytes16(b *testing.B) {
+ var s uint16
+ for i := 0; i < b.N; i++ {
+ s += ReverseBytes16(uint16(i))
+ }
+ Output = int(s)
+}
+
+func BenchmarkReverseBytes32(b *testing.B) {
+ var s uint32
+ for i := 0; i < b.N; i++ {
+ s += ReverseBytes32(uint32(i))
+ }
+ Output = int(s)
+}
+
+func BenchmarkReverseBytes64(b *testing.B) {
+ var s uint64
+ for i := 0; i < b.N; i++ {
+ s += ReverseBytes64(uint64(i))
+ }
+ Output = int(s)
+}
+
+func TestLen(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ len := 8 - tab[i].nlz
+ for k := 0; k < 64-8; k++ {
+ x := uint64(i) << uint(k)
+ want := 0
+ if x != 0 {
+ want = len + k
+ }
+ if x <= 1<<8-1 {
+ got := Len8(uint8(x))
+ if got != want {
+ t.Fatalf("Len8(%#02x) == %d; want %d", x, got, want)
+ }
+ }
+
+ if x <= 1<<16-1 {
+ got := Len16(uint16(x))
+ if got != want {
+ t.Fatalf("Len16(%#04x) == %d; want %d", x, got, want)
+ }
+ }
+
+ if x <= 1<<32-1 {
+ got := Len32(uint32(x))
+ if got != want {
+ t.Fatalf("Len32(%#08x) == %d; want %d", x, got, want)
+ }
+ if UintSize == 32 {
+ got := Len(uint(x))
+ if got != want {
+ t.Fatalf("Len(%#08x) == %d; want %d", x, got, want)
+ }
+ }
+ }
+
+ if x <= 1<<64-1 {
+ got := Len64(uint64(x))
+ if got != want {
+ t.Fatalf("Len64(%#016x) == %d; want %d", x, got, want)
+ }
+ if UintSize == 64 {
+ got := Len(uint(x))
+ if got != want {
+ t.Fatalf("Len(%#016x) == %d; want %d", x, got, want)
+ }
+ }
+ }
+ }
+ }
+}
+
+const (
+ _M = 1<<UintSize - 1
+ _M32 = 1<<32 - 1
+ _M64 = 1<<64 - 1
+)
+
+func TestAddSubUint(t *testing.T) {
+ test := func(msg string, f func(x, y, c uint) (z, cout uint), x, y, c, z, cout uint) {
+ z1, cout1 := f(x, y, c)
+ if z1 != z || cout1 != cout {
+ t.Errorf("%s: got z:cout = %#x:%#x; want %#x:%#x", msg, z1, cout1, z, cout)
+ }
+ }
+ for _, a := range []struct{ x, y, c, z, cout uint }{
+ {0, 0, 0, 0, 0},
+ {0, 1, 0, 1, 0},
+ {0, 0, 1, 1, 0},
+ {0, 1, 1, 2, 0},
+ {12345, 67890, 0, 80235, 0},
+ {12345, 67890, 1, 80236, 0},
+ {_M, 1, 0, 0, 1},
+ {_M, 0, 1, 0, 1},
+ {_M, 1, 1, 1, 1},
+ {_M, _M, 0, _M - 1, 1},
+ {_M, _M, 1, _M, 1},
+ } {
+ test("Add", Add, a.x, a.y, a.c, a.z, a.cout)
+ test("Add symmetric", Add, a.y, a.x, a.c, a.z, a.cout)
+ test("Sub", Sub, a.z, a.x, a.c, a.y, a.cout)
+ test("Sub symmetric", Sub, a.z, a.y, a.c, a.x, a.cout)
+		// The above code can't test the intrinsic implementation, because the passed function is not called directly.
+ // The following code uses a closure to test the intrinsic version in case the function is intrinsified.
+ test("Add intrinsic", func(x, y, c uint) (uint, uint) { return Add(x, y, c) }, a.x, a.y, a.c, a.z, a.cout)
+ test("Add intrinsic symmetric", func(x, y, c uint) (uint, uint) { return Add(x, y, c) }, a.y, a.x, a.c, a.z, a.cout)
+ test("Sub intrinsic", func(x, y, c uint) (uint, uint) { return Sub(x, y, c) }, a.z, a.x, a.c, a.y, a.cout)
+ test("Sub intrinsic symmetric", func(x, y, c uint) (uint, uint) { return Sub(x, y, c) }, a.z, a.y, a.c, a.x, a.cout)
+
+ }
+}
+
+func TestAddSubUint32(t *testing.T) {
+ test := func(msg string, f func(x, y, c uint32) (z, cout uint32), x, y, c, z, cout uint32) {
+ z1, cout1 := f(x, y, c)
+ if z1 != z || cout1 != cout {
+ t.Errorf("%s: got z:cout = %#x:%#x; want %#x:%#x", msg, z1, cout1, z, cout)
+ }
+ }
+ for _, a := range []struct{ x, y, c, z, cout uint32 }{
+ {0, 0, 0, 0, 0},
+ {0, 1, 0, 1, 0},
+ {0, 0, 1, 1, 0},
+ {0, 1, 1, 2, 0},
+ {12345, 67890, 0, 80235, 0},
+ {12345, 67890, 1, 80236, 0},
+ {_M32, 1, 0, 0, 1},
+ {_M32, 0, 1, 0, 1},
+ {_M32, 1, 1, 1, 1},
+ {_M32, _M32, 0, _M32 - 1, 1},
+ {_M32, _M32, 1, _M32, 1},
+ } {
+ test("Add32", Add32, a.x, a.y, a.c, a.z, a.cout)
+ test("Add32 symmetric", Add32, a.y, a.x, a.c, a.z, a.cout)
+ test("Sub32", Sub32, a.z, a.x, a.c, a.y, a.cout)
+ test("Sub32 symmetric", Sub32, a.z, a.y, a.c, a.x, a.cout)
+ }
+}
+
+func TestAddSubUint64(t *testing.T) {
+ test := func(msg string, f func(x, y, c uint64) (z, cout uint64), x, y, c, z, cout uint64) {
+ z1, cout1 := f(x, y, c)
+ if z1 != z || cout1 != cout {
+ t.Errorf("%s: got z:cout = %#x:%#x; want %#x:%#x", msg, z1, cout1, z, cout)
+ }
+ }
+ for _, a := range []struct{ x, y, c, z, cout uint64 }{
+ {0, 0, 0, 0, 0},
+ {0, 1, 0, 1, 0},
+ {0, 0, 1, 1, 0},
+ {0, 1, 1, 2, 0},
+ {12345, 67890, 0, 80235, 0},
+ {12345, 67890, 1, 80236, 0},
+ {_M64, 1, 0, 0, 1},
+ {_M64, 0, 1, 0, 1},
+ {_M64, 1, 1, 1, 1},
+ {_M64, _M64, 0, _M64 - 1, 1},
+ {_M64, _M64, 1, _M64, 1},
+ } {
+ test("Add64", Add64, a.x, a.y, a.c, a.z, a.cout)
+ test("Add64 symmetric", Add64, a.y, a.x, a.c, a.z, a.cout)
+ test("Sub64", Sub64, a.z, a.x, a.c, a.y, a.cout)
+ test("Sub64 symmetric", Sub64, a.z, a.y, a.c, a.x, a.cout)
+		// The above code can't test the intrinsic implementation, because the passed function is not called directly.
+ // The following code uses a closure to test the intrinsic version in case the function is intrinsified.
+ test("Add64 intrinsic", func(x, y, c uint64) (uint64, uint64) { return Add64(x, y, c) }, a.x, a.y, a.c, a.z, a.cout)
+ test("Add64 intrinsic symmetric", func(x, y, c uint64) (uint64, uint64) { return Add64(x, y, c) }, a.y, a.x, a.c, a.z, a.cout)
+ test("Sub64 intrinsic", func(x, y, c uint64) (uint64, uint64) { return Sub64(x, y, c) }, a.z, a.x, a.c, a.y, a.cout)
+ test("Sub64 intrinsic symmetric", func(x, y, c uint64) (uint64, uint64) { return Sub64(x, y, c) }, a.z, a.y, a.c, a.x, a.cout)
+ }
+}
+
+func TestAdd64OverflowPanic(t *testing.T) {
+ // Test that 64-bit overflow panics fire correctly.
+ // These are designed to improve coverage of compiler intrinsics.
+ tests := []func(uint64, uint64) uint64{
+ func(a, b uint64) uint64 {
+ x, c := Add64(a, b, 0)
+ if c > 0 {
+ panic("overflow")
+ }
+ return x
+ },
+ func(a, b uint64) uint64 {
+ x, c := Add64(a, b, 0)
+ if c != 0 {
+ panic("overflow")
+ }
+ return x
+ },
+ func(a, b uint64) uint64 {
+ x, c := Add64(a, b, 0)
+ if c == 1 {
+ panic("overflow")
+ }
+ return x
+ },
+ func(a, b uint64) uint64 {
+ x, c := Add64(a, b, 0)
+ if c != 1 {
+ return x
+ }
+ panic("overflow")
+ },
+ func(a, b uint64) uint64 {
+ x, c := Add64(a, b, 0)
+ if c == 0 {
+ return x
+ }
+ panic("overflow")
+ },
+ }
+ for _, test := range tests {
+ shouldPanic := func(f func()) {
+ defer func() {
+ if err := recover(); err == nil {
+ t.Fatalf("expected panic")
+ }
+ }()
+ f()
+ }
+
+ // overflow
+ shouldPanic(func() { test(_M64, 1) })
+ shouldPanic(func() { test(1, _M64) })
+ shouldPanic(func() { test(_M64, _M64) })
+
+ // no overflow
+ test(_M64, 0)
+ test(0, 0)
+ test(1, 1)
+ }
+}
+
+func TestSub64OverflowPanic(t *testing.T) {
+ // Test that 64-bit overflow panics fire correctly.
+ // These are designed to improve coverage of compiler intrinsics.
+ tests := []func(uint64, uint64) uint64{
+ func(a, b uint64) uint64 {
+ x, c := Sub64(a, b, 0)
+ if c > 0 {
+ panic("overflow")
+ }
+ return x
+ },
+ func(a, b uint64) uint64 {
+ x, c := Sub64(a, b, 0)
+ if c != 0 {
+ panic("overflow")
+ }
+ return x
+ },
+ func(a, b uint64) uint64 {
+ x, c := Sub64(a, b, 0)
+ if c == 1 {
+ panic("overflow")
+ }
+ return x
+ },
+ func(a, b uint64) uint64 {
+ x, c := Sub64(a, b, 0)
+ if c != 1 {
+ return x
+ }
+ panic("overflow")
+ },
+ func(a, b uint64) uint64 {
+ x, c := Sub64(a, b, 0)
+ if c == 0 {
+ return x
+ }
+ panic("overflow")
+ },
+ }
+ for _, test := range tests {
+ shouldPanic := func(f func()) {
+ defer func() {
+ if err := recover(); err == nil {
+ t.Fatalf("expected panic")
+ }
+ }()
+ f()
+ }
+
+ // overflow
+ shouldPanic(func() { test(0, 1) })
+ shouldPanic(func() { test(1, _M64) })
+ shouldPanic(func() { test(_M64-1, _M64) })
+
+ // no overflow
+ test(_M64, 0)
+ test(0, 0)
+ test(1, 1)
+ }
+}
+
+func TestMulDiv(t *testing.T) {
+ testMul := func(msg string, f func(x, y uint) (hi, lo uint), x, y, hi, lo uint) {
+ hi1, lo1 := f(x, y)
+ if hi1 != hi || lo1 != lo {
+ t.Errorf("%s: got hi:lo = %#x:%#x; want %#x:%#x", msg, hi1, lo1, hi, lo)
+ }
+ }
+ testDiv := func(msg string, f func(hi, lo, y uint) (q, r uint), hi, lo, y, q, r uint) {
+ q1, r1 := f(hi, lo, y)
+ if q1 != q || r1 != r {
+ t.Errorf("%s: got q:r = %#x:%#x; want %#x:%#x", msg, q1, r1, q, r)
+ }
+ }
+ for _, a := range []struct {
+ x, y uint
+ hi, lo, r uint
+ }{
+ {1 << (UintSize - 1), 2, 1, 0, 1},
+ {_M, _M, _M - 1, 1, 42},
+ } {
+ testMul("Mul", Mul, a.x, a.y, a.hi, a.lo)
+ testMul("Mul symmetric", Mul, a.y, a.x, a.hi, a.lo)
+ testDiv("Div", Div, a.hi, a.lo+a.r, a.y, a.x, a.r)
+ testDiv("Div symmetric", Div, a.hi, a.lo+a.r, a.x, a.y, a.r)
+		// The above code can't test the intrinsic implementation, because the passed function is not called directly.
+ // The following code uses a closure to test the intrinsic version in case the function is intrinsified.
+ testMul("Mul intrinsic", func(x, y uint) (uint, uint) { return Mul(x, y) }, a.x, a.y, a.hi, a.lo)
+ testMul("Mul intrinsic symmetric", func(x, y uint) (uint, uint) { return Mul(x, y) }, a.y, a.x, a.hi, a.lo)
+ testDiv("Div intrinsic", func(hi, lo, y uint) (uint, uint) { return Div(hi, lo, y) }, a.hi, a.lo+a.r, a.y, a.x, a.r)
+ testDiv("Div intrinsic symmetric", func(hi, lo, y uint) (uint, uint) { return Div(hi, lo, y) }, a.hi, a.lo+a.r, a.x, a.y, a.r)
+ }
+}
+
+func TestMulDiv32(t *testing.T) {
+ testMul := func(msg string, f func(x, y uint32) (hi, lo uint32), x, y, hi, lo uint32) {
+ hi1, lo1 := f(x, y)
+ if hi1 != hi || lo1 != lo {
+ t.Errorf("%s: got hi:lo = %#x:%#x; want %#x:%#x", msg, hi1, lo1, hi, lo)
+ }
+ }
+ testDiv := func(msg string, f func(hi, lo, y uint32) (q, r uint32), hi, lo, y, q, r uint32) {
+ q1, r1 := f(hi, lo, y)
+ if q1 != q || r1 != r {
+ t.Errorf("%s: got q:r = %#x:%#x; want %#x:%#x", msg, q1, r1, q, r)
+ }
+ }
+ for _, a := range []struct {
+ x, y uint32
+ hi, lo, r uint32
+ }{
+ {1 << 31, 2, 1, 0, 1},
+ {0xc47dfa8c, 50911, 0x98a4, 0x998587f4, 13},
+ {_M32, _M32, _M32 - 1, 1, 42},
+ } {
+ testMul("Mul32", Mul32, a.x, a.y, a.hi, a.lo)
+ testMul("Mul32 symmetric", Mul32, a.y, a.x, a.hi, a.lo)
+ testDiv("Div32", Div32, a.hi, a.lo+a.r, a.y, a.x, a.r)
+ testDiv("Div32 symmetric", Div32, a.hi, a.lo+a.r, a.x, a.y, a.r)
+ }
+}
+
+func TestMulDiv64(t *testing.T) {
+ testMul := func(msg string, f func(x, y uint64) (hi, lo uint64), x, y, hi, lo uint64) {
+ hi1, lo1 := f(x, y)
+ if hi1 != hi || lo1 != lo {
+ t.Errorf("%s: got hi:lo = %#x:%#x; want %#x:%#x", msg, hi1, lo1, hi, lo)
+ }
+ }
+ testDiv := func(msg string, f func(hi, lo, y uint64) (q, r uint64), hi, lo, y, q, r uint64) {
+ q1, r1 := f(hi, lo, y)
+ if q1 != q || r1 != r {
+ t.Errorf("%s: got q:r = %#x:%#x; want %#x:%#x", msg, q1, r1, q, r)
+ }
+ }
+ for _, a := range []struct {
+ x, y uint64
+ hi, lo, r uint64
+ }{
+ {1 << 63, 2, 1, 0, 1},
+ {0x3626229738a3b9, 0xd8988a9f1cc4a61, 0x2dd0712657fe8, 0x9dd6a3364c358319, 13},
+ {_M64, _M64, _M64 - 1, 1, 42},
+ } {
+ testMul("Mul64", Mul64, a.x, a.y, a.hi, a.lo)
+ testMul("Mul64 symmetric", Mul64, a.y, a.x, a.hi, a.lo)
+ testDiv("Div64", Div64, a.hi, a.lo+a.r, a.y, a.x, a.r)
+ testDiv("Div64 symmetric", Div64, a.hi, a.lo+a.r, a.x, a.y, a.r)
+		// The above code can't test the intrinsic implementation, because the passed function is not called directly.
+ // The following code uses a closure to test the intrinsic version in case the function is intrinsified.
+ testMul("Mul64 intrinsic", func(x, y uint64) (uint64, uint64) { return Mul64(x, y) }, a.x, a.y, a.hi, a.lo)
+ testMul("Mul64 intrinsic symmetric", func(x, y uint64) (uint64, uint64) { return Mul64(x, y) }, a.y, a.x, a.hi, a.lo)
+ testDiv("Div64 intrinsic", func(hi, lo, y uint64) (uint64, uint64) { return Div64(hi, lo, y) }, a.hi, a.lo+a.r, a.y, a.x, a.r)
+ testDiv("Div64 intrinsic symmetric", func(hi, lo, y uint64) (uint64, uint64) { return Div64(hi, lo, y) }, a.hi, a.lo+a.r, a.x, a.y, a.r)
+ }
+}
+
+const (
+ divZeroError = "runtime error: integer divide by zero"
+ overflowError = "runtime error: integer overflow"
+)
+
+func TestDivPanicOverflow(t *testing.T) {
+ // Expect a panic
+ defer func() {
+ if err := recover(); err == nil {
+ t.Error("Div should have panicked when y<=hi")
+ } else if e, ok := err.(runtime.Error); !ok || e.Error() != overflowError {
+ t.Errorf("Div expected panic: %q, got: %q ", overflowError, e.Error())
+ }
+ }()
+ q, r := Div(1, 0, 1)
+ t.Errorf("undefined q, r = %v, %v calculated when Div should have panicked", q, r)
+}
+
+func TestDiv32PanicOverflow(t *testing.T) {
+ // Expect a panic
+ defer func() {
+ if err := recover(); err == nil {
+ t.Error("Div32 should have panicked when y<=hi")
+ } else if e, ok := err.(runtime.Error); !ok || e.Error() != overflowError {
+ t.Errorf("Div32 expected panic: %q, got: %q ", overflowError, e.Error())
+ }
+ }()
+ q, r := Div32(1, 0, 1)
+ t.Errorf("undefined q, r = %v, %v calculated when Div32 should have panicked", q, r)
+}
+
+func TestDiv64PanicOverflow(t *testing.T) {
+ // Expect a panic
+ defer func() {
+ if err := recover(); err == nil {
+ t.Error("Div64 should have panicked when y<=hi")
+ } else if e, ok := err.(runtime.Error); !ok || e.Error() != overflowError {
+ t.Errorf("Div64 expected panic: %q, got: %q ", overflowError, e.Error())
+ }
+ }()
+ q, r := Div64(1, 0, 1)
+ t.Errorf("undefined q, r = %v, %v calculated when Div64 should have panicked", q, r)
+}
+
+func TestDivPanicZero(t *testing.T) {
+ // Expect a panic
+ defer func() {
+ if err := recover(); err == nil {
+ t.Error("Div should have panicked when y==0")
+ } else if e, ok := err.(runtime.Error); !ok || e.Error() != divZeroError {
+ t.Errorf("Div expected panic: %q, got: %q ", divZeroError, e.Error())
+ }
+ }()
+ q, r := Div(1, 1, 0)
+ t.Errorf("undefined q, r = %v, %v calculated when Div should have panicked", q, r)
+}
+
+func TestDiv32PanicZero(t *testing.T) {
+ // Expect a panic
+ defer func() {
+ if err := recover(); err == nil {
+ t.Error("Div32 should have panicked when y==0")
+ } else if e, ok := err.(runtime.Error); !ok || e.Error() != divZeroError {
+ t.Errorf("Div32 expected panic: %q, got: %q ", divZeroError, e.Error())
+ }
+ }()
+ q, r := Div32(1, 1, 0)
+ t.Errorf("undefined q, r = %v, %v calculated when Div32 should have panicked", q, r)
+}
+
+func TestDiv64PanicZero(t *testing.T) {
+ // Expect a panic
+ defer func() {
+ if err := recover(); err == nil {
+ t.Error("Div64 should have panicked when y==0")
+ } else if e, ok := err.(runtime.Error); !ok || e.Error() != divZeroError {
+ t.Errorf("Div64 expected panic: %q, got: %q ", divZeroError, e.Error())
+ }
+ }()
+ q, r := Div64(1, 1, 0)
+ t.Errorf("undefined q, r = %v, %v calculated when Div64 should have panicked", q, r)
+}
+
+func TestRem32(t *testing.T) {
+	// Sanity check: for non-overflowing dividends, the result is the
+	// same as the remainder returned by Div32.
+ hi, lo, y := uint32(510510), uint32(9699690), uint32(510510+1) // ensure hi < y
+ for i := 0; i < 1000; i++ {
+ r := Rem32(hi, lo, y)
+ _, r2 := Div32(hi, lo, y)
+ if r != r2 {
+ t.Errorf("Rem32(%v, %v, %v) returned %v, but Div32 returned rem %v", hi, lo, y, r, r2)
+ }
+ y += 13
+ }
+}
+
+func TestRem32Overflow(t *testing.T) {
+ // To trigger a quotient overflow, we need y <= hi
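+	// (the quotient (hi<<32 + lo) / y is then at least (hi<<32)/y >= 1<<32,
+	// too large for a uint32, so Div32 would panic; Rem32 must still return
+	// the correct remainder, checked here against a full 64-bit division).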
+ hi, lo, y := uint32(510510), uint32(9699690), uint32(7)
+ for i := 0; i < 1000; i++ {
+ r := Rem32(hi, lo, y)
+ _, r2 := Div64(0, uint64(hi)<<32|uint64(lo), uint64(y))
+ if r != uint32(r2) {
+ t.Errorf("Rem32(%v, %v, %v) returned %v, but Div64 returned rem %v", hi, lo, y, r, r2)
+ }
+ y += 13
+ }
+}
+
+func TestRem64(t *testing.T) {
+	// Sanity check: for non-overflowing dividends, the result is the
+	// same as the remainder returned by Div64.
+ hi, lo, y := uint64(510510), uint64(9699690), uint64(510510+1) // ensure hi < y
+ for i := 0; i < 1000; i++ {
+ r := Rem64(hi, lo, y)
+ _, r2 := Div64(hi, lo, y)
+ if r != r2 {
+ t.Errorf("Rem64(%v, %v, %v) returned %v, but Div64 returned rem %v", hi, lo, y, r, r2)
+ }
+ y += 13
+ }
+}
+
+func TestRem64Overflow(t *testing.T) {
+ Rem64Tests := []struct {
+ hi, lo, y uint64
+ rem uint64
+ }{
+	// Test cases computed using Python 3, for example:
+ // >>> hi = 42; lo = 1119; y = 42
+ // >>> ((hi<<64)+lo) % y
+ {42, 1119, 42, 27},
+ {42, 1119, 38, 9},
+ {42, 1119, 26, 23},
+ {469, 0, 467, 271},
+ {469, 0, 113, 58},
+ {111111, 111111, 1171, 803},
+ {3968194946088682615, 3192705705065114702, 1000037, 56067},
+ }
+
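+	// A sketch of the identity these overflow cases exercise (relying only on
+	// the documented Div64 precondition hi < y): because hi<<64 is congruent
+	// to (hi%y)<<64 modulo y,
+	//
+	//	(hi<<64 + lo) % y == ((hi%y)<<64 + lo) % y
+	//
+	// so the remainder can be computed without overflow as
+	//
+	//	_, rem := Div64(hi%y, lo, y) // hi%y < y, so Div64 cannot panic
+	//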
+ for _, rt := range Rem64Tests {
+ if rt.hi < rt.y {
+ t.Fatalf("Rem64(%v, %v, %v) is not a test with quo overflow", rt.hi, rt.lo, rt.y)
+ }
+ rem := Rem64(rt.hi, rt.lo, rt.y)
+ if rem != rt.rem {
+ t.Errorf("Rem64(%v, %v, %v) returned %v, wanted %v",
+ rt.hi, rt.lo, rt.y, rem, rt.rem)
+ }
+ }
+}
+
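+// The benchmarks below read the package-level Input and sink their results
+// into Output so that the otherwise side-effect-free calls are not
+// optimized away.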
+func BenchmarkAdd(b *testing.B) {
+ var z, c uint
+ for i := 0; i < b.N; i++ {
+ z, c = Add(uint(Input), uint(i), c)
+ }
+ Output = int(z + c)
+}
+
+func BenchmarkAdd32(b *testing.B) {
+ var z, c uint32
+ for i := 0; i < b.N; i++ {
+ z, c = Add32(uint32(Input), uint32(i), c)
+ }
+ Output = int(z + c)
+}
+
+func BenchmarkAdd64(b *testing.B) {
+ var z, c uint64
+ for i := 0; i < b.N; i++ {
+ z, c = Add64(uint64(Input), uint64(i), c)
+ }
+ Output = int(z + c)
+}
+
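+// BenchmarkAdd64multiple chains the carry through four dependent Add64
+// calls, mimicking one column step of a 256-bit addition.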
+func BenchmarkAdd64multiple(b *testing.B) {
+ var z0 = uint64(Input)
+ var z1 = uint64(Input)
+ var z2 = uint64(Input)
+ var z3 = uint64(Input)
+ for i := 0; i < b.N; i++ {
+ var c uint64
+ z0, c = Add64(z0, uint64(i), c)
+ z1, c = Add64(z1, uint64(i), c)
+ z2, c = Add64(z2, uint64(i), c)
+ z3, _ = Add64(z3, uint64(i), c)
+ }
+ Output = int(z0 + z1 + z2 + z3)
+}
+
+func BenchmarkSub(b *testing.B) {
+ var z, c uint
+ for i := 0; i < b.N; i++ {
+ z, c = Sub(uint(Input), uint(i), c)
+ }
+ Output = int(z + c)
+}
+
+func BenchmarkSub32(b *testing.B) {
+ var z, c uint32
+ for i := 0; i < b.N; i++ {
+ z, c = Sub32(uint32(Input), uint32(i), c)
+ }
+ Output = int(z + c)
+}
+
+func BenchmarkSub64(b *testing.B) {
+ var z, c uint64
+ for i := 0; i < b.N; i++ {
+ z, c = Sub64(uint64(Input), uint64(i), c)
+ }
+ Output = int(z + c)
+}
+
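+// BenchmarkSub64multiple mirrors BenchmarkAdd64multiple, propagating the
+// borrow through four dependent Sub64 calls.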
+func BenchmarkSub64multiple(b *testing.B) {
+ var z0 = uint64(Input)
+ var z1 = uint64(Input)
+ var z2 = uint64(Input)
+ var z3 = uint64(Input)
+ for i := 0; i < b.N; i++ {
+ var c uint64
+ z0, c = Sub64(z0, uint64(i), c)
+ z1, c = Sub64(z1, uint64(i), c)
+ z2, c = Sub64(z2, uint64(i), c)
+ z3, _ = Sub64(z3, uint64(i), c)
+ }
+ Output = int(z0 + z1 + z2 + z3)
+}
+
+func BenchmarkMul(b *testing.B) {
+ var hi, lo uint
+ for i := 0; i < b.N; i++ {
+ hi, lo = Mul(uint(Input), uint(i))
+ }
+ Output = int(hi + lo)
+}
+
+func BenchmarkMul32(b *testing.B) {
+ var hi, lo uint32
+ for i := 0; i < b.N; i++ {
+ hi, lo = Mul32(uint32(Input), uint32(i))
+ }
+ Output = int(hi + lo)
+}
+
+func BenchmarkMul64(b *testing.B) {
+ var hi, lo uint64
+ for i := 0; i < b.N; i++ {
+ hi, lo = Mul64(uint64(Input), uint64(i))
+ }
+ Output = int(hi + lo)
+}
+
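+// The Div benchmarks fix hi at 1 and divide by Input; they rely on
+// Input > 1, since Div panics when y <= hi.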
+func BenchmarkDiv(b *testing.B) {
+ var q, r uint
+ for i := 0; i < b.N; i++ {
+ q, r = Div(1, uint(i), uint(Input))
+ }
+ Output = int(q + r)
+}
+
+func BenchmarkDiv32(b *testing.B) {
+ var q, r uint32
+ for i := 0; i < b.N; i++ {
+ q, r = Div32(1, uint32(i), uint32(Input))
+ }
+ Output = int(q + r)
+}
+
+func BenchmarkDiv64(b *testing.B) {
+ var q, r uint64
+ for i := 0; i < b.N; i++ {
+ q, r = Div64(1, uint64(i), uint64(Input))
+ }
+ Output = int(q + r)
+}
+
+// ----------------------------------------------------------------------------
+// Testing support
+
+type entry = struct {
+ nlz, ntz, pop int
+}
+
+// tab contains results for all uint8 values
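+// (e.g. tab[0x0d] = entry{nlz: 4, ntz: 0, pop: 3}, since 0x0d = 00001101).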
+var tab [256]entry
+
+func init() {
+ tab[0] = entry{8, 8, 0}
+ for i := 1; i < len(tab); i++ {
+ // nlz
+ x := i // x != 0
+ n := 0
+ for x&0x80 == 0 {
+ n++
+ x <<= 1
+ }
+ tab[i].nlz = n
+
+ // ntz
+ x = i // x != 0
+ n = 0
+ for x&1 == 0 {
+ n++
+ x >>= 1
+ }
+ tab[i].ntz = n
+
+ // pop
+ x = i // x != 0
+ n = 0
+ for x != 0 {
+ n += int(x & 1)
+ x >>= 1
+ }
+ tab[i].pop = n
+ }
+}
diff --git a/src/math/bits/example_test.go b/src/math/bits/example_test.go
new file mode 100644
index 0000000..b2ed2cb
--- /dev/null
+++ b/src/math/bits/example_test.go
@@ -0,0 +1,210 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by go run make_examples.go. DO NOT EDIT.
+
+package bits_test
+
+import (
+ "fmt"
+ "math/bits"
+)
+
+func ExampleLeadingZeros8() {
+ fmt.Printf("LeadingZeros8(%08b) = %d\n", 1, bits.LeadingZeros8(1))
+ // Output:
+ // LeadingZeros8(00000001) = 7
+}
+
+func ExampleLeadingZeros16() {
+ fmt.Printf("LeadingZeros16(%016b) = %d\n", 1, bits.LeadingZeros16(1))
+ // Output:
+ // LeadingZeros16(0000000000000001) = 15
+}
+
+func ExampleLeadingZeros32() {
+ fmt.Printf("LeadingZeros32(%032b) = %d\n", 1, bits.LeadingZeros32(1))
+ // Output:
+ // LeadingZeros32(00000000000000000000000000000001) = 31
+}
+
+func ExampleLeadingZeros64() {
+ fmt.Printf("LeadingZeros64(%064b) = %d\n", 1, bits.LeadingZeros64(1))
+ // Output:
+ // LeadingZeros64(0000000000000000000000000000000000000000000000000000000000000001) = 63
+}
+
+func ExampleTrailingZeros8() {
+ fmt.Printf("TrailingZeros8(%08b) = %d\n", 14, bits.TrailingZeros8(14))
+ // Output:
+ // TrailingZeros8(00001110) = 1
+}
+
+func ExampleTrailingZeros16() {
+ fmt.Printf("TrailingZeros16(%016b) = %d\n", 14, bits.TrailingZeros16(14))
+ // Output:
+ // TrailingZeros16(0000000000001110) = 1
+}
+
+func ExampleTrailingZeros32() {
+ fmt.Printf("TrailingZeros32(%032b) = %d\n", 14, bits.TrailingZeros32(14))
+ // Output:
+ // TrailingZeros32(00000000000000000000000000001110) = 1
+}
+
+func ExampleTrailingZeros64() {
+ fmt.Printf("TrailingZeros64(%064b) = %d\n", 14, bits.TrailingZeros64(14))
+ // Output:
+ // TrailingZeros64(0000000000000000000000000000000000000000000000000000000000001110) = 1
+}
+
+func ExampleOnesCount() {
+ fmt.Printf("OnesCount(%b) = %d\n", 14, bits.OnesCount(14))
+ // Output:
+ // OnesCount(1110) = 3
+}
+
+func ExampleOnesCount8() {
+ fmt.Printf("OnesCount8(%08b) = %d\n", 14, bits.OnesCount8(14))
+ // Output:
+ // OnesCount8(00001110) = 3
+}
+
+func ExampleOnesCount16() {
+ fmt.Printf("OnesCount16(%016b) = %d\n", 14, bits.OnesCount16(14))
+ // Output:
+ // OnesCount16(0000000000001110) = 3
+}
+
+func ExampleOnesCount32() {
+ fmt.Printf("OnesCount32(%032b) = %d\n", 14, bits.OnesCount32(14))
+ // Output:
+ // OnesCount32(00000000000000000000000000001110) = 3
+}
+
+func ExampleOnesCount64() {
+ fmt.Printf("OnesCount64(%064b) = %d\n", 14, bits.OnesCount64(14))
+ // Output:
+ // OnesCount64(0000000000000000000000000000000000000000000000000000000000001110) = 3
+}
+
+func ExampleRotateLeft8() {
+ fmt.Printf("%08b\n", 15)
+ fmt.Printf("%08b\n", bits.RotateLeft8(15, 2))
+ fmt.Printf("%08b\n", bits.RotateLeft8(15, -2))
+ // Output:
+ // 00001111
+ // 00111100
+ // 11000011
+}
+
+func ExampleRotateLeft16() {
+ fmt.Printf("%016b\n", 15)
+ fmt.Printf("%016b\n", bits.RotateLeft16(15, 2))
+ fmt.Printf("%016b\n", bits.RotateLeft16(15, -2))
+ // Output:
+ // 0000000000001111
+ // 0000000000111100
+ // 1100000000000011
+}
+
+func ExampleRotateLeft32() {
+ fmt.Printf("%032b\n", 15)
+ fmt.Printf("%032b\n", bits.RotateLeft32(15, 2))
+ fmt.Printf("%032b\n", bits.RotateLeft32(15, -2))
+ // Output:
+ // 00000000000000000000000000001111
+ // 00000000000000000000000000111100
+ // 11000000000000000000000000000011
+}
+
+func ExampleRotateLeft64() {
+ fmt.Printf("%064b\n", 15)
+ fmt.Printf("%064b\n", bits.RotateLeft64(15, 2))
+ fmt.Printf("%064b\n", bits.RotateLeft64(15, -2))
+ // Output:
+ // 0000000000000000000000000000000000000000000000000000000000001111
+ // 0000000000000000000000000000000000000000000000000000000000111100
+ // 1100000000000000000000000000000000000000000000000000000000000011
+}
+
+func ExampleReverse8() {
+ fmt.Printf("%08b\n", 19)
+ fmt.Printf("%08b\n", bits.Reverse8(19))
+ // Output:
+ // 00010011
+ // 11001000
+}
+
+func ExampleReverse16() {
+ fmt.Printf("%016b\n", 19)
+ fmt.Printf("%016b\n", bits.Reverse16(19))
+ // Output:
+ // 0000000000010011
+ // 1100100000000000
+}
+
+func ExampleReverse32() {
+ fmt.Printf("%032b\n", 19)
+ fmt.Printf("%032b\n", bits.Reverse32(19))
+ // Output:
+ // 00000000000000000000000000010011
+ // 11001000000000000000000000000000
+}
+
+func ExampleReverse64() {
+ fmt.Printf("%064b\n", 19)
+ fmt.Printf("%064b\n", bits.Reverse64(19))
+ // Output:
+ // 0000000000000000000000000000000000000000000000000000000000010011
+ // 1100100000000000000000000000000000000000000000000000000000000000
+}
+
+func ExampleReverseBytes16() {
+ fmt.Printf("%016b\n", 15)
+ fmt.Printf("%016b\n", bits.ReverseBytes16(15))
+ // Output:
+ // 0000000000001111
+ // 0000111100000000
+}
+
+func ExampleReverseBytes32() {
+ fmt.Printf("%032b\n", 15)
+ fmt.Printf("%032b\n", bits.ReverseBytes32(15))
+ // Output:
+ // 00000000000000000000000000001111
+ // 00001111000000000000000000000000
+}
+
+func ExampleReverseBytes64() {
+ fmt.Printf("%064b\n", 15)
+ fmt.Printf("%064b\n", bits.ReverseBytes64(15))
+ // Output:
+ // 0000000000000000000000000000000000000000000000000000000000001111
+ // 0000111100000000000000000000000000000000000000000000000000000000
+}
+
+func ExampleLen8() {
+ fmt.Printf("Len8(%08b) = %d\n", 8, bits.Len8(8))
+ // Output:
+ // Len8(00001000) = 4
+}
+
+func ExampleLen16() {
+ fmt.Printf("Len16(%016b) = %d\n", 8, bits.Len16(8))
+ // Output:
+ // Len16(0000000000001000) = 4
+}
+
+func ExampleLen32() {
+ fmt.Printf("Len32(%032b) = %d\n", 8, bits.Len32(8))
+ // Output:
+ // Len32(00000000000000000000000000001000) = 4
+}
+
+func ExampleLen64() {
+ fmt.Printf("Len64(%064b) = %d\n", 8, bits.Len64(8))
+ // Output:
+ // Len64(0000000000000000000000000000000000000000000000000000000000001000) = 4
+}
diff --git a/src/math/bits/export_test.go b/src/math/bits/export_test.go
new file mode 100644
index 0000000..8c6f933
--- /dev/null
+++ b/src/math/bits/export_test.go
@@ -0,0 +1,7 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bits
+
+const DeBruijn64 = deBruijn64
diff --git a/src/math/bits/make_examples.go b/src/math/bits/make_examples.go
new file mode 100644
index 0000000..1d3ad53
--- /dev/null
+++ b/src/math/bits/make_examples.go
@@ -0,0 +1,112 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// This program generates example_test.go.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "math/bits"
+ "os"
+)
+
+const header = `// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by go run make_examples.go. DO NOT EDIT.
+
+package bits_test
+
+import (
+ "fmt"
+ "math/bits"
+)
+`
+
+func main() {
+ w := bytes.NewBuffer([]byte(header))
+
+ for _, e := range []struct {
+ name string
+ in int
+ out [4]interface{}
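+	// out2 is set only for RotateLeft, holding the negative-rotation results.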
+ out2 [4]interface{}
+ }{
+ {
+ name: "LeadingZeros",
+ in: 1,
+ out: [4]interface{}{bits.LeadingZeros8(1), bits.LeadingZeros16(1), bits.LeadingZeros32(1), bits.LeadingZeros64(1)},
+ },
+ {
+ name: "TrailingZeros",
+ in: 14,
+ out: [4]interface{}{bits.TrailingZeros8(14), bits.TrailingZeros16(14), bits.TrailingZeros32(14), bits.TrailingZeros64(14)},
+ },
+ {
+ name: "OnesCount",
+ in: 14,
+ out: [4]interface{}{bits.OnesCount8(14), bits.OnesCount16(14), bits.OnesCount32(14), bits.OnesCount64(14)},
+ },
+ {
+ name: "RotateLeft",
+ in: 15,
+ out: [4]interface{}{bits.RotateLeft8(15, 2), bits.RotateLeft16(15, 2), bits.RotateLeft32(15, 2), bits.RotateLeft64(15, 2)},
+ out2: [4]interface{}{bits.RotateLeft8(15, -2), bits.RotateLeft16(15, -2), bits.RotateLeft32(15, -2), bits.RotateLeft64(15, -2)},
+ },
+ {
+ name: "Reverse",
+ in: 19,
+ out: [4]interface{}{bits.Reverse8(19), bits.Reverse16(19), bits.Reverse32(19), bits.Reverse64(19)},
+ },
+ {
+ name: "ReverseBytes",
+ in: 15,
+ out: [4]interface{}{nil, bits.ReverseBytes16(15), bits.ReverseBytes32(15), bits.ReverseBytes64(15)},
+ },
+ {
+ name: "Len",
+ in: 8,
+ out: [4]interface{}{bits.Len8(8), bits.Len16(8), bits.Len32(8), bits.Len64(8)},
+ },
+ } {
+ for i, size := range []int{8, 16, 32, 64} {
+ if e.out[i] == nil {
+ continue // function doesn't exist
+ }
+ f := fmt.Sprintf("%s%d", e.name, size)
+ fmt.Fprintf(w, "\nfunc Example%s() {\n", f)
+ switch e.name {
+ case "RotateLeft", "Reverse", "ReverseBytes":
+ fmt.Fprintf(w, "\tfmt.Printf(\"%%0%db\\n\", %d)\n", size, e.in)
+ if e.name == "RotateLeft" {
+ fmt.Fprintf(w, "\tfmt.Printf(\"%%0%db\\n\", bits.%s(%d, 2))\n", size, f, e.in)
+ fmt.Fprintf(w, "\tfmt.Printf(\"%%0%db\\n\", bits.%s(%d, -2))\n", size, f, e.in)
+ } else {
+ fmt.Fprintf(w, "\tfmt.Printf(\"%%0%db\\n\", bits.%s(%d))\n", size, f, e.in)
+ }
+ fmt.Fprintf(w, "\t// Output:\n")
+ fmt.Fprintf(w, "\t// %0*b\n", size, e.in)
+ fmt.Fprintf(w, "\t// %0*b\n", size, e.out[i])
+ if e.name == "RotateLeft" && e.out2[i] != nil {
+ fmt.Fprintf(w, "\t// %0*b\n", size, e.out2[i])
+ }
+ default:
+ fmt.Fprintf(w, "\tfmt.Printf(\"%s(%%0%db) = %%d\\n\", %d, bits.%s(%d))\n", f, size, e.in, f, e.in)
+ fmt.Fprintf(w, "\t// Output:\n")
+ fmt.Fprintf(w, "\t// %s(%0*b) = %d\n", f, size, e.in, e.out[i])
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+
+ if err := os.WriteFile("example_test.go", w.Bytes(), 0666); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/src/math/bits/make_tables.go b/src/math/bits/make_tables.go
new file mode 100644
index 0000000..b068d5e
--- /dev/null
+++ b/src/math/bits/make_tables.go
@@ -0,0 +1,92 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// This program generates bits_tables.go.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io"
+ "log"
+ "os"
+)
+
+var header = []byte(`// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by go run make_tables.go. DO NOT EDIT.
+
+package bits
+
+`)
+
+func main() {
+ buf := bytes.NewBuffer(header)
+
+ gen(buf, "ntz8tab", ntz8)
+ gen(buf, "pop8tab", pop8)
+ gen(buf, "rev8tab", rev8)
+ gen(buf, "len8tab", len8)
+
+ out, err := format.Source(buf.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ err = os.WriteFile("bits_tables.go", out, 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
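+// gen writes to w a [256]uint8 table named name, 16 hex entries per line,
+// with f computing the value at each index.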
+func gen(w io.Writer, name string, f func(uint8) uint8) {
+ fmt.Fprintf(w, "var %s = [256]uint8{", name)
+ for i := 0; i < 256; i++ {
+ if i%16 == 0 {
+ fmt.Fprint(w, "\n\t")
+ } else {
+ fmt.Fprint(w, " ")
+ }
+ fmt.Fprintf(w, "%#02x,", f(uint8(i)))
+ }
+ fmt.Fprint(w, "\n}\n\n")
+}
+
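+// ntz8 counts trailing zeros; the n < 8 guard makes ntz8(0) == 8,
+// matching TrailingZeros8(0).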
+func ntz8(x uint8) (n uint8) {
+ for x&1 == 0 && n < 8 {
+ x >>= 1
+ n++
+ }
+ return
+}
+
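+// pop8 counts set bits: x &= x - 1 clears the lowest set bit
+// (Kernighan's method), so the loop runs once per set bit.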
+func pop8(x uint8) (n uint8) {
+ for x != 0 {
+ x &= x - 1
+ n++
+ }
+ return
+}
+
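+// rev8 shifts bits out of the low end of x into the low end of r,
+// reversing their order over eight iterations.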
+func rev8(x uint8) (r uint8) {
+ for i := 8; i > 0; i-- {
+ r = r<<1 | x&1
+ x >>= 1
+ }
+ return
+}
+
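+// len8 returns the minimum number of bits needed to represent x;
+// len8(0) == 0, matching Len8.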
+func len8(x uint8) (n uint8) {
+ for x != 0 {
+ x >>= 1
+ n++
+ }
+ return
+}