author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:16:40 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:16:40 +0000
commit     47ab3d4a42e9ab51c465c4322d2ec233f6324e6b (patch)
tree       a61a0ffd83f4a3def4b36e5c8e99630c559aa723 /src/crypto/elliptic
parent     Initial commit. (diff)
Adding upstream version 1.18.10. (upstream/1.18.10, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/crypto/elliptic')
-rw-r--r--  src/crypto/elliptic/elliptic.go                        496
-rw-r--r--  src/crypto/elliptic/elliptic_test.go                   375
-rw-r--r--  src/crypto/elliptic/export_generate.go                  16
-rw-r--r--  src/crypto/elliptic/fuzz_test.go                        53
-rw-r--r--  src/crypto/elliptic/gen_p256_table.go                   73
-rw-r--r--  src/crypto/elliptic/internal/fiat/Dockerfile            12
-rw-r--r--  src/crypto/elliptic/internal/fiat/README                34
-rw-r--r--  src/crypto/elliptic/internal/fiat/fiat_test.go          64
-rw-r--r--  src/crypto/elliptic/internal/fiat/generate.go          330
-rw-r--r--  src/crypto/elliptic/internal/fiat/p224.go              135
-rw-r--r--  src/crypto/elliptic/internal/fiat/p224_fiat64.go      1429
-rw-r--r--  src/crypto/elliptic/internal/fiat/p224_invert.go        87
-rw-r--r--  src/crypto/elliptic/internal/fiat/p384.go              135
-rw-r--r--  src/crypto/elliptic/internal/fiat/p384_fiat64.go      3004
-rw-r--r--  src/crypto/elliptic/internal/fiat/p384_invert.go       102
-rw-r--r--  src/crypto/elliptic/internal/fiat/p521.go              135
-rw-r--r--  src/crypto/elliptic/internal/fiat/p521_fiat64.go      5509
-rw-r--r--  src/crypto/elliptic/internal/fiat/p521_invert.go        89
-rw-r--r--  src/crypto/elliptic/internal/nistec/nistec_test.go      94
-rw-r--r--  src/crypto/elliptic/internal/nistec/p224.go            293
-rw-r--r--  src/crypto/elliptic/internal/nistec/p384.go            298
-rw-r--r--  src/crypto/elliptic/internal/nistec/p521.go            310
-rw-r--r--  src/crypto/elliptic/p224.go                            139
-rw-r--r--  src/crypto/elliptic/p224_test.go                       325
-rw-r--r--  src/crypto/elliptic/p256.go                           1195
-rw-r--r--  src/crypto/elliptic/p256_asm.go                        544
-rw-r--r--  src/crypto/elliptic/p256_asm_amd64.s                  2347
-rw-r--r--  src/crypto/elliptic/p256_asm_arm64.s                  1529
-rw-r--r--  src/crypto/elliptic/p256_asm_ppc64le.s                2494
-rw-r--r--  src/crypto/elliptic/p256_asm_s390x.s                  2714
-rw-r--r--  src/crypto/elliptic/p256_asm_table.bin                 bin 0 -> 88064 bytes
-rw-r--r--  src/crypto/elliptic/p256_asm_table_test.go              64
-rw-r--r--  src/crypto/elliptic/p256_generic.go                     14
-rw-r--r--  src/crypto/elliptic/p256_ppc64le.go                    521
-rw-r--r--  src/crypto/elliptic/p256_s390x.go                      576
-rw-r--r--  src/crypto/elliptic/p256_test.go                       152
-rw-r--r--  src/crypto/elliptic/p384.go                            144
-rw-r--r--  src/crypto/elliptic/p521.go                            165
38 files changed, 25996 insertions, 0 deletions
diff --git a/src/crypto/elliptic/elliptic.go b/src/crypto/elliptic/elliptic.go
new file mode 100644
index 0000000..7ead09f
--- /dev/null
+++ b/src/crypto/elliptic/elliptic.go
@@ -0,0 +1,496 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package elliptic implements the standard NIST P-224, P-256, P-384, and P-521
+// elliptic curves over prime fields.
+package elliptic
+
+import (
+ "io"
+ "math/big"
+ "sync"
+)
+
+// A Curve represents a short-form Weierstrass curve with a=-3.
+//
+// The behavior of Add, Double, and ScalarMult when the input is not a point on
+// the curve is undefined.
+//
+// Note that the conventional point at infinity (0, 0) is not considered on the
+// curve, although it can be returned by Add, Double, ScalarMult, or
+// ScalarBaseMult (but not the Unmarshal or UnmarshalCompressed functions).
+type Curve interface {
+ // Params returns the parameters for the curve.
+ Params() *CurveParams
+ // IsOnCurve reports whether the given (x,y) lies on the curve.
+ IsOnCurve(x, y *big.Int) bool
+ // Add returns the sum of (x1,y1) and (x2,y2)
+ Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int)
+ // Double returns 2*(x,y)
+ Double(x1, y1 *big.Int) (x, y *big.Int)
+ // ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
+ ScalarMult(x1, y1 *big.Int, k []byte) (x, y *big.Int)
+ // ScalarBaseMult returns k*G, where G is the base point of the group
+ // and k is an integer in big-endian form.
+ ScalarBaseMult(k []byte) (x, y *big.Int)
+}
+
+func matchesSpecificCurve(params *CurveParams, available ...Curve) (Curve, bool) {
+ for _, c := range available {
+ if params == c.Params() {
+ return c, true
+ }
+ }
+ return nil, false
+}
+
+// CurveParams contains the parameters of an elliptic curve and also provides
+// a generic, non-constant time implementation of Curve.
+type CurveParams struct {
+ P *big.Int // the order of the underlying field
+ N *big.Int // the order of the base point
+ B *big.Int // the constant of the curve equation
+ Gx, Gy *big.Int // (x,y) of the base point
+ BitSize int // the size of the underlying field
+ Name string // the canonical name of the curve
+}
+
+func (curve *CurveParams) Params() *CurveParams {
+ return curve
+}
+
+// CurveParams operates, internally, on Jacobian coordinates. For a given
+// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
+// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
+// calculation can be performed within the transform (as in ScalarMult and
+// ScalarBaseMult). But even for Add and Double, it's faster to apply and
+// reverse the transform than to operate in affine coordinates.
+
+// polynomial returns x³ - 3x + b.
+func (curve *CurveParams) polynomial(x *big.Int) *big.Int {
+ x3 := new(big.Int).Mul(x, x)
+ x3.Mul(x3, x)
+
+ threeX := new(big.Int).Lsh(x, 1)
+ threeX.Add(threeX, x)
+
+ x3.Sub(x3, threeX)
+ x3.Add(x3, curve.B)
+ x3.Mod(x3, curve.P)
+
+ return x3
+}
+
+func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
+ return specific.IsOnCurve(x, y)
+ }
+
+ if x.Sign() < 0 || x.Cmp(curve.P) >= 0 ||
+ y.Sign() < 0 || y.Cmp(curve.P) >= 0 {
+ return false
+ }
+
+ // y² = x³ - 3x + b
+ y2 := new(big.Int).Mul(y, y)
+ y2.Mod(y2, curve.P)
+
+ return curve.polynomial(x).Cmp(y2) == 0
+}
+
+// zForAffine returns a Jacobian Z value for the affine point (x, y). If x and
+// y are zero, it assumes that they represent the point at infinity because (0,
+// 0) is not on any of the curves handled here.
+func zForAffine(x, y *big.Int) *big.Int {
+ z := new(big.Int)
+ if x.Sign() != 0 || y.Sign() != 0 {
+ z.SetInt64(1)
+ }
+ return z
+}
+
+// affineFromJacobian reverses the Jacobian transform. See the comment at the
+// top of the file. If the point is ∞ it returns 0, 0.
+func (curve *CurveParams) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
+ if z.Sign() == 0 {
+ return new(big.Int), new(big.Int)
+ }
+
+ zinv := new(big.Int).ModInverse(z, curve.P)
+ zinvsq := new(big.Int).Mul(zinv, zinv)
+
+ xOut = new(big.Int).Mul(x, zinvsq)
+ xOut.Mod(xOut, curve.P)
+ zinvsq.Mul(zinvsq, zinv)
+ yOut = new(big.Int).Mul(y, zinvsq)
+ yOut.Mod(yOut, curve.P)
+ return
+}
+
+func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
+ return specific.Add(x1, y1, x2, y2)
+ }
+
+ z1 := zForAffine(x1, y1)
+ z2 := zForAffine(x2, y2)
+ return curve.affineFromJacobian(curve.addJacobian(x1, y1, z1, x2, y2, z2))
+}
+
+// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
+// (x2, y2, z2) and returns their sum, also in Jacobian form.
+func (curve *CurveParams) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
+ // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
+ x3, y3, z3 := new(big.Int), new(big.Int), new(big.Int)
+ if z1.Sign() == 0 {
+ x3.Set(x2)
+ y3.Set(y2)
+ z3.Set(z2)
+ return x3, y3, z3
+ }
+ if z2.Sign() == 0 {
+ x3.Set(x1)
+ y3.Set(y1)
+ z3.Set(z1)
+ return x3, y3, z3
+ }
+
+ z1z1 := new(big.Int).Mul(z1, z1)
+ z1z1.Mod(z1z1, curve.P)
+ z2z2 := new(big.Int).Mul(z2, z2)
+ z2z2.Mod(z2z2, curve.P)
+
+ u1 := new(big.Int).Mul(x1, z2z2)
+ u1.Mod(u1, curve.P)
+ u2 := new(big.Int).Mul(x2, z1z1)
+ u2.Mod(u2, curve.P)
+ h := new(big.Int).Sub(u2, u1)
+ xEqual := h.Sign() == 0
+ if h.Sign() == -1 {
+ h.Add(h, curve.P)
+ }
+ i := new(big.Int).Lsh(h, 1)
+ i.Mul(i, i)
+ j := new(big.Int).Mul(h, i)
+
+ s1 := new(big.Int).Mul(y1, z2)
+ s1.Mul(s1, z2z2)
+ s1.Mod(s1, curve.P)
+ s2 := new(big.Int).Mul(y2, z1)
+ s2.Mul(s2, z1z1)
+ s2.Mod(s2, curve.P)
+ r := new(big.Int).Sub(s2, s1)
+ if r.Sign() == -1 {
+ r.Add(r, curve.P)
+ }
+ yEqual := r.Sign() == 0
+ if xEqual && yEqual {
+ return curve.doubleJacobian(x1, y1, z1)
+ }
+ r.Lsh(r, 1)
+ v := new(big.Int).Mul(u1, i)
+
+ x3.Set(r)
+ x3.Mul(x3, x3)
+ x3.Sub(x3, j)
+ x3.Sub(x3, v)
+ x3.Sub(x3, v)
+ x3.Mod(x3, curve.P)
+
+ y3.Set(r)
+ v.Sub(v, x3)
+ y3.Mul(y3, v)
+ s1.Mul(s1, j)
+ s1.Lsh(s1, 1)
+ y3.Sub(y3, s1)
+ y3.Mod(y3, curve.P)
+
+ z3.Add(z1, z2)
+ z3.Mul(z3, z3)
+ z3.Sub(z3, z1z1)
+ z3.Sub(z3, z2z2)
+ z3.Mul(z3, h)
+ z3.Mod(z3, curve.P)
+
+ return x3, y3, z3
+}
+
+func (curve *CurveParams) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
+ return specific.Double(x1, y1)
+ }
+
+ z1 := zForAffine(x1, y1)
+ return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
+}
+
+// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
+// returns its double, also in Jacobian form.
+func (curve *CurveParams) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
+ // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
+ delta := new(big.Int).Mul(z, z)
+ delta.Mod(delta, curve.P)
+ gamma := new(big.Int).Mul(y, y)
+ gamma.Mod(gamma, curve.P)
+ alpha := new(big.Int).Sub(x, delta)
+ if alpha.Sign() == -1 {
+ alpha.Add(alpha, curve.P)
+ }
+ alpha2 := new(big.Int).Add(x, delta)
+ alpha.Mul(alpha, alpha2)
+ alpha2.Set(alpha)
+ alpha.Lsh(alpha, 1)
+ alpha.Add(alpha, alpha2)
+
+ beta := alpha2.Mul(x, gamma)
+
+ x3 := new(big.Int).Mul(alpha, alpha)
+ beta8 := new(big.Int).Lsh(beta, 3)
+ beta8.Mod(beta8, curve.P)
+ x3.Sub(x3, beta8)
+ if x3.Sign() == -1 {
+ x3.Add(x3, curve.P)
+ }
+ x3.Mod(x3, curve.P)
+
+ z3 := new(big.Int).Add(y, z)
+ z3.Mul(z3, z3)
+ z3.Sub(z3, gamma)
+ if z3.Sign() == -1 {
+ z3.Add(z3, curve.P)
+ }
+ z3.Sub(z3, delta)
+ if z3.Sign() == -1 {
+ z3.Add(z3, curve.P)
+ }
+ z3.Mod(z3, curve.P)
+
+ beta.Lsh(beta, 2)
+ beta.Sub(beta, x3)
+ if beta.Sign() == -1 {
+ beta.Add(beta, curve.P)
+ }
+ y3 := alpha.Mul(alpha, beta)
+
+ gamma.Mul(gamma, gamma)
+ gamma.Lsh(gamma, 3)
+ gamma.Mod(gamma, curve.P)
+
+ y3.Sub(y3, gamma)
+ if y3.Sign() == -1 {
+ y3.Add(y3, curve.P)
+ }
+ y3.Mod(y3, curve.P)
+
+ return x3, y3, z3
+}
+
+func (curve *CurveParams) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve, p224, p256, p384, p521); ok {
+ return specific.ScalarMult(Bx, By, k)
+ }
+
+ Bz := new(big.Int).SetInt64(1)
+ x, y, z := new(big.Int), new(big.Int), new(big.Int)
+
+ for _, byte := range k {
+ for bitNum := 0; bitNum < 8; bitNum++ {
+ x, y, z = curve.doubleJacobian(x, y, z)
+ if byte&0x80 == 0x80 {
+ x, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)
+ }
+ byte <<= 1
+ }
+ }
+
+ return curve.affineFromJacobian(x, y, z)
+}
+
+func (curve *CurveParams) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
+ // If there is a dedicated constant-time implementation for this curve operation,
+ // use that instead of the generic one.
+ if specific, ok := matchesSpecificCurve(curve, p224, p256, p384, p521); ok {
+ return specific.ScalarBaseMult(k)
+ }
+
+ return curve.ScalarMult(curve.Gx, curve.Gy, k)
+}
+
+var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
+
+// GenerateKey returns a public/private key pair. The private key is
+// generated using the given reader, which must return random data.
+func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err error) {
+ N := curve.Params().N
+ bitSize := N.BitLen()
+ byteLen := (bitSize + 7) / 8
+ priv = make([]byte, byteLen)
+
+ for x == nil {
+ _, err = io.ReadFull(rand, priv)
+ if err != nil {
+ return
+ }
+ // We have to mask off any excess bits in the case that the size of the
+ // underlying field is not a whole number of bytes.
+ priv[0] &= mask[bitSize%8]
+ // This is because, in tests, rand will return all zeros and we don't
+ // want to get the point at infinity and loop forever.
+ priv[1] ^= 0x42
+
+ // If the scalar is out of range, sample another random number.
+ if new(big.Int).SetBytes(priv).Cmp(N) >= 0 {
+ continue
+ }
+
+ x, y = curve.ScalarBaseMult(priv)
+ }
+ return
+}
+
+// Marshal converts a point on the curve into the uncompressed form specified in
+// SEC 1, Version 2.0, Section 2.3.3. If the point is not on the curve (or is
+// the conventional point at infinity), the behavior is undefined.
+func Marshal(curve Curve, x, y *big.Int) []byte {
+ byteLen := (curve.Params().BitSize + 7) / 8
+
+ ret := make([]byte, 1+2*byteLen)
+ ret[0] = 4 // uncompressed point
+
+ x.FillBytes(ret[1 : 1+byteLen])
+ y.FillBytes(ret[1+byteLen : 1+2*byteLen])
+
+ return ret
+}
+
+// MarshalCompressed converts a point on the curve into the compressed form
+// specified in SEC 1, Version 2.0, Section 2.3.3. If the point is not on the
+// curve (or is the conventional point at infinity), the behavior is undefined.
+func MarshalCompressed(curve Curve, x, y *big.Int) []byte {
+ byteLen := (curve.Params().BitSize + 7) / 8
+ compressed := make([]byte, 1+byteLen)
+ compressed[0] = byte(y.Bit(0)) | 2
+ x.FillBytes(compressed[1:])
+ return compressed
+}
+
+// Unmarshal converts a point, serialized by Marshal, into an x, y pair. It is
+// an error if the point is not in uncompressed form, is not on the curve, or is
+// the point at infinity. On error, x = nil.
+func Unmarshal(curve Curve, data []byte) (x, y *big.Int) {
+ byteLen := (curve.Params().BitSize + 7) / 8
+ if len(data) != 1+2*byteLen {
+ return nil, nil
+ }
+ if data[0] != 4 { // uncompressed form
+ return nil, nil
+ }
+ p := curve.Params().P
+ x = new(big.Int).SetBytes(data[1 : 1+byteLen])
+ y = new(big.Int).SetBytes(data[1+byteLen:])
+ if x.Cmp(p) >= 0 || y.Cmp(p) >= 0 {
+ return nil, nil
+ }
+ if !curve.IsOnCurve(x, y) {
+ return nil, nil
+ }
+ return
+}
+
+// UnmarshalCompressed converts a point, serialized by MarshalCompressed, into
+// an x, y pair. It is an error if the point is not in compressed form, is not
+// on the curve, or is the point at infinity. On error, x = nil.
+func UnmarshalCompressed(curve Curve, data []byte) (x, y *big.Int) {
+ byteLen := (curve.Params().BitSize + 7) / 8
+ if len(data) != 1+byteLen {
+ return nil, nil
+ }
+ if data[0] != 2 && data[0] != 3 { // compressed form
+ return nil, nil
+ }
+ p := curve.Params().P
+ x = new(big.Int).SetBytes(data[1:])
+ if x.Cmp(p) >= 0 {
+ return nil, nil
+ }
+ // y² = x³ - 3x + b
+ y = curve.Params().polynomial(x)
+ y = y.ModSqrt(y, p)
+ if y == nil {
+ return nil, nil
+ }
+ if byte(y.Bit(0)) != data[0]&1 {
+ y.Neg(y).Mod(y, p)
+ }
+ if !curve.IsOnCurve(x, y) {
+ return nil, nil
+ }
+ return
+}
+
+var initonce sync.Once
+
+func initAll() {
+ initP224()
+ initP256()
+ initP384()
+ initP521()
+}
+
+// P224 returns a Curve which implements NIST P-224 (FIPS 186-3, section D.2.2),
+// also known as secp224r1. The CurveParams.Name of this Curve is "P-224".
+//
+// Multiple invocations of this function will return the same value, so it can
+// be used for equality checks and switch statements.
+//
+// The cryptographic operations are implemented using constant-time algorithms.
+func P224() Curve {
+ initonce.Do(initAll)
+ return p224
+}
+
+// P256 returns a Curve which implements NIST P-256 (FIPS 186-3, section D.2.3),
+// also known as secp256r1 or prime256v1. The CurveParams.Name of this Curve is
+// "P-256".
+//
+// Multiple invocations of this function will return the same value, so it can
+// be used for equality checks and switch statements.
+//
+// ScalarMult and ScalarBaseMult are implemented using constant-time algorithms.
+func P256() Curve {
+ initonce.Do(initAll)
+ return p256
+}
+
+// P384 returns a Curve which implements NIST P-384 (FIPS 186-3, section D.2.4),
+// also known as secp384r1. The CurveParams.Name of this Curve is "P-384".
+//
+// Multiple invocations of this function will return the same value, so it can
+// be used for equality checks and switch statements.
+//
+// The cryptographic operations are implemented using constant-time algorithms.
+func P384() Curve {
+ initonce.Do(initAll)
+ return p384
+}
+
+// P521 returns a Curve which implements NIST P-521 (FIPS 186-3, section D.2.5),
+// also known as secp521r1. The CurveParams.Name of this Curve is "P-521".
+//
+// Multiple invocations of this function will return the same value, so it can
+// be used for equality checks and switch statements.
+//
+// The cryptographic operations are implemented using constant-time algorithms.
+func P521() Curve {
+ initonce.Do(initAll)
+ return p521
+}
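
The comment near the top of elliptic.go defines the Jacobian transform (x = x1/z1², y = y1/z1³) that addJacobian and doubleJacobian operate in. A minimal standalone sketch of the round trip that affineFromJacobian performs, assuming only those two relations (this program is illustrative and not part of the patch):

    package main

    import (
        "crypto/elliptic"
        "fmt"
        "math/big"
    )

    func main() {
        params := elliptic.P256().Params()
        p, x, y := params.P, params.Gx, params.Gy

        // Lift (x, y) to Jacobian coordinates with an arbitrary Z != 0,
        // here Z = 3: X = x*Z^2 mod p, Y = y*Z^3 mod p.
        z := big.NewInt(3)
        zz := new(big.Int).Mul(z, z)
        X := new(big.Int).Mod(new(big.Int).Mul(x, zz), p)
        Y := new(big.Int).Mod(new(big.Int).Mul(y, new(big.Int).Mul(zz, z)), p)

        // Reverse the transform the way affineFromJacobian does, multiplying
        // by Z^-2 and Z^-3 modulo p.
        zinv := new(big.Int).ModInverse(z, p)
        zinvSq := new(big.Int).Mul(zinv, zinv)
        xOut := new(big.Int).Mod(new(big.Int).Mul(X, zinvSq), p)
        yOut := new(big.Int).Mod(new(big.Int).Mul(Y, new(big.Int).Mul(zinvSq, zinv)), p)

        fmt.Println(xOut.Cmp(x) == 0, yOut.Cmp(y) == 0) // true true
    }
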
diff --git a/src/crypto/elliptic/elliptic_test.go b/src/crypto/elliptic/elliptic_test.go
new file mode 100644
index 0000000..5481929
--- /dev/null
+++ b/src/crypto/elliptic/elliptic_test.go
@@ -0,0 +1,375 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "math/big"
+ "testing"
+)
+
+// genericParamsForCurve returns the dereferenced CurveParams for
+// the specified curve. This is used to avoid the logic for
+// upgrading a curve to its specific implementation, forcing
+// usage of the generic implementation.
+func genericParamsForCurve(c Curve) *CurveParams {
+ d := *(c.Params())
+ return &d
+}
+
+func testAllCurves(t *testing.T, f func(*testing.T, Curve)) {
+ tests := []struct {
+ name string
+ curve Curve
+ }{
+ {"P256", P256()},
+ {"P256/Params", genericParamsForCurve(P256())},
+ {"P224", P224()},
+ {"P224/Params", genericParamsForCurve(P224())},
+ {"P384", P384()},
+ {"P384/Params", genericParamsForCurve(P384())},
+ {"P521", P521()},
+ {"P521/Params", genericParamsForCurve(P521())},
+ }
+ if testing.Short() {
+ tests = tests[:1]
+ }
+ for _, test := range tests {
+ curve := test.curve
+ t.Run(test.name, func(t *testing.T) {
+ t.Parallel()
+ f(t, curve)
+ })
+ }
+}
+
+func TestOnCurve(t *testing.T) {
+ testAllCurves(t, func(t *testing.T, curve Curve) {
+ if !curve.IsOnCurve(curve.Params().Gx, curve.Params().Gy) {
+ t.Error("basepoint is not on the curve")
+ }
+ })
+}
+
+func TestOffCurve(t *testing.T) {
+ testAllCurves(t, func(t *testing.T, curve Curve) {
+ x, y := new(big.Int).SetInt64(1), new(big.Int).SetInt64(1)
+ if curve.IsOnCurve(x, y) {
+ t.Errorf("point off curve is claimed to be on the curve")
+ }
+ b := Marshal(curve, x, y)
+ x1, y1 := Unmarshal(curve, b)
+ if x1 != nil || y1 != nil {
+ t.Errorf("unmarshaling a point not on the curve succeeded")
+ }
+ })
+}
+
+func TestInfinity(t *testing.T) {
+ testAllCurves(t, testInfinity)
+}
+
+func testInfinity(t *testing.T, curve Curve) {
+ _, x, y, _ := GenerateKey(curve, rand.Reader)
+ x, y = curve.ScalarMult(x, y, curve.Params().N.Bytes())
+ if x.Sign() != 0 || y.Sign() != 0 {
+ t.Errorf("x^q != ∞")
+ }
+
+ x, y = curve.ScalarBaseMult([]byte{0})
+ if x.Sign() != 0 || y.Sign() != 0 {
+ t.Errorf("b^0 != ∞")
+ x.SetInt64(0)
+ y.SetInt64(0)
+ }
+
+ x2, y2 := curve.Double(x, y)
+ if x2.Sign() != 0 || y2.Sign() != 0 {
+ t.Errorf("2∞ != ∞")
+ }
+
+ baseX := curve.Params().Gx
+ baseY := curve.Params().Gy
+
+ x3, y3 := curve.Add(baseX, baseY, x, y)
+ if x3.Cmp(baseX) != 0 || y3.Cmp(baseY) != 0 {
+ t.Errorf("x+∞ != x")
+ }
+
+ x4, y4 := curve.Add(x, y, baseX, baseY)
+ if x4.Cmp(baseX) != 0 || y4.Cmp(baseY) != 0 {
+ t.Errorf("∞+x != x")
+ }
+
+ if curve.IsOnCurve(x, y) {
+ t.Errorf("IsOnCurve(∞) == true")
+ }
+
+ if xx, yy := Unmarshal(curve, Marshal(curve, x, y)); xx != nil || yy != nil {
+ t.Errorf("Unmarshal(Marshal(∞)) did not return an error")
+ }
+ // We don't test UnmarshalCompressed(MarshalCompressed(∞)) because there are
+ // two valid points with x = 0.
+ if xx, yy := Unmarshal(curve, []byte{0x00}); xx != nil || yy != nil {
+ t.Errorf("Unmarshal(∞) did not return an error")
+ }
+}
+
+func TestMarshal(t *testing.T) {
+ testAllCurves(t, func(t *testing.T, curve Curve) {
+ _, x, y, err := GenerateKey(curve, rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ serialized := Marshal(curve, x, y)
+ xx, yy := Unmarshal(curve, serialized)
+ if xx == nil {
+ t.Fatal("failed to unmarshal")
+ }
+ if xx.Cmp(x) != 0 || yy.Cmp(y) != 0 {
+ t.Fatal("unmarshal returned different values")
+ }
+ })
+}
+
+func TestUnmarshalToLargeCoordinates(t *testing.T) {
+ // See https://golang.org/issues/20482.
+ testAllCurves(t, testUnmarshalToLargeCoordinates)
+}
+
+func testUnmarshalToLargeCoordinates(t *testing.T, curve Curve) {
+ p := curve.Params().P
+ byteLen := (p.BitLen() + 7) / 8
+
+ // Set x to be greater than curve's parameter P – specifically, to P+5.
+ // Set y to mod_sqrt(x^3 - 3x + B) so that (x mod P = 5, y) is on the
+ // curve.
+ x := new(big.Int).Add(p, big.NewInt(5))
+ y := curve.Params().polynomial(x)
+ y.ModSqrt(y, p)
+
+ invalid := make([]byte, byteLen*2+1)
+ invalid[0] = 4 // uncompressed encoding
+ x.FillBytes(invalid[1 : 1+byteLen])
+ y.FillBytes(invalid[1+byteLen:])
+
+ if X, Y := Unmarshal(curve, invalid); X != nil || Y != nil {
+ t.Errorf("Unmarshal accepts invalid X coordinate")
+ }
+
+ if curve == p256 {
+ // This is a point on the curve with a small y value, small enough that
+ // we can add p and still be within 32 bytes.
+ x, _ = new(big.Int).SetString("31931927535157963707678568152204072984517581467226068221761862915403492091210", 10)
+ y, _ = new(big.Int).SetString("5208467867388784005506817585327037698770365050895731383201516607147", 10)
+ y.Add(y, p)
+
+ if p.Cmp(y) > 0 || y.BitLen() != 256 {
+ t.Fatal("y not within expected range")
+ }
+
+ // marshal
+ x.FillBytes(invalid[1 : 1+byteLen])
+ y.FillBytes(invalid[1+byteLen:])
+
+ if X, Y := Unmarshal(curve, invalid); X != nil || Y != nil {
+ t.Errorf("Unmarshal accepts invalid Y coordinate")
+ }
+ }
+}
+
+// TestInvalidCoordinates tests big.Int values that are not valid field elements
+// (negative or bigger than P). They are expected to return false from
+// IsOnCurve, all other behavior is undefined.
+func TestInvalidCoordinates(t *testing.T) {
+ testAllCurves(t, testInvalidCoordinates)
+}
+
+func testInvalidCoordinates(t *testing.T, curve Curve) {
+ checkIsOnCurveFalse := func(name string, x, y *big.Int) {
+ if curve.IsOnCurve(x, y) {
+ t.Errorf("IsOnCurve(%s) unexpectedly returned true", name)
+ }
+ }
+
+ p := curve.Params().P
+ _, x, y, _ := GenerateKey(curve, rand.Reader)
+ xx, yy := new(big.Int), new(big.Int)
+
+ // Check if the sign is getting dropped.
+ xx.Neg(x)
+ checkIsOnCurveFalse("-x, y", xx, y)
+ yy.Neg(y)
+ checkIsOnCurveFalse("x, -y", x, yy)
+
+ // Check if negative values are reduced modulo P.
+ xx.Sub(x, p)
+ checkIsOnCurveFalse("x-P, y", xx, y)
+ yy.Sub(y, p)
+ checkIsOnCurveFalse("x, y-P", x, yy)
+
+ // Check if positive values are reduced modulo P.
+ xx.Add(x, p)
+ checkIsOnCurveFalse("x+P, y", xx, y)
+ yy.Add(y, p)
+ checkIsOnCurveFalse("x, y+P", x, yy)
+
+ // Check if the overflow is dropped.
+ xx.Add(x, new(big.Int).Lsh(big.NewInt(1), 535))
+ checkIsOnCurveFalse("x+2⁵³⁵, y", xx, y)
+ yy.Add(y, new(big.Int).Lsh(big.NewInt(1), 535))
+ checkIsOnCurveFalse("x, y+2⁵³⁵", x, yy)
+
+ // Check if P is treated like zero (if possible).
+ // y^2 = x^3 - 3x + B
+ // y = mod_sqrt(x^3 - 3x + B)
+ // y = mod_sqrt(B) if x = 0
+ // If there is no modsqrt, there is no point with x = 0, can't test x = P.
+ if yy := new(big.Int).ModSqrt(curve.Params().B, p); yy != nil {
+ if !curve.IsOnCurve(big.NewInt(0), yy) {
+ t.Fatal("(0, mod_sqrt(B)) is not on the curve?")
+ }
+ checkIsOnCurveFalse("P, y", p, yy)
+ }
+}
+
+func TestMarshalCompressed(t *testing.T) {
+ t.Run("P-256/03", func(t *testing.T) {
+ data, _ := hex.DecodeString("031e3987d9f9ea9d7dd7155a56a86b2009e1e0ab332f962d10d8beb6406ab1ad79")
+ x, _ := new(big.Int).SetString("13671033352574878777044637384712060483119675368076128232297328793087057702265", 10)
+ y, _ := new(big.Int).SetString("66200849279091436748794323380043701364391950689352563629885086590854940586447", 10)
+ testMarshalCompressed(t, P256(), x, y, data)
+ })
+ t.Run("P-256/02", func(t *testing.T) {
+ data, _ := hex.DecodeString("021e3987d9f9ea9d7dd7155a56a86b2009e1e0ab332f962d10d8beb6406ab1ad79")
+ x, _ := new(big.Int).SetString("13671033352574878777044637384712060483119675368076128232297328793087057702265", 10)
+ y, _ := new(big.Int).SetString("49591239931264812013903123569363872165694192725937750565648544718012157267504", 10)
+ testMarshalCompressed(t, P256(), x, y, data)
+ })
+
+ t.Run("Invalid", func(t *testing.T) {
+ data, _ := hex.DecodeString("02fd4bf61763b46581fd9174d623516cf3c81edd40e29ffa2777fb6cb0ae3ce535")
+ X, Y := UnmarshalCompressed(P256(), data)
+ if X != nil || Y != nil {
+ t.Error("expected an error for invalid encoding")
+ }
+ })
+
+ if testing.Short() {
+ t.Skip("skipping other curves on short test")
+ }
+
+ testAllCurves(t, func(t *testing.T, curve Curve) {
+ _, x, y, err := GenerateKey(curve, rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ testMarshalCompressed(t, curve, x, y, nil)
+ })
+
+}
+
+func testMarshalCompressed(t *testing.T, curve Curve, x, y *big.Int, want []byte) {
+ if !curve.IsOnCurve(x, y) {
+ t.Fatal("invalid test point")
+ }
+ got := MarshalCompressed(curve, x, y)
+ if want != nil && !bytes.Equal(got, want) {
+ t.Errorf("got unexpected MarshalCompressed result: got %x, want %x", got, want)
+ }
+
+ X, Y := UnmarshalCompressed(curve, got)
+ if X == nil || Y == nil {
+ t.Fatalf("UnmarshalCompressed failed unexpectedly")
+ }
+
+ if !curve.IsOnCurve(X, Y) {
+ t.Error("UnmarshalCompressed returned a point not on the curve")
+ }
+ if X.Cmp(x) != 0 || Y.Cmp(y) != 0 {
+ t.Errorf("point did not round-trip correctly: got (%v, %v), want (%v, %v)", X, Y, x, y)
+ }
+}
+
+func TestLargeIsOnCurve(t *testing.T) {
+ testAllCurves(t, func(t *testing.T, curve Curve) {
+ large := big.NewInt(1)
+ large.Lsh(large, 1000)
+ if curve.IsOnCurve(large, large) {
+ t.Errorf("(2^1000, 2^1000) is reported on the curve")
+ }
+ })
+}
+
+func benchmarkAllCurves(t *testing.B, f func(*testing.B, Curve)) {
+ tests := []struct {
+ name string
+ curve Curve
+ }{
+ {"P256", P256()},
+ {"P224", P224()},
+ {"P384", P384()},
+ {"P521", P521()},
+ }
+ for _, test := range tests {
+ curve := test.curve
+ t.Run(test.name, func(t *testing.B) {
+ f(t, curve)
+ })
+ }
+}
+
+func BenchmarkScalarBaseMult(b *testing.B) {
+ benchmarkAllCurves(b, func(b *testing.B, curve Curve) {
+ priv, _, _, _ := GenerateKey(curve, rand.Reader)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x, _ := curve.ScalarBaseMult(priv)
+ // Prevent the compiler from optimizing out the operation.
+ priv[0] ^= byte(x.Bits()[0])
+ }
+ })
+}
+
+func BenchmarkScalarMult(b *testing.B) {
+ benchmarkAllCurves(b, func(b *testing.B, curve Curve) {
+ _, x, y, _ := GenerateKey(curve, rand.Reader)
+ priv, _, _, _ := GenerateKey(curve, rand.Reader)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x, y = curve.ScalarMult(x, y, priv)
+ }
+ })
+}
+
+func BenchmarkMarshalUnmarshal(b *testing.B) {
+ benchmarkAllCurves(b, func(b *testing.B, curve Curve) {
+ _, x, y, _ := GenerateKey(curve, rand.Reader)
+ b.Run("Uncompressed", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ buf := Marshal(curve, x, y)
+ xx, yy := Unmarshal(curve, buf)
+ if xx.Cmp(x) != 0 || yy.Cmp(y) != 0 {
+ b.Error("Unmarshal output different from Marshal input")
+ }
+ }
+ })
+ b.Run("Compressed", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ buf := MarshalCompressed(curve, x, y)
+ xx, yy := UnmarshalCompressed(curve, buf)
+ if xx.Cmp(x) != 0 || yy.Cmp(y) != 0 {
+ b.Error("UnmarshalCompressed output different from MarshalCompressed input")
+ }
+ }
+ })
+ })
+}
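
For reference, the round-trip property these tests and benchmarks exercise looks like this from the caller's side; a minimal sketch using only the exported API (not part of the patch):

    package main

    import (
        "crypto/elliptic"
        "crypto/rand"
        "fmt"
    )

    func main() {
        curve := elliptic.P256()
        _, x, y, err := elliptic.GenerateKey(curve, rand.Reader)
        if err != nil {
            panic(err)
        }

        // Compressed form: a 0x02/0x03 prefix (the parity of Y) plus the
        // 32-byte X coordinate, 33 bytes total for P-256.
        buf := elliptic.MarshalCompressed(curve, x, y)
        X, Y := elliptic.UnmarshalCompressed(curve, buf)
        fmt.Println(len(buf), X.Cmp(x) == 0, Y.Cmp(y) == 0) // 33 true true
    }
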
diff --git a/src/crypto/elliptic/export_generate.go b/src/crypto/elliptic/export_generate.go
new file mode 100644
index 0000000..f15b302
--- /dev/null
+++ b/src/crypto/elliptic/export_generate.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build tablegen
+
+package elliptic
+
+// This block exports p256-related internals for the p256 table generator in gen_p256_table.go.
+var (
+ P256PointDoubleAsm = p256PointDoubleAsm
+ P256PointAddAsm = p256PointAddAsm
+ P256Inverse = p256Inverse
+ P256Sqr = p256Sqr
+ P256Mul = p256Mul
+)
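
These exports exist only when the tablegen build tag is set, so normal builds of crypto/elliptic are unaffected. The table generator below would presumably be invoked with something like the following (the exact command line is an assumption, not stated in the patch):

    go run -tags tablegen gen_p256_table.go
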
diff --git a/src/crypto/elliptic/fuzz_test.go b/src/crypto/elliptic/fuzz_test.go
new file mode 100644
index 0000000..2b5ddae
--- /dev/null
+++ b/src/crypto/elliptic/fuzz_test.go
@@ -0,0 +1,53 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64 || ppc64le
+
+package elliptic
+
+import (
+ "crypto/rand"
+ "testing"
+ "time"
+)
+
+func TestFuzz(t *testing.T) {
+ p256 := P256()
+ p256Generic := p256.Params()
+
+ var scalar1 [32]byte
+ var scalar2 [32]byte
+ var timeout *time.Timer
+
+ if testing.Short() {
+ timeout = time.NewTimer(10 * time.Millisecond)
+ } else {
+ timeout = time.NewTimer(2 * time.Second)
+ }
+
+ for {
+ select {
+ case <-timeout.C:
+ return
+ default:
+ }
+
+ rand.Read(scalar1[:])
+ rand.Read(scalar2[:])
+
+ x, y := p256.ScalarBaseMult(scalar1[:])
+ x2, y2 := p256Generic.ScalarBaseMult(scalar1[:])
+
+ xx, yy := p256.ScalarMult(x, y, scalar2[:])
+ xx2, yy2 := p256Generic.ScalarMult(x2, y2, scalar2[:])
+
+ if x.Cmp(x2) != 0 || y.Cmp(y2) != 0 {
+ t.Fatalf("ScalarBaseMult does not match reference result with scalar: %x, please report this error to security@golang.org", scalar1)
+ }
+
+ if xx.Cmp(xx2) != 0 || yy.Cmp(yy2) != 0 {
+ t.Fatalf("ScalarMult does not match reference result with scalars: %x and %x, please report this error to security@golang.org", scalar1, scalar2)
+ }
+ }
+}
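
TestFuzz differentially tests the constant-time P-256 assembly against the generic CurveParams implementation on random scalars. From the Go source tree it can be run on its own with:

    go test -run TestFuzz crypto/elliptic
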
diff --git a/src/crypto/elliptic/gen_p256_table.go b/src/crypto/elliptic/gen_p256_table.go
new file mode 100644
index 0000000..0ebbc66
--- /dev/null
+++ b/src/crypto/elliptic/gen_p256_table.go
@@ -0,0 +1,73 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+ "crypto/elliptic"
+ "encoding/binary"
+ "log"
+ "os"
+)
+
+func main() {
+ // Generate precomputed p256 tables.
+ var pre [43][32 * 8]uint64
+ basePoint := []uint64{
+ 0x79e730d418a9143c, 0x75ba95fc5fedb601, 0x79fb732b77622510, 0x18905f76a53755c6,
+ 0xddf25357ce95560a, 0x8b4ab8e4ba19e45c, 0xd2e88688dd21f325, 0x8571ff1825885d85,
+ 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe,
+ }
+ t1 := make([]uint64, 12)
+ t2 := make([]uint64, 12)
+ copy(t2, basePoint)
+ zInv := make([]uint64, 4)
+ zInvSq := make([]uint64, 4)
+ for j := 0; j < 32; j++ {
+ copy(t1, t2)
+ for i := 0; i < 43; i++ {
+ // The window size is 6 so we need to double 6 times.
+ if i != 0 {
+ for k := 0; k < 6; k++ {
+ elliptic.P256PointDoubleAsm(t1, t1)
+ }
+ }
+ // Convert the point to affine form. (Its values are
+ // still in Montgomery form however.)
+ elliptic.P256Inverse(zInv, t1[8:12])
+ elliptic.P256Sqr(zInvSq, zInv, 1)
+ elliptic.P256Mul(zInv, zInv, zInvSq)
+ elliptic.P256Mul(t1[:4], t1[:4], zInvSq)
+ elliptic.P256Mul(t1[4:8], t1[4:8], zInv)
+ copy(t1[8:12], basePoint[8:12])
+ // Update the table entry
+ copy(pre[i][j*8:], t1[:8])
+ }
+ if j == 0 {
+ elliptic.P256PointDoubleAsm(t2, basePoint)
+ } else {
+ elliptic.P256PointAddAsm(t2, t2, basePoint)
+ }
+ }
+
+ var bin []byte
+
+ // Dump the precomputed tables, flattened, little-endian.
+ // These tables are used directly by assembly on little-endian platforms.
+// go:embed-ing the data into a string lets it be stored read-only.
+ for i := range &pre {
+ for _, v := range &pre[i] {
+ var u8 [8]byte
+ binary.LittleEndian.PutUint64(u8[:], v)
+ bin = append(bin, u8[:]...)
+ }
+ }
+
+ err := os.WriteFile("p256_asm_table.bin", bin, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
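
The table dimensions follow from the loop above: 43 six-bit windows (43·6 = 258 bits, enough to cover a 256-bit scalar after recoding), 32 precomputed multiples per window, and 8 uint64 words per entry (4 limbs each for the affine X and Y, still in Montgomery form). A quick check that this accounts for the 88064-byte p256_asm_table.bin in the diffstat:

    package main

    import "fmt"

    func main() {
        const (
            windows = 43 // six-bit windows: 43*6 = 258 bits
            entries = 32 // precomputed multiples stored per window
            words   = 8  // 4 uint64 limbs each for affine X and Y
            size    = 8  // bytes per uint64
        )
        fmt.Println(windows * entries * words * size) // 88064
    }
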
diff --git a/src/crypto/elliptic/internal/fiat/Dockerfile b/src/crypto/elliptic/internal/fiat/Dockerfile
new file mode 100644
index 0000000..2877e0b
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/Dockerfile
@@ -0,0 +1,12 @@
+# Copyright 2021 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+FROM coqorg/coq:8.13.2
+
+RUN git clone https://github.com/mit-plv/fiat-crypto && cd fiat-crypto && \
+ git checkout 23d2dbc4ab897d14bde4404f70cd6991635f9c01 && \
+ git submodule update --init --recursive
+RUN cd fiat-crypto && eval $(opam env) && make -j4 standalone-ocaml SKIP_BEDROCK2=1
+
+ENV PATH /home/coq/fiat-crypto/src/ExtractionOCaml:$PATH
diff --git a/src/crypto/elliptic/internal/fiat/README b/src/crypto/elliptic/internal/fiat/README
new file mode 100644
index 0000000..916ebc1
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/README
@@ -0,0 +1,34 @@
+The code in this package was autogenerated by the fiat-crypto project
+at version v0.0.9 from a formally verified model, and by the addchain
+project at a recent tip version.
+
+ docker build -t fiat-crypto:v0.0.9 .
+ go install github.com/mmcloughlin/addchain/cmd/addchain@v0.3.1-0.20211027081849-6a7d3decbe08
+ ../../../../../bin/go run generate.go
+
+fiat-crypto code comes under the following license.
+
+ Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+ Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The authors are listed at
+
+ https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS
diff --git a/src/crypto/elliptic/internal/fiat/fiat_test.go b/src/crypto/elliptic/internal/fiat/fiat_test.go
new file mode 100644
index 0000000..9ecd863
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/fiat_test.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fiat_test
+
+import (
+ "crypto/elliptic/internal/fiat"
+ "testing"
+)
+
+func BenchmarkMul(b *testing.B) {
+ b.Run("P224", func(b *testing.B) {
+ v := new(fiat.P224Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Mul(v, v)
+ }
+ })
+ b.Run("P384", func(b *testing.B) {
+ v := new(fiat.P384Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Mul(v, v)
+ }
+ })
+ b.Run("P521", func(b *testing.B) {
+ v := new(fiat.P521Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Mul(v, v)
+ }
+ })
+}
+
+func BenchmarkSquare(b *testing.B) {
+ b.Run("P224", func(b *testing.B) {
+ v := new(fiat.P224Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Square(v)
+ }
+ })
+ b.Run("P384", func(b *testing.B) {
+ v := new(fiat.P384Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Square(v)
+ }
+ })
+ b.Run("P521", func(b *testing.B) {
+ v := new(fiat.P521Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Square(v)
+ }
+ })
+}
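
From the Go source tree, these field-arithmetic microbenchmarks run with the usual invocation:

    go test -bench . crypto/elliptic/internal/fiat
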
diff --git a/src/crypto/elliptic/internal/fiat/generate.go b/src/crypto/elliptic/internal/fiat/generate.go
new file mode 100644
index 0000000..fd8509d
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/generate.go
@@ -0,0 +1,330 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+ "bytes"
+ "go/format"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "text/template"
+)
+
+var curves = []struct {
+ Element string
+ Prime string
+ Prefix string
+ FiatType string
+ BytesLen int
+}{
+ {
+ Element: "P224Element",
+ Prime: "2^224 - 2^96 + 1",
+ Prefix: "p224",
+ FiatType: "[4]uint64",
+ BytesLen: 28,
+ },
+ // The 32-bit pure Go P-256 in crypto/elliptic is still faster than the
+ // autogenerated code here, regrettably.
+ // {
+ // Element: "P256Element",
+ // Prime: "2^256 - 2^224 + 2^192 + 2^96 - 1",
+ // Prefix: "p256",
+ // FiatType: "[4]uint64",
+ // BytesLen: 32,
+ // },
+ {
+ Element: "P384Element",
+ Prime: "2^384 - 2^128 - 2^96 + 2^32 - 1",
+ Prefix: "p384",
+ FiatType: "[6]uint64",
+ BytesLen: 48,
+ },
+ // Note that unsaturated_solinas would be about 2x faster than
+ // word_by_word_montgomery for P-521, but this curve is used rarely enough
+ // that it's not worth carrying unsaturated_solinas support for it.
+ {
+ Element: "P521Element",
+ Prime: "2^521 - 1",
+ Prefix: "p521",
+ FiatType: "[9]uint64",
+ BytesLen: 66,
+ },
+}
+
+func main() {
+ t := template.Must(template.New("montgomery").Parse(tmplWrapper))
+
+ tmplAddchainFile, err := os.CreateTemp("", "addchain-template")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.Remove(tmplAddchainFile.Name())
+ if _, err := io.WriteString(tmplAddchainFile, tmplAddchain); err != nil {
+ log.Fatal(err)
+ }
+ if err := tmplAddchainFile.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ for _, c := range curves {
+ log.Printf("Generating %s.go...", c.Prefix)
+ f, err := os.Create(c.Prefix + ".go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := t.Execute(f, c); err != nil {
+ log.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ log.Printf("Generating %s_fiat64.go...", c.Prefix)
+ cmd := exec.Command("docker", "run", "--rm", "--entrypoint", "word_by_word_montgomery",
+ "fiat-crypto:v0.0.9", "--lang", "Go", "--no-wide-int", "--cmovznz-by-mul",
+ "--relax-primitive-carry-to-bitwidth", "32,64", "--internal-static",
+ "--public-function-case", "camelCase", "--public-type-case", "camelCase",
+ "--private-function-case", "camelCase", "--private-type-case", "camelCase",
+ "--doc-text-before-function-name", "", "--doc-newline-before-package-declaration",
+ "--doc-prepend-header", "Code generated by Fiat Cryptography. DO NOT EDIT.",
+ "--package-name", "fiat", "--no-prefix-fiat", c.Prefix, "64", c.Prime,
+ "mul", "square", "add", "sub", "one", "from_montgomery", "to_montgomery",
+ "selectznz", "to_bytes", "from_bytes")
+ cmd.Stderr = os.Stderr
+ out, err := cmd.Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+ out, err = format.Source(out)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := os.WriteFile(c.Prefix+"_fiat64.go", out, 0644); err != nil {
+ log.Fatal(err)
+ }
+
+ log.Printf("Generating %s_invert.go...", c.Prefix)
+ f, err = os.CreateTemp("", "addchain-"+c.Prefix)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.Remove(f.Name())
+ cmd = exec.Command("addchain", "search", c.Prime+" - 2")
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = f
+ if err := cmd.Run(); err != nil {
+ log.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatal(err)
+ }
+ cmd = exec.Command("addchain", "gen", "-tmpl", tmplAddchainFile.Name(), f.Name())
+ cmd.Stderr = os.Stderr
+ out, err = cmd.Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+ out = bytes.Replace(out, []byte("Element"), []byte(c.Element), -1)
+ out, err = format.Source(out)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := os.WriteFile(c.Prefix+"_invert.go", out, 0644); err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+const tmplWrapper = `// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// {{ .Element }} is an integer modulo {{ .Prime }}.
+//
+// The zero value is a valid zero element.
+type {{ .Element }} struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x {{ .Prefix }}MontgomeryDomainFieldElement
+}
+
+const {{ .Prefix }}ElementLen = {{ .BytesLen }}
+
+type {{ .Prefix }}UntypedFieldElement = {{ .FiatType }}
+
+// One sets e = 1, and returns e.
+func (e *{{ .Element }}) One() *{{ .Element }} {
+ {{ .Prefix }}SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *{{ .Element }}) Equal(t *{{ .Element }}) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+var {{ .Prefix }}ZeroEncoding = new({{ .Element }}).Bytes()
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *{{ .Element }}) IsZero() int {
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, {{ .Prefix }}ZeroEncoding)
+}
+
+// Set sets e = t, and returns e.
+func (e *{{ .Element }}) Set(t *{{ .Element }}) *{{ .Element }} {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the {{ .BytesLen }}-byte big-endian encoding of e.
+func (e *{{ .Element }}) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [{{ .Prefix }}ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *{{ .Element }}) bytes(out *[{{ .Prefix }}ElementLen]byte) []byte {
+ var tmp {{ .Prefix }}NonMontgomeryDomainFieldElement
+ {{ .Prefix }}FromMontgomery(&tmp, &e.x)
+ {{ .Prefix }}ToBytes(out, (*{{ .Prefix }}UntypedFieldElement)(&tmp))
+ {{ .Prefix }}InvertEndianness(out[:])
+ return out[:]
+}
+
+// {{ .Prefix }}MinusOneEncoding is the encoding of -1 mod p, so p - 1, the
+// highest canonical encoding. It is used by SetBytes to check for non-canonical
+// encodings such as p + k, 2p + k, etc.
+var {{ .Prefix }}MinusOneEncoding = new({{ .Element }}).Sub(
+ new({{ .Element }}), new({{ .Element }}).One()).Bytes()
+
+// SetBytes sets e = v, where v is a big-endian {{ .BytesLen }}-byte encoding, and returns e.
+// If v is not {{ .BytesLen }} bytes or it encodes a value higher than {{ .Prime }},
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *{{ .Element }}) SetBytes(v []byte) (*{{ .Element }}, error) {
+ if len(v) != {{ .Prefix }}ElementLen {
+ return nil, errors.New("invalid {{ .Element }} encoding")
+ }
+ for i := range v {
+ if v[i] < {{ .Prefix }}MinusOneEncoding[i] {
+ break
+ }
+ if v[i] > {{ .Prefix }}MinusOneEncoding[i] {
+ return nil, errors.New("invalid {{ .Element }} encoding")
+ }
+ }
+ var in [{{ .Prefix }}ElementLen]byte
+ copy(in[:], v)
+ {{ .Prefix }}InvertEndianness(in[:])
+ var tmp {{ .Prefix }}NonMontgomeryDomainFieldElement
+ {{ .Prefix }}FromBytes((*{{ .Prefix }}UntypedFieldElement)(&tmp), &in)
+ {{ .Prefix }}ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
+
+// Add sets e = t1 + t2, and returns e.
+func (e *{{ .Element }}) Add(t1, t2 *{{ .Element }}) *{{ .Element }} {
+ {{ .Prefix }}Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *{{ .Element }}) Sub(t1, t2 *{{ .Element }}) *{{ .Element }} {
+ {{ .Prefix }}Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *{{ .Element }}) Mul(t1, t2 *{{ .Element }}) *{{ .Element }} {
+ {{ .Prefix }}Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *{{ .Element }}) Square(t *{{ .Element }}) *{{ .Element }} {
+ {{ .Prefix }}Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *{{ .Element }}) Select(a, b *{{ .Element }}, cond int) *{{ .Element }} {
+ {{ .Prefix }}Selectznz((*{{ .Prefix }}UntypedFieldElement)(&v.x), {{ .Prefix }}Uint1(cond),
+ (*{{ .Prefix }}UntypedFieldElement)(&b.x), (*{{ .Prefix }}UntypedFieldElement)(&a.x))
+ return v
+}
+
+func {{ .Prefix }}InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
+`
+
+const tmplAddchain = `// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by {{ .Meta.Name }}. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *Element) Invert(x *Element) *Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of {{ .Ops.Adds }} multiplications and {{ .Ops.Doubles }} squarings is derived from the
+ // following addition chain generated with {{ .Meta.Module }} {{ .Meta.ReleaseTag }}.
+ //
+ {{- range lines (format .Script) }}
+ // {{ . }}
+ {{- end }}
+ //
+
+ var z = new(Element).Set(e)
+ {{- range .Program.Temporaries }}
+ var {{ . }} = new(Element)
+ {{- end }}
+ {{ range $i := .Program.Instructions -}}
+ {{- with add $i.Op }}
+ {{ $i.Output }}.Mul({{ .X }}, {{ .Y }})
+ {{- end -}}
+
+ {{- with double $i.Op }}
+ {{ $i.Output }}.Square({{ .X }})
+ {{- end -}}
+
+ {{- with shift $i.Op -}}
+ {{- $first := 0 -}}
+ {{- if ne $i.Output.Identifier .X.Identifier }}
+ {{ $i.Output }}.Square({{ .X }})
+ {{- $first = 1 -}}
+ {{- end }}
+ for s := {{ $first }}; s < {{ .S }}; s++ {
+ {{ $i.Output }}.Square({{ $i.Output }})
+ }
+ {{- end -}}
+ {{- end }}
+
+ return e.Set(z)
+}
+`
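
The SetBytes wrapper in the template rejects non-canonical encodings (p + k, 2p + k, ...) by comparing the input byte-wise, most significant byte first, against the encoding of p - 1. A minimal sketch of that comparison in isolation (names here are illustrative, not from the generated code):

    package main

    import "fmt"

    // lessOrEqual reports whether big-endian v <= big-endian max, scanning
    // from the most significant byte, mirroring the loop in SetBytes.
    func lessOrEqual(v, max []byte) bool {
        for i := range v {
            if v[i] < max[i] {
                return true // strictly smaller at the first differing byte
            }
            if v[i] > max[i] {
                return false // strictly larger: non-canonical, reject
            }
        }
        return true // equal to max is still canonical
    }

    func main() {
        max := []byte{0x02, 0xff} // stand-in for the p-1 encoding
        fmt.Println(lessOrEqual([]byte{0x02, 0xfe}, max)) // true
        fmt.Println(lessOrEqual([]byte{0x03, 0x00}, max)) // false
    }
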
diff --git a/src/crypto/elliptic/internal/fiat/p224.go b/src/crypto/elliptic/internal/fiat/p224.go
new file mode 100644
index 0000000..4dddeb0
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/p224.go
@@ -0,0 +1,135 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// P224Element is an integer modulo 2^224 - 2^96 + 1.
+//
+// The zero value is a valid zero element.
+type P224Element struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x p224MontgomeryDomainFieldElement
+}
+
+const p224ElementLen = 28
+
+type p224UntypedFieldElement = [4]uint64
+
+// One sets e = 1, and returns e.
+func (e *P224Element) One() *P224Element {
+ p224SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *P224Element) Equal(t *P224Element) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+var p224ZeroEncoding = new(P224Element).Bytes()
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *P224Element) IsZero() int {
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, p224ZeroEncoding)
+}
+
+// Set sets e = t, and returns e.
+func (e *P224Element) Set(t *P224Element) *P224Element {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the 28-byte big-endian encoding of e.
+func (e *P224Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p224ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *P224Element) bytes(out *[p224ElementLen]byte) []byte {
+ var tmp p224NonMontgomeryDomainFieldElement
+ p224FromMontgomery(&tmp, &e.x)
+ p224ToBytes(out, (*p224UntypedFieldElement)(&tmp))
+ p224InvertEndianness(out[:])
+ return out[:]
+}
+
+// p224MinusOneEncoding is the encoding of -1 mod p, so p - 1, the
+// highest canonical encoding. It is used by SetBytes to check for non-canonical
+// encodings such as p + k, 2p + k, etc.
+var p224MinusOneEncoding = new(P224Element).Sub(
+ new(P224Element), new(P224Element).One()).Bytes()
+
+// SetBytes sets e = v, where v is a big-endian 28-byte encoding, and returns e.
+// If v is not 28 bytes or it encodes a value higher than 2^224 - 2^96 + 1,
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *P224Element) SetBytes(v []byte) (*P224Element, error) {
+ if len(v) != p224ElementLen {
+ return nil, errors.New("invalid P224Element encoding")
+ }
+ for i := range v {
+ if v[i] < p224MinusOneEncoding[i] {
+ break
+ }
+ if v[i] > p224MinusOneEncoding[i] {
+ return nil, errors.New("invalid P224Element encoding")
+ }
+ }
+ var in [p224ElementLen]byte
+ copy(in[:], v)
+ p224InvertEndianness(in[:])
+ var tmp p224NonMontgomeryDomainFieldElement
+ p224FromBytes((*p224UntypedFieldElement)(&tmp), &in)
+ p224ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
+
+// Add sets e = t1 + t2, and returns e.
+func (e *P224Element) Add(t1, t2 *P224Element) *P224Element {
+ p224Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *P224Element) Sub(t1, t2 *P224Element) *P224Element {
+ p224Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *P224Element) Mul(t1, t2 *P224Element) *P224Element {
+ p224Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *P224Element) Square(t *P224Element) *P224Element {
+ p224Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *P224Element) Select(a, b *P224Element, cond int) *P224Element {
+ p224Selectznz((*p224UntypedFieldElement)(&v.x), p224Uint1(cond),
+ (*p224UntypedFieldElement)(&b.x), (*p224UntypedFieldElement)(&a.x))
+ return v
+}
+
+func p224InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
diff --git a/src/crypto/elliptic/internal/fiat/p224_fiat64.go b/src/crypto/elliptic/internal/fiat/p224_fiat64.go
new file mode 100644
index 0000000..4ece3e9
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/p224_fiat64.go
@@ -0,0 +1,1429 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p224 64 '2^224 - 2^96 + 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p224
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0xffffffffffffffffffffffffffffffff000000000000000000000001 (from "2^224 - 2^96 + 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package fiat
+
+import "math/bits"
+
+type p224Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p224Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p224MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p224MontgomeryDomainFieldElement [4]uint64
+
+// The type p224NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p224NonMontgomeryDomainFieldElement [4]uint64
+
+// p224CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+// Output Bounds:
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p224CmovznzU64(out1 *uint64, arg1 p224Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
+
+// p224Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+// Postconditions:
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+//
+func p224Mul(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
+ x19 := (uint64(p224Uint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0xffffffff00000000)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p224Uint1(x29)))
+ x32 := (uint64(p224Uint1(x31)) + x23)
+ var x34 uint64
+ _, x34 = bits.Add64(x11, x20, uint64(0x0))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x13, x26, uint64(p224Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x15, x28, uint64(p224Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x17, x30, uint64(p224Uint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x41, x42 = bits.Add64(x19, x32, uint64(p224Uint1(x40)))
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[3])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[2])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg2[1])
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x1, arg2[0])
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x50, x47, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x48, x45, uint64(p224Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x46, x43, uint64(p224Uint1(x54)))
+ x57 := (uint64(p224Uint1(x56)) + x44)
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(0x0))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p224Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(p224Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x41, x55, uint64(p224Uint1(x63)))
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(uint64(p224Uint1(x42)), x57, uint64(p224Uint1(x65)))
+ var x68 uint64
+ _, x68 = bits.Mul64(x58, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x68, 0xffffffff)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x68, 0xffffffffffffffff)
+ var x74 uint64
+ var x75 uint64
+ x75, x74 = bits.Mul64(x68, 0xffffffff00000000)
+ var x76 uint64
+ var x77 uint64
+ x76, x77 = bits.Add64(x75, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x73, x70, uint64(p224Uint1(x77)))
+ x80 := (uint64(p224Uint1(x79)) + x71)
+ var x82 uint64
+ _, x82 = bits.Add64(x58, x68, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x60, x74, uint64(p224Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x62, x76, uint64(p224Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x64, x78, uint64(p224Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x66, x80, uint64(p224Uint1(x88)))
+ x91 := (uint64(p224Uint1(x90)) + uint64(p224Uint1(x67)))
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg2[3])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg2[2])
+ var x96 uint64
+ var x97 uint64
+ x97, x96 = bits.Mul64(x2, arg2[1])
+ var x98 uint64
+ var x99 uint64
+ x99, x98 = bits.Mul64(x2, arg2[0])
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x99, x96, uint64(0x0))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x97, x94, uint64(p224Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(x95, x92, uint64(p224Uint1(x103)))
+ x106 := (uint64(p224Uint1(x105)) + x93)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(0x0))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(p224Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(p224Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x89, x104, uint64(p224Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x91, x106, uint64(p224Uint1(x114)))
+ var x117 uint64
+ _, x117 = bits.Mul64(x107, 0xffffffffffffffff)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x117, 0xffffffff)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x117, 0xffffffffffffffff)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x117, 0xffffffff00000000)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(p224Uint1(x126)))
+ x129 := (uint64(p224Uint1(x128)) + x120)
+ var x131 uint64
+ _, x131 = bits.Add64(x107, x117, uint64(0x0))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x109, x123, uint64(p224Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x111, x125, uint64(p224Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x113, x127, uint64(p224Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x138, x139 = bits.Add64(x115, x129, uint64(p224Uint1(x137)))
+ x140 := (uint64(p224Uint1(x139)) + uint64(p224Uint1(x116)))
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg2[3])
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x3, arg2[2])
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x3, arg2[1])
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x3, arg2[0])
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x148, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x146, x143, uint64(p224Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x144, x141, uint64(p224Uint1(x152)))
+ x155 := (uint64(p224Uint1(x154)) + x142)
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(0x0))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(p224Uint1(x157)))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x136, x151, uint64(p224Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x138, x153, uint64(p224Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x140, x155, uint64(p224Uint1(x163)))
+ var x166 uint64
+ _, x166 = bits.Mul64(x156, 0xffffffffffffffff)
+ var x168 uint64
+ var x169 uint64
+ x169, x168 = bits.Mul64(x166, 0xffffffff)
+ var x170 uint64
+ var x171 uint64
+ x171, x170 = bits.Mul64(x166, 0xffffffffffffffff)
+ var x172 uint64
+ var x173 uint64
+ x173, x172 = bits.Mul64(x166, 0xffffffff00000000)
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x173, x170, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Add64(x171, x168, uint64(p224Uint1(x175)))
+ x178 := (uint64(p224Uint1(x177)) + x169)
+ var x180 uint64
+ _, x180 = bits.Add64(x156, x166, uint64(0x0))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x158, x172, uint64(p224Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x160, x174, uint64(p224Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x162, x176, uint64(p224Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x164, x178, uint64(p224Uint1(x186)))
+ x189 := (uint64(p224Uint1(x188)) + uint64(p224Uint1(x165)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Sub64(x181, uint64(0x1), uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Sub64(x183, 0xffffffff00000000, uint64(p224Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Sub64(x185, 0xffffffffffffffff, uint64(p224Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Sub64(x187, 0xffffffff, uint64(p224Uint1(x195)))
+ var x199 uint64
+ _, x199 = bits.Sub64(x189, uint64(0x0), uint64(p224Uint1(x197)))
+ var x200 uint64
+ p224CmovznzU64(&x200, p224Uint1(x199), x190, x181)
+ var x201 uint64
+ p224CmovznzU64(&x201, p224Uint1(x199), x192, x183)
+ var x202 uint64
+ p224CmovznzU64(&x202, p224Uint1(x199), x194, x185)
+ var x203 uint64
+ p224CmovznzU64(&x203, p224Uint1(x199), x196, x187)
+ out1[0] = x200
+ out1[1] = x201
+ out1[2] = x202
+ out1[3] = x203
+}
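Each of the four passes above interleaves one schoolbook step (one limb of arg1 times all of arg2) with one Montgomery reduction step; the reduction multiplier 0xffffffffffffffff is -p⁻¹ mod 2⁶⁴, which for P-224 is -1 because p = 2²²⁴ - 2⁹⁶ + 1 ≡ 1 (mod 2⁶⁴). What the routine computes end to end can be cross-checked with math/big (a sketch for intuition only; big.Int, unlike the generated code, is not constant-time):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^224 - 2^96 + 1, R = 2^256 (four 64-bit limbs).
	p, _ := new(big.Int).SetString("ffffffffffffffffffffffffffffffff000000000000000000000001", 16)
	R := new(big.Int).Lsh(big.NewInt(1), 256)
	Rinv := new(big.Int).ModInverse(R, p)

	a, b := big.NewInt(12345), big.NewInt(67890)
	aR := new(big.Int).Mod(new(big.Int).Mul(a, R), p) // a in Montgomery form
	bR := new(big.Int).Mod(new(big.Int).Mul(b, R), p) // b in Montgomery form

	// Montgomery multiplication: aR * bR * R^-1 mod p = (a*b)R mod p,
	// so products of Montgomery-form inputs stay in Montgomery form.
	got := new(big.Int).Mod(new(big.Int).Mul(new(big.Int).Mul(aR, bR), Rinv), p)
	want := new(big.Int).Mod(new(big.Int).Mul(new(big.Int).Mul(a, b), R), p)
	fmt.Println(got.Cmp(want) == 0) // true
}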
+
+// p224Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// Postconditions:
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+// 0 ≤ eval out1 < m
+//
+func p224Square(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg1[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg1[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg1[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg1[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
+ x19 := (uint64(p224Uint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0xffffffff00000000)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p224Uint1(x29)))
+ x32 := (uint64(p224Uint1(x31)) + x23)
+ var x34 uint64
+ _, x34 = bits.Add64(x11, x20, uint64(0x0))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x13, x26, uint64(p224Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x15, x28, uint64(p224Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x17, x30, uint64(p224Uint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x41, x42 = bits.Add64(x19, x32, uint64(p224Uint1(x40)))
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg1[3])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg1[2])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg1[1])
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x1, arg1[0])
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x50, x47, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x48, x45, uint64(p224Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x46, x43, uint64(p224Uint1(x54)))
+ x57 := (uint64(p224Uint1(x56)) + x44)
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(0x0))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p224Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(p224Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x41, x55, uint64(p224Uint1(x63)))
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(uint64(p224Uint1(x42)), x57, uint64(p224Uint1(x65)))
+ var x68 uint64
+ _, x68 = bits.Mul64(x58, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x68, 0xffffffff)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x68, 0xffffffffffffffff)
+ var x74 uint64
+ var x75 uint64
+ x75, x74 = bits.Mul64(x68, 0xffffffff00000000)
+ var x76 uint64
+ var x77 uint64
+ x76, x77 = bits.Add64(x75, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x73, x70, uint64(p224Uint1(x77)))
+ x80 := (uint64(p224Uint1(x79)) + x71)
+ var x82 uint64
+ _, x82 = bits.Add64(x58, x68, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x60, x74, uint64(p224Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x62, x76, uint64(p224Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x64, x78, uint64(p224Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x66, x80, uint64(p224Uint1(x88)))
+ x91 := (uint64(p224Uint1(x90)) + uint64(p224Uint1(x67)))
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg1[3])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg1[2])
+ var x96 uint64
+ var x97 uint64
+ x97, x96 = bits.Mul64(x2, arg1[1])
+ var x98 uint64
+ var x99 uint64
+ x99, x98 = bits.Mul64(x2, arg1[0])
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x99, x96, uint64(0x0))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x97, x94, uint64(p224Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(x95, x92, uint64(p224Uint1(x103)))
+ x106 := (uint64(p224Uint1(x105)) + x93)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(0x0))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(p224Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(p224Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x89, x104, uint64(p224Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x91, x106, uint64(p224Uint1(x114)))
+ var x117 uint64
+ _, x117 = bits.Mul64(x107, 0xffffffffffffffff)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x117, 0xffffffff)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x117, 0xffffffffffffffff)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x117, 0xffffffff00000000)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(p224Uint1(x126)))
+ x129 := (uint64(p224Uint1(x128)) + x120)
+ var x131 uint64
+ _, x131 = bits.Add64(x107, x117, uint64(0x0))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x109, x123, uint64(p224Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x111, x125, uint64(p224Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x113, x127, uint64(p224Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x138, x139 = bits.Add64(x115, x129, uint64(p224Uint1(x137)))
+ x140 := (uint64(p224Uint1(x139)) + uint64(p224Uint1(x116)))
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg1[3])
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x3, arg1[2])
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x3, arg1[1])
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x3, arg1[0])
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x148, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x146, x143, uint64(p224Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x144, x141, uint64(p224Uint1(x152)))
+ x155 := (uint64(p224Uint1(x154)) + x142)
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(0x0))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(p224Uint1(x157)))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x136, x151, uint64(p224Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x138, x153, uint64(p224Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x140, x155, uint64(p224Uint1(x163)))
+ var x166 uint64
+ _, x166 = bits.Mul64(x156, 0xffffffffffffffff)
+ var x168 uint64
+ var x169 uint64
+ x169, x168 = bits.Mul64(x166, 0xffffffff)
+ var x170 uint64
+ var x171 uint64
+ x171, x170 = bits.Mul64(x166, 0xffffffffffffffff)
+ var x172 uint64
+ var x173 uint64
+ x173, x172 = bits.Mul64(x166, 0xffffffff00000000)
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x173, x170, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Add64(x171, x168, uint64(p224Uint1(x175)))
+ x178 := (uint64(p224Uint1(x177)) + x169)
+ var x180 uint64
+ _, x180 = bits.Add64(x156, x166, uint64(0x0))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x158, x172, uint64(p224Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x160, x174, uint64(p224Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x162, x176, uint64(p224Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x164, x178, uint64(p224Uint1(x186)))
+ x189 := (uint64(p224Uint1(x188)) + uint64(p224Uint1(x165)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Sub64(x181, uint64(0x1), uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Sub64(x183, 0xffffffff00000000, uint64(p224Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Sub64(x185, 0xffffffffffffffff, uint64(p224Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Sub64(x187, 0xffffffff, uint64(p224Uint1(x195)))
+ var x199 uint64
+ _, x199 = bits.Sub64(x189, uint64(0x0), uint64(p224Uint1(x197)))
+ var x200 uint64
+ p224CmovznzU64(&x200, p224Uint1(x199), x190, x181)
+ var x201 uint64
+ p224CmovznzU64(&x201, p224Uint1(x199), x192, x183)
+ var x202 uint64
+ p224CmovznzU64(&x202, p224Uint1(x199), x194, x185)
+ var x203 uint64
+ p224CmovznzU64(&x203, p224Uint1(x199), x196, x187)
+ out1[0] = x200
+ out1[1] = x201
+ out1[2] = x202
+ out1[3] = x203
+}
+
+// p224Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+// Postconditions:
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+//
+func p224Add(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p224Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p224Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p224Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, uint64(0x1), uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0xffffffff00000000, uint64(p224Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, 0xffffffffffffffff, uint64(p224Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0xffffffff, uint64(p224Uint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(p224Uint1(x8)), uint64(0x0), uint64(p224Uint1(x16)))
+ var x19 uint64
+ p224CmovznzU64(&x19, p224Uint1(x18), x9, x1)
+ var x20 uint64
+ p224CmovznzU64(&x20, p224Uint1(x18), x11, x3)
+ var x21 uint64
+ p224CmovznzU64(&x21, p224Uint1(x18), x13, x5)
+ var x22 uint64
+ p224CmovznzU64(&x22, p224Uint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
+
+// p224Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+// Postconditions:
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+//
+func p224Sub(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p224Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p224Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p224Uint1(x6)))
+ var x9 uint64
+ p224CmovznzU64(&x9, p224Uint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, uint64((p224Uint1(x9) & 0x1)), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0xffffffff00000000), uint64(p224Uint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, x9, uint64(p224Uint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0xffffffff), uint64(p224Uint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
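p224Add and p224Sub share one shape: a full-width limb operation followed by a single correction by the modulus, selected in constant time from the final carry or borrow (the subtrahends and masked addends 0x1, 0xffffffff00000000, 0xffffffffffffffff, 0xffffffff are the little-endian limbs of p). A branching big.Int sketch of the same reductions (modAdd and modSub are hypothetical names):

package main

import (
	"fmt"
	"math/big"
)

// modAdd and modSub mirror the shapes of p224Add and p224Sub; the generated
// code replaces each branch below with a mask and p224CmovznzU64.
func modAdd(a, b, p *big.Int) *big.Int {
	s := new(big.Int).Add(a, b)
	if s.Cmp(p) >= 0 { // the limb version detects this via the trial subtraction's borrow
		s.Sub(s, p)
	}
	return s
}

func modSub(a, b, p *big.Int) *big.Int {
	d := new(big.Int).Sub(a, b)
	if d.Sign() < 0 { // the limb version detects this as a final borrow
		d.Add(d, p)
	}
	return d
}

func main() {
	p, _ := new(big.Int).SetString("ffffffffffffffffffffffffffffffff000000000000000000000001", 16)
	pMinus1 := new(big.Int).Sub(p, big.NewInt(1))
	fmt.Println(modAdd(pMinus1, big.NewInt(2), p))                         // 1
	fmt.Println(modSub(big.NewInt(1), big.NewInt(2), p).Cmp(pMinus1) == 0) // true: wraps to p-1
}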
+
+// p224SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+// eval (from_montgomery out1) mod m = 1 mod m
+// 0 ≤ eval out1 < m
+//
+func p224SetOne(out1 *p224MontgomeryDomainFieldElement) {
+ out1[0] = 0xffffffff00000000
+ out1[1] = 0xffffffffffffffff
+ out1[2] = uint64(0x0)
+ out1[3] = uint64(0x0)
+}
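The limbs above are not arbitrary: the Montgomery form of 1 is R mod p = 2²⁵⁶ mod p = 2¹²⁸ - 2³², whose little-endian limbs are exactly [0xffffffff00000000, 0xffffffffffffffff, 0, 0]. A quick big.Int check (sketch):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p, _ := new(big.Int).SetString("ffffffffffffffffffffffffffffffff000000000000000000000001", 16)
	one := new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 256), p) // R mod p
	fmt.Printf("%x\n", one) // ffffffffffffffffffffffff00000000 = 2^128 - 2^32
}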
+
+// p224FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// Postconditions:
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+//
+func p224FromMontgomery(out1 *p224NonMontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0xffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0xffffffff00000000)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x7, x4, uint64(p224Uint1(x11)))
+ var x15 uint64
+ _, x15 = bits.Add64(x1, x2, uint64(0x0))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(uint64(0x0), x8, uint64(p224Uint1(x15)))
+ var x18 uint64
+ var x19 uint64
+ x18, x19 = bits.Add64(uint64(0x0), x10, uint64(p224Uint1(x17)))
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(uint64(0x0), x12, uint64(p224Uint1(x19)))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x16, arg1[1], uint64(0x0))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x18, uint64(0x0), uint64(p224Uint1(x23)))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x20, uint64(0x0), uint64(p224Uint1(x25)))
+ var x28 uint64
+ _, x28 = bits.Mul64(x22, 0xffffffffffffffff)
+ var x30 uint64
+ var x31 uint64
+ x31, x30 = bits.Mul64(x28, 0xffffffff)
+ var x32 uint64
+ var x33 uint64
+ x33, x32 = bits.Mul64(x28, 0xffffffffffffffff)
+ var x34 uint64
+ var x35 uint64
+ x35, x34 = bits.Mul64(x28, 0xffffffff00000000)
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x35, x32, uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x33, x30, uint64(p224Uint1(x37)))
+ var x41 uint64
+ _, x41 = bits.Add64(x22, x28, uint64(0x0))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x24, x34, uint64(p224Uint1(x41)))
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x26, x36, uint64(p224Uint1(x43)))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64((uint64(p224Uint1(x27)) + (uint64(p224Uint1(x21)) + (uint64(p224Uint1(x13)) + x5))), x38, uint64(p224Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x42, arg1[2], uint64(0x0))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x44, uint64(0x0), uint64(p224Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(x46, uint64(0x0), uint64(p224Uint1(x51)))
+ var x54 uint64
+ _, x54 = bits.Mul64(x48, 0xffffffffffffffff)
+ var x56 uint64
+ var x57 uint64
+ x57, x56 = bits.Mul64(x54, 0xffffffff)
+ var x58 uint64
+ var x59 uint64
+ x59, x58 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x60 uint64
+ var x61 uint64
+ x61, x60 = bits.Mul64(x54, 0xffffffff00000000)
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x61, x58, uint64(0x0))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x59, x56, uint64(p224Uint1(x63)))
+ var x67 uint64
+ _, x67 = bits.Add64(x48, x54, uint64(0x0))
+ var x68 uint64
+ var x69 uint64
+ x68, x69 = bits.Add64(x50, x60, uint64(p224Uint1(x67)))
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x52, x62, uint64(p224Uint1(x69)))
+ var x72 uint64
+ var x73 uint64
+ x72, x73 = bits.Add64((uint64(p224Uint1(x53)) + (uint64(p224Uint1(x47)) + (uint64(p224Uint1(x39)) + x31))), x64, uint64(p224Uint1(x71)))
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x68, arg1[3], uint64(0x0))
+ var x76 uint64
+ var x77 uint64
+ x76, x77 = bits.Add64(x70, uint64(0x0), uint64(p224Uint1(x75)))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x72, uint64(0x0), uint64(p224Uint1(x77)))
+ var x80 uint64
+ _, x80 = bits.Mul64(x74, 0xffffffffffffffff)
+ var x82 uint64
+ var x83 uint64
+ x83, x82 = bits.Mul64(x80, 0xffffffff)
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x80, 0xffffffffffffffff)
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x80, 0xffffffff00000000)
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x87, x84, uint64(0x0))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x85, x82, uint64(p224Uint1(x89)))
+ var x93 uint64
+ _, x93 = bits.Add64(x74, x80, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x76, x86, uint64(p224Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x78, x88, uint64(p224Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64((uint64(p224Uint1(x79)) + (uint64(p224Uint1(x73)) + (uint64(p224Uint1(x65)) + x57))), x90, uint64(p224Uint1(x97)))
+ x100 := (uint64(p224Uint1(x99)) + (uint64(p224Uint1(x91)) + x83))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Sub64(x94, uint64(0x1), uint64(0x0))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Sub64(x96, 0xffffffff00000000, uint64(p224Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Sub64(x98, 0xffffffffffffffff, uint64(p224Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Sub64(x100, 0xffffffff, uint64(p224Uint1(x106)))
+ var x110 uint64
+ _, x110 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p224Uint1(x108)))
+ var x111 uint64
+ p224CmovznzU64(&x111, p224Uint1(x110), x101, x94)
+ var x112 uint64
+ p224CmovznzU64(&x112, p224Uint1(x110), x103, x96)
+ var x113 uint64
+ p224CmovznzU64(&x113, p224Uint1(x110), x105, x98)
+ var x114 uint64
+ p224CmovznzU64(&x114, p224Uint1(x110), x107, x100)
+ out1[0] = x111
+ out1[1] = x112
+ out1[2] = x113
+ out1[3] = x114
+}
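This is four rounds of word-by-word Montgomery reduction (REDC) with no second operand, so it computes arg1 · R⁻¹ mod p with R = 2²⁵⁶, which is where the ((2^64)⁻¹ mod m)^4 factor in the postcondition comes from. In big.Int terms (sketch, not constant-time):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p, _ := new(big.Int).SetString("ffffffffffffffffffffffffffffffff000000000000000000000001", 16)
	R := new(big.Int).Lsh(big.NewInt(1), 256)
	Rinv := new(big.Int).ModInverse(R, p)

	a := big.NewInt(42)
	aR := new(big.Int).Mod(new(big.Int).Mul(a, R), p) // to Montgomery form
	// fromMontgomery(aR) = aR * R^-1 mod p = a: the round trip is exact.
	fmt.Println(new(big.Int).Mod(new(big.Int).Mul(aR, Rinv), p)) // 42
}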
+
+// p224ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// Postconditions:
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+func p224ToMontgomery(out1 *p224MontgomeryDomainFieldElement, arg1 *p224NonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0xffffffff)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xfffffffe00000000)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xffffffff00000000)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0xffffffff00000001)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
+ var x19 uint64
+ _, x19 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x19, 0xffffffff)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x19, 0xffffffffffffffff)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x19, 0xffffffff00000000)
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Add64(x24, x21, uint64(p224Uint1(x28)))
+ var x32 uint64
+ _, x32 = bits.Add64(x11, x19, uint64(0x0))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x13, x25, uint64(p224Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x15, x27, uint64(p224Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x17, x29, uint64(p224Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xffffffff)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xfffffffe00000000)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0xffffffff00000000)
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, 0xffffffff00000001)
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x46, x43, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x44, x41, uint64(p224Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x42, x39, uint64(p224Uint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x33, x45, uint64(0x0))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x35, x47, uint64(p224Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x37, x49, uint64(p224Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(((uint64(p224Uint1(x38)) + (uint64(p224Uint1(x18)) + x6)) + (uint64(p224Uint1(x30)) + x22)), x51, uint64(p224Uint1(x58)))
+ var x61 uint64
+ _, x61 = bits.Mul64(x53, 0xffffffffffffffff)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x61, 0xffffffff)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x61, 0xffffffffffffffff)
+ var x67 uint64
+ var x68 uint64
+ x68, x67 = bits.Mul64(x61, 0xffffffff00000000)
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x68, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x66, x63, uint64(p224Uint1(x70)))
+ var x74 uint64
+ _, x74 = bits.Add64(x53, x61, uint64(0x0))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x55, x67, uint64(p224Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x57, x69, uint64(p224Uint1(x76)))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x59, x71, uint64(p224Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xffffffff)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0xfffffffe00000000)
+ var x85 uint64
+ var x86 uint64
+ x86, x85 = bits.Mul64(x2, 0xffffffff00000000)
+ var x87 uint64
+ var x88 uint64
+ x88, x87 = bits.Mul64(x2, 0xffffffff00000001)
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x88, x85, uint64(0x0))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x86, x83, uint64(p224Uint1(x90)))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x84, x81, uint64(p224Uint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x75, x87, uint64(0x0))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(x77, x89, uint64(p224Uint1(x96)))
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x79, x91, uint64(p224Uint1(x98)))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(((uint64(p224Uint1(x80)) + (uint64(p224Uint1(x60)) + (uint64(p224Uint1(x52)) + x40))) + (uint64(p224Uint1(x72)) + x64)), x93, uint64(p224Uint1(x100)))
+ var x103 uint64
+ _, x103 = bits.Mul64(x95, 0xffffffffffffffff)
+ var x105 uint64
+ var x106 uint64
+ x106, x105 = bits.Mul64(x103, 0xffffffff)
+ var x107 uint64
+ var x108 uint64
+ x108, x107 = bits.Mul64(x103, 0xffffffffffffffff)
+ var x109 uint64
+ var x110 uint64
+ x110, x109 = bits.Mul64(x103, 0xffffffff00000000)
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x110, x107, uint64(0x0))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x108, x105, uint64(p224Uint1(x112)))
+ var x116 uint64
+ _, x116 = bits.Add64(x95, x103, uint64(0x0))
+ var x117 uint64
+ var x118 uint64
+ x117, x118 = bits.Add64(x97, x109, uint64(p224Uint1(x116)))
+ var x119 uint64
+ var x120 uint64
+ x119, x120 = bits.Add64(x99, x111, uint64(p224Uint1(x118)))
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x101, x113, uint64(p224Uint1(x120)))
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0xffffffff)
+ var x125 uint64
+ var x126 uint64
+ x126, x125 = bits.Mul64(x3, 0xfffffffe00000000)
+ var x127 uint64
+ var x128 uint64
+ x128, x127 = bits.Mul64(x3, 0xffffffff00000000)
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x3, 0xffffffff00000001)
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x130, x127, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x128, x125, uint64(p224Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x126, x123, uint64(p224Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x117, x129, uint64(0x0))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x119, x131, uint64(p224Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x121, x133, uint64(p224Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(((uint64(p224Uint1(x122)) + (uint64(p224Uint1(x102)) + (uint64(p224Uint1(x94)) + x82))) + (uint64(p224Uint1(x114)) + x106)), x135, uint64(p224Uint1(x142)))
+ var x145 uint64
+ _, x145 = bits.Mul64(x137, 0xffffffffffffffff)
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x145, 0xffffffff)
+ var x149 uint64
+ var x150 uint64
+ x150, x149 = bits.Mul64(x145, 0xffffffffffffffff)
+ var x151 uint64
+ var x152 uint64
+ x152, x151 = bits.Mul64(x145, 0xffffffff00000000)
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x152, x149, uint64(0x0))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(x150, x147, uint64(p224Uint1(x154)))
+ var x158 uint64
+ _, x158 = bits.Add64(x137, x145, uint64(0x0))
+ var x159 uint64
+ var x160 uint64
+ x159, x160 = bits.Add64(x139, x151, uint64(p224Uint1(x158)))
+ var x161 uint64
+ var x162 uint64
+ x161, x162 = bits.Add64(x141, x153, uint64(p224Uint1(x160)))
+ var x163 uint64
+ var x164 uint64
+ x163, x164 = bits.Add64(x143, x155, uint64(p224Uint1(x162)))
+ x165 := ((uint64(p224Uint1(x164)) + (uint64(p224Uint1(x144)) + (uint64(p224Uint1(x136)) + x124))) + (uint64(p224Uint1(x156)) + x148))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Sub64(x159, uint64(0x1), uint64(0x0))
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Sub64(x161, 0xffffffff00000000, uint64(p224Uint1(x167)))
+ var x170 uint64
+ var x171 uint64
+ x170, x171 = bits.Sub64(x163, 0xffffffffffffffff, uint64(p224Uint1(x169)))
+ var x172 uint64
+ var x173 uint64
+ x172, x173 = bits.Sub64(x165, 0xffffffff, uint64(p224Uint1(x171)))
+ var x175 uint64
+ _, x175 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p224Uint1(x173)))
+ var x176 uint64
+ p224CmovznzU64(&x176, p224Uint1(x175), x166, x159)
+ var x177 uint64
+ p224CmovznzU64(&x177, p224Uint1(x175), x168, x161)
+ var x178 uint64
+ p224CmovznzU64(&x178, p224Uint1(x175), x170, x163)
+ var x179 uint64
+ p224CmovznzU64(&x179, p224Uint1(x175), x172, x165)
+ out1[0] = x176
+ out1[1] = x177
+ out1[2] = x178
+ out1[3] = x179
+}
+
+// p224Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// Output Bounds:
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p224Selectznz(out1 *[4]uint64, arg1 p224Uint1, arg2 *[4]uint64, arg3 *[4]uint64) {
+ var x1 uint64
+ p224CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p224CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p224CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p224CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+}
+
+// p224ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// Postconditions:
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..27]
+//
+// Input Bounds:
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffff]]
+// Output Bounds:
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+func p224ToBytes(out1 *[28]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := uint8((x50 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x52
+}
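The long chain of shifts and masks is a fully unrolled little-endian store of three and a half limbs: a reduced P-224 value is 28 bytes, and the top limb fits in 32 bits per the input bounds. An equivalent sketch using encoding/binary (limbs is a hypothetical stand-in for arg1); p224FromBytes below is the exact inverse, with Uint64/Uint32 reads in place of the Put writes:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	limbs := [4]uint64{1, 2, 3, 4} // the top limb must fit in 32 bits
	var out [28]byte
	binary.LittleEndian.PutUint64(out[0:8], limbs[0])
	binary.LittleEndian.PutUint64(out[8:16], limbs[1])
	binary.LittleEndian.PutUint64(out[16:24], limbs[2])
	binary.LittleEndian.PutUint32(out[24:28], uint32(limbs[3]))
	fmt.Println(out[:])
}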
+
+// p224FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+// 0 ≤ bytes_eval arg1 < m
+// Postconditions:
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+// Output Bounds:
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffff]]
+func p224FromBytes(out1 *[4]uint64, arg1 *[28]uint8) {
+ x1 := (uint64(arg1[27]) << 24)
+ x2 := (uint64(arg1[26]) << 16)
+ x3 := (uint64(arg1[25]) << 8)
+ x4 := arg1[24]
+ x5 := (uint64(arg1[23]) << 56)
+ x6 := (uint64(arg1[22]) << 48)
+ x7 := (uint64(arg1[21]) << 40)
+ x8 := (uint64(arg1[20]) << 32)
+ x9 := (uint64(arg1[19]) << 24)
+ x10 := (uint64(arg1[18]) << 16)
+ x11 := (uint64(arg1[17]) << 8)
+ x12 := arg1[16]
+ x13 := (uint64(arg1[15]) << 56)
+ x14 := (uint64(arg1[14]) << 48)
+ x15 := (uint64(arg1[13]) << 40)
+ x16 := (uint64(arg1[12]) << 32)
+ x17 := (uint64(arg1[11]) << 24)
+ x18 := (uint64(arg1[10]) << 16)
+ x19 := (uint64(arg1[9]) << 8)
+ x20 := arg1[8]
+ x21 := (uint64(arg1[7]) << 56)
+ x22 := (uint64(arg1[6]) << 48)
+ x23 := (uint64(arg1[5]) << 40)
+ x24 := (uint64(arg1[4]) << 32)
+ x25 := (uint64(arg1[3]) << 24)
+ x26 := (uint64(arg1[2]) << 16)
+ x27 := (uint64(arg1[1]) << 8)
+ x28 := arg1[0]
+ x29 := (x27 + uint64(x28))
+ x30 := (x26 + x29)
+ x31 := (x25 + x30)
+ x32 := (x24 + x31)
+ x33 := (x23 + x32)
+ x34 := (x22 + x33)
+ x35 := (x21 + x34)
+ x36 := (x19 + uint64(x20))
+ x37 := (x18 + x36)
+ x38 := (x17 + x37)
+ x39 := (x16 + x38)
+ x40 := (x15 + x39)
+ x41 := (x14 + x40)
+ x42 := (x13 + x41)
+ x43 := (x11 + uint64(x12))
+ x44 := (x10 + x43)
+ x45 := (x9 + x44)
+ x46 := (x8 + x45)
+ x47 := (x7 + x46)
+ x48 := (x6 + x47)
+ x49 := (x5 + x48)
+ x50 := (x3 + uint64(x4))
+ x51 := (x2 + x50)
+ x52 := (x1 + x51)
+ out1[0] = x35
+ out1[1] = x42
+ out1[2] = x49
+ out1[3] = x52
+}
diff --git a/src/crypto/elliptic/internal/fiat/p224_invert.go b/src/crypto/elliptic/internal/fiat/p224_invert.go
new file mode 100644
index 0000000..4163ed0
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/p224_invert.go
@@ -0,0 +1,87 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by addchain. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *P224Element) Invert(x *P224Element) *P224Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of 11 multiplications and 223 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.3.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // x12 = _111111 << 6 + _111111
+ // x14 = x12 << 2 + _11
+ // x17 = x14 << 3 + _111
+ // x31 = x17 << 14 + x14
+ // x48 = x31 << 17 + x17
+ // x96 = x48 << 48 + x48
+ // x127 = x96 << 31 + x31
+ // return x127 << 97 + x96
+ //
+
+ var z = new(P224Element).Set(e)
+ var t0 = new(P224Element)
+ var t1 = new(P224Element)
+ var t2 = new(P224Element)
+
+ z.Square(x)
+ t0.Mul(x, z)
+ z.Square(t0)
+ z.Mul(x, z)
+ t1.Square(z)
+ for s := 1; s < 3; s++ {
+ t1.Square(t1)
+ }
+ t1.Mul(z, t1)
+ t2.Square(t1)
+ for s := 1; s < 6; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ for s := 0; s < 2; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 3; s++ {
+ t1.Square(t1)
+ }
+ z.Mul(z, t1)
+ t1.Square(z)
+ for s := 1; s < 14; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 17; s++ {
+ t1.Square(t1)
+ }
+ z.Mul(z, t1)
+ t1.Square(z)
+ for s := 1; s < 48; s++ {
+ t1.Square(t1)
+ }
+ z.Mul(z, t1)
+ t1.Square(z)
+ for s := 1; s < 31; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 97; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+
+ return e.Set(z)
+}
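The fixed sequence above evaluates x^(p-2) with the stated 223 squarings and 11 multiplications, keeping inversion constant-time and free of secret-dependent memory access. Fermat's little theorem is the correctness argument; a big.Int cross-check of the underlying identity (sketch):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p, _ := new(big.Int).SetString("ffffffffffffffffffffffffffffffff000000000000000000000001", 16)
	x := big.NewInt(42)
	// Fermat: x^(p-2) ≡ x^-1 (mod p) for prime p and x not divisible by p.
	exp := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)
	inv := new(big.Int).ModInverse(x, p)
	fmt.Println(exp.Cmp(inv) == 0) // true
}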
diff --git a/src/crypto/elliptic/internal/fiat/p384.go b/src/crypto/elliptic/internal/fiat/p384.go
new file mode 100644
index 0000000..5474d77
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/p384.go
@@ -0,0 +1,135 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// P384Element is an integer modulo 2^384 - 2^128 - 2^96 + 2^32 - 1.
+//
+// The zero value is a valid zero element.
+type P384Element struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x p384MontgomeryDomainFieldElement
+}
+
+const p384ElementLen = 48
+
+type p384UntypedFieldElement = [6]uint64
+
+// One sets e = 1, and returns e.
+func (e *P384Element) One() *P384Element {
+ p384SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *P384Element) Equal(t *P384Element) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+var p384ZeroEncoding = new(P384Element).Bytes()
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *P384Element) IsZero() int {
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, p384ZeroEncoding)
+}
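Equal and IsZero deliberately round-trip through Bytes: converting out of the Montgomery domain to a canonical fixed-length encoding first lets subtle.ConstantTimeCompare do a timing-safe equality check. A minimal illustration of that primitive (standalone sketch):

package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	a := []byte{1, 2, 3}
	b := []byte{1, 2, 3}
	c := []byte{1, 2, 4}
	fmt.Println(subtle.ConstantTimeCompare(a, b)) // 1: equal, no data-dependent branches
	fmt.Println(subtle.ConstantTimeCompare(a, c)) // 0: unequal, same running time
}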
+
+// Set sets e = t, and returns e.
+func (e *P384Element) Set(t *P384Element) *P384Element {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the 48-byte big-endian encoding of e.
+func (e *P384Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p384ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *P384Element) bytes(out *[p384ElementLen]byte) []byte {
+ var tmp p384NonMontgomeryDomainFieldElement
+ p384FromMontgomery(&tmp, &e.x)
+ p384ToBytes(out, (*p384UntypedFieldElement)(&tmp))
+ p384InvertEndianness(out[:])
+ return out[:]
+}
+
+// p384MinusOneEncoding is the encoding of -1 mod p, so p - 1, the
+// highest canonical encoding. It is used by SetBytes to check for non-canonical
+// encodings such as p + k, 2p + k, etc.
+var p384MinusOneEncoding = new(P384Element).Sub(
+ new(P384Element), new(P384Element).One()).Bytes()
+
+// SetBytes sets e = v, where v is a big-endian 48-byte encoding, and returns e.
+// If v is not 48 bytes or it encodes a value equal to or higher than 2^384 - 2^128 - 2^96 + 2^32 - 1,
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *P384Element) SetBytes(v []byte) (*P384Element, error) {
+ if len(v) != p384ElementLen {
+ return nil, errors.New("invalid P384Element encoding")
+ }
+ for i := range v {
+ if v[i] < p384MinusOneEncoding[i] {
+ break
+ }
+ if v[i] > p384MinusOneEncoding[i] {
+ return nil, errors.New("invalid P384Element encoding")
+ }
+ }
+ var in [p384ElementLen]byte
+ copy(in[:], v)
+ p384InvertEndianness(in[:])
+ var tmp p384NonMontgomeryDomainFieldElement
+ p384FromBytes((*p384UntypedFieldElement)(&tmp), &in)
+ p384ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
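The loop is a big-endian lexicographic comparison against the encoding of p - 1: it accepts as soon as a byte of v is strictly smaller, rejects on a strictly larger byte, and accepts on full equality. It is equivalent to the following non-constant-time check, which would be acceptable here because encodings are public inputs (two-byte values stand in for the real 48-byte encodings):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	pMinusOne := []byte{0xff, 0xfe} // stand-in for p384MinusOneEncoding
	fmt.Println(bytes.Compare([]byte{0xff, 0xfe}, pMinusOne) > 0) // false: v = p-1 is canonical
	fmt.Println(bytes.Compare([]byte{0xff, 0xff}, pMinusOne) > 0) // true: v >= p is rejected
}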
+
+// Add sets e = t1 + t2, and returns e.
+func (e *P384Element) Add(t1, t2 *P384Element) *P384Element {
+ p384Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *P384Element) Sub(t1, t2 *P384Element) *P384Element {
+ p384Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *P384Element) Mul(t1, t2 *P384Element) *P384Element {
+ p384Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *P384Element) Square(t *P384Element) *P384Element {
+ p384Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *P384Element) Select(a, b *P384Element, cond int) *P384Element {
+ p384Selectznz((*p384UntypedFieldElement)(&v.x), p384Uint1(cond),
+ (*p384UntypedFieldElement)(&b.x), (*p384UntypedFieldElement)(&a.x))
+ return v
+}
+
+func p384InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
diff --git a/src/crypto/elliptic/internal/fiat/p384_fiat64.go b/src/crypto/elliptic/internal/fiat/p384_fiat64.go
new file mode 100644
index 0000000..493bed4
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/p384_fiat64.go
@@ -0,0 +1,3004 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p384 64 '2^384 - 2^128 - 2^96 + 2^32 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p384
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff (from "2^384 - 2^128 - 2^96 + 2^32 - 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) + (z[32] << 256) + (z[33] << 0x108) + (z[34] << 0x110) + (z[35] << 0x118) + (z[36] << 0x120) + (z[37] << 0x128) + (z[38] << 0x130) + (z[39] << 0x138) + (z[40] << 0x140) + (z[41] << 0x148) + (z[42] << 0x150) + (z[43] << 0x158) + (z[44] << 0x160) + (z[45] << 0x168) + (z[46] << 0x170) + (z[47] << 0x178)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) in
+//
+// if x1 & (2^384-1) < 2^383 then x1 & (2^384-1) else (x1 & (2^384-1)) - 2^384
+
+package fiat
+
+import "math/bits"
+
+type p384Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p384Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p384MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p384MontgomeryDomainFieldElement [6]uint64
+
+// The type p384NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p384NonMontgomeryDomainFieldElement [6]uint64
+
+// p384CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+// Output Bounds:
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p384CmovznzU64(out1 *uint64, arg1 p384Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
+
+// p384Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+// Postconditions:
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+//
+func p384Mul(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[0]
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x6, arg2[5])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x6, arg2[4])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x6, arg2[3])
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x6, arg2[2])
+ var x15 uint64
+ var x16 uint64
+ x16, x15 = bits.Mul64(x6, arg2[1])
+ var x17 uint64
+ var x18 uint64
+ x18, x17 = bits.Mul64(x6, arg2[0])
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Add64(x18, x15, uint64(0x0))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Add64(x16, x13, uint64(p384Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Add64(x14, x11, uint64(p384Uint1(x22)))
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x12, x9, uint64(p384Uint1(x24)))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x10, x7, uint64(p384Uint1(x26)))
+ x29 := (uint64(p384Uint1(x28)) + x8)
+ var x30 uint64
+ _, x30 = bits.Mul64(x17, 0x100000001)
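+ // Note: 0x100000001 is -p^-1 mod 2^64 for the P-384 prime, whose low word is
+ // 0xffffffff (0xffffffff * 0x100000001 = 2^64 - 1 ≡ -1 mod 2^64).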
+ var x32 uint64
+ var x33 uint64
+ x33, x32 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x34 uint64
+ var x35 uint64
+ x35, x34 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x36 uint64
+ var x37 uint64
+ x37, x36 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x38 uint64
+ var x39 uint64
+ x39, x38 = bits.Mul64(x30, 0xfffffffffffffffe)
+ var x40 uint64
+ var x41 uint64
+ x41, x40 = bits.Mul64(x30, 0xffffffff00000000)
+ var x42 uint64
+ var x43 uint64
+ x43, x42 = bits.Mul64(x30, 0xffffffff)
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x43, x40, uint64(0x0))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(x41, x38, uint64(p384Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x39, x36, uint64(p384Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x37, x34, uint64(p384Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(x35, x32, uint64(p384Uint1(x51)))
+ x54 := (uint64(p384Uint1(x53)) + x33)
+ var x56 uint64
+ _, x56 = bits.Add64(x17, x42, uint64(0x0))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x19, x44, uint64(p384Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(x21, x46, uint64(p384Uint1(x58)))
+ var x61 uint64
+ var x62 uint64
+ x61, x62 = bits.Add64(x23, x48, uint64(p384Uint1(x60)))
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x25, x50, uint64(p384Uint1(x62)))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x27, x52, uint64(p384Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x29, x54, uint64(p384Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x70, x69 = bits.Mul64(x1, arg2[5])
+ var x71 uint64
+ var x72 uint64
+ x72, x71 = bits.Mul64(x1, arg2[4])
+ var x73 uint64
+ var x74 uint64
+ x74, x73 = bits.Mul64(x1, arg2[3])
+ var x75 uint64
+ var x76 uint64
+ x76, x75 = bits.Mul64(x1, arg2[2])
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x1, arg2[1])
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x1, arg2[0])
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x80, x77, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x78, x75, uint64(p384Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x76, x73, uint64(p384Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x74, x71, uint64(p384Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x72, x69, uint64(p384Uint1(x88)))
+ x91 := (uint64(p384Uint1(x90)) + x70)
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x57, x79, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x59, x81, uint64(p384Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x61, x83, uint64(p384Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x63, x85, uint64(p384Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x65, x87, uint64(p384Uint1(x99)))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x67, x89, uint64(p384Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(uint64(p384Uint1(x68)), x91, uint64(p384Uint1(x103)))
+ var x106 uint64
+ _, x106 = bits.Mul64(x92, 0x100000001)
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x106, 0xfffffffffffffffe)
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x106, 0xffffffff00000000)
+ var x118 uint64
+ var x119 uint64
+ x119, x118 = bits.Mul64(x106, 0xffffffff)
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x119, x116, uint64(0x0))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x117, x114, uint64(p384Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x115, x112, uint64(p384Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x113, x110, uint64(p384Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x111, x108, uint64(p384Uint1(x127)))
+ x130 := (uint64(p384Uint1(x129)) + x109)
+ var x132 uint64
+ _, x132 = bits.Add64(x92, x118, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x94, x120, uint64(p384Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x96, x122, uint64(p384Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x98, x124, uint64(p384Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x100, x126, uint64(p384Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x102, x128, uint64(p384Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x104, x130, uint64(p384Uint1(x142)))
+ x145 := (uint64(p384Uint1(x144)) + uint64(p384Uint1(x105)))
+ var x146 uint64
+ var x147 uint64
+ x147, x146 = bits.Mul64(x2, arg2[5])
+ var x148 uint64
+ var x149 uint64
+ x149, x148 = bits.Mul64(x2, arg2[4])
+ var x150 uint64
+ var x151 uint64
+ x151, x150 = bits.Mul64(x2, arg2[3])
+ var x152 uint64
+ var x153 uint64
+ x153, x152 = bits.Mul64(x2, arg2[2])
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x2, arg2[1])
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x2, arg2[0])
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x157, x154, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x155, x152, uint64(p384Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x153, x150, uint64(p384Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x151, x148, uint64(p384Uint1(x163)))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x149, x146, uint64(p384Uint1(x165)))
+ x168 := (uint64(p384Uint1(x167)) + x147)
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x133, x156, uint64(0x0))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x135, x158, uint64(p384Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x137, x160, uint64(p384Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x139, x162, uint64(p384Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x141, x164, uint64(p384Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x143, x166, uint64(p384Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x145, x168, uint64(p384Uint1(x180)))
+ var x183 uint64
+ _, x183 = bits.Mul64(x169, 0x100000001)
+ var x185 uint64
+ var x186 uint64
+ x186, x185 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x187 uint64
+ var x188 uint64
+ x188, x187 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x189 uint64
+ var x190 uint64
+ x190, x189 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x191 uint64
+ var x192 uint64
+ x192, x191 = bits.Mul64(x183, 0xfffffffffffffffe)
+ var x193 uint64
+ var x194 uint64
+ x194, x193 = bits.Mul64(x183, 0xffffffff00000000)
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(x183, 0xffffffff)
+ var x197 uint64
+ var x198 uint64
+ x197, x198 = bits.Add64(x196, x193, uint64(0x0))
+ var x199 uint64
+ var x200 uint64
+ x199, x200 = bits.Add64(x194, x191, uint64(p384Uint1(x198)))
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x192, x189, uint64(p384Uint1(x200)))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x190, x187, uint64(p384Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x188, x185, uint64(p384Uint1(x204)))
+ x207 := (uint64(p384Uint1(x206)) + x186)
+ var x209 uint64
+ _, x209 = bits.Add64(x169, x195, uint64(0x0))
+ var x210 uint64
+ var x211 uint64
+ x210, x211 = bits.Add64(x171, x197, uint64(p384Uint1(x209)))
+ var x212 uint64
+ var x213 uint64
+ x212, x213 = bits.Add64(x173, x199, uint64(p384Uint1(x211)))
+ var x214 uint64
+ var x215 uint64
+ x214, x215 = bits.Add64(x175, x201, uint64(p384Uint1(x213)))
+ var x216 uint64
+ var x217 uint64
+ x216, x217 = bits.Add64(x177, x203, uint64(p384Uint1(x215)))
+ var x218 uint64
+ var x219 uint64
+ x218, x219 = bits.Add64(x179, x205, uint64(p384Uint1(x217)))
+ var x220 uint64
+ var x221 uint64
+ x220, x221 = bits.Add64(x181, x207, uint64(p384Uint1(x219)))
+ x222 := (uint64(p384Uint1(x221)) + uint64(p384Uint1(x182)))
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x3, arg2[5])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x3, arg2[4])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x3, arg2[3])
+ var x229 uint64
+ var x230 uint64
+ x230, x229 = bits.Mul64(x3, arg2[2])
+ var x231 uint64
+ var x232 uint64
+ x232, x231 = bits.Mul64(x3, arg2[1])
+ var x233 uint64
+ var x234 uint64
+ x234, x233 = bits.Mul64(x3, arg2[0])
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x234, x231, uint64(0x0))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
+ x245 := (uint64(p384Uint1(x244)) + x224)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x210, x233, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x212, x235, uint64(p384Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x214, x237, uint64(p384Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x216, x239, uint64(p384Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x218, x241, uint64(p384Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x220, x243, uint64(p384Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x222, x245, uint64(p384Uint1(x257)))
+ var x260 uint64
+ _, x260 = bits.Mul64(x246, 0x100000001)
+ var x262 uint64
+ var x263 uint64
+ x263, x262 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x264 uint64
+ var x265 uint64
+ x265, x264 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x260, 0xfffffffffffffffe)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x260, 0xffffffff00000000)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x260, 0xffffffff)
+ var x274 uint64
+ var x275 uint64
+ x274, x275 = bits.Add64(x273, x270, uint64(0x0))
+ var x276 uint64
+ var x277 uint64
+ x276, x277 = bits.Add64(x271, x268, uint64(p384Uint1(x275)))
+ var x278 uint64
+ var x279 uint64
+ x278, x279 = bits.Add64(x269, x266, uint64(p384Uint1(x277)))
+ var x280 uint64
+ var x281 uint64
+ x280, x281 = bits.Add64(x267, x264, uint64(p384Uint1(x279)))
+ var x282 uint64
+ var x283 uint64
+ x282, x283 = bits.Add64(x265, x262, uint64(p384Uint1(x281)))
+ x284 := (uint64(p384Uint1(x283)) + x263)
+ var x286 uint64
+ _, x286 = bits.Add64(x246, x272, uint64(0x0))
+ var x287 uint64
+ var x288 uint64
+ x287, x288 = bits.Add64(x248, x274, uint64(p384Uint1(x286)))
+ var x289 uint64
+ var x290 uint64
+ x289, x290 = bits.Add64(x250, x276, uint64(p384Uint1(x288)))
+ var x291 uint64
+ var x292 uint64
+ x291, x292 = bits.Add64(x252, x278, uint64(p384Uint1(x290)))
+ var x293 uint64
+ var x294 uint64
+ x293, x294 = bits.Add64(x254, x280, uint64(p384Uint1(x292)))
+ var x295 uint64
+ var x296 uint64
+ x295, x296 = bits.Add64(x256, x282, uint64(p384Uint1(x294)))
+ var x297 uint64
+ var x298 uint64
+ x297, x298 = bits.Add64(x258, x284, uint64(p384Uint1(x296)))
+ x299 := (uint64(p384Uint1(x298)) + uint64(p384Uint1(x259)))
+ var x300 uint64
+ var x301 uint64
+ x301, x300 = bits.Mul64(x4, arg2[5])
+ var x302 uint64
+ var x303 uint64
+ x303, x302 = bits.Mul64(x4, arg2[4])
+ var x304 uint64
+ var x305 uint64
+ x305, x304 = bits.Mul64(x4, arg2[3])
+ var x306 uint64
+ var x307 uint64
+ x307, x306 = bits.Mul64(x4, arg2[2])
+ var x308 uint64
+ var x309 uint64
+ x309, x308 = bits.Mul64(x4, arg2[1])
+ var x310 uint64
+ var x311 uint64
+ x311, x310 = bits.Mul64(x4, arg2[0])
+ var x312 uint64
+ var x313 uint64
+ x312, x313 = bits.Add64(x311, x308, uint64(0x0))
+ var x314 uint64
+ var x315 uint64
+ x314, x315 = bits.Add64(x309, x306, uint64(p384Uint1(x313)))
+ var x316 uint64
+ var x317 uint64
+ x316, x317 = bits.Add64(x307, x304, uint64(p384Uint1(x315)))
+ var x318 uint64
+ var x319 uint64
+ x318, x319 = bits.Add64(x305, x302, uint64(p384Uint1(x317)))
+ var x320 uint64
+ var x321 uint64
+ x320, x321 = bits.Add64(x303, x300, uint64(p384Uint1(x319)))
+ x322 := (uint64(p384Uint1(x321)) + x301)
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64(x287, x310, uint64(0x0))
+ var x325 uint64
+ var x326 uint64
+ x325, x326 = bits.Add64(x289, x312, uint64(p384Uint1(x324)))
+ var x327 uint64
+ var x328 uint64
+ x327, x328 = bits.Add64(x291, x314, uint64(p384Uint1(x326)))
+ var x329 uint64
+ var x330 uint64
+ x329, x330 = bits.Add64(x293, x316, uint64(p384Uint1(x328)))
+ var x331 uint64
+ var x332 uint64
+ x331, x332 = bits.Add64(x295, x318, uint64(p384Uint1(x330)))
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x297, x320, uint64(p384Uint1(x332)))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x299, x322, uint64(p384Uint1(x334)))
+ var x337 uint64
+ _, x337 = bits.Mul64(x323, 0x100000001)
+ var x339 uint64
+ var x340 uint64
+ x340, x339 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x341 uint64
+ var x342 uint64
+ x342, x341 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x343 uint64
+ var x344 uint64
+ x344, x343 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x345 uint64
+ var x346 uint64
+ x346, x345 = bits.Mul64(x337, 0xfffffffffffffffe)
+ var x347 uint64
+ var x348 uint64
+ x348, x347 = bits.Mul64(x337, 0xffffffff00000000)
+ var x349 uint64
+ var x350 uint64
+ x350, x349 = bits.Mul64(x337, 0xffffffff)
+ var x351 uint64
+ var x352 uint64
+ x351, x352 = bits.Add64(x350, x347, uint64(0x0))
+ var x353 uint64
+ var x354 uint64
+ x353, x354 = bits.Add64(x348, x345, uint64(p384Uint1(x352)))
+ var x355 uint64
+ var x356 uint64
+ x355, x356 = bits.Add64(x346, x343, uint64(p384Uint1(x354)))
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x344, x341, uint64(p384Uint1(x356)))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x342, x339, uint64(p384Uint1(x358)))
+ x361 := (uint64(p384Uint1(x360)) + x340)
+ var x363 uint64
+ _, x363 = bits.Add64(x323, x349, uint64(0x0))
+ var x364 uint64
+ var x365 uint64
+ x364, x365 = bits.Add64(x325, x351, uint64(p384Uint1(x363)))
+ var x366 uint64
+ var x367 uint64
+ x366, x367 = bits.Add64(x327, x353, uint64(p384Uint1(x365)))
+ var x368 uint64
+ var x369 uint64
+ x368, x369 = bits.Add64(x329, x355, uint64(p384Uint1(x367)))
+ var x370 uint64
+ var x371 uint64
+ x370, x371 = bits.Add64(x331, x357, uint64(p384Uint1(x369)))
+ var x372 uint64
+ var x373 uint64
+ x372, x373 = bits.Add64(x333, x359, uint64(p384Uint1(x371)))
+ var x374 uint64
+ var x375 uint64
+ x374, x375 = bits.Add64(x335, x361, uint64(p384Uint1(x373)))
+ x376 := (uint64(p384Uint1(x375)) + uint64(p384Uint1(x336)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x5, arg2[5])
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x5, arg2[4])
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x5, arg2[3])
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x5, arg2[2])
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x5, arg2[1])
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x5, arg2[0])
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64(x388, x385, uint64(0x0))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Add64(x386, x383, uint64(p384Uint1(x390)))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Add64(x384, x381, uint64(p384Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x382, x379, uint64(p384Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x380, x377, uint64(p384Uint1(x396)))
+ x399 := (uint64(p384Uint1(x398)) + x378)
+ var x400 uint64
+ var x401 uint64
+ x400, x401 = bits.Add64(x364, x387, uint64(0x0))
+ var x402 uint64
+ var x403 uint64
+ x402, x403 = bits.Add64(x366, x389, uint64(p384Uint1(x401)))
+ var x404 uint64
+ var x405 uint64
+ x404, x405 = bits.Add64(x368, x391, uint64(p384Uint1(x403)))
+ var x406 uint64
+ var x407 uint64
+ x406, x407 = bits.Add64(x370, x393, uint64(p384Uint1(x405)))
+ var x408 uint64
+ var x409 uint64
+ x408, x409 = bits.Add64(x372, x395, uint64(p384Uint1(x407)))
+ var x410 uint64
+ var x411 uint64
+ x410, x411 = bits.Add64(x374, x397, uint64(p384Uint1(x409)))
+ var x412 uint64
+ var x413 uint64
+ x412, x413 = bits.Add64(x376, x399, uint64(p384Uint1(x411)))
+ var x414 uint64
+ _, x414 = bits.Mul64(x400, 0x100000001)
+ var x416 uint64
+ var x417 uint64
+ x417, x416 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x418 uint64
+ var x419 uint64
+ x419, x418 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x420 uint64
+ var x421 uint64
+ x421, x420 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x422 uint64
+ var x423 uint64
+ x423, x422 = bits.Mul64(x414, 0xfffffffffffffffe)
+ var x424 uint64
+ var x425 uint64
+ x425, x424 = bits.Mul64(x414, 0xffffffff00000000)
+ var x426 uint64
+ var x427 uint64
+ x427, x426 = bits.Mul64(x414, 0xffffffff)
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x427, x424, uint64(0x0))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x425, x422, uint64(p384Uint1(x429)))
+ var x432 uint64
+ var x433 uint64
+ x432, x433 = bits.Add64(x423, x420, uint64(p384Uint1(x431)))
+ var x434 uint64
+ var x435 uint64
+ x434, x435 = bits.Add64(x421, x418, uint64(p384Uint1(x433)))
+ var x436 uint64
+ var x437 uint64
+ x436, x437 = bits.Add64(x419, x416, uint64(p384Uint1(x435)))
+ x438 := (uint64(p384Uint1(x437)) + x417)
+ var x440 uint64
+ _, x440 = bits.Add64(x400, x426, uint64(0x0))
+ var x441 uint64
+ var x442 uint64
+ x441, x442 = bits.Add64(x402, x428, uint64(p384Uint1(x440)))
+ var x443 uint64
+ var x444 uint64
+ x443, x444 = bits.Add64(x404, x430, uint64(p384Uint1(x442)))
+ var x445 uint64
+ var x446 uint64
+ x445, x446 = bits.Add64(x406, x432, uint64(p384Uint1(x444)))
+ var x447 uint64
+ var x448 uint64
+ x447, x448 = bits.Add64(x408, x434, uint64(p384Uint1(x446)))
+ var x449 uint64
+ var x450 uint64
+ x449, x450 = bits.Add64(x410, x436, uint64(p384Uint1(x448)))
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x412, x438, uint64(p384Uint1(x450)))
+ x453 := (uint64(p384Uint1(x452)) + uint64(p384Uint1(x413)))
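+	// Editorial note (not in the generated source): the reduced sum can still
+	// exceed p by at most one modulus, so subtract p once and let the
+	// conditional moves below keep whichever value did not underflow.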
+ var x454 uint64
+ var x455 uint64
+ x454, x455 = bits.Sub64(x441, 0xffffffff, uint64(0x0))
+ var x456 uint64
+ var x457 uint64
+ x456, x457 = bits.Sub64(x443, 0xffffffff00000000, uint64(p384Uint1(x455)))
+ var x458 uint64
+ var x459 uint64
+ x458, x459 = bits.Sub64(x445, 0xfffffffffffffffe, uint64(p384Uint1(x457)))
+ var x460 uint64
+ var x461 uint64
+ x460, x461 = bits.Sub64(x447, 0xffffffffffffffff, uint64(p384Uint1(x459)))
+ var x462 uint64
+ var x463 uint64
+ x462, x463 = bits.Sub64(x449, 0xffffffffffffffff, uint64(p384Uint1(x461)))
+ var x464 uint64
+ var x465 uint64
+ x464, x465 = bits.Sub64(x451, 0xffffffffffffffff, uint64(p384Uint1(x463)))
+ var x467 uint64
+ _, x467 = bits.Sub64(x453, uint64(0x0), uint64(p384Uint1(x465)))
+ var x468 uint64
+ p384CmovznzU64(&x468, p384Uint1(x467), x454, x441)
+ var x469 uint64
+ p384CmovznzU64(&x469, p384Uint1(x467), x456, x443)
+ var x470 uint64
+ p384CmovznzU64(&x470, p384Uint1(x467), x458, x445)
+ var x471 uint64
+ p384CmovznzU64(&x471, p384Uint1(x467), x460, x447)
+ var x472 uint64
+ p384CmovznzU64(&x472, p384Uint1(x467), x462, x449)
+ var x473 uint64
+ p384CmovznzU64(&x473, p384Uint1(x467), x464, x451)
+ out1[0] = x468
+ out1[1] = x469
+ out1[2] = x470
+ out1[3] = x471
+ out1[4] = x472
+ out1[5] = x473
+}
+
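+// Editorial sketch (not part of the generated code): p384Mul multiplies in
+// the Montgomery domain, so converting the operands in and the product out
+// recovers the plain modular product. A hypothetical math/big cross-check,
+// assuming little-endian limb converters fillLimbs and limbsToBig that this
+// file does not define, might read:
+//
+//	func checkMul(a, b *big.Int) bool {
+//		p, _ := new(big.Int).SetString("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", 16)
+//		var am, bm, cm p384MontgomeryDomainFieldElement
+//		var an, bn, cn p384NonMontgomeryDomainFieldElement
+//		fillLimbs(&an, a) // hypothetical helper
+//		fillLimbs(&bn, b) // hypothetical helper
+//		p384ToMontgomery(&am, &an)
+//		p384ToMontgomery(&bm, &bn)
+//		p384Mul(&cm, &am, &bm)
+//		p384FromMontgomery(&cn, &cm)
+//		want := new(big.Int).Mod(new(big.Int).Mul(a, b), p)
+//		return limbsToBig(&cn).Cmp(want) == 0 // hypothetical helper
+//	}
+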
+// p384Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//   0 ≤ eval arg1 < m
+// Postconditions:
+//   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+//   0 ≤ eval out1 < m
+//
+func p384Square(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[0]
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x6, arg1[5])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x6, arg1[4])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x6, arg1[3])
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x6, arg1[2])
+ var x15 uint64
+ var x16 uint64
+ x16, x15 = bits.Mul64(x6, arg1[1])
+ var x17 uint64
+ var x18 uint64
+ x18, x17 = bits.Mul64(x6, arg1[0])
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Add64(x18, x15, uint64(0x0))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Add64(x16, x13, uint64(p384Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Add64(x14, x11, uint64(p384Uint1(x22)))
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x12, x9, uint64(p384Uint1(x24)))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x10, x7, uint64(p384Uint1(x26)))
+ x29 := (uint64(p384Uint1(x28)) + x8)
+ var x30 uint64
+ _, x30 = bits.Mul64(x17, 0x100000001)
+ var x32 uint64
+ var x33 uint64
+ x33, x32 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x34 uint64
+ var x35 uint64
+ x35, x34 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x36 uint64
+ var x37 uint64
+ x37, x36 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x38 uint64
+ var x39 uint64
+ x39, x38 = bits.Mul64(x30, 0xfffffffffffffffe)
+ var x40 uint64
+ var x41 uint64
+ x41, x40 = bits.Mul64(x30, 0xffffffff00000000)
+ var x42 uint64
+ var x43 uint64
+ x43, x42 = bits.Mul64(x30, 0xffffffff)
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x43, x40, uint64(0x0))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(x41, x38, uint64(p384Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x39, x36, uint64(p384Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x37, x34, uint64(p384Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(x35, x32, uint64(p384Uint1(x51)))
+ x54 := (uint64(p384Uint1(x53)) + x33)
+ var x56 uint64
+ _, x56 = bits.Add64(x17, x42, uint64(0x0))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x19, x44, uint64(p384Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(x21, x46, uint64(p384Uint1(x58)))
+ var x61 uint64
+ var x62 uint64
+ x61, x62 = bits.Add64(x23, x48, uint64(p384Uint1(x60)))
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x25, x50, uint64(p384Uint1(x62)))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x27, x52, uint64(p384Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x29, x54, uint64(p384Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x70, x69 = bits.Mul64(x1, arg1[5])
+ var x71 uint64
+ var x72 uint64
+ x72, x71 = bits.Mul64(x1, arg1[4])
+ var x73 uint64
+ var x74 uint64
+ x74, x73 = bits.Mul64(x1, arg1[3])
+ var x75 uint64
+ var x76 uint64
+ x76, x75 = bits.Mul64(x1, arg1[2])
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x1, arg1[1])
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x1, arg1[0])
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x80, x77, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x78, x75, uint64(p384Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x76, x73, uint64(p384Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x74, x71, uint64(p384Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x72, x69, uint64(p384Uint1(x88)))
+ x91 := (uint64(p384Uint1(x90)) + x70)
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x57, x79, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x59, x81, uint64(p384Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x61, x83, uint64(p384Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x63, x85, uint64(p384Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x65, x87, uint64(p384Uint1(x99)))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x67, x89, uint64(p384Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(uint64(p384Uint1(x68)), x91, uint64(p384Uint1(x103)))
+ var x106 uint64
+ _, x106 = bits.Mul64(x92, 0x100000001)
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x106, 0xfffffffffffffffe)
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x106, 0xffffffff00000000)
+ var x118 uint64
+ var x119 uint64
+ x119, x118 = bits.Mul64(x106, 0xffffffff)
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x119, x116, uint64(0x0))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x117, x114, uint64(p384Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x115, x112, uint64(p384Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x113, x110, uint64(p384Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x111, x108, uint64(p384Uint1(x127)))
+ x130 := (uint64(p384Uint1(x129)) + x109)
+ var x132 uint64
+ _, x132 = bits.Add64(x92, x118, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x94, x120, uint64(p384Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x96, x122, uint64(p384Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x98, x124, uint64(p384Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x100, x126, uint64(p384Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x102, x128, uint64(p384Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x104, x130, uint64(p384Uint1(x142)))
+ x145 := (uint64(p384Uint1(x144)) + uint64(p384Uint1(x105)))
+ var x146 uint64
+ var x147 uint64
+ x147, x146 = bits.Mul64(x2, arg1[5])
+ var x148 uint64
+ var x149 uint64
+ x149, x148 = bits.Mul64(x2, arg1[4])
+ var x150 uint64
+ var x151 uint64
+ x151, x150 = bits.Mul64(x2, arg1[3])
+ var x152 uint64
+ var x153 uint64
+ x153, x152 = bits.Mul64(x2, arg1[2])
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x2, arg1[1])
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x2, arg1[0])
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x157, x154, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x155, x152, uint64(p384Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x153, x150, uint64(p384Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x151, x148, uint64(p384Uint1(x163)))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x149, x146, uint64(p384Uint1(x165)))
+ x168 := (uint64(p384Uint1(x167)) + x147)
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x133, x156, uint64(0x0))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x135, x158, uint64(p384Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x137, x160, uint64(p384Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x139, x162, uint64(p384Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x141, x164, uint64(p384Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x143, x166, uint64(p384Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x145, x168, uint64(p384Uint1(x180)))
+ var x183 uint64
+ _, x183 = bits.Mul64(x169, 0x100000001)
+ var x185 uint64
+ var x186 uint64
+ x186, x185 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x187 uint64
+ var x188 uint64
+ x188, x187 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x189 uint64
+ var x190 uint64
+ x190, x189 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x191 uint64
+ var x192 uint64
+ x192, x191 = bits.Mul64(x183, 0xfffffffffffffffe)
+ var x193 uint64
+ var x194 uint64
+ x194, x193 = bits.Mul64(x183, 0xffffffff00000000)
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(x183, 0xffffffff)
+ var x197 uint64
+ var x198 uint64
+ x197, x198 = bits.Add64(x196, x193, uint64(0x0))
+ var x199 uint64
+ var x200 uint64
+ x199, x200 = bits.Add64(x194, x191, uint64(p384Uint1(x198)))
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x192, x189, uint64(p384Uint1(x200)))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x190, x187, uint64(p384Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x188, x185, uint64(p384Uint1(x204)))
+ x207 := (uint64(p384Uint1(x206)) + x186)
+ var x209 uint64
+ _, x209 = bits.Add64(x169, x195, uint64(0x0))
+ var x210 uint64
+ var x211 uint64
+ x210, x211 = bits.Add64(x171, x197, uint64(p384Uint1(x209)))
+ var x212 uint64
+ var x213 uint64
+ x212, x213 = bits.Add64(x173, x199, uint64(p384Uint1(x211)))
+ var x214 uint64
+ var x215 uint64
+ x214, x215 = bits.Add64(x175, x201, uint64(p384Uint1(x213)))
+ var x216 uint64
+ var x217 uint64
+ x216, x217 = bits.Add64(x177, x203, uint64(p384Uint1(x215)))
+ var x218 uint64
+ var x219 uint64
+ x218, x219 = bits.Add64(x179, x205, uint64(p384Uint1(x217)))
+ var x220 uint64
+ var x221 uint64
+ x220, x221 = bits.Add64(x181, x207, uint64(p384Uint1(x219)))
+ x222 := (uint64(p384Uint1(x221)) + uint64(p384Uint1(x182)))
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x3, arg1[5])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x3, arg1[4])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x3, arg1[3])
+ var x229 uint64
+ var x230 uint64
+ x230, x229 = bits.Mul64(x3, arg1[2])
+ var x231 uint64
+ var x232 uint64
+ x232, x231 = bits.Mul64(x3, arg1[1])
+ var x233 uint64
+ var x234 uint64
+ x234, x233 = bits.Mul64(x3, arg1[0])
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x234, x231, uint64(0x0))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
+ x245 := (uint64(p384Uint1(x244)) + x224)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x210, x233, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x212, x235, uint64(p384Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x214, x237, uint64(p384Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x216, x239, uint64(p384Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x218, x241, uint64(p384Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x220, x243, uint64(p384Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x222, x245, uint64(p384Uint1(x257)))
+ var x260 uint64
+ _, x260 = bits.Mul64(x246, 0x100000001)
+ var x262 uint64
+ var x263 uint64
+ x263, x262 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x264 uint64
+ var x265 uint64
+ x265, x264 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x260, 0xfffffffffffffffe)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x260, 0xffffffff00000000)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x260, 0xffffffff)
+ var x274 uint64
+ var x275 uint64
+ x274, x275 = bits.Add64(x273, x270, uint64(0x0))
+ var x276 uint64
+ var x277 uint64
+ x276, x277 = bits.Add64(x271, x268, uint64(p384Uint1(x275)))
+ var x278 uint64
+ var x279 uint64
+ x278, x279 = bits.Add64(x269, x266, uint64(p384Uint1(x277)))
+ var x280 uint64
+ var x281 uint64
+ x280, x281 = bits.Add64(x267, x264, uint64(p384Uint1(x279)))
+ var x282 uint64
+ var x283 uint64
+ x282, x283 = bits.Add64(x265, x262, uint64(p384Uint1(x281)))
+ x284 := (uint64(p384Uint1(x283)) + x263)
+ var x286 uint64
+ _, x286 = bits.Add64(x246, x272, uint64(0x0))
+ var x287 uint64
+ var x288 uint64
+ x287, x288 = bits.Add64(x248, x274, uint64(p384Uint1(x286)))
+ var x289 uint64
+ var x290 uint64
+ x289, x290 = bits.Add64(x250, x276, uint64(p384Uint1(x288)))
+ var x291 uint64
+ var x292 uint64
+ x291, x292 = bits.Add64(x252, x278, uint64(p384Uint1(x290)))
+ var x293 uint64
+ var x294 uint64
+ x293, x294 = bits.Add64(x254, x280, uint64(p384Uint1(x292)))
+ var x295 uint64
+ var x296 uint64
+ x295, x296 = bits.Add64(x256, x282, uint64(p384Uint1(x294)))
+ var x297 uint64
+ var x298 uint64
+ x297, x298 = bits.Add64(x258, x284, uint64(p384Uint1(x296)))
+ x299 := (uint64(p384Uint1(x298)) + uint64(p384Uint1(x259)))
+ var x300 uint64
+ var x301 uint64
+ x301, x300 = bits.Mul64(x4, arg1[5])
+ var x302 uint64
+ var x303 uint64
+ x303, x302 = bits.Mul64(x4, arg1[4])
+ var x304 uint64
+ var x305 uint64
+ x305, x304 = bits.Mul64(x4, arg1[3])
+ var x306 uint64
+ var x307 uint64
+ x307, x306 = bits.Mul64(x4, arg1[2])
+ var x308 uint64
+ var x309 uint64
+ x309, x308 = bits.Mul64(x4, arg1[1])
+ var x310 uint64
+ var x311 uint64
+ x311, x310 = bits.Mul64(x4, arg1[0])
+ var x312 uint64
+ var x313 uint64
+ x312, x313 = bits.Add64(x311, x308, uint64(0x0))
+ var x314 uint64
+ var x315 uint64
+ x314, x315 = bits.Add64(x309, x306, uint64(p384Uint1(x313)))
+ var x316 uint64
+ var x317 uint64
+ x316, x317 = bits.Add64(x307, x304, uint64(p384Uint1(x315)))
+ var x318 uint64
+ var x319 uint64
+ x318, x319 = bits.Add64(x305, x302, uint64(p384Uint1(x317)))
+ var x320 uint64
+ var x321 uint64
+ x320, x321 = bits.Add64(x303, x300, uint64(p384Uint1(x319)))
+ x322 := (uint64(p384Uint1(x321)) + x301)
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64(x287, x310, uint64(0x0))
+ var x325 uint64
+ var x326 uint64
+ x325, x326 = bits.Add64(x289, x312, uint64(p384Uint1(x324)))
+ var x327 uint64
+ var x328 uint64
+ x327, x328 = bits.Add64(x291, x314, uint64(p384Uint1(x326)))
+ var x329 uint64
+ var x330 uint64
+ x329, x330 = bits.Add64(x293, x316, uint64(p384Uint1(x328)))
+ var x331 uint64
+ var x332 uint64
+ x331, x332 = bits.Add64(x295, x318, uint64(p384Uint1(x330)))
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x297, x320, uint64(p384Uint1(x332)))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x299, x322, uint64(p384Uint1(x334)))
+ var x337 uint64
+ _, x337 = bits.Mul64(x323, 0x100000001)
+ var x339 uint64
+ var x340 uint64
+ x340, x339 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x341 uint64
+ var x342 uint64
+ x342, x341 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x343 uint64
+ var x344 uint64
+ x344, x343 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x345 uint64
+ var x346 uint64
+ x346, x345 = bits.Mul64(x337, 0xfffffffffffffffe)
+ var x347 uint64
+ var x348 uint64
+ x348, x347 = bits.Mul64(x337, 0xffffffff00000000)
+ var x349 uint64
+ var x350 uint64
+ x350, x349 = bits.Mul64(x337, 0xffffffff)
+ var x351 uint64
+ var x352 uint64
+ x351, x352 = bits.Add64(x350, x347, uint64(0x0))
+ var x353 uint64
+ var x354 uint64
+ x353, x354 = bits.Add64(x348, x345, uint64(p384Uint1(x352)))
+ var x355 uint64
+ var x356 uint64
+ x355, x356 = bits.Add64(x346, x343, uint64(p384Uint1(x354)))
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x344, x341, uint64(p384Uint1(x356)))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x342, x339, uint64(p384Uint1(x358)))
+ x361 := (uint64(p384Uint1(x360)) + x340)
+ var x363 uint64
+ _, x363 = bits.Add64(x323, x349, uint64(0x0))
+ var x364 uint64
+ var x365 uint64
+ x364, x365 = bits.Add64(x325, x351, uint64(p384Uint1(x363)))
+ var x366 uint64
+ var x367 uint64
+ x366, x367 = bits.Add64(x327, x353, uint64(p384Uint1(x365)))
+ var x368 uint64
+ var x369 uint64
+ x368, x369 = bits.Add64(x329, x355, uint64(p384Uint1(x367)))
+ var x370 uint64
+ var x371 uint64
+ x370, x371 = bits.Add64(x331, x357, uint64(p384Uint1(x369)))
+ var x372 uint64
+ var x373 uint64
+ x372, x373 = bits.Add64(x333, x359, uint64(p384Uint1(x371)))
+ var x374 uint64
+ var x375 uint64
+ x374, x375 = bits.Add64(x335, x361, uint64(p384Uint1(x373)))
+ x376 := (uint64(p384Uint1(x375)) + uint64(p384Uint1(x336)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x5, arg1[5])
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x5, arg1[4])
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x5, arg1[3])
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x5, arg1[2])
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x5, arg1[1])
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x5, arg1[0])
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64(x388, x385, uint64(0x0))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Add64(x386, x383, uint64(p384Uint1(x390)))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Add64(x384, x381, uint64(p384Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x382, x379, uint64(p384Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x380, x377, uint64(p384Uint1(x396)))
+ x399 := (uint64(p384Uint1(x398)) + x378)
+ var x400 uint64
+ var x401 uint64
+ x400, x401 = bits.Add64(x364, x387, uint64(0x0))
+ var x402 uint64
+ var x403 uint64
+ x402, x403 = bits.Add64(x366, x389, uint64(p384Uint1(x401)))
+ var x404 uint64
+ var x405 uint64
+ x404, x405 = bits.Add64(x368, x391, uint64(p384Uint1(x403)))
+ var x406 uint64
+ var x407 uint64
+ x406, x407 = bits.Add64(x370, x393, uint64(p384Uint1(x405)))
+ var x408 uint64
+ var x409 uint64
+ x408, x409 = bits.Add64(x372, x395, uint64(p384Uint1(x407)))
+ var x410 uint64
+ var x411 uint64
+ x410, x411 = bits.Add64(x374, x397, uint64(p384Uint1(x409)))
+ var x412 uint64
+ var x413 uint64
+ x412, x413 = bits.Add64(x376, x399, uint64(p384Uint1(x411)))
+ var x414 uint64
+ _, x414 = bits.Mul64(x400, 0x100000001)
+ var x416 uint64
+ var x417 uint64
+ x417, x416 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x418 uint64
+ var x419 uint64
+ x419, x418 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x420 uint64
+ var x421 uint64
+ x421, x420 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x422 uint64
+ var x423 uint64
+ x423, x422 = bits.Mul64(x414, 0xfffffffffffffffe)
+ var x424 uint64
+ var x425 uint64
+ x425, x424 = bits.Mul64(x414, 0xffffffff00000000)
+ var x426 uint64
+ var x427 uint64
+ x427, x426 = bits.Mul64(x414, 0xffffffff)
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x427, x424, uint64(0x0))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x425, x422, uint64(p384Uint1(x429)))
+ var x432 uint64
+ var x433 uint64
+ x432, x433 = bits.Add64(x423, x420, uint64(p384Uint1(x431)))
+ var x434 uint64
+ var x435 uint64
+ x434, x435 = bits.Add64(x421, x418, uint64(p384Uint1(x433)))
+ var x436 uint64
+ var x437 uint64
+ x436, x437 = bits.Add64(x419, x416, uint64(p384Uint1(x435)))
+ x438 := (uint64(p384Uint1(x437)) + x417)
+ var x440 uint64
+ _, x440 = bits.Add64(x400, x426, uint64(0x0))
+ var x441 uint64
+ var x442 uint64
+ x441, x442 = bits.Add64(x402, x428, uint64(p384Uint1(x440)))
+ var x443 uint64
+ var x444 uint64
+ x443, x444 = bits.Add64(x404, x430, uint64(p384Uint1(x442)))
+ var x445 uint64
+ var x446 uint64
+ x445, x446 = bits.Add64(x406, x432, uint64(p384Uint1(x444)))
+ var x447 uint64
+ var x448 uint64
+ x447, x448 = bits.Add64(x408, x434, uint64(p384Uint1(x446)))
+ var x449 uint64
+ var x450 uint64
+ x449, x450 = bits.Add64(x410, x436, uint64(p384Uint1(x448)))
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x412, x438, uint64(p384Uint1(x450)))
+ x453 := (uint64(p384Uint1(x452)) + uint64(p384Uint1(x413)))
+ var x454 uint64
+ var x455 uint64
+ x454, x455 = bits.Sub64(x441, 0xffffffff, uint64(0x0))
+ var x456 uint64
+ var x457 uint64
+ x456, x457 = bits.Sub64(x443, 0xffffffff00000000, uint64(p384Uint1(x455)))
+ var x458 uint64
+ var x459 uint64
+ x458, x459 = bits.Sub64(x445, 0xfffffffffffffffe, uint64(p384Uint1(x457)))
+ var x460 uint64
+ var x461 uint64
+ x460, x461 = bits.Sub64(x447, 0xffffffffffffffff, uint64(p384Uint1(x459)))
+ var x462 uint64
+ var x463 uint64
+ x462, x463 = bits.Sub64(x449, 0xffffffffffffffff, uint64(p384Uint1(x461)))
+ var x464 uint64
+ var x465 uint64
+ x464, x465 = bits.Sub64(x451, 0xffffffffffffffff, uint64(p384Uint1(x463)))
+ var x467 uint64
+ _, x467 = bits.Sub64(x453, uint64(0x0), uint64(p384Uint1(x465)))
+ var x468 uint64
+ p384CmovznzU64(&x468, p384Uint1(x467), x454, x441)
+ var x469 uint64
+ p384CmovznzU64(&x469, p384Uint1(x467), x456, x443)
+ var x470 uint64
+ p384CmovznzU64(&x470, p384Uint1(x467), x458, x445)
+ var x471 uint64
+ p384CmovznzU64(&x471, p384Uint1(x467), x460, x447)
+ var x472 uint64
+ p384CmovznzU64(&x472, p384Uint1(x467), x462, x449)
+ var x473 uint64
+ p384CmovznzU64(&x473, p384Uint1(x467), x464, x451)
+ out1[0] = x468
+ out1[1] = x469
+ out1[2] = x470
+ out1[3] = x471
+ out1[4] = x472
+ out1[5] = x473
+}
+
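+// Editorial note (not part of the generated code): p384Square is an
+// operand-specialized version of p384Mul, so the two must always agree:
+//
+//	var s, m p384MontgomeryDomainFieldElement
+//	// for any reduced a of type p384MontgomeryDomainFieldElement:
+//	p384Square(&s, &a)
+//	p384Mul(&m, &a, &a)
+//	// s and m now hold identical limbs.
+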
+// p384Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//   0 ≤ eval arg1 < m
+//   0 ≤ eval arg2 < m
+// Postconditions:
+//   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+//   0 ≤ eval out1 < m
+//
+func p384Add(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p384Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p384Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p384Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Add64(arg1[4], arg2[4], uint64(p384Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Add64(arg1[5], arg2[5], uint64(p384Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x1, 0xffffffff, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x3, 0xffffffff00000000, uint64(p384Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Sub64(x5, 0xfffffffffffffffe, uint64(p384Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Sub64(x7, 0xffffffffffffffff, uint64(p384Uint1(x18)))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Sub64(x9, 0xffffffffffffffff, uint64(p384Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Sub64(x11, 0xffffffffffffffff, uint64(p384Uint1(x22)))
+ var x26 uint64
+ _, x26 = bits.Sub64(uint64(p384Uint1(x12)), uint64(0x0), uint64(p384Uint1(x24)))
+ var x27 uint64
+ p384CmovznzU64(&x27, p384Uint1(x26), x13, x1)
+ var x28 uint64
+ p384CmovznzU64(&x28, p384Uint1(x26), x15, x3)
+ var x29 uint64
+ p384CmovznzU64(&x29, p384Uint1(x26), x17, x5)
+ var x30 uint64
+ p384CmovznzU64(&x30, p384Uint1(x26), x19, x7)
+ var x31 uint64
+ p384CmovznzU64(&x31, p384Uint1(x26), x21, x9)
+ var x32 uint64
+ p384CmovznzU64(&x32, p384Uint1(x26), x23, x11)
+ out1[0] = x27
+ out1[1] = x28
+ out1[2] = x29
+ out1[3] = x30
+ out1[4] = x31
+ out1[5] = x32
+}
+
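+// Editorial sketch (not part of the generated code): p384Add performs a
+// full-width addition, subtracts p once, and uses p384CmovznzU64 to select
+// the in-range value without branching. A single-word analogue of the same
+// branch-free pattern (toy modulus m, inputs a, b < m):
+//
+//	func addMod(a, b, m uint64) uint64 {
+//		s, c := bits.Add64(a, b, 0)
+//		d, bw := bits.Sub64(s, m, 0)
+//		_, bw = bits.Sub64(c, 0, bw) // bw == 1 iff a+b < m
+//		mask := -bw                  // all ones when the subtraction underflowed
+//		return (s & mask) | (d &^ mask)
+//	}
+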
+// p384Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//   0 ≤ eval arg1 < m
+//   0 ≤ eval arg2 < m
+// Postconditions:
+//   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+//   0 ≤ eval out1 < m
+//
+func p384Sub(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p384Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p384Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p384Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(arg1[4], arg2[4], uint64(p384Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(arg1[5], arg2[5], uint64(p384Uint1(x10)))
+ var x13 uint64
+ p384CmovznzU64(&x13, p384Uint1(x12), uint64(0x0), 0xffffffffffffffff)
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x1, (x13 & 0xffffffff), uint64(0x0))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x3, (x13 & 0xffffffff00000000), uint64(p384Uint1(x15)))
+ var x18 uint64
+ var x19 uint64
+ x18, x19 = bits.Add64(x5, (x13 & 0xfffffffffffffffe), uint64(p384Uint1(x17)))
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x7, x13, uint64(p384Uint1(x19)))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x9, x13, uint64(p384Uint1(x21)))
+ var x24 uint64
+ x24, _ = bits.Add64(x11, x13, uint64(p384Uint1(x23)))
+ out1[0] = x14
+ out1[1] = x16
+ out1[2] = x18
+ out1[3] = x20
+ out1[4] = x22
+ out1[5] = x24
+}
+
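+// Editorial sketch (not part of the generated code): p384Sub subtracts and
+// then adds p back, masked by the borrow, instead of branching. The same
+// idea on a single word (toy modulus m, inputs a, b < m):
+//
+//	func subMod(a, b, m uint64) uint64 {
+//		d, bw := bits.Sub64(a, b, 0)
+//		mask := -bw // all ones iff a < b
+//		r, _ := bits.Add64(d, m&mask, 0)
+//		return r
+//	}
+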
+// p384SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//   eval (from_montgomery out1) mod m = 1 mod m
+//   0 ≤ eval out1 < m
+//
+func p384SetOne(out1 *p384MontgomeryDomainFieldElement) {
+ out1[0] = 0xffffffff00000001
+ out1[1] = 0xffffffff
+ out1[2] = uint64(0x1)
+ out1[3] = uint64(0x0)
+ out1[4] = uint64(0x0)
+ out1[5] = uint64(0x0)
+}
+
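+// Editorial note (not part of the generated code): the limbs stored by
+// p384SetOne are R mod p for R = 2^384, i.e. the Montgomery form of 1:
+//
+//	// p as in the checkMul sketch above
+//	R := new(big.Int).Lsh(big.NewInt(1), 384)
+//	one := new(big.Int).Mod(R, p)
+//	// one == 2^128 + 2^96 - 2^32 + 1, whose little-endian 64-bit limbs are
+//	// 0xffffffff00000001, 0xffffffff, 0x1, 0x0, 0x0, 0x0, matching the
+//	// assignments below.
+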
+// p384FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//   0 ≤ eval arg1 < m
+// Postconditions:
+//   eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^6) mod m
+//   0 ≤ eval out1 < m
+//
+func p384FromMontgomery(out1 *p384NonMontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0x100000001)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x2, 0xfffffffffffffffe)
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x2, 0xffffffff00000000)
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x2, 0xffffffff)
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x15, x12, uint64(0x0))
+ var x18 uint64
+ var x19 uint64
+ x18, x19 = bits.Add64(x13, x10, uint64(p384Uint1(x17)))
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x11, x8, uint64(p384Uint1(x19)))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x9, x6, uint64(p384Uint1(x21)))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x7, x4, uint64(p384Uint1(x23)))
+ var x27 uint64
+ _, x27 = bits.Add64(x1, x14, uint64(0x0))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(uint64(0x0), x16, uint64(p384Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(uint64(0x0), x18, uint64(p384Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(uint64(0x0), x20, uint64(p384Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(uint64(0x0), x22, uint64(p384Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(uint64(0x0), x24, uint64(p384Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(uint64(0x0), (uint64(p384Uint1(x25)) + x5), uint64(p384Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x28, arg1[1], uint64(0x0))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x30, uint64(0x0), uint64(p384Uint1(x41)))
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x32, uint64(0x0), uint64(p384Uint1(x43)))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(x34, uint64(0x0), uint64(p384Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x36, uint64(0x0), uint64(p384Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x38, uint64(0x0), uint64(p384Uint1(x49)))
+ var x52 uint64
+ _, x52 = bits.Mul64(x40, 0x100000001)
+ var x54 uint64
+ var x55 uint64
+ x55, x54 = bits.Mul64(x52, 0xffffffffffffffff)
+ var x56 uint64
+ var x57 uint64
+ x57, x56 = bits.Mul64(x52, 0xffffffffffffffff)
+ var x58 uint64
+ var x59 uint64
+ x59, x58 = bits.Mul64(x52, 0xffffffffffffffff)
+ var x60 uint64
+ var x61 uint64
+ x61, x60 = bits.Mul64(x52, 0xfffffffffffffffe)
+ var x62 uint64
+ var x63 uint64
+ x63, x62 = bits.Mul64(x52, 0xffffffff00000000)
+ var x64 uint64
+ var x65 uint64
+ x65, x64 = bits.Mul64(x52, 0xffffffff)
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(x65, x62, uint64(0x0))
+ var x68 uint64
+ var x69 uint64
+ x68, x69 = bits.Add64(x63, x60, uint64(p384Uint1(x67)))
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x61, x58, uint64(p384Uint1(x69)))
+ var x72 uint64
+ var x73 uint64
+ x72, x73 = bits.Add64(x59, x56, uint64(p384Uint1(x71)))
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x57, x54, uint64(p384Uint1(x73)))
+ var x77 uint64
+ _, x77 = bits.Add64(x40, x64, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x42, x66, uint64(p384Uint1(x77)))
+ var x80 uint64
+ var x81 uint64
+ x80, x81 = bits.Add64(x44, x68, uint64(p384Uint1(x79)))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64(x46, x70, uint64(p384Uint1(x81)))
+ var x84 uint64
+ var x85 uint64
+ x84, x85 = bits.Add64(x48, x72, uint64(p384Uint1(x83)))
+ var x86 uint64
+ var x87 uint64
+ x86, x87 = bits.Add64(x50, x74, uint64(p384Uint1(x85)))
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64((uint64(p384Uint1(x51)) + uint64(p384Uint1(x39))), (uint64(p384Uint1(x75)) + x55), uint64(p384Uint1(x87)))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x78, arg1[2], uint64(0x0))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x80, uint64(0x0), uint64(p384Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x82, uint64(0x0), uint64(p384Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x84, uint64(0x0), uint64(p384Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x86, uint64(0x0), uint64(p384Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x88, uint64(0x0), uint64(p384Uint1(x99)))
+ var x102 uint64
+ _, x102 = bits.Mul64(x90, 0x100000001)
+ var x104 uint64
+ var x105 uint64
+ x105, x104 = bits.Mul64(x102, 0xffffffffffffffff)
+ var x106 uint64
+ var x107 uint64
+ x107, x106 = bits.Mul64(x102, 0xffffffffffffffff)
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x102, 0xffffffffffffffff)
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x102, 0xfffffffffffffffe)
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x102, 0xffffffff00000000)
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x102, 0xffffffff)
+ var x116 uint64
+ var x117 uint64
+ x116, x117 = bits.Add64(x115, x112, uint64(0x0))
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x113, x110, uint64(p384Uint1(x117)))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x111, x108, uint64(p384Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x109, x106, uint64(p384Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x107, x104, uint64(p384Uint1(x123)))
+ var x127 uint64
+ _, x127 = bits.Add64(x90, x114, uint64(0x0))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x92, x116, uint64(p384Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x94, x118, uint64(p384Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x96, x120, uint64(p384Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x98, x122, uint64(p384Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x100, x124, uint64(p384Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x138, x139 = bits.Add64((uint64(p384Uint1(x101)) + uint64(p384Uint1(x89))), (uint64(p384Uint1(x125)) + x105), uint64(p384Uint1(x137)))
+ var x140 uint64
+ var x141 uint64
+ x140, x141 = bits.Add64(x128, arg1[3], uint64(0x0))
+ var x142 uint64
+ var x143 uint64
+ x142, x143 = bits.Add64(x130, uint64(0x0), uint64(p384Uint1(x141)))
+ var x144 uint64
+ var x145 uint64
+ x144, x145 = bits.Add64(x132, uint64(0x0), uint64(p384Uint1(x143)))
+ var x146 uint64
+ var x147 uint64
+ x146, x147 = bits.Add64(x134, uint64(0x0), uint64(p384Uint1(x145)))
+ var x148 uint64
+ var x149 uint64
+ x148, x149 = bits.Add64(x136, uint64(0x0), uint64(p384Uint1(x147)))
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x138, uint64(0x0), uint64(p384Uint1(x149)))
+ var x152 uint64
+ _, x152 = bits.Mul64(x140, 0x100000001)
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x152, 0xffffffffffffffff)
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x152, 0xffffffffffffffff)
+ var x158 uint64
+ var x159 uint64
+ x159, x158 = bits.Mul64(x152, 0xffffffffffffffff)
+ var x160 uint64
+ var x161 uint64
+ x161, x160 = bits.Mul64(x152, 0xfffffffffffffffe)
+ var x162 uint64
+ var x163 uint64
+ x163, x162 = bits.Mul64(x152, 0xffffffff00000000)
+ var x164 uint64
+ var x165 uint64
+ x165, x164 = bits.Mul64(x152, 0xffffffff)
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x165, x162, uint64(0x0))
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x163, x160, uint64(p384Uint1(x167)))
+ var x170 uint64
+ var x171 uint64
+ x170, x171 = bits.Add64(x161, x158, uint64(p384Uint1(x169)))
+ var x172 uint64
+ var x173 uint64
+ x172, x173 = bits.Add64(x159, x156, uint64(p384Uint1(x171)))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x157, x154, uint64(p384Uint1(x173)))
+ var x177 uint64
+ _, x177 = bits.Add64(x140, x164, uint64(0x0))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Add64(x142, x166, uint64(p384Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Add64(x144, x168, uint64(p384Uint1(x179)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Add64(x146, x170, uint64(p384Uint1(x181)))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Add64(x148, x172, uint64(p384Uint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Add64(x150, x174, uint64(p384Uint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Add64((uint64(p384Uint1(x151)) + uint64(p384Uint1(x139))), (uint64(p384Uint1(x175)) + x155), uint64(p384Uint1(x187)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Add64(x178, arg1[4], uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x180, uint64(0x0), uint64(p384Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x182, uint64(0x0), uint64(p384Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x184, uint64(0x0), uint64(p384Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x186, uint64(0x0), uint64(p384Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x188, uint64(0x0), uint64(p384Uint1(x199)))
+ var x202 uint64
+ _, x202 = bits.Mul64(x190, 0x100000001)
+ var x204 uint64
+ var x205 uint64
+ x205, x204 = bits.Mul64(x202, 0xffffffffffffffff)
+ var x206 uint64
+ var x207 uint64
+ x207, x206 = bits.Mul64(x202, 0xffffffffffffffff)
+ var x208 uint64
+ var x209 uint64
+ x209, x208 = bits.Mul64(x202, 0xffffffffffffffff)
+ var x210 uint64
+ var x211 uint64
+ x211, x210 = bits.Mul64(x202, 0xfffffffffffffffe)
+ var x212 uint64
+ var x213 uint64
+ x213, x212 = bits.Mul64(x202, 0xffffffff00000000)
+ var x214 uint64
+ var x215 uint64
+ x215, x214 = bits.Mul64(x202, 0xffffffff)
+ var x216 uint64
+ var x217 uint64
+ x216, x217 = bits.Add64(x215, x212, uint64(0x0))
+ var x218 uint64
+ var x219 uint64
+ x218, x219 = bits.Add64(x213, x210, uint64(p384Uint1(x217)))
+ var x220 uint64
+ var x221 uint64
+ x220, x221 = bits.Add64(x211, x208, uint64(p384Uint1(x219)))
+ var x222 uint64
+ var x223 uint64
+ x222, x223 = bits.Add64(x209, x206, uint64(p384Uint1(x221)))
+ var x224 uint64
+ var x225 uint64
+ x224, x225 = bits.Add64(x207, x204, uint64(p384Uint1(x223)))
+ var x227 uint64
+ _, x227 = bits.Add64(x190, x214, uint64(0x0))
+ var x228 uint64
+ var x229 uint64
+ x228, x229 = bits.Add64(x192, x216, uint64(p384Uint1(x227)))
+ var x230 uint64
+ var x231 uint64
+ x230, x231 = bits.Add64(x194, x218, uint64(p384Uint1(x229)))
+ var x232 uint64
+ var x233 uint64
+ x232, x233 = bits.Add64(x196, x220, uint64(p384Uint1(x231)))
+ var x234 uint64
+ var x235 uint64
+ x234, x235 = bits.Add64(x198, x222, uint64(p384Uint1(x233)))
+ var x236 uint64
+ var x237 uint64
+ x236, x237 = bits.Add64(x200, x224, uint64(p384Uint1(x235)))
+ var x238 uint64
+ var x239 uint64
+ x238, x239 = bits.Add64((uint64(p384Uint1(x201)) + uint64(p384Uint1(x189))), (uint64(p384Uint1(x225)) + x205), uint64(p384Uint1(x237)))
+ var x240 uint64
+ var x241 uint64
+ x240, x241 = bits.Add64(x228, arg1[5], uint64(0x0))
+ var x242 uint64
+ var x243 uint64
+ x242, x243 = bits.Add64(x230, uint64(0x0), uint64(p384Uint1(x241)))
+ var x244 uint64
+ var x245 uint64
+ x244, x245 = bits.Add64(x232, uint64(0x0), uint64(p384Uint1(x243)))
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x234, uint64(0x0), uint64(p384Uint1(x245)))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x236, uint64(0x0), uint64(p384Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x238, uint64(0x0), uint64(p384Uint1(x249)))
+ var x252 uint64
+ _, x252 = bits.Mul64(x240, 0x100000001)
+ var x254 uint64
+ var x255 uint64
+ x255, x254 = bits.Mul64(x252, 0xffffffffffffffff)
+ var x256 uint64
+ var x257 uint64
+ x257, x256 = bits.Mul64(x252, 0xffffffffffffffff)
+ var x258 uint64
+ var x259 uint64
+ x259, x258 = bits.Mul64(x252, 0xffffffffffffffff)
+ var x260 uint64
+ var x261 uint64
+ x261, x260 = bits.Mul64(x252, 0xfffffffffffffffe)
+ var x262 uint64
+ var x263 uint64
+ x263, x262 = bits.Mul64(x252, 0xffffffff00000000)
+ var x264 uint64
+ var x265 uint64
+ x265, x264 = bits.Mul64(x252, 0xffffffff)
+ var x266 uint64
+ var x267 uint64
+ x266, x267 = bits.Add64(x265, x262, uint64(0x0))
+ var x268 uint64
+ var x269 uint64
+ x268, x269 = bits.Add64(x263, x260, uint64(p384Uint1(x267)))
+ var x270 uint64
+ var x271 uint64
+ x270, x271 = bits.Add64(x261, x258, uint64(p384Uint1(x269)))
+ var x272 uint64
+ var x273 uint64
+ x272, x273 = bits.Add64(x259, x256, uint64(p384Uint1(x271)))
+ var x274 uint64
+ var x275 uint64
+ x274, x275 = bits.Add64(x257, x254, uint64(p384Uint1(x273)))
+ var x277 uint64
+ _, x277 = bits.Add64(x240, x264, uint64(0x0))
+ var x278 uint64
+ var x279 uint64
+ x278, x279 = bits.Add64(x242, x266, uint64(p384Uint1(x277)))
+ var x280 uint64
+ var x281 uint64
+ x280, x281 = bits.Add64(x244, x268, uint64(p384Uint1(x279)))
+ var x282 uint64
+ var x283 uint64
+ x282, x283 = bits.Add64(x246, x270, uint64(p384Uint1(x281)))
+ var x284 uint64
+ var x285 uint64
+ x284, x285 = bits.Add64(x248, x272, uint64(p384Uint1(x283)))
+ var x286 uint64
+ var x287 uint64
+ x286, x287 = bits.Add64(x250, x274, uint64(p384Uint1(x285)))
+ var x288 uint64
+ var x289 uint64
+ x288, x289 = bits.Add64((uint64(p384Uint1(x251)) + uint64(p384Uint1(x239))), (uint64(p384Uint1(x275)) + x255), uint64(p384Uint1(x287)))
+ var x290 uint64
+ var x291 uint64
+ x290, x291 = bits.Sub64(x278, 0xffffffff, uint64(0x0))
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Sub64(x280, 0xffffffff00000000, uint64(p384Uint1(x291)))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Sub64(x282, 0xfffffffffffffffe, uint64(p384Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Sub64(x284, 0xffffffffffffffff, uint64(p384Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Sub64(x286, 0xffffffffffffffff, uint64(p384Uint1(x297)))
+ var x300 uint64
+ var x301 uint64
+ x300, x301 = bits.Sub64(x288, 0xffffffffffffffff, uint64(p384Uint1(x299)))
+ var x303 uint64
+ _, x303 = bits.Sub64(uint64(p384Uint1(x289)), uint64(0x0), uint64(p384Uint1(x301)))
+ var x304 uint64
+ p384CmovznzU64(&x304, p384Uint1(x303), x290, x278)
+ var x305 uint64
+ p384CmovznzU64(&x305, p384Uint1(x303), x292, x280)
+ var x306 uint64
+ p384CmovznzU64(&x306, p384Uint1(x303), x294, x282)
+ var x307 uint64
+ p384CmovznzU64(&x307, p384Uint1(x303), x296, x284)
+ var x308 uint64
+ p384CmovznzU64(&x308, p384Uint1(x303), x298, x286)
+ var x309 uint64
+ p384CmovznzU64(&x309, p384Uint1(x303), x300, x288)
+ out1[0] = x304
+ out1[1] = x305
+ out1[2] = x306
+ out1[3] = x307
+ out1[4] = x308
+ out1[5] = x309
+}
+
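+// Editorial note (not part of the generated code): p384FromMontgomery and
+// p384ToMontgomery (below) are inverses on reduced inputs, so a round trip
+// must return the original limbs:
+//
+//	var m p384MontgomeryDomainFieldElement
+//	var back p384NonMontgomeryDomainFieldElement
+//	// for any reduced n of type p384NonMontgomeryDomainFieldElement:
+//	p384ToMontgomery(&m, &n)
+//	p384FromMontgomery(&back, &m)
+//	// back == n, limb for limb.
+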
+// p384ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//   0 ≤ eval arg1 < m
+// Postconditions:
+//   eval (from_montgomery out1) mod m = eval arg1 mod m
+//   0 ≤ eval out1 < m
+//
+func p384ToMontgomery(out1 *p384MontgomeryDomainFieldElement, arg1 *p384NonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[0]
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x6, 0x200000000)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x6, 0xfffffffe00000000)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x6, 0x200000000)
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x6, 0xfffffffe00000001)
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x14, x11, uint64(0x0))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x12, x9, uint64(p384Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Add64(x10, x7, uint64(p384Uint1(x18)))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Add64(x8, x6, uint64(p384Uint1(x20)))
+ var x23 uint64
+ _, x23 = bits.Mul64(x13, 0x100000001)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x23, 0xffffffffffffffff)
+ var x27 uint64
+ var x28 uint64
+ x28, x27 = bits.Mul64(x23, 0xffffffffffffffff)
+ var x29 uint64
+ var x30 uint64
+ x30, x29 = bits.Mul64(x23, 0xffffffffffffffff)
+ var x31 uint64
+ var x32 uint64
+ x32, x31 = bits.Mul64(x23, 0xfffffffffffffffe)
+ var x33 uint64
+ var x34 uint64
+ x34, x33 = bits.Mul64(x23, 0xffffffff00000000)
+ var x35 uint64
+ var x36 uint64
+ x36, x35 = bits.Mul64(x23, 0xffffffff)
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x36, x33, uint64(0x0))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x34, x31, uint64(p384Uint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x41, x42 = bits.Add64(x32, x29, uint64(p384Uint1(x40)))
+ var x43 uint64
+ var x44 uint64
+ x43, x44 = bits.Add64(x30, x27, uint64(p384Uint1(x42)))
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x28, x25, uint64(p384Uint1(x44)))
+ var x48 uint64
+ _, x48 = bits.Add64(x13, x35, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x15, x37, uint64(p384Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x17, x39, uint64(p384Uint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x19, x41, uint64(p384Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x21, x43, uint64(p384Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(uint64(p384Uint1(x22)), x45, uint64(p384Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(uint64(0x0), (uint64(p384Uint1(x46)) + x26), uint64(p384Uint1(x58)))
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x1, 0x200000000)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x1, 0xfffffffe00000000)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x1, 0x200000000)
+ var x67 uint64
+ var x68 uint64
+ x68, x67 = bits.Mul64(x1, 0xfffffffe00000001)
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x68, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x66, x63, uint64(p384Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x64, x61, uint64(p384Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x62, x1, uint64(p384Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x49, x67, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x51, x69, uint64(p384Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x53, x71, uint64(p384Uint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x55, x73, uint64(p384Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x57, x75, uint64(p384Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x59, uint64(p384Uint1(x76)), uint64(p384Uint1(x86)))
+ var x89 uint64
+ _, x89 = bits.Mul64(x77, 0x100000001)
+ var x91 uint64
+ var x92 uint64
+ x92, x91 = bits.Mul64(x89, 0xffffffffffffffff)
+ var x93 uint64
+ var x94 uint64
+ x94, x93 = bits.Mul64(x89, 0xffffffffffffffff)
+ var x95 uint64
+ var x96 uint64
+ x96, x95 = bits.Mul64(x89, 0xffffffffffffffff)
+ var x97 uint64
+ var x98 uint64
+ x98, x97 = bits.Mul64(x89, 0xfffffffffffffffe)
+ var x99 uint64
+ var x100 uint64
+ x100, x99 = bits.Mul64(x89, 0xffffffff00000000)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x89, 0xffffffff)
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x102, x99, uint64(0x0))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x100, x97, uint64(p384Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x98, x95, uint64(p384Uint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x96, x93, uint64(p384Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x94, x91, uint64(p384Uint1(x110)))
+ var x114 uint64
+ _, x114 = bits.Add64(x77, x101, uint64(0x0))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x79, x103, uint64(p384Uint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x117, x118 = bits.Add64(x81, x105, uint64(p384Uint1(x116)))
+ var x119 uint64
+ var x120 uint64
+ x119, x120 = bits.Add64(x83, x107, uint64(p384Uint1(x118)))
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x85, x109, uint64(p384Uint1(x120)))
+ var x123 uint64
+ var x124 uint64
+ x123, x124 = bits.Add64(x87, x111, uint64(p384Uint1(x122)))
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64((uint64(p384Uint1(x88)) + uint64(p384Uint1(x60))), (uint64(p384Uint1(x112)) + x92), uint64(p384Uint1(x124)))
+ var x127 uint64
+ var x128 uint64
+ x128, x127 = bits.Mul64(x2, 0x200000000)
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x2, 0xfffffffe00000000)
+ var x131 uint64
+ var x132 uint64
+ x132, x131 = bits.Mul64(x2, 0x200000000)
+ var x133 uint64
+ var x134 uint64
+ x134, x133 = bits.Mul64(x2, 0xfffffffe00000001)
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x134, x131, uint64(0x0))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x132, x129, uint64(p384Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x130, x127, uint64(p384Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x128, x2, uint64(p384Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x115, x133, uint64(0x0))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x117, x135, uint64(p384Uint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x119, x137, uint64(p384Uint1(x146)))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x121, x139, uint64(p384Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x123, x141, uint64(p384Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x125, uint64(p384Uint1(x142)), uint64(p384Uint1(x152)))
+ var x155 uint64
+ _, x155 = bits.Mul64(x143, 0x100000001)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x155, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x155, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x162, x161 = bits.Mul64(x155, 0xffffffffffffffff)
+ var x163 uint64
+ var x164 uint64
+ x164, x163 = bits.Mul64(x155, 0xfffffffffffffffe)
+ var x165 uint64
+ var x166 uint64
+ x166, x165 = bits.Mul64(x155, 0xffffffff00000000)
+ var x167 uint64
+ var x168 uint64
+ x168, x167 = bits.Mul64(x155, 0xffffffff)
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x168, x165, uint64(0x0))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x166, x163, uint64(p384Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x164, x161, uint64(p384Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x162, x159, uint64(p384Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x160, x157, uint64(p384Uint1(x176)))
+ var x180 uint64
+ _, x180 = bits.Add64(x143, x167, uint64(0x0))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x145, x169, uint64(p384Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x147, x171, uint64(p384Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x149, x173, uint64(p384Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x151, x175, uint64(p384Uint1(x186)))
+ var x189 uint64
+ var x190 uint64
+ x189, x190 = bits.Add64(x153, x177, uint64(p384Uint1(x188)))
+ var x191 uint64
+ var x192 uint64
+ x191, x192 = bits.Add64((uint64(p384Uint1(x154)) + uint64(p384Uint1(x126))), (uint64(p384Uint1(x178)) + x158), uint64(p384Uint1(x190)))
+ var x193 uint64
+ var x194 uint64
+ x194, x193 = bits.Mul64(x3, 0x200000000)
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(x3, 0xfffffffe00000000)
+ var x197 uint64
+ var x198 uint64
+ x198, x197 = bits.Mul64(x3, 0x200000000)
+ var x199 uint64
+ var x200 uint64
+ x200, x199 = bits.Mul64(x3, 0xfffffffe00000001)
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x200, x197, uint64(0x0))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x198, x195, uint64(p384Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x196, x193, uint64(p384Uint1(x204)))
+ var x207 uint64
+ var x208 uint64
+ x207, x208 = bits.Add64(x194, x3, uint64(p384Uint1(x206)))
+ var x209 uint64
+ var x210 uint64
+ x209, x210 = bits.Add64(x181, x199, uint64(0x0))
+ var x211 uint64
+ var x212 uint64
+ x211, x212 = bits.Add64(x183, x201, uint64(p384Uint1(x210)))
+ var x213 uint64
+ var x214 uint64
+ x213, x214 = bits.Add64(x185, x203, uint64(p384Uint1(x212)))
+ var x215 uint64
+ var x216 uint64
+ x215, x216 = bits.Add64(x187, x205, uint64(p384Uint1(x214)))
+ var x217 uint64
+ var x218 uint64
+ x217, x218 = bits.Add64(x189, x207, uint64(p384Uint1(x216)))
+ var x219 uint64
+ var x220 uint64
+ x219, x220 = bits.Add64(x191, uint64(p384Uint1(x208)), uint64(p384Uint1(x218)))
+ var x221 uint64
+ _, x221 = bits.Mul64(x209, 0x100000001)
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x221, 0xffffffffffffffff)
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x221, 0xffffffffffffffff)
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x221, 0xffffffffffffffff)
+ var x229 uint64
+ var x230 uint64
+ x230, x229 = bits.Mul64(x221, 0xfffffffffffffffe)
+ var x231 uint64
+ var x232 uint64
+ x232, x231 = bits.Mul64(x221, 0xffffffff00000000)
+ var x233 uint64
+ var x234 uint64
+ x234, x233 = bits.Mul64(x221, 0xffffffff)
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x234, x231, uint64(0x0))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
+ var x246 uint64
+ _, x246 = bits.Add64(x209, x233, uint64(0x0))
+ var x247 uint64
+ var x248 uint64
+ x247, x248 = bits.Add64(x211, x235, uint64(p384Uint1(x246)))
+ var x249 uint64
+ var x250 uint64
+ x249, x250 = bits.Add64(x213, x237, uint64(p384Uint1(x248)))
+ var x251 uint64
+ var x252 uint64
+ x251, x252 = bits.Add64(x215, x239, uint64(p384Uint1(x250)))
+ var x253 uint64
+ var x254 uint64
+ x253, x254 = bits.Add64(x217, x241, uint64(p384Uint1(x252)))
+ var x255 uint64
+ var x256 uint64
+ x255, x256 = bits.Add64(x219, x243, uint64(p384Uint1(x254)))
+ var x257 uint64
+ var x258 uint64
+ x257, x258 = bits.Add64((uint64(p384Uint1(x220)) + uint64(p384Uint1(x192))), (uint64(p384Uint1(x244)) + x224), uint64(p384Uint1(x256)))
+ var x259 uint64
+ var x260 uint64
+ x260, x259 = bits.Mul64(x4, 0x200000000)
+ var x261 uint64
+ var x262 uint64
+ x262, x261 = bits.Mul64(x4, 0xfffffffe00000000)
+ var x263 uint64
+ var x264 uint64
+ x264, x263 = bits.Mul64(x4, 0x200000000)
+ var x265 uint64
+ var x266 uint64
+ x266, x265 = bits.Mul64(x4, 0xfffffffe00000001)
+ var x267 uint64
+ var x268 uint64
+ x267, x268 = bits.Add64(x266, x263, uint64(0x0))
+ var x269 uint64
+ var x270 uint64
+ x269, x270 = bits.Add64(x264, x261, uint64(p384Uint1(x268)))
+ var x271 uint64
+ var x272 uint64
+ x271, x272 = bits.Add64(x262, x259, uint64(p384Uint1(x270)))
+ var x273 uint64
+ var x274 uint64
+ x273, x274 = bits.Add64(x260, x4, uint64(p384Uint1(x272)))
+ var x275 uint64
+ var x276 uint64
+ x275, x276 = bits.Add64(x247, x265, uint64(0x0))
+ var x277 uint64
+ var x278 uint64
+ x277, x278 = bits.Add64(x249, x267, uint64(p384Uint1(x276)))
+ var x279 uint64
+ var x280 uint64
+ x279, x280 = bits.Add64(x251, x269, uint64(p384Uint1(x278)))
+ var x281 uint64
+ var x282 uint64
+ x281, x282 = bits.Add64(x253, x271, uint64(p384Uint1(x280)))
+ var x283 uint64
+ var x284 uint64
+ x283, x284 = bits.Add64(x255, x273, uint64(p384Uint1(x282)))
+ var x285 uint64
+ var x286 uint64
+ x285, x286 = bits.Add64(x257, uint64(p384Uint1(x274)), uint64(p384Uint1(x284)))
+ var x287 uint64
+ _, x287 = bits.Mul64(x275, 0x100000001)
+ var x289 uint64
+ var x290 uint64
+ x290, x289 = bits.Mul64(x287, 0xffffffffffffffff)
+ var x291 uint64
+ var x292 uint64
+ x292, x291 = bits.Mul64(x287, 0xffffffffffffffff)
+ var x293 uint64
+ var x294 uint64
+ x294, x293 = bits.Mul64(x287, 0xffffffffffffffff)
+ var x295 uint64
+ var x296 uint64
+ x296, x295 = bits.Mul64(x287, 0xfffffffffffffffe)
+ var x297 uint64
+ var x298 uint64
+ x298, x297 = bits.Mul64(x287, 0xffffffff00000000)
+ var x299 uint64
+ var x300 uint64
+ x300, x299 = bits.Mul64(x287, 0xffffffff)
+ var x301 uint64
+ var x302 uint64
+ x301, x302 = bits.Add64(x300, x297, uint64(0x0))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x298, x295, uint64(p384Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x296, x293, uint64(p384Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x294, x291, uint64(p384Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x292, x289, uint64(p384Uint1(x308)))
+ var x312 uint64
+ _, x312 = bits.Add64(x275, x299, uint64(0x0))
+ var x313 uint64
+ var x314 uint64
+ x313, x314 = bits.Add64(x277, x301, uint64(p384Uint1(x312)))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x279, x303, uint64(p384Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x281, x305, uint64(p384Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x283, x307, uint64(p384Uint1(x318)))
+ var x321 uint64
+ var x322 uint64
+ x321, x322 = bits.Add64(x285, x309, uint64(p384Uint1(x320)))
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64((uint64(p384Uint1(x286)) + uint64(p384Uint1(x258))), (uint64(p384Uint1(x310)) + x290), uint64(p384Uint1(x322)))
+ var x325 uint64
+ var x326 uint64
+ x326, x325 = bits.Mul64(x5, 0x200000000)
+ var x327 uint64
+ var x328 uint64
+ x328, x327 = bits.Mul64(x5, 0xfffffffe00000000)
+ var x329 uint64
+ var x330 uint64
+ x330, x329 = bits.Mul64(x5, 0x200000000)
+ var x331 uint64
+ var x332 uint64
+ x332, x331 = bits.Mul64(x5, 0xfffffffe00000001)
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x332, x329, uint64(0x0))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x330, x327, uint64(p384Uint1(x334)))
+ var x337 uint64
+ var x338 uint64
+ x337, x338 = bits.Add64(x328, x325, uint64(p384Uint1(x336)))
+ var x339 uint64
+ var x340 uint64
+ x339, x340 = bits.Add64(x326, x5, uint64(p384Uint1(x338)))
+ var x341 uint64
+ var x342 uint64
+ x341, x342 = bits.Add64(x313, x331, uint64(0x0))
+ var x343 uint64
+ var x344 uint64
+ x343, x344 = bits.Add64(x315, x333, uint64(p384Uint1(x342)))
+ var x345 uint64
+ var x346 uint64
+ x345, x346 = bits.Add64(x317, x335, uint64(p384Uint1(x344)))
+ var x347 uint64
+ var x348 uint64
+ x347, x348 = bits.Add64(x319, x337, uint64(p384Uint1(x346)))
+ var x349 uint64
+ var x350 uint64
+ x349, x350 = bits.Add64(x321, x339, uint64(p384Uint1(x348)))
+ var x351 uint64
+ var x352 uint64
+ x351, x352 = bits.Add64(x323, uint64(p384Uint1(x340)), uint64(p384Uint1(x350)))
+ var x353 uint64
+ _, x353 = bits.Mul64(x341, 0x100000001)
+ var x355 uint64
+ var x356 uint64
+ x356, x355 = bits.Mul64(x353, 0xffffffffffffffff)
+ var x357 uint64
+ var x358 uint64
+ x358, x357 = bits.Mul64(x353, 0xffffffffffffffff)
+ var x359 uint64
+ var x360 uint64
+ x360, x359 = bits.Mul64(x353, 0xffffffffffffffff)
+ var x361 uint64
+ var x362 uint64
+ x362, x361 = bits.Mul64(x353, 0xfffffffffffffffe)
+ var x363 uint64
+ var x364 uint64
+ x364, x363 = bits.Mul64(x353, 0xffffffff00000000)
+ var x365 uint64
+ var x366 uint64
+ x366, x365 = bits.Mul64(x353, 0xffffffff)
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x366, x363, uint64(0x0))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x364, x361, uint64(p384Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x362, x359, uint64(p384Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x360, x357, uint64(p384Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x358, x355, uint64(p384Uint1(x374)))
+ var x378 uint64
+ _, x378 = bits.Add64(x341, x365, uint64(0x0))
+ var x379 uint64
+ var x380 uint64
+ x379, x380 = bits.Add64(x343, x367, uint64(p384Uint1(x378)))
+ var x381 uint64
+ var x382 uint64
+ x381, x382 = bits.Add64(x345, x369, uint64(p384Uint1(x380)))
+ var x383 uint64
+ var x384 uint64
+ x383, x384 = bits.Add64(x347, x371, uint64(p384Uint1(x382)))
+ var x385 uint64
+ var x386 uint64
+ x385, x386 = bits.Add64(x349, x373, uint64(p384Uint1(x384)))
+ var x387 uint64
+ var x388 uint64
+ x387, x388 = bits.Add64(x351, x375, uint64(p384Uint1(x386)))
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64((uint64(p384Uint1(x352)) + uint64(p384Uint1(x324))), (uint64(p384Uint1(x376)) + x356), uint64(p384Uint1(x388)))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Sub64(x379, 0xffffffff, uint64(0x0))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Sub64(x381, 0xffffffff00000000, uint64(p384Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Sub64(x383, 0xfffffffffffffffe, uint64(p384Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Sub64(x385, 0xffffffffffffffff, uint64(p384Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x399, x400 = bits.Sub64(x387, 0xffffffffffffffff, uint64(p384Uint1(x398)))
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Sub64(x389, 0xffffffffffffffff, uint64(p384Uint1(x400)))
+ var x404 uint64
+ _, x404 = bits.Sub64(uint64(p384Uint1(x390)), uint64(0x0), uint64(p384Uint1(x402)))
+ var x405 uint64
+ p384CmovznzU64(&x405, p384Uint1(x404), x391, x379)
+ var x406 uint64
+ p384CmovznzU64(&x406, p384Uint1(x404), x393, x381)
+ var x407 uint64
+ p384CmovznzU64(&x407, p384Uint1(x404), x395, x383)
+ var x408 uint64
+ p384CmovznzU64(&x408, p384Uint1(x404), x397, x385)
+ var x409 uint64
+ p384CmovznzU64(&x409, p384Uint1(x404), x399, x387)
+ var x410 uint64
+ p384CmovznzU64(&x410, p384Uint1(x404), x401, x389)
+ out1[0] = x405
+ out1[1] = x406
+ out1[2] = x407
+ out1[3] = x408
+ out1[4] = x409
+ out1[5] = x410
+}
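
Note (not part of the patch): p384ToMontgomery produces the Montgomery form of its input, i.e. out1 encodes arg1 * R mod p with R = 2^384 (six 64-bit limbs), and p384FromMontgomery multiplies by R^-1 to undo it. A small math/big sketch of that relationship, a sanity check rather than an implementation:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// P-384 prime: p = 2^384 - 2^128 - 2^96 + 2^32 - 1.
	p := new(big.Int).Lsh(big.NewInt(1), 384)
	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 128))
	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 96))
	p.Add(p, new(big.Int).Lsh(big.NewInt(1), 32))
	p.Sub(p, big.NewInt(1))

	r := new(big.Int).Lsh(big.NewInt(1), 384) // R = 2^384
	rInv := new(big.Int).ModInverse(r, p)

	a := big.NewInt(42)
	aMont := new(big.Int).Mod(new(big.Int).Mul(a, r), p)       // to Montgomery: a*R mod p
	back := new(big.Int).Mod(new(big.Int).Mul(aMont, rInv), p) // from Montgomery: back to a
	fmt.Println(back.Cmp(a) == 0)                              // true
}
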
+
+// p384Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// Output Bounds:
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p384Selectznz(out1 *[6]uint64, arg1 p384Uint1, arg2 *[6]uint64, arg3 *[6]uint64) {
+ var x1 uint64
+ p384CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p384CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p384CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p384CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ var x5 uint64
+ p384CmovznzU64(&x5, arg1, arg2[4], arg3[4])
+ var x6 uint64
+ p384CmovznzU64(&x6, arg1, arg2[5], arg3[5])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+ out1[4] = x5
+ out1[5] = x6
+}
+
+// p384ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// Postconditions:
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..47]
+//
+// Input Bounds:
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// Output Bounds:
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+func p384ToBytes(out1 *[48]uint8, arg1 *[6]uint64) {
+ x1 := arg1[5]
+ x2 := arg1[4]
+ x3 := arg1[3]
+ x4 := arg1[2]
+ x5 := arg1[1]
+ x6 := arg1[0]
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := (x16 >> 8)
+ x19 := (uint8(x18) & 0xff)
+ x20 := uint8((x18 >> 8))
+ x21 := (uint8(x5) & 0xff)
+ x22 := (x5 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := (x30 >> 8)
+ x33 := (uint8(x32) & 0xff)
+ x34 := uint8((x32 >> 8))
+ x35 := (uint8(x4) & 0xff)
+ x36 := (x4 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := (x44 >> 8)
+ x47 := (uint8(x46) & 0xff)
+ x48 := uint8((x46 >> 8))
+ x49 := (uint8(x3) & 0xff)
+ x50 := (x3 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := (x58 >> 8)
+ x61 := (uint8(x60) & 0xff)
+ x62 := uint8((x60 >> 8))
+ x63 := (uint8(x2) & 0xff)
+ x64 := (x2 >> 8)
+ x65 := (uint8(x64) & 0xff)
+ x66 := (x64 >> 8)
+ x67 := (uint8(x66) & 0xff)
+ x68 := (x66 >> 8)
+ x69 := (uint8(x68) & 0xff)
+ x70 := (x68 >> 8)
+ x71 := (uint8(x70) & 0xff)
+ x72 := (x70 >> 8)
+ x73 := (uint8(x72) & 0xff)
+ x74 := (x72 >> 8)
+ x75 := (uint8(x74) & 0xff)
+ x76 := uint8((x74 >> 8))
+ x77 := (uint8(x1) & 0xff)
+ x78 := (x1 >> 8)
+ x79 := (uint8(x78) & 0xff)
+ x80 := (x78 >> 8)
+ x81 := (uint8(x80) & 0xff)
+ x82 := (x80 >> 8)
+ x83 := (uint8(x82) & 0xff)
+ x84 := (x82 >> 8)
+ x85 := (uint8(x84) & 0xff)
+ x86 := (x84 >> 8)
+ x87 := (uint8(x86) & 0xff)
+ x88 := (x86 >> 8)
+ x89 := (uint8(x88) & 0xff)
+ x90 := uint8((x88 >> 8))
+ out1[0] = x7
+ out1[1] = x9
+ out1[2] = x11
+ out1[3] = x13
+ out1[4] = x15
+ out1[5] = x17
+ out1[6] = x19
+ out1[7] = x20
+ out1[8] = x21
+ out1[9] = x23
+ out1[10] = x25
+ out1[11] = x27
+ out1[12] = x29
+ out1[13] = x31
+ out1[14] = x33
+ out1[15] = x34
+ out1[16] = x35
+ out1[17] = x37
+ out1[18] = x39
+ out1[19] = x41
+ out1[20] = x43
+ out1[21] = x45
+ out1[22] = x47
+ out1[23] = x48
+ out1[24] = x49
+ out1[25] = x51
+ out1[26] = x53
+ out1[27] = x55
+ out1[28] = x57
+ out1[29] = x59
+ out1[30] = x61
+ out1[31] = x62
+ out1[32] = x63
+ out1[33] = x65
+ out1[34] = x67
+ out1[35] = x69
+ out1[36] = x71
+ out1[37] = x73
+ out1[38] = x75
+ out1[39] = x76
+ out1[40] = x77
+ out1[41] = x79
+ out1[42] = x81
+ out1[43] = x83
+ out1[44] = x85
+ out1[45] = x87
+ out1[46] = x89
+ out1[47] = x90
+}
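
Note (not part of the patch): the postcondition above describes a plain little-endian dump, limb 0 first, least-significant byte first, eight bytes per limb. An equivalent way to express the same layout with encoding/binary, purely for illustration (the helper name is hypothetical):

package main

import (
	"encoding/binary"
	"fmt"
)

// limbsToLE mirrors the byte layout produced by p384ToBytes: limb 0 first,
// each limb written least-significant byte first. Illustrative helper only.
func limbsToLE(limbs [6]uint64) (out [48]byte) {
	for i, l := range limbs {
		binary.LittleEndian.PutUint64(out[8*i:], l)
	}
	return out
}

func main() {
	b := limbsToLE([6]uint64{1, 2, 3, 4, 5, 6})
	fmt.Println(b[0], b[8], b[16]) // 1 2 3
}
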
+
+// p384FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+// 0 ≤ bytes_eval arg1 < m
+// Postconditions:
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+// Output Bounds:
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p384FromBytes(out1 *[6]uint64, arg1 *[48]uint8) {
+ x1 := (uint64(arg1[47]) << 56)
+ x2 := (uint64(arg1[46]) << 48)
+ x3 := (uint64(arg1[45]) << 40)
+ x4 := (uint64(arg1[44]) << 32)
+ x5 := (uint64(arg1[43]) << 24)
+ x6 := (uint64(arg1[42]) << 16)
+ x7 := (uint64(arg1[41]) << 8)
+ x8 := arg1[40]
+ x9 := (uint64(arg1[39]) << 56)
+ x10 := (uint64(arg1[38]) << 48)
+ x11 := (uint64(arg1[37]) << 40)
+ x12 := (uint64(arg1[36]) << 32)
+ x13 := (uint64(arg1[35]) << 24)
+ x14 := (uint64(arg1[34]) << 16)
+ x15 := (uint64(arg1[33]) << 8)
+ x16 := arg1[32]
+ x17 := (uint64(arg1[31]) << 56)
+ x18 := (uint64(arg1[30]) << 48)
+ x19 := (uint64(arg1[29]) << 40)
+ x20 := (uint64(arg1[28]) << 32)
+ x21 := (uint64(arg1[27]) << 24)
+ x22 := (uint64(arg1[26]) << 16)
+ x23 := (uint64(arg1[25]) << 8)
+ x24 := arg1[24]
+ x25 := (uint64(arg1[23]) << 56)
+ x26 := (uint64(arg1[22]) << 48)
+ x27 := (uint64(arg1[21]) << 40)
+ x28 := (uint64(arg1[20]) << 32)
+ x29 := (uint64(arg1[19]) << 24)
+ x30 := (uint64(arg1[18]) << 16)
+ x31 := (uint64(arg1[17]) << 8)
+ x32 := arg1[16]
+ x33 := (uint64(arg1[15]) << 56)
+ x34 := (uint64(arg1[14]) << 48)
+ x35 := (uint64(arg1[13]) << 40)
+ x36 := (uint64(arg1[12]) << 32)
+ x37 := (uint64(arg1[11]) << 24)
+ x38 := (uint64(arg1[10]) << 16)
+ x39 := (uint64(arg1[9]) << 8)
+ x40 := arg1[8]
+ x41 := (uint64(arg1[7]) << 56)
+ x42 := (uint64(arg1[6]) << 48)
+ x43 := (uint64(arg1[5]) << 40)
+ x44 := (uint64(arg1[4]) << 32)
+ x45 := (uint64(arg1[3]) << 24)
+ x46 := (uint64(arg1[2]) << 16)
+ x47 := (uint64(arg1[1]) << 8)
+ x48 := arg1[0]
+ x49 := (x47 + uint64(x48))
+ x50 := (x46 + x49)
+ x51 := (x45 + x50)
+ x52 := (x44 + x51)
+ x53 := (x43 + x52)
+ x54 := (x42 + x53)
+ x55 := (x41 + x54)
+ x56 := (x39 + uint64(x40))
+ x57 := (x38 + x56)
+ x58 := (x37 + x57)
+ x59 := (x36 + x58)
+ x60 := (x35 + x59)
+ x61 := (x34 + x60)
+ x62 := (x33 + x61)
+ x63 := (x31 + uint64(x32))
+ x64 := (x30 + x63)
+ x65 := (x29 + x64)
+ x66 := (x28 + x65)
+ x67 := (x27 + x66)
+ x68 := (x26 + x67)
+ x69 := (x25 + x68)
+ x70 := (x23 + uint64(x24))
+ x71 := (x22 + x70)
+ x72 := (x21 + x71)
+ x73 := (x20 + x72)
+ x74 := (x19 + x73)
+ x75 := (x18 + x74)
+ x76 := (x17 + x75)
+ x77 := (x15 + uint64(x16))
+ x78 := (x14 + x77)
+ x79 := (x13 + x78)
+ x80 := (x12 + x79)
+ x81 := (x11 + x80)
+ x82 := (x10 + x81)
+ x83 := (x9 + x82)
+ x84 := (x7 + uint64(x8))
+ x85 := (x6 + x84)
+ x86 := (x5 + x85)
+ x87 := (x4 + x86)
+ x88 := (x3 + x87)
+ x89 := (x2 + x88)
+ x90 := (x1 + x89)
+ out1[0] = x55
+ out1[1] = x62
+ out1[2] = x69
+ out1[3] = x76
+ out1[4] = x83
+ out1[5] = x90
+}
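
Note (not part of the patch): p384FromBytes is the exact inverse of p384ToBytes, reassembling each group of eight little-endian bytes into one limb. The element wrappers then reverse the whole buffer into the big-endian encoding used by the public API, as p521InvertEndianness does later in this patch. A self-contained round-trip sketch with hypothetical helpers:

package main

import (
	"encoding/binary"
	"fmt"
)

// limbsToLE and limbsFromLE mirror the p384ToBytes/p384FromBytes byte layout.
func limbsToLE(limbs [6]uint64) (out [48]byte) {
	for i, l := range limbs {
		binary.LittleEndian.PutUint64(out[8*i:], l)
	}
	return out
}

func limbsFromLE(b [48]byte) (limbs [6]uint64) {
	for i := range limbs {
		limbs[i] = binary.LittleEndian.Uint64(b[8*i:])
	}
	return limbs
}

func main() {
	in := [6]uint64{0x1122334455667788, 2, 3, 4, 5, 6}
	fmt.Println(limbsFromLE(limbsToLE(in)) == in) // true
}
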
diff --git a/src/crypto/elliptic/internal/fiat/p384_invert.go b/src/crypto/elliptic/internal/fiat/p384_invert.go
new file mode 100644
index 0000000..24169e9
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/p384_invert.go
@@ -0,0 +1,102 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by addchain. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *P384Element) Invert(x *P384Element) *P384Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of 15 multiplications and 383 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.3.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // x12 = _111111 << 6 + _111111
+ // x24 = x12 << 12 + x12
+ // x30 = x24 << 6 + _111111
+ // x31 = 2*x30 + 1
+ // x32 = 2*x31 + 1
+ // x63 = x32 << 31 + x31
+ // x126 = x63 << 63 + x63
+ // x252 = x126 << 126 + x126
+ // x255 = x252 << 3 + _111
+ // i397 = ((x255 << 33 + x32) << 94 + x30) << 2
+ // return 1 + i397
+ //
+
+ var z = new(P384Element).Set(e)
+ var t0 = new(P384Element)
+ var t1 = new(P384Element)
+ var t2 = new(P384Element)
+ var t3 = new(P384Element)
+
+ z.Square(x)
+ z.Mul(x, z)
+ z.Square(z)
+ t1.Mul(x, z)
+ z.Square(t1)
+ for s := 1; s < 3; s++ {
+ z.Square(z)
+ }
+ z.Mul(t1, z)
+ t0.Square(z)
+ for s := 1; s < 6; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(z, t0)
+ t2.Square(t0)
+ for s := 1; s < 12; s++ {
+ t2.Square(t2)
+ }
+ t0.Mul(t0, t2)
+ for s := 0; s < 6; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t2.Mul(x, t0)
+ t0.Square(t2)
+ t0.Mul(x, t0)
+ t3.Square(t0)
+ for s := 1; s < 31; s++ {
+ t3.Square(t3)
+ }
+ t2.Mul(t2, t3)
+ t3.Square(t2)
+ for s := 1; s < 63; s++ {
+ t3.Square(t3)
+ }
+ t2.Mul(t2, t3)
+ t3.Square(t2)
+ for s := 1; s < 126; s++ {
+ t3.Square(t3)
+ }
+ t2.Mul(t2, t3)
+ for s := 0; s < 3; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ for s := 0; s < 33; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 94; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 2; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+
+ return e.Set(z)
+}
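
Note (not part of the patch): functionally the chain computes x^(p-2) mod p, which by Fermat's little theorem is the inverse of x for nonzero x, and 0 when x = 0. A quick math/big cross-check of that identity, purely illustrative and not constant time:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// P-384 prime: p = 2^384 - 2^128 - 2^96 + 2^32 - 1.
	p := new(big.Int).Lsh(big.NewInt(1), 384)
	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 128))
	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 96))
	p.Add(p, new(big.Int).Lsh(big.NewInt(1), 32))
	p.Sub(p, big.NewInt(1))

	x := big.NewInt(0xdeadbeef)
	fermat := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p) // x^(p-2) mod p
	euclid := new(big.Int).ModInverse(x, p)                              // extended Euclid
	fmt.Println(fermat.Cmp(euclid) == 0)                                 // true
}
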
diff --git a/src/crypto/elliptic/internal/fiat/p521.go b/src/crypto/elliptic/internal/fiat/p521.go
new file mode 100644
index 0000000..3d12117
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/p521.go
@@ -0,0 +1,135 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// P521Element is an integer modulo 2^521 - 1.
+//
+// The zero value is a valid zero element.
+type P521Element struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x p521MontgomeryDomainFieldElement
+}
+
+const p521ElementLen = 66
+
+type p521UntypedFieldElement = [9]uint64
+
+// One sets e = 1, and returns e.
+func (e *P521Element) One() *P521Element {
+ p521SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *P521Element) Equal(t *P521Element) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+var p521ZeroEncoding = new(P521Element).Bytes()
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *P521Element) IsZero() int {
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, p521ZeroEncoding)
+}
+
+// Set sets e = t, and returns e.
+func (e *P521Element) Set(t *P521Element) *P521Element {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the 66-byte big-endian encoding of e.
+func (e *P521Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p521ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *P521Element) bytes(out *[p521ElementLen]byte) []byte {
+ var tmp p521NonMontgomeryDomainFieldElement
+ p521FromMontgomery(&tmp, &e.x)
+ p521ToBytes(out, (*p521UntypedFieldElement)(&tmp))
+ p521InvertEndianness(out[:])
+ return out[:]
+}
+
+// p521MinusOneEncoding is the encoding of -1 mod p, so p - 1, the
+// highest canonical encoding. It is used by SetBytes to check for non-canonical
+// encodings such as p + k, 2p + k, etc.
+var p521MinusOneEncoding = new(P521Element).Sub(
+ new(P521Element), new(P521Element).One()).Bytes()
+
+// SetBytes sets e = v, where v is a big-endian 66-byte encoding, and returns e.
+// If v is not 66 bytes or it encodes a value higher than 2^521 - 1,
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *P521Element) SetBytes(v []byte) (*P521Element, error) {
+ if len(v) != p521ElementLen {
+ return nil, errors.New("invalid P521Element encoding")
+ }
+ for i := range v {
+ if v[i] < p521MinusOneEncoding[i] {
+ break
+ }
+ if v[i] > p521MinusOneEncoding[i] {
+ return nil, errors.New("invalid P521Element encoding")
+ }
+ }
+ var in [p521ElementLen]byte
+ copy(in[:], v)
+ p521InvertEndianness(in[:])
+ var tmp p521NonMontgomeryDomainFieldElement
+ p521FromBytes((*p521UntypedFieldElement)(&tmp), &in)
+ p521ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
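
Note (not part of the patch): the byte-by-byte comparison against p521MinusOneEncoding is what rejects non-canonical inputs. For example, the 66-byte encoding of the modulus p = 2^521 - 1 itself (0x01 followed by 65 bytes of 0xff) matches p - 1 on every byte except the last, where 0xff > 0xfe, so the loop returns an error. A hypothetical test inside package fiat sketching that check:

package fiat

import "testing"

// TestP521RejectsModulus is an illustrative sketch: the big-endian encoding of
// p = 2^521 - 1 is non-canonical and must be rejected by SetBytes.
func TestP521RejectsModulus(t *testing.T) {
	pEnc := make([]byte, 66)
	for i := range pEnc {
		pEnc[i] = 0xff
	}
	pEnc[0] = 0x01 // 0x01ff...ff == 2^521 - 1
	if _, err := new(P521Element).SetBytes(pEnc); err == nil {
		t.Fatal("expected the encoding of p itself to be rejected")
	}
}
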
+
+// Add sets e = t1 + t2, and returns e.
+func (e *P521Element) Add(t1, t2 *P521Element) *P521Element {
+ p521Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *P521Element) Sub(t1, t2 *P521Element) *P521Element {
+ p521Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *P521Element) Mul(t1, t2 *P521Element) *P521Element {
+ p521Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *P521Element) Square(t *P521Element) *P521Element {
+ p521Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *P521Element) Select(a, b *P521Element, cond int) *P521Element {
+ p521Selectznz((*p521UntypedFieldElement)(&v.x), p521Uint1(cond),
+ (*p521UntypedFieldElement)(&b.x), (*p521UntypedFieldElement)(&a.x))
+ return v
+}
+
+func p521InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
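
Note (not part of the patch): crypto/elliptic/internal/fiat is an internal package, so P521Element is only reachable from within the standard library. A minimal sketch of how the API above composes, written as a hypothetical test inside package fiat:

package fiat

import "testing"

// TestP521ElementSketch exercises the basic P521Element operations defined above.
func TestP521ElementSketch(t *testing.T) {
	one := new(P521Element).One()
	two := new(P521Element).Add(one, one)
	four := new(P521Element).Mul(two, two)
	if four.Equal(new(P521Element).Square(two)) != 1 {
		t.Fatal("2*2 and 2^2 disagree")
	}
	// Round-trip through the canonical 66-byte big-endian encoding.
	got, err := new(P521Element).SetBytes(four.Bytes())
	if err != nil || got.Equal(four) != 1 {
		t.Fatal("Bytes/SetBytes round trip failed")
	}
	// Select picks a when cond == 1 and b when cond == 0.
	v := new(P521Element).Select(one, two, 0)
	if v.Equal(two) != 1 {
		t.Fatal("Select with cond == 0 should return b")
	}
}
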
diff --git a/src/crypto/elliptic/internal/fiat/p521_fiat64.go b/src/crypto/elliptic/internal/fiat/p521_fiat64.go
new file mode 100644
index 0000000..9f4f290
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/p521_fiat64.go
@@ -0,0 +1,5509 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p521 64 '2^521 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p521
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff (from "2^521 - 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) + (z[6] << 0x180) + (z[7] << 0x1c0) + (z[8] << 2^9)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) + (z[32] << 256) + (z[33] << 0x108) + (z[34] << 0x110) + (z[35] << 0x118) + (z[36] << 0x120) + (z[37] << 0x128) + (z[38] << 0x130) + (z[39] << 0x138) + (z[40] << 0x140) + (z[41] << 0x148) + (z[42] << 0x150) + (z[43] << 0x158) + (z[44] << 0x160) + (z[45] << 0x168) + (z[46] << 0x170) + (z[47] << 0x178) + (z[48] << 0x180) + (z[49] << 0x188) + (z[50] << 0x190) + (z[51] << 0x198) + (z[52] << 0x1a0) + (z[53] << 0x1a8) + (z[54] << 0x1b0) + (z[55] << 0x1b8) + (z[56] << 0x1c0) + (z[57] << 0x1c8) + (z[58] << 0x1d0) + (z[59] << 0x1d8) + (z[60] << 0x1e0) + (z[61] << 0x1e8) + (z[62] << 0x1f0) + (z[63] << 0x1f8) + (z[64] << 2^9) + (z[65] << 0x208)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) + (z[6] << 0x180) + (z[7] << 0x1c0) + (z[8] << 2^9) in
+//
+// if x1 & (2^576-1) < 2^575 then x1 & (2^576-1) else (x1 & (2^576-1)) - 2^576
+
+package fiat
+
+import "math/bits"
+
+type p521Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p521Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p521MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p521MontgomeryDomainFieldElement [9]uint64
+
+// The type p521NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p521NonMontgomeryDomainFieldElement [9]uint64
+
+// p521CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+// Output Bounds:
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p521CmovznzU64(out1 *uint64, arg1 p521Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
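
Note (not part of the patch): the multiply-by-all-ones trick keeps the move branch-free. arg1 * 0xffffffffffffffff wraps to 0x0 when arg1 == 0 and to all ones when arg1 == 1, so the output is chosen purely by masking, with no data-dependent branch. A standalone sketch of the same idea (the generated function above is the one actually used):

package main

import "fmt"

// cmovznz returns ifZero when cond == 0 and ifNonZero when cond == 1,
// using the same branch-free mask construction as p521CmovznzU64.
func cmovznz(cond, ifZero, ifNonZero uint64) uint64 {
	mask := cond * 0xffffffffffffffff // 0x0 or 0xffffffffffffffff
	return (mask & ifNonZero) | (^mask & ifZero)
}

func main() {
	fmt.Println(cmovznz(0, 10, 20)) // 10
	fmt.Println(cmovznz(1, 10, 20)) // 20
}
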
+
+// p521Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+// Postconditions:
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+//
+func p521Mul(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[6]
+ x7 := arg1[7]
+ x8 := arg1[8]
+ x9 := arg1[0]
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x9, arg2[8])
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x9, arg2[7])
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x9, arg2[6])
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x9, arg2[5])
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x9, arg2[4])
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x9, arg2[3])
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x9, arg2[2])
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x9, arg2[1])
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x9, arg2[0])
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x23, x20, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x21, x18, uint64(p521Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x19, x16, uint64(p521Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x17, x14, uint64(p521Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x15, x12, uint64(p521Uint1(x39)))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x13, x10, uint64(p521Uint1(x41)))
+ x44 := (uint64(p521Uint1(x43)) + x11)
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x26, 0x1ff)
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x51 uint64
+ var x52 uint64
+ x52, x51 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x53 uint64
+ var x54 uint64
+ x54, x53 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x55 uint64
+ var x56 uint64
+ x56, x55 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x57 uint64
+ var x58 uint64
+ x58, x57 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x62, x59, uint64(0x0))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x60, x57, uint64(p521Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x58, x55, uint64(p521Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x56, x53, uint64(p521Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x54, x51, uint64(p521Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x52, x49, uint64(p521Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x50, x47, uint64(p521Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x48, x45, uint64(p521Uint1(x76)))
+ x79 := (uint64(p521Uint1(x78)) + x46)
+ var x81 uint64
+ _, x81 = bits.Add64(x26, x61, uint64(0x0))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64(x28, x63, uint64(p521Uint1(x81)))
+ var x84 uint64
+ var x85 uint64
+ x84, x85 = bits.Add64(x30, x65, uint64(p521Uint1(x83)))
+ var x86 uint64
+ var x87 uint64
+ x86, x87 = bits.Add64(x32, x67, uint64(p521Uint1(x85)))
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x34, x69, uint64(p521Uint1(x87)))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x36, x71, uint64(p521Uint1(x89)))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x38, x73, uint64(p521Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x40, x75, uint64(p521Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x42, x77, uint64(p521Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x44, x79, uint64(p521Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x101, x100 = bits.Mul64(x1, arg2[8])
+ var x102 uint64
+ var x103 uint64
+ x103, x102 = bits.Mul64(x1, arg2[7])
+ var x104 uint64
+ var x105 uint64
+ x105, x104 = bits.Mul64(x1, arg2[6])
+ var x106 uint64
+ var x107 uint64
+ x107, x106 = bits.Mul64(x1, arg2[5])
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x1, arg2[4])
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x1, arg2[3])
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x1, arg2[2])
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x1, arg2[1])
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x1, arg2[0])
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x117, x114, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x115, x112, uint64(p521Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x113, x110, uint64(p521Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x111, x108, uint64(p521Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x109, x106, uint64(p521Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x104, uint64(p521Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x105, x102, uint64(p521Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x103, x100, uint64(p521Uint1(x131)))
+ x134 := (uint64(p521Uint1(x133)) + x101)
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x82, x116, uint64(0x0))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x84, x118, uint64(p521Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x86, x120, uint64(p521Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x88, x122, uint64(p521Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x90, x124, uint64(p521Uint1(x142)))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x92, x126, uint64(p521Uint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x94, x128, uint64(p521Uint1(x146)))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x96, x130, uint64(p521Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x98, x132, uint64(p521Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(uint64(p521Uint1(x99)), x134, uint64(p521Uint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x156, x155 = bits.Mul64(x135, 0x1ff)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x162, x161 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x163 uint64
+ var x164 uint64
+ x164, x163 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x165 uint64
+ var x166 uint64
+ x166, x165 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x167 uint64
+ var x168 uint64
+ x168, x167 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x169 uint64
+ var x170 uint64
+ x170, x169 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x171 uint64
+ var x172 uint64
+ x172, x171 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x172, x169, uint64(0x0))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x170, x167, uint64(p521Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x168, x165, uint64(p521Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x166, x163, uint64(p521Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x164, x161, uint64(p521Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x162, x159, uint64(p521Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x160, x157, uint64(p521Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x158, x155, uint64(p521Uint1(x186)))
+ x189 := (uint64(p521Uint1(x188)) + x156)
+ var x191 uint64
+ _, x191 = bits.Add64(x135, x171, uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x137, x173, uint64(p521Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x139, x175, uint64(p521Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x141, x177, uint64(p521Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x143, x179, uint64(p521Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x145, x181, uint64(p521Uint1(x199)))
+ var x202 uint64
+ var x203 uint64
+ x202, x203 = bits.Add64(x147, x183, uint64(p521Uint1(x201)))
+ var x204 uint64
+ var x205 uint64
+ x204, x205 = bits.Add64(x149, x185, uint64(p521Uint1(x203)))
+ var x206 uint64
+ var x207 uint64
+ x206, x207 = bits.Add64(x151, x187, uint64(p521Uint1(x205)))
+ var x208 uint64
+ var x209 uint64
+ x208, x209 = bits.Add64(x153, x189, uint64(p521Uint1(x207)))
+ x210 := (uint64(p521Uint1(x209)) + uint64(p521Uint1(x154)))
+ var x211 uint64
+ var x212 uint64
+ x212, x211 = bits.Mul64(x2, arg2[8])
+ var x213 uint64
+ var x214 uint64
+ x214, x213 = bits.Mul64(x2, arg2[7])
+ var x215 uint64
+ var x216 uint64
+ x216, x215 = bits.Mul64(x2, arg2[6])
+ var x217 uint64
+ var x218 uint64
+ x218, x217 = bits.Mul64(x2, arg2[5])
+ var x219 uint64
+ var x220 uint64
+ x220, x219 = bits.Mul64(x2, arg2[4])
+ var x221 uint64
+ var x222 uint64
+ x222, x221 = bits.Mul64(x2, arg2[3])
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x2, arg2[2])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x2, arg2[1])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x2, arg2[0])
+ var x229 uint64
+ var x230 uint64
+ x229, x230 = bits.Add64(x228, x225, uint64(0x0))
+ var x231 uint64
+ var x232 uint64
+ x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
+ var x233 uint64
+ var x234 uint64
+ x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
+ x245 := (uint64(p521Uint1(x244)) + x212)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x192, x227, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x194, x229, uint64(p521Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x196, x231, uint64(p521Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x198, x233, uint64(p521Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x200, x235, uint64(p521Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x202, x237, uint64(p521Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x204, x239, uint64(p521Uint1(x257)))
+ var x260 uint64
+ var x261 uint64
+ x260, x261 = bits.Add64(x206, x241, uint64(p521Uint1(x259)))
+ var x262 uint64
+ var x263 uint64
+ x262, x263 = bits.Add64(x208, x243, uint64(p521Uint1(x261)))
+ var x264 uint64
+ var x265 uint64
+ x264, x265 = bits.Add64(x210, x245, uint64(p521Uint1(x263)))
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x246, 0x1ff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x274 uint64
+ var x275 uint64
+ x275, x274 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x276 uint64
+ var x277 uint64
+ x277, x276 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x278 uint64
+ var x279 uint64
+ x279, x278 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x280 uint64
+ var x281 uint64
+ x281, x280 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x282 uint64
+ var x283 uint64
+ x283, x282 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x284 uint64
+ var x285 uint64
+ x284, x285 = bits.Add64(x283, x280, uint64(0x0))
+ var x286 uint64
+ var x287 uint64
+ x286, x287 = bits.Add64(x281, x278, uint64(p521Uint1(x285)))
+ var x288 uint64
+ var x289 uint64
+ x288, x289 = bits.Add64(x279, x276, uint64(p521Uint1(x287)))
+ var x290 uint64
+ var x291 uint64
+ x290, x291 = bits.Add64(x277, x274, uint64(p521Uint1(x289)))
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Add64(x275, x272, uint64(p521Uint1(x291)))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Add64(x273, x270, uint64(p521Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Add64(x271, x268, uint64(p521Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Add64(x269, x266, uint64(p521Uint1(x297)))
+ x300 := (uint64(p521Uint1(x299)) + x267)
+ var x302 uint64
+ _, x302 = bits.Add64(x246, x282, uint64(0x0))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x248, x284, uint64(p521Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x250, x286, uint64(p521Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x252, x288, uint64(p521Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x254, x290, uint64(p521Uint1(x308)))
+ var x311 uint64
+ var x312 uint64
+ x311, x312 = bits.Add64(x256, x292, uint64(p521Uint1(x310)))
+ var x313 uint64
+ var x314 uint64
+ x313, x314 = bits.Add64(x258, x294, uint64(p521Uint1(x312)))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x260, x296, uint64(p521Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x262, x298, uint64(p521Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x264, x300, uint64(p521Uint1(x318)))
+ x321 := (uint64(p521Uint1(x320)) + uint64(p521Uint1(x265)))
+ var x322 uint64
+ var x323 uint64
+ x323, x322 = bits.Mul64(x3, arg2[8])
+ var x324 uint64
+ var x325 uint64
+ x325, x324 = bits.Mul64(x3, arg2[7])
+ var x326 uint64
+ var x327 uint64
+ x327, x326 = bits.Mul64(x3, arg2[6])
+ var x328 uint64
+ var x329 uint64
+ x329, x328 = bits.Mul64(x3, arg2[5])
+ var x330 uint64
+ var x331 uint64
+ x331, x330 = bits.Mul64(x3, arg2[4])
+ var x332 uint64
+ var x333 uint64
+ x333, x332 = bits.Mul64(x3, arg2[3])
+ var x334 uint64
+ var x335 uint64
+ x335, x334 = bits.Mul64(x3, arg2[2])
+ var x336 uint64
+ var x337 uint64
+ x337, x336 = bits.Mul64(x3, arg2[1])
+ var x338 uint64
+ var x339 uint64
+ x339, x338 = bits.Mul64(x3, arg2[0])
+ var x340 uint64
+ var x341 uint64
+ x340, x341 = bits.Add64(x339, x336, uint64(0x0))
+ var x342 uint64
+ var x343 uint64
+ x342, x343 = bits.Add64(x337, x334, uint64(p521Uint1(x341)))
+ var x344 uint64
+ var x345 uint64
+ x344, x345 = bits.Add64(x335, x332, uint64(p521Uint1(x343)))
+ var x346 uint64
+ var x347 uint64
+ x346, x347 = bits.Add64(x333, x330, uint64(p521Uint1(x345)))
+ var x348 uint64
+ var x349 uint64
+ x348, x349 = bits.Add64(x331, x328, uint64(p521Uint1(x347)))
+ var x350 uint64
+ var x351 uint64
+ x350, x351 = bits.Add64(x329, x326, uint64(p521Uint1(x349)))
+ var x352 uint64
+ var x353 uint64
+ x352, x353 = bits.Add64(x327, x324, uint64(p521Uint1(x351)))
+ var x354 uint64
+ var x355 uint64
+ x354, x355 = bits.Add64(x325, x322, uint64(p521Uint1(x353)))
+ x356 := (uint64(p521Uint1(x355)) + x323)
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x303, x338, uint64(0x0))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x305, x340, uint64(p521Uint1(x358)))
+ var x361 uint64
+ var x362 uint64
+ x361, x362 = bits.Add64(x307, x342, uint64(p521Uint1(x360)))
+ var x363 uint64
+ var x364 uint64
+ x363, x364 = bits.Add64(x309, x344, uint64(p521Uint1(x362)))
+ var x365 uint64
+ var x366 uint64
+ x365, x366 = bits.Add64(x311, x346, uint64(p521Uint1(x364)))
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x313, x348, uint64(p521Uint1(x366)))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x315, x350, uint64(p521Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x317, x352, uint64(p521Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x319, x354, uint64(p521Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x321, x356, uint64(p521Uint1(x374)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x357, 0x1ff)
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x389 uint64
+ var x390 uint64
+ x390, x389 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x391 uint64
+ var x392 uint64
+ x392, x391 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x393 uint64
+ var x394 uint64
+ x394, x393 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x394, x391, uint64(0x0))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x392, x389, uint64(p521Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x399, x400 = bits.Add64(x390, x387, uint64(p521Uint1(x398)))
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Add64(x388, x385, uint64(p521Uint1(x400)))
+ var x403 uint64
+ var x404 uint64
+ x403, x404 = bits.Add64(x386, x383, uint64(p521Uint1(x402)))
+ var x405 uint64
+ var x406 uint64
+ x405, x406 = bits.Add64(x384, x381, uint64(p521Uint1(x404)))
+ var x407 uint64
+ var x408 uint64
+ x407, x408 = bits.Add64(x382, x379, uint64(p521Uint1(x406)))
+ var x409 uint64
+ var x410 uint64
+ x409, x410 = bits.Add64(x380, x377, uint64(p521Uint1(x408)))
+ x411 := (uint64(p521Uint1(x410)) + x378)
+ var x413 uint64
+ _, x413 = bits.Add64(x357, x393, uint64(0x0))
+ var x414 uint64
+ var x415 uint64
+ x414, x415 = bits.Add64(x359, x395, uint64(p521Uint1(x413)))
+ var x416 uint64
+ var x417 uint64
+ x416, x417 = bits.Add64(x361, x397, uint64(p521Uint1(x415)))
+ var x418 uint64
+ var x419 uint64
+ x418, x419 = bits.Add64(x363, x399, uint64(p521Uint1(x417)))
+ var x420 uint64
+ var x421 uint64
+ x420, x421 = bits.Add64(x365, x401, uint64(p521Uint1(x419)))
+ var x422 uint64
+ var x423 uint64
+ x422, x423 = bits.Add64(x367, x403, uint64(p521Uint1(x421)))
+ var x424 uint64
+ var x425 uint64
+ x424, x425 = bits.Add64(x369, x405, uint64(p521Uint1(x423)))
+ var x426 uint64
+ var x427 uint64
+ x426, x427 = bits.Add64(x371, x407, uint64(p521Uint1(x425)))
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x373, x409, uint64(p521Uint1(x427)))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x375, x411, uint64(p521Uint1(x429)))
+ x432 := (uint64(p521Uint1(x431)) + uint64(p521Uint1(x376)))
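+	// Word 4: repeat the multiply-accumulate-reduce pattern with x4 (arg1[4]).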
+ var x433 uint64
+ var x434 uint64
+ x434, x433 = bits.Mul64(x4, arg2[8])
+ var x435 uint64
+ var x436 uint64
+ x436, x435 = bits.Mul64(x4, arg2[7])
+ var x437 uint64
+ var x438 uint64
+ x438, x437 = bits.Mul64(x4, arg2[6])
+ var x439 uint64
+ var x440 uint64
+ x440, x439 = bits.Mul64(x4, arg2[5])
+ var x441 uint64
+ var x442 uint64
+ x442, x441 = bits.Mul64(x4, arg2[4])
+ var x443 uint64
+ var x444 uint64
+ x444, x443 = bits.Mul64(x4, arg2[3])
+ var x445 uint64
+ var x446 uint64
+ x446, x445 = bits.Mul64(x4, arg2[2])
+ var x447 uint64
+ var x448 uint64
+ x448, x447 = bits.Mul64(x4, arg2[1])
+ var x449 uint64
+ var x450 uint64
+ x450, x449 = bits.Mul64(x4, arg2[0])
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x450, x447, uint64(0x0))
+ var x453 uint64
+ var x454 uint64
+ x453, x454 = bits.Add64(x448, x445, uint64(p521Uint1(x452)))
+ var x455 uint64
+ var x456 uint64
+ x455, x456 = bits.Add64(x446, x443, uint64(p521Uint1(x454)))
+ var x457 uint64
+ var x458 uint64
+ x457, x458 = bits.Add64(x444, x441, uint64(p521Uint1(x456)))
+ var x459 uint64
+ var x460 uint64
+ x459, x460 = bits.Add64(x442, x439, uint64(p521Uint1(x458)))
+ var x461 uint64
+ var x462 uint64
+ x461, x462 = bits.Add64(x440, x437, uint64(p521Uint1(x460)))
+ var x463 uint64
+ var x464 uint64
+ x463, x464 = bits.Add64(x438, x435, uint64(p521Uint1(x462)))
+ var x465 uint64
+ var x466 uint64
+ x465, x466 = bits.Add64(x436, x433, uint64(p521Uint1(x464)))
+ x467 := (uint64(p521Uint1(x466)) + x434)
+ var x468 uint64
+ var x469 uint64
+ x468, x469 = bits.Add64(x414, x449, uint64(0x0))
+ var x470 uint64
+ var x471 uint64
+ x470, x471 = bits.Add64(x416, x451, uint64(p521Uint1(x469)))
+ var x472 uint64
+ var x473 uint64
+ x472, x473 = bits.Add64(x418, x453, uint64(p521Uint1(x471)))
+ var x474 uint64
+ var x475 uint64
+ x474, x475 = bits.Add64(x420, x455, uint64(p521Uint1(x473)))
+ var x476 uint64
+ var x477 uint64
+ x476, x477 = bits.Add64(x422, x457, uint64(p521Uint1(x475)))
+ var x478 uint64
+ var x479 uint64
+ x478, x479 = bits.Add64(x424, x459, uint64(p521Uint1(x477)))
+ var x480 uint64
+ var x481 uint64
+ x480, x481 = bits.Add64(x426, x461, uint64(p521Uint1(x479)))
+ var x482 uint64
+ var x483 uint64
+ x482, x483 = bits.Add64(x428, x463, uint64(p521Uint1(x481)))
+ var x484 uint64
+ var x485 uint64
+ x484, x485 = bits.Add64(x430, x465, uint64(p521Uint1(x483)))
+ var x486 uint64
+ var x487 uint64
+ x486, x487 = bits.Add64(x432, x467, uint64(p521Uint1(x485)))
+ var x488 uint64
+ var x489 uint64
+ x489, x488 = bits.Mul64(x468, 0x1ff)
+ var x490 uint64
+ var x491 uint64
+ x491, x490 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x492 uint64
+ var x493 uint64
+ x493, x492 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x494 uint64
+ var x495 uint64
+ x495, x494 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x496 uint64
+ var x497 uint64
+ x497, x496 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x498 uint64
+ var x499 uint64
+ x499, x498 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x500 uint64
+ var x501 uint64
+ x501, x500 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x502 uint64
+ var x503 uint64
+ x503, x502 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x504 uint64
+ var x505 uint64
+ x505, x504 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x506 uint64
+ var x507 uint64
+ x506, x507 = bits.Add64(x505, x502, uint64(0x0))
+ var x508 uint64
+ var x509 uint64
+ x508, x509 = bits.Add64(x503, x500, uint64(p521Uint1(x507)))
+ var x510 uint64
+ var x511 uint64
+ x510, x511 = bits.Add64(x501, x498, uint64(p521Uint1(x509)))
+ var x512 uint64
+ var x513 uint64
+ x512, x513 = bits.Add64(x499, x496, uint64(p521Uint1(x511)))
+ var x514 uint64
+ var x515 uint64
+ x514, x515 = bits.Add64(x497, x494, uint64(p521Uint1(x513)))
+ var x516 uint64
+ var x517 uint64
+ x516, x517 = bits.Add64(x495, x492, uint64(p521Uint1(x515)))
+ var x518 uint64
+ var x519 uint64
+ x518, x519 = bits.Add64(x493, x490, uint64(p521Uint1(x517)))
+ var x520 uint64
+ var x521 uint64
+ x520, x521 = bits.Add64(x491, x488, uint64(p521Uint1(x519)))
+ x522 := (uint64(p521Uint1(x521)) + x489)
+ var x524 uint64
+ _, x524 = bits.Add64(x468, x504, uint64(0x0))
+ var x525 uint64
+ var x526 uint64
+ x525, x526 = bits.Add64(x470, x506, uint64(p521Uint1(x524)))
+ var x527 uint64
+ var x528 uint64
+ x527, x528 = bits.Add64(x472, x508, uint64(p521Uint1(x526)))
+ var x529 uint64
+ var x530 uint64
+ x529, x530 = bits.Add64(x474, x510, uint64(p521Uint1(x528)))
+ var x531 uint64
+ var x532 uint64
+ x531, x532 = bits.Add64(x476, x512, uint64(p521Uint1(x530)))
+ var x533 uint64
+ var x534 uint64
+ x533, x534 = bits.Add64(x478, x514, uint64(p521Uint1(x532)))
+ var x535 uint64
+ var x536 uint64
+ x535, x536 = bits.Add64(x480, x516, uint64(p521Uint1(x534)))
+ var x537 uint64
+ var x538 uint64
+ x537, x538 = bits.Add64(x482, x518, uint64(p521Uint1(x536)))
+ var x539 uint64
+ var x540 uint64
+ x539, x540 = bits.Add64(x484, x520, uint64(p521Uint1(x538)))
+ var x541 uint64
+ var x542 uint64
+ x541, x542 = bits.Add64(x486, x522, uint64(p521Uint1(x540)))
+ x543 := (uint64(p521Uint1(x542)) + uint64(p521Uint1(x487)))
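+	// Word 5: repeat with x5 (arg1[5]).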
+ var x544 uint64
+ var x545 uint64
+ x545, x544 = bits.Mul64(x5, arg2[8])
+ var x546 uint64
+ var x547 uint64
+ x547, x546 = bits.Mul64(x5, arg2[7])
+ var x548 uint64
+ var x549 uint64
+ x549, x548 = bits.Mul64(x5, arg2[6])
+ var x550 uint64
+ var x551 uint64
+ x551, x550 = bits.Mul64(x5, arg2[5])
+ var x552 uint64
+ var x553 uint64
+ x553, x552 = bits.Mul64(x5, arg2[4])
+ var x554 uint64
+ var x555 uint64
+ x555, x554 = bits.Mul64(x5, arg2[3])
+ var x556 uint64
+ var x557 uint64
+ x557, x556 = bits.Mul64(x5, arg2[2])
+ var x558 uint64
+ var x559 uint64
+ x559, x558 = bits.Mul64(x5, arg2[1])
+ var x560 uint64
+ var x561 uint64
+ x561, x560 = bits.Mul64(x5, arg2[0])
+ var x562 uint64
+ var x563 uint64
+ x562, x563 = bits.Add64(x561, x558, uint64(0x0))
+ var x564 uint64
+ var x565 uint64
+ x564, x565 = bits.Add64(x559, x556, uint64(p521Uint1(x563)))
+ var x566 uint64
+ var x567 uint64
+ x566, x567 = bits.Add64(x557, x554, uint64(p521Uint1(x565)))
+ var x568 uint64
+ var x569 uint64
+ x568, x569 = bits.Add64(x555, x552, uint64(p521Uint1(x567)))
+ var x570 uint64
+ var x571 uint64
+ x570, x571 = bits.Add64(x553, x550, uint64(p521Uint1(x569)))
+ var x572 uint64
+ var x573 uint64
+ x572, x573 = bits.Add64(x551, x548, uint64(p521Uint1(x571)))
+ var x574 uint64
+ var x575 uint64
+ x574, x575 = bits.Add64(x549, x546, uint64(p521Uint1(x573)))
+ var x576 uint64
+ var x577 uint64
+ x576, x577 = bits.Add64(x547, x544, uint64(p521Uint1(x575)))
+ x578 := (uint64(p521Uint1(x577)) + x545)
+ var x579 uint64
+ var x580 uint64
+ x579, x580 = bits.Add64(x525, x560, uint64(0x0))
+ var x581 uint64
+ var x582 uint64
+ x581, x582 = bits.Add64(x527, x562, uint64(p521Uint1(x580)))
+ var x583 uint64
+ var x584 uint64
+ x583, x584 = bits.Add64(x529, x564, uint64(p521Uint1(x582)))
+ var x585 uint64
+ var x586 uint64
+ x585, x586 = bits.Add64(x531, x566, uint64(p521Uint1(x584)))
+ var x587 uint64
+ var x588 uint64
+ x587, x588 = bits.Add64(x533, x568, uint64(p521Uint1(x586)))
+ var x589 uint64
+ var x590 uint64
+ x589, x590 = bits.Add64(x535, x570, uint64(p521Uint1(x588)))
+ var x591 uint64
+ var x592 uint64
+ x591, x592 = bits.Add64(x537, x572, uint64(p521Uint1(x590)))
+ var x593 uint64
+ var x594 uint64
+ x593, x594 = bits.Add64(x539, x574, uint64(p521Uint1(x592)))
+ var x595 uint64
+ var x596 uint64
+ x595, x596 = bits.Add64(x541, x576, uint64(p521Uint1(x594)))
+ var x597 uint64
+ var x598 uint64
+ x597, x598 = bits.Add64(x543, x578, uint64(p521Uint1(x596)))
+ var x599 uint64
+ var x600 uint64
+ x600, x599 = bits.Mul64(x579, 0x1ff)
+ var x601 uint64
+ var x602 uint64
+ x602, x601 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x603 uint64
+ var x604 uint64
+ x604, x603 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x605 uint64
+ var x606 uint64
+ x606, x605 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x607 uint64
+ var x608 uint64
+ x608, x607 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x609 uint64
+ var x610 uint64
+ x610, x609 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x611 uint64
+ var x612 uint64
+ x612, x611 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x613 uint64
+ var x614 uint64
+ x614, x613 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x615 uint64
+ var x616 uint64
+ x616, x615 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x617 uint64
+ var x618 uint64
+ x617, x618 = bits.Add64(x616, x613, uint64(0x0))
+ var x619 uint64
+ var x620 uint64
+ x619, x620 = bits.Add64(x614, x611, uint64(p521Uint1(x618)))
+ var x621 uint64
+ var x622 uint64
+ x621, x622 = bits.Add64(x612, x609, uint64(p521Uint1(x620)))
+ var x623 uint64
+ var x624 uint64
+ x623, x624 = bits.Add64(x610, x607, uint64(p521Uint1(x622)))
+ var x625 uint64
+ var x626 uint64
+ x625, x626 = bits.Add64(x608, x605, uint64(p521Uint1(x624)))
+ var x627 uint64
+ var x628 uint64
+ x627, x628 = bits.Add64(x606, x603, uint64(p521Uint1(x626)))
+ var x629 uint64
+ var x630 uint64
+ x629, x630 = bits.Add64(x604, x601, uint64(p521Uint1(x628)))
+ var x631 uint64
+ var x632 uint64
+ x631, x632 = bits.Add64(x602, x599, uint64(p521Uint1(x630)))
+ x633 := (uint64(p521Uint1(x632)) + x600)
+ var x635 uint64
+ _, x635 = bits.Add64(x579, x615, uint64(0x0))
+ var x636 uint64
+ var x637 uint64
+ x636, x637 = bits.Add64(x581, x617, uint64(p521Uint1(x635)))
+ var x638 uint64
+ var x639 uint64
+ x638, x639 = bits.Add64(x583, x619, uint64(p521Uint1(x637)))
+ var x640 uint64
+ var x641 uint64
+ x640, x641 = bits.Add64(x585, x621, uint64(p521Uint1(x639)))
+ var x642 uint64
+ var x643 uint64
+ x642, x643 = bits.Add64(x587, x623, uint64(p521Uint1(x641)))
+ var x644 uint64
+ var x645 uint64
+ x644, x645 = bits.Add64(x589, x625, uint64(p521Uint1(x643)))
+ var x646 uint64
+ var x647 uint64
+ x646, x647 = bits.Add64(x591, x627, uint64(p521Uint1(x645)))
+ var x648 uint64
+ var x649 uint64
+ x648, x649 = bits.Add64(x593, x629, uint64(p521Uint1(x647)))
+ var x650 uint64
+ var x651 uint64
+ x650, x651 = bits.Add64(x595, x631, uint64(p521Uint1(x649)))
+ var x652 uint64
+ var x653 uint64
+ x652, x653 = bits.Add64(x597, x633, uint64(p521Uint1(x651)))
+ x654 := (uint64(p521Uint1(x653)) + uint64(p521Uint1(x598)))
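+	// Word 6: repeat with x6 (arg1[6]).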
+ var x655 uint64
+ var x656 uint64
+ x656, x655 = bits.Mul64(x6, arg2[8])
+ var x657 uint64
+ var x658 uint64
+ x658, x657 = bits.Mul64(x6, arg2[7])
+ var x659 uint64
+ var x660 uint64
+ x660, x659 = bits.Mul64(x6, arg2[6])
+ var x661 uint64
+ var x662 uint64
+ x662, x661 = bits.Mul64(x6, arg2[5])
+ var x663 uint64
+ var x664 uint64
+ x664, x663 = bits.Mul64(x6, arg2[4])
+ var x665 uint64
+ var x666 uint64
+ x666, x665 = bits.Mul64(x6, arg2[3])
+ var x667 uint64
+ var x668 uint64
+ x668, x667 = bits.Mul64(x6, arg2[2])
+ var x669 uint64
+ var x670 uint64
+ x670, x669 = bits.Mul64(x6, arg2[1])
+ var x671 uint64
+ var x672 uint64
+ x672, x671 = bits.Mul64(x6, arg2[0])
+ var x673 uint64
+ var x674 uint64
+ x673, x674 = bits.Add64(x672, x669, uint64(0x0))
+ var x675 uint64
+ var x676 uint64
+ x675, x676 = bits.Add64(x670, x667, uint64(p521Uint1(x674)))
+ var x677 uint64
+ var x678 uint64
+ x677, x678 = bits.Add64(x668, x665, uint64(p521Uint1(x676)))
+ var x679 uint64
+ var x680 uint64
+ x679, x680 = bits.Add64(x666, x663, uint64(p521Uint1(x678)))
+ var x681 uint64
+ var x682 uint64
+ x681, x682 = bits.Add64(x664, x661, uint64(p521Uint1(x680)))
+ var x683 uint64
+ var x684 uint64
+ x683, x684 = bits.Add64(x662, x659, uint64(p521Uint1(x682)))
+ var x685 uint64
+ var x686 uint64
+ x685, x686 = bits.Add64(x660, x657, uint64(p521Uint1(x684)))
+ var x687 uint64
+ var x688 uint64
+ x687, x688 = bits.Add64(x658, x655, uint64(p521Uint1(x686)))
+ x689 := (uint64(p521Uint1(x688)) + x656)
+ var x690 uint64
+ var x691 uint64
+ x690, x691 = bits.Add64(x636, x671, uint64(0x0))
+ var x692 uint64
+ var x693 uint64
+ x692, x693 = bits.Add64(x638, x673, uint64(p521Uint1(x691)))
+ var x694 uint64
+ var x695 uint64
+ x694, x695 = bits.Add64(x640, x675, uint64(p521Uint1(x693)))
+ var x696 uint64
+ var x697 uint64
+ x696, x697 = bits.Add64(x642, x677, uint64(p521Uint1(x695)))
+ var x698 uint64
+ var x699 uint64
+ x698, x699 = bits.Add64(x644, x679, uint64(p521Uint1(x697)))
+ var x700 uint64
+ var x701 uint64
+ x700, x701 = bits.Add64(x646, x681, uint64(p521Uint1(x699)))
+ var x702 uint64
+ var x703 uint64
+ x702, x703 = bits.Add64(x648, x683, uint64(p521Uint1(x701)))
+ var x704 uint64
+ var x705 uint64
+ x704, x705 = bits.Add64(x650, x685, uint64(p521Uint1(x703)))
+ var x706 uint64
+ var x707 uint64
+ x706, x707 = bits.Add64(x652, x687, uint64(p521Uint1(x705)))
+ var x708 uint64
+ var x709 uint64
+ x708, x709 = bits.Add64(x654, x689, uint64(p521Uint1(x707)))
+ var x710 uint64
+ var x711 uint64
+ x711, x710 = bits.Mul64(x690, 0x1ff)
+ var x712 uint64
+ var x713 uint64
+ x713, x712 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x714 uint64
+ var x715 uint64
+ x715, x714 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x716 uint64
+ var x717 uint64
+ x717, x716 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x718 uint64
+ var x719 uint64
+ x719, x718 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x720 uint64
+ var x721 uint64
+ x721, x720 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x722 uint64
+ var x723 uint64
+ x723, x722 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x724 uint64
+ var x725 uint64
+ x725, x724 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x726 uint64
+ var x727 uint64
+ x727, x726 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x728 uint64
+ var x729 uint64
+ x728, x729 = bits.Add64(x727, x724, uint64(0x0))
+ var x730 uint64
+ var x731 uint64
+ x730, x731 = bits.Add64(x725, x722, uint64(p521Uint1(x729)))
+ var x732 uint64
+ var x733 uint64
+ x732, x733 = bits.Add64(x723, x720, uint64(p521Uint1(x731)))
+ var x734 uint64
+ var x735 uint64
+ x734, x735 = bits.Add64(x721, x718, uint64(p521Uint1(x733)))
+ var x736 uint64
+ var x737 uint64
+ x736, x737 = bits.Add64(x719, x716, uint64(p521Uint1(x735)))
+ var x738 uint64
+ var x739 uint64
+ x738, x739 = bits.Add64(x717, x714, uint64(p521Uint1(x737)))
+ var x740 uint64
+ var x741 uint64
+ x740, x741 = bits.Add64(x715, x712, uint64(p521Uint1(x739)))
+ var x742 uint64
+ var x743 uint64
+ x742, x743 = bits.Add64(x713, x710, uint64(p521Uint1(x741)))
+ x744 := (uint64(p521Uint1(x743)) + x711)
+ var x746 uint64
+ _, x746 = bits.Add64(x690, x726, uint64(0x0))
+ var x747 uint64
+ var x748 uint64
+ x747, x748 = bits.Add64(x692, x728, uint64(p521Uint1(x746)))
+ var x749 uint64
+ var x750 uint64
+ x749, x750 = bits.Add64(x694, x730, uint64(p521Uint1(x748)))
+ var x751 uint64
+ var x752 uint64
+ x751, x752 = bits.Add64(x696, x732, uint64(p521Uint1(x750)))
+ var x753 uint64
+ var x754 uint64
+ x753, x754 = bits.Add64(x698, x734, uint64(p521Uint1(x752)))
+ var x755 uint64
+ var x756 uint64
+ x755, x756 = bits.Add64(x700, x736, uint64(p521Uint1(x754)))
+ var x757 uint64
+ var x758 uint64
+ x757, x758 = bits.Add64(x702, x738, uint64(p521Uint1(x756)))
+ var x759 uint64
+ var x760 uint64
+ x759, x760 = bits.Add64(x704, x740, uint64(p521Uint1(x758)))
+ var x761 uint64
+ var x762 uint64
+ x761, x762 = bits.Add64(x706, x742, uint64(p521Uint1(x760)))
+ var x763 uint64
+ var x764 uint64
+ x763, x764 = bits.Add64(x708, x744, uint64(p521Uint1(x762)))
+ x765 := (uint64(p521Uint1(x764)) + uint64(p521Uint1(x709)))
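+	// Word 7: repeat with x7 (arg1[7]).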
+ var x766 uint64
+ var x767 uint64
+ x767, x766 = bits.Mul64(x7, arg2[8])
+ var x768 uint64
+ var x769 uint64
+ x769, x768 = bits.Mul64(x7, arg2[7])
+ var x770 uint64
+ var x771 uint64
+ x771, x770 = bits.Mul64(x7, arg2[6])
+ var x772 uint64
+ var x773 uint64
+ x773, x772 = bits.Mul64(x7, arg2[5])
+ var x774 uint64
+ var x775 uint64
+ x775, x774 = bits.Mul64(x7, arg2[4])
+ var x776 uint64
+ var x777 uint64
+ x777, x776 = bits.Mul64(x7, arg2[3])
+ var x778 uint64
+ var x779 uint64
+ x779, x778 = bits.Mul64(x7, arg2[2])
+ var x780 uint64
+ var x781 uint64
+ x781, x780 = bits.Mul64(x7, arg2[1])
+ var x782 uint64
+ var x783 uint64
+ x783, x782 = bits.Mul64(x7, arg2[0])
+ var x784 uint64
+ var x785 uint64
+ x784, x785 = bits.Add64(x783, x780, uint64(0x0))
+ var x786 uint64
+ var x787 uint64
+ x786, x787 = bits.Add64(x781, x778, uint64(p521Uint1(x785)))
+ var x788 uint64
+ var x789 uint64
+ x788, x789 = bits.Add64(x779, x776, uint64(p521Uint1(x787)))
+ var x790 uint64
+ var x791 uint64
+ x790, x791 = bits.Add64(x777, x774, uint64(p521Uint1(x789)))
+ var x792 uint64
+ var x793 uint64
+ x792, x793 = bits.Add64(x775, x772, uint64(p521Uint1(x791)))
+ var x794 uint64
+ var x795 uint64
+ x794, x795 = bits.Add64(x773, x770, uint64(p521Uint1(x793)))
+ var x796 uint64
+ var x797 uint64
+ x796, x797 = bits.Add64(x771, x768, uint64(p521Uint1(x795)))
+ var x798 uint64
+ var x799 uint64
+ x798, x799 = bits.Add64(x769, x766, uint64(p521Uint1(x797)))
+ x800 := (uint64(p521Uint1(x799)) + x767)
+ var x801 uint64
+ var x802 uint64
+ x801, x802 = bits.Add64(x747, x782, uint64(0x0))
+ var x803 uint64
+ var x804 uint64
+ x803, x804 = bits.Add64(x749, x784, uint64(p521Uint1(x802)))
+ var x805 uint64
+ var x806 uint64
+ x805, x806 = bits.Add64(x751, x786, uint64(p521Uint1(x804)))
+ var x807 uint64
+ var x808 uint64
+ x807, x808 = bits.Add64(x753, x788, uint64(p521Uint1(x806)))
+ var x809 uint64
+ var x810 uint64
+ x809, x810 = bits.Add64(x755, x790, uint64(p521Uint1(x808)))
+ var x811 uint64
+ var x812 uint64
+ x811, x812 = bits.Add64(x757, x792, uint64(p521Uint1(x810)))
+ var x813 uint64
+ var x814 uint64
+ x813, x814 = bits.Add64(x759, x794, uint64(p521Uint1(x812)))
+ var x815 uint64
+ var x816 uint64
+ x815, x816 = bits.Add64(x761, x796, uint64(p521Uint1(x814)))
+ var x817 uint64
+ var x818 uint64
+ x817, x818 = bits.Add64(x763, x798, uint64(p521Uint1(x816)))
+ var x819 uint64
+ var x820 uint64
+ x819, x820 = bits.Add64(x765, x800, uint64(p521Uint1(x818)))
+ var x821 uint64
+ var x822 uint64
+ x822, x821 = bits.Mul64(x801, 0x1ff)
+ var x823 uint64
+ var x824 uint64
+ x824, x823 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x825 uint64
+ var x826 uint64
+ x826, x825 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x827 uint64
+ var x828 uint64
+ x828, x827 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x829 uint64
+ var x830 uint64
+ x830, x829 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x831 uint64
+ var x832 uint64
+ x832, x831 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x833 uint64
+ var x834 uint64
+ x834, x833 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x835 uint64
+ var x836 uint64
+ x836, x835 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x837 uint64
+ var x838 uint64
+ x838, x837 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x839 uint64
+ var x840 uint64
+ x839, x840 = bits.Add64(x838, x835, uint64(0x0))
+ var x841 uint64
+ var x842 uint64
+ x841, x842 = bits.Add64(x836, x833, uint64(p521Uint1(x840)))
+ var x843 uint64
+ var x844 uint64
+ x843, x844 = bits.Add64(x834, x831, uint64(p521Uint1(x842)))
+ var x845 uint64
+ var x846 uint64
+ x845, x846 = bits.Add64(x832, x829, uint64(p521Uint1(x844)))
+ var x847 uint64
+ var x848 uint64
+ x847, x848 = bits.Add64(x830, x827, uint64(p521Uint1(x846)))
+ var x849 uint64
+ var x850 uint64
+ x849, x850 = bits.Add64(x828, x825, uint64(p521Uint1(x848)))
+ var x851 uint64
+ var x852 uint64
+ x851, x852 = bits.Add64(x826, x823, uint64(p521Uint1(x850)))
+ var x853 uint64
+ var x854 uint64
+ x853, x854 = bits.Add64(x824, x821, uint64(p521Uint1(x852)))
+ x855 := (uint64(p521Uint1(x854)) + x822)
+ var x857 uint64
+ _, x857 = bits.Add64(x801, x837, uint64(0x0))
+ var x858 uint64
+ var x859 uint64
+ x858, x859 = bits.Add64(x803, x839, uint64(p521Uint1(x857)))
+ var x860 uint64
+ var x861 uint64
+ x860, x861 = bits.Add64(x805, x841, uint64(p521Uint1(x859)))
+ var x862 uint64
+ var x863 uint64
+ x862, x863 = bits.Add64(x807, x843, uint64(p521Uint1(x861)))
+ var x864 uint64
+ var x865 uint64
+ x864, x865 = bits.Add64(x809, x845, uint64(p521Uint1(x863)))
+ var x866 uint64
+ var x867 uint64
+ x866, x867 = bits.Add64(x811, x847, uint64(p521Uint1(x865)))
+ var x868 uint64
+ var x869 uint64
+ x868, x869 = bits.Add64(x813, x849, uint64(p521Uint1(x867)))
+ var x870 uint64
+ var x871 uint64
+ x870, x871 = bits.Add64(x815, x851, uint64(p521Uint1(x869)))
+ var x872 uint64
+ var x873 uint64
+ x872, x873 = bits.Add64(x817, x853, uint64(p521Uint1(x871)))
+ var x874 uint64
+ var x875 uint64
+ x874, x875 = bits.Add64(x819, x855, uint64(p521Uint1(x873)))
+ x876 := (uint64(p521Uint1(x875)) + uint64(p521Uint1(x820)))
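+	// Word 8: last multiply-accumulate-reduce pass, with x8 (arg1[8]).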
+ var x877 uint64
+ var x878 uint64
+ x878, x877 = bits.Mul64(x8, arg2[8])
+ var x879 uint64
+ var x880 uint64
+ x880, x879 = bits.Mul64(x8, arg2[7])
+ var x881 uint64
+ var x882 uint64
+ x882, x881 = bits.Mul64(x8, arg2[6])
+ var x883 uint64
+ var x884 uint64
+ x884, x883 = bits.Mul64(x8, arg2[5])
+ var x885 uint64
+ var x886 uint64
+ x886, x885 = bits.Mul64(x8, arg2[4])
+ var x887 uint64
+ var x888 uint64
+ x888, x887 = bits.Mul64(x8, arg2[3])
+ var x889 uint64
+ var x890 uint64
+ x890, x889 = bits.Mul64(x8, arg2[2])
+ var x891 uint64
+ var x892 uint64
+ x892, x891 = bits.Mul64(x8, arg2[1])
+ var x893 uint64
+ var x894 uint64
+ x894, x893 = bits.Mul64(x8, arg2[0])
+ var x895 uint64
+ var x896 uint64
+ x895, x896 = bits.Add64(x894, x891, uint64(0x0))
+ var x897 uint64
+ var x898 uint64
+ x897, x898 = bits.Add64(x892, x889, uint64(p521Uint1(x896)))
+ var x899 uint64
+ var x900 uint64
+ x899, x900 = bits.Add64(x890, x887, uint64(p521Uint1(x898)))
+ var x901 uint64
+ var x902 uint64
+ x901, x902 = bits.Add64(x888, x885, uint64(p521Uint1(x900)))
+ var x903 uint64
+ var x904 uint64
+ x903, x904 = bits.Add64(x886, x883, uint64(p521Uint1(x902)))
+ var x905 uint64
+ var x906 uint64
+ x905, x906 = bits.Add64(x884, x881, uint64(p521Uint1(x904)))
+ var x907 uint64
+ var x908 uint64
+ x907, x908 = bits.Add64(x882, x879, uint64(p521Uint1(x906)))
+ var x909 uint64
+ var x910 uint64
+ x909, x910 = bits.Add64(x880, x877, uint64(p521Uint1(x908)))
+ x911 := (uint64(p521Uint1(x910)) + x878)
+ var x912 uint64
+ var x913 uint64
+ x912, x913 = bits.Add64(x858, x893, uint64(0x0))
+ var x914 uint64
+ var x915 uint64
+ x914, x915 = bits.Add64(x860, x895, uint64(p521Uint1(x913)))
+ var x916 uint64
+ var x917 uint64
+ x916, x917 = bits.Add64(x862, x897, uint64(p521Uint1(x915)))
+ var x918 uint64
+ var x919 uint64
+ x918, x919 = bits.Add64(x864, x899, uint64(p521Uint1(x917)))
+ var x920 uint64
+ var x921 uint64
+ x920, x921 = bits.Add64(x866, x901, uint64(p521Uint1(x919)))
+ var x922 uint64
+ var x923 uint64
+ x922, x923 = bits.Add64(x868, x903, uint64(p521Uint1(x921)))
+ var x924 uint64
+ var x925 uint64
+ x924, x925 = bits.Add64(x870, x905, uint64(p521Uint1(x923)))
+ var x926 uint64
+ var x927 uint64
+ x926, x927 = bits.Add64(x872, x907, uint64(p521Uint1(x925)))
+ var x928 uint64
+ var x929 uint64
+ x928, x929 = bits.Add64(x874, x909, uint64(p521Uint1(x927)))
+ var x930 uint64
+ var x931 uint64
+ x930, x931 = bits.Add64(x876, x911, uint64(p521Uint1(x929)))
+ var x932 uint64
+ var x933 uint64
+ x933, x932 = bits.Mul64(x912, 0x1ff)
+ var x934 uint64
+ var x935 uint64
+ x935, x934 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x936 uint64
+ var x937 uint64
+ x937, x936 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x938 uint64
+ var x939 uint64
+ x939, x938 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x940 uint64
+ var x941 uint64
+ x941, x940 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x942 uint64
+ var x943 uint64
+ x943, x942 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x944 uint64
+ var x945 uint64
+ x945, x944 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x946 uint64
+ var x947 uint64
+ x947, x946 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x948 uint64
+ var x949 uint64
+ x949, x948 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x950 uint64
+ var x951 uint64
+ x950, x951 = bits.Add64(x949, x946, uint64(0x0))
+ var x952 uint64
+ var x953 uint64
+ x952, x953 = bits.Add64(x947, x944, uint64(p521Uint1(x951)))
+ var x954 uint64
+ var x955 uint64
+ x954, x955 = bits.Add64(x945, x942, uint64(p521Uint1(x953)))
+ var x956 uint64
+ var x957 uint64
+ x956, x957 = bits.Add64(x943, x940, uint64(p521Uint1(x955)))
+ var x958 uint64
+ var x959 uint64
+ x958, x959 = bits.Add64(x941, x938, uint64(p521Uint1(x957)))
+ var x960 uint64
+ var x961 uint64
+ x960, x961 = bits.Add64(x939, x936, uint64(p521Uint1(x959)))
+ var x962 uint64
+ var x963 uint64
+ x962, x963 = bits.Add64(x937, x934, uint64(p521Uint1(x961)))
+ var x964 uint64
+ var x965 uint64
+ x964, x965 = bits.Add64(x935, x932, uint64(p521Uint1(x963)))
+ x966 := (uint64(p521Uint1(x965)) + x933)
+ var x968 uint64
+ _, x968 = bits.Add64(x912, x948, uint64(0x0))
+ var x969 uint64
+ var x970 uint64
+ x969, x970 = bits.Add64(x914, x950, uint64(p521Uint1(x968)))
+ var x971 uint64
+ var x972 uint64
+ x971, x972 = bits.Add64(x916, x952, uint64(p521Uint1(x970)))
+ var x973 uint64
+ var x974 uint64
+ x973, x974 = bits.Add64(x918, x954, uint64(p521Uint1(x972)))
+ var x975 uint64
+ var x976 uint64
+ x975, x976 = bits.Add64(x920, x956, uint64(p521Uint1(x974)))
+ var x977 uint64
+ var x978 uint64
+ x977, x978 = bits.Add64(x922, x958, uint64(p521Uint1(x976)))
+ var x979 uint64
+ var x980 uint64
+ x979, x980 = bits.Add64(x924, x960, uint64(p521Uint1(x978)))
+ var x981 uint64
+ var x982 uint64
+ x981, x982 = bits.Add64(x926, x962, uint64(p521Uint1(x980)))
+ var x983 uint64
+ var x984 uint64
+ x983, x984 = bits.Add64(x928, x964, uint64(p521Uint1(x982)))
+ var x985 uint64
+ var x986 uint64
+ x985, x986 = bits.Add64(x930, x966, uint64(p521Uint1(x984)))
+ x987 := (uint64(p521Uint1(x986)) + uint64(p521Uint1(x931)))
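+	// Final conditional subtraction: subtract p limb by limb, then use the
+	// last borrow (x1007) to select, branch-free, either the difference or
+	// the original sum, so out1 is fully reduced into [0, p).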
+ var x988 uint64
+ var x989 uint64
+ x988, x989 = bits.Sub64(x969, 0xffffffffffffffff, uint64(0x0))
+ var x990 uint64
+ var x991 uint64
+ x990, x991 = bits.Sub64(x971, 0xffffffffffffffff, uint64(p521Uint1(x989)))
+ var x992 uint64
+ var x993 uint64
+ x992, x993 = bits.Sub64(x973, 0xffffffffffffffff, uint64(p521Uint1(x991)))
+ var x994 uint64
+ var x995 uint64
+ x994, x995 = bits.Sub64(x975, 0xffffffffffffffff, uint64(p521Uint1(x993)))
+ var x996 uint64
+ var x997 uint64
+ x996, x997 = bits.Sub64(x977, 0xffffffffffffffff, uint64(p521Uint1(x995)))
+ var x998 uint64
+ var x999 uint64
+ x998, x999 = bits.Sub64(x979, 0xffffffffffffffff, uint64(p521Uint1(x997)))
+ var x1000 uint64
+ var x1001 uint64
+ x1000, x1001 = bits.Sub64(x981, 0xffffffffffffffff, uint64(p521Uint1(x999)))
+ var x1002 uint64
+ var x1003 uint64
+ x1002, x1003 = bits.Sub64(x983, 0xffffffffffffffff, uint64(p521Uint1(x1001)))
+ var x1004 uint64
+ var x1005 uint64
+ x1004, x1005 = bits.Sub64(x985, 0x1ff, uint64(p521Uint1(x1003)))
+ var x1007 uint64
+ _, x1007 = bits.Sub64(x987, uint64(0x0), uint64(p521Uint1(x1005)))
+ var x1008 uint64
+ p521CmovznzU64(&x1008, p521Uint1(x1007), x988, x969)
+ var x1009 uint64
+ p521CmovznzU64(&x1009, p521Uint1(x1007), x990, x971)
+ var x1010 uint64
+ p521CmovznzU64(&x1010, p521Uint1(x1007), x992, x973)
+ var x1011 uint64
+ p521CmovznzU64(&x1011, p521Uint1(x1007), x994, x975)
+ var x1012 uint64
+ p521CmovznzU64(&x1012, p521Uint1(x1007), x996, x977)
+ var x1013 uint64
+ p521CmovznzU64(&x1013, p521Uint1(x1007), x998, x979)
+ var x1014 uint64
+ p521CmovznzU64(&x1014, p521Uint1(x1007), x1000, x981)
+ var x1015 uint64
+ p521CmovznzU64(&x1015, p521Uint1(x1007), x1002, x983)
+ var x1016 uint64
+ p521CmovznzU64(&x1016, p521Uint1(x1007), x1004, x985)
+ out1[0] = x1008
+ out1[1] = x1009
+ out1[2] = x1010
+ out1[3] = x1011
+ out1[4] = x1012
+ out1[5] = x1013
+ out1[6] = x1014
+ out1[7] = x1015
+ out1[8] = x1016
+}
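+
+// Implementation note: the above is word-by-word Montgomery multiplication
+// with nine 64-bit limbs, so R = 2^576 and the function computes
+//
+//	eval(out1) = eval(arg1) * eval(arg2) * R⁻¹ mod (2^521 - 1)
+//
+// branch-free. Each of the nine passes multiplies one limb of arg1 by arg2,
+// accumulates, and performs one reduction step as annotated inline.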
+
+// p521Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//   0 ≤ eval arg1 < m
+// Postconditions:
+//   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+//   0 ≤ eval out1 < m
+//
+func p521Square(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[6]
+ x7 := arg1[7]
+ x8 := arg1[8]
+ x9 := arg1[0]
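+	// Squaring reuses the p521Mul schedule with both operands equal to arg1;
+	// the generated code does not exploit the symmetry of the square.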
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x9, arg1[8])
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x9, arg1[7])
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x9, arg1[6])
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x9, arg1[5])
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x9, arg1[4])
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x9, arg1[3])
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x9, arg1[2])
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x9, arg1[1])
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x9, arg1[0])
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x23, x20, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x21, x18, uint64(p521Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x19, x16, uint64(p521Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x17, x14, uint64(p521Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x15, x12, uint64(p521Uint1(x39)))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x13, x10, uint64(p521Uint1(x41)))
+ x44 := (uint64(p521Uint1(x43)) + x11)
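+	// First reduction step of the square (m' = 1): the quotient word is x26,
+	// the low limb of x9*arg1; multiply it by the limbs of p and add.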
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x26, 0x1ff)
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x51 uint64
+ var x52 uint64
+ x52, x51 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x53 uint64
+ var x54 uint64
+ x54, x53 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x55 uint64
+ var x56 uint64
+ x56, x55 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x57 uint64
+ var x58 uint64
+ x58, x57 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x62, x59, uint64(0x0))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x60, x57, uint64(p521Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x58, x55, uint64(p521Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x56, x53, uint64(p521Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x54, x51, uint64(p521Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x52, x49, uint64(p521Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x50, x47, uint64(p521Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x48, x45, uint64(p521Uint1(x76)))
+ x79 := (uint64(p521Uint1(x78)) + x46)
+ var x81 uint64
+ _, x81 = bits.Add64(x26, x61, uint64(0x0))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64(x28, x63, uint64(p521Uint1(x81)))
+ var x84 uint64
+ var x85 uint64
+ x84, x85 = bits.Add64(x30, x65, uint64(p521Uint1(x83)))
+ var x86 uint64
+ var x87 uint64
+ x86, x87 = bits.Add64(x32, x67, uint64(p521Uint1(x85)))
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x34, x69, uint64(p521Uint1(x87)))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x36, x71, uint64(p521Uint1(x89)))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x38, x73, uint64(p521Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x40, x75, uint64(p521Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x42, x77, uint64(p521Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x44, x79, uint64(p521Uint1(x97)))
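+	// Word 1 of the squaring schedule: multiply x1 (arg1[1]) by arg1,
+	// accumulate, then reduce.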
+ var x100 uint64
+ var x101 uint64
+ x101, x100 = bits.Mul64(x1, arg1[8])
+ var x102 uint64
+ var x103 uint64
+ x103, x102 = bits.Mul64(x1, arg1[7])
+ var x104 uint64
+ var x105 uint64
+ x105, x104 = bits.Mul64(x1, arg1[6])
+ var x106 uint64
+ var x107 uint64
+ x107, x106 = bits.Mul64(x1, arg1[5])
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x1, arg1[4])
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x1, arg1[3])
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x1, arg1[2])
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x1, arg1[1])
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x1, arg1[0])
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x117, x114, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x115, x112, uint64(p521Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x113, x110, uint64(p521Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x111, x108, uint64(p521Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x109, x106, uint64(p521Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x104, uint64(p521Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x105, x102, uint64(p521Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x103, x100, uint64(p521Uint1(x131)))
+ x134 := (uint64(p521Uint1(x133)) + x101)
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x82, x116, uint64(0x0))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x84, x118, uint64(p521Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x86, x120, uint64(p521Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x88, x122, uint64(p521Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x90, x124, uint64(p521Uint1(x142)))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x92, x126, uint64(p521Uint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x94, x128, uint64(p521Uint1(x146)))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x96, x130, uint64(p521Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x98, x132, uint64(p521Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(uint64(p521Uint1(x99)), x134, uint64(p521Uint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x156, x155 = bits.Mul64(x135, 0x1ff)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x162, x161 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x163 uint64
+ var x164 uint64
+ x164, x163 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x165 uint64
+ var x166 uint64
+ x166, x165 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x167 uint64
+ var x168 uint64
+ x168, x167 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x169 uint64
+ var x170 uint64
+ x170, x169 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x171 uint64
+ var x172 uint64
+ x172, x171 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x172, x169, uint64(0x0))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x170, x167, uint64(p521Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x168, x165, uint64(p521Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x166, x163, uint64(p521Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x164, x161, uint64(p521Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x162, x159, uint64(p521Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x160, x157, uint64(p521Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x158, x155, uint64(p521Uint1(x186)))
+ x189 := (uint64(p521Uint1(x188)) + x156)
+ var x191 uint64
+ _, x191 = bits.Add64(x135, x171, uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x137, x173, uint64(p521Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x139, x175, uint64(p521Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x141, x177, uint64(p521Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x143, x179, uint64(p521Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x145, x181, uint64(p521Uint1(x199)))
+ var x202 uint64
+ var x203 uint64
+ x202, x203 = bits.Add64(x147, x183, uint64(p521Uint1(x201)))
+ var x204 uint64
+ var x205 uint64
+ x204, x205 = bits.Add64(x149, x185, uint64(p521Uint1(x203)))
+ var x206 uint64
+ var x207 uint64
+ x206, x207 = bits.Add64(x151, x187, uint64(p521Uint1(x205)))
+ var x208 uint64
+ var x209 uint64
+ x208, x209 = bits.Add64(x153, x189, uint64(p521Uint1(x207)))
+ x210 := (uint64(p521Uint1(x209)) + uint64(p521Uint1(x154)))
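+	// Word 2: x2 (arg1[2]) times arg1.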
+ var x211 uint64
+ var x212 uint64
+ x212, x211 = bits.Mul64(x2, arg1[8])
+ var x213 uint64
+ var x214 uint64
+ x214, x213 = bits.Mul64(x2, arg1[7])
+ var x215 uint64
+ var x216 uint64
+ x216, x215 = bits.Mul64(x2, arg1[6])
+ var x217 uint64
+ var x218 uint64
+ x218, x217 = bits.Mul64(x2, arg1[5])
+ var x219 uint64
+ var x220 uint64
+ x220, x219 = bits.Mul64(x2, arg1[4])
+ var x221 uint64
+ var x222 uint64
+ x222, x221 = bits.Mul64(x2, arg1[3])
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x2, arg1[2])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x2, arg1[1])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x2, arg1[0])
+ var x229 uint64
+ var x230 uint64
+ x229, x230 = bits.Add64(x228, x225, uint64(0x0))
+ var x231 uint64
+ var x232 uint64
+ x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
+ var x233 uint64
+ var x234 uint64
+ x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
+ x245 := (uint64(p521Uint1(x244)) + x212)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x192, x227, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x194, x229, uint64(p521Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x196, x231, uint64(p521Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x198, x233, uint64(p521Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x200, x235, uint64(p521Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x202, x237, uint64(p521Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x204, x239, uint64(p521Uint1(x257)))
+ var x260 uint64
+ var x261 uint64
+ x260, x261 = bits.Add64(x206, x241, uint64(p521Uint1(x259)))
+ var x262 uint64
+ var x263 uint64
+ x262, x263 = bits.Add64(x208, x243, uint64(p521Uint1(x261)))
+ var x264 uint64
+ var x265 uint64
+ x264, x265 = bits.Add64(x210, x245, uint64(p521Uint1(x263)))
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x246, 0x1ff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x274 uint64
+ var x275 uint64
+ x275, x274 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x276 uint64
+ var x277 uint64
+ x277, x276 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x278 uint64
+ var x279 uint64
+ x279, x278 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x280 uint64
+ var x281 uint64
+ x281, x280 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x282 uint64
+ var x283 uint64
+ x283, x282 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x284 uint64
+ var x285 uint64
+ x284, x285 = bits.Add64(x283, x280, uint64(0x0))
+ var x286 uint64
+ var x287 uint64
+ x286, x287 = bits.Add64(x281, x278, uint64(p521Uint1(x285)))
+ var x288 uint64
+ var x289 uint64
+ x288, x289 = bits.Add64(x279, x276, uint64(p521Uint1(x287)))
+ var x290 uint64
+ var x291 uint64
+ x290, x291 = bits.Add64(x277, x274, uint64(p521Uint1(x289)))
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Add64(x275, x272, uint64(p521Uint1(x291)))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Add64(x273, x270, uint64(p521Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Add64(x271, x268, uint64(p521Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Add64(x269, x266, uint64(p521Uint1(x297)))
+ x300 := (uint64(p521Uint1(x299)) + x267)
+ var x302 uint64
+ _, x302 = bits.Add64(x246, x282, uint64(0x0))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x248, x284, uint64(p521Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x250, x286, uint64(p521Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x252, x288, uint64(p521Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x254, x290, uint64(p521Uint1(x308)))
+ var x311 uint64
+ var x312 uint64
+ x311, x312 = bits.Add64(x256, x292, uint64(p521Uint1(x310)))
+ var x313 uint64
+ var x314 uint64
+ x313, x314 = bits.Add64(x258, x294, uint64(p521Uint1(x312)))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x260, x296, uint64(p521Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x262, x298, uint64(p521Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x264, x300, uint64(p521Uint1(x318)))
+ x321 := (uint64(p521Uint1(x320)) + uint64(p521Uint1(x265)))
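+	// Word 3: x3 (arg1[3]) times arg1.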
+ var x322 uint64
+ var x323 uint64
+ x323, x322 = bits.Mul64(x3, arg1[8])
+ var x324 uint64
+ var x325 uint64
+ x325, x324 = bits.Mul64(x3, arg1[7])
+ var x326 uint64
+ var x327 uint64
+ x327, x326 = bits.Mul64(x3, arg1[6])
+ var x328 uint64
+ var x329 uint64
+ x329, x328 = bits.Mul64(x3, arg1[5])
+ var x330 uint64
+ var x331 uint64
+ x331, x330 = bits.Mul64(x3, arg1[4])
+ var x332 uint64
+ var x333 uint64
+ x333, x332 = bits.Mul64(x3, arg1[3])
+ var x334 uint64
+ var x335 uint64
+ x335, x334 = bits.Mul64(x3, arg1[2])
+ var x336 uint64
+ var x337 uint64
+ x337, x336 = bits.Mul64(x3, arg1[1])
+ var x338 uint64
+ var x339 uint64
+ x339, x338 = bits.Mul64(x3, arg1[0])
+ var x340 uint64
+ var x341 uint64
+ x340, x341 = bits.Add64(x339, x336, uint64(0x0))
+ var x342 uint64
+ var x343 uint64
+ x342, x343 = bits.Add64(x337, x334, uint64(p521Uint1(x341)))
+ var x344 uint64
+ var x345 uint64
+ x344, x345 = bits.Add64(x335, x332, uint64(p521Uint1(x343)))
+ var x346 uint64
+ var x347 uint64
+ x346, x347 = bits.Add64(x333, x330, uint64(p521Uint1(x345)))
+ var x348 uint64
+ var x349 uint64
+ x348, x349 = bits.Add64(x331, x328, uint64(p521Uint1(x347)))
+ var x350 uint64
+ var x351 uint64
+ x350, x351 = bits.Add64(x329, x326, uint64(p521Uint1(x349)))
+ var x352 uint64
+ var x353 uint64
+ x352, x353 = bits.Add64(x327, x324, uint64(p521Uint1(x351)))
+ var x354 uint64
+ var x355 uint64
+ x354, x355 = bits.Add64(x325, x322, uint64(p521Uint1(x353)))
+ x356 := (uint64(p521Uint1(x355)) + x323)
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x303, x338, uint64(0x0))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x305, x340, uint64(p521Uint1(x358)))
+ var x361 uint64
+ var x362 uint64
+ x361, x362 = bits.Add64(x307, x342, uint64(p521Uint1(x360)))
+ var x363 uint64
+ var x364 uint64
+ x363, x364 = bits.Add64(x309, x344, uint64(p521Uint1(x362)))
+ var x365 uint64
+ var x366 uint64
+ x365, x366 = bits.Add64(x311, x346, uint64(p521Uint1(x364)))
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x313, x348, uint64(p521Uint1(x366)))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x315, x350, uint64(p521Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x317, x352, uint64(p521Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x319, x354, uint64(p521Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x321, x356, uint64(p521Uint1(x374)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x357, 0x1ff)
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x389 uint64
+ var x390 uint64
+ x390, x389 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x391 uint64
+ var x392 uint64
+ x392, x391 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x393 uint64
+ var x394 uint64
+ x394, x393 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x394, x391, uint64(0x0))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x392, x389, uint64(p521Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x399, x400 = bits.Add64(x390, x387, uint64(p521Uint1(x398)))
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Add64(x388, x385, uint64(p521Uint1(x400)))
+ var x403 uint64
+ var x404 uint64
+ x403, x404 = bits.Add64(x386, x383, uint64(p521Uint1(x402)))
+ var x405 uint64
+ var x406 uint64
+ x405, x406 = bits.Add64(x384, x381, uint64(p521Uint1(x404)))
+ var x407 uint64
+ var x408 uint64
+ x407, x408 = bits.Add64(x382, x379, uint64(p521Uint1(x406)))
+ var x409 uint64
+ var x410 uint64
+ x409, x410 = bits.Add64(x380, x377, uint64(p521Uint1(x408)))
+ x411 := (uint64(p521Uint1(x410)) + x378)
+ var x413 uint64
+ _, x413 = bits.Add64(x357, x393, uint64(0x0))
+ var x414 uint64
+ var x415 uint64
+ x414, x415 = bits.Add64(x359, x395, uint64(p521Uint1(x413)))
+ var x416 uint64
+ var x417 uint64
+ x416, x417 = bits.Add64(x361, x397, uint64(p521Uint1(x415)))
+ var x418 uint64
+ var x419 uint64
+ x418, x419 = bits.Add64(x363, x399, uint64(p521Uint1(x417)))
+ var x420 uint64
+ var x421 uint64
+ x420, x421 = bits.Add64(x365, x401, uint64(p521Uint1(x419)))
+ var x422 uint64
+ var x423 uint64
+ x422, x423 = bits.Add64(x367, x403, uint64(p521Uint1(x421)))
+ var x424 uint64
+ var x425 uint64
+ x424, x425 = bits.Add64(x369, x405, uint64(p521Uint1(x423)))
+ var x426 uint64
+ var x427 uint64
+ x426, x427 = bits.Add64(x371, x407, uint64(p521Uint1(x425)))
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x373, x409, uint64(p521Uint1(x427)))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x375, x411, uint64(p521Uint1(x429)))
+ x432 := (uint64(p521Uint1(x431)) + uint64(p521Uint1(x376)))
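+	// Word 4: x4 (arg1[4]) times arg1.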
+ var x433 uint64
+ var x434 uint64
+ x434, x433 = bits.Mul64(x4, arg1[8])
+ var x435 uint64
+ var x436 uint64
+ x436, x435 = bits.Mul64(x4, arg1[7])
+ var x437 uint64
+ var x438 uint64
+ x438, x437 = bits.Mul64(x4, arg1[6])
+ var x439 uint64
+ var x440 uint64
+ x440, x439 = bits.Mul64(x4, arg1[5])
+ var x441 uint64
+ var x442 uint64
+ x442, x441 = bits.Mul64(x4, arg1[4])
+ var x443 uint64
+ var x444 uint64
+ x444, x443 = bits.Mul64(x4, arg1[3])
+ var x445 uint64
+ var x446 uint64
+ x446, x445 = bits.Mul64(x4, arg1[2])
+ var x447 uint64
+ var x448 uint64
+ x448, x447 = bits.Mul64(x4, arg1[1])
+ var x449 uint64
+ var x450 uint64
+ x450, x449 = bits.Mul64(x4, arg1[0])
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x450, x447, uint64(0x0))
+ var x453 uint64
+ var x454 uint64
+ x453, x454 = bits.Add64(x448, x445, uint64(p521Uint1(x452)))
+ var x455 uint64
+ var x456 uint64
+ x455, x456 = bits.Add64(x446, x443, uint64(p521Uint1(x454)))
+ var x457 uint64
+ var x458 uint64
+ x457, x458 = bits.Add64(x444, x441, uint64(p521Uint1(x456)))
+ var x459 uint64
+ var x460 uint64
+ x459, x460 = bits.Add64(x442, x439, uint64(p521Uint1(x458)))
+ var x461 uint64
+ var x462 uint64
+ x461, x462 = bits.Add64(x440, x437, uint64(p521Uint1(x460)))
+ var x463 uint64
+ var x464 uint64
+ x463, x464 = bits.Add64(x438, x435, uint64(p521Uint1(x462)))
+ var x465 uint64
+ var x466 uint64
+ x465, x466 = bits.Add64(x436, x433, uint64(p521Uint1(x464)))
+ x467 := (uint64(p521Uint1(x466)) + x434)
+ var x468 uint64
+ var x469 uint64
+ x468, x469 = bits.Add64(x414, x449, uint64(0x0))
+ var x470 uint64
+ var x471 uint64
+ x470, x471 = bits.Add64(x416, x451, uint64(p521Uint1(x469)))
+ var x472 uint64
+ var x473 uint64
+ x472, x473 = bits.Add64(x418, x453, uint64(p521Uint1(x471)))
+ var x474 uint64
+ var x475 uint64
+ x474, x475 = bits.Add64(x420, x455, uint64(p521Uint1(x473)))
+ var x476 uint64
+ var x477 uint64
+ x476, x477 = bits.Add64(x422, x457, uint64(p521Uint1(x475)))
+ var x478 uint64
+ var x479 uint64
+ x478, x479 = bits.Add64(x424, x459, uint64(p521Uint1(x477)))
+ var x480 uint64
+ var x481 uint64
+ x480, x481 = bits.Add64(x426, x461, uint64(p521Uint1(x479)))
+ var x482 uint64
+ var x483 uint64
+ x482, x483 = bits.Add64(x428, x463, uint64(p521Uint1(x481)))
+ var x484 uint64
+ var x485 uint64
+ x484, x485 = bits.Add64(x430, x465, uint64(p521Uint1(x483)))
+ var x486 uint64
+ var x487 uint64
+ x486, x487 = bits.Add64(x432, x467, uint64(p521Uint1(x485)))
+ var x488 uint64
+ var x489 uint64
+ x489, x488 = bits.Mul64(x468, 0x1ff)
+ var x490 uint64
+ var x491 uint64
+ x491, x490 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x492 uint64
+ var x493 uint64
+ x493, x492 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x494 uint64
+ var x495 uint64
+ x495, x494 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x496 uint64
+ var x497 uint64
+ x497, x496 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x498 uint64
+ var x499 uint64
+ x499, x498 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x500 uint64
+ var x501 uint64
+ x501, x500 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x502 uint64
+ var x503 uint64
+ x503, x502 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x504 uint64
+ var x505 uint64
+ x505, x504 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x506 uint64
+ var x507 uint64
+ x506, x507 = bits.Add64(x505, x502, uint64(0x0))
+ var x508 uint64
+ var x509 uint64
+ x508, x509 = bits.Add64(x503, x500, uint64(p521Uint1(x507)))
+ var x510 uint64
+ var x511 uint64
+ x510, x511 = bits.Add64(x501, x498, uint64(p521Uint1(x509)))
+ var x512 uint64
+ var x513 uint64
+ x512, x513 = bits.Add64(x499, x496, uint64(p521Uint1(x511)))
+ var x514 uint64
+ var x515 uint64
+ x514, x515 = bits.Add64(x497, x494, uint64(p521Uint1(x513)))
+ var x516 uint64
+ var x517 uint64
+ x516, x517 = bits.Add64(x495, x492, uint64(p521Uint1(x515)))
+ var x518 uint64
+ var x519 uint64
+ x518, x519 = bits.Add64(x493, x490, uint64(p521Uint1(x517)))
+ var x520 uint64
+ var x521 uint64
+ x520, x521 = bits.Add64(x491, x488, uint64(p521Uint1(x519)))
+ x522 := (uint64(p521Uint1(x521)) + x489)
+ var x524 uint64
+ _, x524 = bits.Add64(x468, x504, uint64(0x0))
+ var x525 uint64
+ var x526 uint64
+ x525, x526 = bits.Add64(x470, x506, uint64(p521Uint1(x524)))
+ var x527 uint64
+ var x528 uint64
+ x527, x528 = bits.Add64(x472, x508, uint64(p521Uint1(x526)))
+ var x529 uint64
+ var x530 uint64
+ x529, x530 = bits.Add64(x474, x510, uint64(p521Uint1(x528)))
+ var x531 uint64
+ var x532 uint64
+ x531, x532 = bits.Add64(x476, x512, uint64(p521Uint1(x530)))
+ var x533 uint64
+ var x534 uint64
+ x533, x534 = bits.Add64(x478, x514, uint64(p521Uint1(x532)))
+ var x535 uint64
+ var x536 uint64
+ x535, x536 = bits.Add64(x480, x516, uint64(p521Uint1(x534)))
+ var x537 uint64
+ var x538 uint64
+ x537, x538 = bits.Add64(x482, x518, uint64(p521Uint1(x536)))
+ var x539 uint64
+ var x540 uint64
+ x539, x540 = bits.Add64(x484, x520, uint64(p521Uint1(x538)))
+ var x541 uint64
+ var x542 uint64
+ x541, x542 = bits.Add64(x486, x522, uint64(p521Uint1(x540)))
+ x543 := (uint64(p521Uint1(x542)) + uint64(p521Uint1(x487)))
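+	// Word 5: x5 (arg1[5]) times arg1.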
+ var x544 uint64
+ var x545 uint64
+ x545, x544 = bits.Mul64(x5, arg1[8])
+ var x546 uint64
+ var x547 uint64
+ x547, x546 = bits.Mul64(x5, arg1[7])
+ var x548 uint64
+ var x549 uint64
+ x549, x548 = bits.Mul64(x5, arg1[6])
+ var x550 uint64
+ var x551 uint64
+ x551, x550 = bits.Mul64(x5, arg1[5])
+ var x552 uint64
+ var x553 uint64
+ x553, x552 = bits.Mul64(x5, arg1[4])
+ var x554 uint64
+ var x555 uint64
+ x555, x554 = bits.Mul64(x5, arg1[3])
+ var x556 uint64
+ var x557 uint64
+ x557, x556 = bits.Mul64(x5, arg1[2])
+ var x558 uint64
+ var x559 uint64
+ x559, x558 = bits.Mul64(x5, arg1[1])
+ var x560 uint64
+ var x561 uint64
+ x561, x560 = bits.Mul64(x5, arg1[0])
+ var x562 uint64
+ var x563 uint64
+ x562, x563 = bits.Add64(x561, x558, uint64(0x0))
+ var x564 uint64
+ var x565 uint64
+ x564, x565 = bits.Add64(x559, x556, uint64(p521Uint1(x563)))
+ var x566 uint64
+ var x567 uint64
+ x566, x567 = bits.Add64(x557, x554, uint64(p521Uint1(x565)))
+ var x568 uint64
+ var x569 uint64
+ x568, x569 = bits.Add64(x555, x552, uint64(p521Uint1(x567)))
+ var x570 uint64
+ var x571 uint64
+ x570, x571 = bits.Add64(x553, x550, uint64(p521Uint1(x569)))
+ var x572 uint64
+ var x573 uint64
+ x572, x573 = bits.Add64(x551, x548, uint64(p521Uint1(x571)))
+ var x574 uint64
+ var x575 uint64
+ x574, x575 = bits.Add64(x549, x546, uint64(p521Uint1(x573)))
+ var x576 uint64
+ var x577 uint64
+ x576, x577 = bits.Add64(x547, x544, uint64(p521Uint1(x575)))
+ x578 := (uint64(p521Uint1(x577)) + x545)
+ var x579 uint64
+ var x580 uint64
+ x579, x580 = bits.Add64(x525, x560, uint64(0x0))
+ var x581 uint64
+ var x582 uint64
+ x581, x582 = bits.Add64(x527, x562, uint64(p521Uint1(x580)))
+ var x583 uint64
+ var x584 uint64
+ x583, x584 = bits.Add64(x529, x564, uint64(p521Uint1(x582)))
+ var x585 uint64
+ var x586 uint64
+ x585, x586 = bits.Add64(x531, x566, uint64(p521Uint1(x584)))
+ var x587 uint64
+ var x588 uint64
+ x587, x588 = bits.Add64(x533, x568, uint64(p521Uint1(x586)))
+ var x589 uint64
+ var x590 uint64
+ x589, x590 = bits.Add64(x535, x570, uint64(p521Uint1(x588)))
+ var x591 uint64
+ var x592 uint64
+ x591, x592 = bits.Add64(x537, x572, uint64(p521Uint1(x590)))
+ var x593 uint64
+ var x594 uint64
+ x593, x594 = bits.Add64(x539, x574, uint64(p521Uint1(x592)))
+ var x595 uint64
+ var x596 uint64
+ x595, x596 = bits.Add64(x541, x576, uint64(p521Uint1(x594)))
+ var x597 uint64
+ var x598 uint64
+ x597, x598 = bits.Add64(x543, x578, uint64(p521Uint1(x596)))
+ var x599 uint64
+ var x600 uint64
+ x600, x599 = bits.Mul64(x579, 0x1ff)
+ var x601 uint64
+ var x602 uint64
+ x602, x601 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x603 uint64
+ var x604 uint64
+ x604, x603 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x605 uint64
+ var x606 uint64
+ x606, x605 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x607 uint64
+ var x608 uint64
+ x608, x607 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x609 uint64
+ var x610 uint64
+ x610, x609 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x611 uint64
+ var x612 uint64
+ x612, x611 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x613 uint64
+ var x614 uint64
+ x614, x613 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x615 uint64
+ var x616 uint64
+ x616, x615 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x617 uint64
+ var x618 uint64
+ x617, x618 = bits.Add64(x616, x613, uint64(0x0))
+ var x619 uint64
+ var x620 uint64
+ x619, x620 = bits.Add64(x614, x611, uint64(p521Uint1(x618)))
+ var x621 uint64
+ var x622 uint64
+ x621, x622 = bits.Add64(x612, x609, uint64(p521Uint1(x620)))
+ var x623 uint64
+ var x624 uint64
+ x623, x624 = bits.Add64(x610, x607, uint64(p521Uint1(x622)))
+ var x625 uint64
+ var x626 uint64
+ x625, x626 = bits.Add64(x608, x605, uint64(p521Uint1(x624)))
+ var x627 uint64
+ var x628 uint64
+ x627, x628 = bits.Add64(x606, x603, uint64(p521Uint1(x626)))
+ var x629 uint64
+ var x630 uint64
+ x629, x630 = bits.Add64(x604, x601, uint64(p521Uint1(x628)))
+ var x631 uint64
+ var x632 uint64
+ x631, x632 = bits.Add64(x602, x599, uint64(p521Uint1(x630)))
+ x633 := (uint64(p521Uint1(x632)) + x600)
+ var x635 uint64
+ _, x635 = bits.Add64(x579, x615, uint64(0x0))
+ var x636 uint64
+ var x637 uint64
+ x636, x637 = bits.Add64(x581, x617, uint64(p521Uint1(x635)))
+ var x638 uint64
+ var x639 uint64
+ x638, x639 = bits.Add64(x583, x619, uint64(p521Uint1(x637)))
+ var x640 uint64
+ var x641 uint64
+ x640, x641 = bits.Add64(x585, x621, uint64(p521Uint1(x639)))
+ var x642 uint64
+ var x643 uint64
+ x642, x643 = bits.Add64(x587, x623, uint64(p521Uint1(x641)))
+ var x644 uint64
+ var x645 uint64
+ x644, x645 = bits.Add64(x589, x625, uint64(p521Uint1(x643)))
+ var x646 uint64
+ var x647 uint64
+ x646, x647 = bits.Add64(x591, x627, uint64(p521Uint1(x645)))
+ var x648 uint64
+ var x649 uint64
+ x648, x649 = bits.Add64(x593, x629, uint64(p521Uint1(x647)))
+ var x650 uint64
+ var x651 uint64
+ x650, x651 = bits.Add64(x595, x631, uint64(p521Uint1(x649)))
+ var x652 uint64
+ var x653 uint64
+ x652, x653 = bits.Add64(x597, x633, uint64(p521Uint1(x651)))
+ x654 := (uint64(p521Uint1(x653)) + uint64(p521Uint1(x598)))
+ var x655 uint64
+ var x656 uint64
+ x656, x655 = bits.Mul64(x6, arg1[8])
+ var x657 uint64
+ var x658 uint64
+ x658, x657 = bits.Mul64(x6, arg1[7])
+ var x659 uint64
+ var x660 uint64
+ x660, x659 = bits.Mul64(x6, arg1[6])
+ var x661 uint64
+ var x662 uint64
+ x662, x661 = bits.Mul64(x6, arg1[5])
+ var x663 uint64
+ var x664 uint64
+ x664, x663 = bits.Mul64(x6, arg1[4])
+ var x665 uint64
+ var x666 uint64
+ x666, x665 = bits.Mul64(x6, arg1[3])
+ var x667 uint64
+ var x668 uint64
+ x668, x667 = bits.Mul64(x6, arg1[2])
+ var x669 uint64
+ var x670 uint64
+ x670, x669 = bits.Mul64(x6, arg1[1])
+ var x671 uint64
+ var x672 uint64
+ x672, x671 = bits.Mul64(x6, arg1[0])
+ var x673 uint64
+ var x674 uint64
+ x673, x674 = bits.Add64(x672, x669, uint64(0x0))
+ var x675 uint64
+ var x676 uint64
+ x675, x676 = bits.Add64(x670, x667, uint64(p521Uint1(x674)))
+ var x677 uint64
+ var x678 uint64
+ x677, x678 = bits.Add64(x668, x665, uint64(p521Uint1(x676)))
+ var x679 uint64
+ var x680 uint64
+ x679, x680 = bits.Add64(x666, x663, uint64(p521Uint1(x678)))
+ var x681 uint64
+ var x682 uint64
+ x681, x682 = bits.Add64(x664, x661, uint64(p521Uint1(x680)))
+ var x683 uint64
+ var x684 uint64
+ x683, x684 = bits.Add64(x662, x659, uint64(p521Uint1(x682)))
+ var x685 uint64
+ var x686 uint64
+ x685, x686 = bits.Add64(x660, x657, uint64(p521Uint1(x684)))
+ var x687 uint64
+ var x688 uint64
+ x687, x688 = bits.Add64(x658, x655, uint64(p521Uint1(x686)))
+ x689 := (uint64(p521Uint1(x688)) + x656)
+ var x690 uint64
+ var x691 uint64
+ x690, x691 = bits.Add64(x636, x671, uint64(0x0))
+ var x692 uint64
+ var x693 uint64
+ x692, x693 = bits.Add64(x638, x673, uint64(p521Uint1(x691)))
+ var x694 uint64
+ var x695 uint64
+ x694, x695 = bits.Add64(x640, x675, uint64(p521Uint1(x693)))
+ var x696 uint64
+ var x697 uint64
+ x696, x697 = bits.Add64(x642, x677, uint64(p521Uint1(x695)))
+ var x698 uint64
+ var x699 uint64
+ x698, x699 = bits.Add64(x644, x679, uint64(p521Uint1(x697)))
+ var x700 uint64
+ var x701 uint64
+ x700, x701 = bits.Add64(x646, x681, uint64(p521Uint1(x699)))
+ var x702 uint64
+ var x703 uint64
+ x702, x703 = bits.Add64(x648, x683, uint64(p521Uint1(x701)))
+ var x704 uint64
+ var x705 uint64
+ x704, x705 = bits.Add64(x650, x685, uint64(p521Uint1(x703)))
+ var x706 uint64
+ var x707 uint64
+ x706, x707 = bits.Add64(x652, x687, uint64(p521Uint1(x705)))
+ var x708 uint64
+ var x709 uint64
+ x708, x709 = bits.Add64(x654, x689, uint64(p521Uint1(x707)))
+ var x710 uint64
+ var x711 uint64
+ x711, x710 = bits.Mul64(x690, 0x1ff)
+ var x712 uint64
+ var x713 uint64
+ x713, x712 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x714 uint64
+ var x715 uint64
+ x715, x714 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x716 uint64
+ var x717 uint64
+ x717, x716 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x718 uint64
+ var x719 uint64
+ x719, x718 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x720 uint64
+ var x721 uint64
+ x721, x720 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x722 uint64
+ var x723 uint64
+ x723, x722 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x724 uint64
+ var x725 uint64
+ x725, x724 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x726 uint64
+ var x727 uint64
+ x727, x726 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x728 uint64
+ var x729 uint64
+ x728, x729 = bits.Add64(x727, x724, uint64(0x0))
+ var x730 uint64
+ var x731 uint64
+ x730, x731 = bits.Add64(x725, x722, uint64(p521Uint1(x729)))
+ var x732 uint64
+ var x733 uint64
+ x732, x733 = bits.Add64(x723, x720, uint64(p521Uint1(x731)))
+ var x734 uint64
+ var x735 uint64
+ x734, x735 = bits.Add64(x721, x718, uint64(p521Uint1(x733)))
+ var x736 uint64
+ var x737 uint64
+ x736, x737 = bits.Add64(x719, x716, uint64(p521Uint1(x735)))
+ var x738 uint64
+ var x739 uint64
+ x738, x739 = bits.Add64(x717, x714, uint64(p521Uint1(x737)))
+ var x740 uint64
+ var x741 uint64
+ x740, x741 = bits.Add64(x715, x712, uint64(p521Uint1(x739)))
+ var x742 uint64
+ var x743 uint64
+ x742, x743 = bits.Add64(x713, x710, uint64(p521Uint1(x741)))
+ x744 := (uint64(p521Uint1(x743)) + x711)
+ var x746 uint64
+ _, x746 = bits.Add64(x690, x726, uint64(0x0))
+ var x747 uint64
+ var x748 uint64
+ x747, x748 = bits.Add64(x692, x728, uint64(p521Uint1(x746)))
+ var x749 uint64
+ var x750 uint64
+ x749, x750 = bits.Add64(x694, x730, uint64(p521Uint1(x748)))
+ var x751 uint64
+ var x752 uint64
+ x751, x752 = bits.Add64(x696, x732, uint64(p521Uint1(x750)))
+ var x753 uint64
+ var x754 uint64
+ x753, x754 = bits.Add64(x698, x734, uint64(p521Uint1(x752)))
+ var x755 uint64
+ var x756 uint64
+ x755, x756 = bits.Add64(x700, x736, uint64(p521Uint1(x754)))
+ var x757 uint64
+ var x758 uint64
+ x757, x758 = bits.Add64(x702, x738, uint64(p521Uint1(x756)))
+ var x759 uint64
+ var x760 uint64
+ x759, x760 = bits.Add64(x704, x740, uint64(p521Uint1(x758)))
+ var x761 uint64
+ var x762 uint64
+ x761, x762 = bits.Add64(x706, x742, uint64(p521Uint1(x760)))
+ var x763 uint64
+ var x764 uint64
+ x763, x764 = bits.Add64(x708, x744, uint64(p521Uint1(x762)))
+ x765 := (uint64(p521Uint1(x764)) + uint64(p521Uint1(x709)))
+ var x766 uint64
+ var x767 uint64
+ x767, x766 = bits.Mul64(x7, arg1[8])
+ var x768 uint64
+ var x769 uint64
+ x769, x768 = bits.Mul64(x7, arg1[7])
+ var x770 uint64
+ var x771 uint64
+ x771, x770 = bits.Mul64(x7, arg1[6])
+ var x772 uint64
+ var x773 uint64
+ x773, x772 = bits.Mul64(x7, arg1[5])
+ var x774 uint64
+ var x775 uint64
+ x775, x774 = bits.Mul64(x7, arg1[4])
+ var x776 uint64
+ var x777 uint64
+ x777, x776 = bits.Mul64(x7, arg1[3])
+ var x778 uint64
+ var x779 uint64
+ x779, x778 = bits.Mul64(x7, arg1[2])
+ var x780 uint64
+ var x781 uint64
+ x781, x780 = bits.Mul64(x7, arg1[1])
+ var x782 uint64
+ var x783 uint64
+ x783, x782 = bits.Mul64(x7, arg1[0])
+ var x784 uint64
+ var x785 uint64
+ x784, x785 = bits.Add64(x783, x780, uint64(0x0))
+ var x786 uint64
+ var x787 uint64
+ x786, x787 = bits.Add64(x781, x778, uint64(p521Uint1(x785)))
+ var x788 uint64
+ var x789 uint64
+ x788, x789 = bits.Add64(x779, x776, uint64(p521Uint1(x787)))
+ var x790 uint64
+ var x791 uint64
+ x790, x791 = bits.Add64(x777, x774, uint64(p521Uint1(x789)))
+ var x792 uint64
+ var x793 uint64
+ x792, x793 = bits.Add64(x775, x772, uint64(p521Uint1(x791)))
+ var x794 uint64
+ var x795 uint64
+ x794, x795 = bits.Add64(x773, x770, uint64(p521Uint1(x793)))
+ var x796 uint64
+ var x797 uint64
+ x796, x797 = bits.Add64(x771, x768, uint64(p521Uint1(x795)))
+ var x798 uint64
+ var x799 uint64
+ x798, x799 = bits.Add64(x769, x766, uint64(p521Uint1(x797)))
+ x800 := (uint64(p521Uint1(x799)) + x767)
+ var x801 uint64
+ var x802 uint64
+ x801, x802 = bits.Add64(x747, x782, uint64(0x0))
+ var x803 uint64
+ var x804 uint64
+ x803, x804 = bits.Add64(x749, x784, uint64(p521Uint1(x802)))
+ var x805 uint64
+ var x806 uint64
+ x805, x806 = bits.Add64(x751, x786, uint64(p521Uint1(x804)))
+ var x807 uint64
+ var x808 uint64
+ x807, x808 = bits.Add64(x753, x788, uint64(p521Uint1(x806)))
+ var x809 uint64
+ var x810 uint64
+ x809, x810 = bits.Add64(x755, x790, uint64(p521Uint1(x808)))
+ var x811 uint64
+ var x812 uint64
+ x811, x812 = bits.Add64(x757, x792, uint64(p521Uint1(x810)))
+ var x813 uint64
+ var x814 uint64
+ x813, x814 = bits.Add64(x759, x794, uint64(p521Uint1(x812)))
+ var x815 uint64
+ var x816 uint64
+ x815, x816 = bits.Add64(x761, x796, uint64(p521Uint1(x814)))
+ var x817 uint64
+ var x818 uint64
+ x817, x818 = bits.Add64(x763, x798, uint64(p521Uint1(x816)))
+ var x819 uint64
+ var x820 uint64
+ x819, x820 = bits.Add64(x765, x800, uint64(p521Uint1(x818)))
+ var x821 uint64
+ var x822 uint64
+ x822, x821 = bits.Mul64(x801, 0x1ff)
+ var x823 uint64
+ var x824 uint64
+ x824, x823 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x825 uint64
+ var x826 uint64
+ x826, x825 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x827 uint64
+ var x828 uint64
+ x828, x827 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x829 uint64
+ var x830 uint64
+ x830, x829 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x831 uint64
+ var x832 uint64
+ x832, x831 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x833 uint64
+ var x834 uint64
+ x834, x833 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x835 uint64
+ var x836 uint64
+ x836, x835 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x837 uint64
+ var x838 uint64
+ x838, x837 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x839 uint64
+ var x840 uint64
+ x839, x840 = bits.Add64(x838, x835, uint64(0x0))
+ var x841 uint64
+ var x842 uint64
+ x841, x842 = bits.Add64(x836, x833, uint64(p521Uint1(x840)))
+ var x843 uint64
+ var x844 uint64
+ x843, x844 = bits.Add64(x834, x831, uint64(p521Uint1(x842)))
+ var x845 uint64
+ var x846 uint64
+ x845, x846 = bits.Add64(x832, x829, uint64(p521Uint1(x844)))
+ var x847 uint64
+ var x848 uint64
+ x847, x848 = bits.Add64(x830, x827, uint64(p521Uint1(x846)))
+ var x849 uint64
+ var x850 uint64
+ x849, x850 = bits.Add64(x828, x825, uint64(p521Uint1(x848)))
+ var x851 uint64
+ var x852 uint64
+ x851, x852 = bits.Add64(x826, x823, uint64(p521Uint1(x850)))
+ var x853 uint64
+ var x854 uint64
+ x853, x854 = bits.Add64(x824, x821, uint64(p521Uint1(x852)))
+ x855 := (uint64(p521Uint1(x854)) + x822)
+ var x857 uint64
+ _, x857 = bits.Add64(x801, x837, uint64(0x0))
+ var x858 uint64
+ var x859 uint64
+ x858, x859 = bits.Add64(x803, x839, uint64(p521Uint1(x857)))
+ var x860 uint64
+ var x861 uint64
+ x860, x861 = bits.Add64(x805, x841, uint64(p521Uint1(x859)))
+ var x862 uint64
+ var x863 uint64
+ x862, x863 = bits.Add64(x807, x843, uint64(p521Uint1(x861)))
+ var x864 uint64
+ var x865 uint64
+ x864, x865 = bits.Add64(x809, x845, uint64(p521Uint1(x863)))
+ var x866 uint64
+ var x867 uint64
+ x866, x867 = bits.Add64(x811, x847, uint64(p521Uint1(x865)))
+ var x868 uint64
+ var x869 uint64
+ x868, x869 = bits.Add64(x813, x849, uint64(p521Uint1(x867)))
+ var x870 uint64
+ var x871 uint64
+ x870, x871 = bits.Add64(x815, x851, uint64(p521Uint1(x869)))
+ var x872 uint64
+ var x873 uint64
+ x872, x873 = bits.Add64(x817, x853, uint64(p521Uint1(x871)))
+ var x874 uint64
+ var x875 uint64
+ x874, x875 = bits.Add64(x819, x855, uint64(p521Uint1(x873)))
+ x876 := (uint64(p521Uint1(x875)) + uint64(p521Uint1(x820)))
+ var x877 uint64
+ var x878 uint64
+ x878, x877 = bits.Mul64(x8, arg1[8])
+ var x879 uint64
+ var x880 uint64
+ x880, x879 = bits.Mul64(x8, arg1[7])
+ var x881 uint64
+ var x882 uint64
+ x882, x881 = bits.Mul64(x8, arg1[6])
+ var x883 uint64
+ var x884 uint64
+ x884, x883 = bits.Mul64(x8, arg1[5])
+ var x885 uint64
+ var x886 uint64
+ x886, x885 = bits.Mul64(x8, arg1[4])
+ var x887 uint64
+ var x888 uint64
+ x888, x887 = bits.Mul64(x8, arg1[3])
+ var x889 uint64
+ var x890 uint64
+ x890, x889 = bits.Mul64(x8, arg1[2])
+ var x891 uint64
+ var x892 uint64
+ x892, x891 = bits.Mul64(x8, arg1[1])
+ var x893 uint64
+ var x894 uint64
+ x894, x893 = bits.Mul64(x8, arg1[0])
+ var x895 uint64
+ var x896 uint64
+ x895, x896 = bits.Add64(x894, x891, uint64(0x0))
+ var x897 uint64
+ var x898 uint64
+ x897, x898 = bits.Add64(x892, x889, uint64(p521Uint1(x896)))
+ var x899 uint64
+ var x900 uint64
+ x899, x900 = bits.Add64(x890, x887, uint64(p521Uint1(x898)))
+ var x901 uint64
+ var x902 uint64
+ x901, x902 = bits.Add64(x888, x885, uint64(p521Uint1(x900)))
+ var x903 uint64
+ var x904 uint64
+ x903, x904 = bits.Add64(x886, x883, uint64(p521Uint1(x902)))
+ var x905 uint64
+ var x906 uint64
+ x905, x906 = bits.Add64(x884, x881, uint64(p521Uint1(x904)))
+ var x907 uint64
+ var x908 uint64
+ x907, x908 = bits.Add64(x882, x879, uint64(p521Uint1(x906)))
+ var x909 uint64
+ var x910 uint64
+ x909, x910 = bits.Add64(x880, x877, uint64(p521Uint1(x908)))
+ x911 := (uint64(p521Uint1(x910)) + x878)
+ var x912 uint64
+ var x913 uint64
+ x912, x913 = bits.Add64(x858, x893, uint64(0x0))
+ var x914 uint64
+ var x915 uint64
+ x914, x915 = bits.Add64(x860, x895, uint64(p521Uint1(x913)))
+ var x916 uint64
+ var x917 uint64
+ x916, x917 = bits.Add64(x862, x897, uint64(p521Uint1(x915)))
+ var x918 uint64
+ var x919 uint64
+ x918, x919 = bits.Add64(x864, x899, uint64(p521Uint1(x917)))
+ var x920 uint64
+ var x921 uint64
+ x920, x921 = bits.Add64(x866, x901, uint64(p521Uint1(x919)))
+ var x922 uint64
+ var x923 uint64
+ x922, x923 = bits.Add64(x868, x903, uint64(p521Uint1(x921)))
+ var x924 uint64
+ var x925 uint64
+ x924, x925 = bits.Add64(x870, x905, uint64(p521Uint1(x923)))
+ var x926 uint64
+ var x927 uint64
+ x926, x927 = bits.Add64(x872, x907, uint64(p521Uint1(x925)))
+ var x928 uint64
+ var x929 uint64
+ x928, x929 = bits.Add64(x874, x909, uint64(p521Uint1(x927)))
+ var x930 uint64
+ var x931 uint64
+ x930, x931 = bits.Add64(x876, x911, uint64(p521Uint1(x929)))
+ var x932 uint64
+ var x933 uint64
+ x933, x932 = bits.Mul64(x912, 0x1ff)
+ var x934 uint64
+ var x935 uint64
+ x935, x934 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x936 uint64
+ var x937 uint64
+ x937, x936 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x938 uint64
+ var x939 uint64
+ x939, x938 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x940 uint64
+ var x941 uint64
+ x941, x940 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x942 uint64
+ var x943 uint64
+ x943, x942 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x944 uint64
+ var x945 uint64
+ x945, x944 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x946 uint64
+ var x947 uint64
+ x947, x946 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x948 uint64
+ var x949 uint64
+ x949, x948 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x950 uint64
+ var x951 uint64
+ x950, x951 = bits.Add64(x949, x946, uint64(0x0))
+ var x952 uint64
+ var x953 uint64
+ x952, x953 = bits.Add64(x947, x944, uint64(p521Uint1(x951)))
+ var x954 uint64
+ var x955 uint64
+ x954, x955 = bits.Add64(x945, x942, uint64(p521Uint1(x953)))
+ var x956 uint64
+ var x957 uint64
+ x956, x957 = bits.Add64(x943, x940, uint64(p521Uint1(x955)))
+ var x958 uint64
+ var x959 uint64
+ x958, x959 = bits.Add64(x941, x938, uint64(p521Uint1(x957)))
+ var x960 uint64
+ var x961 uint64
+ x960, x961 = bits.Add64(x939, x936, uint64(p521Uint1(x959)))
+ var x962 uint64
+ var x963 uint64
+ x962, x963 = bits.Add64(x937, x934, uint64(p521Uint1(x961)))
+ var x964 uint64
+ var x965 uint64
+ x964, x965 = bits.Add64(x935, x932, uint64(p521Uint1(x963)))
+ x966 := (uint64(p521Uint1(x965)) + x933)
+ var x968 uint64
+ _, x968 = bits.Add64(x912, x948, uint64(0x0))
+ var x969 uint64
+ var x970 uint64
+ x969, x970 = bits.Add64(x914, x950, uint64(p521Uint1(x968)))
+ var x971 uint64
+ var x972 uint64
+ x971, x972 = bits.Add64(x916, x952, uint64(p521Uint1(x970)))
+ var x973 uint64
+ var x974 uint64
+ x973, x974 = bits.Add64(x918, x954, uint64(p521Uint1(x972)))
+ var x975 uint64
+ var x976 uint64
+ x975, x976 = bits.Add64(x920, x956, uint64(p521Uint1(x974)))
+ var x977 uint64
+ var x978 uint64
+ x977, x978 = bits.Add64(x922, x958, uint64(p521Uint1(x976)))
+ var x979 uint64
+ var x980 uint64
+ x979, x980 = bits.Add64(x924, x960, uint64(p521Uint1(x978)))
+ var x981 uint64
+ var x982 uint64
+ x981, x982 = bits.Add64(x926, x962, uint64(p521Uint1(x980)))
+ var x983 uint64
+ var x984 uint64
+ x983, x984 = bits.Add64(x928, x964, uint64(p521Uint1(x982)))
+ var x985 uint64
+ var x986 uint64
+ x985, x986 = bits.Add64(x930, x966, uint64(p521Uint1(x984)))
+ x987 := (uint64(p521Uint1(x986)) + uint64(p521Uint1(x931)))
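+ // The accumulator x969..x985 (plus the carry bit in x987) is now below
+ // 2*m. Subtract the modulus limbs (eight words of 0xffffffffffffffff and
+ // a top word of 0x1ff) and let the final borrow pick, in constant time,
+ // whichever of the two values lies in [0, m).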
+ var x988 uint64
+ var x989 uint64
+ x988, x989 = bits.Sub64(x969, 0xffffffffffffffff, uint64(0x0))
+ var x990 uint64
+ var x991 uint64
+ x990, x991 = bits.Sub64(x971, 0xffffffffffffffff, uint64(p521Uint1(x989)))
+ var x992 uint64
+ var x993 uint64
+ x992, x993 = bits.Sub64(x973, 0xffffffffffffffff, uint64(p521Uint1(x991)))
+ var x994 uint64
+ var x995 uint64
+ x994, x995 = bits.Sub64(x975, 0xffffffffffffffff, uint64(p521Uint1(x993)))
+ var x996 uint64
+ var x997 uint64
+ x996, x997 = bits.Sub64(x977, 0xffffffffffffffff, uint64(p521Uint1(x995)))
+ var x998 uint64
+ var x999 uint64
+ x998, x999 = bits.Sub64(x979, 0xffffffffffffffff, uint64(p521Uint1(x997)))
+ var x1000 uint64
+ var x1001 uint64
+ x1000, x1001 = bits.Sub64(x981, 0xffffffffffffffff, uint64(p521Uint1(x999)))
+ var x1002 uint64
+ var x1003 uint64
+ x1002, x1003 = bits.Sub64(x983, 0xffffffffffffffff, uint64(p521Uint1(x1001)))
+ var x1004 uint64
+ var x1005 uint64
+ x1004, x1005 = bits.Sub64(x985, 0x1ff, uint64(p521Uint1(x1003)))
+ var x1007 uint64
+ _, x1007 = bits.Sub64(x987, uint64(0x0), uint64(p521Uint1(x1005)))
+ var x1008 uint64
+ p521CmovznzU64(&x1008, p521Uint1(x1007), x988, x969)
+ var x1009 uint64
+ p521CmovznzU64(&x1009, p521Uint1(x1007), x990, x971)
+ var x1010 uint64
+ p521CmovznzU64(&x1010, p521Uint1(x1007), x992, x973)
+ var x1011 uint64
+ p521CmovznzU64(&x1011, p521Uint1(x1007), x994, x975)
+ var x1012 uint64
+ p521CmovznzU64(&x1012, p521Uint1(x1007), x996, x977)
+ var x1013 uint64
+ p521CmovznzU64(&x1013, p521Uint1(x1007), x998, x979)
+ var x1014 uint64
+ p521CmovznzU64(&x1014, p521Uint1(x1007), x1000, x981)
+ var x1015 uint64
+ p521CmovznzU64(&x1015, p521Uint1(x1007), x1002, x983)
+ var x1016 uint64
+ p521CmovznzU64(&x1016, p521Uint1(x1007), x1004, x985)
+ out1[0] = x1008
+ out1[1] = x1009
+ out1[2] = x1010
+ out1[3] = x1011
+ out1[4] = x1012
+ out1[5] = x1013
+ out1[6] = x1014
+ out1[7] = x1015
+ out1[8] = x1016
+}
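+
+// The routine above is fiat-crypto's word-by-word Montgomery multiplication:
+// each round computes q = (accumulator low word) * (-m⁻¹ mod 2⁶⁴) and adds
+// q*m so the low word cancels. For m = 2^521 - 1 that constant is 1, which
+// is why the low accumulator word itself (x579, x690, x801, ...) is
+// multiplied straight into the modulus limbs. A minimal one-round sketch on
+// a single limb, for a hypothetical odd 64-bit modulus m with precomputed
+// mInvNeg = -m⁻¹ mod 2⁶⁴ (illustrative only, not fiat-generated):
+func montMulWord(a, b, m, mInvNeg uint64) uint64 {
+ hi, lo := bits.Mul64(a, b) // t = a*b < m*m, held as a 128-bit value
+ q := lo * mInvNeg // chosen so that t + q*m ≡ 0 (mod 2⁶⁴)
+ qmHi, qmLo := bits.Mul64(q, m)
+ _, carry := bits.Add64(lo, qmLo, 0) // low words cancel; keep the carry
+ t, top := bits.Add64(hi, qmHi, carry) // t = (a*b + q*m) / 2⁶⁴ < 2*m
+ if top != 0 || t >= m { // sketch only: the generated code replaces this
+  t -= m // branch with a cmovznz-style constant-time select
+ }
+ return t // a * b * 2⁻⁶⁴ mod m
+}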
+
+// p521Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//   0 ≤ eval arg1 < m
+//   0 ≤ eval arg2 < m
+// Postconditions:
+//   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+//   0 ≤ eval out1 < m
+//
+func p521Add(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p521Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p521Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p521Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Add64(arg1[4], arg2[4], uint64(p521Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Add64(arg1[5], arg2[5], uint64(p521Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(arg1[6], arg2[6], uint64(p521Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(arg1[7], arg2[7], uint64(p521Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(arg1[8], arg2[8], uint64(p521Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Sub64(x1, 0xffffffffffffffff, uint64(0x0))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Sub64(x3, 0xffffffffffffffff, uint64(p521Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Sub64(x5, 0xffffffffffffffff, uint64(p521Uint1(x22)))
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Sub64(x7, 0xffffffffffffffff, uint64(p521Uint1(x24)))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Sub64(x9, 0xffffffffffffffff, uint64(p521Uint1(x26)))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Sub64(x11, 0xffffffffffffffff, uint64(p521Uint1(x28)))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Sub64(x13, 0xffffffffffffffff, uint64(p521Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Sub64(x15, 0xffffffffffffffff, uint64(p521Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Sub64(x17, 0x1ff, uint64(p521Uint1(x34)))
+ var x38 uint64
+ _, x38 = bits.Sub64(uint64(p521Uint1(x18)), uint64(0x0), uint64(p521Uint1(x36)))
+ var x39 uint64
+ p521CmovznzU64(&x39, p521Uint1(x38), x19, x1)
+ var x40 uint64
+ p521CmovznzU64(&x40, p521Uint1(x38), x21, x3)
+ var x41 uint64
+ p521CmovznzU64(&x41, p521Uint1(x38), x23, x5)
+ var x42 uint64
+ p521CmovznzU64(&x42, p521Uint1(x38), x25, x7)
+ var x43 uint64
+ p521CmovznzU64(&x43, p521Uint1(x38), x27, x9)
+ var x44 uint64
+ p521CmovznzU64(&x44, p521Uint1(x38), x29, x11)
+ var x45 uint64
+ p521CmovznzU64(&x45, p521Uint1(x38), x31, x13)
+ var x46 uint64
+ p521CmovznzU64(&x46, p521Uint1(x38), x33, x15)
+ var x47 uint64
+ p521CmovznzU64(&x47, p521Uint1(x38), x35, x17)
+ out1[0] = x39
+ out1[1] = x40
+ out1[2] = x41
+ out1[3] = x42
+ out1[4] = x43
+ out1[5] = x44
+ out1[6] = x45
+ out1[7] = x46
+ out1[8] = x47
+}
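+
+// p521Add above is the standard "add, then conditionally subtract m" shape:
+// a full bits.Add64 carry chain, a trial bits.Sub64 chain against the
+// modulus limbs, and a p521CmovznzU64 select keyed on the final borrow. A
+// two-limb sketch of the same shape, for a hypothetical 128-bit modulus m
+// stored as little-endian limbs, with a, b already in [0, m) (illustrative
+// only, not fiat-generated):
+func addMod128(a, b, m [2]uint64) [2]uint64 {
+ s0, c := bits.Add64(a[0], b[0], 0)
+ s1, c2 := bits.Add64(a[1], b[1], c)
+ d0, brw := bits.Sub64(s0, m[0], 0) // trial subtraction of the modulus
+ d1, brw2 := bits.Sub64(s1, m[1], brw)
+ _, brw3 := bits.Sub64(c2, 0, brw2) // fold the top carry into the borrow
+ mask := brw3 - 1 // all-ones if a+b >= m, zero if already reduced
+ return [2]uint64{d0&mask | s0&^mask, d1&mask | s1&^mask}
+}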
+
+// p521Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//   0 ≤ eval arg1 < m
+//   0 ≤ eval arg2 < m
+// Postconditions:
+//   eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+//   0 ≤ eval out1 < m
+//
+func p521Sub(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p521Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p521Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p521Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(arg1[4], arg2[4], uint64(p521Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(arg1[5], arg2[5], uint64(p521Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(arg1[6], arg2[6], uint64(p521Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(arg1[7], arg2[7], uint64(p521Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Sub64(arg1[8], arg2[8], uint64(p521Uint1(x16)))
+ var x19 uint64
+ p521CmovznzU64(&x19, p521Uint1(x18), uint64(0x0), 0xffffffffffffffff)
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x1, x19, uint64(0x0))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x3, x19, uint64(p521Uint1(x21)))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x5, x19, uint64(p521Uint1(x23)))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x7, x19, uint64(p521Uint1(x25)))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x9, x19, uint64(p521Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x11, x19, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x13, x19, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x15, x19, uint64(p521Uint1(x33)))
+ var x36 uint64
+ x36, _ = bits.Add64(x17, (x19 & 0x1ff), uint64(p521Uint1(x35)))
+ out1[0] = x20
+ out1[1] = x22
+ out1[2] = x24
+ out1[3] = x26
+ out1[4] = x28
+ out1[5] = x30
+ out1[6] = x32
+ out1[7] = x34
+ out1[8] = x36
+}
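+
+// p521Sub above subtracts limb-wise and, when the chain ends in a borrow,
+// adds m back in by AND-ing every modulus limb with an all-ones mask (x19);
+// the top limb keeps only the 0x1ff bits of the mask. The same mask trick
+// on a hypothetical 128-bit modulus m, with a, b in [0, m) (illustrative
+// only, not fiat-generated):
+func subMod128(a, b, m [2]uint64) [2]uint64 {
+ d0, brw := bits.Sub64(a[0], b[0], 0)
+ d1, brw2 := bits.Sub64(a[1], b[1], brw)
+ mask := -brw2 // all-ones if a < b, zero otherwise
+ r0, c := bits.Add64(d0, m[0]&mask, 0) // add back m (or 0) to land in [0, m)
+ r1, _ := bits.Add64(d1, m[1]&mask, c)
+ return [2]uint64{r0, r1}
+}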
+
+// p521SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//   eval (from_montgomery out1) mod m = 1 mod m
+//   0 ≤ eval out1 < m
+//
+func p521SetOne(out1 *p521MontgomeryDomainFieldElement) {
+ out1[0] = 0x80000000000000
+ out1[1] = uint64(0x0)
+ out1[2] = uint64(0x0)
+ out1[3] = uint64(0x0)
+ out1[4] = uint64(0x0)
+ out1[5] = uint64(0x0)
+ out1[6] = uint64(0x0)
+ out1[7] = uint64(0x0)
+ out1[8] = uint64(0x0)
+}
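+
+// The single nonzero limb above is R mod m: with nine 64-bit limbs,
+// R = 2^576, and since 2^521 ≡ 1 (mod 2^521 - 1), R mod m = 2^55 =
+// 0x80000000000000. A standalone math/big check of that identity (sketch
+// only; assumes "math/big" is imported, and is not part of this file):
+func p521MontOneCheck() bool {
+ m := new(big.Int).Lsh(big.NewInt(1), 521)
+ m.Sub(m, big.NewInt(1)) // m = 2^521 - 1
+ r := new(big.Int).Lsh(big.NewInt(1), 576) // R = 2^(9*64)
+ r.Mod(r, m)
+ return r.Uint64() == 0x80000000000000 // 2^55, the limb set above
+}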
+
+// p521FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//   0 ≤ eval arg1 < m
+// Postconditions:
+//   eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^9) mod m
+//   0 ≤ eval out1 < m
+//
+func p521FromMontgomery(out1 *p521NonMontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ var x3 uint64
+ x3, x2 = bits.Mul64(x1, 0x1ff)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x19, x16, uint64(0x0))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x17, x14, uint64(p521Uint1(x21)))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x15, x12, uint64(p521Uint1(x23)))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x13, x10, uint64(p521Uint1(x25)))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x11, x8, uint64(p521Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x9, x6, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x7, x4, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x5, x2, uint64(p521Uint1(x33)))
+ var x37 uint64
+ _, x37 = bits.Add64(x1, x18, uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(uint64(0x0), x20, uint64(p521Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(uint64(0x0), x22, uint64(p521Uint1(x39)))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(uint64(0x0), x24, uint64(p521Uint1(x41)))
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(uint64(0x0), x26, uint64(p521Uint1(x43)))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(uint64(0x0), x28, uint64(p521Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(uint64(0x0), x30, uint64(p521Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(uint64(0x0), x32, uint64(p521Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(uint64(0x0), x34, uint64(p521Uint1(x51)))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x38, arg1[1], uint64(0x0))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x40, uint64(0x0), uint64(p521Uint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x42, uint64(0x0), uint64(p521Uint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x44, uint64(0x0), uint64(p521Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x46, uint64(0x0), uint64(p521Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x48, uint64(0x0), uint64(p521Uint1(x63)))
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(x50, uint64(0x0), uint64(p521Uint1(x65)))
+ var x68 uint64
+ var x69 uint64
+ x68, x69 = bits.Add64(x52, uint64(0x0), uint64(p521Uint1(x67)))
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x54, 0x1ff)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x74 uint64
+ var x75 uint64
+ x75, x74 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x76 uint64
+ var x77 uint64
+ x77, x76 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x78 uint64
+ var x79 uint64
+ x79, x78 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x80 uint64
+ var x81 uint64
+ x81, x80 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x82 uint64
+ var x83 uint64
+ x83, x82 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x87, x84, uint64(0x0))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x85, x82, uint64(p521Uint1(x89)))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x83, x80, uint64(p521Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x81, x78, uint64(p521Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x79, x76, uint64(p521Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x77, x74, uint64(p521Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x75, x72, uint64(p521Uint1(x99)))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x73, x70, uint64(p521Uint1(x101)))
+ var x105 uint64
+ _, x105 = bits.Add64(x54, x86, uint64(0x0))
+ var x106 uint64
+ var x107 uint64
+ x106, x107 = bits.Add64(x56, x88, uint64(p521Uint1(x105)))
+ var x108 uint64
+ var x109 uint64
+ x108, x109 = bits.Add64(x58, x90, uint64(p521Uint1(x107)))
+ var x110 uint64
+ var x111 uint64
+ x110, x111 = bits.Add64(x60, x92, uint64(p521Uint1(x109)))
+ var x112 uint64
+ var x113 uint64
+ x112, x113 = bits.Add64(x62, x94, uint64(p521Uint1(x111)))
+ var x114 uint64
+ var x115 uint64
+ x114, x115 = bits.Add64(x64, x96, uint64(p521Uint1(x113)))
+ var x116 uint64
+ var x117 uint64
+ x116, x117 = bits.Add64(x66, x98, uint64(p521Uint1(x115)))
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x68, x100, uint64(p521Uint1(x117)))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64((uint64(p521Uint1(x69)) + (uint64(p521Uint1(x53)) + (uint64(p521Uint1(x35)) + x3))), x102, uint64(p521Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x106, arg1[2], uint64(0x0))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x108, uint64(0x0), uint64(p521Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x110, uint64(0x0), uint64(p521Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x112, uint64(0x0), uint64(p521Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x114, uint64(0x0), uint64(p521Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x116, uint64(0x0), uint64(p521Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x118, uint64(0x0), uint64(p521Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x120, uint64(0x0), uint64(p521Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x139, x138 = bits.Mul64(x122, 0x1ff)
+ var x140 uint64
+ var x141 uint64
+ x141, x140 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x142 uint64
+ var x143 uint64
+ x143, x142 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x144 uint64
+ var x145 uint64
+ x145, x144 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x146 uint64
+ var x147 uint64
+ x147, x146 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x148 uint64
+ var x149 uint64
+ x149, x148 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x150 uint64
+ var x151 uint64
+ x151, x150 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x152 uint64
+ var x153 uint64
+ x153, x152 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x155, x152, uint64(0x0))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x153, x150, uint64(p521Uint1(x157)))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x151, x148, uint64(p521Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x149, x146, uint64(p521Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x147, x144, uint64(p521Uint1(x163)))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x145, x142, uint64(p521Uint1(x165)))
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x143, x140, uint64(p521Uint1(x167)))
+ var x170 uint64
+ var x171 uint64
+ x170, x171 = bits.Add64(x141, x138, uint64(p521Uint1(x169)))
+ var x173 uint64
+ _, x173 = bits.Add64(x122, x154, uint64(0x0))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x124, x156, uint64(p521Uint1(x173)))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Add64(x126, x158, uint64(p521Uint1(x175)))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Add64(x128, x160, uint64(p521Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Add64(x130, x162, uint64(p521Uint1(x179)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Add64(x132, x164, uint64(p521Uint1(x181)))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Add64(x134, x166, uint64(p521Uint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Add64(x136, x168, uint64(p521Uint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Add64((uint64(p521Uint1(x137)) + (uint64(p521Uint1(x121)) + (uint64(p521Uint1(x103)) + x71))), x170, uint64(p521Uint1(x187)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Add64(x174, arg1[3], uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x176, uint64(0x0), uint64(p521Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x178, uint64(0x0), uint64(p521Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x180, uint64(0x0), uint64(p521Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x182, uint64(0x0), uint64(p521Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x184, uint64(0x0), uint64(p521Uint1(x199)))
+ var x202 uint64
+ var x203 uint64
+ x202, x203 = bits.Add64(x186, uint64(0x0), uint64(p521Uint1(x201)))
+ var x204 uint64
+ var x205 uint64
+ x204, x205 = bits.Add64(x188, uint64(0x0), uint64(p521Uint1(x203)))
+ var x206 uint64
+ var x207 uint64
+ x207, x206 = bits.Mul64(x190, 0x1ff)
+ var x208 uint64
+ var x209 uint64
+ x209, x208 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x210 uint64
+ var x211 uint64
+ x211, x210 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x212 uint64
+ var x213 uint64
+ x213, x212 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x214 uint64
+ var x215 uint64
+ x215, x214 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x216 uint64
+ var x217 uint64
+ x217, x216 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x218 uint64
+ var x219 uint64
+ x219, x218 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x220 uint64
+ var x221 uint64
+ x221, x220 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x222 uint64
+ var x223 uint64
+ x223, x222 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x224 uint64
+ var x225 uint64
+ x224, x225 = bits.Add64(x223, x220, uint64(0x0))
+ var x226 uint64
+ var x227 uint64
+ x226, x227 = bits.Add64(x221, x218, uint64(p521Uint1(x225)))
+ var x228 uint64
+ var x229 uint64
+ x228, x229 = bits.Add64(x219, x216, uint64(p521Uint1(x227)))
+ var x230 uint64
+ var x231 uint64
+ x230, x231 = bits.Add64(x217, x214, uint64(p521Uint1(x229)))
+ var x232 uint64
+ var x233 uint64
+ x232, x233 = bits.Add64(x215, x212, uint64(p521Uint1(x231)))
+ var x234 uint64
+ var x235 uint64
+ x234, x235 = bits.Add64(x213, x210, uint64(p521Uint1(x233)))
+ var x236 uint64
+ var x237 uint64
+ x236, x237 = bits.Add64(x211, x208, uint64(p521Uint1(x235)))
+ var x238 uint64
+ var x239 uint64
+ x238, x239 = bits.Add64(x209, x206, uint64(p521Uint1(x237)))
+ var x241 uint64
+ _, x241 = bits.Add64(x190, x222, uint64(0x0))
+ var x242 uint64
+ var x243 uint64
+ x242, x243 = bits.Add64(x192, x224, uint64(p521Uint1(x241)))
+ var x244 uint64
+ var x245 uint64
+ x244, x245 = bits.Add64(x194, x226, uint64(p521Uint1(x243)))
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x196, x228, uint64(p521Uint1(x245)))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x198, x230, uint64(p521Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x200, x232, uint64(p521Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x202, x234, uint64(p521Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x204, x236, uint64(p521Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64((uint64(p521Uint1(x205)) + (uint64(p521Uint1(x189)) + (uint64(p521Uint1(x171)) + x139))), x238, uint64(p521Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x242, arg1[4], uint64(0x0))
+ var x260 uint64
+ var x261 uint64
+ x260, x261 = bits.Add64(x244, uint64(0x0), uint64(p521Uint1(x259)))
+ var x262 uint64
+ var x263 uint64
+ x262, x263 = bits.Add64(x246, uint64(0x0), uint64(p521Uint1(x261)))
+ var x264 uint64
+ var x265 uint64
+ x264, x265 = bits.Add64(x248, uint64(0x0), uint64(p521Uint1(x263)))
+ var x266 uint64
+ var x267 uint64
+ x266, x267 = bits.Add64(x250, uint64(0x0), uint64(p521Uint1(x265)))
+ var x268 uint64
+ var x269 uint64
+ x268, x269 = bits.Add64(x252, uint64(0x0), uint64(p521Uint1(x267)))
+ var x270 uint64
+ var x271 uint64
+ x270, x271 = bits.Add64(x254, uint64(0x0), uint64(p521Uint1(x269)))
+ var x272 uint64
+ var x273 uint64
+ x272, x273 = bits.Add64(x256, uint64(0x0), uint64(p521Uint1(x271)))
+ var x274 uint64
+ var x275 uint64
+ x275, x274 = bits.Mul64(x258, 0x1ff)
+ var x276 uint64
+ var x277 uint64
+ x277, x276 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x278 uint64
+ var x279 uint64
+ x279, x278 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x280 uint64
+ var x281 uint64
+ x281, x280 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x282 uint64
+ var x283 uint64
+ x283, x282 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x284 uint64
+ var x285 uint64
+ x285, x284 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x286 uint64
+ var x287 uint64
+ x287, x286 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x288 uint64
+ var x289 uint64
+ x289, x288 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x290 uint64
+ var x291 uint64
+ x291, x290 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Add64(x291, x288, uint64(0x0))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Add64(x289, x286, uint64(p521Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Add64(x287, x284, uint64(p521Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Add64(x285, x282, uint64(p521Uint1(x297)))
+ var x300 uint64
+ var x301 uint64
+ x300, x301 = bits.Add64(x283, x280, uint64(p521Uint1(x299)))
+ var x302 uint64
+ var x303 uint64
+ x302, x303 = bits.Add64(x281, x278, uint64(p521Uint1(x301)))
+ var x304 uint64
+ var x305 uint64
+ x304, x305 = bits.Add64(x279, x276, uint64(p521Uint1(x303)))
+ var x306 uint64
+ var x307 uint64
+ x306, x307 = bits.Add64(x277, x274, uint64(p521Uint1(x305)))
+ var x309 uint64
+ _, x309 = bits.Add64(x258, x290, uint64(0x0))
+ var x310 uint64
+ var x311 uint64
+ x310, x311 = bits.Add64(x260, x292, uint64(p521Uint1(x309)))
+ var x312 uint64
+ var x313 uint64
+ x312, x313 = bits.Add64(x262, x294, uint64(p521Uint1(x311)))
+ var x314 uint64
+ var x315 uint64
+ x314, x315 = bits.Add64(x264, x296, uint64(p521Uint1(x313)))
+ var x316 uint64
+ var x317 uint64
+ x316, x317 = bits.Add64(x266, x298, uint64(p521Uint1(x315)))
+ var x318 uint64
+ var x319 uint64
+ x318, x319 = bits.Add64(x268, x300, uint64(p521Uint1(x317)))
+ var x320 uint64
+ var x321 uint64
+ x320, x321 = bits.Add64(x270, x302, uint64(p521Uint1(x319)))
+ var x322 uint64
+ var x323 uint64
+ x322, x323 = bits.Add64(x272, x304, uint64(p521Uint1(x321)))
+ var x324 uint64
+ var x325 uint64
+ x324, x325 = bits.Add64((uint64(p521Uint1(x273)) + (uint64(p521Uint1(x257)) + (uint64(p521Uint1(x239)) + x207))), x306, uint64(p521Uint1(x323)))
+ var x326 uint64
+ var x327 uint64
+ x326, x327 = bits.Add64(x310, arg1[5], uint64(0x0))
+ var x328 uint64
+ var x329 uint64
+ x328, x329 = bits.Add64(x312, uint64(0x0), uint64(p521Uint1(x327)))
+ var x330 uint64
+ var x331 uint64
+ x330, x331 = bits.Add64(x314, uint64(0x0), uint64(p521Uint1(x329)))
+ var x332 uint64
+ var x333 uint64
+ x332, x333 = bits.Add64(x316, uint64(0x0), uint64(p521Uint1(x331)))
+ var x334 uint64
+ var x335 uint64
+ x334, x335 = bits.Add64(x318, uint64(0x0), uint64(p521Uint1(x333)))
+ var x336 uint64
+ var x337 uint64
+ x336, x337 = bits.Add64(x320, uint64(0x0), uint64(p521Uint1(x335)))
+ var x338 uint64
+ var x339 uint64
+ x338, x339 = bits.Add64(x322, uint64(0x0), uint64(p521Uint1(x337)))
+ var x340 uint64
+ var x341 uint64
+ x340, x341 = bits.Add64(x324, uint64(0x0), uint64(p521Uint1(x339)))
+ var x342 uint64
+ var x343 uint64
+ x343, x342 = bits.Mul64(x326, 0x1ff)
+ var x344 uint64
+ var x345 uint64
+ x345, x344 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x346 uint64
+ var x347 uint64
+ x347, x346 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x348 uint64
+ var x349 uint64
+ x349, x348 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x350 uint64
+ var x351 uint64
+ x351, x350 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x352 uint64
+ var x353 uint64
+ x353, x352 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x354 uint64
+ var x355 uint64
+ x355, x354 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x356 uint64
+ var x357 uint64
+ x357, x356 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x358 uint64
+ var x359 uint64
+ x359, x358 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x360 uint64
+ var x361 uint64
+ x360, x361 = bits.Add64(x359, x356, uint64(0x0))
+ var x362 uint64
+ var x363 uint64
+ x362, x363 = bits.Add64(x357, x354, uint64(p521Uint1(x361)))
+ var x364 uint64
+ var x365 uint64
+ x364, x365 = bits.Add64(x355, x352, uint64(p521Uint1(x363)))
+ var x366 uint64
+ var x367 uint64
+ x366, x367 = bits.Add64(x353, x350, uint64(p521Uint1(x365)))
+ var x368 uint64
+ var x369 uint64
+ x368, x369 = bits.Add64(x351, x348, uint64(p521Uint1(x367)))
+ var x370 uint64
+ var x371 uint64
+ x370, x371 = bits.Add64(x349, x346, uint64(p521Uint1(x369)))
+ var x372 uint64
+ var x373 uint64
+ x372, x373 = bits.Add64(x347, x344, uint64(p521Uint1(x371)))
+ var x374 uint64
+ var x375 uint64
+ x374, x375 = bits.Add64(x345, x342, uint64(p521Uint1(x373)))
+ var x377 uint64
+ _, x377 = bits.Add64(x326, x358, uint64(0x0))
+ var x378 uint64
+ var x379 uint64
+ x378, x379 = bits.Add64(x328, x360, uint64(p521Uint1(x377)))
+ var x380 uint64
+ var x381 uint64
+ x380, x381 = bits.Add64(x330, x362, uint64(p521Uint1(x379)))
+ var x382 uint64
+ var x383 uint64
+ x382, x383 = bits.Add64(x332, x364, uint64(p521Uint1(x381)))
+ var x384 uint64
+ var x385 uint64
+ x384, x385 = bits.Add64(x334, x366, uint64(p521Uint1(x383)))
+ var x386 uint64
+ var x387 uint64
+ x386, x387 = bits.Add64(x336, x368, uint64(p521Uint1(x385)))
+ var x388 uint64
+ var x389 uint64
+ x388, x389 = bits.Add64(x338, x370, uint64(p521Uint1(x387)))
+ var x390 uint64
+ var x391 uint64
+ x390, x391 = bits.Add64(x340, x372, uint64(p521Uint1(x389)))
+ var x392 uint64
+ var x393 uint64
+ x392, x393 = bits.Add64((uint64(p521Uint1(x341)) + (uint64(p521Uint1(x325)) + (uint64(p521Uint1(x307)) + x275))), x374, uint64(p521Uint1(x391)))
+ var x394 uint64
+ var x395 uint64
+ x394, x395 = bits.Add64(x378, arg1[6], uint64(0x0))
+ var x396 uint64
+ var x397 uint64
+ x396, x397 = bits.Add64(x380, uint64(0x0), uint64(p521Uint1(x395)))
+ var x398 uint64
+ var x399 uint64
+ x398, x399 = bits.Add64(x382, uint64(0x0), uint64(p521Uint1(x397)))
+ var x400 uint64
+ var x401 uint64
+ x400, x401 = bits.Add64(x384, uint64(0x0), uint64(p521Uint1(x399)))
+ var x402 uint64
+ var x403 uint64
+ x402, x403 = bits.Add64(x386, uint64(0x0), uint64(p521Uint1(x401)))
+ var x404 uint64
+ var x405 uint64
+ x404, x405 = bits.Add64(x388, uint64(0x0), uint64(p521Uint1(x403)))
+ var x406 uint64
+ var x407 uint64
+ x406, x407 = bits.Add64(x390, uint64(0x0), uint64(p521Uint1(x405)))
+ var x408 uint64
+ var x409 uint64
+ x408, x409 = bits.Add64(x392, uint64(0x0), uint64(p521Uint1(x407)))
+ var x410 uint64
+ var x411 uint64
+ x411, x410 = bits.Mul64(x394, 0x1ff)
+ var x412 uint64
+ var x413 uint64
+ x413, x412 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x414 uint64
+ var x415 uint64
+ x415, x414 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x416 uint64
+ var x417 uint64
+ x417, x416 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x418 uint64
+ var x419 uint64
+ x419, x418 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x420 uint64
+ var x421 uint64
+ x421, x420 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x422 uint64
+ var x423 uint64
+ x423, x422 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x424 uint64
+ var x425 uint64
+ x425, x424 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x426 uint64
+ var x427 uint64
+ x427, x426 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x427, x424, uint64(0x0))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x425, x422, uint64(p521Uint1(x429)))
+ var x432 uint64
+ var x433 uint64
+ x432, x433 = bits.Add64(x423, x420, uint64(p521Uint1(x431)))
+ var x434 uint64
+ var x435 uint64
+ x434, x435 = bits.Add64(x421, x418, uint64(p521Uint1(x433)))
+ var x436 uint64
+ var x437 uint64
+ x436, x437 = bits.Add64(x419, x416, uint64(p521Uint1(x435)))
+ var x438 uint64
+ var x439 uint64
+ x438, x439 = bits.Add64(x417, x414, uint64(p521Uint1(x437)))
+ var x440 uint64
+ var x441 uint64
+ x440, x441 = bits.Add64(x415, x412, uint64(p521Uint1(x439)))
+ var x442 uint64
+ var x443 uint64
+ x442, x443 = bits.Add64(x413, x410, uint64(p521Uint1(x441)))
+ var x445 uint64
+ _, x445 = bits.Add64(x394, x426, uint64(0x0))
+ var x446 uint64
+ var x447 uint64
+ x446, x447 = bits.Add64(x396, x428, uint64(p521Uint1(x445)))
+ var x448 uint64
+ var x449 uint64
+ x448, x449 = bits.Add64(x398, x430, uint64(p521Uint1(x447)))
+ var x450 uint64
+ var x451 uint64
+ x450, x451 = bits.Add64(x400, x432, uint64(p521Uint1(x449)))
+ var x452 uint64
+ var x453 uint64
+ x452, x453 = bits.Add64(x402, x434, uint64(p521Uint1(x451)))
+ var x454 uint64
+ var x455 uint64
+ x454, x455 = bits.Add64(x404, x436, uint64(p521Uint1(x453)))
+ var x456 uint64
+ var x457 uint64
+ x456, x457 = bits.Add64(x406, x438, uint64(p521Uint1(x455)))
+ var x458 uint64
+ var x459 uint64
+ x458, x459 = bits.Add64(x408, x440, uint64(p521Uint1(x457)))
+ var x460 uint64
+ var x461 uint64
+ x460, x461 = bits.Add64((uint64(p521Uint1(x409)) + (uint64(p521Uint1(x393)) + (uint64(p521Uint1(x375)) + x343))), x442, uint64(p521Uint1(x459)))
+ var x462 uint64
+ var x463 uint64
+ x462, x463 = bits.Add64(x446, arg1[7], uint64(0x0))
+ var x464 uint64
+ var x465 uint64
+ x464, x465 = bits.Add64(x448, uint64(0x0), uint64(p521Uint1(x463)))
+ var x466 uint64
+ var x467 uint64
+ x466, x467 = bits.Add64(x450, uint64(0x0), uint64(p521Uint1(x465)))
+ var x468 uint64
+ var x469 uint64
+ x468, x469 = bits.Add64(x452, uint64(0x0), uint64(p521Uint1(x467)))
+ var x470 uint64
+ var x471 uint64
+ x470, x471 = bits.Add64(x454, uint64(0x0), uint64(p521Uint1(x469)))
+ var x472 uint64
+ var x473 uint64
+ x472, x473 = bits.Add64(x456, uint64(0x0), uint64(p521Uint1(x471)))
+ var x474 uint64
+ var x475 uint64
+ x474, x475 = bits.Add64(x458, uint64(0x0), uint64(p521Uint1(x473)))
+ var x476 uint64
+ var x477 uint64
+ x476, x477 = bits.Add64(x460, uint64(0x0), uint64(p521Uint1(x475)))
+ var x478 uint64
+ var x479 uint64
+ x479, x478 = bits.Mul64(x462, 0x1ff)
+ var x480 uint64
+ var x481 uint64
+ x481, x480 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x482 uint64
+ var x483 uint64
+ x483, x482 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x484 uint64
+ var x485 uint64
+ x485, x484 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x486 uint64
+ var x487 uint64
+ x487, x486 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x488 uint64
+ var x489 uint64
+ x489, x488 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x490 uint64
+ var x491 uint64
+ x491, x490 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x492 uint64
+ var x493 uint64
+ x493, x492 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x494 uint64
+ var x495 uint64
+ x495, x494 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x496 uint64
+ var x497 uint64
+ x496, x497 = bits.Add64(x495, x492, uint64(0x0))
+ var x498 uint64
+ var x499 uint64
+ x498, x499 = bits.Add64(x493, x490, uint64(p521Uint1(x497)))
+ var x500 uint64
+ var x501 uint64
+ x500, x501 = bits.Add64(x491, x488, uint64(p521Uint1(x499)))
+ var x502 uint64
+ var x503 uint64
+ x502, x503 = bits.Add64(x489, x486, uint64(p521Uint1(x501)))
+ var x504 uint64
+ var x505 uint64
+ x504, x505 = bits.Add64(x487, x484, uint64(p521Uint1(x503)))
+ var x506 uint64
+ var x507 uint64
+ x506, x507 = bits.Add64(x485, x482, uint64(p521Uint1(x505)))
+ var x508 uint64
+ var x509 uint64
+ x508, x509 = bits.Add64(x483, x480, uint64(p521Uint1(x507)))
+ var x510 uint64
+ var x511 uint64
+ x510, x511 = bits.Add64(x481, x478, uint64(p521Uint1(x509)))
+ var x513 uint64
+ _, x513 = bits.Add64(x462, x494, uint64(0x0))
+ var x514 uint64
+ var x515 uint64
+ x514, x515 = bits.Add64(x464, x496, uint64(p521Uint1(x513)))
+ var x516 uint64
+ var x517 uint64
+ x516, x517 = bits.Add64(x466, x498, uint64(p521Uint1(x515)))
+ var x518 uint64
+ var x519 uint64
+ x518, x519 = bits.Add64(x468, x500, uint64(p521Uint1(x517)))
+ var x520 uint64
+ var x521 uint64
+ x520, x521 = bits.Add64(x470, x502, uint64(p521Uint1(x519)))
+ var x522 uint64
+ var x523 uint64
+ x522, x523 = bits.Add64(x472, x504, uint64(p521Uint1(x521)))
+ var x524 uint64
+ var x525 uint64
+ x524, x525 = bits.Add64(x474, x506, uint64(p521Uint1(x523)))
+ var x526 uint64
+ var x527 uint64
+ x526, x527 = bits.Add64(x476, x508, uint64(p521Uint1(x525)))
+ var x528 uint64
+ var x529 uint64
+ x528, x529 = bits.Add64((uint64(p521Uint1(x477)) + (uint64(p521Uint1(x461)) + (uint64(p521Uint1(x443)) + x411))), x510, uint64(p521Uint1(x527)))
+ var x530 uint64
+ var x531 uint64
+ x530, x531 = bits.Add64(x514, arg1[8], uint64(0x0))
+ var x532 uint64
+ var x533 uint64
+ x532, x533 = bits.Add64(x516, uint64(0x0), uint64(p521Uint1(x531)))
+ var x534 uint64
+ var x535 uint64
+ x534, x535 = bits.Add64(x518, uint64(0x0), uint64(p521Uint1(x533)))
+ var x536 uint64
+ var x537 uint64
+ x536, x537 = bits.Add64(x520, uint64(0x0), uint64(p521Uint1(x535)))
+ var x538 uint64
+ var x539 uint64
+ x538, x539 = bits.Add64(x522, uint64(0x0), uint64(p521Uint1(x537)))
+ var x540 uint64
+ var x541 uint64
+ x540, x541 = bits.Add64(x524, uint64(0x0), uint64(p521Uint1(x539)))
+ var x542 uint64
+ var x543 uint64
+ x542, x543 = bits.Add64(x526, uint64(0x0), uint64(p521Uint1(x541)))
+ var x544 uint64
+ var x545 uint64
+ x544, x545 = bits.Add64(x528, uint64(0x0), uint64(p521Uint1(x543)))
+ var x546 uint64
+ var x547 uint64
+ x547, x546 = bits.Mul64(x530, 0x1ff)
+ var x548 uint64
+ var x549 uint64
+ x549, x548 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x550 uint64
+ var x551 uint64
+ x551, x550 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x552 uint64
+ var x553 uint64
+ x553, x552 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x554 uint64
+ var x555 uint64
+ x555, x554 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x556 uint64
+ var x557 uint64
+ x557, x556 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x558 uint64
+ var x559 uint64
+ x559, x558 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x560 uint64
+ var x561 uint64
+ x561, x560 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x562 uint64
+ var x563 uint64
+ x563, x562 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x564 uint64
+ var x565 uint64
+ x564, x565 = bits.Add64(x563, x560, uint64(0x0))
+ var x566 uint64
+ var x567 uint64
+ x566, x567 = bits.Add64(x561, x558, uint64(p521Uint1(x565)))
+ var x568 uint64
+ var x569 uint64
+ x568, x569 = bits.Add64(x559, x556, uint64(p521Uint1(x567)))
+ var x570 uint64
+ var x571 uint64
+ x570, x571 = bits.Add64(x557, x554, uint64(p521Uint1(x569)))
+ var x572 uint64
+ var x573 uint64
+ x572, x573 = bits.Add64(x555, x552, uint64(p521Uint1(x571)))
+ var x574 uint64
+ var x575 uint64
+ x574, x575 = bits.Add64(x553, x550, uint64(p521Uint1(x573)))
+ var x576 uint64
+ var x577 uint64
+ x576, x577 = bits.Add64(x551, x548, uint64(p521Uint1(x575)))
+ var x578 uint64
+ var x579 uint64
+ x578, x579 = bits.Add64(x549, x546, uint64(p521Uint1(x577)))
+ var x581 uint64
+ _, x581 = bits.Add64(x530, x562, uint64(0x0))
+ var x582 uint64
+ var x583 uint64
+ x582, x583 = bits.Add64(x532, x564, uint64(p521Uint1(x581)))
+ var x584 uint64
+ var x585 uint64
+ x584, x585 = bits.Add64(x534, x566, uint64(p521Uint1(x583)))
+ var x586 uint64
+ var x587 uint64
+ x586, x587 = bits.Add64(x536, x568, uint64(p521Uint1(x585)))
+ var x588 uint64
+ var x589 uint64
+ x588, x589 = bits.Add64(x538, x570, uint64(p521Uint1(x587)))
+ var x590 uint64
+ var x591 uint64
+ x590, x591 = bits.Add64(x540, x572, uint64(p521Uint1(x589)))
+ var x592 uint64
+ var x593 uint64
+ x592, x593 = bits.Add64(x542, x574, uint64(p521Uint1(x591)))
+ var x594 uint64
+ var x595 uint64
+ x594, x595 = bits.Add64(x544, x576, uint64(p521Uint1(x593)))
+ var x596 uint64
+ var x597 uint64
+ x596, x597 = bits.Add64((uint64(p521Uint1(x545)) + (uint64(p521Uint1(x529)) + (uint64(p521Uint1(x511)) + x479))), x578, uint64(p521Uint1(x595)))
+ x598 := (uint64(p521Uint1(x597)) + (uint64(p521Uint1(x579)) + x547))
+ var x599 uint64
+ var x600 uint64
+ x599, x600 = bits.Sub64(x582, 0xffffffffffffffff, uint64(0x0))
+ var x601 uint64
+ var x602 uint64
+ x601, x602 = bits.Sub64(x584, 0xffffffffffffffff, uint64(p521Uint1(x600)))
+ var x603 uint64
+ var x604 uint64
+ x603, x604 = bits.Sub64(x586, 0xffffffffffffffff, uint64(p521Uint1(x602)))
+ var x605 uint64
+ var x606 uint64
+ x605, x606 = bits.Sub64(x588, 0xffffffffffffffff, uint64(p521Uint1(x604)))
+ var x607 uint64
+ var x608 uint64
+ x607, x608 = bits.Sub64(x590, 0xffffffffffffffff, uint64(p521Uint1(x606)))
+ var x609 uint64
+ var x610 uint64
+ x609, x610 = bits.Sub64(x592, 0xffffffffffffffff, uint64(p521Uint1(x608)))
+ var x611 uint64
+ var x612 uint64
+ x611, x612 = bits.Sub64(x594, 0xffffffffffffffff, uint64(p521Uint1(x610)))
+ var x613 uint64
+ var x614 uint64
+ x613, x614 = bits.Sub64(x596, 0xffffffffffffffff, uint64(p521Uint1(x612)))
+ var x615 uint64
+ var x616 uint64
+ x615, x616 = bits.Sub64(x598, 0x1ff, uint64(p521Uint1(x614)))
+ var x618 uint64
+ _, x618 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p521Uint1(x616)))
+ var x619 uint64
+ p521CmovznzU64(&x619, p521Uint1(x618), x599, x582)
+ var x620 uint64
+ p521CmovznzU64(&x620, p521Uint1(x618), x601, x584)
+ var x621 uint64
+ p521CmovznzU64(&x621, p521Uint1(x618), x603, x586)
+ var x622 uint64
+ p521CmovznzU64(&x622, p521Uint1(x618), x605, x588)
+ var x623 uint64
+ p521CmovznzU64(&x623, p521Uint1(x618), x607, x590)
+ var x624 uint64
+ p521CmovznzU64(&x624, p521Uint1(x618), x609, x592)
+ var x625 uint64
+ p521CmovznzU64(&x625, p521Uint1(x618), x611, x594)
+ var x626 uint64
+ p521CmovznzU64(&x626, p521Uint1(x618), x613, x596)
+ var x627 uint64
+ p521CmovznzU64(&x627, p521Uint1(x618), x615, x598)
+ out1[0] = x619
+ out1[1] = x620
+ out1[2] = x621
+ out1[3] = x622
+ out1[4] = x623
+ out1[5] = x624
+ out1[6] = x625
+ out1[7] = x626
+ out1[8] = x627
+}
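+
+// The function above ends with the usual constant-time conditional
+// subtraction: the bits.Sub64 chain computes x - m limb by limb, and the
+// final borrow decides whether the subtraction is kept. A minimal sketch of
+// the pattern, for a hypothetical two-limb modulus {m0, m1}:
+//
+//	d0, b0 := bits.Sub64(x0, m0, 0)
+//	d1, b1 := bits.Sub64(x1, m1, b0)
+//	// b1 == 1 means x < m: keep x; otherwise keep {d0, d1}. The
+//	// p521CmovznzU64 calls make that choice by masking, never by
+//	// branching on secret data.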
+
+// p521ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// Postconditions:
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
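+// Concretely, this computes arg1 * R mod m as a Montgomery multiplication of
+// arg1 by R^2 mod m. With nine 64-bit limbs, R = 2^576; since m = 2^521 - 1,
+// R^2 mod m = 2^(1152 mod 521) = 2^110, whose only nonzero limb is limb 1 =
+// 2^46 = 0x400000000000, the constant multiplied in below. And because
+// m = -1 mod 2^64, the Montgomery constant m' = -m^-1 mod 2^64 is 1, so each
+// reduction round multiplies the low accumulator limb directly by the limbs
+// of m (eight words of 0xffffffffffffffff and a top word of 0x1ff).
+//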
+func p521ToMontgomery(out1 *p521MontgomeryDomainFieldElement, arg1 *p521NonMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x2, x1 = bits.Mul64(arg1[0], 0x400000000000)
+ var x3 uint64
+ var x4 uint64
+ x4, x3 = bits.Mul64(arg1[1], 0x400000000000)
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(x2, x3, uint64(0x0))
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x1, 0x1ff)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x15 uint64
+ var x16 uint64
+ x16, x15 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x17 uint64
+ var x18 uint64
+ x18, x17 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x19 uint64
+ var x20 uint64
+ x20, x19 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x24, x21, uint64(0x0))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x22, x19, uint64(p521Uint1(x26)))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Add64(x20, x17, uint64(p521Uint1(x28)))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x18, x15, uint64(p521Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x16, x13, uint64(p521Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x14, x11, uint64(p521Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x12, x9, uint64(p521Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x10, x7, uint64(p521Uint1(x38)))
+ var x42 uint64
+ _, x42 = bits.Add64(x1, x23, uint64(0x0))
+ var x43 uint64
+ var x44 uint64
+ x43, x44 = bits.Add64(x5, x25, uint64(p521Uint1(x42)))
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64((uint64(p521Uint1(x6)) + x4), x27, uint64(p521Uint1(x44)))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(uint64(0x0), x29, uint64(p521Uint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(uint64(0x0), x31, uint64(p521Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(uint64(0x0), x33, uint64(p521Uint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(uint64(0x0), x35, uint64(p521Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(uint64(0x0), x37, uint64(p521Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(uint64(0x0), x39, uint64(p521Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(arg1[2], 0x400000000000)
+ var x61 uint64
+ var x62 uint64
+ x61, x62 = bits.Add64(x45, x59, uint64(0x0))
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x47, x60, uint64(p521Uint1(x62)))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x49, uint64(0x0), uint64(p521Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x51, uint64(0x0), uint64(p521Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x53, uint64(0x0), uint64(p521Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x55, uint64(0x0), uint64(p521Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x57, uint64(0x0), uint64(p521Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x76, x75 = bits.Mul64(x43, 0x1ff)
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x85 uint64
+ var x86 uint64
+ x86, x85 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x87 uint64
+ var x88 uint64
+ x88, x87 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x89 uint64
+ var x90 uint64
+ x90, x89 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x91 uint64
+ var x92 uint64
+ x92, x91 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x92, x89, uint64(0x0))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x90, x87, uint64(p521Uint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(x88, x85, uint64(p521Uint1(x96)))
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x86, x83, uint64(p521Uint1(x98)))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(x84, x81, uint64(p521Uint1(x100)))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x82, x79, uint64(p521Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x80, x77, uint64(p521Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x78, x75, uint64(p521Uint1(x106)))
+ var x110 uint64
+ _, x110 = bits.Add64(x43, x91, uint64(0x0))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x61, x93, uint64(p521Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x63, x95, uint64(p521Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x65, x97, uint64(p521Uint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x117, x118 = bits.Add64(x67, x99, uint64(p521Uint1(x116)))
+ var x119 uint64
+ var x120 uint64
+ x119, x120 = bits.Add64(x69, x101, uint64(p521Uint1(x118)))
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x71, x103, uint64(p521Uint1(x120)))
+ var x123 uint64
+ var x124 uint64
+ x123, x124 = bits.Add64(x73, x105, uint64(p521Uint1(x122)))
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64((uint64(p521Uint1(x74)) + (uint64(p521Uint1(x58)) + (uint64(p521Uint1(x40)) + x8))), x107, uint64(p521Uint1(x124)))
+ var x127 uint64
+ var x128 uint64
+ x128, x127 = bits.Mul64(arg1[3], 0x400000000000)
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x113, x127, uint64(0x0))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x115, x128, uint64(p521Uint1(x130)))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x117, uint64(0x0), uint64(p521Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x119, uint64(0x0), uint64(p521Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x121, uint64(0x0), uint64(p521Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x123, uint64(0x0), uint64(p521Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x125, uint64(0x0), uint64(p521Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x111, 0x1ff)
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x149 uint64
+ var x150 uint64
+ x150, x149 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x151 uint64
+ var x152 uint64
+ x152, x151 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x153 uint64
+ var x154 uint64
+ x154, x153 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x155 uint64
+ var x156 uint64
+ x156, x155 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x161, x162 = bits.Add64(x160, x157, uint64(0x0))
+ var x163 uint64
+ var x164 uint64
+ x163, x164 = bits.Add64(x158, x155, uint64(p521Uint1(x162)))
+ var x165 uint64
+ var x166 uint64
+ x165, x166 = bits.Add64(x156, x153, uint64(p521Uint1(x164)))
+ var x167 uint64
+ var x168 uint64
+ x167, x168 = bits.Add64(x154, x151, uint64(p521Uint1(x166)))
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x152, x149, uint64(p521Uint1(x168)))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x150, x147, uint64(p521Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x148, x145, uint64(p521Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x146, x143, uint64(p521Uint1(x174)))
+ var x178 uint64
+ _, x178 = bits.Add64(x111, x159, uint64(0x0))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x129, x161, uint64(p521Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x131, x163, uint64(p521Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x133, x165, uint64(p521Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x135, x167, uint64(p521Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x137, x169, uint64(p521Uint1(x186)))
+ var x189 uint64
+ var x190 uint64
+ x189, x190 = bits.Add64(x139, x171, uint64(p521Uint1(x188)))
+ var x191 uint64
+ var x192 uint64
+ x191, x192 = bits.Add64(x141, x173, uint64(p521Uint1(x190)))
+ var x193 uint64
+ var x194 uint64
+ x193, x194 = bits.Add64((uint64(p521Uint1(x142)) + (uint64(p521Uint1(x126)) + (uint64(p521Uint1(x108)) + x76))), x175, uint64(p521Uint1(x192)))
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(arg1[4], 0x400000000000)
+ var x197 uint64
+ var x198 uint64
+ x197, x198 = bits.Add64(x181, x195, uint64(0x0))
+ var x199 uint64
+ var x200 uint64
+ x199, x200 = bits.Add64(x183, x196, uint64(p521Uint1(x198)))
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x185, uint64(0x0), uint64(p521Uint1(x200)))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x187, uint64(0x0), uint64(p521Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x189, uint64(0x0), uint64(p521Uint1(x204)))
+ var x207 uint64
+ var x208 uint64
+ x207, x208 = bits.Add64(x191, uint64(0x0), uint64(p521Uint1(x206)))
+ var x209 uint64
+ var x210 uint64
+ x209, x210 = bits.Add64(x193, uint64(0x0), uint64(p521Uint1(x208)))
+ var x211 uint64
+ var x212 uint64
+ x212, x211 = bits.Mul64(x179, 0x1ff)
+ var x213 uint64
+ var x214 uint64
+ x214, x213 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x215 uint64
+ var x216 uint64
+ x216, x215 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x217 uint64
+ var x218 uint64
+ x218, x217 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x219 uint64
+ var x220 uint64
+ x220, x219 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x221 uint64
+ var x222 uint64
+ x222, x221 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x229 uint64
+ var x230 uint64
+ x229, x230 = bits.Add64(x228, x225, uint64(0x0))
+ var x231 uint64
+ var x232 uint64
+ x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
+ var x233 uint64
+ var x234 uint64
+ x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
+ var x246 uint64
+ _, x246 = bits.Add64(x179, x227, uint64(0x0))
+ var x247 uint64
+ var x248 uint64
+ x247, x248 = bits.Add64(x197, x229, uint64(p521Uint1(x246)))
+ var x249 uint64
+ var x250 uint64
+ x249, x250 = bits.Add64(x199, x231, uint64(p521Uint1(x248)))
+ var x251 uint64
+ var x252 uint64
+ x251, x252 = bits.Add64(x201, x233, uint64(p521Uint1(x250)))
+ var x253 uint64
+ var x254 uint64
+ x253, x254 = bits.Add64(x203, x235, uint64(p521Uint1(x252)))
+ var x255 uint64
+ var x256 uint64
+ x255, x256 = bits.Add64(x205, x237, uint64(p521Uint1(x254)))
+ var x257 uint64
+ var x258 uint64
+ x257, x258 = bits.Add64(x207, x239, uint64(p521Uint1(x256)))
+ var x259 uint64
+ var x260 uint64
+ x259, x260 = bits.Add64(x209, x241, uint64(p521Uint1(x258)))
+ var x261 uint64
+ var x262 uint64
+ x261, x262 = bits.Add64((uint64(p521Uint1(x210)) + (uint64(p521Uint1(x194)) + (uint64(p521Uint1(x176)) + x144))), x243, uint64(p521Uint1(x260)))
+ var x263 uint64
+ var x264 uint64
+ x264, x263 = bits.Mul64(arg1[5], 0x400000000000)
+ var x265 uint64
+ var x266 uint64
+ x265, x266 = bits.Add64(x249, x263, uint64(0x0))
+ var x267 uint64
+ var x268 uint64
+ x267, x268 = bits.Add64(x251, x264, uint64(p521Uint1(x266)))
+ var x269 uint64
+ var x270 uint64
+ x269, x270 = bits.Add64(x253, uint64(0x0), uint64(p521Uint1(x268)))
+ var x271 uint64
+ var x272 uint64
+ x271, x272 = bits.Add64(x255, uint64(0x0), uint64(p521Uint1(x270)))
+ var x273 uint64
+ var x274 uint64
+ x273, x274 = bits.Add64(x257, uint64(0x0), uint64(p521Uint1(x272)))
+ var x275 uint64
+ var x276 uint64
+ x275, x276 = bits.Add64(x259, uint64(0x0), uint64(p521Uint1(x274)))
+ var x277 uint64
+ var x278 uint64
+ x277, x278 = bits.Add64(x261, uint64(0x0), uint64(p521Uint1(x276)))
+ var x279 uint64
+ var x280 uint64
+ x280, x279 = bits.Mul64(x247, 0x1ff)
+ var x281 uint64
+ var x282 uint64
+ x282, x281 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x283 uint64
+ var x284 uint64
+ x284, x283 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x285 uint64
+ var x286 uint64
+ x286, x285 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x287 uint64
+ var x288 uint64
+ x288, x287 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x289 uint64
+ var x290 uint64
+ x290, x289 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x291 uint64
+ var x292 uint64
+ x292, x291 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x293 uint64
+ var x294 uint64
+ x294, x293 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x295 uint64
+ var x296 uint64
+ x296, x295 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x297 uint64
+ var x298 uint64
+ x297, x298 = bits.Add64(x296, x293, uint64(0x0))
+ var x299 uint64
+ var x300 uint64
+ x299, x300 = bits.Add64(x294, x291, uint64(p521Uint1(x298)))
+ var x301 uint64
+ var x302 uint64
+ x301, x302 = bits.Add64(x292, x289, uint64(p521Uint1(x300)))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x290, x287, uint64(p521Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x288, x285, uint64(p521Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x286, x283, uint64(p521Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x284, x281, uint64(p521Uint1(x308)))
+ var x311 uint64
+ var x312 uint64
+ x311, x312 = bits.Add64(x282, x279, uint64(p521Uint1(x310)))
+ var x314 uint64
+ _, x314 = bits.Add64(x247, x295, uint64(0x0))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x265, x297, uint64(p521Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x267, x299, uint64(p521Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x269, x301, uint64(p521Uint1(x318)))
+ var x321 uint64
+ var x322 uint64
+ x321, x322 = bits.Add64(x271, x303, uint64(p521Uint1(x320)))
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64(x273, x305, uint64(p521Uint1(x322)))
+ var x325 uint64
+ var x326 uint64
+ x325, x326 = bits.Add64(x275, x307, uint64(p521Uint1(x324)))
+ var x327 uint64
+ var x328 uint64
+ x327, x328 = bits.Add64(x277, x309, uint64(p521Uint1(x326)))
+ var x329 uint64
+ var x330 uint64
+ x329, x330 = bits.Add64((uint64(p521Uint1(x278)) + (uint64(p521Uint1(x262)) + (uint64(p521Uint1(x244)) + x212))), x311, uint64(p521Uint1(x328)))
+ var x331 uint64
+ var x332 uint64
+ x332, x331 = bits.Mul64(arg1[6], 0x400000000000)
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x317, x331, uint64(0x0))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x319, x332, uint64(p521Uint1(x334)))
+ var x337 uint64
+ var x338 uint64
+ x337, x338 = bits.Add64(x321, uint64(0x0), uint64(p521Uint1(x336)))
+ var x339 uint64
+ var x340 uint64
+ x339, x340 = bits.Add64(x323, uint64(0x0), uint64(p521Uint1(x338)))
+ var x341 uint64
+ var x342 uint64
+ x341, x342 = bits.Add64(x325, uint64(0x0), uint64(p521Uint1(x340)))
+ var x343 uint64
+ var x344 uint64
+ x343, x344 = bits.Add64(x327, uint64(0x0), uint64(p521Uint1(x342)))
+ var x345 uint64
+ var x346 uint64
+ x345, x346 = bits.Add64(x329, uint64(0x0), uint64(p521Uint1(x344)))
+ var x347 uint64
+ var x348 uint64
+ x348, x347 = bits.Mul64(x315, 0x1ff)
+ var x349 uint64
+ var x350 uint64
+ x350, x349 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x351 uint64
+ var x352 uint64
+ x352, x351 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x353 uint64
+ var x354 uint64
+ x354, x353 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x355 uint64
+ var x356 uint64
+ x356, x355 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x357 uint64
+ var x358 uint64
+ x358, x357 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x359 uint64
+ var x360 uint64
+ x360, x359 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x361 uint64
+ var x362 uint64
+ x362, x361 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x363 uint64
+ var x364 uint64
+ x364, x363 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x365 uint64
+ var x366 uint64
+ x365, x366 = bits.Add64(x364, x361, uint64(0x0))
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x362, x359, uint64(p521Uint1(x366)))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x360, x357, uint64(p521Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x358, x355, uint64(p521Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x356, x353, uint64(p521Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x354, x351, uint64(p521Uint1(x374)))
+ var x377 uint64
+ var x378 uint64
+ x377, x378 = bits.Add64(x352, x349, uint64(p521Uint1(x376)))
+ var x379 uint64
+ var x380 uint64
+ x379, x380 = bits.Add64(x350, x347, uint64(p521Uint1(x378)))
+ var x382 uint64
+ _, x382 = bits.Add64(x315, x363, uint64(0x0))
+ var x383 uint64
+ var x384 uint64
+ x383, x384 = bits.Add64(x333, x365, uint64(p521Uint1(x382)))
+ var x385 uint64
+ var x386 uint64
+ x385, x386 = bits.Add64(x335, x367, uint64(p521Uint1(x384)))
+ var x387 uint64
+ var x388 uint64
+ x387, x388 = bits.Add64(x337, x369, uint64(p521Uint1(x386)))
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64(x339, x371, uint64(p521Uint1(x388)))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Add64(x341, x373, uint64(p521Uint1(x390)))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Add64(x343, x375, uint64(p521Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x345, x377, uint64(p521Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64((uint64(p521Uint1(x346)) + (uint64(p521Uint1(x330)) + (uint64(p521Uint1(x312)) + x280))), x379, uint64(p521Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x400, x399 = bits.Mul64(arg1[7], 0x400000000000)
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Add64(x385, x399, uint64(0x0))
+ var x403 uint64
+ var x404 uint64
+ x403, x404 = bits.Add64(x387, x400, uint64(p521Uint1(x402)))
+ var x405 uint64
+ var x406 uint64
+ x405, x406 = bits.Add64(x389, uint64(0x0), uint64(p521Uint1(x404)))
+ var x407 uint64
+ var x408 uint64
+ x407, x408 = bits.Add64(x391, uint64(0x0), uint64(p521Uint1(x406)))
+ var x409 uint64
+ var x410 uint64
+ x409, x410 = bits.Add64(x393, uint64(0x0), uint64(p521Uint1(x408)))
+ var x411 uint64
+ var x412 uint64
+ x411, x412 = bits.Add64(x395, uint64(0x0), uint64(p521Uint1(x410)))
+ var x413 uint64
+ var x414 uint64
+ x413, x414 = bits.Add64(x397, uint64(0x0), uint64(p521Uint1(x412)))
+ var x415 uint64
+ var x416 uint64
+ x416, x415 = bits.Mul64(x383, 0x1ff)
+ var x417 uint64
+ var x418 uint64
+ x418, x417 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x419 uint64
+ var x420 uint64
+ x420, x419 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x421 uint64
+ var x422 uint64
+ x422, x421 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x423 uint64
+ var x424 uint64
+ x424, x423 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x425 uint64
+ var x426 uint64
+ x426, x425 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x427 uint64
+ var x428 uint64
+ x428, x427 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x429 uint64
+ var x430 uint64
+ x430, x429 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x431 uint64
+ var x432 uint64
+ x432, x431 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x433 uint64
+ var x434 uint64
+ x433, x434 = bits.Add64(x432, x429, uint64(0x0))
+ var x435 uint64
+ var x436 uint64
+ x435, x436 = bits.Add64(x430, x427, uint64(p521Uint1(x434)))
+ var x437 uint64
+ var x438 uint64
+ x437, x438 = bits.Add64(x428, x425, uint64(p521Uint1(x436)))
+ var x439 uint64
+ var x440 uint64
+ x439, x440 = bits.Add64(x426, x423, uint64(p521Uint1(x438)))
+ var x441 uint64
+ var x442 uint64
+ x441, x442 = bits.Add64(x424, x421, uint64(p521Uint1(x440)))
+ var x443 uint64
+ var x444 uint64
+ x443, x444 = bits.Add64(x422, x419, uint64(p521Uint1(x442)))
+ var x445 uint64
+ var x446 uint64
+ x445, x446 = bits.Add64(x420, x417, uint64(p521Uint1(x444)))
+ var x447 uint64
+ var x448 uint64
+ x447, x448 = bits.Add64(x418, x415, uint64(p521Uint1(x446)))
+ var x450 uint64
+ _, x450 = bits.Add64(x383, x431, uint64(0x0))
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x401, x433, uint64(p521Uint1(x450)))
+ var x453 uint64
+ var x454 uint64
+ x453, x454 = bits.Add64(x403, x435, uint64(p521Uint1(x452)))
+ var x455 uint64
+ var x456 uint64
+ x455, x456 = bits.Add64(x405, x437, uint64(p521Uint1(x454)))
+ var x457 uint64
+ var x458 uint64
+ x457, x458 = bits.Add64(x407, x439, uint64(p521Uint1(x456)))
+ var x459 uint64
+ var x460 uint64
+ x459, x460 = bits.Add64(x409, x441, uint64(p521Uint1(x458)))
+ var x461 uint64
+ var x462 uint64
+ x461, x462 = bits.Add64(x411, x443, uint64(p521Uint1(x460)))
+ var x463 uint64
+ var x464 uint64
+ x463, x464 = bits.Add64(x413, x445, uint64(p521Uint1(x462)))
+ var x465 uint64
+ var x466 uint64
+ x465, x466 = bits.Add64((uint64(p521Uint1(x414)) + (uint64(p521Uint1(x398)) + (uint64(p521Uint1(x380)) + x348))), x447, uint64(p521Uint1(x464)))
+ var x467 uint64
+ var x468 uint64
+ x468, x467 = bits.Mul64(arg1[8], 0x400000000000)
+ var x469 uint64
+ var x470 uint64
+ x469, x470 = bits.Add64(x453, x467, uint64(0x0))
+ var x471 uint64
+ var x472 uint64
+ x471, x472 = bits.Add64(x455, x468, uint64(p521Uint1(x470)))
+ var x473 uint64
+ var x474 uint64
+ x473, x474 = bits.Add64(x457, uint64(0x0), uint64(p521Uint1(x472)))
+ var x475 uint64
+ var x476 uint64
+ x475, x476 = bits.Add64(x459, uint64(0x0), uint64(p521Uint1(x474)))
+ var x477 uint64
+ var x478 uint64
+ x477, x478 = bits.Add64(x461, uint64(0x0), uint64(p521Uint1(x476)))
+ var x479 uint64
+ var x480 uint64
+ x479, x480 = bits.Add64(x463, uint64(0x0), uint64(p521Uint1(x478)))
+ var x481 uint64
+ var x482 uint64
+ x481, x482 = bits.Add64(x465, uint64(0x0), uint64(p521Uint1(x480)))
+ var x483 uint64
+ var x484 uint64
+ x484, x483 = bits.Mul64(x451, 0x1ff)
+ var x485 uint64
+ var x486 uint64
+ x486, x485 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x487 uint64
+ var x488 uint64
+ x488, x487 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x489 uint64
+ var x490 uint64
+ x490, x489 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x491 uint64
+ var x492 uint64
+ x492, x491 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x493 uint64
+ var x494 uint64
+ x494, x493 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x495 uint64
+ var x496 uint64
+ x496, x495 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x497 uint64
+ var x498 uint64
+ x498, x497 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x499 uint64
+ var x500 uint64
+ x500, x499 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x501 uint64
+ var x502 uint64
+ x501, x502 = bits.Add64(x500, x497, uint64(0x0))
+ var x503 uint64
+ var x504 uint64
+ x503, x504 = bits.Add64(x498, x495, uint64(p521Uint1(x502)))
+ var x505 uint64
+ var x506 uint64
+ x505, x506 = bits.Add64(x496, x493, uint64(p521Uint1(x504)))
+ var x507 uint64
+ var x508 uint64
+ x507, x508 = bits.Add64(x494, x491, uint64(p521Uint1(x506)))
+ var x509 uint64
+ var x510 uint64
+ x509, x510 = bits.Add64(x492, x489, uint64(p521Uint1(x508)))
+ var x511 uint64
+ var x512 uint64
+ x511, x512 = bits.Add64(x490, x487, uint64(p521Uint1(x510)))
+ var x513 uint64
+ var x514 uint64
+ x513, x514 = bits.Add64(x488, x485, uint64(p521Uint1(x512)))
+ var x515 uint64
+ var x516 uint64
+ x515, x516 = bits.Add64(x486, x483, uint64(p521Uint1(x514)))
+ var x518 uint64
+ _, x518 = bits.Add64(x451, x499, uint64(0x0))
+ var x519 uint64
+ var x520 uint64
+ x519, x520 = bits.Add64(x469, x501, uint64(p521Uint1(x518)))
+ var x521 uint64
+ var x522 uint64
+ x521, x522 = bits.Add64(x471, x503, uint64(p521Uint1(x520)))
+ var x523 uint64
+ var x524 uint64
+ x523, x524 = bits.Add64(x473, x505, uint64(p521Uint1(x522)))
+ var x525 uint64
+ var x526 uint64
+ x525, x526 = bits.Add64(x475, x507, uint64(p521Uint1(x524)))
+ var x527 uint64
+ var x528 uint64
+ x527, x528 = bits.Add64(x477, x509, uint64(p521Uint1(x526)))
+ var x529 uint64
+ var x530 uint64
+ x529, x530 = bits.Add64(x479, x511, uint64(p521Uint1(x528)))
+ var x531 uint64
+ var x532 uint64
+ x531, x532 = bits.Add64(x481, x513, uint64(p521Uint1(x530)))
+ var x533 uint64
+ var x534 uint64
+ x533, x534 = bits.Add64((uint64(p521Uint1(x482)) + (uint64(p521Uint1(x466)) + (uint64(p521Uint1(x448)) + x416))), x515, uint64(p521Uint1(x532)))
+ x535 := (uint64(p521Uint1(x534)) + (uint64(p521Uint1(x516)) + x484))
+ var x536 uint64
+ var x537 uint64
+ x536, x537 = bits.Sub64(x519, 0xffffffffffffffff, uint64(0x0))
+ var x538 uint64
+ var x539 uint64
+ x538, x539 = bits.Sub64(x521, 0xffffffffffffffff, uint64(p521Uint1(x537)))
+ var x540 uint64
+ var x541 uint64
+ x540, x541 = bits.Sub64(x523, 0xffffffffffffffff, uint64(p521Uint1(x539)))
+ var x542 uint64
+ var x543 uint64
+ x542, x543 = bits.Sub64(x525, 0xffffffffffffffff, uint64(p521Uint1(x541)))
+ var x544 uint64
+ var x545 uint64
+ x544, x545 = bits.Sub64(x527, 0xffffffffffffffff, uint64(p521Uint1(x543)))
+ var x546 uint64
+ var x547 uint64
+ x546, x547 = bits.Sub64(x529, 0xffffffffffffffff, uint64(p521Uint1(x545)))
+ var x548 uint64
+ var x549 uint64
+ x548, x549 = bits.Sub64(x531, 0xffffffffffffffff, uint64(p521Uint1(x547)))
+ var x550 uint64
+ var x551 uint64
+ x550, x551 = bits.Sub64(x533, 0xffffffffffffffff, uint64(p521Uint1(x549)))
+ var x552 uint64
+ var x553 uint64
+ x552, x553 = bits.Sub64(x535, 0x1ff, uint64(p521Uint1(x551)))
+ var x555 uint64
+ _, x555 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p521Uint1(x553)))
+ var x556 uint64
+ p521CmovznzU64(&x556, p521Uint1(x555), x536, x519)
+ var x557 uint64
+ p521CmovznzU64(&x557, p521Uint1(x555), x538, x521)
+ var x558 uint64
+ p521CmovznzU64(&x558, p521Uint1(x555), x540, x523)
+ var x559 uint64
+ p521CmovznzU64(&x559, p521Uint1(x555), x542, x525)
+ var x560 uint64
+ p521CmovznzU64(&x560, p521Uint1(x555), x544, x527)
+ var x561 uint64
+ p521CmovznzU64(&x561, p521Uint1(x555), x546, x529)
+ var x562 uint64
+ p521CmovznzU64(&x562, p521Uint1(x555), x548, x531)
+ var x563 uint64
+ p521CmovznzU64(&x563, p521Uint1(x555), x550, x533)
+ var x564 uint64
+ p521CmovznzU64(&x564, p521Uint1(x555), x552, x535)
+ out1[0] = x556
+ out1[1] = x557
+ out1[2] = x558
+ out1[3] = x559
+ out1[4] = x560
+ out1[5] = x561
+ out1[6] = x562
+ out1[7] = x563
+ out1[8] = x564
+}
+
+// p521Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// Output Bounds:
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p521Selectznz(out1 *[9]uint64, arg1 p521Uint1, arg2 *[9]uint64, arg3 *[9]uint64) {
+ var x1 uint64
+ p521CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p521CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p521CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p521CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ var x5 uint64
+ p521CmovznzU64(&x5, arg1, arg2[4], arg3[4])
+ var x6 uint64
+ p521CmovznzU64(&x6, arg1, arg2[5], arg3[5])
+ var x7 uint64
+ p521CmovznzU64(&x7, arg1, arg2[6], arg3[6])
+ var x8 uint64
+ p521CmovznzU64(&x8, arg1, arg2[7], arg3[7])
+ var x9 uint64
+ p521CmovznzU64(&x9, arg1, arg2[8], arg3[8])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+ out1[4] = x5
+ out1[5] = x6
+ out1[6] = x7
+ out1[7] = x8
+ out1[8] = x9
+}
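+
+// A typical use of p521Selectznz (illustrative, not taken from this file) is
+// a constant-time table lookup that visits every entry and selects by flag
+// instead of by index:
+//
+//	var r [9]uint64
+//	for i := range table {
+//		// eq is a hypothetical constant-time equality test that
+//		// returns p521Uint1(1) when i equals the secret index.
+//		p521Selectznz(&r, eq(uint64(i), secret), &r, &table[i])
+//	}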
+
+// p521ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+// 0 ≤ eval arg1 < m
+// Postconditions:
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..65]
+//
+// Input Bounds:
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1ff]]
+// Output Bounds:
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1]]
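+//
+// Note that 66 bytes is ceil(521/8); since 521 = 8*65 + 1, the final byte
+// carries a single bit, which is why its output bound above is [0x0 ~> 0x1].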
+func p521ToBytes(out1 *[66]uint8, arg1 *[9]uint64) {
+ x1 := arg1[8]
+ x2 := arg1[7]
+ x3 := arg1[6]
+ x4 := arg1[5]
+ x5 := arg1[4]
+ x6 := arg1[3]
+ x7 := arg1[2]
+ x8 := arg1[1]
+ x9 := arg1[0]
+ x10 := (uint8(x9) & 0xff)
+ x11 := (x9 >> 8)
+ x12 := (uint8(x11) & 0xff)
+ x13 := (x11 >> 8)
+ x14 := (uint8(x13) & 0xff)
+ x15 := (x13 >> 8)
+ x16 := (uint8(x15) & 0xff)
+ x17 := (x15 >> 8)
+ x18 := (uint8(x17) & 0xff)
+ x19 := (x17 >> 8)
+ x20 := (uint8(x19) & 0xff)
+ x21 := (x19 >> 8)
+ x22 := (uint8(x21) & 0xff)
+ x23 := uint8((x21 >> 8))
+ x24 := (uint8(x8) & 0xff)
+ x25 := (x8 >> 8)
+ x26 := (uint8(x25) & 0xff)
+ x27 := (x25 >> 8)
+ x28 := (uint8(x27) & 0xff)
+ x29 := (x27 >> 8)
+ x30 := (uint8(x29) & 0xff)
+ x31 := (x29 >> 8)
+ x32 := (uint8(x31) & 0xff)
+ x33 := (x31 >> 8)
+ x34 := (uint8(x33) & 0xff)
+ x35 := (x33 >> 8)
+ x36 := (uint8(x35) & 0xff)
+ x37 := uint8((x35 >> 8))
+ x38 := (uint8(x7) & 0xff)
+ x39 := (x7 >> 8)
+ x40 := (uint8(x39) & 0xff)
+ x41 := (x39 >> 8)
+ x42 := (uint8(x41) & 0xff)
+ x43 := (x41 >> 8)
+ x44 := (uint8(x43) & 0xff)
+ x45 := (x43 >> 8)
+ x46 := (uint8(x45) & 0xff)
+ x47 := (x45 >> 8)
+ x48 := (uint8(x47) & 0xff)
+ x49 := (x47 >> 8)
+ x50 := (uint8(x49) & 0xff)
+ x51 := uint8((x49 >> 8))
+ x52 := (uint8(x6) & 0xff)
+ x53 := (x6 >> 8)
+ x54 := (uint8(x53) & 0xff)
+ x55 := (x53 >> 8)
+ x56 := (uint8(x55) & 0xff)
+ x57 := (x55 >> 8)
+ x58 := (uint8(x57) & 0xff)
+ x59 := (x57 >> 8)
+ x60 := (uint8(x59) & 0xff)
+ x61 := (x59 >> 8)
+ x62 := (uint8(x61) & 0xff)
+ x63 := (x61 >> 8)
+ x64 := (uint8(x63) & 0xff)
+ x65 := uint8((x63 >> 8))
+ x66 := (uint8(x5) & 0xff)
+ x67 := (x5 >> 8)
+ x68 := (uint8(x67) & 0xff)
+ x69 := (x67 >> 8)
+ x70 := (uint8(x69) & 0xff)
+ x71 := (x69 >> 8)
+ x72 := (uint8(x71) & 0xff)
+ x73 := (x71 >> 8)
+ x74 := (uint8(x73) & 0xff)
+ x75 := (x73 >> 8)
+ x76 := (uint8(x75) & 0xff)
+ x77 := (x75 >> 8)
+ x78 := (uint8(x77) & 0xff)
+ x79 := uint8((x77 >> 8))
+ x80 := (uint8(x4) & 0xff)
+ x81 := (x4 >> 8)
+ x82 := (uint8(x81) & 0xff)
+ x83 := (x81 >> 8)
+ x84 := (uint8(x83) & 0xff)
+ x85 := (x83 >> 8)
+ x86 := (uint8(x85) & 0xff)
+ x87 := (x85 >> 8)
+ x88 := (uint8(x87) & 0xff)
+ x89 := (x87 >> 8)
+ x90 := (uint8(x89) & 0xff)
+ x91 := (x89 >> 8)
+ x92 := (uint8(x91) & 0xff)
+ x93 := uint8((x91 >> 8))
+ x94 := (uint8(x3) & 0xff)
+ x95 := (x3 >> 8)
+ x96 := (uint8(x95) & 0xff)
+ x97 := (x95 >> 8)
+ x98 := (uint8(x97) & 0xff)
+ x99 := (x97 >> 8)
+ x100 := (uint8(x99) & 0xff)
+ x101 := (x99 >> 8)
+ x102 := (uint8(x101) & 0xff)
+ x103 := (x101 >> 8)
+ x104 := (uint8(x103) & 0xff)
+ x105 := (x103 >> 8)
+ x106 := (uint8(x105) & 0xff)
+ x107 := uint8((x105 >> 8))
+ x108 := (uint8(x2) & 0xff)
+ x109 := (x2 >> 8)
+ x110 := (uint8(x109) & 0xff)
+ x111 := (x109 >> 8)
+ x112 := (uint8(x111) & 0xff)
+ x113 := (x111 >> 8)
+ x114 := (uint8(x113) & 0xff)
+ x115 := (x113 >> 8)
+ x116 := (uint8(x115) & 0xff)
+ x117 := (x115 >> 8)
+ x118 := (uint8(x117) & 0xff)
+ x119 := (x117 >> 8)
+ x120 := (uint8(x119) & 0xff)
+ x121 := uint8((x119 >> 8))
+ x122 := (uint8(x1) & 0xff)
+ x123 := p521Uint1((x1 >> 8))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+ out1[4] = x18
+ out1[5] = x20
+ out1[6] = x22
+ out1[7] = x23
+ out1[8] = x24
+ out1[9] = x26
+ out1[10] = x28
+ out1[11] = x30
+ out1[12] = x32
+ out1[13] = x34
+ out1[14] = x36
+ out1[15] = x37
+ out1[16] = x38
+ out1[17] = x40
+ out1[18] = x42
+ out1[19] = x44
+ out1[20] = x46
+ out1[21] = x48
+ out1[22] = x50
+ out1[23] = x51
+ out1[24] = x52
+ out1[25] = x54
+ out1[26] = x56
+ out1[27] = x58
+ out1[28] = x60
+ out1[29] = x62
+ out1[30] = x64
+ out1[31] = x65
+ out1[32] = x66
+ out1[33] = x68
+ out1[34] = x70
+ out1[35] = x72
+ out1[36] = x74
+ out1[37] = x76
+ out1[38] = x78
+ out1[39] = x79
+ out1[40] = x80
+ out1[41] = x82
+ out1[42] = x84
+ out1[43] = x86
+ out1[44] = x88
+ out1[45] = x90
+ out1[46] = x92
+ out1[47] = x93
+ out1[48] = x94
+ out1[49] = x96
+ out1[50] = x98
+ out1[51] = x100
+ out1[52] = x102
+ out1[53] = x104
+ out1[54] = x106
+ out1[55] = x107
+ out1[56] = x108
+ out1[57] = x110
+ out1[58] = x112
+ out1[59] = x114
+ out1[60] = x116
+ out1[61] = x118
+ out1[62] = x120
+ out1[63] = x121
+ out1[64] = x122
+ out1[65] = uint8(x123)
+}
+
+// p521FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+// 0 ≤ bytes_eval arg1 < m
+// Postconditions:
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1]]
+// Output Bounds:
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1ff]]
+func p521FromBytes(out1 *[9]uint64, arg1 *[66]uint8) {
+ x1 := (uint64(p521Uint1(arg1[65])) << 8)
+ x2 := arg1[64]
+ x3 := (uint64(arg1[63]) << 56)
+ x4 := (uint64(arg1[62]) << 48)
+ x5 := (uint64(arg1[61]) << 40)
+ x6 := (uint64(arg1[60]) << 32)
+ x7 := (uint64(arg1[59]) << 24)
+ x8 := (uint64(arg1[58]) << 16)
+ x9 := (uint64(arg1[57]) << 8)
+ x10 := arg1[56]
+ x11 := (uint64(arg1[55]) << 56)
+ x12 := (uint64(arg1[54]) << 48)
+ x13 := (uint64(arg1[53]) << 40)
+ x14 := (uint64(arg1[52]) << 32)
+ x15 := (uint64(arg1[51]) << 24)
+ x16 := (uint64(arg1[50]) << 16)
+ x17 := (uint64(arg1[49]) << 8)
+ x18 := arg1[48]
+ x19 := (uint64(arg1[47]) << 56)
+ x20 := (uint64(arg1[46]) << 48)
+ x21 := (uint64(arg1[45]) << 40)
+ x22 := (uint64(arg1[44]) << 32)
+ x23 := (uint64(arg1[43]) << 24)
+ x24 := (uint64(arg1[42]) << 16)
+ x25 := (uint64(arg1[41]) << 8)
+ x26 := arg1[40]
+ x27 := (uint64(arg1[39]) << 56)
+ x28 := (uint64(arg1[38]) << 48)
+ x29 := (uint64(arg1[37]) << 40)
+ x30 := (uint64(arg1[36]) << 32)
+ x31 := (uint64(arg1[35]) << 24)
+ x32 := (uint64(arg1[34]) << 16)
+ x33 := (uint64(arg1[33]) << 8)
+ x34 := arg1[32]
+ x35 := (uint64(arg1[31]) << 56)
+ x36 := (uint64(arg1[30]) << 48)
+ x37 := (uint64(arg1[29]) << 40)
+ x38 := (uint64(arg1[28]) << 32)
+ x39 := (uint64(arg1[27]) << 24)
+ x40 := (uint64(arg1[26]) << 16)
+ x41 := (uint64(arg1[25]) << 8)
+ x42 := arg1[24]
+ x43 := (uint64(arg1[23]) << 56)
+ x44 := (uint64(arg1[22]) << 48)
+ x45 := (uint64(arg1[21]) << 40)
+ x46 := (uint64(arg1[20]) << 32)
+ x47 := (uint64(arg1[19]) << 24)
+ x48 := (uint64(arg1[18]) << 16)
+ x49 := (uint64(arg1[17]) << 8)
+ x50 := arg1[16]
+ x51 := (uint64(arg1[15]) << 56)
+ x52 := (uint64(arg1[14]) << 48)
+ x53 := (uint64(arg1[13]) << 40)
+ x54 := (uint64(arg1[12]) << 32)
+ x55 := (uint64(arg1[11]) << 24)
+ x56 := (uint64(arg1[10]) << 16)
+ x57 := (uint64(arg1[9]) << 8)
+ x58 := arg1[8]
+ x59 := (uint64(arg1[7]) << 56)
+ x60 := (uint64(arg1[6]) << 48)
+ x61 := (uint64(arg1[5]) << 40)
+ x62 := (uint64(arg1[4]) << 32)
+ x63 := (uint64(arg1[3]) << 24)
+ x64 := (uint64(arg1[2]) << 16)
+ x65 := (uint64(arg1[1]) << 8)
+ x66 := arg1[0]
+ x67 := (x65 + uint64(x66))
+ x68 := (x64 + x67)
+ x69 := (x63 + x68)
+ x70 := (x62 + x69)
+ x71 := (x61 + x70)
+ x72 := (x60 + x71)
+ x73 := (x59 + x72)
+ x74 := (x57 + uint64(x58))
+ x75 := (x56 + x74)
+ x76 := (x55 + x75)
+ x77 := (x54 + x76)
+ x78 := (x53 + x77)
+ x79 := (x52 + x78)
+ x80 := (x51 + x79)
+ x81 := (x49 + uint64(x50))
+ x82 := (x48 + x81)
+ x83 := (x47 + x82)
+ x84 := (x46 + x83)
+ x85 := (x45 + x84)
+ x86 := (x44 + x85)
+ x87 := (x43 + x86)
+ x88 := (x41 + uint64(x42))
+ x89 := (x40 + x88)
+ x90 := (x39 + x89)
+ x91 := (x38 + x90)
+ x92 := (x37 + x91)
+ x93 := (x36 + x92)
+ x94 := (x35 + x93)
+ x95 := (x33 + uint64(x34))
+ x96 := (x32 + x95)
+ x97 := (x31 + x96)
+ x98 := (x30 + x97)
+ x99 := (x29 + x98)
+ x100 := (x28 + x99)
+ x101 := (x27 + x100)
+ x102 := (x25 + uint64(x26))
+ x103 := (x24 + x102)
+ x104 := (x23 + x103)
+ x105 := (x22 + x104)
+ x106 := (x21 + x105)
+ x107 := (x20 + x106)
+ x108 := (x19 + x107)
+ x109 := (x17 + uint64(x18))
+ x110 := (x16 + x109)
+ x111 := (x15 + x110)
+ x112 := (x14 + x111)
+ x113 := (x13 + x112)
+ x114 := (x12 + x113)
+ x115 := (x11 + x114)
+ x116 := (x9 + uint64(x10))
+ x117 := (x8 + x116)
+ x118 := (x7 + x117)
+ x119 := (x6 + x118)
+ x120 := (x5 + x119)
+ x121 := (x4 + x120)
+ x122 := (x3 + x121)
+ x123 := (x1 + uint64(x2))
+ out1[0] = x73
+ out1[1] = x80
+ out1[2] = x87
+ out1[3] = x94
+ out1[4] = x101
+ out1[5] = x108
+ out1[6] = x115
+ out1[7] = x122
+ out1[8] = x123
+}
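+
+// p521FromBytes performs no modular reduction: the shifted bytes are only
+// summed into their 64-bit words, which is sound under the precondition that
+// the input evaluates to less than m. Under that assumption the two helpers
+// round-trip:
+//
+//	var b [66]uint8
+//	var fe, fe2 [9]uint64
+//	p521ToBytes(&b, &fe)
+//	p521FromBytes(&fe2, &b) // fe2 == fe whenever eval fe < m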
diff --git a/src/crypto/elliptic/internal/fiat/p521_invert.go b/src/crypto/elliptic/internal/fiat/p521_invert.go
new file mode 100644
index 0000000..407711a
--- /dev/null
+++ b/src/crypto/elliptic/internal/fiat/p521_invert.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by addchain. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *P521Element) Invert(x *P521Element) *P521Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of 13 multiplications and 520 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.3.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _1100 = _11 << 2
+ // _1111 = _11 + _1100
+ // _11110000 = _1111 << 4
+ // _11111111 = _1111 + _11110000
+ // x16 = _11111111 << 8 + _11111111
+ // x32 = x16 << 16 + x16
+ // x64 = x32 << 32 + x32
+ // x65 = 2*x64 + 1
+ // x129 = x65 << 64 + x64
+ // x130 = 2*x129 + 1
+ // x259 = x130 << 129 + x129
+ // x260 = 2*x259 + 1
+ // x519 = x260 << 259 + x259
+ // return x519 << 2 + 1
+ //
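+ // The chain thus reaches x519 = x^(2^519 - 1); the two final squarings and
+ // one multiplication by x give x^(4*(2^519 - 1) + 1) = x^(2^521 - 3),
+ // which is x^(p - 2) as required.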
+
+ var z = new(P521Element).Set(e)
+ var t0 = new(P521Element)
+
+ z.Square(x)
+ z.Mul(x, z)
+ t0.Square(z)
+ for s := 1; s < 2; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 4; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 8; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 16; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 32; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t0.Mul(x, t0)
+ for s := 0; s < 64; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t0.Mul(x, t0)
+ for s := 0; s < 129; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t0.Mul(x, t0)
+ for s := 0; s < 259; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 2; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+
+ return e.Set(z)
+}
diff --git a/src/crypto/elliptic/internal/nistec/nistec_test.go b/src/crypto/elliptic/internal/nistec/nistec_test.go
new file mode 100644
index 0000000..4eae998
--- /dev/null
+++ b/src/crypto/elliptic/internal/nistec/nistec_test.go
@@ -0,0 +1,94 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nistec_test
+
+import (
+ "crypto/elliptic/internal/nistec"
+ "math/rand"
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestAllocations(t *testing.T) {
+ if strings.HasSuffix(os.Getenv("GO_BUILDER_NAME"), "-noopt") {
+ t.Skip("skipping allocations test without relevant optimizations")
+ }
+ t.Run("P224", func(t *testing.T) {
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := nistec.NewP224Generator()
+ scalar := make([]byte, 66)
+ rand.Read(scalar)
+ p.ScalarMult(p, scalar)
+ out := p.Bytes()
+ if _, err := p.SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1f", allocs)
+ }
+ })
+ t.Run("P384", func(t *testing.T) {
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := nistec.NewP384Generator()
+ scalar := make([]byte, 66)
+ rand.Read(scalar)
+ p.ScalarMult(p, scalar)
+ out := p.Bytes()
+ if _, err := p.SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1f", allocs)
+ }
+ })
+ t.Run("P521", func(t *testing.T) {
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := nistec.NewP521Generator()
+ scalar := make([]byte, 66)
+ rand.Read(scalar)
+ p.ScalarMult(p, scalar)
+ out := p.Bytes()
+ if _, err := p.SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1f", allocs)
+ }
+ })
+}
+
+func BenchmarkScalarMult(b *testing.B) {
+ b.Run("P224", func(b *testing.B) {
+ scalar := make([]byte, 66)
+ rand.Read(scalar)
+ p := nistec.NewP224Generator()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ p.ScalarMult(p, scalar)
+ }
+ })
+ b.Run("P384", func(b *testing.B) {
+ scalar := make([]byte, 66)
+ rand.Read(scalar)
+ p := nistec.NewP384Generator()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ p.ScalarMult(p, scalar)
+ }
+ })
+ b.Run("P521", func(b *testing.B) {
+ scalar := make([]byte, 66)
+ rand.Read(scalar)
+ p := nistec.NewP521Generator()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ p.ScalarMult(p, scalar)
+ }
+ })
+}
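+
+// Note: the 66-byte scalars are presumably sized for P-521, whose encoded
+// field elements take 66 bytes; ScalarMult consumes however many bytes it is
+// given, so the same length keeps the three subtests uniform.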
diff --git a/src/crypto/elliptic/internal/nistec/p224.go b/src/crypto/elliptic/internal/nistec/p224.go
new file mode 100644
index 0000000..74dbc18
--- /dev/null
+++ b/src/crypto/elliptic/internal/nistec/p224.go
@@ -0,0 +1,293 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nistec
+
+import (
+ "crypto/elliptic/internal/fiat"
+ "crypto/subtle"
+ "errors"
+)
+
+var p224B, _ = new(fiat.P224Element).SetBytes([]byte{0xb4, 0x05, 0x0a, 0x85,
+ 0x0c, 0x04, 0xb3, 0xab, 0xf5, 0x41, 0x32, 0x56, 0x50, 0x44, 0xb0, 0xb7,
+ 0xd7, 0xbf, 0xd8, 0xba, 0x27, 0x0b, 0x39, 0x43, 0x23, 0x55, 0xff, 0xb4})
+
+var p224G, _ = NewP224Point().SetBytes([]byte{0x04,
+ 0xb7, 0x0e, 0x0c, 0xbd, 0x6b, 0xb4, 0xbf, 0x7f, 0x32, 0x13, 0x90, 0xb9,
+ 0x4a, 0x03, 0xc1, 0xd3, 0x56, 0xc2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xd6,
+ 0x11, 0x5c, 0x1d, 0x21, 0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb,
+ 0x4c, 0x22, 0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64,
+ 0x44, 0xd5, 0x81, 0x99, 0x85, 0x00, 0x7e, 0x34})
+
+const p224ElementLength = 28
+
+// P224Point is a P-224 point. The zero value is NOT valid.
+type P224Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P224Element
+}
+
+// NewP224Point returns a new P224Point representing the point at infinity.
+func NewP224Point() *P224Point {
+ return &P224Point{
+ x: new(fiat.P224Element),
+ y: new(fiat.P224Element).One(),
+ z: new(fiat.P224Element),
+ }
+}
+
+// NewP224Generator returns a new P224Point set to the canonical generator.
+func NewP224Generator() *P224Point {
+ return (&P224Point{
+ x: new(fiat.P224Element),
+ y: new(fiat.P224Element),
+ z: new(fiat.P224Element),
+ }).Set(p224G)
+}
+
+// Set sets p = q and returns p.
+func (p *P224Point) Set(q *P224Point) *P224Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P224Point) SetBytes(b []byte) (*P224Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP224Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p224ElementLength && b[0] == 4:
+ x, err := new(fiat.P224Element).SetBytes(b[1 : 1+p224ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P224Element).SetBytes(b[1+p224ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p224CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form
+ case len(b) == 1+p224ElementLength && (b[0] == 2 || b[0] == 3):
+ return nil, errors.New("unimplemented") // TODO(filippo)
+
+ default:
+ return nil, errors.New("invalid P224 point encoding")
+ }
+}
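+
+// From a caller's point of view, SetBytes and Bytes round-trip (a sketch,
+// assuming buf holds a valid SEC 1 encoding):
+//
+//	p, err := nistec.NewP224Point().SetBytes(buf)
+//	if err == nil {
+//		enc := p.Bytes() // canonical infinity or uncompressed encoding
+//		_ = enc
+//	}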
+
+func p224CheckOnCurve(x, y *fiat.P224Element) error {
+ // x³ - 3x + b.
+ x3 := new(fiat.P224Element).Square(x)
+ x3.Mul(x3, x)
+
+ threeX := new(fiat.P224Element).Add(x, x)
+ threeX.Add(threeX, x)
+
+ x3.Sub(x3, threeX)
+ x3.Add(x3, p224B)
+
+ // y² = x³ - 3x + b
+ y2 := new(fiat.P224Element).Square(y)
+
+ if x3.Equal(y2) != 1 {
+ return errors.New("P224 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P224Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [133]byte
+ return p.bytes(&out)
+}
+
+func (p *P224Point) bytes(out *[133]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P224Element).Invert(p.z)
+ xx := new(fiat.P224Element).Mul(p.x, zinv)
+ yy := new(fiat.P224Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, xx.Bytes()...)
+ buf = append(buf, yy.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P224Point) Add(p1, p2 *P224Point) *P224Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P224Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P224Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P224Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P224Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P224Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P224Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P224Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P224Element).Mul(p224B, t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p224B, y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
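+
+// Because the formulas are complete, Add needs no special cases for p1 == p2
+// or for inputs at infinity, which is what lets the constant-time ScalarMult
+// loop below use it unconditionally.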
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P224Point) Double(p *P224Point) *P224Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P224Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P224Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P224Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P224Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P224Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P224Element).Mul(p224B, t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P224Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p224B, z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P224Point) Select(p1, p2 *P224Point, cond int) *P224Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P224Point) ScalarMult(q *P224Point, scalar []byte) *P224Point {
+ // table holds the first 16 multiples of q. The explicit NewP224Point calls
+ // get inlined, letting the allocations live on the stack.
+ var table = [16]*P224Point{
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
+ }
+ for i := 1; i < 16; i++ {
+ table[i].Add(table[i-1], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := NewP224Point()
+ p.Set(NewP224Point())
+ for _, byte := range scalar {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ for i := uint8(0); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(byte>>4, i)
+ t.Select(table[i], t, cond)
+ }
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ for i := uint8(0); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(byte&0b1111, i)
+ t.Select(table[i], t, cond)
+ }
+ p.Add(p, t)
+ }
+
+ return p
+}
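+
+// Cost sketch: the table costs 15 point additions up front; each scalar byte
+// then costs 8 doublings and 2 additions, and every table read goes through
+// Select, so the sequence of curve operations is independent of the scalar's
+// value.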
diff --git a/src/crypto/elliptic/internal/nistec/p384.go b/src/crypto/elliptic/internal/nistec/p384.go
new file mode 100644
index 0000000..24a166d
--- /dev/null
+++ b/src/crypto/elliptic/internal/nistec/p384.go
@@ -0,0 +1,298 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nistec
+
+import (
+ "crypto/elliptic/internal/fiat"
+ "crypto/subtle"
+ "errors"
+)
+
+var p384B, _ = new(fiat.P384Element).SetBytes([]byte{
+ 0xb3, 0x31, 0x2f, 0xa7, 0xe2, 0x3e, 0xe7, 0xe4, 0x98, 0x8e, 0x05, 0x6b,
+ 0xe3, 0xf8, 0x2d, 0x19, 0x18, 0x1d, 0x9c, 0x6e, 0xfe, 0x81, 0x41, 0x12,
+ 0x03, 0x14, 0x08, 0x8f, 0x50, 0x13, 0x87, 0x5a, 0xc6, 0x56, 0x39, 0x8d,
+ 0x8a, 0x2e, 0xd1, 0x9d, 0x2a, 0x85, 0xc8, 0xed, 0xd3, 0xec, 0x2a, 0xef})
+
+var p384G, _ = NewP384Point().SetBytes([]byte{0x04,
+ 0xaa, 0x87, 0xca, 0x22, 0xbe, 0x8b, 0x05, 0x37, 0x8e, 0xb1, 0xc7, 0x1e,
+ 0xf3, 0x20, 0xad, 0x74, 0x6e, 0x1d, 0x3b, 0x62, 0x8b, 0xa7, 0x9b, 0x98,
+ 0x59, 0xf7, 0x41, 0xe0, 0x82, 0x54, 0x2a, 0x38, 0x55, 0x02, 0xf2, 0x5d,
+ 0xbf, 0x55, 0x29, 0x6c, 0x3a, 0x54, 0x5e, 0x38, 0x72, 0x76, 0x0a, 0xb7,
+ 0x36, 0x17, 0xde, 0x4a, 0x96, 0x26, 0x2c, 0x6f, 0x5d, 0x9e, 0x98, 0xbf,
+ 0x92, 0x92, 0xdc, 0x29, 0xf8, 0xf4, 0x1d, 0xbd, 0x28, 0x9a, 0x14, 0x7c,
+ 0xe9, 0xda, 0x31, 0x13, 0xb5, 0xf0, 0xb8, 0xc0, 0x0a, 0x60, 0xb1, 0xce,
+ 0x1d, 0x7e, 0x81, 0x9d, 0x7a, 0x43, 0x1d, 0x7c, 0x90, 0xea, 0x0e, 0x5f})
+
+const p384ElementLength = 48
+
+// P384Point is a P-384 point. The zero value is NOT valid.
+type P384Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P384Element
+}
+
+// NewP384Point returns a new P384Point representing the point at infinity.
+func NewP384Point() *P384Point {
+ return &P384Point{
+ x: new(fiat.P384Element),
+ y: new(fiat.P384Element).One(),
+ z: new(fiat.P384Element),
+ }
+}
+
+// NewP384Generator returns a new P384Point set to the canonical generator.
+func NewP384Generator() *P384Point {
+ return (&P384Point{
+ x: new(fiat.P384Element),
+ y: new(fiat.P384Element),
+ z: new(fiat.P384Element),
+ }).Set(p384G)
+}
+
+// Set sets p = q and returns p.
+func (p *P384Point) Set(q *P384Point) *P384Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P384Point) SetBytes(b []byte) (*P384Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP384Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p384ElementLength && b[0] == 4:
+ x, err := new(fiat.P384Element).SetBytes(b[1 : 1+p384ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P384Element).SetBytes(b[1+p384ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p384CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+p384ElementLength && (b[0] == 2 || b[0] == 3):
+ return nil, errors.New("unimplemented") // TODO(filippo)
+
+ default:
+ return nil, errors.New("invalid P384 point encoding")
+ }
+}
+
+func p384CheckOnCurve(x, y *fiat.P384Element) error {
+ // x³ - 3x + b.
+ x3 := new(fiat.P384Element).Square(x)
+ x3.Mul(x3, x)
+
+ threeX := new(fiat.P384Element).Add(x, x)
+ threeX.Add(threeX, x)
+
+ x3.Sub(x3, threeX)
+ x3.Add(x3, p384B)
+
+ // y² = x³ - 3x + b
+ y2 := new(fiat.P384Element).Square(y)
+
+ if x3.Equal(y2) != 1 {
+ return errors.New("P384 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P384Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + 2*p384ElementLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P384Point) bytes(out *[1 + 2*p384ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P384Element).Invert(p.z)
+ xx := new(fiat.P384Element).Mul(p.x, zinv)
+ yy := new(fiat.P384Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, xx.Bytes()...)
+ buf = append(buf, yy.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P384Point) Add(p1, p2 *P384Point) *P384Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P384Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P384Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P384Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P384Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P384Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P384Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P384Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P384Element).Mul(p384B, t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p384B, y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P384Point) Double(p *P384Point) *P384Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P384Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P384Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P384Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P384Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P384Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P384Element).Mul(p384B, t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P384Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p384B, z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P384Point) Select(p1, p2 *P384Point, cond int) *P384Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P384Point) ScalarMult(q *P384Point, scalar []byte) *P384Point {
+ // table holds the first 16 multiples of q, from table[0] = 0q (the point
+ // at infinity) to table[15] = 15q. The explicit NewP384Point calls get
+ // inlined, letting the allocations live on the stack.
+ var table = [16]*P384Point{
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
+ }
+ for i := 1; i < 16; i++ {
+ table[i].Add(table[i-1], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := NewP384Point()
+ p.Set(NewP384Point())
+ for _, byte := range scalar {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ for i := uint8(0); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(byte>>4, i)
+ t.Select(table[i], t, cond)
+ }
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ for i := uint8(0); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(byte&0b1111, i)
+ t.Select(table[i], t, cond)
+ }
+ p.Add(p, t)
+ }
+
+ return p
+}
diff --git a/src/crypto/elliptic/internal/nistec/p521.go b/src/crypto/elliptic/internal/nistec/p521.go
new file mode 100644
index 0000000..cdbd195
--- /dev/null
+++ b/src/crypto/elliptic/internal/nistec/p521.go
@@ -0,0 +1,310 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package nistec implements the NIST P elliptic curves from FIPS 186-4.
+//
+// This package uses fiat-crypto for its backend field arithmetic (not math/big)
+// and exposes constant-time, heap allocation-free, byte slice-based safe APIs.
+// Group operations use modern and safe complete addition formulas. The point at
+// infinity is handled and encoded according to SEC 1, Version 2.0, and invalid
+// curve points can't be represented.
+package nistec
+
+import (
+ "crypto/elliptic/internal/fiat"
+ "crypto/subtle"
+ "errors"
+)
+
+var p521B, _ = new(fiat.P521Element).SetBytes([]byte{
+ 0x00, 0x51, 0x95, 0x3e, 0xb9, 0x61, 0x8e, 0x1c, 0x9a, 0x1f, 0x92, 0x9a,
+ 0x21, 0xa0, 0xb6, 0x85, 0x40, 0xee, 0xa2, 0xda, 0x72, 0x5b, 0x99, 0xb3,
+ 0x15, 0xf3, 0xb8, 0xb4, 0x89, 0x91, 0x8e, 0xf1, 0x09, 0xe1, 0x56, 0x19,
+ 0x39, 0x51, 0xec, 0x7e, 0x93, 0x7b, 0x16, 0x52, 0xc0, 0xbd, 0x3b, 0xb1,
+ 0xbf, 0x07, 0x35, 0x73, 0xdf, 0x88, 0x3d, 0x2c, 0x34, 0xf1, 0xef, 0x45,
+ 0x1f, 0xd4, 0x6b, 0x50, 0x3f, 0x00})
+
+var p521G, _ = NewP521Point().SetBytes([]byte{0x04,
+ 0x00, 0xc6, 0x85, 0x8e, 0x06, 0xb7, 0x04, 0x04, 0xe9, 0xcd, 0x9e, 0x3e,
+ 0xcb, 0x66, 0x23, 0x95, 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x05, 0x3f,
+ 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, 0x4d, 0x3d, 0xba, 0xa1, 0x4b,
+ 0x5e, 0x77, 0xef, 0xe7, 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, 0xff,
+ 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, 0x6a, 0x42, 0x9b, 0xf9, 0x7e,
+ 0x7e, 0x31, 0xc2, 0xe5, 0xbd, 0x66, 0x01, 0x18, 0x39, 0x29, 0x6a, 0x78,
+ 0x9a, 0x3b, 0xc0, 0x04, 0x5c, 0x8a, 0x5f, 0xb4, 0x2c, 0x7d, 0x1b, 0xd9,
+ 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b, 0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17,
+ 0x27, 0x3e, 0x66, 0x2c, 0x97, 0xee, 0x72, 0x99, 0x5e, 0xf4, 0x26, 0x40,
+ 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad, 0x07, 0x61, 0x35, 0x3c, 0x70, 0x86,
+ 0xa2, 0x72, 0xc2, 0x40, 0x88, 0xbe, 0x94, 0x76, 0x9f, 0xd1, 0x66, 0x50})
+
+const p521ElementLength = 66
+
+// P521Point is a P-521 point. The zero value is NOT valid.
+type P521Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P521Element
+}
+
+// NewP521Point returns a new P521Point representing the point at infinity.
+func NewP521Point() *P521Point {
+ return &P521Point{
+ x: new(fiat.P521Element),
+ y: new(fiat.P521Element).One(),
+ z: new(fiat.P521Element),
+ }
+}
+
+// NewP521Generator returns a new P521Point set to the canonical generator.
+func NewP521Generator() *P521Point {
+ return (&P521Point{
+ x: new(fiat.P521Element),
+ y: new(fiat.P521Element),
+ z: new(fiat.P521Element),
+ }).Set(p521G)
+}
+
+// Set sets p = q and returns p.
+func (p *P521Point) Set(q *P521Point) *P521Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P521Point) SetBytes(b []byte) (*P521Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP521Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p521ElementLength && b[0] == 4:
+ x, err := new(fiat.P521Element).SetBytes(b[1 : 1+p521ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P521Element).SetBytes(b[1+p521ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p521CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+p521ElementLength && (b[0] == 2 || b[0] == 3):
+ return nil, errors.New("unimplemented") // TODO(filippo)
+
+ default:
+ return nil, errors.New("invalid P521 point encoding")
+ }
+}
+
+func p521CheckOnCurve(x, y *fiat.P521Element) error {
+ // x³ - 3x + b.
+ x3 := new(fiat.P521Element).Square(x)
+ x3.Mul(x3, x)
+
+ threeX := new(fiat.P521Element).Add(x, x)
+ threeX.Add(threeX, x)
+
+ x3.Sub(x3, threeX)
+ x3.Add(x3, p521B)
+
+ // y² = x³ - 3x + b
+ y2 := new(fiat.P521Element).Square(y)
+
+ if x3.Equal(y2) != 1 {
+ return errors.New("P521 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P521Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [133]byte
+ return p.bytes(&out)
+}
+
+func (p *P521Point) bytes(out *[133]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P521Element).Invert(p.z)
+ xx := new(fiat.P521Element).Mul(p.x, zinv)
+ yy := new(fiat.P521Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, xx.Bytes()...)
+ buf = append(buf, yy.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P521Point) Add(p1, p2 *P521Point) *P521Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P521Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P521Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P521Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P521Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P521Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P521Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P521Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P521Element).Mul(p521B, t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p521B, y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P521Point) Double(p *P521Point) *P521Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P521Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P521Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P521Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P521Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P521Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P521Element).Mul(p521B, t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P521Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p521B, z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P521Point) Select(p1, p2 *P521Point, cond int) *P521Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P521Point) ScalarMult(q *P521Point, scalar []byte) *P521Point {
+ // table holds the first 16 multiples of q, from table[0] = 0q (the point
+ // at infinity) to table[15] = 15q. The explicit NewP521Point calls get
+ // inlined, letting the allocations live on the stack.
+ var table = [16]*P521Point{
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
+ }
+ for i := 1; i < 16; i++ {
+ table[i].Add(table[i-1], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := NewP521Point()
+ p.Set(NewP521Point())
+ for _, byte := range scalar {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ for i := uint8(0); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(byte>>4, i)
+ t.Select(table[i], t, cond)
+ }
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ for i := uint8(0); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(byte&0b1111, i)
+ t.Select(table[i], t, cond)
+ }
+ p.Add(p, t)
+ }
+
+ return p
+}
diff --git a/src/crypto/elliptic/p224.go b/src/crypto/elliptic/p224.go
new file mode 100644
index 0000000..8a431c4
--- /dev/null
+++ b/src/crypto/elliptic/p224.go
@@ -0,0 +1,139 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+import (
+ "crypto/elliptic/internal/nistec"
+ "crypto/rand"
+ "math/big"
+)
+
+// p224Curve is a Curve implementation based on nistec.P224Point.
+//
+// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
+// legacy idiosyncrasies it requires, such as invalid and infinity point
+// handling.
+//
+// To interact with the nistec package, points are encoded into and decoded from
+// properly formatted byte slices. All big.Int use is limited to this package.
+// Encoding and decoding take about 1/1000th of the runtime of a scalar
+// multiplication, so the overhead is acceptable.
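+//
+// For example, Add below converts its big.Int arguments with Marshal and
+// nistec's SetBytes, operates on nistec.P224Point values, and converts the
+// result back with Bytes and Unmarshal.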
+type p224Curve struct {
+ params *CurveParams
+}
+
+var p224 p224Curve
+var _ Curve = p224
+
+func initP224() {
+ p224.params = &CurveParams{
+ Name: "P-224",
+ BitSize: 224,
+ // FIPS 186-4, section D.1.2.2
+ P: bigFromDecimal("26959946667150639794667015087019630673557916260026308143510066298881"),
+ N: bigFromDecimal("26959946667150639794667015087019625940457807714424391721682722368061"),
+ B: bigFromHex("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4"),
+ Gx: bigFromHex("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21"),
+ Gy: bigFromHex("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34"),
+ }
+}
+
+func (curve p224Curve) Params() *CurveParams {
+ return curve.params
+}
+
+func (curve p224Curve) IsOnCurve(x, y *big.Int) bool {
+ // IsOnCurve is documented to reject (0, 0), the conventional point at
+ // infinity, which however is accepted by p224PointFromAffine.
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return false
+ }
+ _, ok := p224PointFromAffine(x, y)
+ return ok
+}
+
+func p224PointFromAffine(x, y *big.Int) (p *nistec.P224Point, ok bool) {
+ // (0, 0) is by convention the point at infinity, which can't be represented
+ // in affine coordinates. Marshal incorrectly encodes it as an uncompressed
+ // point, which SetBytes would correctly reject. See Issue 37294.
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return nistec.NewP224Point(), true
+ }
+ if x.Sign() < 0 || y.Sign() < 0 {
+ return nil, false
+ }
+ if x.BitLen() > 224 || y.BitLen() > 224 {
+ return nil, false
+ }
+ p, err := nistec.NewP224Point().SetBytes(Marshal(P224(), x, y))
+ if err != nil {
+ return nil, false
+ }
+ return p, true
+}
+
+func p224PointToAffine(p *nistec.P224Point) (x, y *big.Int) {
+ out := p.Bytes()
+ if len(out) == 1 && out[0] == 0 {
+ // This is the correct encoding of the point at infinity, which
+ // Unmarshal does not support. See Issue 37294.
+ return new(big.Int), new(big.Int)
+ }
+ x, y = Unmarshal(P224(), out)
+ if x == nil {
+ panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
+ }
+ return x, y
+}
+
+// p224RandomPoint returns a random point on the curve. It's used when Add,
+// Double, or ScalarMult are fed a point not on the curve, which is undefined
+// behavior. Originally, we used to do the math on it anyway (which allows
+// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
+// happening in the first place. Now, we just can't construct a nistec.P224Point
+// for an invalid pair of coordinates, because that API is safer. If we panic,
+// we risk introducing a DoS. If we return nil, we risk a panic. If we return
+// the input, ecdsa.Verify might fail open. The safest course seems to be to
+// return a valid, random point, which hopefully won't help the attacker.
+func p224RandomPoint() (x, y *big.Int) {
+ _, x, y, err := GenerateKey(P224(), rand.Reader)
+ if err != nil {
+ panic("crypto/elliptic: failed to generate random point")
+ }
+ return x, y
+}
+
+func (p224Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+ p1, ok := p224PointFromAffine(x1, y1)
+ if !ok {
+ return p224RandomPoint()
+ }
+ p2, ok := p224PointFromAffine(x2, y2)
+ if !ok {
+ return p224RandomPoint()
+ }
+ return p224PointToAffine(p1.Add(p1, p2))
+}
+
+func (p224Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+ p, ok := p224PointFromAffine(x1, y1)
+ if !ok {
+ return p224RandomPoint()
+ }
+ return p224PointToAffine(p.Double(p))
+}
+
+func (p224Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
+ p, ok := p224PointFromAffine(Bx, By)
+ if !ok {
+ return p224RandomPoint()
+ }
+ return p224PointToAffine(p.ScalarMult(p, scalar))
+}
+
+func (p224Curve) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
+ p := nistec.NewP224Generator()
+ return p224PointToAffine(p.ScalarMult(p, scalar))
+}
diff --git a/src/crypto/elliptic/p224_test.go b/src/crypto/elliptic/p224_test.go
new file mode 100644
index 0000000..7971f63
--- /dev/null
+++ b/src/crypto/elliptic/p224_test.go
@@ -0,0 +1,325 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+import (
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "testing"
+)
+
+type baseMultTest struct {
+ k string
+ x, y string
+}
+
+var p224BaseMultTests = []baseMultTest{
+ {
+ "1",
+ "b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
+ "bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34",
+ },
+ {
+ "2",
+ "706a46dc76dcb76798e60e6d89474788d16dc18032d268fd1a704fa6",
+ "1c2b76a7bc25e7702a704fa986892849fca629487acf3709d2e4e8bb",
+ },
+ {
+ "3",
+ "df1b1d66a551d0d31eff822558b9d2cc75c2180279fe0d08fd896d04",
+ "a3f7f03cadd0be444c0aa56830130ddf77d317344e1af3591981a925",
+ },
+ {
+ "4",
+ "ae99feebb5d26945b54892092a8aee02912930fa41cd114e40447301",
+ "482580a0ec5bc47e88bc8c378632cd196cb3fa058a7114eb03054c9",
+ },
+ {
+ "5",
+ "31c49ae75bce7807cdff22055d94ee9021fedbb5ab51c57526f011aa",
+ "27e8bff1745635ec5ba0c9f1c2ede15414c6507d29ffe37e790a079b",
+ },
+ {
+ "6",
+ "1f2483f82572251fca975fea40db821df8ad82a3c002ee6c57112408",
+ "89faf0ccb750d99b553c574fad7ecfb0438586eb3952af5b4b153c7e",
+ },
+ {
+ "7",
+ "db2f6be630e246a5cf7d99b85194b123d487e2d466b94b24a03c3e28",
+ "f3a30085497f2f611ee2517b163ef8c53b715d18bb4e4808d02b963",
+ },
+ {
+ "8",
+ "858e6f9cc6c12c31f5df124aa77767b05c8bc021bd683d2b55571550",
+ "46dcd3ea5c43898c5c5fc4fdac7db39c2f02ebee4e3541d1e78047a",
+ },
+ {
+ "9",
+ "2fdcccfee720a77ef6cb3bfbb447f9383117e3daa4a07e36ed15f78d",
+ "371732e4f41bf4f7883035e6a79fcedc0e196eb07b48171697517463",
+ },
+ {
+ "10",
+ "aea9e17a306517eb89152aa7096d2c381ec813c51aa880e7bee2c0fd",
+ "39bb30eab337e0a521b6cba1abe4b2b3a3e524c14a3fe3eb116b655f",
+ },
+ {
+ "11",
+ "ef53b6294aca431f0f3c22dc82eb9050324f1d88d377e716448e507c",
+ "20b510004092e96636cfb7e32efded8265c266dfb754fa6d6491a6da",
+ },
+ {
+ "12",
+ "6e31ee1dc137f81b056752e4deab1443a481033e9b4c93a3044f4f7a",
+ "207dddf0385bfdeab6e9acda8da06b3bbef224a93ab1e9e036109d13",
+ },
+ {
+ "13",
+ "34e8e17a430e43289793c383fac9774247b40e9ebd3366981fcfaeca",
+ "252819f71c7fb7fbcb159be337d37d3336d7feb963724fdfb0ecb767",
+ },
+ {
+ "14",
+ "a53640c83dc208603ded83e4ecf758f24c357d7cf48088b2ce01e9fa",
+ "d5814cd724199c4a5b974a43685fbf5b8bac69459c9469bc8f23ccaf",
+ },
+ {
+ "15",
+ "baa4d8635511a7d288aebeedd12ce529ff102c91f97f867e21916bf9",
+ "979a5f4759f80f4fb4ec2e34f5566d595680a11735e7b61046127989",
+ },
+ {
+ "16",
+ "b6ec4fe1777382404ef679997ba8d1cc5cd8e85349259f590c4c66d",
+ "3399d464345906b11b00e363ef429221f2ec720d2f665d7dead5b482",
+ },
+ {
+ "17",
+ "b8357c3a6ceef288310e17b8bfeff9200846ca8c1942497c484403bc",
+ "ff149efa6606a6bd20ef7d1b06bd92f6904639dce5174db6cc554a26",
+ },
+ {
+ "18",
+ "c9ff61b040874c0568479216824a15eab1a838a797d189746226e4cc",
+ "ea98d60e5ffc9b8fcf999fab1df7e7ef7084f20ddb61bb045a6ce002",
+ },
+ {
+ "19",
+ "a1e81c04f30ce201c7c9ace785ed44cc33b455a022f2acdbc6cae83c",
+ "dcf1f6c3db09c70acc25391d492fe25b4a180babd6cea356c04719cd",
+ },
+ {
+ "20",
+ "fcc7f2b45df1cd5a3c0c0731ca47a8af75cfb0347e8354eefe782455",
+ "d5d7110274cba7cdee90e1a8b0d394c376a5573db6be0bf2747f530",
+ },
+ {
+ "112233445566778899",
+ "61f077c6f62ed802dad7c2f38f5c67f2cc453601e61bd076bb46179e",
+ "2272f9e9f5933e70388ee652513443b5e289dd135dcc0d0299b225e4",
+ },
+ {
+ "112233445566778899112233445566778899",
+ "29895f0af496bfc62b6ef8d8a65c88c613949b03668aab4f0429e35",
+ "3ea6e53f9a841f2019ec24bde1a75677aa9b5902e61081c01064de93",
+ },
+ {
+ "6950511619965839450988900688150712778015737983940691968051900319680",
+ "ab689930bcae4a4aa5f5cb085e823e8ae30fd365eb1da4aba9cf0379",
+ "3345a121bbd233548af0d210654eb40bab788a03666419be6fbd34e7",
+ },
+ {
+ "13479972933410060327035789020509431695094902435494295338570602119423",
+ "bdb6a8817c1f89da1c2f3dd8e97feb4494f2ed302a4ce2bc7f5f4025",
+ "4c7020d57c00411889462d77a5438bb4e97d177700bf7243a07f1680",
+ },
+ {
+ "13479971751745682581351455311314208093898607229429740618390390702079",
+ "d58b61aa41c32dd5eba462647dba75c5d67c83606c0af2bd928446a9",
+ "d24ba6a837be0460dd107ae77725696d211446c5609b4595976b16bd",
+ },
+ {
+ "13479972931865328106486971546324465392952975980343228160962702868479",
+ "dc9fa77978a005510980e929a1485f63716df695d7a0c18bb518df03",
+ "ede2b016f2ddffc2a8c015b134928275ce09e5661b7ab14ce0d1d403",
+ },
+ {
+ "11795773708834916026404142434151065506931607341523388140225443265536",
+ "499d8b2829cfb879c901f7d85d357045edab55028824d0f05ba279ba",
+ "bf929537b06e4015919639d94f57838fa33fc3d952598dcdbb44d638",
+ },
+ {
+ "784254593043826236572847595991346435467177662189391577090",
+ "8246c999137186632c5f9eddf3b1b0e1764c5e8bd0e0d8a554b9cb77",
+ "e80ed8660bc1cb17ac7d845be40a7a022d3306f116ae9f81fea65947",
+ },
+ {
+ "13479767645505654746623887797783387853576174193480695826442858012671",
+ "6670c20afcceaea672c97f75e2e9dd5c8460e54bb38538ebb4bd30eb",
+ "f280d8008d07a4caf54271f993527d46ff3ff46fd1190a3f1faa4f74",
+ },
+ {
+ "205688069665150753842126177372015544874550518966168735589597183",
+ "eca934247425cfd949b795cb5ce1eff401550386e28d1a4c5a8eb",
+ "d4c01040dba19628931bc8855370317c722cbd9ca6156985f1c2e9ce",
+ },
+ {
+ "13479966930919337728895168462090683249159702977113823384618282123295",
+ "ef353bf5c73cd551b96d596fbc9a67f16d61dd9fe56af19de1fba9cd",
+ "21771b9cdce3e8430c09b3838be70b48c21e15bc09ee1f2d7945b91f",
+ },
+ {
+ "50210731791415612487756441341851895584393717453129007497216",
+ "4036052a3091eb481046ad3289c95d3ac905ca0023de2c03ecd451cf",
+ "d768165a38a2b96f812586a9d59d4136035d9c853a5bf2e1c86a4993",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368041",
+ "fcc7f2b45df1cd5a3c0c0731ca47a8af75cfb0347e8354eefe782455",
+ "f2a28eefd8b345832116f1e574f2c6b2c895aa8c24941f40d8b80ad1",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368042",
+ "a1e81c04f30ce201c7c9ace785ed44cc33b455a022f2acdbc6cae83c",
+ "230e093c24f638f533dac6e2b6d01da3b5e7f45429315ca93fb8e634",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368043",
+ "c9ff61b040874c0568479216824a15eab1a838a797d189746226e4cc",
+ "156729f1a003647030666054e208180f8f7b0df2249e44fba5931fff",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368044",
+ "b8357c3a6ceef288310e17b8bfeff9200846ca8c1942497c484403bc",
+ "eb610599f95942df1082e4f9426d086fb9c6231ae8b24933aab5db",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368045",
+ "b6ec4fe1777382404ef679997ba8d1cc5cd8e85349259f590c4c66d",
+ "cc662b9bcba6f94ee4ff1c9c10bd6ddd0d138df2d099a282152a4b7f",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368046",
+ "baa4d8635511a7d288aebeedd12ce529ff102c91f97f867e21916bf9",
+ "6865a0b8a607f0b04b13d1cb0aa992a5a97f5ee8ca1849efb9ed8678",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368047",
+ "a53640c83dc208603ded83e4ecf758f24c357d7cf48088b2ce01e9fa",
+ "2a7eb328dbe663b5a468b5bc97a040a3745396ba636b964370dc3352",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368048",
+ "34e8e17a430e43289793c383fac9774247b40e9ebd3366981fcfaeca",
+ "dad7e608e380480434ea641cc82c82cbc92801469c8db0204f13489a",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368049",
+ "6e31ee1dc137f81b056752e4deab1443a481033e9b4c93a3044f4f7a",
+ "df82220fc7a4021549165325725f94c3410ddb56c54e161fc9ef62ee",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368050",
+ "ef53b6294aca431f0f3c22dc82eb9050324f1d88d377e716448e507c",
+ "df4aefffbf6d1699c930481cd102127c9a3d992048ab05929b6e5927",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368051",
+ "aea9e17a306517eb89152aa7096d2c381ec813c51aa880e7bee2c0fd",
+ "c644cf154cc81f5ade49345e541b4d4b5c1adb3eb5c01c14ee949aa2",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368052",
+ "2fdcccfee720a77ef6cb3bfbb447f9383117e3daa4a07e36ed15f78d",
+ "c8e8cd1b0be40b0877cfca1958603122f1e6914f84b7e8e968ae8b9e",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368053",
+ "858e6f9cc6c12c31f5df124aa77767b05c8bc021bd683d2b55571550",
+ "fb9232c15a3bc7673a3a03b0253824c53d0fd1411b1cabe2e187fb87",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368054",
+ "db2f6be630e246a5cf7d99b85194b123d487e2d466b94b24a03c3e28",
+ "f0c5cff7ab680d09ee11dae84e9c1072ac48ea2e744b1b7f72fd469e",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368055",
+ "1f2483f82572251fca975fea40db821df8ad82a3c002ee6c57112408",
+ "76050f3348af2664aac3a8b05281304ebc7a7914c6ad50a4b4eac383",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368056",
+ "31c49ae75bce7807cdff22055d94ee9021fedbb5ab51c57526f011aa",
+ "d817400e8ba9ca13a45f360e3d121eaaeb39af82d6001c8186f5f866",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368057",
+ "ae99feebb5d26945b54892092a8aee02912930fa41cd114e40447301",
+ "fb7da7f5f13a43b81774373c879cd32d6934c05fa758eeb14fcfab38",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368058",
+ "df1b1d66a551d0d31eff822558b9d2cc75c2180279fe0d08fd896d04",
+ "5c080fc3522f41bbb3f55a97cfecf21f882ce8cbb1e50ca6e67e56dc",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368059",
+ "706a46dc76dcb76798e60e6d89474788d16dc18032d268fd1a704fa6",
+ "e3d4895843da188fd58fb0567976d7b50359d6b78530c8f62d1b1746",
+ },
+ {
+ "26959946667150639794667015087019625940457807714424391721682722368060",
+ "b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21",
+ "42c89c774a08dc04b3dd201932bc8a5ea5f8b89bbb2a7e667aff81cd",
+ },
+}
+
+func TestP224BaseMult(t *testing.T) {
+ p224 := P224()
+ for i, e := range p224BaseMultTests {
+ k, ok := new(big.Int).SetString(e.k, 10)
+ if !ok {
+ t.Errorf("%d: bad value for k: %s", i, e.k)
+ }
+ x, y := p224.ScalarBaseMult(k.Bytes())
+ if fmt.Sprintf("%x", x) != e.x || fmt.Sprintf("%x", y) != e.y {
+ t.Errorf("%d: bad output for k=%s: got (%x, %x), want (%s, %s)", i, e.k, x, y, e.x, e.y)
+ }
+ if testing.Short() && i > 5 {
+ break
+ }
+ }
+}
+
+func TestP224GenericBaseMult(t *testing.T) {
+ // We use the P224 CurveParams directly in order to test the generic implementation.
+ p224 := genericParamsForCurve(P224())
+ for i, e := range p224BaseMultTests {
+ k, ok := new(big.Int).SetString(e.k, 10)
+ if !ok {
+ t.Errorf("%d: bad value for k: %s", i, e.k)
+ }
+ x, y := p224.ScalarBaseMult(k.Bytes())
+ if fmt.Sprintf("%x", x) != e.x || fmt.Sprintf("%x", y) != e.y {
+ t.Errorf("%d: bad output for k=%s: got (%x, %x), want (%s, %s)", i, e.k, x, y, e.x, e.y)
+ }
+ if testing.Short() && i > 5 {
+ break
+ }
+ }
+}
+
+func TestP224Overflow(t *testing.T) {
+ // This tests for a specific bug in the P224 implementation.
+ p224 := P224()
+ pointData, _ := hex.DecodeString("049B535B45FB0A2072398A6831834624C7E32CCFD5A4B933BCEAF77F1DD945E08BBE5178F5EDF5E733388F196D2A631D2E075BB16CBFEEA15B")
+ x, y := Unmarshal(p224, pointData)
+ if !p224.IsOnCurve(x, y) {
+ t.Error("P224 failed to validate a correct point")
+ }
+}
diff --git a/src/crypto/elliptic/p256.go b/src/crypto/elliptic/p256.go
new file mode 100644
index 0000000..99c39ea
--- /dev/null
+++ b/src/crypto/elliptic/p256.go
@@ -0,0 +1,1195 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 && !arm64
+
+package elliptic
+
+// P-256 is implemented by several backends, including a generic 32-bit
+// constant-time one in this file, which is used when assembly implementations
+// are not available or not appropriate for the hardware.
+
+import "math/big"
+
+type p256Curve struct {
+ *CurveParams
+}
+
+var (
+ p256Params *CurveParams
+
+ // p256RInverse contains 1/R mod p, the inverse of the Montgomery
+ // constant R = 2**257.
+ p256RInverse *big.Int
+)
+
+func initP256() {
+ // See FIPS 186-3, section D.2.3
+ p256Params = &CurveParams{Name: "P-256"}
+ p256Params.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10)
+ p256Params.N, _ = new(big.Int).SetString("115792089210356248762697446949407573529996955224135760342422259061068512044369", 10)
+ p256Params.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16)
+ p256Params.Gx, _ = new(big.Int).SetString("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", 16)
+ p256Params.Gy, _ = new(big.Int).SetString("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", 16)
+ p256Params.BitSize = 256
+
+ p256RInverse, _ = new(big.Int).SetString("7fffffff00000001fffffffe8000000100000000ffffffff0000000180000000", 16)
+
+ // Arch-specific initialization, letting the platform dynamically pick a P-256 implementation.
+ initP256Arch()
+}
+
+func (curve p256Curve) Params() *CurveParams {
+ return curve.CurveParams
+}
+
+// p256GetScalar endian-swaps the big-endian scalar value from in and writes it
+// to out. If the scalar is equal to or greater than the order of the group,
+// it's reduced modulo that order.
+func p256GetScalar(out *[32]byte, in []byte) {
+ n := new(big.Int).SetBytes(in)
+ var scalarBytes []byte
+
+ if n.Cmp(p256Params.N) >= 0 || len(in) > len(out) {
+ n.Mod(n, p256Params.N)
+ scalarBytes = n.Bytes()
+ } else {
+ scalarBytes = in
+ }
+
+ for i, v := range scalarBytes {
+ out[len(scalarBytes)-(1+i)] = v
+ }
+}
+
+func (p256Curve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
+ var scalarReversed [32]byte
+ p256GetScalar(&scalarReversed, scalar)
+
+ var x1, y1, z1 [p256Limbs]uint32
+ p256ScalarBaseMult(&x1, &y1, &z1, &scalarReversed)
+ return p256ToAffine(&x1, &y1, &z1)
+}
+
+func (p256Curve) ScalarMult(bigX, bigY *big.Int, scalar []byte) (x, y *big.Int) {
+ var scalarReversed [32]byte
+ p256GetScalar(&scalarReversed, scalar)
+
+ var px, py, x1, y1, z1 [p256Limbs]uint32
+ p256FromBig(&px, bigX)
+ p256FromBig(&py, bigY)
+ p256ScalarMult(&x1, &y1, &z1, &px, &py, &scalarReversed)
+ return p256ToAffine(&x1, &y1, &z1)
+}
+
+// Field elements are represented as nine unsigned 32-bit words.
+//
+// The value of a field element is:
+//   x[0] + (x[1] * 2**29) + (x[2] * 2**57) + ... + (x[8] * 2**228)
+//
+// That is, the limbs are alternately 29 and 28 bits wide, in little-endian
+// order.
+//
+// This means that a field element hits 2**257, rather than 2**256 as we would
+// like. A 28, 29, ... pattern would cause us to hit 2**256, but that causes
+// problems when multiplying, as terms end up one bit short of a limb, which
+// would require much bit-shifting to correct.
+//
+// Finally, the values stored in a field element are in Montgomery form. So the
+// value |y| is stored as (y*R) mod p, where p is the P-256 prime and R is
+// 2**257.
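+//
+// For example, p256One below is the number 1 in this representation: its
+// limbs encode 1*R mod p = 2**257 mod p = 2**225 - 2**193 - 2**97 + 2.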
+
+const (
+ p256Limbs = 9
+ bottom29Bits = 0x1fffffff
+)
+
+var (
+ // p256One is the number 1 as a field element.
+ p256One = [p256Limbs]uint32{2, 0, 0, 0xffff800, 0x1fffffff, 0xfffffff, 0x1fbfffff, 0x1ffffff, 0}
+ p256Zero = [p256Limbs]uint32{0, 0, 0, 0, 0, 0, 0, 0, 0}
+ // p256P is the prime modulus as a field element.
+ p256P = [p256Limbs]uint32{0x1fffffff, 0xfffffff, 0x1fffffff, 0x3ff, 0, 0, 0x200000, 0xf000000, 0xfffffff}
+ // p2562P is twice the prime modulus, as a field element.
+ p2562P = [p256Limbs]uint32{0x1ffffffe, 0xfffffff, 0x1fffffff, 0x7ff, 0, 0, 0x400000, 0xe000000, 0x1fffffff}
+)
+
+// p256Precomputed contains precomputed values to aid the calculation of scalar
+// multiples of the base point, G. It's actually two equal-length tables
+// concatenated.
+//
+// The first table contains (x,y) field element pairs for 16 multiples of the
+// base point, G.
+//
+// Index | Index (binary) | Value
+// 0 | 0000 | 0G (all zeros, omitted)
+// 1 | 0001 | G
+// 2 | 0010 | 2**64G
+// 3 | 0011 | 2**64G + G
+// 4 | 0100 | 2**128G
+// 5 | 0101 | 2**128G + G
+// 6 | 0110 | 2**128G + 2**64G
+// 7 | 0111 | 2**128G + 2**64G + G
+// 8 | 1000 | 2**192G
+// 9 | 1001 | 2**192G + G
+// 10 | 1010 | 2**192G + 2**64G
+// 11 | 1011 | 2**192G + 2**64G + G
+// 12 | 1100 | 2**192G + 2**128G
+// 13 | 1101 | 2**192G + 2**128G + G
+// 14 | 1110 | 2**192G + 2**128G + 2**64G
+// 15 | 1111 | 2**192G + 2**128G + 2**64G + G
+//
+// The second table follows the same style, but the terms are 2**32G,
+// 2**96G, 2**160G, 2**224G.
+//
+// This is ~2KB of data.
+var p256Precomputed = [p256Limbs * 2 * 15 * 2]uint32{
+ 0x11522878, 0xe730d41, 0xdb60179, 0x4afe2ff, 0x12883add, 0xcaddd88, 0x119e7edc, 0xd4a6eab, 0x3120bee,
+ 0x1d2aac15, 0xf25357c, 0x19e45cdd, 0x5c721d0, 0x1992c5a5, 0xa237487, 0x154ba21, 0x14b10bb, 0xae3fe3,
+ 0xd41a576, 0x922fc51, 0x234994f, 0x60b60d3, 0x164586ae, 0xce95f18, 0x1fe49073, 0x3fa36cc, 0x5ebcd2c,
+ 0xb402f2f, 0x15c70bf, 0x1561925c, 0x5a26704, 0xda91e90, 0xcdc1c7f, 0x1ea12446, 0xe1ade1e, 0xec91f22,
+ 0x26f7778, 0x566847e, 0xa0bec9e, 0x234f453, 0x1a31f21a, 0xd85e75c, 0x56c7109, 0xa267a00, 0xb57c050,
+ 0x98fb57, 0xaa837cc, 0x60c0792, 0xcfa5e19, 0x61bab9e, 0x589e39b, 0xa324c5, 0x7d6dee7, 0x2976e4b,
+ 0x1fc4124a, 0xa8c244b, 0x1ce86762, 0xcd61c7e, 0x1831c8e0, 0x75774e1, 0x1d96a5a9, 0x843a649, 0xc3ab0fa,
+ 0x6e2e7d5, 0x7673a2a, 0x178b65e8, 0x4003e9b, 0x1a1f11c2, 0x7816ea, 0xf643e11, 0x58c43df, 0xf423fc2,
+ 0x19633ffa, 0x891f2b2, 0x123c231c, 0x46add8c, 0x54700dd, 0x59e2b17, 0x172db40f, 0x83e277d, 0xb0dd609,
+ 0xfd1da12, 0x35c6e52, 0x19ede20c, 0xd19e0c0, 0x97d0f40, 0xb015b19, 0x449e3f5, 0xe10c9e, 0x33ab581,
+ 0x56a67ab, 0x577734d, 0x1dddc062, 0xc57b10d, 0x149b39d, 0x26a9e7b, 0xc35df9f, 0x48764cd, 0x76dbcca,
+ 0xca4b366, 0xe9303ab, 0x1a7480e7, 0x57e9e81, 0x1e13eb50, 0xf466cf3, 0x6f16b20, 0x4ba3173, 0xc168c33,
+ 0x15cb5439, 0x6a38e11, 0x73658bd, 0xb29564f, 0x3f6dc5b, 0x53b97e, 0x1322c4c0, 0x65dd7ff, 0x3a1e4f6,
+ 0x14e614aa, 0x9246317, 0x1bc83aca, 0xad97eed, 0xd38ce4a, 0xf82b006, 0x341f077, 0xa6add89, 0x4894acd,
+ 0x9f162d5, 0xf8410ef, 0x1b266a56, 0xd7f223, 0x3e0cb92, 0xe39b672, 0x6a2901a, 0x69a8556, 0x7e7c0,
+ 0x9b7d8d3, 0x309a80, 0x1ad05f7f, 0xc2fb5dd, 0xcbfd41d, 0x9ceb638, 0x1051825c, 0xda0cf5b, 0x812e881,
+ 0x6f35669, 0x6a56f2c, 0x1df8d184, 0x345820, 0x1477d477, 0x1645db1, 0xbe80c51, 0xc22be3e, 0xe35e65a,
+ 0x1aeb7aa0, 0xc375315, 0xf67bc99, 0x7fdd7b9, 0x191fc1be, 0x61235d, 0x2c184e9, 0x1c5a839, 0x47a1e26,
+ 0xb7cb456, 0x93e225d, 0x14f3c6ed, 0xccc1ac9, 0x17fe37f3, 0x4988989, 0x1a90c502, 0x2f32042, 0xa17769b,
+ 0xafd8c7c, 0x8191c6e, 0x1dcdb237, 0x16200c0, 0x107b32a1, 0x66c08db, 0x10d06a02, 0x3fc93, 0x5620023,
+ 0x16722b27, 0x68b5c59, 0x270fcfc, 0xfad0ecc, 0xe5de1c2, 0xeab466b, 0x2fc513c, 0x407f75c, 0xbaab133,
+ 0x9705fe9, 0xb88b8e7, 0x734c993, 0x1e1ff8f, 0x19156970, 0xabd0f00, 0x10469ea7, 0x3293ac0, 0xcdc98aa,
+ 0x1d843fd, 0xe14bfe8, 0x15be825f, 0x8b5212, 0xeb3fb67, 0x81cbd29, 0xbc62f16, 0x2b6fcc7, 0xf5a4e29,
+ 0x13560b66, 0xc0b6ac2, 0x51ae690, 0xd41e271, 0xf3e9bd4, 0x1d70aab, 0x1029f72, 0x73e1c35, 0xee70fbc,
+ 0xad81baf, 0x9ecc49a, 0x86c741e, 0xfe6be30, 0x176752e7, 0x23d416, 0x1f83de85, 0x27de188, 0x66f70b8,
+ 0x181cd51f, 0x96b6e4c, 0x188f2335, 0xa5df759, 0x17a77eb6, 0xfeb0e73, 0x154ae914, 0x2f3ec51, 0x3826b59,
+ 0xb91f17d, 0x1c72949, 0x1362bf0a, 0xe23fddf, 0xa5614b0, 0xf7d8f, 0x79061, 0x823d9d2, 0x8213f39,
+ 0x1128ae0b, 0xd095d05, 0xb85c0c2, 0x1ecb2ef, 0x24ddc84, 0xe35e901, 0x18411a4a, 0xf5ddc3d, 0x3786689,
+ 0x52260e8, 0x5ae3564, 0x542b10d, 0x8d93a45, 0x19952aa4, 0x996cc41, 0x1051a729, 0x4be3499, 0x52b23aa,
+ 0x109f307e, 0x6f5b6bb, 0x1f84e1e7, 0x77a0cfa, 0x10c4df3f, 0x25a02ea, 0xb048035, 0xe31de66, 0xc6ecaa3,
+ 0x28ea335, 0x2886024, 0x1372f020, 0xf55d35, 0x15e4684c, 0xf2a9e17, 0x1a4a7529, 0xcb7beb1, 0xb2a78a1,
+ 0x1ab21f1f, 0x6361ccf, 0x6c9179d, 0xb135627, 0x1267b974, 0x4408bad, 0x1cbff658, 0xe3d6511, 0xc7d76f,
+ 0x1cc7a69, 0xe7ee31b, 0x54fab4f, 0x2b914f, 0x1ad27a30, 0xcd3579e, 0xc50124c, 0x50daa90, 0xb13f72,
+ 0xb06aa75, 0x70f5cc6, 0x1649e5aa, 0x84a5312, 0x329043c, 0x41c4011, 0x13d32411, 0xb04a838, 0xd760d2d,
+ 0x1713b532, 0xbaa0c03, 0x84022ab, 0x6bcf5c1, 0x2f45379, 0x18ae070, 0x18c9e11e, 0x20bca9a, 0x66f496b,
+ 0x3eef294, 0x67500d2, 0xd7f613c, 0x2dbbeb, 0xb741038, 0xe04133f, 0x1582968d, 0xbe985f7, 0x1acbc1a,
+ 0x1a6a939f, 0x33e50f6, 0xd665ed4, 0xb4b7bd6, 0x1e5a3799, 0x6b33847, 0x17fa56ff, 0x65ef930, 0x21dc4a,
+ 0x2b37659, 0x450fe17, 0xb357b65, 0xdf5efac, 0x15397bef, 0x9d35a7f, 0x112ac15f, 0x624e62e, 0xa90ae2f,
+ 0x107eecd2, 0x1f69bbe, 0x77d6bce, 0x5741394, 0x13c684fc, 0x950c910, 0x725522b, 0xdc78583, 0x40eeabb,
+ 0x1fde328a, 0xbd61d96, 0xd28c387, 0x9e77d89, 0x12550c40, 0x759cb7d, 0x367ef34, 0xae2a960, 0x91b8bdc,
+ 0x93462a9, 0xf469ef, 0xb2e9aef, 0xd2ca771, 0x54e1f42, 0x7aaa49, 0x6316abb, 0x2413c8e, 0x5425bf9,
+ 0x1bed3e3a, 0xf272274, 0x1f5e7326, 0x6416517, 0xea27072, 0x9cedea7, 0x6e7633, 0x7c91952, 0xd806dce,
+ 0x8e2a7e1, 0xe421e1a, 0x418c9e1, 0x1dbc890, 0x1b395c36, 0xa1dc175, 0x1dc4ef73, 0x8956f34, 0xe4b5cf2,
+ 0x1b0d3a18, 0x3194a36, 0x6c2641f, 0xe44124c, 0xa2f4eaa, 0xa8c25ba, 0xf927ed7, 0x627b614, 0x7371cca,
+ 0xba16694, 0x417bc03, 0x7c0a7e3, 0x9c35c19, 0x1168a205, 0x8b6b00d, 0x10e3edc9, 0x9c19bf2, 0x5882229,
+ 0x1b2b4162, 0xa5cef1a, 0x1543622b, 0x9bd433e, 0x364e04d, 0x7480792, 0x5c9b5b3, 0xe85ff25, 0x408ef57,
+ 0x1814cfa4, 0x121b41b, 0xd248a0f, 0x3b05222, 0x39bb16a, 0xc75966d, 0xa038113, 0xa4a1769, 0x11fbc6c,
+ 0x917e50e, 0xeec3da8, 0x169d6eac, 0x10c1699, 0xa416153, 0xf724912, 0x15cd60b7, 0x4acbad9, 0x5efc5fa,
+ 0xf150ed7, 0x122b51, 0x1104b40a, 0xcb7f442, 0xfbb28ff, 0x6ac53ca, 0x196142cc, 0x7bf0fa9, 0x957651,
+ 0x4e0f215, 0xed439f8, 0x3f46bd5, 0x5ace82f, 0x110916b6, 0x6db078, 0xffd7d57, 0xf2ecaac, 0xca86dec,
+ 0x15d6b2da, 0x965ecc9, 0x1c92b4c2, 0x1f3811, 0x1cb080f5, 0x2d8b804, 0x19d1c12d, 0xf20bd46, 0x1951fa7,
+ 0xa3656c3, 0x523a425, 0xfcd0692, 0xd44ddc8, 0x131f0f5b, 0xaf80e4a, 0xcd9fc74, 0x99bb618, 0x2db944c,
+ 0xa673090, 0x1c210e1, 0x178c8d23, 0x1474383, 0x10b8743d, 0x985a55b, 0x2e74779, 0x576138, 0x9587927,
+ 0x133130fa, 0xbe05516, 0x9f4d619, 0xbb62570, 0x99ec591, 0xd9468fe, 0x1d07782d, 0xfc72e0b, 0x701b298,
+ 0x1863863b, 0x85954b8, 0x121a0c36, 0x9e7fedf, 0xf64b429, 0x9b9d71e, 0x14e2f5d8, 0xf858d3a, 0x942eea8,
+ 0xda5b765, 0x6edafff, 0xa9d18cc, 0xc65e4ba, 0x1c747e86, 0xe4ea915, 0x1981d7a1, 0x8395659, 0x52ed4e2,
+ 0x87d43b7, 0x37ab11b, 0x19d292ce, 0xf8d4692, 0x18c3053f, 0x8863e13, 0x4c146c0, 0x6bdf55a, 0x4e4457d,
+ 0x16152289, 0xac78ec2, 0x1a59c5a2, 0x2028b97, 0x71c2d01, 0x295851f, 0x404747b, 0x878558d, 0x7d29aa4,
+ 0x13d8341f, 0x8daefd7, 0x139c972d, 0x6b7ea75, 0xd4a9dde, 0xff163d8, 0x81d55d7, 0xa5bef68, 0xb7b30d8,
+ 0xbe73d6f, 0xaa88141, 0xd976c81, 0x7e7a9cc, 0x18beb771, 0xd773cbd, 0x13f51951, 0x9d0c177, 0x1c49a78,
+}
+
+// Field element operations:
+
+const bottom28Bits = 0xfffffff
+
+// nonZeroToAllOnes returns:
+// 0xffffffff for 0 < x <= 2**31
+// 0 for x == 0 or x > 2**31.
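+//
+// This works because for 0 < x <= 2**31 the top bit of x-1 is clear, so
+// (x-1)>>31 is 0 and 0-1 underflows to all ones, while for x == 0 or
+// x > 2**31 the top bit of x-1 is set, the shift yields 1, and 1-1 is 0.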
+func nonZeroToAllOnes(x uint32) uint32 {
+ return ((x - 1) >> 31) - 1
+}
+
+// p256ReduceCarry adds a multiple of p in order to cancel |carry|,
+// which is a term at 2**257.
+//
+// On entry: carry < 2**3, inout[0,2,...] < 2**29, inout[1,3,...] < 2**28.
+// On exit: inout[0,2,..] < 2**30, inout[1,3,...] < 2**29.
+func p256ReduceCarry(inout *[p256Limbs]uint32, carry uint32) {
+ carry_mask := nonZeroToAllOnes(carry)
+
+ inout[0] += carry << 1
+ inout[3] += 0x10000000 & carry_mask
+ // carry < 2**3 thus (carry << 11) < 2**14 and we added 2**28 in the
+ // previous line therefore this doesn't underflow.
+ inout[3] -= carry << 11
+ inout[4] += (0x20000000 - 1) & carry_mask
+ inout[5] += (0x10000000 - 1) & carry_mask
+ inout[6] += (0x20000000 - 1) & carry_mask
+ inout[6] -= carry << 22
+ // This may underflow if carry is non-zero but, if so, we'll fix it in the
+ // next line.
+ inout[7] -= 1 & carry_mask
+ inout[7] += carry << 25
+}
+
+// p256Sum sets out = in+in2.
+//
+// On entry, in[i]+in2[i] must not overflow a 32-bit word.
+// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29
+func p256Sum(out, in, in2 *[p256Limbs]uint32) {
+ carry := uint32(0)
+ for i := 0; ; i++ {
+ out[i] = in[i] + in2[i]
+ out[i] += carry
+ carry = out[i] >> 29
+ out[i] &= bottom29Bits
+
+ i++
+ if i == p256Limbs {
+ break
+ }
+
+ out[i] = in[i] + in2[i]
+ out[i] += carry
+ carry = out[i] >> 28
+ out[i] &= bottom28Bits
+ }
+
+ p256ReduceCarry(out, carry)
+}
+
+const (
+ two30m2 = 1<<30 - 1<<2
+ two30p13m2 = 1<<30 + 1<<13 - 1<<2
+ two31m2 = 1<<31 - 1<<2
+ two31m3 = 1<<31 - 1<<3
+ two31p24m2 = 1<<31 + 1<<24 - 1<<2
+ two30m27m2 = 1<<30 - 1<<27 - 1<<2
+)
+
+// p256Zero31 is 0 mod p.
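+// Each of its limbs is larger than the corresponding per-limb bound on in2
+// in p256Diff, so adding it keeps the limb-wise subtraction there from
+// underflowing while leaving the result unchanged mod p.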
+var p256Zero31 = [p256Limbs]uint32{two31m3, two30m2, two31m2, two30p13m2, two31m2, two30m2, two31p24m2, two30m27m2, two31m2}
+
+// p256Diff sets out = in-in2.
+//
+// On entry: in[0,2,...] < 2**30, in[1,3,...] < 2**29 and
+// in2[0,2,...] < 2**30, in2[1,3,...] < 2**29.
+// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
+func p256Diff(out, in, in2 *[p256Limbs]uint32) {
+ var carry uint32
+
+ for i := 0; ; i++ {
+ out[i] = in[i] - in2[i]
+ out[i] += p256Zero31[i]
+ out[i] += carry
+ carry = out[i] >> 29
+ out[i] &= bottom29Bits
+
+ i++
+ if i == p256Limbs {
+ break
+ }
+
+ out[i] = in[i] - in2[i]
+ out[i] += p256Zero31[i]
+ out[i] += carry
+ carry = out[i] >> 28
+ out[i] &= bottom28Bits
+ }
+
+ p256ReduceCarry(out, carry)
+}
+
+// p256ReduceDegree sets out = tmp/R mod p where tmp contains 64-bit words with
+// the same 29,28,... bit positions as a field element.
+//
+// The values in field elements are in Montgomery form: x*R mod p where R =
+// 2**257. Since we just multiplied two Montgomery values together, the result
+// is x*y*R*R mod p. We wish to divide by R in order for the result also to be
+// in Montgomery form.
+//
+// On entry: tmp[i] < 2**64
+// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29
+func p256ReduceDegree(out *[p256Limbs]uint32, tmp [17]uint64) {
+ // The following table may be helpful when reading this code:
+ //
+ // Limb number: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10...
+ // Width (bits): 29| 28| 29| 28| 29| 28| 29| 28| 29| 28| 29
+ // Start bit: 0 | 29| 57| 86|114|143|171|200|228|257|285
+ // (odd phase): 0 | 28| 57| 85|114|142|171|199|228|256|285
+ var tmp2 [18]uint32
+ var carry, x, xMask uint32
+
+ // tmp contains 64-bit words with the same 29,28,29-bit positions as a
+ // field element. So the top of an element of tmp might overlap with
+ // another element two positions down. The following loop eliminates
+ // this overlap.
+ tmp2[0] = uint32(tmp[0]) & bottom29Bits
+
+ tmp2[1] = uint32(tmp[0]) >> 29
+ tmp2[1] |= (uint32(tmp[0]>>32) << 3) & bottom28Bits
+ tmp2[1] += uint32(tmp[1]) & bottom28Bits
+ carry = tmp2[1] >> 28
+ tmp2[1] &= bottom28Bits
+
+ for i := 2; i < 17; i++ {
+ tmp2[i] = (uint32(tmp[i-2] >> 32)) >> 25
+ tmp2[i] += (uint32(tmp[i-1])) >> 28
+ tmp2[i] += (uint32(tmp[i-1]>>32) << 4) & bottom29Bits
+ tmp2[i] += uint32(tmp[i]) & bottom29Bits
+ tmp2[i] += carry
+ carry = tmp2[i] >> 29
+ tmp2[i] &= bottom29Bits
+
+ i++
+ if i == 17 {
+ break
+ }
+ tmp2[i] = uint32(tmp[i-2]>>32) >> 25
+ tmp2[i] += uint32(tmp[i-1]) >> 29
+ tmp2[i] += ((uint32(tmp[i-1] >> 32)) << 3) & bottom28Bits
+ tmp2[i] += uint32(tmp[i]) & bottom28Bits
+ tmp2[i] += carry
+ carry = tmp2[i] >> 28
+ tmp2[i] &= bottom28Bits
+ }
+
+ tmp2[17] = uint32(tmp[15]>>32) >> 25
+ tmp2[17] += uint32(tmp[16]) >> 29
+ tmp2[17] += uint32(tmp[16]>>32) << 3
+ tmp2[17] += carry
+
+ // Montgomery elimination of terms:
+ //
+ // Since R is 2**257, we can divide by R with a bitwise shift if we can
+ // ensure that the right-most 257 bits are all zero. We can make that true
+ // by adding multiplies of p without affecting the value.
+ //
+ // So we eliminate limbs from right to left. Since the bottom 29 bits of p
+ // are all ones, then by adding tmp2[0]*p to tmp2 we'll make tmp2[0] == 0.
+ // We can do that for 8 further limbs and then right shift to eliminate the
+ // extra factor of R.
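+ //
+ // Concretely, adding x*p to a value whose lowest limb is x zeroes that
+ // limb: since p = -1 mod 2**29, we get x + x*p = x*(1+p) = 0 mod 2**29.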
+ for i := 0; ; i += 2 {
+ tmp2[i+1] += tmp2[i] >> 29
+ x = tmp2[i] & bottom29Bits
+ xMask = nonZeroToAllOnes(x)
+ tmp2[i] = 0
+
+ // The bounds calculations for this loop are tricky. Each iteration of
+ // the loop eliminates two words by adding values to words to their
+ // right.
+ //
+ // The following table contains the amounts added to each word (as an
+ // offset from the value of i at the top of the loop). The amounts are
+ // accounted for from the first and second half of the loop separately
+ // and are written as, for example, 28 to mean a value <2**28.
+ //
+ // Word: 3 4 5 6 7 8 9 10
+ // Added in top half: 28 11 29 21 29 28
+ // 28 29
+ // 29
+ // Added in bottom half: 29 10 28 21 28 28
+ // 29
+ //
+ // The value that is currently offset 7 will be offset 5 for the next
+ // iteration and then offset 3 for the iteration after that. Therefore
+ // the total value added will be the values added at 7, 5 and 3.
+ //
+ // The following table accumulates these values. The sums at the bottom
+ // are written as, for example, 29+28, to mean a value < 2**29+2**28.
+ //
+ // Word: 3 4 5 6 7 8 9 10 11 12 13
+ // 28 11 10 29 21 29 28 28 28 28 28
+ // 29 28 11 28 29 28 29 28 29 28
+ // 29 28 21 21 29 21 29 21
+ // 10 29 28 21 28 21 28
+ // 28 29 28 29 28 29 28
+ // 11 10 29 10 29 10
+ // 29 28 11 28 11
+ // 29 29
+ // --------------------------------------------
+ // 30+ 31+ 30+ 31+ 30+
+ // 28+ 29+ 28+ 29+ 21+
+ // 21+ 28+ 21+ 28+ 10
+ // 10 21+ 10 21+
+ // 11 11
+ //
+ // So the greatest amount is added to tmp2[10] and tmp2[12]. If
+ // tmp2[10/12] has an initial value of <2**29, then the maximum value
+ // will be < 2**31 + 2**30 + 2**28 + 2**21 + 2**11, which is < 2**32,
+ // as required.
+ tmp2[i+3] += (x << 10) & bottom28Bits
+ tmp2[i+4] += (x >> 18)
+
+ tmp2[i+6] += (x << 21) & bottom29Bits
+ tmp2[i+7] += x >> 8
+
+ // At position 200, which is the starting bit position for word 7, we
+ // have a factor of 0xf000000 = 2**28 - 2**24.
+ tmp2[i+7] += 0x10000000 & xMask
+ tmp2[i+8] += (x - 1) & xMask
+ tmp2[i+7] -= (x << 24) & bottom28Bits
+ tmp2[i+8] -= x >> 4
+
+ tmp2[i+8] += 0x20000000 & xMask
+ tmp2[i+8] -= x
+ tmp2[i+8] += (x << 28) & bottom29Bits
+ tmp2[i+9] += ((x >> 1) - 1) & xMask
+
+ if i+1 == p256Limbs {
+ break
+ }
+ tmp2[i+2] += tmp2[i+1] >> 28
+ x = tmp2[i+1] & bottom28Bits
+ xMask = nonZeroToAllOnes(x)
+ tmp2[i+1] = 0
+
+ tmp2[i+4] += (x << 11) & bottom29Bits
+ tmp2[i+5] += (x >> 18)
+
+ tmp2[i+7] += (x << 21) & bottom28Bits
+ tmp2[i+8] += x >> 7
+
+ // At position 199, which is the starting bit of the 8th word when
+ // dealing with a context starting on an odd word, we have a factor of
+ // 0x1e000000 = 2**29 - 2**25. Since we have not updated i, the 8th
+ // word from i+1 is i+8.
+ tmp2[i+8] += 0x20000000 & xMask
+ tmp2[i+9] += (x - 1) & xMask
+ tmp2[i+8] -= (x << 25) & bottom29Bits
+ tmp2[i+9] -= x >> 4
+
+ tmp2[i+9] += 0x10000000 & xMask
+ tmp2[i+9] -= x
+ tmp2[i+10] += (x - 1) & xMask
+ }
+
+ // We merge the right shift with a carry chain. The words above 2**257 have
+ // widths of 28,29,... which we need to correct when copying them down.
+ carry = 0
+ for i := 0; i < 8; i++ {
+ // The maximum value of tmp2[i + 9] occurs on the first iteration and
+ // is < 2**30+2**29+2**28. Adding 2**29 (from tmp2[i + 10]) is
+ // therefore safe.
+ out[i] = tmp2[i+9]
+ out[i] += carry
+ out[i] += (tmp2[i+10] << 28) & bottom29Bits
+ carry = out[i] >> 29
+ out[i] &= bottom29Bits
+
+ i++
+ out[i] = tmp2[i+9] >> 1
+ out[i] += carry
+ carry = out[i] >> 28
+ out[i] &= bottom28Bits
+ }
+
+ out[8] = tmp2[17]
+ out[8] += carry
+ carry = out[8] >> 29
+ out[8] &= bottom29Bits
+
+ p256ReduceCarry(out, carry)
+}
+
+// p256Square sets out=in*in.
+//
+// On entry: in[0,2,...] < 2**30, in[1,3,...] < 2**29.
+// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
+func p256Square(out, in *[p256Limbs]uint32) {
+ var tmp [17]uint64
+
+ tmp[0] = uint64(in[0]) * uint64(in[0])
+ tmp[1] = uint64(in[0]) * (uint64(in[1]) << 1)
+ tmp[2] = uint64(in[0])*(uint64(in[2])<<1) +
+ uint64(in[1])*(uint64(in[1])<<1)
+ tmp[3] = uint64(in[0])*(uint64(in[3])<<1) +
+ uint64(in[1])*(uint64(in[2])<<1)
+ tmp[4] = uint64(in[0])*(uint64(in[4])<<1) +
+ uint64(in[1])*(uint64(in[3])<<2) +
+ uint64(in[2])*uint64(in[2])
+ tmp[5] = uint64(in[0])*(uint64(in[5])<<1) +
+ uint64(in[1])*(uint64(in[4])<<1) +
+ uint64(in[2])*(uint64(in[3])<<1)
+ tmp[6] = uint64(in[0])*(uint64(in[6])<<1) +
+ uint64(in[1])*(uint64(in[5])<<2) +
+ uint64(in[2])*(uint64(in[4])<<1) +
+ uint64(in[3])*(uint64(in[3])<<1)
+ tmp[7] = uint64(in[0])*(uint64(in[7])<<1) +
+ uint64(in[1])*(uint64(in[6])<<1) +
+ uint64(in[2])*(uint64(in[5])<<1) +
+ uint64(in[3])*(uint64(in[4])<<1)
+ // tmp[8] has the greatest value of 2**61 + 2**60 + 2**61 + 2**60 + 2**60,
+ // which is < 2**64 as required.
+ tmp[8] = uint64(in[0])*(uint64(in[8])<<1) +
+ uint64(in[1])*(uint64(in[7])<<2) +
+ uint64(in[2])*(uint64(in[6])<<1) +
+ uint64(in[3])*(uint64(in[5])<<2) +
+ uint64(in[4])*uint64(in[4])
+ tmp[9] = uint64(in[1])*(uint64(in[8])<<1) +
+ uint64(in[2])*(uint64(in[7])<<1) +
+ uint64(in[3])*(uint64(in[6])<<1) +
+ uint64(in[4])*(uint64(in[5])<<1)
+ tmp[10] = uint64(in[2])*(uint64(in[8])<<1) +
+ uint64(in[3])*(uint64(in[7])<<2) +
+ uint64(in[4])*(uint64(in[6])<<1) +
+ uint64(in[5])*(uint64(in[5])<<1)
+ tmp[11] = uint64(in[3])*(uint64(in[8])<<1) +
+ uint64(in[4])*(uint64(in[7])<<1) +
+ uint64(in[5])*(uint64(in[6])<<1)
+ tmp[12] = uint64(in[4])*(uint64(in[8])<<1) +
+ uint64(in[5])*(uint64(in[7])<<2) +
+ uint64(in[6])*uint64(in[6])
+ tmp[13] = uint64(in[5])*(uint64(in[8])<<1) +
+ uint64(in[6])*(uint64(in[7])<<1)
+ tmp[14] = uint64(in[6])*(uint64(in[8])<<1) +
+ uint64(in[7])*(uint64(in[7])<<1)
+ tmp[15] = uint64(in[7]) * (uint64(in[8]) << 1)
+ tmp[16] = uint64(in[8]) * uint64(in[8])
+
+ p256ReduceDegree(out, tmp)
+}
+
+// p256Mul sets out=in*in2.
+//
+// On entry: in[0,2,...] < 2**30, in[1,3,...] < 2**29 and
+// in2[0,2,...] < 2**30, in2[1,3,...] < 2**29.
+// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
+func p256Mul(out, in, in2 *[p256Limbs]uint32) {
+ var tmp [17]uint64
+
+ tmp[0] = uint64(in[0]) * uint64(in2[0])
+ tmp[1] = uint64(in[0])*(uint64(in2[1])<<0) +
+ uint64(in[1])*(uint64(in2[0])<<0)
+ tmp[2] = uint64(in[0])*(uint64(in2[2])<<0) +
+ uint64(in[1])*(uint64(in2[1])<<1) +
+ uint64(in[2])*(uint64(in2[0])<<0)
+ tmp[3] = uint64(in[0])*(uint64(in2[3])<<0) +
+ uint64(in[1])*(uint64(in2[2])<<0) +
+ uint64(in[2])*(uint64(in2[1])<<0) +
+ uint64(in[3])*(uint64(in2[0])<<0)
+ tmp[4] = uint64(in[0])*(uint64(in2[4])<<0) +
+ uint64(in[1])*(uint64(in2[3])<<1) +
+ uint64(in[2])*(uint64(in2[2])<<0) +
+ uint64(in[3])*(uint64(in2[1])<<1) +
+ uint64(in[4])*(uint64(in2[0])<<0)
+ tmp[5] = uint64(in[0])*(uint64(in2[5])<<0) +
+ uint64(in[1])*(uint64(in2[4])<<0) +
+ uint64(in[2])*(uint64(in2[3])<<0) +
+ uint64(in[3])*(uint64(in2[2])<<0) +
+ uint64(in[4])*(uint64(in2[1])<<0) +
+ uint64(in[5])*(uint64(in2[0])<<0)
+ tmp[6] = uint64(in[0])*(uint64(in2[6])<<0) +
+ uint64(in[1])*(uint64(in2[5])<<1) +
+ uint64(in[2])*(uint64(in2[4])<<0) +
+ uint64(in[3])*(uint64(in2[3])<<1) +
+ uint64(in[4])*(uint64(in2[2])<<0) +
+ uint64(in[5])*(uint64(in2[1])<<1) +
+ uint64(in[6])*(uint64(in2[0])<<0)
+ tmp[7] = uint64(in[0])*(uint64(in2[7])<<0) +
+ uint64(in[1])*(uint64(in2[6])<<0) +
+ uint64(in[2])*(uint64(in2[5])<<0) +
+ uint64(in[3])*(uint64(in2[4])<<0) +
+ uint64(in[4])*(uint64(in2[3])<<0) +
+ uint64(in[5])*(uint64(in2[2])<<0) +
+ uint64(in[6])*(uint64(in2[1])<<0) +
+ uint64(in[7])*(uint64(in2[0])<<0)
+ // tmp[8] has the greatest value but doesn't overflow. See logic in
+ // p256Square.
+ tmp[8] = uint64(in[0])*(uint64(in2[8])<<0) +
+ uint64(in[1])*(uint64(in2[7])<<1) +
+ uint64(in[2])*(uint64(in2[6])<<0) +
+ uint64(in[3])*(uint64(in2[5])<<1) +
+ uint64(in[4])*(uint64(in2[4])<<0) +
+ uint64(in[5])*(uint64(in2[3])<<1) +
+ uint64(in[6])*(uint64(in2[2])<<0) +
+ uint64(in[7])*(uint64(in2[1])<<1) +
+ uint64(in[8])*(uint64(in2[0])<<0)
+ tmp[9] = uint64(in[1])*(uint64(in2[8])<<0) +
+ uint64(in[2])*(uint64(in2[7])<<0) +
+ uint64(in[3])*(uint64(in2[6])<<0) +
+ uint64(in[4])*(uint64(in2[5])<<0) +
+ uint64(in[5])*(uint64(in2[4])<<0) +
+ uint64(in[6])*(uint64(in2[3])<<0) +
+ uint64(in[7])*(uint64(in2[2])<<0) +
+ uint64(in[8])*(uint64(in2[1])<<0)
+ tmp[10] = uint64(in[2])*(uint64(in2[8])<<0) +
+ uint64(in[3])*(uint64(in2[7])<<1) +
+ uint64(in[4])*(uint64(in2[6])<<0) +
+ uint64(in[5])*(uint64(in2[5])<<1) +
+ uint64(in[6])*(uint64(in2[4])<<0) +
+ uint64(in[7])*(uint64(in2[3])<<1) +
+ uint64(in[8])*(uint64(in2[2])<<0)
+ tmp[11] = uint64(in[3])*(uint64(in2[8])<<0) +
+ uint64(in[4])*(uint64(in2[7])<<0) +
+ uint64(in[5])*(uint64(in2[6])<<0) +
+ uint64(in[6])*(uint64(in2[5])<<0) +
+ uint64(in[7])*(uint64(in2[4])<<0) +
+ uint64(in[8])*(uint64(in2[3])<<0)
+ tmp[12] = uint64(in[4])*(uint64(in2[8])<<0) +
+ uint64(in[5])*(uint64(in2[7])<<1) +
+ uint64(in[6])*(uint64(in2[6])<<0) +
+ uint64(in[7])*(uint64(in2[5])<<1) +
+ uint64(in[8])*(uint64(in2[4])<<0)
+ tmp[13] = uint64(in[5])*(uint64(in2[8])<<0) +
+ uint64(in[6])*(uint64(in2[7])<<0) +
+ uint64(in[7])*(uint64(in2[6])<<0) +
+ uint64(in[8])*(uint64(in2[5])<<0)
+ tmp[14] = uint64(in[6])*(uint64(in2[8])<<0) +
+ uint64(in[7])*(uint64(in2[7])<<1) +
+ uint64(in[8])*(uint64(in2[6])<<0)
+ tmp[15] = uint64(in[7])*(uint64(in2[8])<<0) +
+ uint64(in[8])*(uint64(in2[7])<<0)
+ tmp[16] = uint64(in[8]) * (uint64(in2[8]) << 0)
+
+ p256ReduceDegree(out, tmp)
+}
+
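+// p256Assign sets out = in.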
+func p256Assign(out, in *[p256Limbs]uint32) {
+ *out = *in
+}
+
+// p256Invert calculates |out| = |in|^{-1} mod p.
+//
+// Based on Fermat's Little Theorem:
+// a^p = a (mod p)
+// a^{p-1} = 1 (mod p)
+// a^{p-2} = a^{-1} (mod p)
+func p256Invert(out, in *[p256Limbs]uint32) {
+ var ftmp, ftmp2 [p256Limbs]uint32
+
+ // each e_I will hold |in|^{2^I - 1}
+ var e2, e4, e8, e16, e32, e64 [p256Limbs]uint32
+
+ p256Square(&ftmp, in) // 2^1
+ p256Mul(&ftmp, in, &ftmp) // 2^2 - 2^0
+ p256Assign(&e2, &ftmp)
+ p256Square(&ftmp, &ftmp) // 2^3 - 2^1
+ p256Square(&ftmp, &ftmp) // 2^4 - 2^2
+ p256Mul(&ftmp, &ftmp, &e2) // 2^4 - 2^0
+ p256Assign(&e4, &ftmp)
+ p256Square(&ftmp, &ftmp) // 2^5 - 2^1
+ p256Square(&ftmp, &ftmp) // 2^6 - 2^2
+ p256Square(&ftmp, &ftmp) // 2^7 - 2^3
+ p256Square(&ftmp, &ftmp) // 2^8 - 2^4
+ p256Mul(&ftmp, &ftmp, &e4) // 2^8 - 2^0
+ p256Assign(&e8, &ftmp)
+ for i := 0; i < 8; i++ {
+ p256Square(&ftmp, &ftmp)
+ } // 2^16 - 2^8
+ p256Mul(&ftmp, &ftmp, &e8) // 2^16 - 2^0
+ p256Assign(&e16, &ftmp)
+ for i := 0; i < 16; i++ {
+ p256Square(&ftmp, &ftmp)
+ } // 2^32 - 2^16
+ p256Mul(&ftmp, &ftmp, &e16) // 2^32 - 2^0
+ p256Assign(&e32, &ftmp)
+ for i := 0; i < 32; i++ {
+ p256Square(&ftmp, &ftmp)
+ } // 2^64 - 2^32
+ p256Assign(&e64, &ftmp)
+ p256Mul(&ftmp, &ftmp, in) // 2^64 - 2^32 + 2^0
+ for i := 0; i < 192; i++ {
+ p256Square(&ftmp, &ftmp)
+ } // 2^256 - 2^224 + 2^192
+
+ p256Mul(&ftmp2, &e64, &e32) // 2^64 - 2^0
+ for i := 0; i < 16; i++ {
+ p256Square(&ftmp2, &ftmp2)
+ } // 2^80 - 2^16
+ p256Mul(&ftmp2, &ftmp2, &e16) // 2^80 - 2^0
+ for i := 0; i < 8; i++ {
+ p256Square(&ftmp2, &ftmp2)
+ } // 2^88 - 2^8
+ p256Mul(&ftmp2, &ftmp2, &e8) // 2^88 - 2^0
+ for i := 0; i < 4; i++ {
+ p256Square(&ftmp2, &ftmp2)
+ } // 2^92 - 2^4
+ p256Mul(&ftmp2, &ftmp2, &e4) // 2^92 - 2^0
+ p256Square(&ftmp2, &ftmp2) // 2^93 - 2^1
+ p256Square(&ftmp2, &ftmp2) // 2^94 - 2^2
+ p256Mul(&ftmp2, &ftmp2, &e2) // 2^94 - 2^0
+ p256Square(&ftmp2, &ftmp2) // 2^95 - 2^1
+ p256Square(&ftmp2, &ftmp2) // 2^96 - 2^2
+ p256Mul(&ftmp2, &ftmp2, in) // 2^96 - 3
+
+ p256Mul(out, &ftmp2, &ftmp) // 2^256 - 2^224 + 2^192 + 2^96 - 3
+}
+
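+// For illustration only (a sketch, not part of this file): the inversion
+// above can be cross-checked with math/big by applying Fermat's theorem
+// directly, e.g.
+//
+//	p := p256Params.P
+//	a := big.NewInt(12345)
+//	aInv := new(big.Int).Exp(a, new(big.Int).Sub(p, big.NewInt(2)), p)
+//	// aInv*a mod p == 1, matching p256Invert up to the limb representation.
+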
+// p256Scalar3 sets out=3*out.
+//
+// On entry: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
+// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
+func p256Scalar3(out *[p256Limbs]uint32) {
+ var carry uint32
+
+ for i := 0; ; i++ {
+ out[i] *= 3
+ out[i] += carry
+ carry = out[i] >> 29
+ out[i] &= bottom29Bits
+
+ i++
+ if i == p256Limbs {
+ break
+ }
+
+ out[i] *= 3
+ out[i] += carry
+ carry = out[i] >> 28
+ out[i] &= bottom28Bits
+ }
+
+ p256ReduceCarry(out, carry)
+}
+
+// p256Scalar4 sets out=4*out.
+//
+// On entry: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
+// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
+func p256Scalar4(out *[p256Limbs]uint32) {
+ var carry, nextCarry uint32
+
+ for i := 0; ; i++ {
+ nextCarry = out[i] >> 27
+ out[i] <<= 2
+ out[i] &= bottom29Bits
+ out[i] += carry
+ carry = nextCarry + (out[i] >> 29)
+ out[i] &= bottom29Bits
+
+ i++
+ if i == p256Limbs {
+ break
+ }
+ nextCarry = out[i] >> 26
+ out[i] <<= 2
+ out[i] &= bottom28Bits
+ out[i] += carry
+ carry = nextCarry + (out[i] >> 28)
+ out[i] &= bottom28Bits
+ }
+
+ p256ReduceCarry(out, carry)
+}
+
+// p256Scalar8 sets out=8*out.
+//
+// On entry: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
+// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
+func p256Scalar8(out *[p256Limbs]uint32) {
+ var carry, nextCarry uint32
+
+ for i := 0; ; i++ {
+ nextCarry = out[i] >> 26
+ out[i] <<= 3
+ out[i] &= bottom29Bits
+ out[i] += carry
+ carry = nextCarry + (out[i] >> 29)
+ out[i] &= bottom29Bits
+
+ i++
+ if i == p256Limbs {
+ break
+ }
+ nextCarry = out[i] >> 25
+ out[i] <<= 3
+ out[i] &= bottom28Bits
+ out[i] += carry
+ carry = nextCarry + (out[i] >> 28)
+ out[i] &= bottom28Bits
+ }
+
+ p256ReduceCarry(out, carry)
+}
+
+// Group operations:
+//
+// Elements of the elliptic curve group are represented in Jacobian
+// coordinates: (x, y, z). An affine point (x', y') is x'=x/z**2, y'=y/z**3 in
+// Jacobian form.
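+//
+// For illustration (a sketch, not used by this file), the conversion back to
+// affine coordinates could be written with math/big as
+//
+//	zInv := new(big.Int).ModInverse(z, p)
+//	zInv2 := new(big.Int).Mul(zInv, zInv)
+//	xAff := new(big.Int).Mod(new(big.Int).Mul(x, zInv2), p)
+//	yAff := new(big.Int).Mod(new(big.Int).Mul(y, new(big.Int).Mul(zInv2, zInv)), p)
+//
+// which is what p256PointToAffine below implements on limbs (using Fermat
+// inversion rather than ModInverse).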
+
+// p256PointDouble sets {xOut,yOut,zOut} = 2*{x,y,z}.
+//
+// See https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
+func p256PointDouble(xOut, yOut, zOut, x, y, z *[p256Limbs]uint32) {
+ var delta, gamma, alpha, beta, tmp, tmp2 [p256Limbs]uint32
+
+ p256Square(&delta, z)
+ p256Square(&gamma, y)
+ p256Mul(&beta, x, &gamma)
+
+ p256Sum(&tmp, x, &delta)
+ p256Diff(&tmp2, x, &delta)
+ p256Mul(&alpha, &tmp, &tmp2)
+ p256Scalar3(&alpha)
+
+ p256Sum(&tmp, y, z)
+ p256Square(&tmp, &tmp)
+ p256Diff(&tmp, &tmp, &gamma)
+ p256Diff(zOut, &tmp, &delta)
+
+ p256Scalar4(&beta)
+ p256Square(xOut, &alpha)
+ p256Diff(xOut, xOut, &beta)
+ p256Diff(xOut, xOut, &beta)
+
+ p256Diff(&tmp, &beta, xOut)
+ p256Mul(&tmp, &alpha, &tmp)
+ p256Square(&tmp2, &gamma)
+ p256Scalar8(&tmp2)
+ p256Diff(yOut, &tmp, &tmp2)
+}
+
+// p256PointAddMixed sets {xOut,yOut,zOut} = {x1,y1,z1} + {x2,y2,1}.
+// (i.e. the second point is affine.)
+//
+// See https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
+//
+// Note that this function does not handle P+P, infinity+P nor P+infinity
+// correctly.
+func p256PointAddMixed(xOut, yOut, zOut, x1, y1, z1, x2, y2 *[p256Limbs]uint32) {
+ var z1z1, z1z1z1, s2, u2, h, i, j, r, rr, v, tmp [p256Limbs]uint32
+
+ p256Square(&z1z1, z1)
+ p256Sum(&tmp, z1, z1)
+
+ p256Mul(&u2, x2, &z1z1)
+ p256Mul(&z1z1z1, z1, &z1z1)
+ p256Mul(&s2, y2, &z1z1z1)
+ p256Diff(&h, &u2, x1)
+ p256Sum(&i, &h, &h)
+ p256Square(&i, &i)
+ p256Mul(&j, &h, &i)
+ p256Diff(&r, &s2, y1)
+ p256Sum(&r, &r, &r)
+ p256Mul(&v, x1, &i)
+
+ p256Mul(zOut, &tmp, &h)
+ p256Square(&rr, &r)
+ p256Diff(xOut, &rr, &j)
+ p256Diff(xOut, xOut, &v)
+ p256Diff(xOut, xOut, &v)
+
+ p256Diff(&tmp, &v, xOut)
+ p256Mul(yOut, &tmp, &r)
+ p256Mul(&tmp, y1, &j)
+ p256Diff(yOut, yOut, &tmp)
+ p256Diff(yOut, yOut, &tmp)
+}
+
+// p256PointAdd sets {xOut,yOut,zOut} = {x1,y1,z1} + {x2,y2,z2}.
+//
+// See https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
+//
+// Note that this function does not handle P+P, infinity+P nor P+infinity
+// correctly.
+func p256PointAdd(xOut, yOut, zOut, x1, y1, z1, x2, y2, z2 *[p256Limbs]uint32) {
+ var z1z1, z1z1z1, z2z2, z2z2z2, s1, s2, u1, u2, h, i, j, r, rr, v, tmp [p256Limbs]uint32
+
+ p256Square(&z1z1, z1)
+ p256Square(&z2z2, z2)
+ p256Mul(&u1, x1, &z2z2)
+
+ p256Sum(&tmp, z1, z2)
+ p256Square(&tmp, &tmp)
+ p256Diff(&tmp, &tmp, &z1z1)
+ p256Diff(&tmp, &tmp, &z2z2)
+
+ p256Mul(&z2z2z2, z2, &z2z2)
+ p256Mul(&s1, y1, &z2z2z2)
+
+ p256Mul(&u2, x2, &z1z1)
+ p256Mul(&z1z1z1, z1, &z1z1)
+ p256Mul(&s2, y2, &z1z1z1)
+ p256Diff(&h, &u2, &u1)
+ p256Sum(&i, &h, &h)
+ p256Square(&i, &i)
+ p256Mul(&j, &h, &i)
+ p256Diff(&r, &s2, &s1)
+ p256Sum(&r, &r, &r)
+ p256Mul(&v, &u1, &i)
+
+ p256Mul(zOut, &tmp, &h)
+ p256Square(&rr, &r)
+ p256Diff(xOut, &rr, &j)
+ p256Diff(xOut, xOut, &v)
+ p256Diff(xOut, xOut, &v)
+
+ p256Diff(&tmp, &v, xOut)
+ p256Mul(yOut, &tmp, &r)
+ p256Mul(&tmp, &s1, &j)
+ p256Diff(yOut, yOut, &tmp)
+ p256Diff(yOut, yOut, &tmp)
+}
+
+// p256CopyConditional sets out=in if mask == 0xffffffff, in constant time.
+//
+// On entry: mask is either 0 or 0xffffffff.
+func p256CopyConditional(out, in *[p256Limbs]uint32, mask uint32) {
+ for i := 0; i < p256Limbs; i++ {
+ tmp := mask & (in[i] ^ out[i])
+ out[i] ^= tmp
+ }
+}
+
+// p256SelectAffinePoint sets {out_x,out_y} to the index'th entry of table.
+// On entry: index < 16, table[0] must be zero.
+func p256SelectAffinePoint(xOut, yOut *[p256Limbs]uint32, table []uint32, index uint32) {
+ for i := range xOut {
+ xOut[i] = 0
+ }
+ for i := range yOut {
+ yOut[i] = 0
+ }
+
+ for i := uint32(1); i < 16; i++ {
+ mask := i ^ index
+ mask |= mask >> 2
+ mask |= mask >> 1
+ mask &= 1
+ mask--
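+ // mask is now 0 if i == index and 1 otherwise (the shifts OR every bit
+ // of i^index into bit 0); decrementing turns that into 0xffffffff for
+ // the matching entry and 0 for all others.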
+ for j := range xOut {
+ xOut[j] |= table[0] & mask
+ table = table[1:]
+ }
+ for j := range yOut {
+ yOut[j] |= table[0] & mask
+ table = table[1:]
+ }
+ }
+}
+
+// p256SelectJacobianPoint sets {out_x,out_y,out_z} to the index'th entry of
+// table.
+// On entry: index < 16, table[0] must be zero.
+func p256SelectJacobianPoint(xOut, yOut, zOut *[p256Limbs]uint32, table *[16][3][p256Limbs]uint32, index uint32) {
+ for i := range xOut {
+ xOut[i] = 0
+ }
+ for i := range yOut {
+ yOut[i] = 0
+ }
+ for i := range zOut {
+ zOut[i] = 0
+ }
+
+ // The implicit value at index 0 is all zero. We don't need to perform that
+ // iteration of the loop because we already set out_* to zero.
+ for i := uint32(1); i < 16; i++ {
+ mask := i ^ index
+ mask |= mask >> 2
+ mask |= mask >> 1
+ mask &= 1
+ mask--
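+ // As in p256SelectAffinePoint, mask is now all ones iff i == index.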
+ for j := range xOut {
+ xOut[j] |= table[i][0][j] & mask
+ }
+ for j := range yOut {
+ yOut[j] |= table[i][1][j] & mask
+ }
+ for j := range zOut {
+ zOut[j] |= table[i][2][j] & mask
+ }
+ }
+}
+
+// p256GetBit returns the bit'th bit of scalar.
+func p256GetBit(scalar *[32]uint8, bit uint) uint32 {
+ return uint32(((scalar[bit>>3]) >> (bit & 7)) & 1)
+}
+
+// p256ScalarBaseMult sets {xOut,yOut,zOut} = scalar*G where scalar is a
+// little-endian number. Note that the value of scalar must be less than the
+// order of the group.
+func p256ScalarBaseMult(xOut, yOut, zOut *[p256Limbs]uint32, scalar *[32]uint8) {
+ nIsInfinityMask := ^uint32(0)
+ var pIsNoninfiniteMask, mask, tableOffset uint32
+ var px, py, tx, ty, tz [p256Limbs]uint32
+
+ for i := range xOut {
+ xOut[i] = 0
+ }
+ for i := range yOut {
+ yOut[i] = 0
+ }
+ for i := range zOut {
+ zOut[i] = 0
+ }
+
+ // The loop adds bits at positions 0, 64, 128 and 192, followed by
+ // positions 32, 96, 160 and 224, and does this 32 times.
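+ // For example, on the first iteration (i = 0) the two selected indices
+ // are built from scalar bits {31, 95, 159, 223} (j = 0) and
+ // {63, 127, 191, 255} (j = 32).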
+ for i := uint(0); i < 32; i++ {
+ if i != 0 {
+ p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
+ }
+ tableOffset = 0
+ for j := uint(0); j <= 32; j += 32 {
+ bit0 := p256GetBit(scalar, 31-i+j)
+ bit1 := p256GetBit(scalar, 95-i+j)
+ bit2 := p256GetBit(scalar, 159-i+j)
+ bit3 := p256GetBit(scalar, 223-i+j)
+ index := bit0 | (bit1 << 1) | (bit2 << 2) | (bit3 << 3)
+
+ p256SelectAffinePoint(&px, &py, p256Precomputed[tableOffset:], index)
+ tableOffset += 30 * p256Limbs
+
+ // Since scalar is less than the order of the group, we know that
+ // {xOut,yOut,zOut} != {px,py,1}, unless both are zero, which we handle
+ // below.
+ p256PointAddMixed(&tx, &ty, &tz, xOut, yOut, zOut, &px, &py)
+ // The result of pointAddMixed is incorrect if {xOut,yOut,zOut} is zero
+ // (a.k.a. the point at infinity). We handle that situation by
+ // copying the point from the table.
+ p256CopyConditional(xOut, &px, nIsInfinityMask)
+ p256CopyConditional(yOut, &py, nIsInfinityMask)
+ p256CopyConditional(zOut, &p256One, nIsInfinityMask)
+
+ // Equally, the result is also wrong if the point from the table is
+ // zero, which happens when the index is zero. We handle that by
+ // only copying from {tx,ty,tz} to {xOut,yOut,zOut} if index != 0.
+ pIsNoninfiniteMask = nonZeroToAllOnes(index)
+ mask = pIsNoninfiniteMask & ^nIsInfinityMask
+ p256CopyConditional(xOut, &tx, mask)
+ p256CopyConditional(yOut, &ty, mask)
+ p256CopyConditional(zOut, &tz, mask)
+ // If p was not zero, then n is now non-zero.
+ nIsInfinityMask &^= pIsNoninfiniteMask
+ }
+ }
+}
+
+// p256PointToAffine converts a Jacobian point to an affine point. If the input
+// is the point at infinity then it returns (0, 0) in constant time.
+func p256PointToAffine(xOut, yOut, x, y, z *[p256Limbs]uint32) {
+ var zInv, zInvSq [p256Limbs]uint32
+
+ p256Invert(&zInv, z)
+ p256Square(&zInvSq, &zInv)
+ p256Mul(xOut, x, &zInvSq)
+ p256Mul(&zInv, &zInv, &zInvSq)
+ p256Mul(yOut, y, &zInv)
+}
+
+// p256ToAffine returns a pair of *big.Int containing the affine representation
+// of {x,y,z}.
+func p256ToAffine(x, y, z *[p256Limbs]uint32) (xOut, yOut *big.Int) {
+ var xx, yy [p256Limbs]uint32
+ p256PointToAffine(&xx, &yy, x, y, z)
+ return p256ToBig(&xx), p256ToBig(&yy)
+}
+
+// p256ScalarMult sets {xOut,yOut,zOut} = scalar*{x,y}.
+func p256ScalarMult(xOut, yOut, zOut, x, y *[p256Limbs]uint32, scalar *[32]uint8) {
+ var px, py, pz, tx, ty, tz [p256Limbs]uint32
+ var precomp [16][3][p256Limbs]uint32
+ var nIsInfinityMask, index, pIsNoninfiniteMask, mask uint32
+
+ // We precompute 0,1,2,... times {x,y}.
+ precomp[1][0] = *x
+ precomp[1][1] = *y
+ precomp[1][2] = p256One
+
+ for i := 2; i < 16; i += 2 {
+ p256PointDouble(&precomp[i][0], &precomp[i][1], &precomp[i][2], &precomp[i/2][0], &precomp[i/2][1], &precomp[i/2][2])
+ p256PointAddMixed(&precomp[i+1][0], &precomp[i+1][1], &precomp[i+1][2], &precomp[i][0], &precomp[i][1], &precomp[i][2], x, y)
+ }
+
+ for i := range xOut {
+ xOut[i] = 0
+ }
+ for i := range yOut {
+ yOut[i] = 0
+ }
+ for i := range zOut {
+ zOut[i] = 0
+ }
+ nIsInfinityMask = ^uint32(0)
+
+ // We add in a window of four bits each iteration and do this 64 times.
+ for i := 0; i < 64; i++ {
+ if i != 0 {
+ p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
+ p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
+ p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
+ p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
+ }
+
+ index = uint32(scalar[31-i/2])
+ if (i & 1) == 1 {
+ index &= 15
+ } else {
+ index >>= 4
+ }
+
+ // See the comments in scalarBaseMult about handling infinities.
+ p256SelectJacobianPoint(&px, &py, &pz, &precomp, index)
+ p256PointAdd(&tx, &ty, &tz, xOut, yOut, zOut, &px, &py, &pz)
+ p256CopyConditional(xOut, &px, nIsInfinityMask)
+ p256CopyConditional(yOut, &py, nIsInfinityMask)
+ p256CopyConditional(zOut, &pz, nIsInfinityMask)
+
+ pIsNoninfiniteMask = nonZeroToAllOnes(index)
+ mask = pIsNoninfiniteMask & ^nIsInfinityMask
+ p256CopyConditional(xOut, &tx, mask)
+ p256CopyConditional(yOut, &ty, mask)
+ p256CopyConditional(zOut, &tz, mask)
+ nIsInfinityMask &^= pIsNoninfiniteMask
+ }
+}
+
+// p256FromBig sets out = R*in, where R = 2**257.
+func p256FromBig(out *[p256Limbs]uint32, in *big.Int) {
+ tmp := new(big.Int).Lsh(in, 257)
+ tmp.Mod(tmp, p256Params.P)
+
+ for i := 0; i < p256Limbs; i++ {
+ if bits := tmp.Bits(); len(bits) > 0 {
+ out[i] = uint32(bits[0]) & bottom29Bits
+ } else {
+ out[i] = 0
+ }
+ tmp.Rsh(tmp, 29)
+
+ i++
+ if i == p256Limbs {
+ break
+ }
+
+ if bits := tmp.Bits(); len(bits) > 0 {
+ out[i] = uint32(bits[0]) & bottom28Bits
+ } else {
+ out[i] = 0
+ }
+ tmp.Rsh(tmp, 28)
+ }
+}
+
+// p256ToBig returns a *big.Int containing the value of in.
+func p256ToBig(in *[p256Limbs]uint32) *big.Int {
+ result, tmp := new(big.Int), new(big.Int)
+
+ result.SetInt64(int64(in[p256Limbs-1]))
+ for i := p256Limbs - 2; i >= 0; i-- {
+ if (i & 1) == 0 {
+ result.Lsh(result, 29)
+ } else {
+ result.Lsh(result, 28)
+ }
+ tmp.SetInt64(int64(in[i]))
+ result.Add(result, tmp)
+ }
+
+ result.Mul(result, p256RInverse)
+ result.Mod(result, p256Params.P)
+ return result
+}
diff --git a/src/crypto/elliptic/p256_asm.go b/src/crypto/elliptic/p256_asm.go
new file mode 100644
index 0000000..8624e03
--- /dev/null
+++ b/src/crypto/elliptic/p256_asm.go
@@ -0,0 +1,544 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the Go wrapper for the constant-time, 64-bit assembly
+// implementation of P256. The optimizations performed here are described in
+// detail in:
+// S.Gueron and V.Krasnov, "Fast prime field elliptic-curve cryptography with
+// 256-bit primes"
+// https://link.springer.com/article/10.1007%2Fs13389-014-0090-x
+// https://eprint.iacr.org/2013/816.pdf
+
+//go:build amd64 || arm64
+
+package elliptic
+
+import (
+ _ "embed"
+ "math/big"
+)
+
+//go:generate go run -tags=tablegen gen_p256_table.go
+
+//go:embed p256_asm_table.bin
+var p256Precomputed string
+
+type (
+ p256Curve struct {
+ *CurveParams
+ }
+
+ p256Point struct {
+ xyz [12]uint64
+ }
+)
+
+var p256 p256Curve
+
+func initP256() {
+ // See FIPS 186-3, section D.2.3
+ p256.CurveParams = &CurveParams{Name: "P-256"}
+ p256.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10)
+ p256.N, _ = new(big.Int).SetString("115792089210356248762697446949407573529996955224135760342422259061068512044369", 10)
+ p256.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16)
+ p256.Gx, _ = new(big.Int).SetString("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", 16)
+ p256.Gy, _ = new(big.Int).SetString("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", 16)
+ p256.BitSize = 256
+}
+
+func (curve p256Curve) Params() *CurveParams {
+ return curve.CurveParams
+}
+
+// Functions implemented in p256_asm_*64.s
+// Montgomery multiplication modulo P256
+//go:noescape
+func p256Mul(res, in1, in2 []uint64)
+
+// Montgomery square modulo P256, repeated n times (n >= 1)
+//go:noescape
+func p256Sqr(res, in []uint64, n int)
+
+// Montgomery multiplication by 1
+//go:noescape
+func p256FromMont(res, in []uint64)
+
+// If cond == 1, val <- -val
+//go:noescape
+func p256NegCond(val []uint64, cond int)
+
+// if cond == 0 res <- b; else res <- a
+//go:noescape
+func p256MovCond(res, a, b []uint64, cond int)
+
+// Endianness swap
+//go:noescape
+func p256BigToLittle(res []uint64, in []byte)
+
+//go:noescape
+func p256LittleToBig(res []byte, in []uint64)
+
+// Constant time table access
+//go:noescape
+func p256Select(point, table []uint64, idx int)
+
+//go:noescape
+func p256SelectBase(point *[12]uint64, table string, idx int)
+
+// Montgomery multiplication modulo Ord(G)
+//go:noescape
+func p256OrdMul(res, in1, in2 []uint64)
+
+// Montgomery square modulo Ord(G), repeated n times
+//go:noescape
+func p256OrdSqr(res, in []uint64, n int)
+
+// Point add with in2 being affine point
+// If sign == 1 -> in2 = -in2
+// If sel == 0 -> res = in1
+// If zero == 0 -> res = in2
+//go:noescape
+func p256PointAddAffineAsm(res, in1, in2 []uint64, sign, sel, zero int)
+
+// Point add. Returns one if the two input points were equal and zero
+// otherwise. (Note that, due to the way that the equations work out, some
+// representations of ∞ are considered equal to everything by this function.)
+//go:noescape
+func p256PointAddAsm(res, in1, in2 []uint64) int
+
+// Point double
+//go:noescape
+func p256PointDoubleAsm(res, in []uint64)
+
+func (curve p256Curve) Inverse(k *big.Int) *big.Int {
+ if k.Sign() < 0 {
+ // This should never happen.
+ k = new(big.Int).Neg(k)
+ }
+
+ if k.Cmp(p256.N) >= 0 {
+ // This should never happen.
+ k = new(big.Int).Mod(k, p256.N)
+ }
+
+ // table will store precomputed powers of x.
+ var table [4 * 9]uint64
+ var (
+ _1 = table[4*0 : 4*1]
+ _11 = table[4*1 : 4*2]
+ _101 = table[4*2 : 4*3]
+ _111 = table[4*3 : 4*4]
+ _1111 = table[4*4 : 4*5]
+ _10101 = table[4*5 : 4*6]
+ _101111 = table[4*6 : 4*7]
+ x = table[4*7 : 4*8]
+ t = table[4*8 : 4*9]
+ )
+
+ fromBig(x[:], k)
+ // This code operates in the Montgomery domain where R = 2^256 mod n
+ // and n is the order of the scalar field. (See initP256 for the
+ // value.) Elements in the Montgomery domain take the form a×R and
+ // multiplication of x and y in the domain calculates
+ // (x × y × R^-1) mod n. RR is R×R mod n, thus the Montgomery
+ // multiplication of x and RR gives x×R, i.e. converts x into the
+ // Montgomery domain.
+ // Window values borrowed from https://briansmith.org/ecc-inversion-addition-chains-01#p256_scalar_inversion
+ RR := []uint64{0x83244c95be79eea2, 0x4699799c49bd6fa6, 0x2845b2392b6bec59, 0x66e12d94f3d95620}
+ p256OrdMul(_1, x, RR) // _1
+ p256OrdSqr(x, _1, 1) // _10
+ p256OrdMul(_11, x, _1) // _11
+ p256OrdMul(_101, x, _11) // _101
+ p256OrdMul(_111, x, _101) // _111
+ p256OrdSqr(x, _101, 1) // _1010
+ p256OrdMul(_1111, _101, x) // _1111
+
+ p256OrdSqr(t, x, 1) // _10100
+ p256OrdMul(_10101, t, _1) // _10101
+ p256OrdSqr(x, _10101, 1) // _101010
+ p256OrdMul(_101111, _101, x) // _101111
+ p256OrdMul(x, _10101, x) // _111111 = x6
+ p256OrdSqr(t, x, 2) // _11111100
+ p256OrdMul(t, t, _11) // _11111111 = x8
+ p256OrdSqr(x, t, 8) // _ff00
+ p256OrdMul(x, x, t) // _ffff = x16
+ p256OrdSqr(t, x, 16) // _ffff0000
+ p256OrdMul(t, t, x) // _ffffffff = x32
+
+ p256OrdSqr(x, t, 64)
+ p256OrdMul(x, x, t)
+ p256OrdSqr(x, x, 32)
+ p256OrdMul(x, x, t)
+
+ sqrs := []uint8{
+ 6, 5, 4, 5, 5,
+ 4, 3, 3, 5, 9,
+ 6, 2, 5, 6, 5,
+ 4, 5, 5, 3, 10,
+ 2, 5, 5, 3, 7, 6}
+ muls := [][]uint64{
+ _101111, _111, _11, _1111, _10101,
+ _101, _101, _101, _111, _101111,
+ _1111, _1, _1, _1111, _111,
+ _111, _111, _101, _11, _101111,
+ _11, _11, _11, _1, _10101, _1111}
+
+ for i, s := range sqrs {
+ p256OrdSqr(x, x, int(s))
+ p256OrdMul(x, x, muls[i])
+ }
+
+ // Multiplying by one in the Montgomery domain converts a Montgomery
+ // value out of the domain.
+ one := []uint64{1, 0, 0, 0}
+ p256OrdMul(x, x, one)
+
+ xOut := make([]byte, 32)
+ p256LittleToBig(xOut, x)
+ return new(big.Int).SetBytes(xOut)
+}
+
+// fromBig converts a *big.Int into a format used by this code.
+func fromBig(out []uint64, big *big.Int) {
+ for i := range out {
+ out[i] = 0
+ }
+
+ for i, v := range big.Bits() {
+ out[i] = uint64(v)
+ }
+}
+
+// p256GetScalar endian-swaps the big-endian scalar value from in and writes it
+// to out. If the scalar is equal to or greater than the order of the group,
+// it's
+// reduced modulo that order.
+func p256GetScalar(out []uint64, in []byte) {
+ n := new(big.Int).SetBytes(in)
+
+ if n.Cmp(p256.N) >= 0 {
+ n.Mod(n, p256.N)
+ }
+ fromBig(out, n)
+}
+
+// p256Mul operates in a Montgomery domain with R = 2^256 mod p, where p is the
+// underlying field of the curve. (See initP256 for the value.) Thus rr here is
+// R×R mod p. See comment in Inverse about how this is used.
+var rr = []uint64{0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd}
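+
+// For illustration (a sketch, not used by this code): since R = 2^256, rr
+// above is 2^512 mod p and could be recomputed as
+//
+//	new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 512), p256.P)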
+
+func maybeReduceModP(in *big.Int) *big.Int {
+ if in.Cmp(p256.P) < 0 {
+ return in
+ }
+ return new(big.Int).Mod(in, p256.P)
+}
+
+func (curve p256Curve) CombinedMult(bigX, bigY *big.Int, baseScalar, scalar []byte) (x, y *big.Int) {
+ scalarReversed := make([]uint64, 4)
+ var r1, r2 p256Point
+ p256GetScalar(scalarReversed, baseScalar)
+ r1IsInfinity := scalarIsZero(scalarReversed)
+ r1.p256BaseMult(scalarReversed)
+
+ p256GetScalar(scalarReversed, scalar)
+ r2IsInfinity := scalarIsZero(scalarReversed)
+ fromBig(r2.xyz[0:4], maybeReduceModP(bigX))
+ fromBig(r2.xyz[4:8], maybeReduceModP(bigY))
+ p256Mul(r2.xyz[0:4], r2.xyz[0:4], rr[:])
+ p256Mul(r2.xyz[4:8], r2.xyz[4:8], rr[:])
+
+ // This sets r2's Z value to 1, in the Montgomery domain.
+ r2.xyz[8] = 0x0000000000000001
+ r2.xyz[9] = 0xffffffff00000000
+ r2.xyz[10] = 0xffffffffffffffff
+ r2.xyz[11] = 0x00000000fffffffe
+
+ r2.p256ScalarMult(scalarReversed)
+
+ var sum, double p256Point
+ pointsEqual := p256PointAddAsm(sum.xyz[:], r1.xyz[:], r2.xyz[:])
+ p256PointDoubleAsm(double.xyz[:], r1.xyz[:])
+ sum.CopyConditional(&double, pointsEqual)
+ sum.CopyConditional(&r1, r2IsInfinity)
+ sum.CopyConditional(&r2, r1IsInfinity)
+
+ return sum.p256PointToAffine()
+}
+
+func (curve p256Curve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
+ scalarReversed := make([]uint64, 4)
+ p256GetScalar(scalarReversed, scalar)
+
+ var r p256Point
+ r.p256BaseMult(scalarReversed)
+ return r.p256PointToAffine()
+}
+
+func (curve p256Curve) ScalarMult(bigX, bigY *big.Int, scalar []byte) (x, y *big.Int) {
+ scalarReversed := make([]uint64, 4)
+ p256GetScalar(scalarReversed, scalar)
+
+ var r p256Point
+ fromBig(r.xyz[0:4], maybeReduceModP(bigX))
+ fromBig(r.xyz[4:8], maybeReduceModP(bigY))
+ p256Mul(r.xyz[0:4], r.xyz[0:4], rr[:])
+ p256Mul(r.xyz[4:8], r.xyz[4:8], rr[:])
+ // This sets r's Z value to 1, in the Montgomery domain.
+ r.xyz[8] = 0x0000000000000001
+ r.xyz[9] = 0xffffffff00000000
+ r.xyz[10] = 0xffffffffffffffff
+ r.xyz[11] = 0x00000000fffffffe
+
+ r.p256ScalarMult(scalarReversed)
+ return r.p256PointToAffine()
+}
+
+// uint64IsZero returns 1 if x is zero and zero otherwise.
+func uint64IsZero(x uint64) int {
+ x = ^x
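+ // AND-fold the bits of ^x: after the folds below, bit 0 of x is the
+ // AND of all 64 original bits of ^x, which is 1 exactly when the
+ // input was zero.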
+ x &= x >> 32
+ x &= x >> 16
+ x &= x >> 8
+ x &= x >> 4
+ x &= x >> 2
+ x &= x >> 1
+ return int(x & 1)
+}
+
+// scalarIsZero returns 1 if scalar represents the zero value, and zero
+// otherwise.
+func scalarIsZero(scalar []uint64) int {
+ return uint64IsZero(scalar[0] | scalar[1] | scalar[2] | scalar[3])
+}
+
+func (p *p256Point) p256PointToAffine() (x, y *big.Int) {
+ zInv := make([]uint64, 4)
+ zInvSq := make([]uint64, 4)
+ p256Inverse(zInv, p.xyz[8:12])
+ p256Sqr(zInvSq, zInv, 1)
+ p256Mul(zInv, zInv, zInvSq)
+
+ p256Mul(zInvSq, p.xyz[0:4], zInvSq)
+ p256Mul(zInv, p.xyz[4:8], zInv)
+
+ p256FromMont(zInvSq, zInvSq)
+ p256FromMont(zInv, zInv)
+
+ xOut := make([]byte, 32)
+ yOut := make([]byte, 32)
+ p256LittleToBig(xOut, zInvSq)
+ p256LittleToBig(yOut, zInv)
+
+ return new(big.Int).SetBytes(xOut), new(big.Int).SetBytes(yOut)
+}
+
+// CopyConditional overwrites p with src if v == 1, and leaves p
+// unchanged if v == 0.
+func (p *p256Point) CopyConditional(src *p256Point, v int) {
+ pMask := uint64(v) - 1
+ srcMask := ^pMask
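+ // v is 0 or 1, so pMask is all ones when v == 0 (keep p) and all
+ // zeros when v == 1 (take src); srcMask is its complement.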
+
+ for i, n := range p.xyz {
+ p.xyz[i] = (n & pMask) | (src.xyz[i] & srcMask)
+ }
+}
+
+// p256Inverse sets out to in^-1 mod p.
+func p256Inverse(out, in []uint64) {
+ var stack [6 * 4]uint64
+ p2 := stack[4*0 : 4*0+4]
+ p4 := stack[4*1 : 4*1+4]
+ p8 := stack[4*2 : 4*2+4]
+ p16 := stack[4*3 : 4*3+4]
+ p32 := stack[4*4 : 4*4+4]
+
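+ // Naming: each pN below ends up holding in^(2**N - 1); written in hex
+ // that exponent is a run of one-bits (p2 = in^0x3, p4 = in^0xf,
+ // p8 = in^0xff, ...), which is what the comments track.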
+ p256Sqr(out, in, 1)
+ p256Mul(p2, out, in) // 3*p
+
+ p256Sqr(out, p2, 2)
+ p256Mul(p4, out, p2) // f*p
+
+ p256Sqr(out, p4, 4)
+ p256Mul(p8, out, p4) // ff*p
+
+ p256Sqr(out, p8, 8)
+ p256Mul(p16, out, p8) // ffff*p
+
+ p256Sqr(out, p16, 16)
+ p256Mul(p32, out, p16) // ffffffff*p
+
+ p256Sqr(out, p32, 32)
+ p256Mul(out, out, in)
+
+ p256Sqr(out, out, 128)
+ p256Mul(out, out, p32)
+
+ p256Sqr(out, out, 32)
+ p256Mul(out, out, p32)
+
+ p256Sqr(out, out, 16)
+ p256Mul(out, out, p16)
+
+ p256Sqr(out, out, 8)
+ p256Mul(out, out, p8)
+
+ p256Sqr(out, out, 4)
+ p256Mul(out, out, p4)
+
+ p256Sqr(out, out, 2)
+ p256Mul(out, out, p2)
+
+ p256Sqr(out, out, 2)
+ p256Mul(out, out, in)
+}
+
+func (p *p256Point) p256StorePoint(r *[16 * 4 * 3]uint64, index int) {
+ copy(r[index*12:], p.xyz[:])
+}
+
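+// boothW5 and boothW6 below perform Booth (signed-window) recoding: in is a
+// (w+1)-bit window that overlaps the previous window by one bit, and the
+// result is a digit in [0, 2^(w-1)] together with a sign. For example,
+// boothW5(0b100110) = (13, 1), i.e. the digit -13, since
+// floor(38/2) + (38 & 1) - 32 = -13.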
+func boothW5(in uint) (int, int) {
+ var s uint = ^((in >> 5) - 1)
+ var d uint = (1 << 6) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
+
+func boothW6(in uint) (int, int) {
+ var s uint = ^((in >> 6) - 1)
+ var d uint = (1 << 7) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
+
+func (p *p256Point) p256BaseMult(scalar []uint64) {
+ wvalue := (scalar[0] << 1) & 0x7f
+ sel, sign := boothW6(uint(wvalue))
+ p256SelectBase(&p.xyz, p256Precomputed, sel)
+ p256NegCond(p.xyz[4:8], sign)
+
+ // (This is one, in the Montgomery domain.)
+ p.xyz[8] = 0x0000000000000001
+ p.xyz[9] = 0xffffffff00000000
+ p.xyz[10] = 0xffffffffffffffff
+ p.xyz[11] = 0x00000000fffffffe
+
+ var t0 p256Point
+ // (This is one, in the Montgomery domain.)
+ t0.xyz[8] = 0x0000000000000001
+ t0.xyz[9] = 0xffffffff00000000
+ t0.xyz[10] = 0xffffffffffffffff
+ t0.xyz[11] = 0x00000000fffffffe
+
+ index := uint(5)
+ zero := sel
+
+ for i := 1; i < 43; i++ {
+ if index < 192 {
+ wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x7f
+ } else {
+ wvalue = (scalar[index/64] >> (index % 64)) & 0x7f
+ }
+ index += 6
+ sel, sign = boothW6(uint(wvalue))
+ p256SelectBase(&t0.xyz, p256Precomputed[i*32*8*8:], sel)
+ p256PointAddAffineAsm(p.xyz[0:12], p.xyz[0:12], t0.xyz[0:8], sign, sel, zero)
+ zero |= sel
+ }
+}
+
+func (p *p256Point) p256ScalarMult(scalar []uint64) {
+ // precomp is a table of precomputed points that stores multiples of p,
+ // from 1*p through 16*p.
+ var precomp [16 * 4 * 3]uint64
+ var t0, t1, t2, t3 p256Point
+
+ // Prepare the table
+ p.p256StorePoint(&precomp, 0) // 1
+
+ p256PointDoubleAsm(t0.xyz[:], p.xyz[:])
+ p256PointDoubleAsm(t1.xyz[:], t0.xyz[:])
+ p256PointDoubleAsm(t2.xyz[:], t1.xyz[:])
+ p256PointDoubleAsm(t3.xyz[:], t2.xyz[:])
+ t0.p256StorePoint(&precomp, 1) // 2
+ t1.p256StorePoint(&precomp, 3) // 4
+ t2.p256StorePoint(&precomp, 7) // 8
+ t3.p256StorePoint(&precomp, 15) // 16
+
+ p256PointAddAsm(t0.xyz[:], t0.xyz[:], p.xyz[:])
+ p256PointAddAsm(t1.xyz[:], t1.xyz[:], p.xyz[:])
+ p256PointAddAsm(t2.xyz[:], t2.xyz[:], p.xyz[:])
+ t0.p256StorePoint(&precomp, 2) // 3
+ t1.p256StorePoint(&precomp, 4) // 5
+ t2.p256StorePoint(&precomp, 8) // 9
+
+ p256PointDoubleAsm(t0.xyz[:], t0.xyz[:])
+ p256PointDoubleAsm(t1.xyz[:], t1.xyz[:])
+ t0.p256StorePoint(&precomp, 5) // 6
+ t1.p256StorePoint(&precomp, 9) // 10
+
+ p256PointAddAsm(t2.xyz[:], t0.xyz[:], p.xyz[:])
+ p256PointAddAsm(t1.xyz[:], t1.xyz[:], p.xyz[:])
+ t2.p256StorePoint(&precomp, 6) // 7
+ t1.p256StorePoint(&precomp, 10) // 11
+
+ p256PointDoubleAsm(t0.xyz[:], t0.xyz[:])
+ p256PointDoubleAsm(t2.xyz[:], t2.xyz[:])
+ t0.p256StorePoint(&precomp, 11) // 12
+ t2.p256StorePoint(&precomp, 13) // 14
+
+ p256PointAddAsm(t0.xyz[:], t0.xyz[:], p.xyz[:])
+ p256PointAddAsm(t2.xyz[:], t2.xyz[:], p.xyz[:])
+ t0.p256StorePoint(&precomp, 12) // 13
+ t2.p256StorePoint(&precomp, 14) // 15
+
+ // Start scanning the scalar from the top bit, one window at a time
+ index := uint(254)
+ var sel, sign int
+
+ wvalue := (scalar[index/64] >> (index % 64)) & 0x3f
+ sel, _ = boothW5(uint(wvalue))
+
+ p256Select(p.xyz[0:12], precomp[0:], sel)
+ zero := sel
+
+ for index > 4 {
+ index -= 5
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+
+ if index < 192 {
+ wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x3f
+ } else {
+ wvalue = (scalar[index/64] >> (index % 64)) & 0x3f
+ }
+
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(t0.xyz[0:], precomp[0:], sel)
+ p256NegCond(t0.xyz[4:8], sign)
+ p256PointAddAsm(t1.xyz[:], p.xyz[:], t0.xyz[:])
+ p256MovCond(t1.xyz[0:12], t1.xyz[0:12], p.xyz[0:12], sel)
+ p256MovCond(p.xyz[0:12], t1.xyz[0:12], t0.xyz[0:12], zero)
+ zero |= sel
+ }
+
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+ p256PointDoubleAsm(p.xyz[:], p.xyz[:])
+
+ wvalue = (scalar[0] << 1) & 0x3f
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(t0.xyz[0:], precomp[0:], sel)
+ p256NegCond(t0.xyz[4:8], sign)
+ p256PointAddAsm(t1.xyz[:], p.xyz[:], t0.xyz[:])
+ p256MovCond(t1.xyz[0:12], t1.xyz[0:12], p.xyz[0:12], sel)
+ p256MovCond(p.xyz[0:12], t1.xyz[0:12], t0.xyz[0:12], zero)
+}
diff --git a/src/crypto/elliptic/p256_asm_amd64.s b/src/crypto/elliptic/p256_asm_amd64.s
new file mode 100644
index 0000000..bd16add
--- /dev/null
+++ b/src/crypto/elliptic/p256_asm_amd64.s
@@ -0,0 +1,2347 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains a constant-time, 64-bit assembly implementation of
+// P256. The optimizations performed here are described in detail in:
+// S.Gueron and V.Krasnov, "Fast prime field elliptic-curve cryptography with
+// 256-bit primes"
+// https://link.springer.com/article/10.1007%2Fs13389-014-0090-x
+// https://eprint.iacr.org/2013/816.pdf
+
+#include "textflag.h"
+
+#define res_ptr DI
+#define x_ptr SI
+#define y_ptr CX
+
+#define acc0 R8
+#define acc1 R9
+#define acc2 R10
+#define acc3 R11
+#define acc4 R12
+#define acc5 R13
+#define t0 R14
+#define t1 R15
+
+DATA p256const0<>+0x00(SB)/8, $0x00000000ffffffff
+DATA p256const1<>+0x00(SB)/8, $0xffffffff00000001
+DATA p256ordK0<>+0x00(SB)/8, $0xccd1c8aaee00bc4f
+DATA p256ord<>+0x00(SB)/8, $0xf3b9cac2fc632551
+DATA p256ord<>+0x08(SB)/8, $0xbce6faada7179e84
+DATA p256ord<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256ord<>+0x18(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x00(SB)/8, $0x0000000000000001
+DATA p256one<>+0x08(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256one<>+0x18(SB)/8, $0x00000000fffffffe
+GLOBL p256const0<>(SB), 8, $8
+GLOBL p256const1<>(SB), 8, $8
+GLOBL p256ordK0<>(SB), 8, $8
+GLOBL p256ord<>(SB), 8, $32
+GLOBL p256one<>(SB), 8, $32
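+
+// Notes on the constants above: p256const0 and p256const1 are the second and
+// fourth 64-bit limbs of the field prime p (the other two limbs are all-ones
+// and zero); p256ord is the group order n; p256ordK0 is -n^{-1} mod 2^64, the
+// Montgomery reduction constant used by p256OrdMul and p256OrdSqr; p256one is
+// 1 in the Montgomery domain, i.e. 2^256 mod p.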
+
+/* ---------------------------------------*/
+// func p256LittleToBig(res []byte, in []uint64)
+TEXT ·p256LittleToBig(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256BigToLittle(res []uint64, in []byte)
+TEXT ·p256BigToLittle(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+24(FP), x_ptr
+
+ MOVQ (8*0)(x_ptr), acc0
+ MOVQ (8*1)(x_ptr), acc1
+ MOVQ (8*2)(x_ptr), acc2
+ MOVQ (8*3)(x_ptr), acc3
+
+ BSWAPQ acc0
+ BSWAPQ acc1
+ BSWAPQ acc2
+ BSWAPQ acc3
+
+ MOVQ acc3, (8*0)(res_ptr)
+ MOVQ acc2, (8*1)(res_ptr)
+ MOVQ acc1, (8*2)(res_ptr)
+ MOVQ acc0, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256MovCond(res, a, b []uint64, cond int)
+// If cond == 0 res=b, else res=a
+TEXT ·p256MovCond(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ a+24(FP), x_ptr
+ MOVQ b+48(FP), y_ptr
+ MOVQ cond+72(FP), X12
+
+ PXOR X13, X13
+ PSHUFD $0, X12, X12
+ PCMPEQL X13, X12
+
+ MOVOU X12, X0
+ MOVOU (16*0)(x_ptr), X6
+ PANDN X6, X0
+ MOVOU X12, X1
+ MOVOU (16*1)(x_ptr), X7
+ PANDN X7, X1
+ MOVOU X12, X2
+ MOVOU (16*2)(x_ptr), X8
+ PANDN X8, X2
+ MOVOU X12, X3
+ MOVOU (16*3)(x_ptr), X9
+ PANDN X9, X3
+ MOVOU X12, X4
+ MOVOU (16*4)(x_ptr), X10
+ PANDN X10, X4
+ MOVOU X12, X5
+ MOVOU (16*5)(x_ptr), X11
+ PANDN X11, X5
+
+ MOVOU (16*0)(y_ptr), X6
+ MOVOU (16*1)(y_ptr), X7
+ MOVOU (16*2)(y_ptr), X8
+ MOVOU (16*3)(y_ptr), X9
+ MOVOU (16*4)(y_ptr), X10
+ MOVOU (16*5)(y_ptr), X11
+
+ PAND X12, X6
+ PAND X12, X7
+ PAND X12, X8
+ PAND X12, X9
+ PAND X12, X10
+ PAND X12, X11
+
+ PXOR X6, X0
+ PXOR X7, X1
+ PXOR X8, X2
+ PXOR X9, X3
+ PXOR X10, X4
+ PXOR X11, X5
+
+ MOVOU X0, (16*0)(res_ptr)
+ MOVOU X1, (16*1)(res_ptr)
+ MOVOU X2, (16*2)(res_ptr)
+ MOVOU X3, (16*3)(res_ptr)
+ MOVOU X4, (16*4)(res_ptr)
+ MOVOU X5, (16*5)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256NegCond(val []uint64, cond int)
+TEXT ·p256NegCond(SB),NOSPLIT,$0
+ MOVQ val+0(FP), res_ptr
+ MOVQ cond+24(FP), t0
+ // acc = poly
+ MOVQ $-1, acc0
+ MOVQ p256const0<>(SB), acc1
+ MOVQ $0, acc2
+ MOVQ p256const1<>(SB), acc3
+ // Load the original value
+ MOVQ (8*0)(res_ptr), acc5
+ MOVQ (8*1)(res_ptr), x_ptr
+ MOVQ (8*2)(res_ptr), y_ptr
+ MOVQ (8*3)(res_ptr), t1
+ // Speculatively subtract
+ SUBQ acc5, acc0
+ SBBQ x_ptr, acc1
+ SBBQ y_ptr, acc2
+ SBBQ t1, acc3
+ // If condition is 0, keep original value
+ TESTQ t0, t0
+ CMOVQEQ acc5, acc0
+ CMOVQEQ x_ptr, acc1
+ CMOVQEQ y_ptr, acc2
+ CMOVQEQ t1, acc3
+ // Store result
+ MOVQ acc0, (8*0)(res_ptr)
+ MOVQ acc1, (8*1)(res_ptr)
+ MOVQ acc2, (8*2)(res_ptr)
+ MOVQ acc3, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256Sqr(res, in []uint64, n int)
+TEXT ·p256Sqr(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+24(FP), x_ptr
+ MOVQ n+48(FP), BX
+
+sqrLoop:
+
+ // y[1:] * y[0]
+ MOVQ (8*0)(x_ptr), t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc1
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ // y[2:] * y[1]
+ MOVQ (8*1)(x_ptr), t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, acc5
+ // y[3] * y[2]
+ MOVQ (8*2)(x_ptr), t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, y_ptr
+ XORQ t1, t1
+ // *2
+ ADDQ acc1, acc1
+ ADCQ acc2, acc2
+ ADCQ acc3, acc3
+ ADCQ acc4, acc4
+ ADCQ acc5, acc5
+ ADCQ y_ptr, y_ptr
+ ADCQ $0, t1
+ // Missing products
+ MOVQ (8*0)(x_ptr), AX
+ MULQ AX
+ MOVQ AX, acc0
+ MOVQ DX, t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc1
+ ADCQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc3
+ ADCQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc5
+ ADCQ AX, y_ptr
+ ADCQ DX, t1
+ MOVQ t1, x_ptr
+ // First reduction step
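+ // Montgomery reduction: because p ≡ -1 (mod 2^64), the reduction
+ // factor is acc0 itself, and acc0*p is assembled from shifts of acc0
+ // plus one multiply by p256const1 instead of a full 4-limb
+ // multiplication.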
+ MOVQ acc0, AX
+ MOVQ acc0, t1
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc0, acc1
+ ADCQ t1, acc2
+ ADCQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MOVQ acc1, t1
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc1, acc2
+ ADCQ t1, acc3
+ ADCQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MOVQ acc2, t1
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc2, acc3
+ ADCQ t1, acc0
+ ADCQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, acc2
+ // Last reduction step
+ XORQ t0, t0
+ MOVQ acc3, AX
+ MOVQ acc3, t1
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc3, acc0
+ ADCQ t1, acc1
+ ADCQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+ // Add bits [511:256] of the sqr result
+ ADCQ acc4, acc0
+ ADCQ acc5, acc1
+ ADCQ y_ptr, acc2
+ ADCQ x_ptr, acc3
+ ADCQ $0, t0
+
+ MOVQ acc0, acc4
+ MOVQ acc1, acc5
+ MOVQ acc2, y_ptr
+ MOVQ acc3, t1
+ // Subtract p256
+ SUBQ $-1, acc0
+ SBBQ p256const0<>(SB), acc1
+ SBBQ $0, acc2
+ SBBQ p256const1<>(SB), acc3
+ SBBQ $0, t0
+
+ CMOVQCS acc4, acc0
+ CMOVQCS acc5, acc1
+ CMOVQCS y_ptr, acc2
+ CMOVQCS t1, acc3
+
+ MOVQ acc0, (8*0)(res_ptr)
+ MOVQ acc1, (8*1)(res_ptr)
+ MOVQ acc2, (8*2)(res_ptr)
+ MOVQ acc3, (8*3)(res_ptr)
+ MOVQ res_ptr, x_ptr
+ DECQ BX
+ JNE sqrLoop
+
+ RET
+/* ---------------------------------------*/
+// func p256Mul(res, in1, in2 []uint64)
+TEXT ·p256Mul(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in1+24(FP), x_ptr
+ MOVQ in2+48(FP), y_ptr
+ // x * y[0]
+ MOVQ (8*0)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc0
+ MOVQ DX, acc1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ XORQ acc5, acc5
+ // First reduction step
+ MOVQ acc0, AX
+ MOVQ acc0, t1
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc0, acc1
+ ADCQ t1, acc2
+ ADCQ AX, acc3
+ ADCQ DX, acc4
+ ADCQ $0, acc5
+ XORQ acc0, acc0
+ // x * y[1]
+ MOVQ (8*1)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MOVQ acc1, t1
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc1, acc2
+ ADCQ t1, acc3
+ ADCQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ XORQ acc1, acc1
+ // x * y[2]
+ MOVQ (8*2)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MOVQ acc2, t1
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc2, acc3
+ ADCQ t1, acc4
+ ADCQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ XORQ acc2, acc2
+ // x * y[3]
+ MOVQ (8*3)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Last reduction step
+ MOVQ acc3, AX
+ MOVQ acc3, t1
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc3, acc4
+ ADCQ t1, acc5
+ ADCQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Copy result [255:0]
+ MOVQ acc4, x_ptr
+ MOVQ acc5, acc3
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ // Subtract p256
+ SUBQ $-1, acc4
+ SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc0
+ SBBQ p256const1<>(SB), acc1
+ SBBQ $0, acc2
+
+ CMOVQCS x_ptr, acc4
+ CMOVQCS acc3, acc5
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+
+ MOVQ acc4, (8*0)(res_ptr)
+ MOVQ acc5, (8*1)(res_ptr)
+ MOVQ acc0, (8*2)(res_ptr)
+ MOVQ acc1, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256FromMont(res, in []uint64)
+TEXT ·p256FromMont(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+24(FP), x_ptr
+
+ MOVQ (8*0)(x_ptr), acc0
+ MOVQ (8*1)(x_ptr), acc1
+ MOVQ (8*2)(x_ptr), acc2
+ MOVQ (8*3)(x_ptr), acc3
+ XORQ acc4, acc4
+
+ // Only reduce; no multiplications are needed
+ // First stage
+ MOVQ acc0, AX
+ MOVQ acc0, t1
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc0, acc1
+ ADCQ t1, acc2
+ ADCQ AX, acc3
+ ADCQ DX, acc4
+ XORQ acc5, acc5
+ // Second stage
+ MOVQ acc1, AX
+ MOVQ acc1, t1
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc1, acc2
+ ADCQ t1, acc3
+ ADCQ AX, acc4
+ ADCQ DX, acc5
+ XORQ acc0, acc0
+ // Third stage
+ MOVQ acc2, AX
+ MOVQ acc2, t1
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc2, acc3
+ ADCQ t1, acc4
+ ADCQ AX, acc5
+ ADCQ DX, acc0
+ XORQ acc1, acc1
+ // Last stage
+ MOVQ acc3, AX
+ MOVQ acc3, t1
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc3, acc4
+ ADCQ t1, acc5
+ ADCQ AX, acc0
+ ADCQ DX, acc1
+
+ MOVQ acc4, x_ptr
+ MOVQ acc5, acc3
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+
+ SUBQ $-1, acc4
+ SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc0
+ SBBQ p256const1<>(SB), acc1
+
+ CMOVQCS x_ptr, acc4
+ CMOVQCS acc3, acc5
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+
+ MOVQ acc4, (8*0)(res_ptr)
+ MOVQ acc5, (8*1)(res_ptr)
+ MOVQ acc0, (8*2)(res_ptr)
+ MOVQ acc1, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// Constant time point access to arbitrary point table.
+// Indexed from 1 to 16, with -1 offset
+// (index 0 is implicitly the point at infinity)
+// func p256Select(point, table []uint64, idx int)
+TEXT ·p256Select(SB),NOSPLIT,$0
+ MOVQ idx+48(FP),AX
+ MOVQ table+24(FP),DI
+ MOVQ point+0(FP),DX
+
+ PXOR X15, X15 // X15 = 0
+ PCMPEQL X14, X14 // X14 = -1
+ PSUBL X14, X15 // X15 = 1
+ MOVL AX, X14
+ PSHUFD $0, X14, X14
+
+ PXOR X0, X0
+ PXOR X1, X1
+ PXOR X2, X2
+ PXOR X3, X3
+ PXOR X4, X4
+ PXOR X5, X5
+ MOVQ $16, AX
+
+ MOVOU X15, X13
+
+loop_select:
+
+ MOVOU X13, X12
+ PADDL X15, X13
+ PCMPEQL X14, X12
+
+ MOVOU (16*0)(DI), X6
+ MOVOU (16*1)(DI), X7
+ MOVOU (16*2)(DI), X8
+ MOVOU (16*3)(DI), X9
+ MOVOU (16*4)(DI), X10
+ MOVOU (16*5)(DI), X11
+ ADDQ $(16*6), DI
+
+ PAND X12, X6
+ PAND X12, X7
+ PAND X12, X8
+ PAND X12, X9
+ PAND X12, X10
+ PAND X12, X11
+
+ PXOR X6, X0
+ PXOR X7, X1
+ PXOR X8, X2
+ PXOR X9, X3
+ PXOR X10, X4
+ PXOR X11, X5
+
+ DECQ AX
+ JNE loop_select
+
+ MOVOU X0, (16*0)(DX)
+ MOVOU X1, (16*1)(DX)
+ MOVOU X2, (16*2)(DX)
+ MOVOU X3, (16*3)(DX)
+ MOVOU X4, (16*4)(DX)
+ MOVOU X5, (16*5)(DX)
+
+ RET
+/* ---------------------------------------*/
+// Constant time point access to base point table.
+// func p256SelectBase(point *[12]uint64, table string, idx int)
+TEXT ·p256SelectBase(SB),NOSPLIT,$0
+ MOVQ idx+24(FP),AX
+ MOVQ table+8(FP),DI
+ MOVQ point+0(FP),DX
+
+ PXOR X15, X15 // X15 = 0
+ PCMPEQL X14, X14 // X14 = -1
+ PSUBL X14, X15 // X15 = 1
+ MOVL AX, X14
+ PSHUFD $0, X14, X14
+
+ PXOR X0, X0
+ PXOR X1, X1
+ PXOR X2, X2
+ PXOR X3, X3
+ MOVQ $16, AX
+
+ MOVOU X15, X13
+
+loop_select_base:
+
+ MOVOU X13, X12
+ PADDL X15, X13
+ PCMPEQL X14, X12
+
+ MOVOU (16*0)(DI), X4
+ MOVOU (16*1)(DI), X5
+ MOVOU (16*2)(DI), X6
+ MOVOU (16*3)(DI), X7
+
+ MOVOU (16*4)(DI), X8
+ MOVOU (16*5)(DI), X9
+ MOVOU (16*6)(DI), X10
+ MOVOU (16*7)(DI), X11
+
+ ADDQ $(16*8), DI
+
+ PAND X12, X4
+ PAND X12, X5
+ PAND X12, X6
+ PAND X12, X7
+
+ MOVOU X13, X12
+ PADDL X15, X13
+ PCMPEQL X14, X12
+
+ PAND X12, X8
+ PAND X12, X9
+ PAND X12, X10
+ PAND X12, X11
+
+ PXOR X4, X0
+ PXOR X5, X1
+ PXOR X6, X2
+ PXOR X7, X3
+
+ PXOR X8, X0
+ PXOR X9, X1
+ PXOR X10, X2
+ PXOR X11, X3
+
+ DECQ AX
+ JNE loop_select_base
+
+ MOVOU X0, (16*0)(DX)
+ MOVOU X1, (16*1)(DX)
+ MOVOU X2, (16*2)(DX)
+ MOVOU X3, (16*3)(DX)
+
+ RET
+/* ---------------------------------------*/
+// func p256OrdMul(res, in1, in2 []uint64)
+TEXT ·p256OrdMul(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in1+24(FP), x_ptr
+ MOVQ in2+48(FP), y_ptr
+ // x * y[0]
+ MOVQ (8*0)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc0
+ MOVQ DX, acc1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ XORQ acc5, acc5
+ // First reduction step
+ MOVQ acc0, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc1
+ ADCQ $0, DX
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ DX, acc4
+ ADCQ $0, acc5
+ // x * y[1]
+ MOVQ (8*1)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ // x * y[2]
+ MOVQ (8*2)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ // x * y[3]
+ MOVQ (8*3)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Last reduction step
+ MOVQ acc3, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Copy result [255:0]
+ MOVQ acc4, x_ptr
+ MOVQ acc5, acc3
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ // Subtract p256
+ SUBQ p256ord<>+0x00(SB), acc4
+ SBBQ p256ord<>+0x08(SB), acc5
+ SBBQ p256ord<>+0x10(SB), acc0
+ SBBQ p256ord<>+0x18(SB), acc1
+ SBBQ $0, acc2
+
+ CMOVQCS x_ptr, acc4
+ CMOVQCS acc3, acc5
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+
+ MOVQ acc4, (8*0)(res_ptr)
+ MOVQ acc5, (8*1)(res_ptr)
+ MOVQ acc0, (8*2)(res_ptr)
+ MOVQ acc1, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256OrdSqr(res, in []uint64, n int)
+TEXT ·p256OrdSqr(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+24(FP), x_ptr
+ MOVQ n+48(FP), BX
+
+ordSqrLoop:
+
+ // y[1:] * y[0]
+ MOVQ (8*0)(x_ptr), t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc1
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ // y[2:] * y[1]
+ MOVQ (8*1)(x_ptr), t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, acc5
+ // y[3] * y[2]
+ MOVQ (8*2)(x_ptr), t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, y_ptr
+ XORQ t1, t1
+ // *2
+ ADDQ acc1, acc1
+ ADCQ acc2, acc2
+ ADCQ acc3, acc3
+ ADCQ acc4, acc4
+ ADCQ acc5, acc5
+ ADCQ y_ptr, y_ptr
+ ADCQ $0, t1
+ // Missing products
+ MOVQ (8*0)(x_ptr), AX
+ MULQ AX
+ MOVQ AX, acc0
+ MOVQ DX, t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc1
+ ADCQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc3
+ ADCQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc5
+ ADCQ AX, y_ptr
+ ADCQ DX, t1
+ MOVQ t1, x_ptr
+ // First reduction step
+ MOVQ acc0, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc1
+ ADCQ $0, DX
+ ADDQ AX, acc1
+
+ MOVQ t0, t1
+ ADCQ DX, acc2
+ ADCQ $0, t1
+ SUBQ t0, acc2
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc0
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc3
+ ADCQ $0, acc0
+ SUBQ AX, acc3
+ SBBQ DX, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+
+ MOVQ t0, t1
+ ADCQ DX, acc3
+ ADCQ $0, t1
+ SUBQ t0, acc3
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc1
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc0
+ ADCQ $0, acc1
+ SUBQ AX, acc0
+ SBBQ DX, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+
+ MOVQ t0, t1
+ ADCQ DX, acc0
+ ADCQ $0, t1
+ SUBQ t0, acc0
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc2
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc1
+ ADCQ $0, acc2
+ SUBQ AX, acc1
+ SBBQ DX, acc2
+ // Last reduction step
+ MOVQ acc3, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ t0, t1
+ ADCQ DX, acc1
+ ADCQ $0, t1
+ SUBQ t0, acc1
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc3
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc2
+ ADCQ $0, acc3
+ SUBQ AX, acc2
+ SBBQ DX, acc3
+ XORQ t0, t0
+ // Add bits [511:256] of the sqr result
+ ADCQ acc4, acc0
+ ADCQ acc5, acc1
+ ADCQ y_ptr, acc2
+ ADCQ x_ptr, acc3
+ ADCQ $0, t0
+
+ MOVQ acc0, acc4
+ MOVQ acc1, acc5
+ MOVQ acc2, y_ptr
+ MOVQ acc3, t1
+ // Subtract p256
+ SUBQ p256ord<>+0x00(SB), acc0
+ SBBQ p256ord<>+0x08(SB), acc1
+ SBBQ p256ord<>+0x10(SB), acc2
+ SBBQ p256ord<>+0x18(SB), acc3
+ SBBQ $0, t0
+
+ CMOVQCS acc4, acc0
+ CMOVQCS acc5, acc1
+ CMOVQCS y_ptr, acc2
+ CMOVQCS t1, acc3
+
+ MOVQ acc0, (8*0)(res_ptr)
+ MOVQ acc1, (8*1)(res_ptr)
+ MOVQ acc2, (8*2)(res_ptr)
+ MOVQ acc3, (8*3)(res_ptr)
+ MOVQ res_ptr, x_ptr
+ DECQ BX
+ JNE ordSqrLoop
+
+ RET
+/* ---------------------------------------*/
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+
+#undef acc0
+#undef acc1
+#undef acc2
+#undef acc3
+#undef acc4
+#undef acc5
+#undef t0
+#undef t1
+/* ---------------------------------------*/
+#define mul0 AX
+#define mul1 DX
+#define acc0 BX
+#define acc1 CX
+#define acc2 R8
+#define acc3 R9
+#define acc4 R10
+#define acc5 R11
+#define acc6 R12
+#define acc7 R13
+#define t0 R14
+#define t1 R15
+#define t2 DI
+#define t3 SI
+#define hlp BP
+/* ---------------------------------------*/
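+// Internal ABI for the helpers below: the first operand is passed in
+// acc4..acc7 and the second in t0..t3; results are returned in acc4..acc7.
+// p256SubInternal computes acc - t, p256MulInternal computes acc * t, and
+// p256SqrInternal squares acc, all modulo p and in constant time.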
+TEXT p256SubInternal(SB),NOSPLIT,$0
+ XORQ mul0, mul0
+ SUBQ t0, acc4
+ SBBQ t1, acc5
+ SBBQ t2, acc6
+ SBBQ t3, acc7
+ SBBQ $0, mul0
+
+ MOVQ acc4, acc0
+ MOVQ acc5, acc1
+ MOVQ acc6, acc2
+ MOVQ acc7, acc3
+
+ ADDQ $-1, acc4
+ ADCQ p256const0<>(SB), acc5
+ ADCQ $0, acc6
+ ADCQ p256const1<>(SB), acc7
+ ANDQ $1, mul0
+
+ CMOVQEQ acc0, acc4
+ CMOVQEQ acc1, acc5
+ CMOVQEQ acc2, acc6
+ CMOVQEQ acc3, acc7
+
+ RET
+/* ---------------------------------------*/
+TEXT p256MulInternal(SB),NOSPLIT,$8
+ MOVQ acc4, mul0
+ MULQ t0
+ MOVQ mul0, acc0
+ MOVQ mul1, acc1
+
+ MOVQ acc4, mul0
+ MULQ t1
+ ADDQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, acc2
+
+ MOVQ acc4, mul0
+ MULQ t2
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+
+ MOVQ acc4, mul0
+ MULQ t3
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, acc4
+
+ MOVQ acc5, mul0
+ MULQ t0
+ ADDQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ t1
+ ADDQ hlp, acc2
+ ADCQ $0, mul1
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ t2
+ ADDQ hlp, acc3
+ ADCQ $0, mul1
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ t3
+ ADDQ hlp, acc4
+ ADCQ $0, mul1
+ ADDQ mul0, acc4
+ ADCQ $0, mul1
+ MOVQ mul1, acc5
+
+ MOVQ acc6, mul0
+ MULQ t0
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc6, mul0
+ MULQ t1
+ ADDQ hlp, acc3
+ ADCQ $0, mul1
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc6, mul0
+ MULQ t2
+ ADDQ hlp, acc4
+ ADCQ $0, mul1
+ ADDQ mul0, acc4
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc6, mul0
+ MULQ t3
+ ADDQ hlp, acc5
+ ADCQ $0, mul1
+ ADDQ mul0, acc5
+ ADCQ $0, mul1
+ MOVQ mul1, acc6
+
+ MOVQ acc7, mul0
+ MULQ t0
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc7, mul0
+ MULQ t1
+ ADDQ hlp, acc4
+ ADCQ $0, mul1
+ ADDQ mul0, acc4
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc7, mul0
+ MULQ t2
+ ADDQ hlp, acc5
+ ADCQ $0, mul1
+ ADDQ mul0, acc5
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc7, mul0
+ MULQ t3
+ ADDQ hlp, acc6
+ ADCQ $0, mul1
+ ADDQ mul0, acc6
+ ADCQ $0, mul1
+ MOVQ mul1, acc7
+ // First reduction step
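+	// Since p = 2^256 - 2^224 + 2^192 + 2^96 - 1 and -p^-1 == 1 mod 2^64,
+	// the Montgomery multiplier is acc0 itself, and acc0*p reduces to two
+	// shifts plus one MULQ by the top limb p256const1.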
+ MOVQ acc0, mul0
+ MOVQ acc0, hlp
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc0, acc1
+ ADCQ hlp, acc2
+ ADCQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, acc0
+ // Second reduction step
+ MOVQ acc1, mul0
+ MOVQ acc1, hlp
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc1, acc2
+ ADCQ hlp, acc3
+ ADCQ mul0, acc0
+ ADCQ $0, mul1
+ MOVQ mul1, acc1
+ // Third reduction step
+ MOVQ acc2, mul0
+ MOVQ acc2, hlp
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc2, acc3
+ ADCQ hlp, acc0
+ ADCQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, acc2
+ // Last reduction step
+ MOVQ acc3, mul0
+ MOVQ acc3, hlp
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc3, acc0
+ ADCQ hlp, acc1
+ ADCQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+	MOVQ $0, hlp // MOVQ, not XORQ: the pending carry flag feeds the ADCQ chain below
+ // Add bits [511:256] of the result
+ ADCQ acc0, acc4
+ ADCQ acc1, acc5
+ ADCQ acc2, acc6
+ ADCQ acc3, acc7
+ ADCQ $0, hlp
+ // Copy result
+ MOVQ acc4, acc0
+ MOVQ acc5, acc1
+ MOVQ acc6, acc2
+ MOVQ acc7, acc3
+ // Subtract p256
+ SUBQ $-1, acc4
+	SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc6
+ SBBQ p256const1<>(SB), acc7
+ SBBQ $0, hlp
+ // If the result of the subtraction is negative, restore the previous result
+ CMOVQCS acc0, acc4
+ CMOVQCS acc1, acc5
+ CMOVQCS acc2, acc6
+ CMOVQCS acc3, acc7
+
+ RET
+/* ---------------------------------------*/
+TEXT p256SqrInternal(SB),NOSPLIT,$8
+
+ MOVQ acc4, mul0
+ MULQ acc5
+ MOVQ mul0, acc1
+ MOVQ mul1, acc2
+
+ MOVQ acc4, mul0
+ MULQ acc6
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+
+ MOVQ acc4, mul0
+ MULQ acc7
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, t0
+
+ MOVQ acc5, mul0
+ MULQ acc6
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ acc7
+ ADDQ hlp, t0
+ ADCQ $0, mul1
+ ADDQ mul0, t0
+ ADCQ $0, mul1
+ MOVQ mul1, t1
+
+ MOVQ acc6, mul0
+ MULQ acc7
+ ADDQ mul0, t1
+ ADCQ $0, mul1
+ MOVQ mul1, t2
+ XORQ t3, t3
+ // *2
+ ADDQ acc1, acc1
+ ADCQ acc2, acc2
+ ADCQ acc3, acc3
+ ADCQ t0, t0
+ ADCQ t1, t1
+ ADCQ t2, t2
+ ADCQ $0, t3
+ // Missing products
+ MOVQ acc4, mul0
+ MULQ mul0
+ MOVQ mul0, acc0
+ MOVQ DX, acc4
+
+ MOVQ acc5, mul0
+ MULQ mul0
+ ADDQ acc4, acc1
+ ADCQ mul0, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc4
+
+ MOVQ acc6, mul0
+ MULQ mul0
+ ADDQ acc4, acc3
+ ADCQ mul0, t0
+ ADCQ $0, DX
+ MOVQ DX, acc4
+
+ MOVQ acc7, mul0
+ MULQ mul0
+ ADDQ acc4, t1
+ ADCQ mul0, t2
+ ADCQ DX, t3
+ // First reduction step
+ MOVQ acc0, mul0
+ MOVQ acc0, hlp
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc0, acc1
+ ADCQ hlp, acc2
+ ADCQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, acc0
+ // Second reduction step
+ MOVQ acc1, mul0
+ MOVQ acc1, hlp
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc1, acc2
+ ADCQ hlp, acc3
+ ADCQ mul0, acc0
+ ADCQ $0, mul1
+ MOVQ mul1, acc1
+ // Third reduction step
+ MOVQ acc2, mul0
+ MOVQ acc2, hlp
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc2, acc3
+ ADCQ hlp, acc0
+ ADCQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, acc2
+ // Last reduction step
+ MOVQ acc3, mul0
+ MOVQ acc3, hlp
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc3, acc0
+ ADCQ hlp, acc1
+ ADCQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+	MOVQ $0, hlp // MOVQ, not XORQ: the pending carry flag feeds the ADCQ chain below
+ // Add bits [511:256] of the result
+ ADCQ acc0, t0
+ ADCQ acc1, t1
+ ADCQ acc2, t2
+ ADCQ acc3, t3
+ ADCQ $0, hlp
+ // Copy result
+ MOVQ t0, acc4
+ MOVQ t1, acc5
+ MOVQ t2, acc6
+ MOVQ t3, acc7
+ // Subtract p256
+ SUBQ $-1, acc4
+	SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc6
+ SBBQ p256const1<>(SB), acc7
+ SBBQ $0, hlp
+ // If the result of the subtraction is negative, restore the previous result
+ CMOVQCS t0, acc4
+ CMOVQCS t1, acc5
+ CMOVQCS t2, acc6
+ CMOVQCS t3, acc7
+
+ RET
+/* ---------------------------------------*/
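+// p256MulBy2Inline doubles acc4..acc7 modulo p. Note that the reduced
+// result is left in t0..t3, not in the acc registers; the conditional
+// subtraction of p is done with CMOVQCS in constant time.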
+#define p256MulBy2Inline\
+ XORQ mul0, mul0;\
+ ADDQ acc4, acc4;\
+ ADCQ acc5, acc5;\
+ ADCQ acc6, acc6;\
+ ADCQ acc7, acc7;\
+ ADCQ $0, mul0;\
+ MOVQ acc4, t0;\
+ MOVQ acc5, t1;\
+ MOVQ acc6, t2;\
+ MOVQ acc7, t3;\
+ SUBQ $-1, t0;\
+ SBBQ p256const0<>(SB), t1;\
+ SBBQ $0, t2;\
+ SBBQ p256const1<>(SB), t3;\
+ SBBQ $0, mul0;\
+ CMOVQCS acc4, t0;\
+ CMOVQCS acc5, t1;\
+ CMOVQCS acc6, t2;\
+ CMOVQCS acc7, t3;
+/* ---------------------------------------*/
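+// p256AddInline adds t0..t3 to acc4..acc7 modulo p. As with
+// p256MulBy2Inline, the reduced sum is left in t0..t3.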
+#define p256AddInline \
+ XORQ mul0, mul0;\
+ ADDQ t0, acc4;\
+ ADCQ t1, acc5;\
+ ADCQ t2, acc6;\
+ ADCQ t3, acc7;\
+ ADCQ $0, mul0;\
+ MOVQ acc4, t0;\
+ MOVQ acc5, t1;\
+ MOVQ acc6, t2;\
+ MOVQ acc7, t3;\
+ SUBQ $-1, t0;\
+ SBBQ p256const0<>(SB), t1;\
+ SBBQ $0, t2;\
+ SBBQ p256const1<>(SB), t3;\
+ SBBQ $0, mul0;\
+ CMOVQCS acc4, t0;\
+ CMOVQCS acc5, t1;\
+ CMOVQCS acc6, t2;\
+ CMOVQCS acc7, t3;
+/* ---------------------------------------*/
+#define LDacc(src) MOVQ src(8*0), acc4; MOVQ src(8*1), acc5; MOVQ src(8*2), acc6; MOVQ src(8*3), acc7
+#define LDt(src) MOVQ src(8*0), t0; MOVQ src(8*1), t1; MOVQ src(8*2), t2; MOVQ src(8*3), t3
+#define ST(dst) MOVQ acc4, dst(8*0); MOVQ acc5, dst(8*1); MOVQ acc6, dst(8*2); MOVQ acc7, dst(8*3)
+#define STt(dst) MOVQ t0, dst(8*0); MOVQ t1, dst(8*1); MOVQ t2, dst(8*2); MOVQ t3, dst(8*3)
+#define acc2t MOVQ acc4, t0; MOVQ acc5, t1; MOVQ acc6, t2; MOVQ acc7, t3
+#define t2acc MOVQ t0, acc4; MOVQ t1, acc5; MOVQ t2, acc6; MOVQ t3, acc7
+/* ---------------------------------------*/
+#define x1in(off) (32*0 + off)(SP)
+#define y1in(off) (32*1 + off)(SP)
+#define z1in(off) (32*2 + off)(SP)
+#define x2in(off) (32*3 + off)(SP)
+#define y2in(off) (32*4 + off)(SP)
+#define xout(off) (32*5 + off)(SP)
+#define yout(off) (32*6 + off)(SP)
+#define zout(off) (32*7 + off)(SP)
+#define s2(off) (32*8 + off)(SP)
+#define z1sqr(off) (32*9 + off)(SP)
+#define h(off) (32*10 + off)(SP)
+#define r(off) (32*11 + off)(SP)
+#define hsqr(off) (32*12 + off)(SP)
+#define rsqr(off) (32*13 + off)(SP)
+#define hcub(off) (32*14 + off)(SP)
+#define rptr (32*15)(SP)
+#define sel_save (32*15 + 8)(SP)
+#define zero_save (32*15 + 8 + 4)(SP)
+
+// func p256PointAddAffineAsm(res, in1, in2 []uint64, sign, sel, zero int)
+TEXT ·p256PointAddAffineAsm(SB),0,$512-96
+ // Move input to stack in order to free registers
+ MOVQ res+0(FP), AX
+ MOVQ in1+24(FP), BX
+ MOVQ in2+48(FP), CX
+ MOVQ sign+72(FP), DX
+ MOVQ sel+80(FP), t1
+ MOVQ zero+88(FP), t2
+
+ MOVOU (16*0)(BX), X0
+ MOVOU (16*1)(BX), X1
+ MOVOU (16*2)(BX), X2
+ MOVOU (16*3)(BX), X3
+ MOVOU (16*4)(BX), X4
+ MOVOU (16*5)(BX), X5
+
+ MOVOU X0, x1in(16*0)
+ MOVOU X1, x1in(16*1)
+ MOVOU X2, y1in(16*0)
+ MOVOU X3, y1in(16*1)
+ MOVOU X4, z1in(16*0)
+ MOVOU X5, z1in(16*1)
+
+ MOVOU (16*0)(CX), X0
+ MOVOU (16*1)(CX), X1
+
+ MOVOU X0, x2in(16*0)
+ MOVOU X1, x2in(16*1)
+ // Store pointer to result
+ MOVQ mul0, rptr
+ MOVL t1, sel_save
+ MOVL t2, zero_save
+ // Negate y2in based on sign
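+	// The scalar is recoded into signed windows, so the precomputed affine
+	// point may be needed with either sign; sign != 0 selects -y2 = p - y2,
+	// computed in constant time below.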
+ MOVQ (16*2 + 8*0)(CX), acc4
+ MOVQ (16*2 + 8*1)(CX), acc5
+ MOVQ (16*2 + 8*2)(CX), acc6
+ MOVQ (16*2 + 8*3)(CX), acc7
+ MOVQ $-1, acc0
+ MOVQ p256const0<>(SB), acc1
+ MOVQ $0, acc2
+ MOVQ p256const1<>(SB), acc3
+ XORQ mul0, mul0
+ // Speculatively subtract
+ SUBQ acc4, acc0
+ SBBQ acc5, acc1
+ SBBQ acc6, acc2
+ SBBQ acc7, acc3
+ SBBQ $0, mul0
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ MOVQ acc2, t2
+ MOVQ acc3, t3
+ // Add in case the operand was > p256
+ ADDQ $-1, acc0
+ ADCQ p256const0<>(SB), acc1
+ ADCQ $0, acc2
+ ADCQ p256const1<>(SB), acc3
+ ADCQ $0, mul0
+ CMOVQNE t0, acc0
+ CMOVQNE t1, acc1
+ CMOVQNE t2, acc2
+ CMOVQNE t3, acc3
+ // If condition is 0, keep original value
+ TESTQ DX, DX
+ CMOVQEQ acc4, acc0
+ CMOVQEQ acc5, acc1
+ CMOVQEQ acc6, acc2
+ CMOVQEQ acc7, acc3
+ // Store result
+ MOVQ acc0, y2in(8*0)
+ MOVQ acc1, y2in(8*1)
+ MOVQ acc2, y2in(8*2)
+ MOVQ acc3, y2in(8*3)
+ // Begin point add
+ LDacc (z1in)
+ CALL p256SqrInternal(SB) // z1ˆ2
+ ST (z1sqr)
+
+ LDt (x2in)
+ CALL p256MulInternal(SB) // x2 * z1ˆ2
+
+ LDt (x1in)
+ CALL p256SubInternal(SB) // h = u2 - u1
+ ST (h)
+
+ LDt (z1in)
+ CALL p256MulInternal(SB) // z3 = h * z1
+ ST (zout)
+
+ LDacc (z1sqr)
+ CALL p256MulInternal(SB) // z1ˆ3
+
+ LDt (y2in)
+ CALL p256MulInternal(SB) // s2 = y2 * z1ˆ3
+ ST (s2)
+
+ LDt (y1in)
+ CALL p256SubInternal(SB) // r = s2 - s1
+ ST (r)
+
+ CALL p256SqrInternal(SB) // rsqr = rˆ2
+ ST (rsqr)
+
+ LDacc (h)
+ CALL p256SqrInternal(SB) // hsqr = hˆ2
+ ST (hsqr)
+
+ LDt (h)
+ CALL p256MulInternal(SB) // hcub = hˆ3
+ ST (hcub)
+
+ LDt (y1in)
+ CALL p256MulInternal(SB) // y1 * hˆ3
+ ST (s2)
+
+ LDacc (x1in)
+ LDt (hsqr)
+ CALL p256MulInternal(SB) // u1 * hˆ2
+ ST (h)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+ LDacc (rsqr)
+ CALL p256SubInternal(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ LDt (hcub)
+ CALL p256SubInternal(SB)
+ ST (xout)
+
+ MOVQ acc4, t0
+ MOVQ acc5, t1
+ MOVQ acc6, t2
+ MOVQ acc7, t3
+ LDacc (h)
+ CALL p256SubInternal(SB)
+
+ LDt (r)
+ CALL p256MulInternal(SB)
+
+ LDt (s2)
+ CALL p256SubInternal(SB)
+ ST (yout)
+ // Load stored values from stack
+ MOVQ rptr, AX
+ MOVL sel_save, BX
+ MOVL zero_save, CX
+	// The result is not valid if sel == 0: conditionally select the first
+	// input point instead
+ MOVOU xout(16*0), X0
+ MOVOU xout(16*1), X1
+ MOVOU yout(16*0), X2
+ MOVOU yout(16*1), X3
+ MOVOU zout(16*0), X4
+ MOVOU zout(16*1), X5
+
+ MOVL BX, X6
+ MOVL CX, X7
+
+ PXOR X8, X8
+ PCMPEQL X9, X9
+
+ PSHUFD $0, X6, X6
+ PSHUFD $0, X7, X7
+
+ PCMPEQL X8, X6
+ PCMPEQL X8, X7
+
+ MOVOU X6, X15
+ PANDN X9, X15
+
+ MOVOU x1in(16*0), X9
+ MOVOU x1in(16*1), X10
+ MOVOU y1in(16*0), X11
+ MOVOU y1in(16*1), X12
+ MOVOU z1in(16*0), X13
+ MOVOU z1in(16*1), X14
+
+ PAND X15, X0
+ PAND X15, X1
+ PAND X15, X2
+ PAND X15, X3
+ PAND X15, X4
+ PAND X15, X5
+
+ PAND X6, X9
+ PAND X6, X10
+ PAND X6, X11
+ PAND X6, X12
+ PAND X6, X13
+ PAND X6, X14
+
+ PXOR X9, X0
+ PXOR X10, X1
+ PXOR X11, X2
+ PXOR X12, X3
+ PXOR X13, X4
+ PXOR X14, X5
+	// Similarly, if zero == 0, select the affine second input (x2in, y2in, 1)
+ PCMPEQL X9, X9
+ MOVOU X7, X15
+ PANDN X9, X15
+
+ MOVOU x2in(16*0), X9
+ MOVOU x2in(16*1), X10
+ MOVOU y2in(16*0), X11
+ MOVOU y2in(16*1), X12
+ MOVOU p256one<>+0x00(SB), X13
+ MOVOU p256one<>+0x10(SB), X14
+
+ PAND X15, X0
+ PAND X15, X1
+ PAND X15, X2
+ PAND X15, X3
+ PAND X15, X4
+ PAND X15, X5
+
+ PAND X7, X9
+ PAND X7, X10
+ PAND X7, X11
+ PAND X7, X12
+ PAND X7, X13
+ PAND X7, X14
+
+ PXOR X9, X0
+ PXOR X10, X1
+ PXOR X11, X2
+ PXOR X12, X3
+ PXOR X13, X4
+ PXOR X14, X5
+ // Finally output the result
+ MOVOU X0, (16*0)(AX)
+ MOVOU X1, (16*1)(AX)
+ MOVOU X2, (16*2)(AX)
+ MOVOU X3, (16*3)(AX)
+ MOVOU X4, (16*4)(AX)
+ MOVOU X5, (16*5)(AX)
+ MOVQ $0, rptr
+
+ RET
+#undef x1in
+#undef y1in
+#undef z1in
+#undef x2in
+#undef y2in
+#undef xout
+#undef yout
+#undef zout
+#undef s2
+#undef z1sqr
+#undef h
+#undef r
+#undef hsqr
+#undef rsqr
+#undef hcub
+#undef rptr
+#undef sel_save
+#undef zero_save
+
+// p256IsZero returns 1 in AX if [acc4..acc7] represents zero, and 0
+// otherwise. It clobbers [acc4..acc7], t0 and t1.
+TEXT p256IsZero(SB),NOSPLIT,$0
+	// AX will hold the result flag: 1 if the input is zero.
+ XORQ AX, AX
+ MOVQ $1, t1
+
+ // Check whether [acc4..acc7] are all zero.
+ MOVQ acc4, t0
+ ORQ acc5, t0
+ ORQ acc6, t0
+ ORQ acc7, t0
+
+	// If so, set AX to 1. (CMOV of a constant to a register doesn't appear
+	// to be supported in Go's assembler, hence t1 is preloaded with 1.)
+ CMOVQEQ t1, AX
+
+ // XOR [acc4..acc7] with P and compare with zero again.
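+	// A value congruent to zero mod p may be represented here as either 0
+	// or p itself, so both encodings are tested. acc6 is left untouched
+	// because the corresponding limb of P is zero.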
+ XORQ $-1, acc4
+ XORQ p256const0<>(SB), acc5
+ XORQ p256const1<>(SB), acc7
+ ORQ acc5, acc4
+ ORQ acc6, acc4
+ ORQ acc7, acc4
+
+	// Again set AX to 1 if the value was zero.
+ CMOVQEQ t1, AX
+ RET
+
+/* ---------------------------------------*/
+#define x1in(off) (32*0 + off)(SP)
+#define y1in(off) (32*1 + off)(SP)
+#define z1in(off) (32*2 + off)(SP)
+#define x2in(off) (32*3 + off)(SP)
+#define y2in(off) (32*4 + off)(SP)
+#define z2in(off) (32*5 + off)(SP)
+
+#define xout(off) (32*6 + off)(SP)
+#define yout(off) (32*7 + off)(SP)
+#define zout(off) (32*8 + off)(SP)
+
+#define u1(off) (32*9 + off)(SP)
+#define u2(off) (32*10 + off)(SP)
+#define s1(off) (32*11 + off)(SP)
+#define s2(off) (32*12 + off)(SP)
+#define z1sqr(off) (32*13 + off)(SP)
+#define z2sqr(off) (32*14 + off)(SP)
+#define h(off) (32*15 + off)(SP)
+#define r(off) (32*16 + off)(SP)
+#define hsqr(off) (32*17 + off)(SP)
+#define rsqr(off) (32*18 + off)(SP)
+#define hcub(off) (32*19 + off)(SP)
+#define rptr (32*20)(SP)
+#define points_eq (32*20+8)(SP)
+
+// func p256PointAddAsm(res, in1, in2 []uint64) int
+TEXT ·p256PointAddAsm(SB),0,$680-80
+ // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
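+	// These formulas are not complete: they do not handle equal inputs
+	// (doubling). The return value is 1 when r == 0 and h == 0, so the
+	// caller can detect that case and fall back to p256PointDoubleAsm.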
+ // Move input to stack in order to free registers
+ MOVQ res+0(FP), AX
+ MOVQ in1+24(FP), BX
+ MOVQ in2+48(FP), CX
+
+ MOVOU (16*0)(BX), X0
+ MOVOU (16*1)(BX), X1
+ MOVOU (16*2)(BX), X2
+ MOVOU (16*3)(BX), X3
+ MOVOU (16*4)(BX), X4
+ MOVOU (16*5)(BX), X5
+
+ MOVOU X0, x1in(16*0)
+ MOVOU X1, x1in(16*1)
+ MOVOU X2, y1in(16*0)
+ MOVOU X3, y1in(16*1)
+ MOVOU X4, z1in(16*0)
+ MOVOU X5, z1in(16*1)
+
+ MOVOU (16*0)(CX), X0
+ MOVOU (16*1)(CX), X1
+ MOVOU (16*2)(CX), X2
+ MOVOU (16*3)(CX), X3
+ MOVOU (16*4)(CX), X4
+ MOVOU (16*5)(CX), X5
+
+ MOVOU X0, x2in(16*0)
+ MOVOU X1, x2in(16*1)
+ MOVOU X2, y2in(16*0)
+ MOVOU X3, y2in(16*1)
+ MOVOU X4, z2in(16*0)
+ MOVOU X5, z2in(16*1)
+ // Store pointer to result
+ MOVQ AX, rptr
+ // Begin point add
+ LDacc (z2in)
+ CALL p256SqrInternal(SB) // z2ˆ2
+ ST (z2sqr)
+ LDt (z2in)
+ CALL p256MulInternal(SB) // z2ˆ3
+ LDt (y1in)
+ CALL p256MulInternal(SB) // s1 = z2ˆ3*y1
+ ST (s1)
+
+ LDacc (z1in)
+ CALL p256SqrInternal(SB) // z1ˆ2
+ ST (z1sqr)
+ LDt (z1in)
+ CALL p256MulInternal(SB) // z1ˆ3
+ LDt (y2in)
+ CALL p256MulInternal(SB) // s2 = z1ˆ3*y2
+ ST (s2)
+
+ LDt (s1)
+ CALL p256SubInternal(SB) // r = s2 - s1
+ ST (r)
+ CALL p256IsZero(SB)
+ MOVQ AX, points_eq
+
+ LDacc (z2sqr)
+ LDt (x1in)
+ CALL p256MulInternal(SB) // u1 = x1 * z2ˆ2
+ ST (u1)
+ LDacc (z1sqr)
+ LDt (x2in)
+ CALL p256MulInternal(SB) // u2 = x2 * z1ˆ2
+ ST (u2)
+
+ LDt (u1)
+ CALL p256SubInternal(SB) // h = u2 - u1
+ ST (h)
+ CALL p256IsZero(SB)
+ ANDQ points_eq, AX
+ MOVQ AX, points_eq
+
+ LDacc (r)
+ CALL p256SqrInternal(SB) // rsqr = rˆ2
+ ST (rsqr)
+
+ LDacc (h)
+ CALL p256SqrInternal(SB) // hsqr = hˆ2
+ ST (hsqr)
+
+ LDt (h)
+ CALL p256MulInternal(SB) // hcub = hˆ3
+ ST (hcub)
+
+ LDt (s1)
+ CALL p256MulInternal(SB)
+ ST (s2)
+
+ LDacc (z1in)
+ LDt (z2in)
+ CALL p256MulInternal(SB) // z1 * z2
+ LDt (h)
+ CALL p256MulInternal(SB) // z1 * z2 * h
+ ST (zout)
+
+ LDacc (hsqr)
+ LDt (u1)
+ CALL p256MulInternal(SB) // hˆ2 * u1
+ ST (u2)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+ LDacc (rsqr)
+ CALL p256SubInternal(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ LDt (hcub)
+ CALL p256SubInternal(SB)
+ ST (xout)
+
+ MOVQ acc4, t0
+ MOVQ acc5, t1
+ MOVQ acc6, t2
+ MOVQ acc7, t3
+ LDacc (u2)
+ CALL p256SubInternal(SB)
+
+ LDt (r)
+ CALL p256MulInternal(SB)
+
+ LDt (s2)
+ CALL p256SubInternal(SB)
+ ST (yout)
+
+ MOVOU xout(16*0), X0
+ MOVOU xout(16*1), X1
+ MOVOU yout(16*0), X2
+ MOVOU yout(16*1), X3
+ MOVOU zout(16*0), X4
+ MOVOU zout(16*1), X5
+ // Finally output the result
+ MOVQ rptr, AX
+ MOVQ $0, rptr
+ MOVOU X0, (16*0)(AX)
+ MOVOU X1, (16*1)(AX)
+ MOVOU X2, (16*2)(AX)
+ MOVOU X3, (16*3)(AX)
+ MOVOU X4, (16*4)(AX)
+ MOVOU X5, (16*5)(AX)
+
+ MOVQ points_eq, AX
+ MOVQ AX, ret+72(FP)
+
+ RET
+#undef x1in
+#undef y1in
+#undef z1in
+#undef x2in
+#undef y2in
+#undef z2in
+#undef xout
+#undef yout
+#undef zout
+#undef s1
+#undef s2
+#undef u1
+#undef u2
+#undef z1sqr
+#undef z2sqr
+#undef h
+#undef r
+#undef hsqr
+#undef rsqr
+#undef hcub
+#undef rptr
+/* ---------------------------------------*/
+#define x(off) (32*0 + off)(SP)
+#define y(off) (32*1 + off)(SP)
+#define z(off) (32*2 + off)(SP)
+
+#define s(off) (32*3 + off)(SP)
+#define m(off) (32*4 + off)(SP)
+#define zsqr(off) (32*5 + off)(SP)
+#define tmp(off) (32*6 + off)(SP)
+#define rptr (32*7)(SP)
+
+// func p256PointDoubleAsm(res, in []uint64)
+TEXT ·p256PointDoubleAsm(SB),NOSPLIT,$256-48
+ // Move input to stack in order to free registers
+ MOVQ res+0(FP), AX
+ MOVQ in+24(FP), BX
+
+ MOVOU (16*0)(BX), X0
+ MOVOU (16*1)(BX), X1
+ MOVOU (16*2)(BX), X2
+ MOVOU (16*3)(BX), X3
+ MOVOU (16*4)(BX), X4
+ MOVOU (16*5)(BX), X5
+
+ MOVOU X0, x(16*0)
+ MOVOU X1, x(16*1)
+ MOVOU X2, y(16*0)
+ MOVOU X3, y(16*1)
+ MOVOU X4, z(16*0)
+ MOVOU X5, z(16*1)
+ // Store pointer to result
+ MOVQ AX, rptr
+ // Begin point double
+ LDacc (z)
+ CALL p256SqrInternal(SB)
+ ST (zsqr)
+
+ LDt (x)
+ p256AddInline
+ STt (m)
+
+ LDacc (z)
+ LDt (y)
+ CALL p256MulInternal(SB)
+ p256MulBy2Inline
+ MOVQ rptr, AX
+ // Store z
+ MOVQ t0, (16*4 + 8*0)(AX)
+ MOVQ t1, (16*4 + 8*1)(AX)
+ MOVQ t2, (16*4 + 8*2)(AX)
+ MOVQ t3, (16*4 + 8*3)(AX)
+
+ LDacc (x)
+ LDt (zsqr)
+ CALL p256SubInternal(SB)
+ LDt (m)
+ CALL p256MulInternal(SB)
+ ST (m)
+ // Multiply by 3
+ p256MulBy2Inline
+ LDacc (m)
+ p256AddInline
+ STt (m)
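+	// m now holds 3*(x - z^2)*(x + z^2) = 3*x^2 - 3*z^4, the doubling
+	// "alpha" term specialized for a curve with a = -3.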
+ ////////////////////////
+ LDacc (y)
+ p256MulBy2Inline
+ t2acc
+ CALL p256SqrInternal(SB)
+ ST (s)
+ CALL p256SqrInternal(SB)
+ // Divide by 2
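+	// Halve modulo p: if the value is odd, add p first so that the low bit
+	// clears, then shift the 257-bit result right by one. The CMOVs and
+	// the ANDQ keep the selection constant time.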
+ XORQ mul0, mul0
+ MOVQ acc4, t0
+ MOVQ acc5, t1
+ MOVQ acc6, t2
+ MOVQ acc7, t3
+
+ ADDQ $-1, acc4
+ ADCQ p256const0<>(SB), acc5
+ ADCQ $0, acc6
+ ADCQ p256const1<>(SB), acc7
+ ADCQ $0, mul0
+ TESTQ $1, t0
+
+ CMOVQEQ t0, acc4
+ CMOVQEQ t1, acc5
+ CMOVQEQ t2, acc6
+ CMOVQEQ t3, acc7
+ ANDQ t0, mul0
+
+ SHRQ $1, acc5, acc4
+ SHRQ $1, acc6, acc5
+ SHRQ $1, acc7, acc6
+ SHRQ $1, mul0, acc7
+ ST (y)
+ /////////////////////////
+ LDacc (x)
+ LDt (s)
+ CALL p256MulInternal(SB)
+ ST (s)
+ p256MulBy2Inline
+ STt (tmp)
+
+ LDacc (m)
+ CALL p256SqrInternal(SB)
+ LDt (tmp)
+ CALL p256SubInternal(SB)
+
+ MOVQ rptr, AX
+ // Store x
+ MOVQ acc4, (16*0 + 8*0)(AX)
+ MOVQ acc5, (16*0 + 8*1)(AX)
+ MOVQ acc6, (16*0 + 8*2)(AX)
+ MOVQ acc7, (16*0 + 8*3)(AX)
+
+ acc2t
+ LDacc (s)
+ CALL p256SubInternal(SB)
+
+ LDt (m)
+ CALL p256MulInternal(SB)
+
+ LDt (y)
+ CALL p256SubInternal(SB)
+ MOVQ rptr, AX
+ // Store y
+ MOVQ acc4, (16*2 + 8*0)(AX)
+ MOVQ acc5, (16*2 + 8*1)(AX)
+ MOVQ acc6, (16*2 + 8*2)(AX)
+ MOVQ acc7, (16*2 + 8*3)(AX)
+ ///////////////////////
+ MOVQ $0, rptr
+
+ RET
+/* ---------------------------------------*/
diff --git a/src/crypto/elliptic/p256_asm_arm64.s b/src/crypto/elliptic/p256_asm_arm64.s
new file mode 100644
index 0000000..2b2355d
--- /dev/null
+++ b/src/crypto/elliptic/p256_asm_arm64.s
@@ -0,0 +1,1529 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains a constant-time, 64-bit assembly implementation of
+// P256. The optimizations performed here are described in detail in:
+// S. Gueron and V. Krasnov, "Fast prime field elliptic-curve cryptography with
+// 256-bit primes"
+// http://link.springer.com/article/10.1007%2Fs13389-014-0090-x
+// https://eprint.iacr.org/2013/816.pdf
+
+#include "textflag.h"
+
+#define res_ptr R0
+#define a_ptr R1
+#define b_ptr R2
+
+#define acc0 R3
+#define acc1 R4
+#define acc2 R5
+#define acc3 R6
+
+#define acc4 R7
+#define acc5 R8
+#define acc6 R9
+#define acc7 R10
+#define t0 R11
+#define t1 R12
+#define t2 R13
+#define t3 R14
+#define const0 R15
+#define const1 R16
+
+#define hlp0 R17
+#define hlp1 res_ptr
+
+#define x0 R19
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define y0 R23
+#define y1 R24
+#define y2 R25
+#define y3 R26
+
+#define const2 t2
+#define const3 t3
+
+DATA p256const0<>+0x00(SB)/8, $0x00000000ffffffff
+DATA p256const1<>+0x00(SB)/8, $0xffffffff00000001
+DATA p256ordK0<>+0x00(SB)/8, $0xccd1c8aaee00bc4f
+DATA p256ord<>+0x00(SB)/8, $0xf3b9cac2fc632551
+DATA p256ord<>+0x08(SB)/8, $0xbce6faada7179e84
+DATA p256ord<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256ord<>+0x18(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x00(SB)/8, $0x0000000000000001
+DATA p256one<>+0x08(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256one<>+0x18(SB)/8, $0x00000000fffffffe
+GLOBL p256const0<>(SB), 8, $8
+GLOBL p256const1<>(SB), 8, $8
+GLOBL p256ordK0<>(SB), 8, $8
+GLOBL p256ord<>(SB), 8, $32
+GLOBL p256one<>(SB), 8, $32
+
+/* ---------------------------------------*/
+// func p256LittleToBig(res []byte, in []uint64)
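+// Both conversions are the same 32-byte byte-order reversal, so this
+// simply jumps to p256BigToLittle.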
+TEXT ·p256LittleToBig(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256BigToLittle(res []uint64, in []byte)
+TEXT ·p256BigToLittle(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD in+24(FP), a_ptr
+
+ LDP 0*16(a_ptr), (acc0, acc1)
+ LDP 1*16(a_ptr), (acc2, acc3)
+
+ REV acc0, acc0
+ REV acc1, acc1
+ REV acc2, acc2
+ REV acc3, acc3
+
+ STP (acc3, acc2), 0*16(res_ptr)
+ STP (acc1, acc0), 1*16(res_ptr)
+ RET
+/* ---------------------------------------*/
+// func p256MovCond(res, a, b []uint64, cond int)
+// If cond == 0 res=b, else res=a
+TEXT ·p256MovCond(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD a+24(FP), a_ptr
+ MOVD b+48(FP), b_ptr
+ MOVD cond+72(FP), R3
+
+ CMP $0, R3
+ // Two remarks:
+	// 1) We will want to revisit NEON when support is better.
+	// 2) CSEL might not be constant time on all ARM processors.
+ LDP 0*16(a_ptr), (R4, R5)
+ LDP 1*16(a_ptr), (R6, R7)
+ LDP 2*16(a_ptr), (R8, R9)
+ LDP 0*16(b_ptr), (R16, R17)
+ LDP 1*16(b_ptr), (R19, R20)
+ LDP 2*16(b_ptr), (R21, R22)
+ CSEL EQ, R16, R4, R4
+ CSEL EQ, R17, R5, R5
+ CSEL EQ, R19, R6, R6
+ CSEL EQ, R20, R7, R7
+ CSEL EQ, R21, R8, R8
+ CSEL EQ, R22, R9, R9
+ STP (R4, R5), 0*16(res_ptr)
+ STP (R6, R7), 1*16(res_ptr)
+ STP (R8, R9), 2*16(res_ptr)
+
+ LDP 3*16(a_ptr), (R4, R5)
+ LDP 4*16(a_ptr), (R6, R7)
+ LDP 5*16(a_ptr), (R8, R9)
+ LDP 3*16(b_ptr), (R16, R17)
+ LDP 4*16(b_ptr), (R19, R20)
+ LDP 5*16(b_ptr), (R21, R22)
+ CSEL EQ, R16, R4, R4
+ CSEL EQ, R17, R5, R5
+ CSEL EQ, R19, R6, R6
+ CSEL EQ, R20, R7, R7
+ CSEL EQ, R21, R8, R8
+ CSEL EQ, R22, R9, R9
+ STP (R4, R5), 3*16(res_ptr)
+ STP (R6, R7), 4*16(res_ptr)
+ STP (R8, R9), 5*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256NegCond(val []uint64, cond int)
+TEXT ·p256NegCond(SB),NOSPLIT,$0
+ MOVD val+0(FP), a_ptr
+ MOVD cond+24(FP), hlp0
+ MOVD a_ptr, res_ptr
+ // acc = poly
+ MOVD $-1, acc0
+ MOVD p256const0<>(SB), acc1
+ MOVD $0, acc2
+ MOVD p256const1<>(SB), acc3
+ // Load the original value
+ LDP 0*16(a_ptr), (t0, t1)
+ LDP 1*16(a_ptr), (t2, t3)
+ // Speculatively subtract
+ SUBS t0, acc0
+ SBCS t1, acc1
+ SBCS t2, acc2
+ SBC t3, acc3
+ // If condition is 0, keep original value
+ CMP $0, hlp0
+ CSEL EQ, t0, acc0, acc0
+ CSEL EQ, t1, acc1, acc1
+ CSEL EQ, t2, acc2, acc2
+ CSEL EQ, t3, acc3, acc3
+ // Store result
+ STP (acc0, acc1), 0*16(res_ptr)
+ STP (acc2, acc3), 1*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256Sqr(res, in []uint64, n int)
+TEXT ·p256Sqr(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD in+24(FP), a_ptr
+ MOVD n+48(FP), b_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+
+sqrLoop:
+ SUB $1, b_ptr
+ CALL p256SqrInternal<>(SB)
+ MOVD y0, x0
+ MOVD y1, x1
+ MOVD y2, x2
+ MOVD y3, x3
+ CBNZ b_ptr, sqrLoop
+
+ STP (y0, y1), 0*16(res_ptr)
+ STP (y2, y3), 1*16(res_ptr)
+ RET
+/* ---------------------------------------*/
+// func p256Mul(res, in1, in2 []uint64)
+TEXT ·p256Mul(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD in1+24(FP), a_ptr
+ MOVD in2+48(FP), b_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+
+ LDP 0*16(b_ptr), (y0, y1)
+ LDP 1*16(b_ptr), (y2, y3)
+
+ CALL p256MulInternal<>(SB)
+
+ STP (y0, y1), 0*16(res_ptr)
+ STP (y2, y3), 1*16(res_ptr)
+ RET
+/* ---------------------------------------*/
+// func p256FromMont(res, in []uint64)
+TEXT ·p256FromMont(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD in+24(FP), a_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ LDP 0*16(a_ptr), (acc0, acc1)
+ LDP 1*16(a_ptr), (acc2, acc3)
+ // Only reduce, no multiplications are needed
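+	// Four reduction steps multiply by 2^-256 mod p, converting the input
+	// out of Montgomery form.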
+ // First reduction step
+ ADDS acc0<<32, acc1, acc1
+ LSR $32, acc0, t0
+ MUL acc0, const1, t1
+ UMULH acc0, const1, acc0
+ ADCS t0, acc2
+ ADCS t1, acc3
+ ADC $0, acc0
+ // Second reduction step
+ ADDS acc1<<32, acc2, acc2
+ LSR $32, acc1, t0
+ MUL acc1, const1, t1
+ UMULH acc1, const1, acc1
+ ADCS t0, acc3
+ ADCS t1, acc0
+ ADC $0, acc1
+ // Third reduction step
+ ADDS acc2<<32, acc3, acc3
+ LSR $32, acc2, t0
+ MUL acc2, const1, t1
+ UMULH acc2, const1, acc2
+ ADCS t0, acc0
+ ADCS t1, acc1
+ ADC $0, acc2
+ // Last reduction step
+ ADDS acc3<<32, acc0, acc0
+ LSR $32, acc3, t0
+ MUL acc3, const1, t1
+ UMULH acc3, const1, acc3
+ ADCS t0, acc1
+ ADCS t1, acc2
+ ADC $0, acc3
+
+ SUBS $-1, acc0, t0
+ SBCS const0, acc1, t1
+ SBCS $0, acc2, t2
+ SBCS const1, acc3, t3
+
+ CSEL CS, t0, acc0, acc0
+ CSEL CS, t1, acc1, acc1
+ CSEL CS, t2, acc2, acc2
+ CSEL CS, t3, acc3, acc3
+
+ STP (acc0, acc1), 0*16(res_ptr)
+ STP (acc2, acc3), 1*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// Constant-time point access to an arbitrary point table.
+// Indexed from 1 to 15, with -1 offset
+// (index 0 is implicitly point at infinity)
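+// Every entry is read and conditionally selected with CSEL, so the memory
+// access pattern and timing are independent of idx.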
+// func p256Select(point, table []uint64, idx int)
+TEXT ·p256Select(SB),NOSPLIT,$0
+ MOVD idx+48(FP), const0
+ MOVD table+24(FP), b_ptr
+ MOVD point+0(FP), res_ptr
+
+ EOR x0, x0, x0
+ EOR x1, x1, x1
+ EOR x2, x2, x2
+ EOR x3, x3, x3
+ EOR y0, y0, y0
+ EOR y1, y1, y1
+ EOR y2, y2, y2
+ EOR y3, y3, y3
+ EOR t0, t0, t0
+ EOR t1, t1, t1
+ EOR t2, t2, t2
+ EOR t3, t3, t3
+
+ MOVD $0, const1
+
+loop_select:
+ ADD $1, const1
+ CMP const0, const1
+ LDP.P 16(b_ptr), (acc0, acc1)
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ LDP.P 16(b_ptr), (acc2, acc3)
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ LDP.P 16(b_ptr), (acc4, acc5)
+ CSEL EQ, acc4, y0, y0
+ CSEL EQ, acc5, y1, y1
+ LDP.P 16(b_ptr), (acc6, acc7)
+ CSEL EQ, acc6, y2, y2
+ CSEL EQ, acc7, y3, y3
+ LDP.P 16(b_ptr), (acc0, acc1)
+ CSEL EQ, acc0, t0, t0
+ CSEL EQ, acc1, t1, t1
+ LDP.P 16(b_ptr), (acc2, acc3)
+ CSEL EQ, acc2, t2, t2
+ CSEL EQ, acc3, t3, t3
+
+ CMP $16, const1
+ BNE loop_select
+
+ STP (x0, x1), 0*16(res_ptr)
+ STP (x2, x3), 1*16(res_ptr)
+ STP (y0, y1), 2*16(res_ptr)
+ STP (y2, y3), 3*16(res_ptr)
+ STP (t0, t1), 4*16(res_ptr)
+ STP (t2, t3), 5*16(res_ptr)
+ RET
+/* ---------------------------------------*/
+// Constant-time point access to the base point table.
+// func p256SelectBase(point *[12]uint64, table string, idx int)
+TEXT ·p256SelectBase(SB),NOSPLIT,$0
+ MOVD idx+24(FP), t0
+ MOVD table_base+8(FP), t1
+ MOVD point+0(FP), res_ptr
+
+ EOR x0, x0, x0
+ EOR x1, x1, x1
+ EOR x2, x2, x2
+ EOR x3, x3, x3
+ EOR y0, y0, y0
+ EOR y1, y1, y1
+ EOR y2, y2, y2
+ EOR y3, y3, y3
+
+ MOVD $0, t2
+
+loop_select:
+ ADD $1, t2
+ CMP t0, t2
+ LDP.P 16(t1), (acc0, acc1)
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ LDP.P 16(t1), (acc2, acc3)
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ LDP.P 16(t1), (acc4, acc5)
+ CSEL EQ, acc4, y0, y0
+ CSEL EQ, acc5, y1, y1
+ LDP.P 16(t1), (acc6, acc7)
+ CSEL EQ, acc6, y2, y2
+ CSEL EQ, acc7, y3, y3
+
+ CMP $32, t2
+ BNE loop_select
+
+ STP (x0, x1), 0*16(res_ptr)
+ STP (x2, x3), 1*16(res_ptr)
+ STP (y0, y1), 2*16(res_ptr)
+ STP (y2, y3), 3*16(res_ptr)
+ RET
+/* ---------------------------------------*/
+// func p256OrdSqr(res, in []uint64, n int)
+TEXT ·p256OrdSqr(SB),NOSPLIT,$0
+ MOVD in+24(FP), a_ptr
+ MOVD n+48(FP), b_ptr
+
+ MOVD p256ordK0<>(SB), hlp1
+ LDP p256ord<>+0x00(SB), (const0, const1)
+ LDP p256ord<>+0x10(SB), (const2, const3)
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+
+ordSqrLoop:
+ SUB $1, b_ptr
+
+ // x[1:] * x[0]
+ MUL x0, x1, acc1
+ UMULH x0, x1, acc2
+
+ MUL x0, x2, t0
+ ADDS t0, acc2, acc2
+ UMULH x0, x2, acc3
+
+ MUL x0, x3, t0
+ ADCS t0, acc3, acc3
+ UMULH x0, x3, acc4
+ ADC $0, acc4, acc4
+ // x[2:] * x[1]
+ MUL x1, x2, t0
+ ADDS t0, acc3
+ UMULH x1, x2, t1
+ ADCS t1, acc4
+ ADC $0, ZR, acc5
+
+ MUL x1, x3, t0
+ ADDS t0, acc4
+ UMULH x1, x3, t1
+ ADC t1, acc5
+ // x[3] * x[2]
+ MUL x2, x3, t0
+ ADDS t0, acc5
+ UMULH x2, x3, acc6
+ ADC $0, acc6
+
+ MOVD $0, acc7
+ // *2
+ ADDS acc1, acc1
+ ADCS acc2, acc2
+ ADCS acc3, acc3
+ ADCS acc4, acc4
+ ADCS acc5, acc5
+ ADCS acc6, acc6
+ ADC $0, acc7
+ // Missing products
+ MUL x0, x0, acc0
+ UMULH x0, x0, t0
+ ADDS t0, acc1, acc1
+
+ MUL x1, x1, t0
+ ADCS t0, acc2, acc2
+ UMULH x1, x1, t1
+ ADCS t1, acc3, acc3
+
+ MUL x2, x2, t0
+ ADCS t0, acc4, acc4
+ UMULH x2, x2, t1
+ ADCS t1, acc5, acc5
+
+ MUL x3, x3, t0
+ ADCS t0, acc6, acc6
+ UMULH x3, x3, t1
+ ADC t1, acc7, acc7
+ // First reduction step
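+	// Note: only the carry out of the low-limb ADDS below is needed (acc0
+	// is overwritten later in this step), and k0*ord[0] == -1 mod 2^64
+	// yields the same carry as hlp0*ord[0], so the MUL by hlp1 can issue
+	// without waiting for hlp0.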
+ MUL acc0, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc0, acc0
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc1, acc1
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc2, acc2
+ UMULH const2, hlp0, acc0
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc3, acc3
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, hlp0
+
+ ADDS t1, acc1, acc1
+ ADCS y0, acc2, acc2
+ ADCS acc0, acc3, acc3
+ ADC $0, hlp0, acc0
+ // Second reduction step
+ MUL acc1, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc1, acc1
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc2, acc2
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc3, acc3
+ UMULH const2, hlp0, acc1
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc0, acc0
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, hlp0
+
+ ADDS t1, acc2, acc2
+ ADCS y0, acc3, acc3
+ ADCS acc1, acc0, acc0
+ ADC $0, hlp0, acc1
+ // Third reduction step
+ MUL acc2, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc2, acc2
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc3, acc3
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc0, acc0
+ UMULH const2, hlp0, acc2
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc1, acc1
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, hlp0
+
+ ADDS t1, acc3, acc3
+ ADCS y0, acc0, acc0
+ ADCS acc2, acc1, acc1
+ ADC $0, hlp0, acc2
+
+ // Last reduction step
+ MUL acc3, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc3, acc3
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc0, acc0
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc1, acc1
+ UMULH const2, hlp0, acc3
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc2, acc2
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc7
+
+ ADDS t1, acc0, acc0
+ ADCS y0, acc1, acc1
+ ADCS acc3, acc2, acc2
+ ADC $0, hlp0, acc3
+
+ ADDS acc4, acc0, acc0
+ ADCS acc5, acc1, acc1
+ ADCS acc6, acc2, acc2
+ ADCS acc7, acc3, acc3
+ ADC $0, ZR, acc4
+
+ SUBS const0, acc0, y0
+ SBCS const1, acc1, y1
+ SBCS const2, acc2, y2
+ SBCS const3, acc3, y3
+ SBCS $0, acc4, acc4
+
+ CSEL CS, y0, acc0, x0
+ CSEL CS, y1, acc1, x1
+ CSEL CS, y2, acc2, x2
+ CSEL CS, y3, acc3, x3
+
+ CBNZ b_ptr, ordSqrLoop
+
+ MOVD res+0(FP), res_ptr
+ STP (x0, x1), 0*16(res_ptr)
+ STP (x2, x3), 1*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256OrdMul(res, in1, in2 []uint64)
+TEXT ·p256OrdMul(SB),NOSPLIT,$0
+ MOVD in1+24(FP), a_ptr
+ MOVD in2+48(FP), b_ptr
+
+ MOVD p256ordK0<>(SB), hlp1
+ LDP p256ord<>+0x00(SB), (const0, const1)
+ LDP p256ord<>+0x10(SB), (const2, const3)
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+ LDP 0*16(b_ptr), (y0, y1)
+ LDP 1*16(b_ptr), (y2, y3)
+
+ // y[0] * x
+ MUL y0, x0, acc0
+ UMULH y0, x0, acc1
+
+ MUL y0, x1, t0
+ ADDS t0, acc1
+ UMULH y0, x1, acc2
+
+ MUL y0, x2, t0
+ ADCS t0, acc2
+ UMULH y0, x2, acc3
+
+ MUL y0, x3, t0
+ ADCS t0, acc3
+ UMULH y0, x3, acc4
+ ADC $0, acc4
+ // First reduction step
+ MUL acc0, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc0, acc0
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc1, acc1
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc2, acc2
+ UMULH const2, hlp0, acc0
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc3, acc3
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc4
+
+ ADDS t1, acc1, acc1
+ ADCS y0, acc2, acc2
+ ADCS acc0, acc3, acc3
+ ADC $0, hlp0, acc0
+ // y[1] * x
+ MUL y1, x0, t0
+ ADDS t0, acc1
+ UMULH y1, x0, t1
+
+ MUL y1, x1, t0
+ ADCS t0, acc2
+ UMULH y1, x1, hlp0
+
+ MUL y1, x2, t0
+ ADCS t0, acc3
+ UMULH y1, x2, y0
+
+ MUL y1, x3, t0
+ ADCS t0, acc4
+ UMULH y1, x3, y1
+ ADC $0, ZR, acc5
+
+ ADDS t1, acc2
+ ADCS hlp0, acc3
+ ADCS y0, acc4
+ ADC y1, acc5
+ // Second reduction step
+ MUL acc1, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc1, acc1
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc2, acc2
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc3, acc3
+ UMULH const2, hlp0, acc1
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc0, acc0
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc5
+
+ ADDS t1, acc2, acc2
+ ADCS y0, acc3, acc3
+ ADCS acc1, acc0, acc0
+ ADC $0, hlp0, acc1
+ // y[2] * x
+ MUL y2, x0, t0
+ ADDS t0, acc2
+ UMULH y2, x0, t1
+
+ MUL y2, x1, t0
+ ADCS t0, acc3
+ UMULH y2, x1, hlp0
+
+ MUL y2, x2, t0
+ ADCS t0, acc4
+ UMULH y2, x2, y0
+
+ MUL y2, x3, t0
+ ADCS t0, acc5
+ UMULH y2, x3, y1
+ ADC $0, ZR, acc6
+
+ ADDS t1, acc3
+ ADCS hlp0, acc4
+ ADCS y0, acc5
+ ADC y1, acc6
+ // Third reduction step
+ MUL acc2, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc2, acc2
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc3, acc3
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc0, acc0
+ UMULH const2, hlp0, acc2
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc1, acc1
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc6
+
+ ADDS t1, acc3, acc3
+ ADCS y0, acc0, acc0
+ ADCS acc2, acc1, acc1
+ ADC $0, hlp0, acc2
+ // y[3] * x
+ MUL y3, x0, t0
+ ADDS t0, acc3
+ UMULH y3, x0, t1
+
+ MUL y3, x1, t0
+ ADCS t0, acc4
+ UMULH y3, x1, hlp0
+
+ MUL y3, x2, t0
+ ADCS t0, acc5
+ UMULH y3, x2, y0
+
+ MUL y3, x3, t0
+ ADCS t0, acc6
+ UMULH y3, x3, y1
+ ADC $0, ZR, acc7
+
+ ADDS t1, acc4
+ ADCS hlp0, acc5
+ ADCS y0, acc6
+ ADC y1, acc7
+ // Last reduction step
+ MUL acc3, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc3, acc3
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc0, acc0
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc1, acc1
+ UMULH const2, hlp0, acc3
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc2, acc2
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc7
+
+ ADDS t1, acc0, acc0
+ ADCS y0, acc1, acc1
+ ADCS acc3, acc2, acc2
+ ADC $0, hlp0, acc3
+
+ ADDS acc4, acc0, acc0
+ ADCS acc5, acc1, acc1
+ ADCS acc6, acc2, acc2
+ ADCS acc7, acc3, acc3
+ ADC $0, ZR, acc4
+
+ SUBS const0, acc0, t0
+ SBCS const1, acc1, t1
+ SBCS const2, acc2, t2
+ SBCS const3, acc3, t3
+ SBCS $0, acc4, acc4
+
+ CSEL CS, t0, acc0, acc0
+ CSEL CS, t1, acc1, acc1
+ CSEL CS, t2, acc2, acc2
+ CSEL CS, t3, acc3, acc3
+
+ MOVD res+0(FP), res_ptr
+ STP (acc0, acc1), 0*16(res_ptr)
+ STP (acc2, acc3), 1*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+TEXT p256SubInternal<>(SB),NOSPLIT,$0
+ SUBS x0, y0, acc0
+ SBCS x1, y1, acc1
+ SBCS x2, y2, acc2
+ SBCS x3, y3, acc3
+ SBC $0, ZR, t0
+
+ ADDS $-1, acc0, acc4
+ ADCS const0, acc1, acc5
+ ADCS $0, acc2, acc6
+ ADC const1, acc3, acc7
+
+ ANDS $1, t0
+ CSEL EQ, acc0, acc4, x0
+ CSEL EQ, acc1, acc5, x1
+ CSEL EQ, acc2, acc6, x2
+ CSEL EQ, acc3, acc7, x3
+
+ RET
+/* ---------------------------------------*/
+TEXT p256SqrInternal<>(SB),NOSPLIT,$0
+ // x[1:] * x[0]
+ MUL x0, x1, acc1
+ UMULH x0, x1, acc2
+
+ MUL x0, x2, t0
+ ADDS t0, acc2, acc2
+ UMULH x0, x2, acc3
+
+ MUL x0, x3, t0
+ ADCS t0, acc3, acc3
+ UMULH x0, x3, acc4
+ ADC $0, acc4, acc4
+ // x[2:] * x[1]
+ MUL x1, x2, t0
+ ADDS t0, acc3
+ UMULH x1, x2, t1
+ ADCS t1, acc4
+ ADC $0, ZR, acc5
+
+ MUL x1, x3, t0
+ ADDS t0, acc4
+ UMULH x1, x3, t1
+ ADC t1, acc5
+ // x[3] * x[2]
+ MUL x2, x3, t0
+ ADDS t0, acc5
+ UMULH x2, x3, acc6
+ ADC $0, acc6
+
+ MOVD $0, acc7
+ // *2
+ ADDS acc1, acc1
+ ADCS acc2, acc2
+ ADCS acc3, acc3
+ ADCS acc4, acc4
+ ADCS acc5, acc5
+ ADCS acc6, acc6
+ ADC $0, acc7
+ // Missing products
+ MUL x0, x0, acc0
+ UMULH x0, x0, t0
+ ADDS t0, acc1, acc1
+
+ MUL x1, x1, t0
+ ADCS t0, acc2, acc2
+ UMULH x1, x1, t1
+ ADCS t1, acc3, acc3
+
+ MUL x2, x2, t0
+ ADCS t0, acc4, acc4
+ UMULH x2, x2, t1
+ ADCS t1, acc5, acc5
+
+ MUL x3, x3, t0
+ ADCS t0, acc6, acc6
+ UMULH x3, x3, t1
+ ADCS t1, acc7, acc7
+ // First reduction step
+ ADDS acc0<<32, acc1, acc1
+ LSR $32, acc0, t0
+ MUL acc0, const1, t1
+ UMULH acc0, const1, acc0
+ ADCS t0, acc2, acc2
+ ADCS t1, acc3, acc3
+ ADC $0, acc0, acc0
+ // Second reduction step
+ ADDS acc1<<32, acc2, acc2
+ LSR $32, acc1, t0
+ MUL acc1, const1, t1
+ UMULH acc1, const1, acc1
+ ADCS t0, acc3, acc3
+ ADCS t1, acc0, acc0
+ ADC $0, acc1, acc1
+ // Third reduction step
+ ADDS acc2<<32, acc3, acc3
+ LSR $32, acc2, t0
+ MUL acc2, const1, t1
+ UMULH acc2, const1, acc2
+ ADCS t0, acc0, acc0
+ ADCS t1, acc1, acc1
+ ADC $0, acc2, acc2
+ // Last reduction step
+ ADDS acc3<<32, acc0, acc0
+ LSR $32, acc3, t0
+ MUL acc3, const1, t1
+ UMULH acc3, const1, acc3
+ ADCS t0, acc1, acc1
+ ADCS t1, acc2, acc2
+ ADC $0, acc3, acc3
+ // Add bits [511:256] of the sqr result
+ ADDS acc4, acc0, acc0
+ ADCS acc5, acc1, acc1
+ ADCS acc6, acc2, acc2
+ ADCS acc7, acc3, acc3
+ ADC $0, ZR, acc4
+
+ SUBS $-1, acc0, t0
+ SBCS const0, acc1, t1
+ SBCS $0, acc2, t2
+ SBCS const1, acc3, t3
+ SBCS $0, acc4, acc4
+
+ CSEL CS, t0, acc0, y0
+ CSEL CS, t1, acc1, y1
+ CSEL CS, t2, acc2, y2
+ CSEL CS, t3, acc3, y3
+ RET
+/* ---------------------------------------*/
+TEXT p256MulInternal<>(SB),NOSPLIT,$0
+ // y[0] * x
+ MUL y0, x0, acc0
+ UMULH y0, x0, acc1
+
+ MUL y0, x1, t0
+ ADDS t0, acc1
+ UMULH y0, x1, acc2
+
+ MUL y0, x2, t0
+ ADCS t0, acc2
+ UMULH y0, x2, acc3
+
+ MUL y0, x3, t0
+ ADCS t0, acc3
+ UMULH y0, x3, acc4
+ ADC $0, acc4
+ // First reduction step
+ ADDS acc0<<32, acc1, acc1
+ LSR $32, acc0, t0
+ MUL acc0, const1, t1
+ UMULH acc0, const1, acc0
+ ADCS t0, acc2
+ ADCS t1, acc3
+ ADC $0, acc0
+ // y[1] * x
+ MUL y1, x0, t0
+ ADDS t0, acc1
+ UMULH y1, x0, t1
+
+ MUL y1, x1, t0
+ ADCS t0, acc2
+ UMULH y1, x1, t2
+
+ MUL y1, x2, t0
+ ADCS t0, acc3
+ UMULH y1, x2, t3
+
+ MUL y1, x3, t0
+ ADCS t0, acc4
+ UMULH y1, x3, hlp0
+ ADC $0, ZR, acc5
+
+ ADDS t1, acc2
+ ADCS t2, acc3
+ ADCS t3, acc4
+ ADC hlp0, acc5
+ // Second reduction step
+ ADDS acc1<<32, acc2, acc2
+ LSR $32, acc1, t0
+ MUL acc1, const1, t1
+ UMULH acc1, const1, acc1
+ ADCS t0, acc3
+ ADCS t1, acc0
+ ADC $0, acc1
+ // y[2] * x
+ MUL y2, x0, t0
+ ADDS t0, acc2
+ UMULH y2, x0, t1
+
+ MUL y2, x1, t0
+ ADCS t0, acc3
+ UMULH y2, x1, t2
+
+ MUL y2, x2, t0
+ ADCS t0, acc4
+ UMULH y2, x2, t3
+
+ MUL y2, x3, t0
+ ADCS t0, acc5
+ UMULH y2, x3, hlp0
+ ADC $0, ZR, acc6
+
+ ADDS t1, acc3
+ ADCS t2, acc4
+ ADCS t3, acc5
+ ADC hlp0, acc6
+ // Third reduction step
+ ADDS acc2<<32, acc3, acc3
+ LSR $32, acc2, t0
+ MUL acc2, const1, t1
+ UMULH acc2, const1, acc2
+ ADCS t0, acc0
+ ADCS t1, acc1
+ ADC $0, acc2
+ // y[3] * x
+ MUL y3, x0, t0
+ ADDS t0, acc3
+ UMULH y3, x0, t1
+
+ MUL y3, x1, t0
+ ADCS t0, acc4
+ UMULH y3, x1, t2
+
+ MUL y3, x2, t0
+ ADCS t0, acc5
+ UMULH y3, x2, t3
+
+ MUL y3, x3, t0
+ ADCS t0, acc6
+ UMULH y3, x3, hlp0
+ ADC $0, ZR, acc7
+
+ ADDS t1, acc4
+ ADCS t2, acc5
+ ADCS t3, acc6
+ ADC hlp0, acc7
+ // Last reduction step
+ ADDS acc3<<32, acc0, acc0
+ LSR $32, acc3, t0
+ MUL acc3, const1, t1
+ UMULH acc3, const1, acc3
+ ADCS t0, acc1
+ ADCS t1, acc2
+ ADC $0, acc3
+ // Add bits [511:256] of the mul result
+ ADDS acc4, acc0, acc0
+ ADCS acc5, acc1, acc1
+ ADCS acc6, acc2, acc2
+ ADCS acc7, acc3, acc3
+ ADC $0, ZR, acc4
+
+ SUBS $-1, acc0, t0
+ SBCS const0, acc1, t1
+ SBCS $0, acc2, t2
+ SBCS const1, acc3, t3
+ SBCS $0, acc4, acc4
+
+ CSEL CS, t0, acc0, y0
+ CSEL CS, t1, acc1, y1
+ CSEL CS, t2, acc2, y2
+ CSEL CS, t3, acc3, y3
+ RET
+/* ---------------------------------------*/
+#define p256MulBy2Inline \
+ ADDS y0, y0, x0; \
+ ADCS y1, y1, x1; \
+ ADCS y2, y2, x2; \
+ ADCS y3, y3, x3; \
+ ADC $0, ZR, hlp0; \
+ SUBS $-1, x0, t0; \
+ SBCS const0, x1, t1;\
+ SBCS $0, x2, t2; \
+ SBCS const1, x3, t3;\
+ SBCS $0, hlp0, hlp0;\
+ CSEL CC, x0, t0, x0;\
+ CSEL CC, x1, t1, x1;\
+ CSEL CC, x2, t2, x2;\
+ CSEL CC, x3, t3, x3;
+/* ---------------------------------------*/
+#define x1in(off) (off)(a_ptr)
+#define y1in(off) (off + 32)(a_ptr)
+#define z1in(off) (off + 64)(a_ptr)
+#define x2in(off) (off)(b_ptr)
+#define z2in(off) (off + 64)(b_ptr)
+#define x3out(off) (off)(res_ptr)
+#define y3out(off) (off + 32)(res_ptr)
+#define z3out(off) (off + 64)(res_ptr)
+#define LDx(src) LDP src(0), (x0, x1); LDP src(16), (x2, x3)
+#define LDy(src) LDP src(0), (y0, y1); LDP src(16), (y2, y3)
+#define STx(src) STP (x0, x1), src(0); STP (x2, x3), src(16)
+#define STy(src) STP (y0, y1), src(0); STP (y2, y3), src(16)
+/* ---------------------------------------*/
+#define y2in(off) (32*0 + 8 + off)(RSP)
+#define s2(off) (32*1 + 8 + off)(RSP)
+#define z1sqr(off) (32*2 + 8 + off)(RSP)
+#define h(off) (32*3 + 8 + off)(RSP)
+#define r(off) (32*4 + 8 + off)(RSP)
+#define hsqr(off) (32*5 + 8 + off)(RSP)
+#define rsqr(off) (32*6 + 8 + off)(RSP)
+#define hcub(off) (32*7 + 8 + off)(RSP)
+
+#define z2sqr(off) (32*8 + 8 + off)(RSP)
+#define s1(off) (32*9 + 8 + off)(RSP)
+#define u1(off) (32*10 + 8 + off)(RSP)
+#define u2(off) (32*11 + 8 + off)(RSP)
+
+// func p256PointAddAffineAsm(res, in1, in2 []uint64, sign, sel, zero int)
+TEXT ·p256PointAddAffineAsm(SB),0,$264-96
+ MOVD in1+24(FP), a_ptr
+ MOVD in2+48(FP), b_ptr
+ MOVD sign+72(FP), hlp0
+ MOVD sel+80(FP), hlp1
+ MOVD zero+88(FP), t2
+
+ MOVD $1, t0
+ CMP $0, t2
+ CSEL EQ, ZR, t0, t2
+ CMP $0, hlp1
+ CSEL EQ, ZR, t0, hlp1
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+ EOR t2<<1, hlp1
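+	// hlp1 bit 0 is now set iff sel != 0 and bit 1 iff zero != 0; the ANDS
+	// tests below use these bits to choose between the computed point and
+	// the original inputs.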
+
+ // Negate y2in based on sign
+ LDP 2*16(b_ptr), (y0, y1)
+ LDP 3*16(b_ptr), (y2, y3)
+ MOVD $-1, acc0
+
+ SUBS y0, acc0, acc0
+ SBCS y1, const0, acc1
+ SBCS y2, ZR, acc2
+ SBCS y3, const1, acc3
+ SBC $0, ZR, t0
+
+ ADDS $-1, acc0, acc4
+ ADCS const0, acc1, acc5
+ ADCS $0, acc2, acc6
+ ADCS const1, acc3, acc7
+ ADC $0, t0, t0
+
+ CMP $0, t0
+ CSEL EQ, acc4, acc0, acc0
+ CSEL EQ, acc5, acc1, acc1
+ CSEL EQ, acc6, acc2, acc2
+ CSEL EQ, acc7, acc3, acc3
+ // If condition is 0, keep original value
+ CMP $0, hlp0
+ CSEL EQ, y0, acc0, y0
+ CSEL EQ, y1, acc1, y1
+ CSEL EQ, y2, acc2, y2
+ CSEL EQ, y3, acc3, y3
+ // Store result
+ STy(y2in)
+ // Begin point add
+ LDx(z1in)
+ CALL p256SqrInternal<>(SB) // z1ˆ2
+ STy(z1sqr)
+
+ LDx(x2in)
+ CALL p256MulInternal<>(SB) // x2 * z1ˆ2
+
+ LDx(x1in)
+ CALL p256SubInternal<>(SB) // h = u2 - u1
+ STx(h)
+
+ LDy(z1in)
+ CALL p256MulInternal<>(SB) // z3 = h * z1
+
+	LDP 4*16(a_ptr), (acc0, acc1) // iff select[0] == 0, z3 = z1
+ LDP 5*16(a_ptr), (acc2, acc3)
+ ANDS $1, hlp1, ZR
+ CSEL EQ, acc0, y0, y0
+ CSEL EQ, acc1, y1, y1
+ CSEL EQ, acc2, y2, y2
+ CSEL EQ, acc3, y3, y3
+ LDP p256one<>+0x00(SB), (acc0, acc1)
+ LDP p256one<>+0x10(SB), (acc2, acc3)
+ ANDS $2, hlp1, ZR // iff select[1] == 0, z3 = 1
+ CSEL EQ, acc0, y0, y0
+ CSEL EQ, acc1, y1, y1
+ CSEL EQ, acc2, y2, y2
+ CSEL EQ, acc3, y3, y3
+ LDx(z1in)
+ MOVD res+0(FP), t0
+ STP (y0, y1), 4*16(t0)
+ STP (y2, y3), 5*16(t0)
+
+ LDy(z1sqr)
+ CALL p256MulInternal<>(SB) // z1 ^ 3
+
+ LDx(y2in)
+ CALL p256MulInternal<>(SB) // s2 = y2 * z1ˆ3
+ STy(s2)
+
+ LDx(y1in)
+ CALL p256SubInternal<>(SB) // r = s2 - s1
+ STx(r)
+
+ CALL p256SqrInternal<>(SB) // rsqr = rˆ2
+	STy(rsqr)
+
+ LDx(h)
+ CALL p256SqrInternal<>(SB) // hsqr = hˆ2
+ STy(hsqr)
+
+ CALL p256MulInternal<>(SB) // hcub = hˆ3
+ STy(hcub)
+
+ LDx(y1in)
+ CALL p256MulInternal<>(SB) // y1 * hˆ3
+ STy(s2)
+
+ LDP hsqr(0*8), (x0, x1)
+ LDP hsqr(2*8), (x2, x3)
+ LDP 0*16(a_ptr), (y0, y1)
+ LDP 1*16(a_ptr), (y2, y3)
+ CALL p256MulInternal<>(SB) // u1 * hˆ2
+ STP (y0, y1), h(0*8)
+ STP (y2, y3), h(2*8)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+
+ LDy(rsqr)
+ CALL p256SubInternal<>(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ MOVD x0, y0
+ MOVD x1, y1
+ MOVD x2, y2
+ MOVD x3, y3
+ LDx(hcub)
+ CALL p256SubInternal<>(SB)
+
+ LDP 0*16(a_ptr), (acc0, acc1)
+ LDP 1*16(a_ptr), (acc2, acc3)
+ ANDS $1, hlp1, ZR // iff select[0] == 0, x3 = x1
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ LDP 0*16(b_ptr), (acc0, acc1)
+ LDP 1*16(b_ptr), (acc2, acc3)
+ ANDS $2, hlp1, ZR // iff select[1] == 0, x3 = x2
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ MOVD res+0(FP), t0
+ STP (x0, x1), 0*16(t0)
+ STP (x2, x3), 1*16(t0)
+
+ LDP h(0*8), (y0, y1)
+ LDP h(2*8), (y2, y3)
+ CALL p256SubInternal<>(SB)
+
+ LDP r(0*8), (y0, y1)
+ LDP r(2*8), (y2, y3)
+ CALL p256MulInternal<>(SB)
+
+ LDP s2(0*8), (x0, x1)
+ LDP s2(2*8), (x2, x3)
+ CALL p256SubInternal<>(SB)
+ LDP 2*16(a_ptr), (acc0, acc1)
+ LDP 3*16(a_ptr), (acc2, acc3)
+ ANDS $1, hlp1, ZR // iff select[0] == 0, y3 = y1
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ LDP y2in(0*8), (acc0, acc1)
+ LDP y2in(2*8), (acc2, acc3)
+ ANDS $2, hlp1, ZR // iff select[1] == 0, y3 = y2
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ MOVD res+0(FP), t0
+ STP (x0, x1), 2*16(t0)
+ STP (x2, x3), 3*16(t0)
+
+ RET
+
+#define p256AddInline \
+ ADDS y0, x0, x0; \
+ ADCS y1, x1, x1; \
+ ADCS y2, x2, x2; \
+ ADCS y3, x3, x3; \
+ ADC $0, ZR, hlp0; \
+ SUBS $-1, x0, t0; \
+ SBCS const0, x1, t1;\
+ SBCS $0, x2, t2; \
+ SBCS const1, x3, t3;\
+ SBCS $0, hlp0, hlp0;\
+ CSEL CC, x0, t0, x0;\
+ CSEL CC, x1, t1, x1;\
+ CSEL CC, x2, t2, x2;\
+ CSEL CC, x3, t3, x3;
+
+#define s(off) (32*0 + 8 + off)(RSP)
+#define m(off) (32*1 + 8 + off)(RSP)
+#define zsqr(off) (32*2 + 8 + off)(RSP)
+#define tmp(off) (32*3 + 8 + off)(RSP)
+
+// func p256PointDoubleAsm(res, in []uint64)
+TEXT ·p256PointDoubleAsm(SB),NOSPLIT,$136-48
+ MOVD res+0(FP), res_ptr
+ MOVD in+24(FP), a_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ // Begin point double
+ LDP 4*16(a_ptr), (x0, x1)
+ LDP 5*16(a_ptr), (x2, x3)
+ CALL p256SqrInternal<>(SB)
+ STP (y0, y1), zsqr(0*8)
+ STP (y2, y3), zsqr(2*8)
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+ p256AddInline
+ STx(m)
+
+ LDx(z1in)
+ LDy(y1in)
+ CALL p256MulInternal<>(SB)
+ p256MulBy2Inline
+ STx(z3out)
+
+ LDy(x1in)
+ LDx(zsqr)
+ CALL p256SubInternal<>(SB)
+ LDy(m)
+ CALL p256MulInternal<>(SB)
+
+ // Multiply by 3
+ p256MulBy2Inline
+ p256AddInline
+ STx(m)
+
+ LDy(y1in)
+ p256MulBy2Inline
+ CALL p256SqrInternal<>(SB)
+ STy(s)
+ MOVD y0, x0
+ MOVD y1, x1
+ MOVD y2, x2
+ MOVD y3, x3
+ CALL p256SqrInternal<>(SB)
+
+ // Divide by 2
+ ADDS $-1, y0, t0
+ ADCS const0, y1, t1
+ ADCS $0, y2, t2
+ ADCS const1, y3, t3
+ ADC $0, ZR, hlp0
+
+ ANDS $1, y0, ZR
+ CSEL EQ, y0, t0, t0
+ CSEL EQ, y1, t1, t1
+ CSEL EQ, y2, t2, t2
+ CSEL EQ, y3, t3, t3
+ AND y0, hlp0, hlp0
+
+ EXTR $1, t0, t1, y0
+ EXTR $1, t1, t2, y1
+ EXTR $1, t2, t3, y2
+ EXTR $1, t3, hlp0, y3
+ STy(y3out)
+
+ LDx(x1in)
+ LDy(s)
+ CALL p256MulInternal<>(SB)
+ STy(s)
+ p256MulBy2Inline
+ STx(tmp)
+
+ LDx(m)
+ CALL p256SqrInternal<>(SB)
+ LDx(tmp)
+ CALL p256SubInternal<>(SB)
+
+ STx(x3out)
+
+ LDy(s)
+ CALL p256SubInternal<>(SB)
+
+ LDy(m)
+ CALL p256MulInternal<>(SB)
+
+ LDx(y3out)
+ CALL p256SubInternal<>(SB)
+ STx(y3out)
+ RET
+/* ---------------------------------------*/
+#undef y2in
+#undef x3out
+#undef y3out
+#undef z3out
+#define y2in(off) (off + 32)(b_ptr)
+#define x3out(off) (off)(b_ptr)
+#define y3out(off) (off + 32)(b_ptr)
+#define z3out(off) (off + 64)(b_ptr)
+// func p256PointAddAsm(res, in1, in2 []uint64) int
+TEXT ·p256PointAddAsm(SB),0,$392-80
+ // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
+ // Move input to stack in order to free registers
+ MOVD in1+24(FP), a_ptr
+ MOVD in2+48(FP), b_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ // Begin point add
+ LDx(z2in)
+ CALL p256SqrInternal<>(SB) // z2^2
+ STy(z2sqr)
+
+ CALL p256MulInternal<>(SB) // z2^3
+
+ LDx(y1in)
+ CALL p256MulInternal<>(SB) // s1 = z2ˆ3*y1
+ STy(s1)
+
+ LDx(z1in)
+ CALL p256SqrInternal<>(SB) // z1^2
+ STy(z1sqr)
+
+ CALL p256MulInternal<>(SB) // z1^3
+
+ LDx(y2in)
+ CALL p256MulInternal<>(SB) // s2 = z1ˆ3*y2
+
+ LDx(s1)
+ CALL p256SubInternal<>(SB) // r = s2 - s1
+ STx(r)
+
+ MOVD $1, t2
+ ORR x0, x1, t0 // Check if zero mod p256
+ ORR x2, x3, t1
+ ORR t1, t0, t0
+ CMP $0, t0
+ CSEL EQ, t2, ZR, hlp1
+
+ EOR $-1, x0, t0
+ EOR const0, x1, t1
+ EOR const1, x3, t3
+
+ ORR t0, t1, t0
+ ORR x2, t3, t1
+ ORR t1, t0, t0
+ CMP $0, t0
+ CSEL EQ, t2, hlp1, hlp1
+
+ LDx(z2sqr)
+ LDy(x1in)
+ CALL p256MulInternal<>(SB) // u1 = x1 * z2ˆ2
+ STy(u1)
+
+ LDx(z1sqr)
+ LDy(x2in)
+ CALL p256MulInternal<>(SB) // u2 = x2 * z1ˆ2
+ STy(u2)
+
+ LDx(u1)
+ CALL p256SubInternal<>(SB) // h = u2 - u1
+ STx(h)
+
+ MOVD $1, t2
+ ORR x0, x1, t0 // Check if zero mod p256
+ ORR x2, x3, t1
+ ORR t1, t0, t0
+ CMP $0, t0
+ CSEL EQ, t2, ZR, hlp0
+
+ EOR $-1, x0, t0
+ EOR const0, x1, t1
+ EOR const1, x3, t3
+
+ ORR t0, t1, t0
+ ORR x2, t3, t1
+ ORR t1, t0, t0
+ CMP $0, t0
+ CSEL EQ, t2, hlp0, hlp0
+
+ AND hlp0, hlp1, hlp1
+
+ LDx(r)
+ CALL p256SqrInternal<>(SB) // rsqr = rˆ2
+ STy(rsqr)
+
+ LDx(h)
+ CALL p256SqrInternal<>(SB) // hsqr = hˆ2
+ STy(hsqr)
+
+ LDx(h)
+ CALL p256MulInternal<>(SB) // hcub = hˆ3
+ STy(hcub)
+
+ LDx(s1)
+ CALL p256MulInternal<>(SB)
+ STy(s2)
+
+ LDx(z1in)
+ LDy(z2in)
+ CALL p256MulInternal<>(SB) // z1 * z2
+ LDx(h)
+ CALL p256MulInternal<>(SB) // z1 * z2 * h
+ MOVD res+0(FP), b_ptr
+ STy(z3out)
+
+ LDx(hsqr)
+ LDy(u1)
+ CALL p256MulInternal<>(SB) // hˆ2 * u1
+ STy(u2)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+ LDy(rsqr)
+ CALL p256SubInternal<>(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ MOVD x0, y0
+ MOVD x1, y1
+ MOVD x2, y2
+ MOVD x3, y3
+ LDx(hcub)
+ CALL p256SubInternal<>(SB)
+ STx(x3out)
+
+ LDy(u2)
+ CALL p256SubInternal<>(SB)
+
+ LDy(r)
+ CALL p256MulInternal<>(SB)
+
+ LDx(s2)
+ CALL p256SubInternal<>(SB)
+ STx(y3out)
+
+ MOVD hlp1, R0
+ MOVD R0, ret+72(FP)
+
+ RET
diff --git a/src/crypto/elliptic/p256_asm_ppc64le.s b/src/crypto/elliptic/p256_asm_ppc64le.s
new file mode 100644
index 0000000..69e96e2
--- /dev/null
+++ b/src/crypto/elliptic/p256_asm_ppc64le.s
@@ -0,0 +1,2494 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// This is a port of the s390x asm implementation to ppc64le.
+
+// Some changes were needed due to differences in
+// the Go opcodes and/or available instructions
+// between s390x and ppc64le.
+
+// 1. There were operand order differences in the
+// VSUBUQM, VSUBCUQ, and VSEL instructions.
+
+// 2. ppc64 does not have a multiply high and low
+// like s390x, so those were implemented using
+// macros to compute the equivalent values.
+
+// 3. The LVX, STVX instructions on ppc64 require
+// 16 byte alignment of the data. To avoid that
+// requirement, data is loaded using LXVD2X and
+// STXVD2X with VPERM to reorder bytes correctly.
+
+// I have identified some areas that I believe would need changes to make
+// this work for big endian; additional changes beyond those noted are most
+// likely also required:
+// - The string used with VPERM to swap the byte order
+// for loads and stores.
+// - The EXTRACT_HI and EXTRACT_LO strings.
+// - The constants that are loaded from CPOOL.
+//
+
+// Permute string used by VPERM to reorder bytes
+// loaded or stored using LXVD2X or STXVD2X
+// on little endian.
+DATA byteswap<>+0(SB)/8, $0x08090a0b0c0d0e0f
+DATA byteswap<>+8(SB)/8, $0x0001020304050607
+
+// The following constants are defined in an order
+// that is correct for use with LXVD2X/STXVD2X
+// on little endian.
+DATA p256<>+0x00(SB)/8, $0xffffffff00000001 // P256
+DATA p256<>+0x08(SB)/8, $0x0000000000000000 // P256
+DATA p256<>+0x10(SB)/8, $0x00000000ffffffff // P256
+DATA p256<>+0x18(SB)/8, $0xffffffffffffffff // P256
+DATA p256<>+0x20(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x28(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x30(SB)/8, $0x0000000010111213 // SEL 0 d1 d0 0
+DATA p256<>+0x38(SB)/8, $0x1415161700000000 // SEL 0 d1 d0 0
+DATA p256<>+0x40(SB)/8, $0x18191a1b1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x48(SB)/8, $0x18191a1b1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x00(SB)/8, $0x00000000ffffffff // P256 original
+DATA p256mul<>+0x08(SB)/8, $0xffffffffffffffff // P256
+DATA p256mul<>+0x10(SB)/8, $0xffffffff00000001 // P256 original
+DATA p256mul<>+0x18(SB)/8, $0x0000000000000000 // P256
+DATA p256mul<>+0x20(SB)/8, $0x1c1d1e1f00000000 // SEL d0 0 0 d0
+DATA p256mul<>+0x28(SB)/8, $0x000000001c1d1e1f // SEL d0 0 0 d0
+DATA p256mul<>+0x30(SB)/8, $0x0001020304050607 // SEL d0 0 d1 d0
+DATA p256mul<>+0x38(SB)/8, $0x1c1d1e1f0c0d0e0f // SEL d0 0 d1 d0
+DATA p256mul<>+0x40(SB)/8, $0x040506071c1d1e1f // SEL 0 d1 d0 d1
+DATA p256mul<>+0x48(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL 0 d1 d0 d1
+DATA p256mul<>+0x50(SB)/8, $0x0405060704050607 // SEL 0 0 d1 d0
+DATA p256mul<>+0x58(SB)/8, $0x1c1d1e1f0c0d0e0f // SEL 0 0 d1 d0
+DATA p256mul<>+0x60(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x68(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x70(SB)/8, $0x141516170c0d0e0f // SEL 0 d1 d0 0
+DATA p256mul<>+0x78(SB)/8, $0x1c1d1e1f14151617 // SEL 0 d1 d0 0
+DATA p256mul<>+0x80(SB)/8, $0xffffffff00000000 // (1*2^256)%P256
+DATA p256mul<>+0x88(SB)/8, $0x0000000000000001 // (1*2^256)%P256
+DATA p256mul<>+0x90(SB)/8, $0x00000000fffffffe // (1*2^256)%P256
+DATA p256mul<>+0x98(SB)/8, $0xffffffffffffffff // (1*2^256)%P256
+
+// The following are used with VPERM to extract the high and low
+// values from the intermediate results of a vector multiply.
+// They are used in the VMULTxxx macros. These have been tested only on
+// little endian; I think they would have to be different for big endian.
+DATA p256permhilo<>+0x00(SB)/8, $0x0405060714151617 // least significant
+DATA p256permhilo<>+0x08(SB)/8, $0x0c0d0e0f1c1d1e1f
+DATA p256permhilo<>+0x10(SB)/8, $0x0001020310111213 // most significant
+DATA p256permhilo<>+0x18(SB)/8, $0x08090a0b18191a1b
+
+// External declarations for constants
+GLOBL p256ord<>(SB), 8, $32
+GLOBL p256<>(SB), 8, $80
+GLOBL p256mul<>(SB), 8, $160
+GLOBL p256permhilo<>(SB), 8, $32
+GLOBL byteswap<>+0(SB), RODATA, $16
+
+// The following macros are used to implement the ppc64le
+// equivalent function from the corresponding s390x
+// instruction for vector multiply high, low, and add,
+// since there aren't exact equivalent instructions.
+// The corresponding s390x instructions appear in the
+// comments.
+// An implementation for big endian would have to be
+// investigated; I think it would be different.
+//
+// Vector multiply low word
+//
+// VMLF x0, x1, out_low
+#define VMULT_LOW(x1, x2, out_low) \
+ VMULUWM x1, x2, out_low
+
+//
+// Vector multiply high word
+//
+// VMLHF x0, x1, out_hi
+#define VMULT_HI(x1, x2, out_hi) \
+ VMULEUW x1, x2, TMP1; \
+ VMULOUW x1, x2, TMP2; \
+ VPERM TMP1, TMP2, EXTRACT_HI, out_hi
+
+//
+// Vector multiply word
+//
+// VMLF x0, x1, out_low
+// VMLHF x0, x1, out_hi
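+// VMULEUW/VMULOUW form 64-bit products of the even/odd 32-bit lanes, and
+// the VPERMs with EXTRACT_LO/EXTRACT_HI gather the low and high 32-bit
+// halves of those products into the low-word and high-word results.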
+#define VMULT(x1, x2, out_low, out_hi) \
+ VMULEUW x1, x2, TMP1; \
+ VMULOUW x1, x2, TMP2; \
+ VPERM TMP1, TMP2, EXTRACT_LO, out_low; \
+ VPERM TMP1, TMP2, EXTRACT_HI, out_hi
+
+//
+// Vector multiply add word
+//
+// VMALF x0, x1, y, out_low
+// VMALHF x0, x1, y, out_hi
+#define VMULT_ADD(x1, x2, y, out_low, out_hi) \
+ VSPLTISW $1, TMP1; \
+ VMULEUW y, TMP1, TMP2; \
+ VMULOUW y, TMP1, TMP1; \
+ VMULEUW x1, x2, out_low; \
+ VMULOUW x1, x2, out_hi; \
+ VADDUDM TMP1, out_hi, TMP1; \
+ VADDUDM TMP2, out_low, TMP2; \
+ VPERM TMP2, TMP1, EXTRACT_LO, out_low; \
+ VPERM TMP2, TMP1, EXTRACT_HI, out_hi
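+
+// Note: multiplying y by a splat of 32-bit 1s (VMULEUW/VMULOUW
+// y, TMP1) simply zero-extends y's even and odd words into 64-bit
+// lanes, so the addend can be summed with the 64-bit partial
+// products before the hi/lo extraction, emulating s390x
+// VMALF/VMALHF.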
+
+//
+// Vector multiply add high word
+//
+// VMALF x0, x1, y, out_low
+// VMALHF x0, x1, y, out_hi
+#define VMULT_ADD_HI(x1, x2, y, out_low, out_hi) \
+ VSPLTISW $1, TMP1; \
+ VMULOUW y, TMP1, TMP2; \
+ VMULEUW y, TMP1, TMP1; \
+ VMULEUW x1, x2, out_hi; \
+ VMULOUW x1, x2, out_low; \
+ VADDUDM TMP1, out_hi, TMP1; \
+ VADDUDM TMP2, out_low, TMP2; \
+ VPERM TMP2, TMP1, EXTRACT_HI, out_hi
+
+//
+// Vector multiply add low word
+//
+// VMALF s0, x1, y, out_low
+#define VMULT_ADD_LOW(x1, x2, y, out_low) \
+ VMULUWM x1, x2, out_low; \
+ VADDUWM out_low, y, out_low
+
+// func p256NegCond(val *p256Point, cond int)
+#define P1ptr R3
+#define CPOOL R7
+
+#define Y1L V0
+#define Y1L_ VS32
+#define Y1H V1
+#define Y1H_ VS33
+#define T1L V2
+#define T1L_ VS34
+#define T1H V3
+#define T1H_ VS35
+
+#define SWAP V28
+#define SWAP_ VS60
+
+#define PL V30
+#define PL_ VS62
+#define PH V31
+#define PH_ VS63
+
+#define SEL1 V5
+#define SEL1_ VS37
+#define CAR1 V6
+//
+// iff cond == 1 val <- -val
+//
+TEXT ·p256NegCond(SB), NOSPLIT, $0-16
+ MOVD val+0(FP), P1ptr
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $40, R19
+
+ MOVD cond+8(FP), R6
+ CMP $0, R6
+ BC 12, 2, LR // just return if cond == 0
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ MOVD $byteswap<>+0x00(SB), R8
+ LXVD2X (R8)(R0), SWAP_
+
+ LXVD2X (P1ptr)(R17), Y1L_
+ LXVD2X (P1ptr)(R18), Y1H_
+
+ VPERM Y1H, Y1H, SWAP, Y1H
+ VPERM Y1L, Y1L, SWAP, Y1L
+
+ LXVD2X (CPOOL)(R0), PL_
+ LXVD2X (CPOOL)(R16), PH_
+
+ VSUBCUQ PL, Y1L, CAR1 // subtract part2 giving carry
+ VSUBUQM PL, Y1L, T1L // subtract part2 giving result
+ VSUBEUQM PH, Y1H, CAR1, T1H // subtract part1 using carry from part2
+
+ VPERM T1H, T1H, SWAP, T1H
+ VPERM T1L, T1L, SWAP, T1L
+
+ STXVD2X T1L_, (R17+P1ptr)
+ STXVD2X T1H_, (R18+P1ptr)
+ RET
+
+#undef P1ptr
+#undef CPOOL
+#undef Y1L
+#undef Y1L_
+#undef Y1H
+#undef Y1H_
+#undef T1L
+#undef T1L_
+#undef T1H
+#undef T1H_
+#undef PL
+#undef PL_
+#undef PH
+#undef PH_
+#undef SEL1
+#undef SEL1_
+#undef CAR1
+
+//
+// if cond == 0 res <- b; else res <- a
+//
+// func p256MovCond(res, a, b *p256Point, cond int)
+#define P3ptr R3
+#define P1ptr R4
+#define P2ptr R5
+
+#define FROMptr R7
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X1L_ VS32
+#define X1H_ VS33
+#define Y1L_ VS34
+#define Y1H_ VS35
+#define Z1L_ VS36
+#define Z1H_ VS37
+
+// This function uses LXVD2X and STXVD2X to avoid the
+// data alignment requirement for LVX, STVX. Since
+// this code is just moving bytes and not doing arithmetic,
+// order of the bytes doesn't matter.
+//
+TEXT ·p256MovCond(SB), NOSPLIT, $0-32
+ MOVD res+0(FP), P3ptr
+ MOVD a+8(FP), P1ptr
+ MOVD b+16(FP), P2ptr
+ MOVD cond+24(FP), R6
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $56, R21
+ MOVD $64, R19
+ MOVD $80, R20
+
+ // Check the condition
+ CMP $0, R6
+
+ // If 0, use b as the source
+ BEQ FROMB
+
+ // Not 0, use a as the source
+ MOVD P1ptr, FROMptr
+ BR LOADVALS
+
+FROMB:
+ MOVD P2ptr, FROMptr
+
+LOADVALS:
+ // Load from a or b depending on the setting
+ // of FROMptr
+ LXVW4X (FROMptr+R0), X1H_
+ LXVW4X (FROMptr+R16), X1L_
+ LXVW4X (FROMptr+R17), Y1H_
+ LXVW4X (FROMptr+R18), Y1L_
+ LXVW4X (FROMptr+R19), Z1H_
+ LXVW4X (FROMptr+R20), Z1L_
+
+ STXVW4X X1H_, (P3ptr+R0)
+ STXVW4X X1L_, (P3ptr+R16)
+ STXVW4X Y1H_, (P3ptr+R17)
+ STXVW4X Y1L_, (P3ptr+R18)
+ STXVW4X Z1H_, (P3ptr+R19)
+ STXVW4X Z1L_, (P3ptr+R20)
+
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef P2ptr
+#undef FROMptr
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X1L_
+#undef X1H_
+#undef Y1L_
+#undef Y1H_
+#undef Z1L_
+#undef Z1H_
+//
+// Select the point from the table for idx
+//
+// func p256Select(point *p256Point, table []p256Point, idx int)
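+//
+// The table is scanned in its entirety, accumulating the entry
+// whose position matches idx with VSEL, so the access pattern is
+// independent of the (secret) index, i.e. constant time.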
+#define P3ptr R3
+#define P1ptr R4
+#define COUNT R5
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X1L_ VS32
+#define X1H_ VS33
+#define Y1L_ VS34
+#define Y1H_ VS35
+#define Z1L_ VS36
+#define Z1H_ VS37
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+#define X2L_ VS38
+#define X2H_ VS39
+#define Y2L_ VS40
+#define Y2H_ VS41
+#define Z2L_ VS42
+#define Z2H_ VS43
+
+#define ONE V18
+#define IDX V19
+#define SEL1 V20
+#define SEL1_ VS52
+#define SEL2 V21
+//
+TEXT ·p256Select(SB), NOSPLIT, $0-40
+ MOVD point+0(FP), P3ptr
+ MOVD table+8(FP), P1ptr
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+
+ LXVDSX (R1)(R19), SEL1_ // VLREPG idx+32(FP), SEL1
+ VSPLTB $7, SEL1, IDX // splat byte
+ VSPLTISB $1, ONE // VREPIB $1, ONE
+ VSPLTISB $1, SEL2 // VREPIB $1, SEL2
+ MOVD $17, COUNT
+ MOVD COUNT, CTR // set up ctr
+
+ VSPLTISB $0, X1H // VZERO X1H
+ VSPLTISB $0, X1L // VZERO X1L
+ VSPLTISB $0, Y1H // VZERO Y1H
+ VSPLTISB $0, Y1L // VZERO Y1L
+ VSPLTISB $0, Z1H // VZERO Z1H
+ VSPLTISB $0, Z1L // VZERO Z1L
+
+loop_select:
+
+	// LXVD2X is used here since data alignment doesn't
+	// matter.
+
+ LXVD2X (P1ptr+R0), X2H_
+ LXVD2X (P1ptr+R16), X2L_
+ LXVD2X (P1ptr+R17), Y2H_
+ LXVD2X (P1ptr+R18), Y2L_
+ LXVD2X (P1ptr+R19), Z2H_
+ LXVD2X (P1ptr+R20), Z2L_
+
+ VCMPEQUD SEL2, IDX, SEL1 // VCEQG SEL2, IDX, SEL1 OK
+
+ // This will result in SEL1 being all 0s or 1s, meaning
+ // the result is either X1L or X2L, no individual byte
+ // selection.
+
+ VSEL X1L, X2L, SEL1, X1L
+ VSEL X1H, X2H, SEL1, X1H
+ VSEL Y1L, Y2L, SEL1, Y1L
+ VSEL Y1H, Y2H, SEL1, Y1H
+ VSEL Z1L, Z2L, SEL1, Z1L
+ VSEL Z1H, Z2H, SEL1, Z1H
+
+ // Add 1 to all bytes in SEL2
+ VADDUBM SEL2, ONE, SEL2 // VAB SEL2, ONE, SEL2 OK
+ ADD $96, P1ptr
+ BC 16, 0, loop_select
+
+ // STXVD2X is used here so that alignment doesn't
+ // need to be verified. Since values were loaded
+ // using LXVD2X this is OK.
+ STXVD2X X1H_, (P3ptr+R0)
+ STXVD2X X1L_, (P3ptr+R16)
+ STXVD2X Y1H_, (P3ptr+R17)
+ STXVD2X Y1L_, (P3ptr+R18)
+ STXVD2X Z1H_, (P3ptr+R19)
+ STXVD2X Z1L_, (P3ptr+R20)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef COUNT
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef X2L_
+#undef X2H_
+#undef Y2L_
+#undef Y2H_
+#undef Z2L_
+#undef Z2H_
+#undef ONE
+#undef IDX
+#undef SEL1
+#undef SEL1_
+#undef SEL2
+
+// func p256SelectBase(point, table []uint64, idx int)
+#define P3ptr R3
+#define P1ptr R4
+#define COUNT R5
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+#define X2L_ VS38
+#define X2H_ VS39
+#define Y2L_ VS40
+#define Y2H_ VS41
+#define Z2L_ VS42
+#define Z2H_ VS43
+
+#define ONE V18
+#define IDX V19
+#define SEL1 V20
+#define SEL1_ VS52
+#define SEL2 V21
+TEXT ·p256SelectBase(SB), NOSPLIT, $0-40
+ MOVD point+0(FP), P3ptr
+ MOVD table+8(FP), P1ptr
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+ MOVD $56, R21
+
+ LXVDSX (R1)(R19), SEL1_
+ VSPLTB $7, SEL1, IDX // splat byte
+
+ VSPLTISB $1, ONE // Vector with byte 1s
+ VSPLTISB $1, SEL2 // Vector with byte 1s
+ MOVD $65, COUNT
+ MOVD COUNT, CTR // loop count
+
+ VSPLTISB $0, X1H // VZERO X1H
+ VSPLTISB $0, X1L // VZERO X1L
+ VSPLTISB $0, Y1H // VZERO Y1H
+ VSPLTISB $0, Y1L // VZERO Y1L
+ VSPLTISB $0, Z1H // VZERO Z1H
+ VSPLTISB $0, Z1L // VZERO Z1L
+
+loop_select:
+ LXVD2X (P1ptr+R0), X2H_
+ LXVD2X (P1ptr+R16), X2L_
+ LXVD2X (P1ptr+R17), Y2H_
+ LXVD2X (P1ptr+R18), Y2L_
+ LXVD2X (P1ptr+R19), Z2H_
+ LXVD2X (P1ptr+R20), Z2L_
+
+ VCMPEQUD SEL2, IDX, SEL1 // Compare against idx
+
+ VSEL X1L, X2L, SEL1, X1L // Select if idx matched
+ VSEL X1H, X2H, SEL1, X1H
+ VSEL Y1L, Y2L, SEL1, Y1L
+ VSEL Y1H, Y2H, SEL1, Y1H
+ VSEL Z1L, Z2L, SEL1, Z1L
+ VSEL Z1H, Z2H, SEL1, Z1H
+
+ VADDUBM SEL2, ONE, SEL2 // Increment SEL2 bytes by 1
+ ADD $96, P1ptr // Next chunk
+ BC 16, 0, loop_select
+
+ STXVD2X X1H_, (P3ptr+R0)
+ STXVD2X X1L_, (P3ptr+R16)
+ STXVD2X Y1H_, (P3ptr+R17)
+ STXVD2X Y1L_, (P3ptr+R18)
+ STXVD2X Z1H_, (P3ptr+R19)
+ STXVD2X Z1L_, (P3ptr+R20)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef COUNT
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef X1L_
+#undef X1H_
+#undef X2L_
+#undef X2H_
+#undef Y1L_
+#undef Y1H_
+#undef Y2L_
+#undef Y2H_
+#undef Z1L_
+#undef Z1H_
+#undef Z2L_
+#undef Z2H_
+#undef ONE
+#undef IDX
+#undef SEL1
+#undef SEL1_
+#undef SEL2
+#undef SWAP
+#undef SWAP_
+
+// ---------------------------------------
+// func p256FromMont(res, in []byte)
+#define res_ptr R3
+#define x_ptr R4
+#define CPOOL R7
+
+#define T0 V0
+#define T0_ VS32
+#define T1 V1
+#define T1_ VS33
+#define T2 V2
+#define TT0 V3
+#define TT1 V4
+#define TT0_ VS35
+#define TT1_ VS36
+
+#define ZER V6
+#define SEL1 V7
+#define SEL1_ VS39
+#define SEL2 V8
+#define SEL2_ VS40
+#define CAR1 V9
+#define CAR2 V10
+#define RED1 V11
+#define RED2 V12
+#define PL V13
+#define PL_ VS45
+#define PH V14
+#define PH_ VS46
+#define SWAP V28
+#define SWAP_ VS57
+
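+// p256FromMont converts out of Montgomery form, computing
+// res = in * 2^-256 mod P256. Since P256 = -1 mod 2^64, the
+// Montgomery multiplier for each round is the low limb d0 itself,
+// so each of the four rounds below in effect adds d0*P (built
+// with the SEL permutes) and shifts right 64 bits (VSLDOI $8);
+// a final conditional subtraction of P lands the result in [0, P).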
+TEXT ·p256FromMont(SB), NOSPLIT, $0-48
+ MOVD res+0(FP), res_ptr
+ MOVD in+24(FP), x_ptr
+
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $p256<>+0x00(SB), CPOOL
+ MOVD $byteswap<>+0x00(SB), R15
+
+ VSPLTISB $0, T2 // VZERO T2
+ VSPLTISB $0, ZER // VZERO ZER
+
+	// The constants are laid out so that LXVD2X loads them correctly
+ LXVD2X (CPOOL+R0), PH_
+ LXVD2X (CPOOL+R16), PL_
+
+ // VPERM byte selections
+ LXVD2X (CPOOL+R18), SEL2_
+ LXVD2X (CPOOL+R19), SEL1_
+
+ LXVD2X (R15)(R0), SWAP_
+
+ LXVD2X (R16)(x_ptr), T1_
+ LXVD2X (R0)(x_ptr), T0_
+
+ // Put in true little endian order
+ VPERM T0, T0, SWAP, T0
+ VPERM T1, T1, SWAP, T1
+
+ // First round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSUBUQM RED2, RED1, RED2 // VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDOI $8, T1, T0, T0 // VSLDB $8, T1, T0, T0
+ VSLDOI $8, T2, T1, T1 // VSLDB $8, T2, T1, T1
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ T0, RED1, CAR1
+ VADDUQM T0, RED1, T0 // VAQ T0, RED1, T0
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ T1, RED2, CAR1, CAR2
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ T1, RED2, CAR1, T1
+ VADDUQM T2, CAR2, T2 // VAQ T2, CAR2, T2
+
+ // Second round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSUBUQM RED2, RED1, RED2 // VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDOI $8, T1, T0, T0 // VSLDB $8, T1, T0, T0
+ VSLDOI $8, T2, T1, T1 // VSLDB $8, T2, T1, T1
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ T0, RED1, CAR1
+ VADDUQM T0, RED1, T0 // VAQ T0, RED1, T0
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ T1, RED2, CAR1, CAR2
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ T1, RED2, CAR1, T1
+ VADDUQM T2, CAR2, T2 // VAQ T2, CAR2, T2
+
+ // Third round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSUBUQM RED2, RED1, RED2 // VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDOI $8, T1, T0, T0 // VSLDB $8, T1, T0, T0
+ VSLDOI $8, T2, T1, T1 // VSLDB $8, T2, T1, T1
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ T0, RED1, CAR1
+ VADDUQM T0, RED1, T0 // VAQ T0, RED1, T0
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ T1, RED2, CAR1, CAR2
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ T1, RED2, CAR1, T1
+ VADDUQM T2, CAR2, T2 // VAQ T2, CAR2, T2
+
+ // Last round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSUBUQM RED2, RED1, RED2 // VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDOI $8, T1, T0, T0 // VSLDB $8, T1, T0, T0
+ VSLDOI $8, T2, T1, T1 // VSLDB $8, T2, T1, T1
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ T0, RED1, CAR1
+ VADDUQM T0, RED1, T0 // VAQ T0, RED1, T0
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ T1, RED2, CAR1, CAR2
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ T1, RED2, CAR1, T1
+ VADDUQM T2, CAR2, T2 // VAQ T2, CAR2, T2
+
+ // ---------------------------------------------------
+
+ VSUBCUQ T0, PL, CAR1 // VSCBIQ PL, T0, CAR1
+ VSUBUQM T0, PL, TT0 // VSQ PL, T0, TT0
+ VSUBECUQ T1, PH, CAR1, CAR2 // VSBCBIQ T1, PH, CAR1, CAR2
+ VSUBEUQM T1, PH, CAR1, TT1 // VSBIQ T1, PH, CAR1, TT1
+ VSUBEUQM T2, ZER, CAR2, T2 // VSBIQ T2, ZER, CAR2, T2
+
+ VSEL TT0, T0, T2, T0
+ VSEL TT1, T1, T2, T1
+
+ // Reorder the bytes so STXVD2X can be used.
+	// TT0, TT1 hold the VPERM results in case
+	// the caller expects T0, T1 to be preserved.
+ VPERM T0, T0, SWAP, TT0
+ VPERM T1, T1, SWAP, TT1
+
+ STXVD2X TT0_, (R0)(res_ptr)
+ STXVD2X TT1_, (R16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef CPOOL
+#undef T0
+#undef T0_
+#undef T1
+#undef T1_
+#undef T2
+#undef TT0
+#undef TT1
+#undef ZER
+#undef SEL1
+#undef SEL1_
+#undef SEL2
+#undef SEL2_
+#undef CAR1
+#undef CAR2
+#undef RED1
+#undef RED2
+#undef PL
+#undef PL_
+#undef PH
+#undef PH_
+#undef SWAP
+#undef SWAP_
+
+// ---------------------------------------
+// p256MulInternal
+// V0-V3 V30,V31 - Not Modified
+// V4-V15 V27-V29 - Volatile
+
+#define CPOOL R7
+
+// Parameters
+#define X0 V0 // Not modified
+#define X1 V1 // Not modified
+#define Y0 V2 // Not modified
+#define Y1 V3 // Not modified
+#define T0 V4 // Result
+#define T1 V5 // Result
+#define P0 V30 // Not modified
+#define P1 V31 // Not modified
+
+// Temporaries: lots of reused vector regs
+#define YDIG V6 // Overloaded with CAR2
+#define ADD1H V7 // Overloaded with ADD3H
+#define ADD2H V8 // Overloaded with ADD4H
+#define ADD3 V9 // Overloaded with SEL2,SEL5
+#define ADD4 V10 // Overloaded with SEL3,SEL6
+#define RED1 V11 // Overloaded with CAR2
+#define RED2 V12
+#define RED3 V13 // Overloaded with SEL1
+#define T2 V14
+// Overloaded temporaries
+#define ADD1 V4 // Overloaded with T0
+#define ADD2 V5 // Overloaded with T1
+#define ADD3H V7 // Overloaded with ADD1H
+#define ADD4H V8 // Overloaded with ADD2H
+#define ZER V28 // Overloaded with TMP1
+#define CAR1 V6 // Overloaded with YDIG
+#define CAR2 V11 // Overloaded with RED1
+// Constant Selects
+#define SEL1 V13 // Overloaded with RED3
+#define SEL2 V9 // Overloaded with ADD3,SEL5
+#define SEL3 V10 // Overloaded with ADD4,SEL6
+#define SEL4 V6 // Overloaded with YDIG,CAR1
+#define SEL5 V9 // Overloaded with ADD3,SEL2
+#define SEL6 V10 // Overloaded with ADD4,SEL3
+#define SEL1_ VS45
+#define SEL2_ VS41
+#define SEL3_ VS42
+#define SEL4_ VS38
+#define SEL5_ VS41
+#define SEL6_ VS42
+
+// TMP1, TMP2, EXTRACT_LO, EXTRACT_HI used in
+// VMULT macros
+#define TMP1 V13 // Overloaded with RED3
+#define TMP2 V27
+#define EVENODD R5
+#define EXTRACT_LO V28
+#define EXTRACT_LO_ VS60
+#define EXTRACT_HI V29
+#define EXTRACT_HI_ VS61
+
+/* *
+ * To follow the flow of bits, for your own sanity a stiff drink, need you shall.
+ * Of a single round, a 'helpful' picture, here is. Meaning, column position has.
+ * With you, SIMD be...
+ *
+ * +--------+--------+
+ * +--------| RED2 | RED1 |
+ * | +--------+--------+
+ * | ---+--------+--------+
+ * | +---- T2| T1 | T0 |--+
+ * | | ---+--------+--------+ |
+ * | | |
+ * | | ======================= |
+ * | | |
+ * | | +--------+--------+<-+
+ * | +-------| ADD2 | ADD1 |--|-----+
+ * | | +--------+--------+ | |
+ * | | +--------+--------+<---+ |
+ * | | | ADD2H | ADD1H |--+ |
+ * | | +--------+--------+ | |
+ * | | +--------+--------+<-+ |
+ * | | | ADD4 | ADD3 |--|-+ |
+ * | | +--------+--------+ | | |
+ * | | +--------+--------+<---+ | |
+ * | | | ADD4H | ADD3H |------|-+ |(+vzero)
+ * | | +--------+--------+ | | V
+ * | | ------------------------ | | +--------+
+ * | | | | | RED3 | [d0 0 0 d0]
+ * | | | | +--------+
+ * | +---->+--------+--------+ | | |
+ * (T2[1w]||ADD2[4w]||ADD1[3w]) +--------| T1 | T0 | | | |
+ * | +--------+--------+ | | |
+ * +---->---+--------+--------+ | | |
+ * T2| T1 | T0 |----+ | |
+ * ---+--------+--------+ | | |
+ * ---+--------+--------+<---+ | |
+ * +--- T2| T1 | T0 |----------+
+ * | ---+--------+--------+ | |
+ * | +--------+--------+<-------------+
+ * | | RED2 | RED1 |-----+ | | [0 d1 d0 d1] [d0 0 d1 d0]
+ * | +--------+--------+ | | |
+ * | +--------+<----------------------+
+ * | | RED3 |--------------+ | [0 0 d1 d0]
+ * | +--------+ | |
+ * +--->+--------+--------+ | |
+ * | T1 | T0 |--------+
+ * +--------+--------+ | |
+ * --------------------------- | |
+ * | |
+ * +--------+--------+<----+ |
+ * | RED2 | RED1 | |
+ * +--------+--------+ |
+ * ---+--------+--------+<-------+
+ * T2| T1 | T0 | (H1P-H1P-H00RRAY!)
+ * ---+--------+--------+
+ *
+ * *Mi obra de arte de siglo XXI @vpaprots
+ *
+ *
+ * First group is special, doesn't get the two inputs:
+ * +--------+--------+<-+
+ * +-------| ADD2 | ADD1 |--|-----+
+ * | +--------+--------+ | |
+ * | +--------+--------+<---+ |
+ * | | ADD2H | ADD1H |--+ |
+ * | +--------+--------+ | |
+ * | +--------+--------+<-+ |
+ * | | ADD4 | ADD3 |--|-+ |
+ * | +--------+--------+ | | |
+ * | +--------+--------+<---+ | |
+ * | | ADD4H | ADD3H |------|-+ |(+vzero)
+ * | +--------+--------+ | | V
+ * | ------------------------ | | +--------+
+ * | | | | RED3 | [d0 0 0 d0]
+ * | | | +--------+
+ * +---->+--------+--------+ | | |
+ * (T2[1w]||ADD2[4w]||ADD1[3w]) | T1 | T0 |----+ | |
+ * +--------+--------+ | | |
+ * ---+--------+--------+<---+ | |
+ * +--- T2| T1 | T0 |----------+
+ * | ---+--------+--------+ | |
+ * | +--------+--------+<-------------+
+ * | | RED2 | RED1 |-----+ | | [0 d1 d0 d1] [d0 0 d1 d0]
+ * | +--------+--------+ | | |
+ * | +--------+<----------------------+
+ * | | RED3 |--------------+ | [0 0 d1 d0]
+ * | +--------+ | |
+ * +--->+--------+--------+ | |
+ * | T1 | T0 |--------+
+ * +--------+--------+ | |
+ * --------------------------- | |
+ * | |
+ * +--------+--------+<----+ |
+ * | RED2 | RED1 | |
+ * +--------+--------+ |
+ * ---+--------+--------+<-------+
+ * T2| T1 | T0 | (H1P-H1P-H00RRAY!)
+ * ---+--------+--------+
+ *
+ * The last 'group' needs RED2||RED1 shifted less.
+ */
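+//
+// In short: p256MulInternal computes the Montgomery product
+// T1||T0 = X1||X0 * Y1||Y0 * 2^-256 mod P256, consuming one
+// 32-bit digit of Y per step (VSPLTW) and interleaving the
+// reduction, with a final conditional subtraction of P at the end.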
+TEXT p256MulInternal<>(SB), NOSPLIT, $0-16
+ // CPOOL loaded from caller
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+ MOVD $96, R21
+ MOVD $112, R22
+
+ MOVD $p256permhilo<>+0x00(SB), EVENODD
+
+ // These values are used by the VMULTxxx macros to
+ // extract the high and low portions of the intermediate
+ // result.
+ LXVD2X (R0)(EVENODD), EXTRACT_LO_
+ LXVD2X (R16)(EVENODD), EXTRACT_HI_
+
+ // ---------------------------------------------------
+
+ VSPLTW $3, Y0, YDIG // VREPF Y0 is input
+
+ // VMLHF X0, YDIG, ADD1H
+ // VMLHF X1, YDIG, ADD2H
+ // VMLF X0, YDIG, ADD1
+ // VMLF X1, YDIG, ADD2
+ //
+ VMULT(X0, YDIG, ADD1, ADD1H)
+ VMULT(X1, YDIG, ADD2, ADD2H)
+
+ VSPLTW $2, Y0, YDIG // VREPF
+
+ // VMALF X0, YDIG, ADD1H, ADD3
+ // VMALF X1, YDIG, ADD2H, ADD4
+ // VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free
+ // VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free
+ VMULT_ADD(X0, YDIG, ADD1H, ADD3, ADD3H)
+ VMULT_ADD(X1, YDIG, ADD2H, ADD4, ADD4H)
+
+ LXVD2X (R17)(CPOOL), SEL1_
+ VSPLTISB $0, ZER // VZERO ZER
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDOI $12, ADD2, ADD1, T0 // ADD1 Free // VSLDB
+ VSLDOI $12, ZER, ADD2, T1 // ADD2 Free // VSLDB
+
+ VADDCUQ T0, ADD3, CAR1 // VACCQ
+ VADDUQM T0, ADD3, T0 // ADD3 Free // VAQ
+ VADDECUQ T1, ADD4, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4, CAR1, T1 // ADD4 Free // VACQ
+
+ LXVD2X (R18)(CPOOL), SEL2_
+ LXVD2X (R19)(CPOOL), SEL3_
+ LXVD2X (R20)(CPOOL), SEL4_
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSUBUQM RED2, RED3, RED2 // Guaranteed not to underflow -->? // VSQ
+
+ VSLDOI $12, T1, T0, T0 // VSLDB
+ VSLDOI $12, T2, T1, T1 // VSLDB
+
+ VADDCUQ T0, ADD3H, CAR1 // VACCQ
+ VADDUQM T0, ADD3H, T0 // VAQ
+ VADDECUQ T1, ADD4H, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4H, CAR1, T1 // VACQ
+
+ // ---------------------------------------------------
+
+ VSPLTW $1, Y0, YDIG // VREPF
+ LXVD2X (R0)(EVENODD), EXTRACT_LO_
+ LXVD2X (R16)(EVENODD), EXTRACT_HI_
+
+ // VMALHF X0, YDIG, T0, ADD1H
+ // VMALHF X1, YDIG, T1, ADD2H
+ // VMALF X0, YDIG, T0, ADD1 // T0 Free->ADD1
+ // VMALF X1, YDIG, T1, ADD2 // T1 Free->ADD2
+ VMULT_ADD(X0, YDIG, T0, ADD1, ADD1H)
+ VMULT_ADD(X1, YDIG, T1, ADD2, ADD2H)
+
+ VSPLTW $0, Y0, YDIG // VREPF
+
+ // VMALF X0, YDIG, ADD1H, ADD3
+ // VMALF X1, YDIG, ADD2H, ADD4
+ // VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free->ADD3H
+ // VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free->ADD4H , YDIG Free->ZER
+ VMULT_ADD(X0, YDIG, ADD1H, ADD3, ADD3H)
+ VMULT_ADD(X1, YDIG, ADD2H, ADD4, ADD4H)
+
+ VSPLTISB $0, ZER // VZERO ZER
+ LXVD2X (R17)(CPOOL), SEL1_
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDOI $12, ADD2, ADD1, T0 // ADD1 Free->T0 // VSLDB
+ VSLDOI $12, T2, ADD2, T1 // ADD2 Free->T1, T2 Free // VSLDB
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ
+ VADDUQM T0, RED1, T0 // VAQ
+ VADDECUQ T1, RED2, CAR1, T2 // VACCCQ
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ
+
+ VADDCUQ T0, ADD3, CAR1 // VACCQ
+ VADDUQM T0, ADD3, T0 // VAQ
+ VADDECUQ T1, ADD4, CAR1, CAR2 // VACCCQ
+ VADDEUQM T1, ADD4, CAR1, T1 // VACQ
+ VADDUQM T2, CAR2, T2 // VAQ
+
+ LXVD2X (R18)(CPOOL), SEL2_
+ LXVD2X (R19)(CPOOL), SEL3_
+ LXVD2X (R20)(CPOOL), SEL4_
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSUBUQM RED2, RED3, RED2 // Guaranteed not to underflow // VSQ
+
+ VSLDOI $12, T1, T0, T0 // VSLDB
+ VSLDOI $12, T2, T1, T1 // VSLDB
+
+ VADDCUQ T0, ADD3H, CAR1 // VACCQ
+ VADDUQM T0, ADD3H, T0 // VAQ
+ VADDECUQ T1, ADD4H, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4H, CAR1, T1 // VACQ
+
+ // ---------------------------------------------------
+
+ VSPLTW $3, Y1, YDIG // VREPF
+ LXVD2X (R0)(EVENODD), EXTRACT_LO_
+ LXVD2X (R16)(EVENODD), EXTRACT_HI_
+
+ // VMALHF X0, YDIG, T0, ADD1H
+ // VMALHF X1, YDIG, T1, ADD2H
+ // VMALF X0, YDIG, T0, ADD1
+ // VMALF X1, YDIG, T1, ADD2
+ VMULT_ADD(X0, YDIG, T0, ADD1, ADD1H)
+ VMULT_ADD(X1, YDIG, T1, ADD2, ADD2H)
+
+ VSPLTW $2, Y1, YDIG // VREPF
+
+ // VMALF X0, YDIG, ADD1H, ADD3
+ // VMALF X1, YDIG, ADD2H, ADD4
+ // VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free
+ // VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free
+ VMULT_ADD(X0, YDIG, ADD1H, ADD3, ADD3H)
+ VMULT_ADD(X1, YDIG, ADD2H, ADD4, ADD4H)
+
+	VSPLTISB $0, ZER // VZERO ZER
+	LXVD2X (R17)(CPOOL), SEL1_
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDOI $12, ADD2, ADD1, T0 // ADD1 Free // VSLDB
+ VSLDOI $12, T2, ADD2, T1 // ADD2 Free // VSLDB
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ
+ VADDUQM T0, RED1, T0 // VAQ
+ VADDECUQ T1, RED2, CAR1, T2 // VACCCQ
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ
+
+ VADDCUQ T0, ADD3, CAR1 // VACCQ
+ VADDUQM T0, ADD3, T0 // VAQ
+ VADDECUQ T1, ADD4, CAR1, CAR2 // VACCCQ
+ VADDEUQM T1, ADD4, CAR1, T1 // VACQ
+ VADDUQM T2, CAR2, T2 // VAQ
+
+ LXVD2X (R18)(CPOOL), SEL2_
+ LXVD2X (R19)(CPOOL), SEL3_
+ LXVD2X (R20)(CPOOL), SEL4_
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSUBUQM RED2, RED3, RED2 // Guaranteed not to underflow // VSQ
+
+ VSLDOI $12, T1, T0, T0 // VSLDB
+ VSLDOI $12, T2, T1, T1 // VSLDB
+
+ VADDCUQ T0, ADD3H, CAR1 // VACCQ
+ VADDUQM T0, ADD3H, T0 // VAQ
+ VADDECUQ T1, ADD4H, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4H, CAR1, T1 // VACQ
+
+ // ---------------------------------------------------
+
+ VSPLTW $1, Y1, YDIG // VREPF
+ LXVD2X (R0)(EVENODD), EXTRACT_LO_
+ LXVD2X (R16)(EVENODD), EXTRACT_HI_
+
+ // VMALHF X0, YDIG, T0, ADD1H
+ // VMALHF X1, YDIG, T1, ADD2H
+ // VMALF X0, YDIG, T0, ADD1
+ // VMALF X1, YDIG, T1, ADD2
+ VMULT_ADD(X0, YDIG, T0, ADD1, ADD1H)
+ VMULT_ADD(X1, YDIG, T1, ADD2, ADD2H)
+
+ VSPLTW $0, Y1, YDIG // VREPF
+
+ // VMALF X0, YDIG, ADD1H, ADD3
+ // VMALF X1, YDIG, ADD2H, ADD4
+ // VMALHF X0, YDIG, ADD1H, ADD3H
+ // VMALHF X1, YDIG, ADD2H, ADD4H
+ VMULT_ADD(X0, YDIG, ADD1H, ADD3, ADD3H)
+ VMULT_ADD(X1, YDIG, ADD2H, ADD4, ADD4H)
+
+ VSPLTISB $0, ZER // VZERO ZER
+ LXVD2X (R17)(CPOOL), SEL1_
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDOI $12, ADD2, ADD1, T0 // VSLDB
+ VSLDOI $12, T2, ADD2, T1 // VSLDB
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ
+ VADDUQM T0, RED1, T0 // VAQ
+ VADDECUQ T1, RED2, CAR1, T2 // VACCCQ
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ
+
+ VADDCUQ T0, ADD3, CAR1 // VACCQ
+ VADDUQM T0, ADD3, T0 // VAQ
+ VADDECUQ T1, ADD4, CAR1, CAR2 // VACCCQ
+ VADDEUQM T1, ADD4, CAR1, T1 // VACQ
+ VADDUQM T2, CAR2, T2 // VAQ
+
+ LXVD2X (R21)(CPOOL), SEL5_
+ LXVD2X (R22)(CPOOL), SEL6_
+ VPERM T0, RED3, SEL5, RED2 // [d1 d0 d1 d0]
+ VPERM T0, RED3, SEL6, RED1 // [ 0 d1 d0 0]
+ VSUBUQM RED2, RED1, RED2 // Guaranteed not to underflow // VSQ
+
+ VSLDOI $12, T1, T0, T0 // VSLDB
+ VSLDOI $12, T2, T1, T1 // VSLDB
+
+ VADDCUQ T0, ADD3H, CAR1 // VACCQ
+ VADDUQM T0, ADD3H, T0 // VAQ
+ VADDECUQ T1, ADD4H, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4H, CAR1, T1 // VACQ
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ
+ VADDUQM T0, RED1, T0 // VAQ
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ
+ VADDUQM T2, CAR2, T2 // VAQ
+
+ // ---------------------------------------------------
+
+ VSPLTISB $0, RED3 // VZERO RED3
+ VSUBCUQ T0, P0, CAR1 // VSCBIQ
+ VSUBUQM T0, P0, ADD1H // VSQ
+ VSUBECUQ T1, P1, CAR1, CAR2 // VSBCBIQ
+ VSUBEUQM T1, P1, CAR1, ADD2H // VSBIQ
+ VSUBEUQM T2, RED3, CAR2, T2 // VSBIQ
+
+	// Select the output: the reduced ADD2H||ADD1H or T1||T0, based on the final borrow in T2
+ VSEL ADD1H, T0, T2, T0
+ VSEL ADD2H, T1, T2, T1
+ RET
+
+#undef CPOOL
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+
+#undef SEL1
+#undef SEL2
+#undef SEL3
+#undef SEL4
+#undef SEL5
+#undef SEL6
+#undef SEL1_
+#undef SEL2_
+#undef SEL3_
+#undef SEL4_
+#undef SEL5_
+#undef SEL6_
+
+#undef YDIG
+#undef ADD1H
+#undef ADD2H
+#undef ADD3
+#undef ADD4
+#undef RED1
+#undef RED2
+#undef RED3
+#undef T2
+#undef ADD1
+#undef ADD2
+#undef ADD3H
+#undef ADD4H
+#undef ZER
+#undef CAR1
+#undef CAR2
+
+#undef TMP1
+#undef TMP2
+#undef EVENODD
+#undef EXTRACT_HI
+#undef EXTRACT_HI_
+#undef EXTRACT_LO
+#undef EXTRACT_LO_
+
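+// p256SubInternal computes T = X - Y mod P: it forms the raw
+// 256-bit difference and, if the subtraction borrowed, adds P
+// back; the borrow is expanded into a full-width mask (SEL1)
+// that drives the final VSELs.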
+#define p256SubInternal(T1, T0, X1, X0, Y1, Y0) \
+ VSPLTISB $0, ZER \ // VZERO
+ VSUBCUQ X0, Y0, CAR1 \
+ VSUBUQM X0, Y0, T0 \
+ VSUBECUQ X1, Y1, CAR1, SEL1 \
+ VSUBEUQM X1, Y1, CAR1, T1 \
+ VSUBUQM ZER, SEL1, SEL1 \ // VSQ
+ \
+ VADDCUQ T0, PL, CAR1 \ // VACCQ
+ VADDUQM T0, PL, TT0 \ // VAQ
+ VADDEUQM T1, PH, CAR1, TT1 \ // VACQ
+ \
+ VSEL TT0, T0, SEL1, T0 \
+ VSEL TT1, T1, SEL1, T1 \
+
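+// p256AddInternal computes T = X + Y mod P: it adds, then
+// subtracts P, keeping the un-reduced sum only when that
+// subtraction would underflow (i.e. the sum was already < P).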
+#define p256AddInternal(T1, T0, X1, X0, Y1, Y0) \
+ VADDCUQ X0, Y0, CAR1 \
+ VADDUQM X0, Y0, T0 \
+ VADDECUQ X1, Y1, CAR1, T2 \ // VACCCQ
+ VADDEUQM X1, Y1, CAR1, T1 \
+ \
+ VSPLTISB $0, ZER \
+ VSUBCUQ T0, PL, CAR1 \ // VSCBIQ
+ VSUBUQM T0, PL, TT0 \
+ VSUBECUQ T1, PH, CAR1, CAR2 \ // VSBCBIQ
+ VSUBEUQM T1, PH, CAR1, TT1 \ // VSBIQ
+ VSUBEUQM T2, ZER, CAR2, SEL1 \
+ \
+ VSEL TT0, T0, SEL1, T0 \
+ VSEL TT1, T1, SEL1, T1
+
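+// p256HalfInternal computes T = X / 2 mod P (multiplication by
+// the inverse of 2). If X is even it is shifted right one bit;
+// if odd, P is added first (the sum is then even, with the 257th
+// bit kept in T2) and the combined value is shifted right.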
+#define p256HalfInternal(T1, T0, X1, X0) \
+ VSPLTISB $0, ZER \
+ VSUBEUQM ZER, ZER, X0, SEL1 \
+ \
+ VADDCUQ X0, PL, CAR1 \
+ VADDUQM X0, PL, T0 \
+ VADDECUQ X1, PH, CAR1, T2 \
+ VADDEUQM X1, PH, CAR1, T1 \
+ \
+ VSEL T0, X0, SEL1, T0 \
+ VSEL T1, X1, SEL1, T1 \
+ VSEL T2, ZER, SEL1, T2 \
+ \
+ VSLDOI $15, T2, ZER, TT1 \
+ VSLDOI $15, T1, ZER, TT0 \
+ VSPLTISB $1, SEL1 \
+ VSR T0, SEL1, T0 \ // VSRL
+ VSR T1, SEL1, T1 \
+ VSPLTISB $7, SEL1 \ // VREPIB
+ VSL TT0, SEL1, TT0 \
+ VSL TT1, SEL1, TT1 \
+ VOR T0, TT0, T0 \
+ VOR T1, TT1, T1
+
+// ---------------------------------------
+// func p256MulAsm(res, in1, in2 []byte)
+#define res_ptr R3
+#define x_ptr R4
+#define y_ptr R5
+#define CPOOL R7
+#define TEMP R8
+
+// Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+#define X0_ VS32
+#define X1_ VS33
+#define Y0_ VS34
+#define Y1_ VS35
+#define T0_ VS36
+#define T1_ VS37
+#define SWAP V28
+#define SWAP_ VS60
+
+// Constants
+#define P0 V30
+#define P1 V31
+#define P0_ VS62
+#define P1_ VS63
+//
+// Montgomery multiplication modulo P256
+//
+TEXT ·p256MulAsm(SB), NOSPLIT, $0-72
+ MOVD res+0(FP), res_ptr
+ MOVD in1+24(FP), x_ptr
+ MOVD in2+48(FP), y_ptr
+ MOVD $16, R16
+ MOVD $32, R17
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ MOVD $byteswap<>+0x00(SB), R8
+
+ LXVD2X (R8)(R0), SWAP_
+
+ LXVD2X (R0)(x_ptr), X0_
+ LXVD2X (R16)(x_ptr), X1_
+
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+
+ LXVD2X (R0)(y_ptr), Y0_
+ LXVD2X (R16)(y_ptr), Y1_
+
+ VPERM Y0, Y0, SWAP, Y0
+ VPERM Y1, Y1, SWAP, Y1
+
+ LXVD2X (R16)(CPOOL), P1_
+ LXVD2X (R0)(CPOOL), P0_
+
+ CALL p256MulInternal<>(SB)
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ MOVD $byteswap<>+0x00(SB), R8
+
+ LXVD2X (R8)(R0), SWAP_
+
+ VPERM T0, T0, SWAP, T0
+ VPERM T1, T1, SWAP, T1
+ STXVD2X T0_, (R0)(res_ptr)
+ STXVD2X T1_, (R16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+#undef CPOOL
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+#undef X0_
+#undef X1_
+#undef Y0_
+#undef Y1_
+#undef T0_
+#undef T1_
+#undef P0_
+#undef P1_
+
+// Point add with P2 being an affine point.
+// If sign == 1 -> P2 = -P2
+// If sel == 0 -> P3 = P1
+// If zero == 0 -> P3 = P2
+// func p256PointAddAffineAsm(res, in1, in2 *p256Point, sign, sel, zero int)
+#define P3ptr R3
+#define P1ptr R4
+#define P2ptr R5
+#define CPOOL R7
+
+// Temporaries in REGs
+#define Y2L V15
+#define Y2H V16
+#define Y2L_ VS47
+#define Y2H_ VS48
+#define T1L V17
+#define T1H V18
+#define T2L V19
+#define T2H V20
+#define T3L V21
+#define T3H V22
+#define T4L V23
+#define T4H V24
+
+// Temps for Sub and Add
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define X0_ VS32
+#define X1_ VS33
+#define Y0 V2
+#define Y1 V3
+#define Y0_ VS34
+#define Y1_ VS35
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+#define PL_ VS62
+#define PH_ VS63
+
+// Names for zero/sel selects
+#define X1L V0
+#define X1H V1
+#define X1L_ VS32
+#define X1H_ VS33
+#define Y1L V2 // p256MulAsmParmY
+#define Y1H V3 // p256MulAsmParmY
+#define Y1L_ VS34
+#define Y1H_ VS35
+#define Z1L V4
+#define Z1H V5
+#define Z1L_ VS36
+#define Z1H_ VS37
+#define X2L V0
+#define X2H V1
+#define X2L_ VS32
+#define X2H_ VS33
+#define Z2L V4
+#define Z2H V5
+#define Z2L_ VS36
+#define Z2H_ VS37
+#define X3L V17 // T1L
+#define X3H V18 // T1H
+#define Y3L V21 // T3L
+#define Y3H V22 // T3H
+#define Z3L V25
+#define Z3H V26
+#define X3L_ VS49
+#define X3H_ VS50
+#define Y3L_ VS53
+#define Y3H_ VS54
+#define Z3L_ VS57
+#define Z3H_ VS58
+
+#define ZER V6
+#define SEL1 V7
+#define SEL1_ VS39
+#define CAR1 V8
+#define CAR2 V9
+/* *
+ * Three operand formula:
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ * T1 = Z1²
+ * T2 = T1*Z1
+ * T1 = T1*X2
+ * T2 = T2*Y2
+ * T1 = T1-X1
+ * T2 = T2-Y1
+ * Z3 = Z1*T1
+ * T3 = T1²
+ * T4 = T3*T1
+ * T3 = T3*X1
+ * T1 = 2*T3
+ * X3 = T2²
+ * X3 = X3-T1
+ * X3 = X3-T4
+ * T3 = T3-X3
+ * T3 = T3*T2
+ * T4 = T4*Y1
+ * Y3 = T3-T4
+
+ * Three-operand formulas, but with the MulInternal X,Y registers used to store temps
+X=Z1; Y=Z1; MUL;T- // T1 = Z1² T1
+X=T ; Y- ; MUL;T2=T // T2 = T1*Z1 T1 T2
+X- ; Y=X2; MUL;T1=T // T1 = T1*X2 T1 T2
+X=T2; Y=Y2; MUL;T- // T2 = T2*Y2 T1 T2
+SUB(T2<T-Y1) // T2 = T2-Y1 T1 T2
+SUB(Y<T1-X1) // T1 = T1-X1 T1 T2
+X=Z1; Y- ; MUL;Z3:=T// Z3 = Z1*T1 T2
+X=Y; Y- ; MUL;X=T // T3 = T1*T1 T2
+X- ; Y- ; MUL;T4=T // T4 = T3*T1 T2 T4
+X- ; Y=X1; MUL;T3=T // T3 = T3*X1 T2 T3 T4
+ADD(T1<T+T) // T1 = T3+T3 T1 T2 T3 T4
+X=T2; Y=T2; MUL;T- // X3 = T2*T2 T1 T2 T3 T4
+SUB(T<T-T1) // X3 = X3-T1 T1 T2 T3 T4
+SUB(T<T-T4) X3:=T // X3 = X3-T4 T2 T3 T4
+SUB(X<T3-T) // T3 = T3-X3 T2 T3 T4
+X- ; Y- ; MUL;T3=T // T3 = T3*T2 T2 T3 T4
+X=T4; Y=Y1; MUL;T- // T4 = T4*Y1 T3 T4
+SUB(T<T3-T) Y3:=T // Y3 = T3-T4 T3 T4
+
+ */
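+/* *
+ * Note that because P2 is an affine point (implicitly Z2 = 1),
+ * the general Jacobian addition simplifies: the Z2^2 and Z2^3
+ * terms drop out, which is why the formula above needs only
+ * 8 multiplications and 3 squarings.
+ */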
+//
+// V27 is clobbered by p256MulInternal so must be
+// saved in a temp.
+//
+TEXT ·p256PointAddAffineAsm(SB), NOSPLIT, $16-48
+ MOVD res+0(FP), P3ptr
+ MOVD in1+8(FP), P1ptr
+ MOVD in2+16(FP), P2ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+ MOVD $96, R21
+ MOVD $112, R22
+ MOVD $128, R23
+ MOVD $144, R24
+ MOVD $160, R25
+ MOVD $104, R26 // offset of sign+24(FP)
+
+	MOVD $byteswap<>+0x00(SB), R8
+ LXVD2X (R16)(CPOOL), PH_
+ LXVD2X (R0)(CPOOL), PL_
+
+ // if (sign == 1) {
+ // Y2 = fromBig(new(big.Int).Mod(new(big.Int).Sub(p256.P, new(big.Int).SetBytes(Y2)), p256.P)) // Y2 = P-Y2
+ // }
+
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R17)(P2ptr), Y2L_
+ LXVD2X (R18)(P2ptr), Y2H_
+ VPERM Y2H, Y2H, SWAP, Y2H
+ VPERM Y2L, Y2L, SWAP, Y2L
+
+ // Equivalent of VLREPG sign+24(FP), SEL1
+ LXVDSX (R1)(R26), SEL1_
+ VSPLTISB $0, ZER
+ VCMPEQUD SEL1, ZER, SEL1
+
+ VSUBCUQ PL, Y2L, CAR1
+ VSUBUQM PL, Y2L, T1L
+ VSUBEUQM PH, Y2H, CAR1, T1H
+
+ VSEL T1L, Y2L, SEL1, Y2L
+ VSEL T1H, Y2H, SEL1, Y2H
+
+/* *
+ * Three operand formula:
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ */
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1² T1
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R19)(P1ptr), X0_ // Z1H
+ LXVD2X (R20)(P1ptr), X1_ // Z1L
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X=T ; Y- ; MUL; T2=T // T2 = T1*Z1 T1 T2
+ VOR T0, T0, X0
+ VOR T1, T1, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T2L
+ VOR T1, T1, T2H
+
+ // X- ; Y=X2; MUL; T1=T // T1 = T1*X2 T1 T2
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R0)(P2ptr), Y0_ // X2H
+ LXVD2X (R16)(P2ptr), Y1_ // X2L
+ VPERM Y0, Y0, SWAP, Y0
+ VPERM Y1, Y1, SWAP, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T1L
+ VOR T1, T1, T1H
+
+ // X=T2; Y=Y2; MUL; T- // T2 = T2*Y2 T1 T2
+ VOR T2L, T2L, X0
+ VOR T2H, T2H, X1
+ VOR Y2L, Y2L, Y0
+ VOR Y2H, Y2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T2<T-Y1) // T2 = T2-Y1 T1 T2
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R17)(P1ptr), Y1L_
+ LXVD2X (R18)(P1ptr), Y1H_
+ VPERM Y1H, Y1H, SWAP, Y1H
+ VPERM Y1L, Y1L, SWAP, Y1L
+ p256SubInternal(T2H,T2L,T1,T0,Y1H,Y1L)
+
+ // SUB(Y<T1-X1) // T1 = T1-X1 T1 T2
+ LXVD2X (R0)(P1ptr), X1L_
+ LXVD2X (R16)(P1ptr), X1H_
+ VPERM X1H, X1H, SWAP, X1H
+ VPERM X1L, X1L, SWAP, X1L
+ p256SubInternal(Y1,Y0,T1H,T1L,X1H,X1L)
+
+ // X=Z1; Y- ; MUL; Z3:=T// Z3 = Z1*T1 T2
+ LXVD2X (R19)(P1ptr), X0_ // Z1H
+ LXVD2X (R20)(P1ptr), X1_ // Z1L
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+ CALL p256MulInternal<>(SB)
+
+ VOR T0, T0, Z3L
+ VOR T1, T1, Z3H
+
+ // X=Y; Y- ; MUL; X=T // T3 = T1*T1 T2
+ VOR Y0, Y0, X0
+ VOR Y1, Y1, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, X0
+ VOR T1, T1, X1
+
+ // X- ; Y- ; MUL; T4=T // T4 = T3*T1 T2 T4
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T4L
+ VOR T1, T1, T4H
+
+ // X- ; Y=X1; MUL; T3=T // T3 = T3*X1 T2 T3 T4
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R0)(P1ptr), Y0_ // X1H
+ LXVD2X (R16)(P1ptr), Y1_ // X1L
+ VPERM Y1, Y1, SWAP, Y1
+ VPERM Y0, Y0, SWAP, Y0
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T3L
+ VOR T1, T1, T3H
+
+ // ADD(T1<T+T) // T1 = T3+T3 T1 T2 T3 T4
+ p256AddInternal(T1H,T1L, T1,T0,T1,T0)
+
+ // X=T2; Y=T2; MUL; T- // X3 = T2*T2 T1 T2 T3 T4
+ VOR T2L, T2L, X0
+ VOR T2H, T2H, X1
+ VOR T2L, T2L, Y0
+ VOR T2H, T2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<T-T1) // X3 = X3-T1 T1 T2 T3 T4 (T1 = X3)
+ p256SubInternal(T1,T0,T1,T0,T1H,T1L)
+
+ // SUB(T<T-T4) X3:=T // X3 = X3-T4 T2 T3 T4
+ p256SubInternal(T1,T0,T1,T0,T4H,T4L)
+ VOR T0, T0, X3L
+ VOR T1, T1, X3H
+
+ // SUB(X<T3-T) // T3 = T3-X3 T2 T3 T4
+ p256SubInternal(X1,X0,T3H,T3L,T1,T0)
+
+ // X- ; Y- ; MUL; T3=T // T3 = T3*T2 T2 T3 T4
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T3L
+ VOR T1, T1, T3H
+
+ // X=T4; Y=Y1; MUL; T- // T4 = T4*Y1 T3 T4
+ VOR T4L, T4L, X0
+ VOR T4H, T4H, X1
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R17)(P1ptr), Y0_ // Y1H
+ LXVD2X (R18)(P1ptr), Y1_ // Y1L
+ VPERM Y0, Y0, SWAP, Y0
+ VPERM Y1, Y1, SWAP, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<T3-T) Y3:=T // Y3 = T3-T4 T3 T4 (T3 = Y3)
+ p256SubInternal(Y3H,Y3L,T3H,T3L,T1,T0)
+
+ // if (sel == 0) {
+ // copy(P3.x[:], X1)
+ // copy(P3.y[:], Y1)
+ // copy(P3.z[:], Z1)
+ // }
+
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R0)(P1ptr), X1L_
+ LXVD2X (R16)(P1ptr), X1H_
+ VPERM X1H, X1H, SWAP, X1H
+ VPERM X1L, X1L, SWAP, X1L
+
+ // Y1 already loaded, left over from addition
+ LXVD2X (R19)(P1ptr), Z1L_
+ LXVD2X (R20)(P1ptr), Z1H_
+ VPERM Z1H, Z1H, SWAP, Z1H
+ VPERM Z1L, Z1L, SWAP, Z1L
+
+	MOVD $112, R26 // offset of sel+32(FP)
+ LXVDSX (R1)(R26), SEL1_
+ VSPLTISB $0, ZER
+ VCMPEQUD SEL1, ZER, SEL1
+
+ VSEL X3L, X1L, SEL1, X3L
+ VSEL X3H, X1H, SEL1, X3H
+ VSEL Y3L, Y1L, SEL1, Y3L
+ VSEL Y3H, Y1H, SEL1, Y3H
+ VSEL Z3L, Z1L, SEL1, Z3L
+ VSEL Z3H, Z1H, SEL1, Z3H
+
+ // if (zero == 0) {
+ // copy(P3.x[:], X2)
+ // copy(P3.y[:], Y2)
+ // copy(P3.z[:], []byte{0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ // 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) //(p256.z*2^256)%p
+ // }
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R0)(P2ptr), X2L_
+ LXVD2X (R16)(P2ptr), X2H_
+ VPERM X2H, X2H, SWAP, X2H
+ VPERM X2L, X2L, SWAP, X2L
+
+ // Y2 already loaded
+ LXVD2X (R23)(CPOOL), Z2L_
+ LXVD2X (R24)(CPOOL), Z2H_
+
+	MOVD $120, R26 // offset of zero+40(FP)
+ LXVDSX (R1)(R26), SEL1_
+ VSPLTISB $0, ZER
+ VCMPEQUD SEL1, ZER, SEL1
+
+ VSEL X3L, X2L, SEL1, X3L
+ VSEL X3H, X2H, SEL1, X3H
+ VSEL Y3L, Y2L, SEL1, Y3L
+ VSEL Y3H, Y2H, SEL1, Y3H
+ VSEL Z3L, Z2L, SEL1, Z3L
+ VSEL Z3H, Z2H, SEL1, Z3H
+
+ // Reorder the bytes so they can be stored using STXVD2X.
+ MOVD res+0(FP), P3ptr
+ VPERM X3H, X3H, SWAP, X3H
+ VPERM X3L, X3L, SWAP, X3L
+ VPERM Y3H, Y3H, SWAP, Y3H
+ VPERM Y3L, Y3L, SWAP, Y3L
+ VPERM Z3H, Z3H, SWAP, Z3H
+ VPERM Z3L, Z3L, SWAP, Z3L
+ STXVD2X X3L_, (R0)(P3ptr)
+ STXVD2X X3H_, (R16)(P3ptr)
+ STXVD2X Y3L_, (R17)(P3ptr)
+ STXVD2X Y3H_, (R18)(P3ptr)
+ STXVD2X Z3L_, (R19)(P3ptr)
+ STXVD2X Z3H_, (R20)(P3ptr)
+
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef P2ptr
+#undef CPOOL
+#undef SWAP
+#undef SWAP_
+
+#undef Y2L
+#undef Y2H
+#undef Y2L_
+#undef Y2H_
+#undef T1L
+#undef T1H
+#undef T2L
+#undef T2H
+#undef T3L
+#undef T3H
+#undef T4L
+#undef T4H
+
+#undef TT0
+#undef TT1
+#undef TT0_
+#undef TT1_
+#undef T2
+
+#undef X0
+#undef X1
+#undef X0_
+#undef X1_
+#undef Y0
+#undef Y1
+#undef Y0_
+#undef Y1_
+#undef T0
+#undef T1
+
+#undef PL
+#undef PH
+#undef PL_
+#undef PH_
+
+#undef X1L
+#undef X1H
+#undef X1L_
+#undef X1H_
+#undef Y1L
+#undef Y1H
+#undef Y1L_
+#undef Y1H_
+#undef Z1L
+#undef Z1H
+#undef Z1L_
+#undef Z1H_
+#undef X2L
+#undef X2H
+#undef X2L_
+#undef X2H_
+#undef Z2L
+#undef Z2H
+#undef Z2L_
+#undef Z2H_
+#undef X3L
+#undef X3H
+#undef X3L_
+#undef X3H_
+#undef Y3L
+#undef Y3H
+#undef Y3L_
+#undef Y3H_
+#undef Z3L
+#undef Z3H
+#undef Z3L_
+#undef Z3H_
+
+#undef ZER
+#undef SEL1
+#undef SEL1_
+#undef CAR1
+#undef CAR2
+
+// func p256PointDoubleAsm(res, in *p256Point)
+// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl
+// http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
+// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-projective-3.html
+#define P3ptr R3
+#define P1ptr R4
+#define CPOOL R7
+
+// Temporaries in REGs
+#define X3L V15
+#define X3H V16
+#define X3L_ VS47
+#define X3H_ VS48
+#define Y3L V17
+#define Y3H V18
+#define Y3L_ VS49
+#define Y3H_ VS50
+#define T1L V19
+#define T1H V20
+#define T2L V21
+#define T2H V22
+#define T3L V23
+#define T3H V24
+
+#define X1L V6
+#define X1H V7
+#define X1L_ VS38
+#define X1H_ VS39
+#define Y1L V8
+#define Y1H V9
+#define Y1L_ VS40
+#define Y1H_ VS41
+#define Z1L V10
+#define Z1H V11
+
+// Temps for Sub and Add
+#define TT0 V11
+#define TT1 V12
+#define TT0_ VS43
+#define TT1_ VS44
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define X0_ VS32
+#define X1_ VS33
+#define Y0 V2
+#define Y1 V3
+#define Y0_ VS34
+#define Y1_ VS35
+#define T0 V4
+#define T1 V5
+#define T0_ VS36
+#define T1_ VS37
+
+#define PL V30
+#define PH V31
+#define PL_ VS62
+#define PH_ VS63
+
+#define Z3L V23
+#define Z3H V24
+
+#define SWAP V25
+#define SWAP_ VS57
+#define ZER V26
+#define SEL1 V27
+#define CAR1 V28
+#define CAR2 V29
+/*
+ * http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2004-hmv
+ * Cost: 4M + 4S + 1*half + 5add + 2*2 + 1*3.
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ * A = 3(X₁-Z₁²)×(X₁+Z₁²)
+ * B = 2Y₁
+ * Z₃ = B×Z₁
+ * C = B²
+ * D = C×X₁
+ * X₃ = A²-2D
+ * Y₃ = (D-X₃)×A-C²/2
+ *
+ * Three-operand formula:
+ * T1 = Z1²
+ * T2 = X1-T1
+ * T1 = X1+T1
+ * T2 = T2*T1
+ * T2 = 3*T2
+ * Y3 = 2*Y1
+ * Z3 = Y3*Z1
+ * Y3 = Y3²
+ * T3 = Y3*X1
+ * Y3 = Y3²
+ * Y3 = half*Y3
+ * X3 = T2²
+ * T1 = 2*T3
+ * X3 = X3-T1
+ * T1 = T3-X3
+ * T1 = T1*T2
+ * Y3 = T1-Y3
+ */
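+// A note on the formula above: for a = -3 curves such as P256,
+// A = 3*X1^2 + a*Z1^4 factors as 3*(X1 - Z1^2)*(X1 + Z1^2),
+// trading a squaring and a multiplication by a constant for a
+// single multiplication; hence the sub/add pair feeding T2 = T2*T1.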
+
+TEXT ·p256PointDoubleAsm(SB), NOSPLIT, $0-16
+ MOVD res+0(FP), P3ptr
+ MOVD in+8(FP), P1ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ MOVD $byteswap<>+0x00(SB), R15
+
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+
+ LXVD2X (R16)(CPOOL), PH_
+ LXVD2X (R0)(CPOOL), PL_
+
+ LXVD2X (R15)(R0), SWAP_
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1²
+ LXVD2X (R19)(P1ptr), X0_ // Z1H
+ LXVD2X (R20)(P1ptr), X1_ // Z1L
+
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(X<X1-T) // T2 = X1-T1
+ LXVD2X (R0)(P1ptr), X1L_
+ LXVD2X (R16)(P1ptr), X1H_
+ VPERM X1L, X1L, SWAP, X1L
+ VPERM X1H, X1H, SWAP, X1H
+
+ p256SubInternal(X1,X0,X1H,X1L,T1,T0)
+
+ // ADD(Y<X1+T) // T1 = X1+T1
+ p256AddInternal(Y1,Y0,X1H,X1L,T1,T0)
+
+ // X- ; Y- ; MUL; T- // T2 = T2*T1
+ CALL p256MulInternal<>(SB)
+
+ // ADD(T2<T+T); ADD(T2<T2+T) // T2 = 3*T2
+ p256AddInternal(T2H,T2L,T1,T0,T1,T0)
+ p256AddInternal(T2H,T2L,T2H,T2L,T1,T0)
+
+ // ADD(X<Y1+Y1) // Y3 = 2*Y1
+ LXVD2X (R15)(R0), SWAP_
+ LXVD2X (R17)(P1ptr), Y1L_
+ LXVD2X (R18)(P1ptr), Y1H_
+ VPERM Y1L, Y1L, SWAP, Y1L
+ VPERM Y1H, Y1H, SWAP, Y1H
+
+ p256AddInternal(X1,X0,Y1H,Y1L,Y1H,Y1L)
+
+ // X- ; Y=Z1; MUL; Z3:=T // Z3 = Y3*Z1
+ LXVD2X (R15)(R0), SWAP_
+ LXVD2X (R19)(P1ptr), Y0_
+ LXVD2X (R20)(P1ptr), Y1_
+ VPERM Y0, Y0, SWAP, Y0
+ VPERM Y1, Y1, SWAP, Y1
+
+ CALL p256MulInternal<>(SB)
+
+ LXVD2X (R15)(R0), SWAP_
+
+ // Leave T0, T1 as is.
+ VPERM T0, T0, SWAP, TT0
+ VPERM T1, T1, SWAP, TT1
+ STXVD2X TT0_, (R19)(P3ptr)
+ STXVD2X TT1_, (R20)(P3ptr)
+
+ // X- ; Y=X ; MUL; T- // Y3 = Y3²
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X=T ; Y=X1; MUL; T3=T // T3 = Y3*X1
+ VOR T0, T0, X0
+ VOR T1, T1, X1
+ LXVD2X (R15)(R0), SWAP_
+ LXVD2X (R0)(P1ptr), Y0_
+ LXVD2X (R16)(P1ptr), Y1_
+ VPERM Y0, Y0, SWAP, Y0
+ VPERM Y1, Y1, SWAP, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T3L
+ VOR T1, T1, T3H
+
+ // X- ; Y=X ; MUL; T- // Y3 = Y3²
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // HAL(Y3<T) // Y3 = half*Y3
+ p256HalfInternal(Y3H,Y3L, T1,T0)
+
+ // X=T2; Y=T2; MUL; T- // X3 = T2²
+ VOR T2L, T2L, X0
+ VOR T2H, T2H, X1
+ VOR T2L, T2L, Y0
+ VOR T2H, T2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // ADD(T1<T3+T3) // T1 = 2*T3
+ p256AddInternal(T1H,T1L,T3H,T3L,T3H,T3L)
+
+ // SUB(X3<T-T1) X3:=X3 // X3 = X3-T1
+ p256SubInternal(X3H,X3L,T1,T0,T1H,T1L)
+
+ LXVD2X (R15)(R0), SWAP_
+ VPERM X3L, X3L, SWAP, TT0
+ VPERM X3H, X3H, SWAP, TT1
+ STXVD2X TT0_, (R0)(P3ptr)
+ STXVD2X TT1_, (R16)(P3ptr)
+
+ // SUB(X<T3-X3) // T1 = T3-X3
+ p256SubInternal(X1,X0,T3H,T3L,X3H,X3L)
+
+ // X- ; Y- ; MUL; T- // T1 = T1*T2
+ CALL p256MulInternal<>(SB)
+
+ // SUB(Y3<T-Y3) // Y3 = T1-Y3
+ p256SubInternal(Y3H,Y3L,T1,T0,Y3H,Y3L)
+
+ LXVD2X (R15)(R0), SWAP_
+ VPERM Y3L, Y3L, SWAP, Y3L
+ VPERM Y3H, Y3H, SWAP, Y3H
+ STXVD2X Y3L_, (R17)(P3ptr)
+ STXVD2X Y3H_, (R18)(P3ptr)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef CPOOL
+#undef X3L
+#undef X3H
+#undef X3L_
+#undef X3H_
+#undef Y3L
+#undef Y3H
+#undef Y3L_
+#undef Y3H_
+#undef T1L
+#undef T1H
+#undef T2L
+#undef T2H
+#undef T3L
+#undef T3H
+#undef X1L
+#undef X1H
+#undef X1L_
+#undef X1H_
+#undef Y1L
+#undef Y1H
+#undef Y1L_
+#undef Y1H_
+#undef Z1L
+#undef Z1H
+#undef TT0
+#undef TT1
+#undef TT0_
+#undef TT1_
+#undef T2
+#undef X0
+#undef X1
+#undef X0_
+#undef X1_
+#undef Y0
+#undef Y1
+#undef Y0_
+#undef Y1_
+#undef T0
+#undef T1
+#undef T0_
+#undef T1_
+#undef PL
+#undef PH
+#undef PL_
+#undef PH_
+#undef Z3L
+#undef Z3H
+#undef ZER
+#undef SEL1
+#undef CAR1
+#undef CAR2
+#undef SWAP
+#undef SWAP_
+
+// func p256PointAddAsm(res, in1, in2 *p256Point) int
+#define P3ptr R3
+#define P1ptr R4
+#define P2ptr R5
+#define CPOOL R7
+#define TRUE R14
+#define RES1 R9
+#define RES2 R10
+
+// Temporaries in REGs
+#define T1L V16
+#define T1H V17
+#define T2L V18
+#define T2H V19
+#define U1L V20
+#define U1H V21
+#define S1L V22
+#define S1H V23
+#define HL V24
+#define HH V25
+#define RL V26
+#define RH V27
+#define RH_ VS59
+
+// Temps for Sub and Add
+#define ZER V6
+#define SEL1 V7
+#define CAR1 V8
+#define CAR2 V9
+#define TT0 V11
+#define TT0_ VS43
+#define TT1 V12
+#define TT1_ VS44
+#define T2 V13
+
+#define SWAP V28
+#define SWAP_ VS60
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define X0_ VS32
+#define X1_ VS33
+#define Y0 V2
+#define Y1 V3
+#define Y0_ VS34
+#define Y1_ VS35
+#define T0 V4
+#define T1 V5
+#define T0_ VS36
+#define T1_ VS37
+
+#define PL V30
+#define PH V31
+#define PL_ VS62
+#define PH_ VS63
+/*
+ * https://choucroutage.com/Papers/SideChannelAttacks/ctrsa-2011-brown.pdf "Software Implementation of the NIST Elliptic Curves Over Prime Fields"
+ *
+ * A = X₁×Z₂²
+ * B = Y₁×Z₂³
+ * C = X₂×Z₁²-A
+ * D = Y₂×Z₁³-B
+ * X₃ = D² - 2A×C² - C³
+ * Y₃ = D×(A×C² - X₃) - B×C³
+ * Z₃ = Z₁×Z₂×C
+ *
+ * Three-operand formula (adopted): http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-1998-cmo-2
+ * Temp storage: T1,T2,U1,H,Z3=X3=Y3,S1,R
+ *
+ * T1 = Z1*Z1
+ * T2 = Z2*Z2
+ * U1 = X1*T2
+ * H = X2*T1
+ * H = H-U1
+ * Z3 = Z1*Z2
+ * Z3 = Z3*H << store-out Z3 result reg.. could override Z1, if slices have same backing array
+ *
+ * S1 = Z2*T2
+ * S1 = Y1*S1
+ * R = Z1*T1
+ * R = Y2*R
+ * R = R-S1
+ *
+ * T1 = H*H
+ * T2 = H*T1
+ * U1 = U1*T1
+ *
+ * X3 = R*R
+ * X3 = X3-T2
+ * T1 = 2*U1
+ * X3 = X3-T1 << store-out X3 result reg
+ *
+ * T2 = S1*T2
+ * Y3 = U1-X3
+ * Y3 = R*Y3
+ * Y3 = Y3-T2 << store-out Y3 result reg
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1*Z1
+ // X- ; Y=T ; MUL; R=T // R = Z1*T1
+ // X=X2; Y- ; MUL; H=T // H = X2*T1
+ // X=Z2; Y=Z2; MUL; T- // T2 = Z2*Z2
+ // X- ; Y=T ; MUL; S1=T // S1 = Z2*T2
+ // X=X1; Y- ; MUL; U1=T // U1 = X1*T2
+ // SUB(H<H-T) // H = H-U1
+ // X=Z1; Y=Z2; MUL; T- // Z3 = Z1*Z2
+ // X=T ; Y=H ; MUL; Z3:=T// Z3 = Z3*H << store-out Z3 result reg.. could override Z1, if slices have same backing array
+ // X=Y1; Y=S1; MUL; S1=T // S1 = Y1*S1
+ // X=Y2; Y=R ; MUL; T- // R = Y2*R
+ // SUB(R<T-S1) // R = R-S1
+ // X=H ; Y=H ; MUL; T- // T1 = H*H
+ // X- ; Y=T ; MUL; T2=T // T2 = H*T1
+ // X=U1; Y- ; MUL; U1=T // U1 = U1*T1
+ // X=R ; Y=R ; MUL; T- // X3 = R*R
+ // SUB(T<T-T2) // X3 = X3-T2
+ // ADD(X<U1+U1) // T1 = 2*U1
+ // SUB(T<T-X) X3:=T // X3 = X3-T1 << store-out X3 result reg
+ // SUB(Y<U1-T) // Y3 = U1-X3
+ // X=R ; Y- ; MUL; U1=T // Y3 = R*Y3
+ // X=S1; Y=T2; MUL; T- // T2 = S1*T2
+ // SUB(T<U1-T); Y3:=T // Y3 = Y3-T2 << store-out Y3 result reg
+ */
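+// Besides the sum, the routine reports a condition in ret:
+// ret is 1 only when both H and R vanish mod P, i.e. the two
+// inputs are the same point, in which case the caller must use
+// the doubling routine instead.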
+TEXT ·p256PointAddAsm(SB), NOSPLIT, $16-32
+ MOVD res+0(FP), P3ptr
+ MOVD in1+8(FP), P1ptr
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+
+ MOVD $byteswap<>+0x00(SB), R8
+ LXVD2X (R16)(CPOOL), PH_
+ LXVD2X (R0)(CPOOL), PL_
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1*Z1
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R19)(P1ptr), X0_ // Z1L
+ LXVD2X (R20)(P1ptr), X1_ // Z1H
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X- ; Y=T ; MUL; R=T // R = Z1*T1
+ VOR T0, T0, Y0
+ VOR T1, T1, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, RL // SAVE: RL
+ VOR T1, T1, RH // SAVE: RH
+
+ STXVD2X RH_, (R1)(R17) // V27 has to be saved
+
+ // X=X2; Y- ; MUL; H=T // H = X2*T1
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R0)(P2ptr), X0_ // X2L
+ LXVD2X (R16)(P2ptr), X1_ // X2H
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, HL // SAVE: HL
+ VOR T1, T1, HH // SAVE: HH
+
+ // X=Z2; Y=Z2; MUL; T- // T2 = Z2*Z2
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R19)(P2ptr), X0_ // Z2L
+ LXVD2X (R20)(P2ptr), X1_ // Z2H
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X- ; Y=T ; MUL; S1=T // S1 = Z2*T2
+ VOR T0, T0, Y0
+ VOR T1, T1, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, S1L // SAVE: S1L
+ VOR T1, T1, S1H // SAVE: S1H
+
+ // X=X1; Y- ; MUL; U1=T // U1 = X1*T2
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R0)(P1ptr), X0_ // X1L
+ LXVD2X (R16)(P1ptr), X1_ // X1H
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, U1L // SAVE: U1L
+ VOR T1, T1, U1H // SAVE: U1H
+
+ // SUB(H<H-T) // H = H-U1
+ p256SubInternal(HH,HL,HH,HL,T1,T0)
+
+ // if H == 0 or H^P == 0 then ret=1 else ret=0
+ // clobbers T1H and T1L
+ MOVD $1, TRUE
+ VSPLTISB $0, ZER
+ VOR HL, HH, T1H
+ VCMPEQUDCC ZER, T1H, T1H
+
+ // 26 = CR6 NE
+ ISEL $26, R0, TRUE, RES1
+ VXOR HL, PL, T1L // SAVE: T1L
+ VXOR HH, PH, T1H // SAVE: T1H
+ VOR T1L, T1H, T1H
+ VCMPEQUDCC ZER, T1H, T1H
+
+ // 26 = CR6 NE
+ ISEL $26, R0, TRUE, RES2
+ OR RES2, RES1, RES1
+ MOVD RES1, ret+24(FP)
+
+ // X=Z1; Y=Z2; MUL; T- // Z3 = Z1*Z2
+ MOVD $byteswap<>+0x00(SB), R8
+ MOVD in1+8(FP), P1ptr
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R19)(P1ptr), X0_ // Z1L
+ LXVD2X (R20)(P1ptr), X1_ // Z1H
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+ LXVD2X (R19)(P2ptr), Y0_ // Z2L
+ LXVD2X (R20)(P2ptr), Y1_ // Z2H
+ VPERM Y0, Y0, SWAP, Y0
+ VPERM Y1, Y1, SWAP, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X=T ; Y=H ; MUL; Z3:=T// Z3 = Z3*H
+ VOR T0, T0, X0
+ VOR T1, T1, X1
+ VOR HL, HL, Y0
+ VOR HH, HH, Y1
+ CALL p256MulInternal<>(SB)
+ MOVD res+0(FP), P3ptr
+ LXVD2X (R8)(R0), SWAP_
+ VPERM T1, T1, SWAP, TT1
+ VPERM T0, T0, SWAP, TT0
+ STXVD2X TT0_, (R19)(P3ptr)
+ STXVD2X TT1_, (R20)(P3ptr)
+
+ // X=Y1; Y=S1; MUL; S1=T // S1 = Y1*S1
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R17)(P1ptr), X0_
+ LXVD2X (R18)(P1ptr), X1_
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+ VOR S1L, S1L, Y0
+ VOR S1H, S1H, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, S1L
+ VOR T1, T1, S1H
+
+ // X=Y2; Y=R ; MUL; T- // R = Y2*R
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R8)(R0), SWAP_
+ LXVD2X (R17)(P2ptr), X0_
+ LXVD2X (R18)(P2ptr), X1_
+ VPERM X0, X0, SWAP, X0
+ VPERM X1, X1, SWAP, X1
+ VOR RL, RL, Y0
+
+	// RH was saved above with STXVD2X, so reload it instead of VOR RH, RH, Y1
+ LXVD2X (R1)(R17), Y1_
+ CALL p256MulInternal<>(SB)
+
+ // SUB(R<T-S1) // R = T-S1
+ p256SubInternal(RH,RL,T1,T0,S1H,S1L)
+
+ STXVD2X RH_, (R1)(R17) // Save RH
+
+ // if R == 0 or R^P == 0 then ret=ret else ret=0
+ // clobbers T1H and T1L
+ // Redo this using ISEL??
+ MOVD $1, TRUE
+ VSPLTISB $0, ZER
+ VOR RL, RH, T1H
+ VCMPEQUDCC ZER, T1H, T1H
+
+	// 26 = CR6 NE
+	ISEL $26, R0, TRUE, RES1
+	VXOR RL, PL, T1L // SAVE: T1L
+	VXOR RH, PH, T1H // SAVE: T1H
+ VOR T1L, T1H, T1H
+ VCMPEQUDCC ZER, T1H, T1H
+
+ // 26 = CR6 NE
+ ISEL $26, R0, TRUE, RES2
+ OR RES2, RES1, RES1
+ MOVD ret+24(FP), RES2
+ AND RES2, RES1, RES1
+ MOVD RES1, ret+24(FP)
+
+ // X=H ; Y=H ; MUL; T- // T1 = H*H
+ VOR HL, HL, X0
+ VOR HH, HH, X1
+ VOR HL, HL, Y0
+ VOR HH, HH, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X- ; Y=T ; MUL; T2=T // T2 = H*T1
+ VOR T0, T0, Y0
+ VOR T1, T1, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T2L
+ VOR T1, T1, T2H
+
+ // X=U1; Y- ; MUL; U1=T // U1 = U1*T1
+ VOR U1L, U1L, X0
+ VOR U1H, U1H, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, U1L
+ VOR T1, T1, U1H
+
+ // X=R ; Y=R ; MUL; T- // X3 = R*R
+ VOR RL, RL, X0
+
+ // VOR RH, RH, X1
+ VOR RL, RL, Y0
+
+ // RH was saved above using STXVD2X
+ LXVD2X (R1)(R17), X1_
+ VOR X1, X1, Y1
+
+ // VOR RH, RH, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<T-T2) // X3 = X3-T2
+ p256SubInternal(T1,T0,T1,T0,T2H,T2L)
+
+ // ADD(X<U1+U1) // T1 = 2*U1
+ p256AddInternal(X1,X0,U1H,U1L,U1H,U1L)
+
+ // SUB(T<T-X) X3:=T // X3 = X3-T1 << store-out X3 result reg
+ p256SubInternal(T1,T0,T1,T0,X1,X0)
+ MOVD res+0(FP), P3ptr
+ LXVD2X (R8)(R0), SWAP_
+ VPERM T1, T1, SWAP, TT1
+ VPERM T0, T0, SWAP, TT0
+ STXVD2X TT0_, (R0)(P3ptr)
+ STXVD2X TT1_, (R16)(P3ptr)
+
+ // SUB(Y<U1-T) // Y3 = U1-X3
+ p256SubInternal(Y1,Y0,U1H,U1L,T1,T0)
+
+ // X=R ; Y- ; MUL; U1=T // Y3 = R*Y3
+ VOR RL, RL, X0
+
+ // VOR RH, RH, X1
+ LXVD2X (R1)(R17), X1_
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, U1L
+ VOR T1, T1, U1H
+
+ // X=S1; Y=T2; MUL; T- // T2 = S1*T2
+ VOR S1L, S1L, X0
+ VOR S1H, S1H, X1
+ VOR T2L, T2L, Y0
+ VOR T2H, T2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<U1-T); Y3:=T // Y3 = Y3-T2 << store-out Y3 result reg
+ p256SubInternal(T1,T0,U1H,U1L,T1,T0)
+ MOVD res+0(FP), P3ptr
+ LXVD2X (R8)(R0), SWAP_
+ VPERM T1, T1, SWAP, TT1
+ VPERM T0, T0, SWAP, TT0
+ STXVD2X TT0_, (R17)(P3ptr)
+ STXVD2X TT1_, (R18)(P3ptr)
+
+ RET
diff --git a/src/crypto/elliptic/p256_asm_s390x.s b/src/crypto/elliptic/p256_asm_s390x.s
new file mode 100644
index 0000000..cf37e20
--- /dev/null
+++ b/src/crypto/elliptic/p256_asm_s390x.s
@@ -0,0 +1,2714 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "go_asm.h"
+
+DATA p256ordK0<>+0x00(SB)/4, $0xee00bc4f
+DATA p256ord<>+0x00(SB)/8, $0xffffffff00000000
+DATA p256ord<>+0x08(SB)/8, $0xffffffffffffffff
+DATA p256ord<>+0x10(SB)/8, $0xbce6faada7179e84
+DATA p256ord<>+0x18(SB)/8, $0xf3b9cac2fc632551
+DATA p256<>+0x00(SB)/8, $0xffffffff00000001 // P256
+DATA p256<>+0x08(SB)/8, $0x0000000000000000 // P256
+DATA p256<>+0x10(SB)/8, $0x00000000ffffffff // P256
+DATA p256<>+0x18(SB)/8, $0xffffffffffffffff // P256
+DATA p256<>+0x20(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x28(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x30(SB)/8, $0x0000000010111213 // SEL 0 d1 d0 0
+DATA p256<>+0x38(SB)/8, $0x1415161700000000 // SEL 0 d1 d0 0
+DATA p256<>+0x40(SB)/8, $0x18191a1b1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x48(SB)/8, $0x18191a1b1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x00(SB)/8, $0xffffffff00000001 // P256
+DATA p256mul<>+0x08(SB)/8, $0x0000000000000000 // P256
+DATA p256mul<>+0x10(SB)/8, $0x00000000ffffffff // P256
+DATA p256mul<>+0x18(SB)/8, $0xffffffffffffffff // P256
+DATA p256mul<>+0x20(SB)/8, $0x1c1d1e1f00000000 // SEL d0 0 0 d0
+DATA p256mul<>+0x28(SB)/8, $0x000000001c1d1e1f // SEL d0 0 0 d0
+DATA p256mul<>+0x30(SB)/8, $0x0001020304050607 // SEL d0 0 d1 d0
+DATA p256mul<>+0x38(SB)/8, $0x1c1d1e1f0c0d0e0f // SEL d0 0 d1 d0
+DATA p256mul<>+0x40(SB)/8, $0x040506071c1d1e1f // SEL 0 d1 d0 d1
+DATA p256mul<>+0x48(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL 0 d1 d0 d1
+DATA p256mul<>+0x50(SB)/8, $0x0405060704050607 // SEL 0 0 d1 d0
+DATA p256mul<>+0x58(SB)/8, $0x1c1d1e1f0c0d0e0f // SEL 0 0 d1 d0
+DATA p256mul<>+0x60(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x68(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x70(SB)/8, $0x141516170c0d0e0f // SEL 0 d1 d0 0
+DATA p256mul<>+0x78(SB)/8, $0x1c1d1e1f14151617 // SEL 0 d1 d0 0
+DATA p256mul<>+0x80(SB)/8, $0x00000000fffffffe // (1*2^256)%P256
+DATA p256mul<>+0x88(SB)/8, $0xffffffffffffffff // (1*2^256)%P256
+DATA p256mul<>+0x90(SB)/8, $0xffffffff00000000 // (1*2^256)%P256
+DATA p256mul<>+0x98(SB)/8, $0x0000000000000001 // (1*2^256)%P256
+GLOBL p256ordK0<>(SB), 8, $4
+GLOBL p256ord<>(SB), 8, $32
+GLOBL p256<>(SB), 8, $80
+GLOBL p256mul<>(SB), 8, $160
+
+DATA p256vmsl<>+0x0(SB)/8, $0x0012131415161718
+DATA p256vmsl<>+0x8(SB)/8, $0x00191a1b1c1d1e1f
+DATA p256vmsl<>+0x10(SB)/8, $0x0012131415161718
+DATA p256vmsl<>+0x18(SB)/8, $0x000b0c0d0e0f1011
+DATA p256vmsl<>+0x20(SB)/8, $0x00191a1b1c1d1e1f
+DATA p256vmsl<>+0x28(SB)/8, $0x0012131415161718
+DATA p256vmsl<>+0x30(SB)/8, $0x000b0c0d0e0f1011
+DATA p256vmsl<>+0x38(SB)/8, $0x0012131415161718
+DATA p256vmsl<>+0x40(SB)/8, $0x000405060708090a
+DATA p256vmsl<>+0x48(SB)/8, $0x000b0c0d0e0f1011
+DATA p256vmsl<>+0x50(SB)/8, $0x000b0c0d0e0f1011
+DATA p256vmsl<>+0x58(SB)/8, $0x000405060708090a
+DATA p256vmsl<>+0x60(SB)/8, $0x1010101000010203
+DATA p256vmsl<>+0x68(SB)/8, $0x100405060708090a
+DATA p256vmsl<>+0x70(SB)/8, $0x100405060708090a
+DATA p256vmsl<>+0x78(SB)/8, $0x1010101000010203
+GLOBL p256vmsl<>(SB), 8, $128
+
+// ---------------------------------------
+// iff cond == 1 val <- -val
+// func p256NegCond(val *p256Point, cond int)
+#define P1ptr R1
+#define CPOOL R4
+
+#define Y1L V0
+#define Y1H V1
+#define T1L V2
+#define T1H V3
+
+#define PL V30
+#define PH V31
+
+#define ZER V4
+#define SEL1 V5
+#define CAR1 V6
+TEXT ·p256NegCond(SB), NOSPLIT, $0
+ MOVD val+0(FP), P1ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+
+ VL 32(P1ptr), Y1H
+ VL 48(P1ptr), Y1L
+
+ VLREPG cond+8(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VSCBIQ Y1L, PL, CAR1
+ VSQ Y1L, PL, T1L
+ VSBIQ PH, Y1H, CAR1, T1H
+
+ VSEL Y1L, T1L, SEL1, Y1L
+ VSEL Y1H, T1H, SEL1, Y1H
+
+ VST Y1H, 32(P1ptr)
+ VST Y1L, 48(P1ptr)
+ RET
+
+#undef P1ptr
+#undef CPOOL
+#undef Y1L
+#undef Y1H
+#undef T1L
+#undef T1H
+#undef PL
+#undef PH
+#undef ZER
+#undef SEL1
+#undef CAR1
+
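+// For readers mapping the vector code above back to arithmetic, a plain-Go
+// sketch of p256NegCond's contract (big.Int is not constant time, so this
+// is a reference only, and the helper name is ours, not the package's):
+//
+//	var p, _ = new(big.Int).SetString(
+//		"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff", 16)
+//
+//	// negCond returns P-val (that is, -val mod P) when cond != 0, else val.
+//	func negCond(val *big.Int, cond int) *big.Int {
+//		if cond != 0 {
+//			return new(big.Int).Sub(p, val)
+//		}
+//		return new(big.Int).Set(val)
+//	}
+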
+// ---------------------------------------
+// if cond == 0 res <- b; else res <- a
+// func p256MovCond(res, a, b *p256Point, cond int)
+#define P3ptr R1
+#define P1ptr R2
+#define P2ptr R3
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+
+#define ZER V18
+#define SEL1 V19
+TEXT ·p256MovCond(SB), NOSPLIT, $0
+ MOVD res+0(FP), P3ptr
+ MOVD a+8(FP), P1ptr
+ MOVD b+16(FP), P2ptr
+ VLREPG cond+24(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VL 0(P1ptr), X1H
+ VL 16(P1ptr), X1L
+ VL 32(P1ptr), Y1H
+ VL 48(P1ptr), Y1L
+ VL 64(P1ptr), Z1H
+ VL 80(P1ptr), Z1L
+
+ VL 0(P2ptr), X2H
+ VL 16(P2ptr), X2L
+ VL 32(P2ptr), Y2H
+ VL 48(P2ptr), Y2L
+ VL 64(P2ptr), Z2H
+ VL 80(P2ptr), Z2L
+
+ VSEL X2L, X1L, SEL1, X1L
+ VSEL X2H, X1H, SEL1, X1H
+ VSEL Y2L, Y1L, SEL1, Y1L
+ VSEL Y2H, Y1H, SEL1, Y1H
+ VSEL Z2L, Z1L, SEL1, Z1L
+ VSEL Z2H, Z1H, SEL1, Z1H
+
+ VST X1H, 0(P3ptr)
+ VST X1L, 16(P3ptr)
+ VST Y1H, 32(P3ptr)
+ VST Y1L, 48(P3ptr)
+ VST Z1H, 64(P3ptr)
+ VST Z1L, 80(P3ptr)
+
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef P2ptr
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef ZER
+#undef SEL1
+
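+// The same VSEL selection idea, sketched portably with crypto/subtle (a
+// reference, not what this file runs; assumes cond is 0 or 1 and that a
+// p256Point is 96 bytes of x||y||z):
+//
+//	// movCond sets res to a when cond != 0, else to b, without branching
+//	// on the secret cond.
+//	func movCond(res, a, b *[96]byte, cond int) {
+//		copy(res[:], b[:])
+//		subtle.ConstantTimeCopy(subtle.ConstantTimeByteEq(byte(cond), 0)^1, res[:], a[:])
+//	}
+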
+// ---------------------------------------
+// Constant time table access
+// Indexed from 1 to 16, with -1 offset
+// (index 0 is implicitly the point at infinity)
+// func p256Select(point *p256Point, table []p256Point, idx int)
+#define P3ptr R1
+#define P1ptr R2
+#define COUNT R4
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+
+#define ONE V18
+#define IDX V19
+#define SEL1 V20
+#define SEL2 V21
+TEXT ·p256Select(SB), NOSPLIT, $0
+ MOVD point+0(FP), P3ptr
+ MOVD table+8(FP), P1ptr
+ VLREPB idx+(32+7)(FP), IDX
+ VREPIB $1, ONE
+ VREPIB $1, SEL2
+ MOVD $1, COUNT
+
+ VZERO X1H
+ VZERO X1L
+ VZERO Y1H
+ VZERO Y1L
+ VZERO Z1H
+ VZERO Z1L
+
+loop_select:
+ VL 0(P1ptr), X2H
+ VL 16(P1ptr), X2L
+ VL 32(P1ptr), Y2H
+ VL 48(P1ptr), Y2L
+ VL 64(P1ptr), Z2H
+ VL 80(P1ptr), Z2L
+
+ VCEQG SEL2, IDX, SEL1
+
+ VSEL X2L, X1L, SEL1, X1L
+ VSEL X2H, X1H, SEL1, X1H
+ VSEL Y2L, Y1L, SEL1, Y1L
+ VSEL Y2H, Y1H, SEL1, Y1H
+ VSEL Z2L, Z1L, SEL1, Z1L
+ VSEL Z2H, Z1H, SEL1, Z1H
+
+ VAB SEL2, ONE, SEL2
+ ADDW $1, COUNT
+ ADD $96, P1ptr
+ CMPW COUNT, $17
+ BLT loop_select
+
+ VST X1H, 0(P3ptr)
+ VST X1L, 16(P3ptr)
+ VST Y1H, 32(P3ptr)
+ VST Y1L, 48(P3ptr)
+ VST Z1H, 64(P3ptr)
+ VST Z1L, 80(P3ptr)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef COUNT
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef ONE
+#undef IDX
+#undef SEL1
+#undef SEL2
+
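+// Portable sketch of the loop above: every table entry is read and the one
+// at position idx is folded in under a mask, so the memory access pattern
+// is independent of the secret index (helper and layout are illustrative;
+// p256SelectBase below uses the same pattern over a 64-entry table):
+//
+//	func selectPoint(out *[96]byte, table [][96]byte, idx int) {
+//		*out = [96]byte{}
+//		for j := range table {
+//			mask := byte(subtle.ConstantTimeEq(int32(j+1), int32(idx))) * 0xff
+//			for i := range out {
+//				out[i] |= table[j][i] & mask
+//			}
+//		}
+//	}
+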
+// ---------------------------------------
+// Constant time table access
+// Indexed from 1 to 64, with -1 offset
+// (index 0 is implicitly the point at infinity)
+// func p256SelectBase(point *p256Point, table []p256Point, idx int)
+#define P3ptr R1
+#define P1ptr R2
+#define COUNT R4
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+
+#define ONE V18
+#define IDX V19
+#define SEL1 V20
+#define SEL2 V21
+TEXT ·p256SelectBase(SB), NOSPLIT, $0
+ MOVD point+0(FP), P3ptr
+ MOVD table+8(FP), P1ptr
+ VLREPB idx+(32+7)(FP), IDX
+ VREPIB $1, ONE
+ VREPIB $1, SEL2
+ MOVD $1, COUNT
+
+ VZERO X1H
+ VZERO X1L
+ VZERO Y1H
+ VZERO Y1L
+ VZERO Z1H
+ VZERO Z1L
+
+loop_select:
+ VL 0(P1ptr), X2H
+ VL 16(P1ptr), X2L
+ VL 32(P1ptr), Y2H
+ VL 48(P1ptr), Y2L
+ VL 64(P1ptr), Z2H
+ VL 80(P1ptr), Z2L
+
+ VCEQG SEL2, IDX, SEL1
+
+ VSEL X2L, X1L, SEL1, X1L
+ VSEL X2H, X1H, SEL1, X1H
+ VSEL Y2L, Y1L, SEL1, Y1L
+ VSEL Y2H, Y1H, SEL1, Y1H
+ VSEL Z2L, Z1L, SEL1, Z1L
+ VSEL Z2H, Z1H, SEL1, Z1H
+
+ VAB SEL2, ONE, SEL2
+ ADDW $1, COUNT
+ ADD $96, P1ptr
+ CMPW COUNT, $65
+ BLT loop_select
+
+ VST X1H, 0(P3ptr)
+ VST X1L, 16(P3ptr)
+ VST Y1H, 32(P3ptr)
+ VST Y1L, 48(P3ptr)
+ VST Z1H, 64(P3ptr)
+ VST Z1L, 80(P3ptr)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef COUNT
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef ONE
+#undef IDX
+#undef SEL1
+#undef SEL2
+
+// ---------------------------------------
+// func p256FromMont(res, in []byte)
+#define res_ptr R1
+#define x_ptr R2
+#define CPOOL R4
+
+#define T0 V0
+#define T1 V1
+#define T2 V2
+#define TT0 V3
+#define TT1 V4
+
+#define ZER V6
+#define SEL1 V7
+#define SEL2 V8
+#define CAR1 V9
+#define CAR2 V10
+#define RED1 V11
+#define RED2 V12
+#define PL V13
+#define PH V14
+
+TEXT ·p256FromMont(SB), NOSPLIT, $0
+ MOVD res+0(FP), res_ptr
+ MOVD in+24(FP), x_ptr
+
+ VZERO T2
+ VZERO ZER
+ MOVD $p256<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL1
+
+ VL (1*16)(x_ptr), T0
+ VL (0*16)(x_ptr), T1
+
+ // First round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $8, T1, T0, T0
+ VSLDB $8, T2, T1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // Second round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $8, T1, T0, T0
+ VSLDB $8, T2, T1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // Third round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $8, T1, T0, T0
+ VSLDB $8, T2, T1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // Last round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $8, T1, T0, T0
+ VSLDB $8, T2, T1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // ---------------------------------------------------
+
+ VSCBIQ PL, T0, CAR1
+ VSQ PL, T0, TT0
+ VSBCBIQ T1, PH, CAR1, CAR2
+ VSBIQ T1, PH, CAR1, TT1
+ VSBIQ T2, ZER, CAR2, T2
+
+ // what output to use, TT1||TT0 or T1||T0?
+ VSEL T0, TT0, T2, T0
+ VSEL T1, TT1, T2, T1
+
+ VST T0, (1*16)(res_ptr)
+ VST T1, (0*16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef CPOOL
+#undef T0
+#undef T1
+#undef T2
+#undef TT0
+#undef TT1
+#undef ZER
+#undef SEL1
+#undef SEL2
+#undef CAR1
+#undef CAR2
+#undef RED1
+#undef RED2
+#undef PL
+#undef PH
+
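+// Each of the four rounds above folds 64 bits, so the net effect is a
+// division by R = 2²⁵⁶ modulo p, i.e. conversion out of Montgomery form.
+// A checkable (non-constant-time) reference, with p as in the earlier
+// sketch:
+//
+//	var rInv = new(big.Int).ModInverse(new(big.Int).Lsh(big.NewInt(1), 256), p)
+//
+//	func fromMont(in *big.Int) *big.Int {
+//		out := new(big.Int).Mul(in, rInv)
+//		return out.Mod(out, p)
+//	}
+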
+// ---------------------------------------
+// func p256OrdMul(res, in1, in2 []byte)
+#define res_ptr R1
+#define x_ptr R2
+#define y_ptr R3
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define M0 V4
+#define M1 V5
+#define T0 V6
+#define T1 V7
+#define T2 V8
+#define YDIG V9
+
+#define ADD1 V16
+#define ADD1H V17
+#define ADD2 V18
+#define ADD2H V19
+#define RED1 V20
+#define RED1H V21
+#define RED2 V22
+#define RED2H V23
+#define CAR1 V24
+#define CAR1M V25
+
+#define MK0 V30
+#define K0 V31
+TEXT ·p256OrdMul(SB), NOSPLIT, $0
+ MOVD res+0(FP), res_ptr
+ MOVD in1+24(FP), x_ptr
+ MOVD in2+48(FP), y_ptr
+
+ VZERO T2
+ MOVD $p256ordK0<>+0x00(SB), R4
+
+ // VLEF $3, 0(R4), K0
+ WORD $0xE7F40000
+ BYTE $0x38
+ BYTE $0x03
+ MOVD $p256ord<>+0x00(SB), R4
+ VL 16(R4), M0
+ VL 0(R4), M1
+
+ VL (1*16)(x_ptr), X0
+ VL (0*16)(x_ptr), X1
+ VL (1*16)(y_ptr), Y0
+ VL (0*16)(y_ptr), Y1
+
+	// ---------------------------------------------------------------------------
+ VREPF $3, Y0, YDIG
+ VMLF X0, YDIG, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMLF X1, YDIG, ADD2
+ VMLHF X0, YDIG, ADD1H
+ VMLHF X1, YDIG, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+/* *
+ * ---+--------+--------+
+ * T2| T1 | T0 |
+ * ---+--------+--------+
+ * *(add)*
+ * +--------+--------+
+ * | X1 | X0 |
+ * +--------+--------+
+ * *(mul)*
+ * +--------+--------+
+ * | YDIG | YDIG |
+ * +--------+--------+
+ * *(add)*
+ * +--------+--------+
+ * | M1 | M0 |
+ * +--------+--------+
+ * *(mul)*
+ * +--------+--------+
+ * | MK0 | MK0 |
+ * +--------+--------+
+ *
+ * ---------------------
+ *
+ * +--------+--------+
+ * | ADD2 | ADD1 |
+ * +--------+--------+
+ * +--------+--------+
+ * | ADD2H | ADD1H |
+ * +--------+--------+
+ * +--------+--------+
+ * | RED2 | RED1 |
+ * +--------+--------+
+ * +--------+--------+
+ * | RED2H | RED1H |
+ * +--------+--------+
+ */
+ VREPF $2, Y0, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $1, Y0, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $0, Y0, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $3, Y1, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $2, Y1, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $1, Y1, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $0, Y1, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+
+ VZERO RED1
+ VSCBIQ M0, T0, CAR1
+ VSQ M0, T0, ADD1
+ VSBCBIQ T1, M1, CAR1, CAR1M
+ VSBIQ T1, M1, CAR1, ADD2
+ VSBIQ T2, RED1, CAR1M, T2
+
+ // what output to use, ADD2||ADD1 or T1||T0?
+ VSEL T0, ADD1, T2, T0
+ VSEL T1, ADD2, T2, T1
+
+ VST T0, (1*16)(res_ptr)
+ VST T1, (0*16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef M0
+#undef M1
+#undef T0
+#undef T1
+#undef T2
+#undef YDIG
+
+#undef ADD1
+#undef ADD1H
+#undef ADD2
+#undef ADD2H
+#undef RED1
+#undef RED1H
+#undef RED2
+#undef RED2H
+#undef CAR1
+#undef CAR1M
+
+#undef MK0
+#undef K0
+
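+// The eight digit rounds above (one per 32-bit word of the multiplier)
+// form a Montgomery multiplication modulo the group order N, with K0 the
+// usual -N⁻¹ mod 2³² constant. A math/big reference to test against:
+//
+//	var n, _ = new(big.Int).SetString(
+//		"ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551", 16)
+//	var rInvN = new(big.Int).ModInverse(new(big.Int).Lsh(big.NewInt(1), 256), n)
+//
+//	// ordMul returns in1 * in2 * R⁻¹ mod N, matching p256OrdMul's contract.
+//	func ordMul(in1, in2 *big.Int) *big.Int {
+//		out := new(big.Int).Mul(in1, in2)
+//		out.Mul(out, rInvN)
+//		return out.Mod(out, n)
+//	}
+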
+// ---------------------------------------
+// p256MulInternalVX
+// V0-V3,V30,V31 - Not Modified
+// V4-V15 - Volatile
+
+#define CPOOL R4
+
+// Parameters
+#define X0 V0 // Not modified
+#define X1 V1 // Not modified
+#define Y0 V2 // Not modified
+#define Y1 V3 // Not modified
+#define T0 V4
+#define T1 V5
+#define P0 V30 // Not modified
+#define P1 V31 // Not modified
+
+// Temporaries
+#define YDIG V6 // Overloaded with CAR2, ZER
+#define ADD1H V7 // Overloaded with ADD3H
+#define ADD2H V8 // Overloaded with ADD4H
+#define ADD3 V9 // Overloaded with SEL2,SEL5
+#define ADD4 V10 // Overloaded with SEL3,SEL6
+#define RED1 V11 // Overloaded with CAR2
+#define RED2 V12
+#define RED3 V13 // Overloaded with SEL1
+#define T2 V14
+// Overloaded temporaries
+#define ADD1 V4 // Overloaded with T0
+#define ADD2 V5 // Overloaded with T1
+#define ADD3H V7 // Overloaded with ADD1H
+#define ADD4H V8 // Overloaded with ADD2H
+#define ZER V6 // Overloaded with YDIG, CAR2
+#define CAR1 V6 // Overloaded with YDIG, ZER
+#define CAR2 V11 // Overloaded with RED1
+// Constant Selects
+#define SEL1 V13 // Overloaded with RED3
+#define SEL2 V9 // Overloaded with ADD3,SEL5
+#define SEL3 V10 // Overloaded with ADD4,SEL6
+#define SEL4 V6 // Overloaded with YDIG,CAR2,ZER
+#define SEL5 V9 // Overloaded with ADD3,SEL2
+#define SEL6 V10 // Overloaded with ADD4,SEL3
+
+/* *
+ * To follow the flow of bits, for your own sanity a stiff drink, need you shall.
+ * Of a single round, a 'helpful' picture, here is. Meaning, column position has.
+ * With you, SIMD be...
+ *
+ * +--------+--------+
+ * +--------| RED2 | RED1 |
+ * | +--------+--------+
+ * | ---+--------+--------+
+ * | +---- T2| T1 | T0 |--+
+ * | | ---+--------+--------+ |
+ * | | |
+ * | | ======================= |
+ * | | |
+ * | | +--------+--------+<-+
+ * | +-------| ADD2 | ADD1 |--|-----+
+ * | | +--------+--------+ | |
+ * | | +--------+--------+<---+ |
+ * | | | ADD2H | ADD1H |--+ |
+ * | | +--------+--------+ | |
+ * | | +--------+--------+<-+ |
+ * | | | ADD4 | ADD3 |--|-+ |
+ * | | +--------+--------+ | | |
+ * | | +--------+--------+<---+ | |
+ * | | | ADD4H | ADD3H |------|-+ |(+vzero)
+ * | | +--------+--------+ | | V
+ * | | ------------------------ | | +--------+
+ * | | | | | RED3 | [d0 0 0 d0]
+ * | | | | +--------+
+ * | +---->+--------+--------+ | | |
+ * (T2[1w]||ADD2[4w]||ADD1[3w]) +--------| T1 | T0 | | | |
+ * | +--------+--------+ | | |
+ * +---->---+--------+--------+ | | |
+ * T2| T1 | T0 |----+ | |
+ * ---+--------+--------+ | | |
+ * ---+--------+--------+<---+ | |
+ * +--- T2| T1 | T0 |----------+
+ * | ---+--------+--------+ | |
+ * | +--------+--------+<-------------+
+ * | | RED2 | RED1 |-----+ | | [0 d1 d0 d1] [d0 0 d1 d0]
+ * | +--------+--------+ | | |
+ * | +--------+<----------------------+
+ * | | RED3 |--------------+ | [0 0 d1 d0]
+ * | +--------+ | |
+ * +--->+--------+--------+ | |
+ * | T1 | T0 |--------+
+ * +--------+--------+ | |
+ * --------------------------- | |
+ * | |
+ * +--------+--------+<----+ |
+ * | RED2 | RED1 | |
+ * +--------+--------+ |
+ * ---+--------+--------+<-------+
+ * T2| T1 | T0 | (H1P-H1P-H00RRAY!)
+ * ---+--------+--------+
+ *
+ * *My 21st-century work of art @vpaprots
+ *
+ *
+ * First group is special, doesn't get the two inputs:
+ * +--------+--------+<-+
+ * +-------| ADD2 | ADD1 |--|-----+
+ * | +--------+--------+ | |
+ * | +--------+--------+<---+ |
+ * | | ADD2H | ADD1H |--+ |
+ * | +--------+--------+ | |
+ * | +--------+--------+<-+ |
+ * | | ADD4 | ADD3 |--|-+ |
+ * | +--------+--------+ | | |
+ * | +--------+--------+<---+ | |
+ * | | ADD4H | ADD3H |------|-+ |(+vzero)
+ * | +--------+--------+ | | V
+ * | ------------------------ | | +--------+
+ * | | | | RED3 | [d0 0 0 d0]
+ * | | | +--------+
+ * +---->+--------+--------+ | | |
+ * (T2[1w]||ADD2[4w]||ADD1[3w]) | T1 | T0 |----+ | |
+ * +--------+--------+ | | |
+ * ---+--------+--------+<---+ | |
+ * +--- T2| T1 | T0 |----------+
+ * | ---+--------+--------+ | |
+ * | +--------+--------+<-------------+
+ * | | RED2 | RED1 |-----+ | | [0 d1 d0 d1] [d0 0 d1 d0]
+ * | +--------+--------+ | | |
+ * | +--------+<----------------------+
+ * | | RED3 |--------------+ | [0 0 d1 d0]
+ * | +--------+ | |
+ * +--->+--------+--------+ | |
+ * | T1 | T0 |--------+
+ * +--------+--------+ | |
+ * --------------------------- | |
+ * | |
+ * +--------+--------+<----+ |
+ * | RED2 | RED1 | |
+ * +--------+--------+ |
+ * ---+--------+--------+<-------+
+ * T2| T1 | T0 | (H1P-H1P-H00RRAY!)
+ * ---+--------+--------+
+ *
+ * Last 'group' needs to RED2||RED1 shifted less
+ */
+TEXT ·p256MulInternalVX(SB), NOSPLIT, $0-0
+ VL 32(CPOOL), SEL1
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL3
+ VL 80(CPOOL), SEL4
+
+ // ---------------------------------------------------
+
+ VREPF $3, Y0, YDIG
+ VMLHF X0, YDIG, ADD1H
+ VMLHF X1, YDIG, ADD2H
+ VMLF X0, YDIG, ADD1
+ VMLF X1, YDIG, ADD2
+
+ VREPF $2, Y0, YDIG
+ VMALF X0, YDIG, ADD1H, ADD3
+ VMALF X1, YDIG, ADD2H, ADD4
+ VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free
+ VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free
+
+ VZERO ZER
+ VL 32(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDB $12, ADD2, ADD1, T0 // ADD1 Free
+ VSLDB $12, ZER, ADD2, T1 // ADD2 Free
+
+ VACCQ T0, ADD3, CAR1
+ VAQ T0, ADD3, T0 // ADD3 Free
+ VACCCQ T1, ADD4, CAR1, T2
+ VACQ T1, ADD4, CAR1, T1 // ADD4 Free
+
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL3
+ VL 80(CPOOL), SEL4
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSQ RED3, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $12, T1, T0, T0
+ VSLDB $12, T2, T1, T1
+
+ VACCQ T0, ADD3H, CAR1
+ VAQ T0, ADD3H, T0
+ VACCCQ T1, ADD4H, CAR1, T2
+ VACQ T1, ADD4H, CAR1, T1
+
+ // ---------------------------------------------------
+
+ VREPF $1, Y0, YDIG
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+ VMALF X0, YDIG, T0, ADD1 // T0 Free->ADD1
+ VMALF X1, YDIG, T1, ADD2 // T1 Free->ADD2
+
+ VREPF $0, Y0, YDIG
+ VMALF X0, YDIG, ADD1H, ADD3
+ VMALF X1, YDIG, ADD2H, ADD4
+ VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free->ADD3H
+ VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free->ADD4H , YDIG Free->ZER
+
+ VZERO ZER
+ VL 32(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDB $12, ADD2, ADD1, T0 // ADD1 Free->T0
+ VSLDB $12, T2, ADD2, T1 // ADD2 Free->T1, T2 Free
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, T2
+ VACQ T1, RED2, CAR1, T1
+
+ VACCQ T0, ADD3, CAR1
+ VAQ T0, ADD3, T0
+ VACCCQ T1, ADD4, CAR1, CAR2
+ VACQ T1, ADD4, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL3
+ VL 80(CPOOL), SEL4
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSQ RED3, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $12, T1, T0, T0
+ VSLDB $12, T2, T1, T1
+
+ VACCQ T0, ADD3H, CAR1
+ VAQ T0, ADD3H, T0
+ VACCCQ T1, ADD4H, CAR1, T2
+ VACQ T1, ADD4H, CAR1, T1
+
+ // ---------------------------------------------------
+
+ VREPF $3, Y1, YDIG
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+ VMALF X0, YDIG, T0, ADD1
+ VMALF X1, YDIG, T1, ADD2
+
+ VREPF $2, Y1, YDIG
+ VMALF X0, YDIG, ADD1H, ADD3
+ VMALF X1, YDIG, ADD2H, ADD4
+ VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free
+ VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free
+
+ VZERO ZER
+ VL 32(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDB $12, ADD2, ADD1, T0 // ADD1 Free
+ VSLDB $12, T2, ADD2, T1 // ADD2 Free
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, T2
+ VACQ T1, RED2, CAR1, T1
+
+ VACCQ T0, ADD3, CAR1
+ VAQ T0, ADD3, T0
+ VACCCQ T1, ADD4, CAR1, CAR2
+ VACQ T1, ADD4, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL3
+ VL 80(CPOOL), SEL4
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSQ RED3, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $12, T1, T0, T0
+ VSLDB $12, T2, T1, T1
+
+ VACCQ T0, ADD3H, CAR1
+ VAQ T0, ADD3H, T0
+ VACCCQ T1, ADD4H, CAR1, T2
+ VACQ T1, ADD4H, CAR1, T1
+
+ // ---------------------------------------------------
+
+ VREPF $1, Y1, YDIG
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+ VMALF X0, YDIG, T0, ADD1
+ VMALF X1, YDIG, T1, ADD2
+
+ VREPF $0, Y1, YDIG
+ VMALF X0, YDIG, ADD1H, ADD3
+ VMALF X1, YDIG, ADD2H, ADD4
+ VMALHF X0, YDIG, ADD1H, ADD3H
+ VMALHF X1, YDIG, ADD2H, ADD4H
+
+ VZERO ZER
+ VL 32(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDB $12, ADD2, ADD1, T0
+ VSLDB $12, T2, ADD2, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, T2
+ VACQ T1, RED2, CAR1, T1
+
+ VACCQ T0, ADD3, CAR1
+ VAQ T0, ADD3, T0
+ VACCCQ T1, ADD4, CAR1, CAR2
+ VACQ T1, ADD4, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ VL 96(CPOOL), SEL5
+ VL 112(CPOOL), SEL6
+ VPERM T0, RED3, SEL5, RED2 // [d1 d0 d1 d0]
+ VPERM T0, RED3, SEL6, RED1 // [ 0 d1 d0 0]
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $12, T1, T0, T0
+ VSLDB $12, T2, T1, T1
+
+ VACCQ T0, ADD3H, CAR1
+ VAQ T0, ADD3H, T0
+ VACCCQ T1, ADD4H, CAR1, T2
+ VACQ T1, ADD4H, CAR1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // ---------------------------------------------------
+
+ VZERO RED3
+ VSCBIQ P0, T0, CAR1
+ VSQ P0, T0, ADD1H
+ VSBCBIQ T1, P1, CAR1, CAR2
+ VSBIQ T1, P1, CAR1, ADD2H
+ VSBIQ T2, RED3, CAR2, T2
+
+ // what output to use, ADD2H||ADD1H or T1||T0?
+ VSEL T0, ADD1H, T2, T0
+ VSEL T1, ADD2H, T2, T1
+ RET
+
+#undef CPOOL
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+
+#undef SEL1
+#undef SEL2
+#undef SEL3
+#undef SEL4
+#undef SEL5
+#undef SEL6
+
+#undef YDIG
+#undef ADD1H
+#undef ADD2H
+#undef ADD3
+#undef ADD4
+#undef RED1
+#undef RED2
+#undef RED3
+#undef T2
+#undef ADD1
+#undef ADD2
+#undef ADD3H
+#undef ADD4H
+#undef ZER
+#undef CAR1
+#undef CAR2
+
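+// The VX variant above and the VMSL variant below share one contract,
+// which in big.Int terms (reference only; p as in the earlier sketches,
+// R = 2²⁵⁶) is:
+//
+//	// mulInternal returns x * y * R⁻¹ mod p (Montgomery multiplication).
+//	func mulInternal(x, y *big.Int) *big.Int {
+//		rInv := new(big.Int).ModInverse(new(big.Int).Lsh(big.NewInt(1), 256), p)
+//		out := new(big.Int).Mul(x, y)
+//		out.Mul(out, rInv)
+//		return out.Mod(out, p)
+//	}
+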
+// ---------------------------------------
+// p256MulInternalVMSL
+// V0-V3,V30,V31 - Not Modified
+// V4-V14 - Volatile
+
+#define CPOOL R4
+#define SCRATCH R9
+
+// Parameters
+#define X0 V0 // Not modified
+#define X1 V1 // Not modified
+#define Y0 V2 // Not modified
+#define Y1 V3 // Not modified
+#define T0 V4
+#define T1 V5
+#define T2 V6
+#define P0 V30 // Not modified
+#define P1 V31 // Not modified
+
+// input: d0
+// output: h0, h1
+// temp: TEMP, ZERO, BORROW
+#define OBSERVATION3(d0, h0, h1, TEMP, ZERO, BORROW) \
+ VZERO ZERO \
+ VSLDB $4, d0, ZERO, h0 \
+ VLR h0, BORROW \
+ VSLDB $12, ZERO, h0, TEMP \
+ VSQ TEMP, h0, h0 \
+ VSLDB $12, d0, BORROW, h1 \
+ VSLDB $8, ZERO, BORROW, TEMP \
+	VAQ TEMP, h0, h0
+
+#define OBSERVATION3A(d2, h0, h1, TEMP, ZERO) \
+ VZERO ZERO \
+ VSLDB $8, d2, ZERO, TEMP \
+ VSLDB $8, d2, TEMP, h0 \
+ VSLDB $12, ZERO, TEMP, h1 \
+	VSQ h1, h0, h0
+
+TEXT ·p256MulInternalVMSL(SB), NOFRAME|NOSPLIT, $0-0
+ VSTM V16, V19, (SCRATCH)
+
+ MOVD $p256vmsl<>+0x00(SB), CPOOL
+
+ // Divide input1 into 5 limbs
+ VGBM $0x007f, V14
+ VZERO V12
+ VSLDB $2, X1, X0, V13
+ VSLDB $2, Y1, Y0, V8
+ VSLDB $4, V12, X1, V11 // V11(X1): 4 bytes limb
+ VSLDB $4, V12, Y1, V6 // V6: 4 bytes limb
+
+ VN V14, X0, V5 // V5: first 7 bytes limb
+ VN V14, Y0, V10 // V10: first 7 bytes limb
+ VN V14, V13, V13 // v13: third 7 bytes limb
+ VN V14, V8, V8 // V8: third 7 bytes limb
+
+ VMSLG V10, V5, V12, V10 // v10: l10 x l5 (column 1)
+ VMSLG V8, V5, V12, V8 // v8: l8 x l5
+ VMSLG V6, V13, V12, V13 // v13: l6 x l3
+ VMSLG V6, V11, V12, V11 // v11: l6 x l1 (column 9)
+ VMSLG V6, V5, V12, V6 // v6: l6 x l5
+
+ MOVD $p256vmsl<>+0x00(SB), CPOOL
+ VGBM $0x7f7f, V14
+
+ VL 0(CPOOL), V4
+ VL 16(CPOOL), V7
+ VL 32(CPOOL), V9
+ VL 48(CPOOL), V5
+ VLM 64(CPOOL), V16, V19
+
+ VPERM V12, X0, V4, V4 // v4: limb4 | limb5
+ VPERM Y1, Y0, V7, V7
+ VPERM V12, Y0, V9, V9 // v9: limb10 | limb9
+ VPERM X1, X0, V5, V5
+ VPERM X1, X0, V16, V16
+ VPERM Y1, Y0, V17, V17
+ VPERM X1, V12, V18, V18 // v18: limb1 | limb2
+ VPERM Y1, V12, V19, V19 // v19: limb7 | limb6
+ VN V14, V7, V7 // v7: limb9 | limb8
+ VN V14, V5, V5 // v5: limb3 | limb4
+ VN V14, V16, V16 // v16: limb2 | limb3
+ VN V14, V17, V17 // v17: limb8 | limb7
+
+ VMSLG V9, V4, V12, V14 // v14: l10 x l4 + l9 x l5 (column 2)
+ VMSLG V9, V5, V8, V8 // v8: l10 x l9 + l3 x l4 + l8 x l5 (column 3)
+ VMSLG V9, V16, V12, V16 // v16: l10 x l9 + l2 x l3
+ VMSLG V9, V18, V12, V9 // v9: l10 x l1 + l9 x l2
+ VMSLG V7, V18, V12, V7 // v7: l9 x l1 + l8 x l2
+ VMSLG V17, V4, V16, V16 // v16: l8 x l4 + l7 x l5 + l10 x l9 + l2 x l3 (column 4)
+ VMSLG V17, V5, V9, V9 // v9: l10 x l1 + l9 x l2 + l8 x l3 + l7 x l4
+ VMSLG V17, V18, V12, V17 // v18: l8 x l1 + l7 x l2
+ VMSLG V19, V5, V7, V7 // v7: l9 x l1 + l8 x l2 + l7 x l3 + l6 x l4 (column 6)
+ VMSLG V19, V18, V12, V19 // v19: l7 x l1 + l6 x l2 (column 8)
+ VAQ V9, V6, V9 // v9: l10 x l1 + l9 x l2 + l8 x l3 + l7 x l4 + l6 x l5 (column 5)
+ VAQ V17, V13, V13 // v13: l8 x l1 + l7 x l2 + l6 x l3 (column 7)
+
+ VSLDB $9, V12, V10, V4
+ VSLDB $9, V12, V7, V5
+ VAQ V4, V14, V14
+ VAQ V5, V13, V13
+
+ VSLDB $9, V12, V14, V4
+ VSLDB $9, V12, V13, V5
+ VAQ V4, V8, V8
+ VAQ V5, V19, V19
+
+ VSLDB $9, V12, V8, V4
+ VSLDB $9, V12, V19, V5
+ VAQ V4, V16, V16
+ VAQ V5, V11, V11
+
+ VSLDB $9, V12, V16, V4
+ VAQ V4, V9, V17
+
+ VGBM $0x007f, V4
+ VGBM $0x00ff, V5
+
+ VN V10, V4, V10
+ VN V14, V4, V14
+ VN V8, V4, V8
+ VN V16, V4, V16
+ VN V17, V4, V9
+ VN V7, V4, V7
+ VN V13, V4, V13
+ VN V19, V4, V19
+ VN V11, V5, V11
+
+ VSLDB $7, V14, V14, V14
+ VSLDB $14, V8, V12, V4
+ VSLDB $14, V12, V8, V8
+ VSLDB $5, V16, V16, V16
+ VSLDB $12, V9, V12, V5
+
+ VO V14, V10, V10
+ VO V8, V16, V16
+ VO V4, V10, V10 // first rightmost 128bits of the multiplication result
+ VO V5, V16, V16 // second rightmost 128bits of the multiplication result
+
+ // adjust v7, v13, v19, v11
+ VSLDB $7, V13, V13, V13
+ VSLDB $14, V19, V12, V4
+ VSLDB $14, V12, V19, V19
+ VSLDB $5, V11, V12, V5
+ VO V13, V7, V7
+ VO V4, V7, V7
+ VO V19, V5, V11
+
+ VSLDB $9, V12, V17, V14
+ VSLDB $12, V12, V9, V9
+ VACCQ V7, V14, V13
+ VAQ V7, V14, V7
+ VAQ V11, V13, V11
+
+ // First reduction, 96 bits
+ VSLDB $4, V16, V10, T0
+ VSLDB $4, V12, V16, T1
+ VSLDB $3, V11, V7, V11 // fourth rightmost 128bits of the multiplication result
+ VSLDB $3, V7, V12, V7
+	OBSERVATION3(V10, V8, T2, V17, V18, V19) // results V8 | T2
+ VO V7, V9, V7 // third rightmost 128bits of the multiplication result
+ VACCQ T0, T2, V9
+ VAQ T0, T2, T2
+ VACQ T1, V8, V9, V8
+
+ // Second reduction 96 bits
+ VSLDB $4, V8, T2, T0
+ VSLDB $4, V12, V8, T1
+	OBSERVATION3(T2, V9, V8, V17, V18, V19) // results V9 | V8
+ VACCQ T0, V8, T2
+ VAQ T0, V8, V8
+ VACQ T1, V9, T2, V9
+
+ // Third reduction 64 bits
+ VSLDB $8, V9, V8, T0
+ VSLDB $8, V12, V9, T1
+	OBSERVATION3A(V8, V14, V13, V17, V18) // results V14 | V13
+ VACCQ T0, V13, V12
+ VAQ T0, V13, V13
+ VACQ T1, V14, V12, V14
+ VACCQ V13, V7, V12
+ VAQ V13, V7, T0
+ VACCCQ V14, V11, V12, T2
+ VACQ V14, V11, V12, T1 // results T2 | T1 | T0
+
+ // ---------------------------------------------------
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ VZERO V12
+ VSCBIQ P0, T0, V8
+ VSQ P0, T0, V7
+ VSBCBIQ T1, P1, V8, V10
+ VSBIQ T1, P1, V8, V9
+ VSBIQ T2, V12, V10, T2
+
+ // what output to use, V9||V7 or T1||T0?
+ VSEL T0, V7, T2, T0
+ VSEL T1, V9, T2, T1
+
+ VLM (SCRATCH), V16, V19
+
+ RET
+
+// ---------------------------------------
+// p256SqrInternalVMSL
+// V0-V1,V30,V31 - Not Modified
+// V4-V14 - Volatile
+
+TEXT ·p256SqrInternalVMSL(SB), NOFRAME|NOSPLIT, $0-0
+ VSTM V16, V18, (SCRATCH)
+
+ MOVD $p256vmsl<>+0x00(SB), CPOOL
+ // Divide input into limbs
+ VGBM $0x007f, V14
+ VZERO V12
+ VSLDB $2, X1, X0, V13
+ VSLDB $4, V12, X1, V11 // V11(X1): 4 bytes limb
+
+ VN V14, X0, V10 // V10: first 7 bytes limb
+ VN V14, V13, V13 // v13: third 7 bytes limb
+
+ VMSLG V10, V10, V12, V10 // v10: l10 x l5 (column 1)
+ VMSLG V13, V13, V12, V13 // v13: l8 x l3
+ VMSLG V11, V11, V12, V11 // v11: l6 x l1 (column 9)
+
+ MOVD $p256vmsl<>+0x00(SB), CPOOL
+ VGBM $0x7f7f, V14
+
+ VL 0(CPOOL), V4
+ VL 16(CPOOL), V7
+ VL 32(CPOOL), V9
+ VL 48(CPOOL), V5
+ VLM 64(CPOOL), V16, V18
+ VL 112(CPOOL), V8
+
+ VPERM V12, X0, V4, V4 // v4: limb4 | limb5
+ VPERM X1, X0, V7, V7
+ VPERM V12, X0, V9, V9 // v9: limb10 | limb9
+ VPERM X1, X0, V5, V5
+ VPERM X1, X0, V16, V16
+ VPERM X1, X0, V17, V17
+ VPERM X1, V12, V18, V18 // v18: limb1 | limb2
+ VPERM X1, V12, V8, V8 // v8: limb7 | limb6
+ VN V14, V7, V7 // v7: limb9 | limb8
+ VN V14, V5, V5 // v5: limb3 | limb4
+ VN V14, V16, V16 // v16: limb2 | limb3
+ VN V14, V17, V17 // v17: limb8 | limb7
+
+ VMSLEOG V9, V18, V13, V6 // v6: l10 x l1 + l9 x l2 + l8 x l3 + l7 x l4 + l6 x l5 (column 5)
+ VMSLG V9, V4, V12, V14 // v14: l10 x l4 + l9 x l5 (column 2)
+ VMSLEOG V9, V16, V12, V16 // v16: l10 x l2 + l9 x l3 + l8 x l4 + l7 x l5 (column 4)
+ VMSLEOG V7, V18, V12, V7 // v7: l9 x l1 + l8 x l2 (column 6)
+ VMSLEG V17, V18, V12, V13 // v13: l8 x l1 + l7 x l2 + l6 x l3 (column 7)
+ VMSLG V8, V18, V12, V8 // v8: l7 x l1 + l6 x l2 (column 8)
+ VMSLEG V9, V5, V12, V18 // v18: l10 x l3 + l9 x l4 + l8 x l5 (column 3)
+
+ VSLDB $9, V12, V10, V4
+ VSLDB $9, V12, V7, V5
+ VAQ V4, V14, V14
+ VAQ V5, V13, V13
+
+ VSLDB $9, V12, V14, V4
+ VSLDB $9, V12, V13, V5
+ VAQ V4, V18, V18
+ VAQ V5, V8, V8
+
+ VSLDB $9, V12, V18, V4
+ VSLDB $9, V12, V8, V5
+ VAQ V4, V16, V16
+ VAQ V5, V11, V11
+
+ VSLDB $9, V12, V16, V4
+ VAQ V4, V6, V17
+
+ VGBM $0x007f, V4
+ VGBM $0x00ff, V5
+
+ VN V10, V4, V10
+ VN V14, V4, V14
+ VN V18, V4, V18
+ VN V16, V4, V16
+ VN V17, V4, V9
+ VN V7, V4, V7
+ VN V13, V4, V13
+ VN V8, V4, V8
+ VN V11, V5, V11
+
+ VSLDB $7, V14, V14, V14
+ VSLDB $14, V18, V12, V4
+ VSLDB $14, V12, V18, V18
+ VSLDB $5, V16, V16, V16
+ VSLDB $12, V9, V12, V5
+
+ VO V14, V10, V10
+ VO V18, V16, V16
+ VO V4, V10, V10 // first rightmost 128bits of the multiplication result
+ VO V5, V16, V16 // second rightmost 128bits of the multiplication result
+
+ // adjust v7, v13, v8, v11
+ VSLDB $7, V13, V13, V13
+ VSLDB $14, V8, V12, V4
+ VSLDB $14, V12, V8, V8
+ VSLDB $5, V11, V12, V5
+ VO V13, V7, V7
+ VO V4, V7, V7
+ VO V8, V5, V11
+
+ VSLDB $9, V12, V17, V14
+ VSLDB $12, V12, V9, V9
+ VACCQ V7, V14, V13
+ VAQ V7, V14, V7
+ VAQ V11, V13, V11
+
+ // First reduction, 96 bits
+ VSLDB $4, V16, V10, T0
+ VSLDB $4, V12, V16, T1
+ VSLDB $3, V11, V7, V11 // fourth rightmost 128bits of the multiplication result
+ VSLDB $3, V7, V12, V7
+	OBSERVATION3(V10, V8, T2, V16, V17, V18) // results V8 | T2
+ VO V7, V9, V7 // third rightmost 128bits of the multiplication result
+ VACCQ T0, T2, V9
+ VAQ T0, T2, T2
+ VACQ T1, V8, V9, V8
+
+ // Second reduction 96 bits
+ VSLDB $4, V8, T2, T0
+ VSLDB $4, V12, V8, T1
+	OBSERVATION3(T2, V9, V8, V16, V17, V18) // results V9 | V8
+ VACCQ T0, V8, T2
+ VAQ T0, V8, V8
+ VACQ T1, V9, T2, V9
+
+ // Third reduction 64 bits
+ VSLDB $8, V9, V8, T0
+ VSLDB $8, V12, V9, T1
+	OBSERVATION3A(V8, V14, V13, V17, V18) // results V14 | V13
+ VACCQ T0, V13, V12
+ VAQ T0, V13, V13
+ VACQ T1, V14, V12, V14
+ VACCQ V13, V7, V12
+ VAQ V13, V7, T0
+ VACCCQ V14, V11, V12, T2
+ VACQ V14, V11, V12, T1 // results T2 | T1 | T0
+
+ // ---------------------------------------------------
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ VZERO V12
+ VSCBIQ P0, T0, V8
+ VSQ P0, T0, V7
+ VSBCBIQ T1, P1, V8, V10
+ VSBIQ T1, P1, V8, V9
+ VSBIQ T2, V12, V10, T2
+
+ // what output to use, V9||V7 or T1||T0?
+ VSEL T0, V7, T2, T0
+ VSEL T1, V9, T2, T1
+
+ VLM (SCRATCH), V16, V18
+ RET
+
+#undef CPOOL
+#undef SCRATCH
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef T2
+#undef P0
+#undef P1
+
+#define SCRATCH R9
+
+TEXT p256MulInternal<>(SB), NOSPLIT, $64-0
+	MOVD $scratch-64(SP), SCRATCH
+	MOVD ·p256MulInternalFacility+0x00(SB), R7
+	CALL (R7)
+	RET
+
+TEXT ·p256MulInternalTrampolineSetup(SB), NOSPLIT|NOFRAME, $0
+	MOVBZ internal∕cpu·S390X+const_offsetS390xHasVE1(SB), R0
+	MOVD $·p256MulInternalFacility+0x00(SB), R7
+	MOVD $·p256MulInternalVX(SB), R8
+	CMPBEQ R0, $0, novmsl // no VE1 facility: stay with the VX implementation
+	MOVD $·p256MulInternalVMSL(SB), R8 // VE1 facility available: VMSL supported
+novmsl:
+	MOVD R8, 0(R7)
+	BR (R8)
+
+GLOBL ·p256MulInternalFacility+0x00(SB), NOPTR, $8
+DATA ·p256MulInternalFacility+0x00(SB)/8, $·p256MulInternalTrampolineSetup(SB)
+
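+// In Go terms the trampoline above is a lazily initialized function
+// pointer: the first call probes the CPU, patches the pointer to the best
+// implementation, and finishes the call through it, so every later call
+// dispatches directly. Illustrative sketch only (hasVE1 stands in for the
+// internal/cpu S390X.HasVE1 probe; the mul* names are ours):
+//
+//	var mulInternal func()
+//
+//	func init() { mulInternal = mulSetup }
+//
+//	func mulSetup() {
+//		if hasVE1() { // vector-enhancements facility 1 => VMSL usable
+//			mulInternal = mulVMSL
+//		} else {
+//			mulInternal = mulVX
+//		}
+//		mulInternal() // complete the first call with the chosen routine
+//	}
+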
+// Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+
+TEXT ·p256SqrInternalVX(SB), NOFRAME|NOSPLIT, $0
+ VLR X0, Y0
+ VLR X1, Y1
+ BR ·p256MulInternalVX(SB)
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+
+TEXT p256SqrInternal<>(SB), NOSPLIT, $48-0
+	MOVD $scratch-48(SP), SCRATCH
+	MOVD ·p256SqrInternalFacility+0x00(SB), R7
+	CALL (R7)
+	RET
+
+TEXT ·p256SqrInternalTrampolineSetup(SB), NOSPLIT|NOFRAME, $0
+	MOVBZ internal∕cpu·S390X+const_offsetS390xHasVE1(SB), R0
+	MOVD $·p256SqrInternalFacility+0x00(SB), R7
+	MOVD $·p256SqrInternalVX(SB), R8
+	CMPBEQ R0, $0, novmsl // no VE1 facility: stay with the VX implementation
+	MOVD $·p256SqrInternalVMSL(SB), R8 // VE1 facility available: VMSL supported
+novmsl:
+	MOVD R8, 0(R7)
+	BR (R8)
+
+GLOBL ·p256SqrInternalFacility+0x00(SB), NOPTR, $8
+DATA ·p256SqrInternalFacility+0x00(SB)/8, $·p256SqrInternalTrampolineSetup(SB)
+
+#undef SCRATCH
+
+#define p256SubInternal(T1, T0, X1, X0, Y1, Y0) \
+ VZERO ZER \
+ VSCBIQ Y0, X0, CAR1 \
+ VSQ Y0, X0, T0 \
+ VSBCBIQ X1, Y1, CAR1, SEL1 \
+ VSBIQ X1, Y1, CAR1, T1 \
+ VSQ SEL1, ZER, SEL1 \
+ \
+ VACCQ T0, PL, CAR1 \
+ VAQ T0, PL, TT0 \
+ VACQ T1, PH, CAR1, TT1 \
+ \
+ VSEL T0, TT0, SEL1, T0 \
+	VSEL T1, TT1, SEL1, T1
+
+#define p256AddInternal(T1, T0, X1, X0, Y1, Y0) \
+ VACCQ X0, Y0, CAR1 \
+ VAQ X0, Y0, T0 \
+ VACCCQ X1, Y1, CAR1, T2 \
+ VACQ X1, Y1, CAR1, T1 \
+ \
+ VZERO ZER \
+ VSCBIQ PL, T0, CAR1 \
+ VSQ PL, T0, TT0 \
+ VSBCBIQ T1, PH, CAR1, CAR2 \
+ VSBIQ T1, PH, CAR1, TT1 \
+ VSBIQ T2, ZER, CAR2, SEL1 \
+ \
+ VSEL T0, TT0, SEL1, T0 \
+ VSEL T1, TT1, SEL1, T1
+
+#define p256HalfInternal(T1, T0, X1, X0) \
+ VZERO ZER \
+ VSBIQ ZER, ZER, X0, SEL1 \
+ \
+ VACCQ X0, PL, CAR1 \
+ VAQ X0, PL, T0 \
+ VACCCQ X1, PH, CAR1, T2 \
+ VACQ X1, PH, CAR1, T1 \
+ \
+ VSEL X0, T0, SEL1, T0 \
+ VSEL X1, T1, SEL1, T1 \
+ VSEL ZER, T2, SEL1, T2 \
+ \
+ VSLDB $15, T2, ZER, TT1 \
+ VSLDB $15, T1, ZER, TT0 \
+ VREPIB $1, SEL1 \
+ VSRL SEL1, T0, T0 \
+ VSRL SEL1, T1, T1 \
+ VREPIB $7, SEL1 \
+ VSL SEL1, TT0, TT0 \
+ VSL SEL1, TT1, TT1 \
+ VO T0, TT0, T0 \
+ VO T1, TT1, T1
+
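+// Reference semantics for the three macros above, with p as in the earlier
+// sketches (the macros compute the same thing branch-free on 128-bit
+// halves):
+//
+//	func sub(x, y *big.Int) *big.Int { // x - y mod p
+//		return new(big.Int).Mod(new(big.Int).Sub(x, y), p)
+//	}
+//
+//	func add(x, y *big.Int) *big.Int { // x + y mod p
+//		return new(big.Int).Mod(new(big.Int).Add(x, y), p)
+//	}
+//
+//	func half(x *big.Int) *big.Int { // x / 2 mod p
+//		if x.Bit(0) == 1 { // p is odd, so x+p is even
+//			x = new(big.Int).Add(x, p)
+//		}
+//		return new(big.Int).Rsh(x, 1)
+//	}
+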
+// ---------------------------------------
+// func p256MulAsm(res, in1, in2 []byte)
+#define res_ptr R1
+#define x_ptr R2
+#define y_ptr R3
+#define CPOOL R4
+
+// Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+// Constants
+#define P0 V30
+#define P1 V31
+TEXT ·p256MulAsm(SB), NOSPLIT, $0
+ MOVD res+0(FP), res_ptr
+ MOVD in1+24(FP), x_ptr
+ MOVD in2+48(FP), y_ptr
+
+ VL (1*16)(x_ptr), X0
+ VL (0*16)(x_ptr), X1
+ VL (1*16)(y_ptr), Y0
+ VL (0*16)(y_ptr), Y1
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), P0
+ VL 0(CPOOL), P1
+
+ CALL p256MulInternal<>(SB)
+
+ VST T0, (1*16)(res_ptr)
+ VST T1, (0*16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+#undef CPOOL
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+
+// ---------------------------------------
+// func p256SqrAsm(res, in1 []byte)
+#define res_ptr R1
+#define x_ptr R2
+#define y_ptr R3
+#define CPOOL R4
+
+// Parameters
+#define X0 V0
+#define X1 V1
+#define T0 V4
+#define T1 V5
+
+// Constants
+#define P0 V30
+#define P1 V31
+TEXT ·p256SqrAsm(SB), NOSPLIT, $0
+ MOVD res+0(FP), res_ptr
+ MOVD in1+24(FP), x_ptr
+
+ VL (1*16)(x_ptr), X0
+ VL (0*16)(x_ptr), X1
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), P0
+ VL 0(CPOOL), P1
+
+ CALL p256SqrInternal<>(SB)
+
+ VST T0, (1*16)(res_ptr)
+ VST T1, (0*16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+#undef CPOOL
+
+#undef X0
+#undef X1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+
+// Point add with P2 being an affine point
+// If sign == 1 -> P2 = -P2
+// If sel == 0 -> P3 = P1
+// If zero == 0 -> P3 = P2
+// p256PointAddAffineAsm(P3, P1, P2 *p256Point, sign, sel, zero int)
+#define P3ptr R1
+#define P1ptr R2
+#define P2ptr R3
+#define CPOOL R4
+
+// Temporaries in REGs
+#define Y2L V15
+#define Y2H V16
+#define T1L V17
+#define T1H V18
+#define T2L V19
+#define T2H V20
+#define T3L V21
+#define T3H V22
+#define T4L V23
+#define T4H V24
+
+// Temps for Sub and Add
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+
+// Names for zero/sel selects
+#define X1L V0
+#define X1H V1
+#define Y1L V2 // p256MulAsmParmY
+#define Y1H V3 // p256MulAsmParmY
+#define Z1L V4
+#define Z1H V5
+#define X2L V0
+#define X2H V1
+#define Z2L V4
+#define Z2H V5
+#define X3L V17 // T1L
+#define X3H V18 // T1H
+#define Y3L V21 // T3L
+#define Y3H V22 // T3H
+#define Z3L V28
+#define Z3H V29
+
+#define ZER V6
+#define SEL1 V7
+#define CAR1 V8
+#define CAR2 V9
+/* *
+ * Three operand formula:
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ * T1 = Z1²
+ * T2 = T1*Z1
+ * T1 = T1*X2
+ * T2 = T2*Y2
+ * T1 = T1-X1
+ * T2 = T2-Y1
+ * Z3 = Z1*T1
+ * T3 = T1²
+ * T4 = T3*T1
+ * T3 = T3*X1
+ * T1 = 2*T3
+ * X3 = T2²
+ * X3 = X3-T1
+ * X3 = X3-T4
+ * T3 = T3-X3
+ * T3 = T3*T2
+ * T4 = T4*Y1
+ * Y3 = T3-T4
+
+ * Three operand formulas, but with MulInternal X,Y used to store temps
+X=Z1; Y=Z1; MUL;T- // T1 = Z1² T1
+X=T ; Y- ; MUL;T2=T // T2 = T1*Z1 T1 T2
+X- ; Y=X2; MUL;T1=T // T1 = T1*X2 T1 T2
+X=T2; Y=Y2; MUL;T- // T2 = T2*Y2 T1 T2
+SUB(T2<T-Y1) // T2 = T2-Y1 T1 T2
+SUB(Y<T1-X1) // T1 = T1-X1 T1 T2
+X=Z1; Y- ; MUL;Z3:=T// Z3 = Z1*T1 T2
+X=Y; Y- ; MUL;X=T // T3 = T1*T1 T2
+X- ; Y- ; MUL;T4=T // T4 = T3*T1 T2 T4
+X- ; Y=X1; MUL;T3=T // T3 = T3*X1 T2 T3 T4
+ADD(T1<T+T) // T1 = T3+T3 T1 T2 T3 T4
+X=T2; Y=T2; MUL;T- // X3 = T2*T2 T1 T2 T3 T4
+SUB(T<T-T1) // X3 = X3-T1 T1 T2 T3 T4
+SUB(T<T-T4) X3:=T // X3 = X3-T4 T2 T3 T4
+SUB(X<T3-T) // T3 = T3-X3 T2 T3 T4
+X- ; Y- ; MUL;T3=T // T3 = T3*T2 T2 T3 T4
+X=T4; Y=Y1; MUL;T- // T4 = T4*Y1 T3 T4
+SUB(T<T3-T) Y3:=T // Y3 = T3-T4 T3 T4
+
+ */
+TEXT ·p256PointAddAffineAsm(SB), NOSPLIT, $0
+ MOVD P3+0(FP), P3ptr
+ MOVD P1+8(FP), P1ptr
+ MOVD P2+16(FP), P2ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+
+ // if (sign == 1) {
+ // Y2 = fromBig(new(big.Int).Mod(new(big.Int).Sub(p256.P, new(big.Int).SetBytes(Y2)), p256.P)) // Y2 = P-Y2
+ // }
+
+ VL 32(P2ptr), Y2H
+ VL 48(P2ptr), Y2L
+
+ VLREPG sign+24(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VSCBIQ Y2L, PL, CAR1
+ VSQ Y2L, PL, T1L
+ VSBIQ PH, Y2H, CAR1, T1H
+
+ VSEL Y2L, T1L, SEL1, Y2L
+ VSEL Y2H, T1H, SEL1, Y2H
+
+/* *
+ * Three operand formula:
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ */
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1² T1
+ VL 64(P1ptr), X1 // Z1H
+ VL 80(P1ptr), X0 // Z1L
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X=T ; Y- ; MUL; T2=T // T2 = T1*Z1 T1 T2
+ VLR T0, X0
+ VLR T1, X1
+ CALL p256MulInternal<>(SB)
+ VLR T0, T2L
+ VLR T1, T2H
+
+ // X- ; Y=X2; MUL; T1=T // T1 = T1*X2 T1 T2
+ VL 0(P2ptr), Y1 // X2H
+ VL 16(P2ptr), Y0 // X2L
+ CALL p256MulInternal<>(SB)
+ VLR T0, T1L
+ VLR T1, T1H
+
+ // X=T2; Y=Y2; MUL; T- // T2 = T2*Y2 T1 T2
+ VLR T2L, X0
+ VLR T2H, X1
+ VLR Y2L, Y0
+ VLR Y2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T2<T-Y1) // T2 = T2-Y1 T1 T2
+ VL 32(P1ptr), Y1H
+ VL 48(P1ptr), Y1L
+ p256SubInternal(T2H,T2L,T1,T0,Y1H,Y1L)
+
+ // SUB(Y<T1-X1) // T1 = T1-X1 T1 T2
+ VL 0(P1ptr), X1H
+ VL 16(P1ptr), X1L
+ p256SubInternal(Y1,Y0,T1H,T1L,X1H,X1L)
+
+ // X=Z1; Y- ; MUL; Z3:=T// Z3 = Z1*T1 T2
+ VL 64(P1ptr), X1 // Z1H
+ VL 80(P1ptr), X0 // Z1L
+ CALL p256MulInternal<>(SB)
+
+ // VST T1, 64(P3ptr)
+ // VST T0, 80(P3ptr)
+ VLR T0, Z3L
+ VLR T1, Z3H
+
+ // X=Y; Y- ; MUL; X=T // T3 = T1*T1 T2
+ VLR Y0, X0
+ VLR Y1, X1
+ CALL p256SqrInternal<>(SB)
+ VLR T0, X0
+ VLR T1, X1
+
+ // X- ; Y- ; MUL; T4=T // T4 = T3*T1 T2 T4
+ CALL p256MulInternal<>(SB)
+ VLR T0, T4L
+ VLR T1, T4H
+
+ // X- ; Y=X1; MUL; T3=T // T3 = T3*X1 T2 T3 T4
+ VL 0(P1ptr), Y1 // X1H
+ VL 16(P1ptr), Y0 // X1L
+ CALL p256MulInternal<>(SB)
+ VLR T0, T3L
+ VLR T1, T3H
+
+ // ADD(T1<T+T) // T1 = T3+T3 T1 T2 T3 T4
+ p256AddInternal(T1H,T1L, T1,T0,T1,T0)
+
+ // X=T2; Y=T2; MUL; T- // X3 = T2*T2 T1 T2 T3 T4
+ VLR T2L, X0
+ VLR T2H, X1
+ VLR T2L, Y0
+ VLR T2H, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // SUB(T<T-T1) // X3 = X3-T1 T1 T2 T3 T4 (T1 = X3)
+ p256SubInternal(T1,T0,T1,T0,T1H,T1L)
+
+ // SUB(T<T-T4) X3:=T // X3 = X3-T4 T2 T3 T4
+ p256SubInternal(T1,T0,T1,T0,T4H,T4L)
+ VLR T0, X3L
+ VLR T1, X3H
+
+ // SUB(X<T3-T) // T3 = T3-X3 T2 T3 T4
+ p256SubInternal(X1,X0,T3H,T3L,T1,T0)
+
+ // X- ; Y- ; MUL; T3=T // T3 = T3*T2 T2 T3 T4
+ CALL p256MulInternal<>(SB)
+ VLR T0, T3L
+ VLR T1, T3H
+
+ // X=T4; Y=Y1; MUL; T- // T4 = T4*Y1 T3 T4
+ VLR T4L, X0
+ VLR T4H, X1
+ VL 32(P1ptr), Y1 // Y1H
+ VL 48(P1ptr), Y0 // Y1L
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<T3-T) Y3:=T // Y3 = T3-T4 T3 T4 (T3 = Y3)
+ p256SubInternal(Y3H,Y3L,T3H,T3L,T1,T0)
+
+ // if (sel == 0) {
+ // copy(P3.x[:], X1)
+ // copy(P3.y[:], Y1)
+ // copy(P3.z[:], Z1)
+ // }
+
+ VL 0(P1ptr), X1H
+ VL 16(P1ptr), X1L
+
+ // Y1 already loaded, left over from addition
+ VL 64(P1ptr), Z1H
+ VL 80(P1ptr), Z1L
+
+ VLREPG sel+32(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VSEL X1L, X3L, SEL1, X3L
+ VSEL X1H, X3H, SEL1, X3H
+ VSEL Y1L, Y3L, SEL1, Y3L
+ VSEL Y1H, Y3H, SEL1, Y3H
+ VSEL Z1L, Z3L, SEL1, Z3L
+ VSEL Z1H, Z3H, SEL1, Z3H
+
+ // if (zero == 0) {
+ // copy(P3.x[:], X2)
+ // copy(P3.y[:], Y2)
+ // copy(P3.z[:], []byte{0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ // 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) //(p256.z*2^256)%p
+ // }
+ VL 0(P2ptr), X2H
+ VL 16(P2ptr), X2L
+
+ // Y2 already loaded
+ VL 128(CPOOL), Z2H
+ VL 144(CPOOL), Z2L
+
+ VLREPG zero+40(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VSEL X2L, X3L, SEL1, X3L
+ VSEL X2H, X3H, SEL1, X3H
+ VSEL Y2L, Y3L, SEL1, Y3L
+ VSEL Y2H, Y3H, SEL1, Y3H
+ VSEL Z2L, Z3L, SEL1, Z3L
+ VSEL Z2H, Z3H, SEL1, Z3H
+
+ // All done, store out the result!!!
+ VST X3H, 0(P3ptr)
+ VST X3L, 16(P3ptr)
+ VST Y3H, 32(P3ptr)
+ VST Y3L, 48(P3ptr)
+ VST Z3H, 64(P3ptr)
+ VST Z3L, 80(P3ptr)
+
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef P2ptr
+#undef CPOOL
+
+#undef Y2L
+#undef Y2H
+#undef T1L
+#undef T1H
+#undef T2L
+#undef T2H
+#undef T3L
+#undef T3H
+#undef T4L
+#undef T4H
+
+#undef TT0
+#undef TT1
+#undef T2
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+
+#undef PL
+#undef PH
+
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Z2L
+#undef Z2H
+#undef X3L
+#undef X3H
+#undef Y3L
+#undef Y3H
+#undef Z3L
+#undef Z3H
+
+#undef ZER
+#undef SEL1
+#undef CAR1
+#undef CAR2
+
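+// The Hankerson-Menezes-Vanstone schedule above, transliterated to the
+// big.Int helpers sketched earlier (Jacobian P1 plus affine P2; a reading
+// aid, not the constant-time path):
+//
+//	t1 := mul(z1, z1) // T1 = Z1²
+//	t2 := mul(t1, z1) // T2 = T1*Z1
+//	t1 = mul(t1, x2)  // T1 = T1*X2
+//	t2 = mul(t2, y2)  // T2 = T2*Y2
+//	t1 = sub(t1, x1)  // T1 = T1-X1
+//	t2 = sub(t2, y1)  // T2 = T2-Y1
+//	z3 := mul(z1, t1) // Z3 = Z1*T1
+//	t3 := mul(t1, t1) // T3 = T1²
+//	t4 := mul(t3, t1) // T4 = T3*T1
+//	t3 = mul(t3, x1)  // T3 = T3*X1
+//	x3 := sub(sub(mul(t2, t2), add(t3, t3)), t4) // X3 = T2²-2*T3-T4
+//	y3 := sub(mul(sub(t3, x3), t2), mul(t4, y1)) // Y3 = (T3-X3)*T2-T4*Y1
+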
+// p256PointDoubleAsm(P3, P1 *p256Point)
+// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl
+// https://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
+// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-projective-3.html
+#define P3ptr R1
+#define P1ptr R2
+#define CPOOL R4
+
+// Temporaries in REGs
+#define X3L V15
+#define X3H V16
+#define Y3L V17
+#define Y3H V18
+#define T1L V19
+#define T1H V20
+#define T2L V21
+#define T2H V22
+#define T3L V23
+#define T3H V24
+
+#define X1L V6
+#define X1H V7
+#define Y1L V8
+#define Y1H V9
+#define Z1L V10
+#define Z1H V11
+
+// Temps for Sub and Add
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+
+#define Z3L V23
+#define Z3H V24
+
+#define ZER V26
+#define SEL1 V27
+#define CAR1 V28
+#define CAR2 V29
+/*
+ * https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2004-hmv
+ * Cost: 4M + 4S + 1*half + 5add + 2*2 + 1*3.
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ * A = 3(X₁-Z₁²)×(X₁+Z₁²)
+ * B = 2Y₁
+ * Z₃ = B×Z₁
+ * C = B²
+ * D = C×X₁
+ * X₃ = A²-2D
+ * Y₃ = (D-X₃)×A-C²/2
+ *
+ * Three-operand formula:
+ * T1 = Z1²
+ * T2 = X1-T1
+ * T1 = X1+T1
+ * T2 = T2*T1
+ * T2 = 3*T2
+ * Y3 = 2*Y1
+ * Z3 = Y3*Z1
+ * Y3 = Y3²
+ * T3 = Y3*X1
+ * Y3 = Y3²
+ * Y3 = half*Y3
+ * X3 = T2²
+ * T1 = 2*T3
+ * X3 = X3-T1
+ * T1 = T3-X3
+ * T1 = T1*T2
+ * Y3 = T1-Y3
+ */
+
+TEXT ·p256PointDoubleAsm(SB), NOSPLIT, $0
+ MOVD P3+0(FP), P3ptr
+ MOVD P1+8(FP), P1ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1²
+ VL 64(P1ptr), X1 // Z1H
+ VL 80(P1ptr), X0 // Z1L
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // SUB(X<X1-T) // T2 = X1-T1
+ VL 0(P1ptr), X1H
+ VL 16(P1ptr), X1L
+ p256SubInternal(X1,X0,X1H,X1L,T1,T0)
+
+ // ADD(Y<X1+T) // T1 = X1+T1
+ p256AddInternal(Y1,Y0,X1H,X1L,T1,T0)
+
+ // X- ; Y- ; MUL; T- // T2 = T2*T1
+ CALL p256MulInternal<>(SB)
+
+ // ADD(T2<T+T); ADD(T2<T2+T) // T2 = 3*T2
+ p256AddInternal(T2H,T2L,T1,T0,T1,T0)
+ p256AddInternal(T2H,T2L,T2H,T2L,T1,T0)
+
+ // ADD(X<Y1+Y1) // Y3 = 2*Y1
+ VL 32(P1ptr), Y1H
+ VL 48(P1ptr), Y1L
+ p256AddInternal(X1,X0,Y1H,Y1L,Y1H,Y1L)
+
+ // X- ; Y=Z1; MUL; Z3:=T // Z3 = Y3*Z1
+ VL 64(P1ptr), Y1 // Z1H
+ VL 80(P1ptr), Y0 // Z1L
+ CALL p256MulInternal<>(SB)
+ VST T1, 64(P3ptr)
+ VST T0, 80(P3ptr)
+
+ // X- ; Y=X ; MUL; T- // Y3 = Y3²
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X=T ; Y=X1; MUL; T3=T // T3 = Y3*X1
+ VLR T0, X0
+ VLR T1, X1
+ VL 0(P1ptr), Y1
+ VL 16(P1ptr), Y0
+ CALL p256MulInternal<>(SB)
+ VLR T0, T3L
+ VLR T1, T3H
+
+ // X- ; Y=X ; MUL; T- // Y3 = Y3²
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // HAL(Y3<T) // Y3 = half*Y3
+ p256HalfInternal(Y3H,Y3L, T1,T0)
+
+ // X=T2; Y=T2; MUL; T- // X3 = T2²
+ VLR T2L, X0
+ VLR T2H, X1
+ VLR T2L, Y0
+ VLR T2H, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // ADD(T1<T3+T3) // T1 = 2*T3
+ p256AddInternal(T1H,T1L,T3H,T3L,T3H,T3L)
+
+ // SUB(X3<T-T1) X3:=X3 // X3 = X3-T1
+ p256SubInternal(X3H,X3L,T1,T0,T1H,T1L)
+ VST X3H, 0(P3ptr)
+ VST X3L, 16(P3ptr)
+
+ // SUB(X<T3-X3) // T1 = T3-X3
+ p256SubInternal(X1,X0,T3H,T3L,X3H,X3L)
+
+ // X- ; Y- ; MUL; T- // T1 = T1*T2
+ CALL p256MulInternal<>(SB)
+
+ // SUB(Y3<T-Y3) // Y3 = T1-Y3
+ p256SubInternal(Y3H,Y3L,T1,T0,Y3H,Y3L)
+
+ VST Y3H, 32(P3ptr)
+ VST Y3L, 48(P3ptr)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef CPOOL
+#undef X3L
+#undef X3H
+#undef Y3L
+#undef Y3H
+#undef T1L
+#undef T1H
+#undef T2L
+#undef T2H
+#undef T3L
+#undef T3H
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef TT0
+#undef TT1
+#undef T2
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef PL
+#undef PH
+#undef Z3L
+#undef Z3H
+#undef ZER
+#undef SEL1
+#undef CAR1
+#undef CAR2
+
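+// The dbl-2004-hmv schedule above with the same big.Int helpers (again a
+// reading aid; half is the (x+p)/2-when-odd trick from the macro sketch):
+//
+//	t1 := mul(z1, z1)                   // T1 = Z1²
+//	t2 := mul(sub(x1, t1), add(x1, t1)) // T2 = (X1-T1)*(X1+T1)
+//	t2 = add(t2, add(t2, t2))           // T2 = 3*T2
+//	y3 := add(y1, y1)                   // Y3 = 2*Y1
+//	z3 := mul(y3, z1)                   // Z3 = Y3*Z1
+//	y3 = mul(y3, y3)                    // Y3 = Y3²
+//	t3 := mul(y3, x1)                   // T3 = Y3*X1
+//	y3 = half(mul(y3, y3))              // Y3 = Y3²/2
+//	x3 := sub(mul(t2, t2), add(t3, t3)) // X3 = T2²-2*T3
+//	y3 = sub(mul(sub(t3, x3), t2), y3)  // Y3 = (T3-X3)*T2-Y3
+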
+// p256PointAddAsm(P3, P1, P2 *p256Point)
+#define P3ptr R1
+#define P1ptr R2
+#define P2ptr R3
+#define CPOOL R4
+#define ISZERO R5
+#define TRUE R6
+
+// Temporaries in REGs
+#define T1L V16
+#define T1H V17
+#define T2L V18
+#define T2H V19
+#define U1L V20
+#define U1H V21
+#define S1L V22
+#define S1H V23
+#define HL V24
+#define HH V25
+#define RL V26
+#define RH V27
+
+// Temps for Sub and Add
+#define ZER V6
+#define SEL1 V7
+#define CAR1 V8
+#define CAR2 V9
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+/*
+ * https://delta.cs.cinvestav.mx/~francisco/arith/julio.pdf "Software Implementation of the NIST Elliptic Curves Over Prime Fields"
+ *
+ * A = X₁×Z₂²
+ * B = Y₁×Z₂³
+ * C = X₂×Z₁²-A
+ * D = Y₂×Z₁³-B
+ * X₃ = D² - 2A×C² - C³
+ * Y₃ = D×(A×C² - X₃) - B×C³
+ * Z₃ = Z₁×Z₂×C
+ *
+ * Three-operand formula (adopted): https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-1998-cmo-2
+ * Temp storage: T1,T2,U1,H,Z3=X3=Y3,S1,R
+ *
+ * T1 = Z1*Z1
+ * T2 = Z2*Z2
+ * U1 = X1*T2
+ * H = X2*T1
+ * H = H-U1
+ * Z3 = Z1*Z2
+ * Z3 = Z3*H << store-out Z3 result reg.. could override Z1, if slices have same backing array
+ *
+ * S1 = Z2*T2
+ * S1 = Y1*S1
+ * R = Z1*T1
+ * R = Y2*R
+ * R = R-S1
+ *
+ * T1 = H*H
+ * T2 = H*T1
+ * U1 = U1*T1
+ *
+ * X3 = R*R
+ * X3 = X3-T2
+ * T1 = 2*U1
+ * X3 = X3-T1 << store-out X3 result reg
+ *
+ * T2 = S1*T2
+ * Y3 = U1-X3
+ * Y3 = R*Y3
+ * Y3 = Y3-T2 << store-out Y3 result reg
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1*Z1
+ // X- ; Y=T ; MUL; R=T // R = Z1*T1
+ // X=X2; Y- ; MUL; H=T // H = X2*T1
+ // X=Z2; Y=Z2; MUL; T- // T2 = Z2*Z2
+ // X- ; Y=T ; MUL; S1=T // S1 = Z2*T2
+ // X=X1; Y- ; MUL; U1=T // U1 = X1*T2
+ // SUB(H<H-T) // H = H-U1
+ // X=Z1; Y=Z2; MUL; T- // Z3 = Z1*Z2
+ // X=T ; Y=H ; MUL; Z3:=T// Z3 = Z3*H << store-out Z3 result reg.. could override Z1, if slices have same backing array
+ // X=Y1; Y=S1; MUL; S1=T // S1 = Y1*S1
+ // X=Y2; Y=R ; MUL; T- // R = Y2*R
+ // SUB(R<T-S1) // R = R-S1
+ // X=H ; Y=H ; MUL; T- // T1 = H*H
+ // X- ; Y=T ; MUL; T2=T // T2 = H*T1
+ // X=U1; Y- ; MUL; U1=T // U1 = U1*T1
+ // X=R ; Y=R ; MUL; T- // X3 = R*R
+ // SUB(T<T-T2) // X3 = X3-T2
+ // ADD(X<U1+U1) // T1 = 2*U1
+ // SUB(T<T-X) X3:=T // X3 = X3-T1 << store-out X3 result reg
+ // SUB(Y<U1-T) // Y3 = U1-X3
+ // X=R ; Y- ; MUL; U1=T // Y3 = R*Y3
+ // X=S1; Y=T2; MUL; T- // T2 = S1*T2
+ // SUB(T<U1-T); Y3:=T // Y3 = Y3-T2 << store-out Y3 result reg
+ */
+TEXT ·p256PointAddAsm(SB), NOSPLIT, $0
+ MOVD P3+0(FP), P3ptr
+ MOVD P1+8(FP), P1ptr
+ MOVD P2+16(FP), P2ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1*Z1
+ VL 64(P1ptr), X1 // Z1H
+ VL 80(P1ptr), X0 // Z1L
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X- ; Y=T ; MUL; R=T // R = Z1*T1
+ VLR T0, Y0
+ VLR T1, Y1
+ CALL p256MulInternal<>(SB)
+ VLR T0, RL
+ VLR T1, RH
+
+ // X=X2; Y- ; MUL; H=T // H = X2*T1
+ VL 0(P2ptr), X1 // X2H
+ VL 16(P2ptr), X0 // X2L
+ CALL p256MulInternal<>(SB)
+ VLR T0, HL
+ VLR T1, HH
+
+ // X=Z2; Y=Z2; MUL; T- // T2 = Z2*Z2
+ VL 64(P2ptr), X1 // Z2H
+ VL 80(P2ptr), X0 // Z2L
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X- ; Y=T ; MUL; S1=T // S1 = Z2*T2
+ VLR T0, Y0
+ VLR T1, Y1
+ CALL p256MulInternal<>(SB)
+ VLR T0, S1L
+ VLR T1, S1H
+
+ // X=X1; Y- ; MUL; U1=T // U1 = X1*T2
+ VL 0(P1ptr), X1 // X1H
+ VL 16(P1ptr), X0 // X1L
+ CALL p256MulInternal<>(SB)
+ VLR T0, U1L
+ VLR T1, U1H
+
+ // SUB(H<H-T) // H = H-U1
+ p256SubInternal(HH,HL,HH,HL,T1,T0)
+
+ // if H == 0 or H^P == 0 then ret=1 else ret=0
+ // clobbers T1H and T1L
+ MOVD $0, ISZERO
+ MOVD $1, TRUE
+ VZERO ZER
+ VO HL, HH, T1H
+ VCEQGS ZER, T1H, T1H
+ MOVDEQ TRUE, ISZERO
+ VX HL, PL, T1L
+ VX HH, PH, T1H
+ VO T1L, T1H, T1H
+ VCEQGS ZER, T1H, T1H
+ MOVDEQ TRUE, ISZERO
+ MOVD ISZERO, ret+24(FP)
+
+ // X=Z1; Y=Z2; MUL; T- // Z3 = Z1*Z2
+ VL 64(P1ptr), X1 // Z1H
+ VL 80(P1ptr), X0 // Z1L
+ VL 64(P2ptr), Y1 // Z2H
+ VL 80(P2ptr), Y0 // Z2L
+ CALL p256MulInternal<>(SB)
+
+ // X=T ; Y=H ; MUL; Z3:=T// Z3 = Z3*H
+ VLR T0, X0
+ VLR T1, X1
+ VLR HL, Y0
+ VLR HH, Y1
+ CALL p256MulInternal<>(SB)
+ VST T1, 64(P3ptr)
+ VST T0, 80(P3ptr)
+
+ // X=Y1; Y=S1; MUL; S1=T // S1 = Y1*S1
+ VL 32(P1ptr), X1
+ VL 48(P1ptr), X0
+ VLR S1L, Y0
+ VLR S1H, Y1
+ CALL p256MulInternal<>(SB)
+ VLR T0, S1L
+ VLR T1, S1H
+
+ // X=Y2; Y=R ; MUL; T- // R = Y2*R
+ VL 32(P2ptr), X1
+ VL 48(P2ptr), X0
+ VLR RL, Y0
+ VLR RH, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(R<T-S1) // R = T-S1
+ p256SubInternal(RH,RL,T1,T0,S1H,S1L)
+
+ // if R == 0 or R^P == 0 then ret=ret else ret=0
+ // clobbers T1H and T1L
+ MOVD $0, ISZERO
+ MOVD $1, TRUE
+ VZERO ZER
+ VO RL, RH, T1H
+ VCEQGS ZER, T1H, T1H
+ MOVDEQ TRUE, ISZERO
+ VX RL, PL, T1L
+ VX RH, PH, T1H
+ VO T1L, T1H, T1H
+ VCEQGS ZER, T1H, T1H
+ MOVDEQ TRUE, ISZERO
+ AND ret+24(FP), ISZERO
+ MOVD ISZERO, ret+24(FP)
+
+ // X=H ; Y=H ; MUL; T- // T1 = H*H
+ VLR HL, X0
+ VLR HH, X1
+ VLR HL, Y0
+ VLR HH, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X- ; Y=T ; MUL; T2=T // T2 = H*T1
+ VLR T0, Y0
+ VLR T1, Y1
+ CALL p256MulInternal<>(SB)
+ VLR T0, T2L
+ VLR T1, T2H
+
+ // X=U1; Y- ; MUL; U1=T // U1 = U1*T1
+ VLR U1L, X0
+ VLR U1H, X1
+ CALL p256MulInternal<>(SB)
+ VLR T0, U1L
+ VLR T1, U1H
+
+ // X=R ; Y=R ; MUL; T- // X3 = R*R
+ VLR RL, X0
+ VLR RH, X1
+ VLR RL, Y0
+ VLR RH, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // SUB(T<T-T2) // X3 = X3-T2
+ p256SubInternal(T1,T0,T1,T0,T2H,T2L)
+
+ // ADD(X<U1+U1) // T1 = 2*U1
+ p256AddInternal(X1,X0,U1H,U1L,U1H,U1L)
+
+ // SUB(T<T-X) X3:=T // X3 = X3-T1 << store-out X3 result reg
+ p256SubInternal(T1,T0,T1,T0,X1,X0)
+ VST T1, 0(P3ptr)
+ VST T0, 16(P3ptr)
+
+ // SUB(Y<U1-T) // Y3 = U1-X3
+ p256SubInternal(Y1,Y0,U1H,U1L,T1,T0)
+
+ // X=R ; Y- ; MUL; U1=T // Y3 = R*Y3
+ VLR RL, X0
+ VLR RH, X1
+ CALL p256MulInternal<>(SB)
+ VLR T0, U1L
+ VLR T1, U1H
+
+ // X=S1; Y=T2; MUL; T- // T2 = S1*T2
+ VLR S1L, X0
+ VLR S1H, X1
+ VLR T2L, Y0
+ VLR T2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<U1-T); Y3:=T // Y3 = Y3-T2 << store-out Y3 result reg
+ p256SubInternal(T1,T0,U1H,U1L,T1,T0)
+ VST T1, 32(P3ptr)
+ VST T0, 48(P3ptr)
+
+ RET
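+
+// Note on the two ISZERO blocks above: the addition formula degenerates
+// when the inputs share an X coordinate. H == 0 means X1 == X2 (P2 is P1
+// or -P1); if R == 0 as well, the Y coordinates also match, so P1 == P2
+// and the caller must take the doubling path. Values are kept in [0, p],
+// so zero has two encodings (0 and p), hence the extra X^P tests. Sketch,
+// with p as before:
+//
+//	func isZeroModP(v *big.Int) bool {
+//		return v.Sign() == 0 || v.Cmp(p) == 0
+//	}
+//
+//	// ret = 1 exactly when isZeroModP(h) && isZeroModP(r)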
diff --git a/src/crypto/elliptic/p256_asm_table.bin b/src/crypto/elliptic/p256_asm_table.bin
new file mode 100644
index 0000000..20c527e
--- /dev/null
+++ b/src/crypto/elliptic/p256_asm_table.bin
Binary files differ
diff --git a/src/crypto/elliptic/p256_asm_table_test.go b/src/crypto/elliptic/p256_asm_table_test.go
new file mode 100644
index 0000000..6abd8cb
--- /dev/null
+++ b/src/crypto/elliptic/p256_asm_table_test.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package elliptic
+
+import (
+ "encoding/binary"
+ "reflect"
+ "testing"
+)
+
+func TestP256PrecomputedTable(t *testing.T) {
+
+ basePoint := []uint64{
+ 0x79e730d418a9143c, 0x75ba95fc5fedb601, 0x79fb732b77622510, 0x18905f76a53755c6,
+ 0xddf25357ce95560a, 0x8b4ab8e4ba19e45c, 0xd2e88688dd21f325, 0x8571ff1825885d85,
+ 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe,
+ }
+ t1 := make([]uint64, 12)
+ t2 := make([]uint64, 12)
+ copy(t2, basePoint)
+
+ zInv := make([]uint64, 4)
+ zInvSq := make([]uint64, 4)
+ for j := 0; j < 32; j++ {
+ copy(t1, t2)
+ for i := 0; i < 43; i++ {
+ // The window size is 6 so we need to double 6 times.
+ if i != 0 {
+ for k := 0; k < 6; k++ {
+ p256PointDoubleAsm(t1, t1)
+ }
+ }
+ // Convert the point to affine form. (Its values are
+ // still in Montgomery form however.)
+ p256Inverse(zInv, t1[8:12])
+ p256Sqr(zInvSq, zInv, 1)
+ p256Mul(zInv, zInv, zInvSq)
+
+ p256Mul(t1[:4], t1[:4], zInvSq)
+ p256Mul(t1[4:8], t1[4:8], zInv)
+
+ copy(t1[8:12], basePoint[8:12])
+
+ buf := make([]byte, 8*8)
+ for i, u := range t1[:8] {
+ binary.LittleEndian.PutUint64(buf[i*8:i*8+8], u)
+ }
+ start := i*32*8*8 + j*8*8
+ if got, want := p256Precomputed[start:start+64], string(buf); !reflect.DeepEqual(got, want) {
+ t.Fatalf("Unexpected table entry at [%d][%d:%d]: got %v, want %v", i, j*8, (j*8)+8, got, want)
+ }
+ }
+ if j == 0 {
+ p256PointDoubleAsm(t2, basePoint)
+ } else {
+ p256PointAddAsm(t2, t2, basePoint)
+ }
+ }
+
+}
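+
+// Note: the loop above pins down the layout of p256Precomputed: the 64
+// bytes at offset i*32*64 + j*64 hold the affine x and y coordinates
+// (Montgomery-form, little-endian uint64 limbs) of (j+1)*2^(6i)*G, for
+// 0 <= i < 43 and 0 <= j < 32.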
diff --git a/src/crypto/elliptic/p256_generic.go b/src/crypto/elliptic/p256_generic.go
new file mode 100644
index 0000000..7f8fab5
--- /dev/null
+++ b/src/crypto/elliptic/p256_generic.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 && !s390x && !arm64 && !ppc64le
+
+package elliptic
+
+var p256 p256Curve
+
+func initP256Arch() {
+ // Use pure Go implementation.
+ p256 = p256Curve{p256Params}
+}
diff --git a/src/crypto/elliptic/p256_ppc64le.go b/src/crypto/elliptic/p256_ppc64le.go
new file mode 100644
index 0000000..e9a6a06
--- /dev/null
+++ b/src/crypto/elliptic/p256_ppc64le.go
@@ -0,0 +1,521 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64le
+
+package elliptic
+
+import (
+ "crypto/subtle"
+ "encoding/binary"
+ "math/big"
+)
+
+// This was ported from the s390x implementation for ppc64le.
+// Some hints are included here for changes that would be needed in a
+// big-endian ppc64 implementation; however, more investigation and
+// testing are needed before a ppc64 big-endian version will work.
+type p256CurveFast struct {
+ *CurveParams
+}
+
+type p256Point struct {
+ x [32]byte
+ y [32]byte
+ z [32]byte
+}
+
+var (
+ p256 Curve
+ p256PreFast *[37][64]p256Point
+)
+
+func initP256Arch() {
+ p256 = p256CurveFast{p256Params}
+ initTable()
+}
+
+func (curve p256CurveFast) Params() *CurveParams {
+ return curve.CurveParams
+}
+
+// Functions implemented in p256_asm_ppc64le.s
+// Montgomery multiplication modulo P256
+//
+//go:noescape
+func p256MulAsm(res, in1, in2 []byte)
+
+// Montgomery square modulo P256
+//
+func p256Sqr(res, in []byte) {
+ p256MulAsm(res, in, in)
+}
+
+// Montgomery multiplication by 1
+//
+//go:noescape
+func p256FromMont(res, in []byte)
+
+// iff cond == 1 val <- -val
+//
+//go:noescape
+func p256NegCond(val *p256Point, cond int)
+
+// if cond == 0 res <- b; else res <- a
+//
+//go:noescape
+func p256MovCond(res, a, b *p256Point, cond int)
+
+// Constant time table access
+//
+//go:noescape
+func p256Select(point *p256Point, table []p256Point, idx int)
+
+//
+//go:noescape
+func p256SelectBase(point *p256Point, table []p256Point, idx int)
+
+// Point add with P2 being affine point
+// If sign == 1 -> P2 = -P2
+// If sel == 0 -> P3 = P1
+// if zero == 0 -> P3 = P2
+//
+//go:noescape
+func p256PointAddAffineAsm(res, in1, in2 *p256Point, sign, sel, zero int)
+
+// Point add
+//
+//go:noescape
+func p256PointAddAsm(res, in1, in2 *p256Point) int
+
+//
+//go:noescape
+func p256PointDoubleAsm(res, in *p256Point)
+
+// fromBig converts a *big.Int into a 32-byte little-endian slice;
+// big.Int.Bytes returns big-endian bytes, so they are reversed here.
+// TODO: For a big-endian implementation, do not reverse the bytes.
+func fromBig(big *big.Int) []byte {
+ // This could be done a lot more efficiently...
+ res := big.Bytes()
+ t := make([]byte, 32)
+ if len(res) < 32 {
+ copy(t[32-len(res):], res)
+ } else if len(res) == 32 {
+ copy(t, res)
+ } else {
+ copy(t, res[len(res)-32:])
+ }
+ p256ReverseBytes(t, t)
+ return t
+}
+
+// p256GetMultiplier ensures the byte array has exactly 32 bytes. If the
+// scalar is equal to or greater than the order of the group, it is reduced
+// modulo that order.
+func p256GetMultiplier(in []byte) []byte {
+ n := new(big.Int).SetBytes(in)
+
+ if n.Cmp(p256Params.N) >= 0 {
+ n.Mod(n, p256Params.N)
+ }
+ return fromBig(n)
+}
+
+// p256MulAsm operates in a Montgomery domain with R = 2^256 mod p, where p is the
+// underlying field of the curve. (See initP256 for the value.) Thus rr here is
+// R×R mod p. See comment in Inverse about how this is used.
+// TODO: For big endian implementation, the bytes in these slices should be in reverse order,
+// as found in the s390x implementation.
+var rr = []byte{0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00}
+
+// (This is one, in the Montgomery domain.)
+var one = []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}
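+
+// As a sketch of how these constants are used (writing mont(a, b) for what
+// p256MulAsm computes, a×b×R^-1 mod p):
+//
+//	mont(a, rr)  = a×R²×R^-1 = a×R mod p   // enters the Montgomery domain
+//	mont(a×R, 1) = a×R×R^-1  = a mod p     // leaves it (what p256FromMont does)
+//
+// The var one above is 1×R, the Montgomery form of 1, and also serves as
+// the Z coordinate when loading an affine point.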
+
+func maybeReduceModP(in *big.Int) *big.Int {
+ if in.Cmp(p256Params.P) < 0 {
+ return in
+ }
+ return new(big.Int).Mod(in, p256Params.P)
+}
+
+// p256ReverseBytes copies the first 32 bytes from in to res in reverse order.
+func p256ReverseBytes(res, in []byte) {
+ // remove bounds check
+ in = in[:32]
+ res = res[:32]
+
+ // Load in reverse order
+ a := binary.BigEndian.Uint64(in[0:])
+ b := binary.BigEndian.Uint64(in[8:])
+ c := binary.BigEndian.Uint64(in[16:])
+ d := binary.BigEndian.Uint64(in[24:])
+
+ // Store in normal order
+ binary.LittleEndian.PutUint64(res[0:], d)
+ binary.LittleEndian.PutUint64(res[8:], c)
+ binary.LittleEndian.PutUint64(res[16:], b)
+ binary.LittleEndian.PutUint64(res[24:], a)
+}
+
+func (curve p256CurveFast) CombinedMult(bigX, bigY *big.Int, baseScalar, scalar []byte) (x, y *big.Int) {
+ var r1, r2 p256Point
+
+ scalarReduced := p256GetMultiplier(baseScalar)
+ r1IsInfinity := scalarIsZero(scalarReduced)
+ r1.p256BaseMult(scalarReduced)
+
+ copy(r2.x[:], fromBig(maybeReduceModP(bigX)))
+ copy(r2.y[:], fromBig(maybeReduceModP(bigY)))
+ copy(r2.z[:], one)
+ p256MulAsm(r2.x[:], r2.x[:], rr[:])
+ p256MulAsm(r2.y[:], r2.y[:], rr[:])
+
+ scalarReduced = p256GetMultiplier(scalar)
+ r2IsInfinity := scalarIsZero(scalarReduced)
+ r2.p256ScalarMult(scalarReduced)
+
+ var sum, double p256Point
+ pointsEqual := p256PointAddAsm(&sum, &r1, &r2)
+ p256PointDoubleAsm(&double, &r1)
+ p256MovCond(&sum, &double, &sum, pointsEqual)
+ p256MovCond(&sum, &r1, &sum, r2IsInfinity)
+ p256MovCond(&sum, &r2, &sum, r1IsInfinity)
+ return sum.p256PointToAffine()
+}
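+
+// Note on the p256MovCond chain above: p256PointAddAsm reports 1 when its
+// two inputs are equal, where the addition formula is undefined, so the
+// precomputed doubling is selected in that case; and if either scalar was
+// zero, the other term is selected as the result.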
+
+func (curve p256CurveFast) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
+ var r p256Point
+ reducedScalar := p256GetMultiplier(scalar)
+ r.p256BaseMult(reducedScalar)
+ return r.p256PointToAffine()
+}
+
+func (curve p256CurveFast) ScalarMult(bigX, bigY *big.Int, scalar []byte) (x, y *big.Int) {
+ scalarReduced := p256GetMultiplier(scalar)
+ var r p256Point
+ copy(r.x[:], fromBig(maybeReduceModP(bigX)))
+ copy(r.y[:], fromBig(maybeReduceModP(bigY)))
+ copy(r.z[:], one)
+ p256MulAsm(r.x[:], r.x[:], rr[:])
+ p256MulAsm(r.y[:], r.y[:], rr[:])
+ r.p256ScalarMult(scalarReduced)
+ return r.p256PointToAffine()
+}
+
+// scalarIsZero returns 1 in constant time if scalar represents the zero
+// value, and 0 otherwise.
+func scalarIsZero(scalar []byte) int {
+ // OR all bytes together; the result is zero only if every byte is zero.
+ b := byte(0)
+ for _, s := range scalar {
+ b |= s
+ }
+ return subtle.ConstantTimeByteEq(b, 0)
+}
+
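+// p256PointToAffine converts p from Jacobian to affine coordinates,
+// x = X/Z^2 and y = Y/Z^3, working in the Montgomery domain and converting
+// the results out with p256FromMont.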
+func (p *p256Point) p256PointToAffine() (x, y *big.Int) {
+ zInv := make([]byte, 32)
+ zInvSq := make([]byte, 32)
+
+ p256Inverse(zInv, p.z[:])
+ p256Sqr(zInvSq, zInv)
+ p256MulAsm(zInv, zInv, zInvSq)
+
+ p256MulAsm(zInvSq, p.x[:], zInvSq)
+ p256MulAsm(zInv, p.y[:], zInv)
+
+ p256FromMont(zInvSq, zInvSq)
+ p256FromMont(zInv, zInv)
+
+ // SetBytes expects a slice in big-endian order;
+ // since ppc64le is little-endian, reverse the bytes.
+ // TODO: For big endian, bytes don't need to be reversed.
+ p256ReverseBytes(zInvSq, zInvSq)
+ p256ReverseBytes(zInv, zInv)
+ rx := new(big.Int).SetBytes(zInvSq)
+ ry := new(big.Int).SetBytes(zInv)
+ return rx, ry
+}
+
+// p256Inverse sets out to in^-1 mod p.
+func p256Inverse(out, in []byte) {
+ var stack [6 * 32]byte
+ p2 := stack[32*0 : 32*0+32]
+ p4 := stack[32*1 : 32*1+32]
+ p8 := stack[32*2 : 32*2+32]
+ p16 := stack[32*3 : 32*3+32]
+ p32 := stack[32*4 : 32*4+32]
+
+ p256Sqr(out, in)
+ p256MulAsm(p2, out, in) // 3*p
+
+ p256Sqr(out, p2)
+ p256Sqr(out, out)
+ p256MulAsm(p4, out, p2) // f*p
+
+ p256Sqr(out, p4)
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256MulAsm(p8, out, p4) // ff*p
+
+ p256Sqr(out, p8)
+
+ for i := 0; i < 7; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(p16, out, p8) // ffff*p
+
+ p256Sqr(out, p16)
+ for i := 0; i < 15; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(p32, out, p16) // ffffffff*p
+
+ p256Sqr(out, p32)
+
+ for i := 0; i < 31; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, in)
+
+ for i := 0; i < 32*4; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, p32)
+
+ for i := 0; i < 32; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, p32)
+
+ for i := 0; i < 16; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, p16)
+
+ for i := 0; i < 8; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, p8)
+
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256MulAsm(out, out, p4)
+
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256MulAsm(out, out, p2)
+
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256MulAsm(out, out, in)
+}
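+
+// The chain in p256Inverse computes in^(p-2) mod p, which by Fermat's
+// little theorem is the inverse of in for prime p. The exponent
+//
+//	p-2 = 0xffffffff00000001000000000000000000000000fffffffffffffffffffffffd
+//
+// consists mostly of runs of one bits, so the handful of precomputed powers
+// (p2 through p32, covering runs of 2 to 32 ones) plus repeated squarings
+// cover it.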
+
+func boothW5(in uint) (int, int) {
+ var s uint = ^((in >> 5) - 1)
+ var d uint = (1 << 6) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
+
+func boothW6(in uint) (int, int) {
+ var s uint = ^((in >> 6) - 1)
+ var d uint = (1 << 7) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
+
+func boothW7(in uint) (int, int) {
+ var s uint = ^((in >> 7) - 1)
+ var d uint = (1 << 8) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
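+
+// The boothW* helpers perform signed-window (Booth) recoding: a raw
+// (w+1)-bit window value, whose lowest bit overlaps the previous window,
+// is mapped to a digit d in [0, 2^(w-1)] plus a sign, so each window
+// contributes ±d times a table entry. For example, boothW5(1) = (1, 0),
+// i.e. +1×P, while boothW5(62) = (1, 1), i.e. -1×P, with the implied +32
+// absorbed by the next window through the overlapping bit.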
+
+func initTable() {
+
+ p256PreFast = new([37][64]p256Point)
+
+ // TODO: For big endian, these slices should be in reverse byte order,
+ // as found in the s390x implementation.
+ basePoint := p256Point{
+ x: [32]byte{0x3c, 0x14, 0xa9, 0x18, 0xd4, 0x30, 0xe7, 0x79, 0x01, 0xb6, 0xed, 0x5f, 0xfc, 0x95, 0xba, 0x75,
+ 0x10, 0x25, 0x62, 0x77, 0x2b, 0x73, 0xfb, 0x79, 0xc6, 0x55, 0x37, 0xa5, 0x76, 0x5f, 0x90, 0x18}, //(p256.x*2^256)%p
+ y: [32]byte{0x0a, 0x56, 0x95, 0xce, 0x57, 0x53, 0xf2, 0xdd, 0x5c, 0xe4, 0x19, 0xba, 0xe4, 0xb8, 0x4a, 0x8b,
+ 0x25, 0xf3, 0x21, 0xdd, 0x88, 0x86, 0xe8, 0xd2, 0x85, 0x5d, 0x88, 0x25, 0x18, 0xff, 0x71, 0x85}, //(p256.y*2^256)%p
+ z: [32]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}, //(p256.z*2^256)%p
+
+ }
+
+ t1 := new(p256Point)
+ t2 := new(p256Point)
+ *t2 = basePoint
+
+ zInv := make([]byte, 32)
+ zInvSq := make([]byte, 32)
+ for j := 0; j < 64; j++ {
+ *t1 = *t2
+ for i := 0; i < 37; i++ {
+ // The window size is 7 so we need to double 7 times.
+ if i != 0 {
+ for k := 0; k < 7; k++ {
+ p256PointDoubleAsm(t1, t1)
+ }
+ }
+ // Convert the point to affine form. (Its values are
+ // still in Montgomery form however.)
+ p256Inverse(zInv, t1.z[:])
+ p256Sqr(zInvSq, zInv)
+ p256MulAsm(zInv, zInv, zInvSq)
+
+ p256MulAsm(t1.x[:], t1.x[:], zInvSq)
+ p256MulAsm(t1.y[:], t1.y[:], zInv)
+
+ copy(t1.z[:], basePoint.z[:])
+ // Update the table entry
+ copy(p256PreFast[i][j].x[:], t1.x[:])
+ copy(p256PreFast[i][j].y[:], t1.y[:])
+ }
+ if j == 0 {
+ p256PointDoubleAsm(t2, &basePoint)
+ } else {
+ p256PointAddAsm(t2, t2, &basePoint)
+ }
+ }
+}
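+
+// After initTable, p256PreFast[i][j] holds the affine, Montgomery-form
+// coordinates of (j+1)*2^(7i)*G for 0 <= i < 37 and 0 <= j < 64, matching
+// the 7-bit signed windows consumed by p256BaseMult.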
+
+func (p *p256Point) p256BaseMult(scalar []byte) {
+ // TODO: For big endian, the index should be 31 not 0.
+ wvalue := (uint(scalar[0]) << 1) & 0xff
+ sel, sign := boothW7(uint(wvalue))
+ p256SelectBase(p, p256PreFast[0][:], sel)
+ p256NegCond(p, sign)
+
+ copy(p.z[:], one[:])
+ var t0 p256Point
+
+ copy(t0.z[:], one[:])
+
+ index := uint(6)
+ zero := sel
+ for i := 1; i < 37; i++ {
+ // TODO: For big endian, use the same index values as found
+ // in the s390x implementation.
+ if index < 247 {
+ wvalue = ((uint(scalar[index/8]) >> (index % 8)) + (uint(scalar[index/8+1]) << (8 - (index % 8)))) & 0xff
+ } else {
+ wvalue = (uint(scalar[index/8]) >> (index % 8)) & 0xff
+ }
+ index += 7
+ sel, sign = boothW7(uint(wvalue))
+ p256SelectBase(&t0, p256PreFast[i][:], sel)
+ p256PointAddAffineAsm(p, p, &t0, sign, sel, zero)
+ zero |= sel
+ }
+}
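+
+// Each iteration of the loop in p256BaseMult reads 8 scalar bits: 7 new
+// bits plus the one bit shared with the previous window, as Booth recoding
+// requires, which is why index advances by 7.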
+
+func (p *p256Point) p256ScalarMult(scalar []byte) {
+ // precomp is a table of precomputed points that stores multiples of p,
+ // from 1×p through 16×p.
+ var precomp [16]p256Point
+ var t0, t1, t2, t3 p256Point
+
+ precomp[0] = *p // 1
+ p256PointDoubleAsm(&t0, p)
+ p256PointDoubleAsm(&t1, &t0)
+ p256PointDoubleAsm(&t2, &t1)
+ p256PointDoubleAsm(&t3, &t2)
+ precomp[1] = t0 // 2
+ precomp[3] = t1 // 4
+ precomp[7] = t2 // 8
+ precomp[15] = t3 // 16
+
+ p256PointAddAsm(&t0, &t0, p)
+ p256PointAddAsm(&t1, &t1, p)
+ p256PointAddAsm(&t2, &t2, p)
+
+ precomp[2] = t0 // 3
+ precomp[4] = t1 // 5
+ precomp[8] = t2 // 9
+
+ p256PointDoubleAsm(&t0, &t0)
+ p256PointDoubleAsm(&t1, &t1)
+ precomp[5] = t0 // 6
+ precomp[9] = t1 // 10
+
+ p256PointAddAsm(&t2, &t0, p)
+ p256PointAddAsm(&t1, &t1, p)
+ precomp[6] = t2 // 7
+ precomp[10] = t1 // 11
+
+ p256PointDoubleAsm(&t0, &t0)
+ p256PointDoubleAsm(&t2, &t2)
+ precomp[11] = t0 // 12
+ precomp[13] = t2 // 14
+
+ p256PointAddAsm(&t0, &t0, p)
+ p256PointAddAsm(&t2, &t2, p)
+ precomp[12] = t0 // 13
+ precomp[14] = t2 // 15
+
+ // Start scanning the window from the top bit.
+ index := uint(254)
+ var sel, sign int
+
+ // TODO: For big endian, use index found in s390x implementation.
+ wvalue := (uint(scalar[index/8]) >> (index % 8)) & 0x3f
+ sel, _ = boothW5(uint(wvalue))
+ p256Select(p, precomp[:], sel)
+ zero := sel
+
+ for index > 4 {
+ index -= 5
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+
+ // TODO: For big endian, use index values as found in s390x implementation.
+ if index < 247 {
+ wvalue = ((uint(scalar[index/8]) >> (index % 8)) + (uint(scalar[index/8+1]) << (8 - (index % 8)))) & 0x3f
+ } else {
+ wvalue = (uint(scalar[index/8]) >> (index % 8)) & 0x3f
+ }
+
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(&t0, precomp[:], sel)
+ p256NegCond(&t0, sign)
+ p256PointAddAsm(&t1, p, &t0)
+ p256MovCond(&t1, &t1, p, sel)
+ p256MovCond(p, &t1, &t0, zero)
+ zero |= sel
+ }
+
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+
+ // TODO: Use index for big endian as found in s390x implementation.
+ wvalue = (uint(scalar[0]) << 1) & 0x3f
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(&t0, precomp[:], sel)
+ p256NegCond(&t0, sign)
+ p256PointAddAsm(&t1, p, &t0)
+ p256MovCond(&t1, &t1, p, sel)
+ p256MovCond(p, &t1, &t0, zero)
+}
diff --git a/src/crypto/elliptic/p256_s390x.go b/src/crypto/elliptic/p256_s390x.go
new file mode 100644
index 0000000..735e9f5
--- /dev/null
+++ b/src/crypto/elliptic/p256_s390x.go
@@ -0,0 +1,576 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build s390x
+
+package elliptic
+
+import (
+ "crypto/subtle"
+ "internal/cpu"
+ "math/big"
+ "unsafe"
+)
+
+const (
+ offsetS390xHasVX = unsafe.Offsetof(cpu.S390X.HasVX)
+ offsetS390xHasVE1 = unsafe.Offsetof(cpu.S390X.HasVXE)
+)
+
+type p256CurveFast struct {
+ *CurveParams
+}
+
+type p256Point struct {
+ x [32]byte
+ y [32]byte
+ z [32]byte
+}
+
+var (
+ p256 Curve
+ p256PreFast *[37][64]p256Point
+)
+
+//go:noescape
+func p256MulInternalTrampolineSetup()
+
+//go:noescape
+func p256SqrInternalTrampolineSetup()
+
+//go:noescape
+func p256MulInternalVX()
+
+//go:noescape
+func p256MulInternalVMSL()
+
+//go:noescape
+func p256SqrInternalVX()
+
+//go:noescape
+func p256SqrInternalVMSL()
+
+func initP256Arch() {
+ if cpu.S390X.HasVX {
+ p256 = p256CurveFast{p256Params}
+ initTable()
+ return
+ }
+
+ // No vector support, use pure Go implementation.
+ p256 = p256Curve{p256Params}
+}
+
+func (curve p256CurveFast) Params() *CurveParams {
+ return curve.CurveParams
+}
+
+// Functions implemented in p256_asm_s390x.s
+// Montgomery multiplication modulo P256
+//
+//go:noescape
+func p256SqrAsm(res, in1 []byte)
+
+//go:noescape
+func p256MulAsm(res, in1, in2 []byte)
+
+// Montgomery square modulo P256
+func p256Sqr(res, in []byte) {
+ p256SqrAsm(res, in)
+}
+
+// Montgomery multiplication by 1
+//
+//go:noescape
+func p256FromMont(res, in []byte)
+
+// iff cond == 1 val <- -val
+//
+//go:noescape
+func p256NegCond(val *p256Point, cond int)
+
+// if cond == 0 res <- b; else res <- a
+//
+//go:noescape
+func p256MovCond(res, a, b *p256Point, cond int)
+
+// Constant time table access
+//
+//go:noescape
+func p256Select(point *p256Point, table []p256Point, idx int)
+
+//go:noescape
+func p256SelectBase(point *p256Point, table []p256Point, idx int)
+
+// Montgomery multiplication modulo Ord(G)
+//
+//go:noescape
+func p256OrdMul(res, in1, in2 []byte)
+
+// Montgomery square modulo Ord(G), repeated n times
+func p256OrdSqr(res, in []byte, n int) {
+ copy(res, in)
+ for i := 0; i < n; i++ {
+ p256OrdMul(res, res, res)
+ }
+}
+
+// Point add with P2 being affine point
+// If sign == 1 -> P2 = -P2
+// If sel == 0 -> P3 = P1
+// if zero == 0 -> P3 = P2
+//
+//go:noescape
+func p256PointAddAffineAsm(P3, P1, P2 *p256Point, sign, sel, zero int)
+
+// Point add
+//
+//go:noescape
+func p256PointAddAsm(P3, P1, P2 *p256Point) int
+
+//go:noescape
+func p256PointDoubleAsm(P3, P1 *p256Point)
+
+func (curve p256CurveFast) Inverse(k *big.Int) *big.Int {
+ if k.Cmp(p256Params.N) >= 0 {
+ // This should never happen.
+ reducedK := new(big.Int).Mod(k, p256Params.N)
+ k = reducedK
+ }
+
+ // table will store precomputed powers of x. The 32 bytes at index
+ // i store x^(i+1).
+ var table [15][32]byte
+
+ x := fromBig(k)
+ // This code operates in the Montgomery domain where R = 2^256 mod n
+ // and n is the order of the scalar field. (See initP256 for the
+ // value.) Elements in the Montgomery domain take the form a×R, and
+ // multiplying x and y in that domain calculates (x × y × R^-1) mod n.
+ // RR is R×R mod n, so the Montgomery multiplication of x and RR gives
+ // x×R, i.e. converts x into the Montgomery domain. RR is stored here
+ // in big-endian form.
+ RR := []byte{0x66, 0xe1, 0x2d, 0x94, 0xf3, 0xd9, 0x56, 0x20, 0x28, 0x45, 0xb2, 0x39, 0x2b, 0x6b, 0xec, 0x59,
+ 0x46, 0x99, 0x79, 0x9c, 0x49, 0xbd, 0x6f, 0xa6, 0x83, 0x24, 0x4c, 0x95, 0xbe, 0x79, 0xee, 0xa2}
+
+ p256OrdMul(table[0][:], x, RR)
+
+ // Prepare the table. There is no need for constant-time access because
+ // the power is not a secret. (Entry 0 is never used.)
+ for i := 2; i < 16; i += 2 {
+ p256OrdSqr(table[i-1][:], table[(i/2)-1][:], 1)
+ p256OrdMul(table[i][:], table[i-1][:], table[0][:])
+ }
+
+ copy(x, table[14][:]) // f
+
+ p256OrdSqr(x[0:32], x[0:32], 4)
+ p256OrdMul(x[0:32], x[0:32], table[14][:]) // ff
+ t := make([]byte, 32)
+ copy(t, x)
+
+ p256OrdSqr(x, x, 8)
+ p256OrdMul(x, x, t) // ffff
+ copy(t, x)
+
+ p256OrdSqr(x, x, 16)
+ p256OrdMul(x, x, t) // ffffffff
+ copy(t, x)
+
+ p256OrdSqr(x, x, 64) // ffffffff0000000000000000
+ p256OrdMul(x, x, t) // ffffffff00000000ffffffff
+ p256OrdSqr(x, x, 32) // ffffffff00000000ffffffff00000000
+ p256OrdMul(x, x, t) // ffffffff00000000ffffffffffffffff
+
+ // Remaining 32 windows
+ expLo := [32]byte{0xb, 0xc, 0xe, 0x6, 0xf, 0xa, 0xa, 0xd, 0xa, 0x7, 0x1, 0x7, 0x9, 0xe, 0x8, 0x4,
+ 0xf, 0x3, 0xb, 0x9, 0xc, 0xa, 0xc, 0x2, 0xf, 0xc, 0x6, 0x3, 0x2, 0x5, 0x4, 0xf}
+ for i := 0; i < 32; i++ {
+ p256OrdSqr(x, x, 4)
+ p256OrdMul(x, x, table[expLo[i]-1][:])
+ }
+
+ // Multiplying by one in the Montgomery domain converts a Montgomery
+ // value out of the domain.
+ one := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
+ p256OrdMul(x, x, one)
+
+ return new(big.Int).SetBytes(x)
+}
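+
+// The chain in Inverse raises x to n-2 modulo the group order n (Fermat
+// inversion): the explicit squarings and multiplications build the top half
+// of the exponent, ffffffff00000000ffffffffffffffff, and expLo lists its
+// remaining 4-bit digits, bce6faada7179e84f3b9cac2fc63254f.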
+
+// fromBig converts a *big.Int into a format used by this code.
+func fromBig(big *big.Int) []byte {
+ // This could be done a lot more efficiently...
+ res := big.Bytes()
+ if len(res) == 32 {
+ return res
+ }
+ t := make([]byte, 32)
+ offset := 32 - len(res)
+ for i := len(res) - 1; i >= 0; i-- {
+ t[i+offset] = res[i]
+ }
+ return t
+}
+
+// p256GetMultiplier ensures the byte array has exactly 32 bytes. If the
+// scalar is equal to or greater than the order of the group, it is reduced
+// modulo that order.
+func p256GetMultiplier(in []byte) []byte {
+ n := new(big.Int).SetBytes(in)
+
+ if n.Cmp(p256Params.N) >= 0 {
+ n.Mod(n, p256Params.N)
+ }
+ return fromBig(n)
+}
+
+// p256MulAsm operates in a Montgomery domain with R = 2^256 mod p, where p is the
+// underlying field of the curve. (See initP256 for the value.) Thus rr here is
+// R×R mod p. See comment in Inverse about how this is used.
+var rr = []byte{0x00, 0x00, 0x00, 0x04, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+
+// (This is one, in the Montgomery domain.)
+var one = []byte{0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
+
+func maybeReduceModP(in *big.Int) *big.Int {
+ if in.Cmp(p256Params.P) < 0 {
+ return in
+ }
+ return new(big.Int).Mod(in, p256Params.P)
+}
+
+func (curve p256CurveFast) CombinedMult(bigX, bigY *big.Int, baseScalar, scalar []byte) (x, y *big.Int) {
+ var r1, r2 p256Point
+ scalarReduced := p256GetMultiplier(baseScalar)
+ r1IsInfinity := scalarIsZero(scalarReduced)
+ r1.p256BaseMult(scalarReduced)
+
+ copy(r2.x[:], fromBig(maybeReduceModP(bigX)))
+ copy(r2.y[:], fromBig(maybeReduceModP(bigY)))
+ copy(r2.z[:], one)
+ p256MulAsm(r2.x[:], r2.x[:], rr[:])
+ p256MulAsm(r2.y[:], r2.y[:], rr[:])
+
+ scalarReduced = p256GetMultiplier(scalar)
+ r2IsInfinity := scalarIsZero(scalarReduced)
+ r2.p256ScalarMult(scalarReduced)
+
+ var sum, double p256Point
+ pointsEqual := p256PointAddAsm(&sum, &r1, &r2)
+ p256PointDoubleAsm(&double, &r1)
+ p256MovCond(&sum, &double, &sum, pointsEqual)
+ p256MovCond(&sum, &r1, &sum, r2IsInfinity)
+ p256MovCond(&sum, &r2, &sum, r1IsInfinity)
+ return sum.p256PointToAffine()
+}
+
+func (curve p256CurveFast) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
+ var r p256Point
+ r.p256BaseMult(p256GetMultiplier(scalar))
+ return r.p256PointToAffine()
+}
+
+func (curve p256CurveFast) ScalarMult(bigX, bigY *big.Int, scalar []byte) (x, y *big.Int) {
+ var r p256Point
+ copy(r.x[:], fromBig(maybeReduceModP(bigX)))
+ copy(r.y[:], fromBig(maybeReduceModP(bigY)))
+ copy(r.z[:], one)
+ p256MulAsm(r.x[:], r.x[:], rr[:])
+ p256MulAsm(r.y[:], r.y[:], rr[:])
+ r.p256ScalarMult(p256GetMultiplier(scalar))
+ return r.p256PointToAffine()
+}
+
+// scalarIsZero returns 1 if scalar represents the zero value, and zero
+// otherwise.
+func scalarIsZero(scalar []byte) int {
+ b := byte(0)
+ for _, s := range scalar {
+ b |= s
+ }
+ return subtle.ConstantTimeByteEq(b, 0)
+}
+
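+// p256PointToAffine converts p from Jacobian to affine coordinates,
+// x = X/Z^2 and y = Y/Z^3, working in the Montgomery domain and converting
+// the results out with p256FromMont.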
+func (p *p256Point) p256PointToAffine() (x, y *big.Int) {
+ zInv := make([]byte, 32)
+ zInvSq := make([]byte, 32)
+
+ p256Inverse(zInv, p.z[:])
+ p256Sqr(zInvSq, zInv)
+ p256MulAsm(zInv, zInv, zInvSq)
+
+ p256MulAsm(zInvSq, p.x[:], zInvSq)
+ p256MulAsm(zInv, p.y[:], zInv)
+
+ p256FromMont(zInvSq, zInvSq)
+ p256FromMont(zInv, zInv)
+
+ return new(big.Int).SetBytes(zInvSq), new(big.Int).SetBytes(zInv)
+}
+
+// p256Inverse sets out to in^-1 mod p.
+func p256Inverse(out, in []byte) {
+ var stack [6 * 32]byte
+ p2 := stack[32*0 : 32*0+32]
+ p4 := stack[32*1 : 32*1+32]
+ p8 := stack[32*2 : 32*2+32]
+ p16 := stack[32*3 : 32*3+32]
+ p32 := stack[32*4 : 32*4+32]
+
+ p256Sqr(out, in)
+ p256MulAsm(p2, out, in) // 3*p
+
+ p256Sqr(out, p2)
+ p256Sqr(out, out)
+ p256MulAsm(p4, out, p2) // f*p
+
+ p256Sqr(out, p4)
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256MulAsm(p8, out, p4) // ff*p
+
+ p256Sqr(out, p8)
+
+ for i := 0; i < 7; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(p16, out, p8) // ffff*p
+
+ p256Sqr(out, p16)
+ for i := 0; i < 15; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(p32, out, p16) // ffffffff*p
+
+ p256Sqr(out, p32)
+
+ for i := 0; i < 31; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, in)
+
+ for i := 0; i < 32*4; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, p32)
+
+ for i := 0; i < 32; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, p32)
+
+ for i := 0; i < 16; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, p16)
+
+ for i := 0; i < 8; i++ {
+ p256Sqr(out, out)
+ }
+ p256MulAsm(out, out, p8)
+
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256MulAsm(out, out, p4)
+
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256MulAsm(out, out, p2)
+
+ p256Sqr(out, out)
+ p256Sqr(out, out)
+ p256MulAsm(out, out, in)
+}
+
+func boothW5(in uint) (int, int) {
+ var s uint = ^((in >> 5) - 1)
+ var d uint = (1 << 6) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
+
+func boothW7(in uint) (int, int) {
+ var s uint = ^((in >> 7) - 1)
+ var d uint = (1 << 8) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
+
+func initTable() {
+ p256PreFast = new([37][64]p256Point) // z coordinate not used
+ basePoint := p256Point{
+ x: [32]byte{0x18, 0x90, 0x5f, 0x76, 0xa5, 0x37, 0x55, 0xc6, 0x79, 0xfb, 0x73, 0x2b, 0x77, 0x62, 0x25, 0x10,
+ 0x75, 0xba, 0x95, 0xfc, 0x5f, 0xed, 0xb6, 0x01, 0x79, 0xe7, 0x30, 0xd4, 0x18, 0xa9, 0x14, 0x3c}, //(p256.x*2^256)%p
+ y: [32]byte{0x85, 0x71, 0xff, 0x18, 0x25, 0x88, 0x5d, 0x85, 0xd2, 0xe8, 0x86, 0x88, 0xdd, 0x21, 0xf3, 0x25,
+ 0x8b, 0x4a, 0xb8, 0xe4, 0xba, 0x19, 0xe4, 0x5c, 0xdd, 0xf2, 0x53, 0x57, 0xce, 0x95, 0x56, 0x0a}, //(p256.y*2^256)%p
+ z: [32]byte{0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, //(p256.z*2^256)%p
+ }
+
+ t1 := new(p256Point)
+ t2 := new(p256Point)
+ *t2 = basePoint
+
+ zInv := make([]byte, 32)
+ zInvSq := make([]byte, 32)
+ for j := 0; j < 64; j++ {
+ *t1 = *t2
+ for i := 0; i < 37; i++ {
+ // The window size is 7 so we need to double 7 times.
+ if i != 0 {
+ for k := 0; k < 7; k++ {
+ p256PointDoubleAsm(t1, t1)
+ }
+ }
+ // Convert the point to affine form. (Its values are
+ // still in Montgomery form however.)
+ p256Inverse(zInv, t1.z[:])
+ p256Sqr(zInvSq, zInv)
+ p256MulAsm(zInv, zInv, zInvSq)
+
+ p256MulAsm(t1.x[:], t1.x[:], zInvSq)
+ p256MulAsm(t1.y[:], t1.y[:], zInv)
+
+ copy(t1.z[:], basePoint.z[:])
+ // Update the table entry
+ copy(p256PreFast[i][j].x[:], t1.x[:])
+ copy(p256PreFast[i][j].y[:], t1.y[:])
+ }
+ if j == 0 {
+ p256PointDoubleAsm(t2, &basePoint)
+ } else {
+ p256PointAddAsm(t2, t2, &basePoint)
+ }
+ }
+}
+
+func (p *p256Point) p256BaseMult(scalar []byte) {
+ wvalue := (uint(scalar[31]) << 1) & 0xff
+ sel, sign := boothW7(uint(wvalue))
+ p256SelectBase(p, p256PreFast[0][:], sel)
+ p256NegCond(p, sign)
+
+ copy(p.z[:], one[:])
+ var t0 p256Point
+
+ copy(t0.z[:], one[:])
+
+ index := uint(6)
+ zero := sel
+
+ for i := 1; i < 37; i++ {
+ if index < 247 {
+ wvalue = ((uint(scalar[31-index/8]) >> (index % 8)) + (uint(scalar[31-index/8-1]) << (8 - (index % 8)))) & 0xff
+ } else {
+ wvalue = (uint(scalar[31-index/8]) >> (index % 8)) & 0xff
+ }
+ index += 7
+ sel, sign = boothW7(uint(wvalue))
+ p256SelectBase(&t0, p256PreFast[i][:], sel)
+ p256PointAddAffineAsm(p, p, &t0, sign, sel, zero)
+ zero |= sel
+ }
+}
+
+func (p *p256Point) p256ScalarMult(scalar []byte) {
+ // precomp is a table of precomputed points that stores multiples of p,
+ // from 1×p through 16×p.
+ var precomp [16]p256Point
+ var t0, t1, t2, t3 p256Point
+
+ // Prepare the table
+ precomp[0] = *p // 1
+
+ p256PointDoubleAsm(&t0, p)
+ p256PointDoubleAsm(&t1, &t0)
+ p256PointDoubleAsm(&t2, &t1)
+ p256PointDoubleAsm(&t3, &t2)
+ precomp[1] = t0 // 2
+ precomp[3] = t1 // 4
+ precomp[7] = t2 // 8
+ precomp[15] = t3 // 16
+
+ p256PointAddAsm(&t0, &t0, p)
+ p256PointAddAsm(&t1, &t1, p)
+ p256PointAddAsm(&t2, &t2, p)
+ precomp[2] = t0 // 3
+ precomp[4] = t1 // 5
+ precomp[8] = t2 // 9
+
+ p256PointDoubleAsm(&t0, &t0)
+ p256PointDoubleAsm(&t1, &t1)
+ precomp[5] = t0 // 6
+ precomp[9] = t1 // 10
+
+ p256PointAddAsm(&t2, &t0, p)
+ p256PointAddAsm(&t1, &t1, p)
+ precomp[6] = t2 // 7
+ precomp[10] = t1 // 11
+
+ p256PointDoubleAsm(&t0, &t0)
+ p256PointDoubleAsm(&t2, &t2)
+ precomp[11] = t0 // 12
+ precomp[13] = t2 // 14
+
+ p256PointAddAsm(&t0, &t0, p)
+ p256PointAddAsm(&t2, &t2, p)
+ precomp[12] = t0 // 13
+ precomp[14] = t2 // 15
+
+ // Start scanning the window from the top bit.
+ index := uint(254)
+ var sel, sign int
+
+ wvalue := (uint(scalar[31-index/8]) >> (index % 8)) & 0x3f
+ sel, _ = boothW5(uint(wvalue))
+ p256Select(p, precomp[:], sel)
+ zero := sel
+
+ for index > 4 {
+ index -= 5
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+
+ if index < 247 {
+ wvalue = ((uint(scalar[31-index/8]) >> (index % 8)) + (uint(scalar[31-index/8-1]) << (8 - (index % 8)))) & 0x3f
+ } else {
+ wvalue = (uint(scalar[31-index/8]) >> (index % 8)) & 0x3f
+ }
+
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(&t0, precomp[:], sel)
+ p256NegCond(&t0, sign)
+ p256PointAddAsm(&t1, p, &t0)
+ p256MovCond(&t1, &t1, p, sel)
+ p256MovCond(p, &t1, &t0, zero)
+ zero |= sel
+ }
+
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+
+ wvalue = (uint(scalar[31]) << 1) & 0x3f
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(&t0, precomp[:], sel)
+ p256NegCond(&t0, sign)
+ p256PointAddAsm(&t1, p, &t0)
+ p256MovCond(&t1, &t1, p, sel)
+ p256MovCond(p, &t1, &t0, zero)
+}
diff --git a/src/crypto/elliptic/p256_test.go b/src/crypto/elliptic/p256_test.go
new file mode 100644
index 0000000..a607766
--- /dev/null
+++ b/src/crypto/elliptic/p256_test.go
@@ -0,0 +1,152 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+import (
+ "math/big"
+ "testing"
+)
+
+type scalarMultTest struct {
+ k string
+ xIn, yIn string
+ xOut, yOut string
+}
+
+var p256MultTests = []scalarMultTest{
+ {
+ "2a265f8bcbdcaf94d58519141e578124cb40d64a501fba9c11847b28965bc737",
+ "023819813ac969847059028ea88a1f30dfbcde03fc791d3a252c6b41211882ea",
+ "f93e4ae433cc12cf2a43fc0ef26400c0e125508224cdb649380f25479148a4ad",
+ "4d4de80f1534850d261075997e3049321a0864082d24a917863366c0724f5ae3",
+ "a22d2b7f7818a3563e0f7a76c9bf0921ac55e06e2e4d11795b233824b1db8cc0",
+ },
+ {
+ "313f72ff9fe811bf573176231b286a3bdb6f1b14e05c40146590727a71c3bccd",
+ "cc11887b2d66cbae8f4d306627192522932146b42f01d3c6f92bd5c8ba739b06",
+ "a2f08a029cd06b46183085bae9248b0ed15b70280c7ef13a457f5af382426031",
+ "831c3f6b5f762d2f461901577af41354ac5f228c2591f84f8a6e51e2e3f17991",
+ "93f90934cd0ef2c698cc471c60a93524e87ab31ca2412252337f364513e43684",
+ },
+}
+
+func TestP256BaseMult(t *testing.T) {
+ p256 := P256()
+ p256Generic := genericParamsForCurve(p256)
+
+ scalars := make([]*big.Int, 0, len(p224BaseMultTests)+1)
+ for _, e := range p224BaseMultTests {
+ k, _ := new(big.Int).SetString(e.k, 10)
+ scalars = append(scalars, k)
+ }
+ k := new(big.Int).SetInt64(1)
+ k.Lsh(k, 500)
+ scalars = append(scalars, k)
+
+ for i, k := range scalars {
+ x, y := p256.ScalarBaseMult(k.Bytes())
+ x2, y2 := p256Generic.ScalarBaseMult(k.Bytes())
+ if x.Cmp(x2) != 0 || y.Cmp(y2) != 0 {
+ t.Errorf("#%d: got (%x, %x), want (%x, %x)", i, x, y, x2, y2)
+ }
+
+ if testing.Short() && i > 5 {
+ break
+ }
+ }
+}
+
+func TestP256Mult(t *testing.T) {
+ p256 := P256()
+ for i, e := range p256MultTests {
+ x, _ := new(big.Int).SetString(e.xIn, 16)
+ y, _ := new(big.Int).SetString(e.yIn, 16)
+ k, _ := new(big.Int).SetString(e.k, 16)
+ expectedX, _ := new(big.Int).SetString(e.xOut, 16)
+ expectedY, _ := new(big.Int).SetString(e.yOut, 16)
+
+ xx, yy := p256.ScalarMult(x, y, k.Bytes())
+ if xx.Cmp(expectedX) != 0 || yy.Cmp(expectedY) != 0 {
+ t.Errorf("#%d: got (%x, %x), want (%x, %x)", i, xx, yy, expectedX, expectedY)
+ }
+ }
+}
+
+type synthCombinedMult struct {
+ Curve
+}
+
+func (s synthCombinedMult) CombinedMult(bigX, bigY *big.Int, baseScalar, scalar []byte) (x, y *big.Int) {
+ x1, y1 := s.ScalarBaseMult(baseScalar)
+ x2, y2 := s.ScalarMult(bigX, bigY, scalar)
+ return s.Add(x1, y1, x2, y2)
+}
+
+func TestP256CombinedMult(t *testing.T) {
+ type combinedMult interface {
+ Curve
+ CombinedMult(bigX, bigY *big.Int, baseScalar, scalar []byte) (x, y *big.Int)
+ }
+
+ p256, ok := P256().(combinedMult)
+ if !ok {
+ p256 = &synthCombinedMult{P256()}
+ }
+
+ gx := p256.Params().Gx
+ gy := p256.Params().Gy
+
+ zero := make([]byte, 32)
+ one := make([]byte, 32)
+ one[31] = 1
+ two := make([]byte, 32)
+ two[31] = 2
+
+ // 0×G + 0×G = ∞
+ x, y := p256.CombinedMult(gx, gy, zero, zero)
+ if x.Sign() != 0 || y.Sign() != 0 {
+ t.Errorf("0×G + 0×G = (%d, %d), should be ∞", x, y)
+ }
+
+ // 1×G + 0×G = G
+ x, y = p256.CombinedMult(gx, gy, one, zero)
+ if x.Cmp(gx) != 0 || y.Cmp(gy) != 0 {
+ t.Errorf("1×G + 0×G = (%d, %d), should be (%d, %d)", x, y, gx, gy)
+ }
+
+ // 0×G + 1×G = G
+ x, y = p256.CombinedMult(gx, gy, zero, one)
+ if x.Cmp(gx) != 0 || y.Cmp(gy) != 0 {
+ t.Errorf("0×G + 1×G = (%d, %d), should be (%d, %d)", x, y, gx, gy)
+ }
+
+ // 1×G + 1×G = 2×G
+ x, y = p256.CombinedMult(gx, gy, one, one)
+ ggx, ggy := p256.ScalarBaseMult(two)
+ if x.Cmp(ggx) != 0 || y.Cmp(ggy) != 0 {
+ t.Errorf("1×G + 1×G = (%d, %d), should be (%d, %d)", x, y, ggx, ggy)
+ }
+
+ minusOne := new(big.Int).Sub(p256.Params().N, big.NewInt(1))
+ // 1×G + (-1)×G = ∞
+ x, y = p256.CombinedMult(gx, gy, one, minusOne.Bytes())
+ if x.Sign() != 0 || y.Sign() != 0 {
+ t.Errorf("1×G + (-1)×G = (%d, %d), should be ∞", x, y)
+ }
+}
+
+func TestIssue52075(t *testing.T) {
+ Gx, Gy := P256().Params().Gx, P256().Params().Gy
+ scalar := make([]byte, 33)
+ scalar[32] = 1 // a 33-byte big-endian encoding of the value 1
+ x, y := P256().ScalarBaseMult(scalar)
+ if x.Cmp(Gx) != 0 || y.Cmp(Gy) != 0 {
+ t.Errorf("unexpected output (%v,%v)", x, y)
+ }
+ x, y = P256().ScalarMult(Gx, Gy, scalar)
+ if x.Cmp(Gx) != 0 || y.Cmp(Gy) != 0 {
+ t.Errorf("unexpected output (%v,%v)", x, y)
+ }
+}
diff --git a/src/crypto/elliptic/p384.go b/src/crypto/elliptic/p384.go
new file mode 100644
index 0000000..33a441d
--- /dev/null
+++ b/src/crypto/elliptic/p384.go
@@ -0,0 +1,144 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+import (
+ "crypto/elliptic/internal/nistec"
+ "crypto/rand"
+ "math/big"
+)
+
+// p384Curve is a Curve implementation based on nistec.P384Point.
+//
+// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
+// legacy idiosyncrasies it requires, such as invalid and infinity point
+// handling.
+//
+// To interact with the nistec package, points are encoded into and decoded from
+// properly formatted byte slices. All big.Int use is limited to this package.
+// Encoding and decoding are about 1/1000th of the runtime of a scalar
+// multiplication, so the overhead is acceptable.
+type p384Curve struct {
+ params *CurveParams
+}
+
+var p384 p384Curve
+var _ Curve = p384
+
+func initP384() {
+ p384.params = &CurveParams{
+ Name: "P-384",
+ BitSize: 384,
+ // FIPS 186-4, section D.1.2.4
+ P: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
+ "46667948293404245721771496870329047266088258938001861606973112319"),
+ N: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
+ "46667946905279627659399113263569398956308152294913554433653942643"),
+ B: bigFromHex("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088" +
+ "f5013875ac656398d8a2ed19d2a85c8edd3ec2aef"),
+ Gx: bigFromHex("aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741" +
+ "e082542a385502f25dbf55296c3a545e3872760ab7"),
+ Gy: bigFromHex("3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da31" +
+ "13b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f"),
+ }
+}
+
+func (curve p384Curve) Params() *CurveParams {
+ return curve.params
+}
+
+func (curve p384Curve) IsOnCurve(x, y *big.Int) bool {
+ // IsOnCurve is documented to reject (0, 0), the conventional point at
+ // infinity, which however is accepted by p384PointFromAffine.
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return false
+ }
+ _, ok := p384PointFromAffine(x, y)
+ return ok
+}
+
+func p384PointFromAffine(x, y *big.Int) (p *nistec.P384Point, ok bool) {
+ // (0, 0) is by convention the point at infinity, which can't be represented
+ // in affine coordinates. Marshal incorrectly encodes it as an uncompressed
+ // point, which SetBytes would correctly reject. See Issue 37294.
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return nistec.NewP384Point(), true
+ }
+ if x.Sign() < 0 || y.Sign() < 0 {
+ return nil, false
+ }
+ if x.BitLen() > 384 || y.BitLen() > 384 {
+ return nil, false
+ }
+ p, err := nistec.NewP384Point().SetBytes(Marshal(P384(), x, y))
+ if err != nil {
+ return nil, false
+ }
+ return p, true
+}
+
+func p384PointToAffine(p *nistec.P384Point) (x, y *big.Int) {
+ out := p.Bytes()
+ if len(out) == 1 && out[0] == 0 {
+ // This is the correct encoding of the point at infinity, which
+ // Unmarshal does not support. See Issue 37294.
+ return new(big.Int), new(big.Int)
+ }
+ x, y = Unmarshal(P384(), out)
+ if x == nil {
+ panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
+ }
+ return x, y
+}
+
+// p384RandomPoint returns a random point on the curve. It's used when Add,
+// Double, or ScalarMult are fed a point not on the curve, which is undefined
+// behavior. Originally, we did the math on it anyway (which allows
+// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
+// happening in the first place. Now, we just can't construct a nistec.P384Point
+// for an invalid pair of coordinates, because that API is safer. If we panic,
+// we risk introducing a DoS. If we return nil, we risk a panic. If we return
+// the input, ecdsa.Verify might fail open. The safest course seems to be to
+// return a valid, random point, which hopefully won't help the attacker.
+func p384RandomPoint() (x, y *big.Int) {
+ _, x, y, err := GenerateKey(P384(), rand.Reader)
+ if err != nil {
+ panic("crypto/elliptic: failed to generate random point")
+ }
+ return x, y
+}
+
+func (p384Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+ p1, ok := p384PointFromAffine(x1, y1)
+ if !ok {
+ return p384RandomPoint()
+ }
+ p2, ok := p384PointFromAffine(x2, y2)
+ if !ok {
+ return p384RandomPoint()
+ }
+ return p384PointToAffine(p1.Add(p1, p2))
+}
+
+func (p384Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+ p, ok := p384PointFromAffine(x1, y1)
+ if !ok {
+ return p384RandomPoint()
+ }
+ return p384PointToAffine(p.Double(p))
+}
+
+func (p384Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
+ p, ok := p384PointFromAffine(Bx, By)
+ if !ok {
+ return p384RandomPoint()
+ }
+ return p384PointToAffine(p.ScalarMult(p, scalar))
+}
+
+func (p384Curve) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
+ p := nistec.NewP384Generator()
+ return p384PointToAffine(p.ScalarMult(p, scalar))
+}
diff --git a/src/crypto/elliptic/p521.go b/src/crypto/elliptic/p521.go
new file mode 100644
index 0000000..6a3ade3
--- /dev/null
+++ b/src/crypto/elliptic/p521.go
@@ -0,0 +1,165 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elliptic
+
+import (
+ "crypto/elliptic/internal/nistec"
+ "crypto/rand"
+ "math/big"
+)
+
+// p521Curve is a Curve implementation based on nistec.P521Point.
+//
+// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
+// legacy idiosyncrasies it requires, such as invalid and infinity point
+// handling.
+//
+// To interact with the nistec package, points are encoded into and decoded from
+// properly formatted byte slices. All big.Int use is limited to this package.
+// Encoding and decoding are about 1/1000th of the runtime of a scalar
+// multiplication, so the overhead is acceptable.
+type p521Curve struct {
+ params *CurveParams
+}
+
+var p521 p521Curve
+var _ Curve = p521
+
+func initP521() {
+ p521.params = &CurveParams{
+ Name: "P-521",
+ BitSize: 521,
+ // FIPS 186-4, section D.1.2.5
+ P: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
+ "0540939446345918554318339765605212255964066145455497729631139148" +
+ "0858037121987999716643812574028291115057151"),
+ N: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
+ "0540939446345918554318339765539424505774633321719753296399637136" +
+ "3321113864768612440380340372808892707005449"),
+ B: bigFromHex("0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8" +
+ "b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef" +
+ "451fd46b503f00"),
+ Gx: bigFromHex("00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f8" +
+ "28af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf9" +
+ "7e7e31c2e5bd66"),
+ Gy: bigFromHex("011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817" +
+ "afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088" +
+ "be94769fd16650"),
+ }
+}
+
+func (curve p521Curve) Params() *CurveParams {
+ return curve.params
+}
+
+func (curve p521Curve) IsOnCurve(x, y *big.Int) bool {
+ // IsOnCurve is documented to reject (0, 0), the conventional point at
+ // infinity, which however is accepted by p521PointFromAffine.
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return false
+ }
+ _, ok := p521PointFromAffine(x, y)
+ return ok
+}
+
+func p521PointFromAffine(x, y *big.Int) (p *nistec.P521Point, ok bool) {
+ // (0, 0) is by convention the point at infinity, which can't be represented
+ // in affine coordinates. Marshal incorrectly encodes it as an uncompressed
+ // point, which SetBytes would correctly reject. See Issue 37294.
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return nistec.NewP521Point(), true
+ }
+ if x.Sign() < 0 || y.Sign() < 0 {
+ return nil, false
+ }
+ if x.BitLen() > 521 || y.BitLen() > 521 {
+ return nil, false
+ }
+ p, err := nistec.NewP521Point().SetBytes(Marshal(P521(), x, y))
+ if err != nil {
+ return nil, false
+ }
+ return p, true
+}
+
+func p521PointToAffine(p *nistec.P521Point) (x, y *big.Int) {
+ out := p.Bytes()
+ if len(out) == 1 && out[0] == 0 {
+ // This is the correct encoding of the point at infinity, which
+ // Unmarshal does not support. See Issue 37294.
+ return new(big.Int), new(big.Int)
+ }
+ x, y = Unmarshal(P521(), out)
+ if x == nil {
+ panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
+ }
+ return x, y
+}
+
+// p521RandomPoint returns a random point on the curve. It's used when Add,
+// Double, or ScalarMult are fed a point not on the curve, which is undefined
+// behavior. Originally, we did the math on it anyway (which allows
+// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
+// happening in the first place. Now, we just can't construct a nistec.P521Point
+// for an invalid pair of coordinates, because that API is safer. If we panic,
+// we risk introducing a DoS. If we return nil, we risk a panic. If we return
+// the input, ecdsa.Verify might fail open. The safest course seems to be to
+// return a valid, random point, which hopefully won't help the attacker.
+func p521RandomPoint() (x, y *big.Int) {
+ _, x, y, err := GenerateKey(P521(), rand.Reader)
+ if err != nil {
+ panic("crypto/elliptic: failed to generate random point")
+ }
+ return x, y
+}
+
+func (p521Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+ p1, ok := p521PointFromAffine(x1, y1)
+ if !ok {
+ return p521RandomPoint()
+ }
+ p2, ok := p521PointFromAffine(x2, y2)
+ if !ok {
+ return p521RandomPoint()
+ }
+ return p521PointToAffine(p1.Add(p1, p2))
+}
+
+func (p521Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+ p, ok := p521PointFromAffine(x1, y1)
+ if !ok {
+ return p521RandomPoint()
+ }
+ return p521PointToAffine(p.Double(p))
+}
+
+func (p521Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
+ p, ok := p521PointFromAffine(Bx, By)
+ if !ok {
+ return p521RandomPoint()
+ }
+ return p521PointToAffine(p.ScalarMult(p, scalar))
+}
+
+func (p521Curve) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
+ p := nistec.NewP521Generator()
+ return p521PointToAffine(p.ScalarMult(p, scalar))
+}
+
+func bigFromDecimal(s string) *big.Int {
+ b, ok := new(big.Int).SetString(s, 10)
+ if !ok {
+ panic("invalid encoding")
+ }
+ return b
+}
+
+func bigFromHex(s string) *big.Int {
+ b, ok := new(big.Int).SetString(s, 16)
+ if !ok {
+ panic("invalid encoding")
+ }
+ return b
+}
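+
+// A minimal usage sketch of this package's public API, as seen from a
+// client package (hypothetical example, not part of this file):
+//
+//	priv, x, y, err := elliptic.GenerateKey(elliptic.P256(), rand.Reader)
+//	if err != nil {
+//		panic(err)
+//	}
+//	buf := elliptic.Marshal(elliptic.P256(), x, y) // uncompressed point
+//	x2, y2 := elliptic.Unmarshal(elliptic.P256(), buf)
+//	_, _, _ = priv, x2, y2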