Diffstat (limited to 'src/crypto/internal/edwards25519')
-rw-r--r--  src/crypto/internal/edwards25519/doc.go                         22
-rw-r--r--  src/crypto/internal/edwards25519/edwards25519.go               426
-rw-r--r--  src/crypto/internal/edwards25519/edwards25519_test.go          313
-rw-r--r--  src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go    294
-rw-r--r--  src/crypto/internal/edwards25519/field/_asm/go.mod              12
-rw-r--r--  src/crypto/internal/edwards25519/field/_asm/go.sum              32
-rw-r--r--  src/crypto/internal/edwards25519/field/fe.go                   420
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_alias_test.go        140
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_amd64.go              15
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_amd64.s              378
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_amd64_noasm.go        11
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_arm64.go              15
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_arm64.s               42
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_arm64_noasm.go        11
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_bench_test.go         49
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_generic.go           266
-rw-r--r--  src/crypto/internal/edwards25519/field/fe_test.go              560
-rw-r--r--  src/crypto/internal/edwards25519/scalar.go                     343
-rw-r--r--  src/crypto/internal/edwards25519/scalar_alias_test.go          108
-rw-r--r--  src/crypto/internal/edwards25519/scalar_fiat.go               1147
-rw-r--r--  src/crypto/internal/edwards25519/scalar_test.go                249
-rw-r--r--  src/crypto/internal/edwards25519/scalarmult.go                 214
-rw-r--r--  src/crypto/internal/edwards25519/scalarmult_test.go            209
-rw-r--r--  src/crypto/internal/edwards25519/tables.go                     129
-rw-r--r--  src/crypto/internal/edwards25519/tables_test.go                119
25 files changed, 5524 insertions, 0 deletions
diff --git a/src/crypto/internal/edwards25519/doc.go b/src/crypto/internal/edwards25519/doc.go
new file mode 100644
index 0000000..8cba6fe
--- /dev/null
+++ b/src/crypto/internal/edwards25519/doc.go
@@ -0,0 +1,22 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edwards25519 implements group logic for the twisted Edwards curve
+//
+// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
+//
+// This is better known as the Edwards curve equivalent to Curve25519, and is
+// the curve used by the Ed25519 signature scheme.
+//
+// Most users don't need this package, and should instead use crypto/ed25519 for
+// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
+// github.com/gtank/ristretto255 for prime order group logic.
+//
+// However, developers who do need to interact with low-level edwards25519
+// operations can use filippo.io/edwards25519, an extended version of this
+// package repackaged as an importable module.
+//
+// (Note that filippo.io/edwards25519 and github.com/gtank/ristretto255 are not
+// maintained by the Go team and are not covered by the Go 1 Compatibility Promise.)
+package edwards25519
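
A quick usage sketch of the API this CL introduces (Point, its constructors, and the
encoding methods shown in edwards25519.go below). Since this is an internal package,
external code would use the filippo.io/edwards25519 mirror mentioned above, which
exposes the same methods; all calls here appear later in this diff:

	// Minimal sketch: double the basepoint and round-trip its encoding.
	B := edwards25519.NewGeneratorPoint()
	p := new(edwards25519.Point).Add(B, B) // p = B + B = [2]B
	enc := p.Bytes()                       // canonical 32-byte encoding
	q, err := new(edwards25519.Point).SetBytes(enc)
	if err != nil {
		panic(err)
	}
	if q.Equal(p) != 1 { // Equal returns 1 for equivalent points
		panic("round-trip mismatch")
	}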
diff --git a/src/crypto/internal/edwards25519/edwards25519.go b/src/crypto/internal/edwards25519/edwards25519.go
new file mode 100644
index 0000000..71e9c09
--- /dev/null
+++ b/src/crypto/internal/edwards25519/edwards25519.go
@@ -0,0 +1,426 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/internal/edwards25519/field"
+ "errors"
+)
+
+// Point types.
+
+type projP1xP1 struct {
+ X, Y, Z, T field.Element
+}
+
+type projP2 struct {
+ X, Y, Z field.Element
+}
+
+// Point represents a point on the edwards25519 curve.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is NOT valid, and it may be used only as a receiver.
+type Point struct {
+ // The point is internally represented in extended coordinates (X, Y, Z, T)
+ // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
+ x, y, z, t field.Element
+
+	// Make the type not comparable (i.e. not usable with == or as a map key), as
+ // equivalent points can be represented by different Go values.
+ _ incomparable
+}
+
+type incomparable [0]func()
+
+func checkInitialized(points ...*Point) {
+ for _, p := range points {
+ if p.x == (field.Element{}) && p.y == (field.Element{}) {
+ panic("edwards25519: use of uninitialized Point")
+ }
+ }
+}
+
+type projCached struct {
+ YplusX, YminusX, Z, T2d field.Element
+}
+
+type affineCached struct {
+ YplusX, YminusX, T2d field.Element
+}
+
+// Constructors.
+
+func (v *projP2) Zero() *projP2 {
+ v.X.Zero()
+ v.Y.One()
+ v.Z.One()
+ return v
+}
+
+// identity is the identity point (0, 1), the neutral element of the group.
+var identity, _ = new(Point).SetBytes([]byte{
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+
+// NewIdentityPoint returns a new Point set to the identity.
+func NewIdentityPoint() *Point {
+ return new(Point).Set(identity)
+}
+
+// generator is the canonical curve basepoint. See TestGenerator for the
+// correspondence of this encoding with the values in RFC 8032.
+var generator, _ = new(Point).SetBytes([]byte{
+ 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
+
+// NewGeneratorPoint returns a new Point set to the canonical generator.
+func NewGeneratorPoint() *Point {
+ return new(Point).Set(generator)
+}
+
+func (v *projCached) Zero() *projCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.Z.One()
+ v.T2d.Zero()
+ return v
+}
+
+func (v *affineCached) Zero() *affineCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.T2d.Zero()
+ return v
+}
+
+// Assignments.
+
+// Set sets v = u, and returns v.
+func (v *Point) Set(u *Point) *Point {
+ *v = *u
+ return v
+}
+
+// Encoding.
+
+// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
+// Section 5.1.2.
+func (v *Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytes(&buf)
+}
+
+func (v *Point) bytes(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ var zInv, x, y field.Element
+ zInv.Invert(&v.z) // zInv = 1 / Z
+ x.Multiply(&v.x, &zInv) // x = X / Z
+ y.Multiply(&v.y, &zInv) // y = Y / Z
+
+ out := copyFieldElement(buf, &y)
+ out[31] |= byte(x.IsNegative() << 7)
+ return out
+}
+
+var feOne = new(field.Element).One()
+
+// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
+// represent a valid point on the curve, SetBytes returns nil and an error,
+// and the receiver is unchanged. Otherwise, SetBytes returns v.
+//
+// Note that SetBytes accepts all non-canonical encodings of valid points.
+// That is, it follows decoding rules that match most implementations in
+// the ecosystem rather than RFC 8032.
+func (v *Point) SetBytes(x []byte) (*Point, error) {
+ // Specifically, the non-canonical encodings that are accepted are
+ // 1) the ones where the field element is not reduced (see the
+ // (*field.Element).SetBytes docs) and
+ // 2) the ones where the x-coordinate is zero and the sign bit is set.
+ //
+ // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
+ // specifically the "Canonical A, R" section.
+
+ y, err := new(field.Element).SetBytes(x)
+ if err != nil {
+ return nil, errors.New("edwards25519: invalid point encoding length")
+ }
+
+ // -x² + y² = 1 + dx²y²
+ // x² + dx²y² = x²(dy² + 1) = y² - 1
+ // x² = (y² - 1) / (dy² + 1)
+
+ // u = y² - 1
+ y2 := new(field.Element).Square(y)
+ u := new(field.Element).Subtract(y2, feOne)
+
+ // v = dy² + 1
+ vv := new(field.Element).Multiply(y2, d)
+ vv = vv.Add(vv, feOne)
+
+ // x = +√(u/v)
+ xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
+ if wasSquare == 0 {
+ return nil, errors.New("edwards25519: invalid point encoding")
+ }
+
+ // Select the negative square root if the sign bit is set.
+ xxNeg := new(field.Element).Negate(xx)
+ xx = xx.Select(xxNeg, xx, int(x[31]>>7))
+
+ v.x.Set(xx)
+ v.y.Set(y)
+ v.z.One()
+ v.t.Multiply(xx, y) // xy = T / Z
+
+ return v, nil
+}
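
As a concrete example of the lax decoding rules: the encoding of y = 1 with the sign
bit set is accepted (case 2 above), and re-encoding with Bytes produces the canonical
form. This is the same pair exercised by TestNonCanonicalPoints further down in this
CL; the sketch assumes encoding/hex is imported:

	// In-package sketch: a non-canonical encoding decodes successfully
	// and re-encodes canonically.
	nonCanonical, _ := hex.DecodeString(
		"0100000000000000000000000000000000000000000000000000000000000080")
	p, _ := new(Point).SetBytes(nonCanonical) // accepted: x = 0, sign bit set
	canonical := hex.EncodeToString(p.Bytes())
	// canonical is "0100...00": y = 1 with the sign bit clear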
+
+func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
+ copy(buf[:], v.Bytes())
+ return buf[:]
+}
+
+// Conversions.
+
+func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
+ v.X.Multiply(&p.X, &p.T)
+ v.Y.Multiply(&p.Y, &p.Z)
+ v.Z.Multiply(&p.Z, &p.T)
+ return v
+}
+
+func (v *projP2) FromP3(p *Point) *projP2 {
+ v.X.Set(&p.x)
+ v.Y.Set(&p.y)
+ v.Z.Set(&p.z)
+ return v
+}
+
+func (v *Point) fromP1xP1(p *projP1xP1) *Point {
+ v.x.Multiply(&p.X, &p.T)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Multiply(&p.Z, &p.T)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+func (v *Point) fromP2(p *projP2) *Point {
+ v.x.Multiply(&p.X, &p.Z)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Square(&p.Z)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+// d is a constant in the curve equation.
+var d, _ = new(field.Element).SetBytes([]byte{
+ 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
+ 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
+ 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
+ 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
+var d2 = new(field.Element).Add(d, d)
+
+func (v *projCached) FromP3(p *Point) *projCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.Z.Set(&p.z)
+ v.T2d.Multiply(&p.t, d2)
+ return v
+}
+
+func (v *affineCached) FromP3(p *Point) *affineCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.T2d.Multiply(&p.t, d2)
+
+ var invZ field.Element
+ invZ.Invert(&p.z)
+ v.YplusX.Multiply(&v.YplusX, &invZ)
+ v.YminusX.Multiply(&v.YminusX, &invZ)
+ v.T2d.Multiply(&v.T2d, &invZ)
+ return v
+}
+
+// (Re)addition and subtraction.
+
+// Add sets v = p + q, and returns v.
+func (v *Point) Add(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Add(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+// Subtract sets v = p - q, and returns v.
+func (v *Point) Subtract(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Sub(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&ZZ2, &TT2d)
+ v.T.Subtract(&ZZ2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
+ v.T.Add(&ZZ2, &TT2d) // flipped sign
+ return v
+}
+
+func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&Z2, &TT2d)
+ v.T.Subtract(&Z2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&Z2, &TT2d) // flipped sign
+ v.T.Add(&Z2, &TT2d) // flipped sign
+ return v
+}
+
+// Doubling.
+
+func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
+ var XX, YY, ZZ2, XplusYsq field.Element
+
+ XX.Square(&p.X)
+ YY.Square(&p.Y)
+ ZZ2.Square(&p.Z)
+ ZZ2.Add(&ZZ2, &ZZ2)
+ XplusYsq.Add(&p.X, &p.Y)
+ XplusYsq.Square(&XplusYsq)
+
+ v.Y.Add(&YY, &XX)
+ v.Z.Subtract(&YY, &XX)
+
+ v.X.Subtract(&XplusYsq, &v.Y)
+ v.T.Subtract(&ZZ2, &v.Z)
+ return v
+}
+
+// Negation.
+
+// Negate sets v = -p, and returns v.
+func (v *Point) Negate(p *Point) *Point {
+ checkInitialized(p)
+ v.x.Negate(&p.x)
+ v.y.Set(&p.y)
+ v.z.Set(&p.z)
+ v.t.Negate(&p.t)
+ return v
+}
+
+// Equal returns 1 if v is equivalent to u, and 0 otherwise.
+func (v *Point) Equal(u *Point) int {
+ checkInitialized(v, u)
+
+ var t1, t2, t3, t4 field.Element
+ t1.Multiply(&v.x, &u.z)
+ t2.Multiply(&u.x, &v.z)
+ t3.Multiply(&v.y, &u.z)
+ t4.Multiply(&u.y, &v.z)
+
+ return t1.Equal(&t2) & t3.Equal(&t4)
+}
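
Note how Equal avoids field divisions by cross-multiplying the projective
coordinates (x1/z1 = x2/z2 iff x1·z2 = x2·z1), and returns an int rather than a bool
so results compose with bitwise operators in constant time. For example, given two
initialized Points p and q:

	// Branchless composition of checks: 1 only if both points are the identity.
	bothIdentity := p.Equal(NewIdentityPoint()) & q.Equal(NewIdentityPoint())
	_ = bothIdentity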
+
+// Constant-time operations
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *projCached) Select(a, b *projCached, cond int) *projCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.Z.Select(&a.Z, &b.Z, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *projCached) CondNeg(cond int) *projCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *affineCached) CondNeg(cond int) *affineCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
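
Select and CondNeg exist to support constant-time lookups in precomputed tables (see
tables.go elsewhere in this CL). A hedged sketch of how such a lookup fits together;
selectPoint is an illustrative helper, not the exact code from tables.go, and it
assumes crypto/subtle is imported:

	// selectPoint sets dest to x*Q in constant time, where table[j-1]
	// caches j*Q for j = 1..8 and x is in the range -8..8. Every table
	// entry is read regardless of x, so memory access patterns leak
	// nothing about the secret index.
	func selectPoint(dest *projCached, table *[8]projCached, x int8) {
		xmask := x >> 7                    // all ones if x < 0, else 0
		xabs := uint8((x + xmask) ^ xmask) // |x|

		dest.Zero()
		for j := 1; j <= 8; j++ {
			cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
			dest.Select(&table[j-1], dest, cond)
		}
		dest.CondNeg(int(xmask & 1)) // negate the result if x was negative
	}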
diff --git a/src/crypto/internal/edwards25519/edwards25519_test.go b/src/crypto/internal/edwards25519/edwards25519_test.go
new file mode 100644
index 0000000..307ae26
--- /dev/null
+++ b/src/crypto/internal/edwards25519/edwards25519_test.go
@@ -0,0 +1,313 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/internal/edwards25519/field"
+ "encoding/hex"
+ "internal/testenv"
+ "reflect"
+ "testing"
+)
+
+var B = NewGeneratorPoint()
+var I = NewIdentityPoint()
+
+func checkOnCurve(t *testing.T, points ...*Point) {
+ t.Helper()
+ for i, p := range points {
+ var XX, YY, ZZ, ZZZZ field.Element
+ XX.Square(&p.x)
+ YY.Square(&p.y)
+ ZZ.Square(&p.z)
+ ZZZZ.Square(&ZZ)
+ // -x² + y² = 1 + dx²y²
+ // -(X/Z)² + (Y/Z)² = 1 + d(X/Z)²(Y/Z)²
+ // (-X² + Y²)/Z² = 1 + (dX²Y²)/Z⁴
+ // (-X² + Y²)*Z² = Z⁴ + dX²Y²
+ var lhs, rhs field.Element
+ lhs.Subtract(&YY, &XX).Multiply(&lhs, &ZZ)
+ rhs.Multiply(d, &XX).Multiply(&rhs, &YY).Add(&rhs, &ZZZZ)
+ if lhs.Equal(&rhs) != 1 {
+ t.Errorf("X, Y, and Z do not specify a point on the curve\nX = %v\nY = %v\nZ = %v", p.x, p.y, p.z)
+ }
+ // xy = T/Z
+ lhs.Multiply(&p.x, &p.y)
+ rhs.Multiply(&p.z, &p.t)
+ if lhs.Equal(&rhs) != 1 {
+ t.Errorf("point %d is not valid\nX = %v\nY = %v\nZ = %v", i, p.x, p.y, p.z)
+ }
+ }
+}
+
+func TestGenerator(t *testing.T) {
+ // These are the coordinates of B from RFC 8032, Section 5.1, converted to
+ // little endian hex.
+ x := "1ad5258f602d56c9b2a7259560c72c695cdcd6fd31e2a4c0fe536ecdd3366921"
+ y := "5866666666666666666666666666666666666666666666666666666666666666"
+ if got := hex.EncodeToString(B.x.Bytes()); got != x {
+ t.Errorf("wrong B.x: got %s, expected %s", got, x)
+ }
+ if got := hex.EncodeToString(B.y.Bytes()); got != y {
+ t.Errorf("wrong B.y: got %s, expected %s", got, y)
+ }
+ if B.z.Equal(feOne) != 1 {
+ t.Errorf("wrong B.z: got %v, expected 1", B.z)
+ }
+ // Check that t is correct.
+ checkOnCurve(t, B)
+}
+
+func TestAddSubNegOnBasePoint(t *testing.T) {
+ checkLhs, checkRhs := &Point{}, &Point{}
+
+ checkLhs.Add(B, B)
+ tmpP2 := new(projP2).FromP3(B)
+ tmpP1xP1 := new(projP1xP1).Double(tmpP2)
+ checkRhs.fromP1xP1(tmpP1xP1)
+ if checkLhs.Equal(checkRhs) != 1 {
+ t.Error("B + B != [2]B")
+ }
+ checkOnCurve(t, checkLhs, checkRhs)
+
+ checkLhs.Subtract(B, B)
+ Bneg := new(Point).Negate(B)
+ checkRhs.Add(B, Bneg)
+ if checkLhs.Equal(checkRhs) != 1 {
+ t.Error("B - B != B + (-B)")
+ }
+ if I.Equal(checkLhs) != 1 {
+ t.Error("B - B != 0")
+ }
+ if I.Equal(checkRhs) != 1 {
+ t.Error("B + (-B) != 0")
+ }
+ checkOnCurve(t, checkLhs, checkRhs, Bneg)
+}
+
+func TestComparable(t *testing.T) {
+ if reflect.TypeOf(Point{}).Comparable() {
+ t.Error("Point is unexpectedly comparable")
+ }
+}
+
+func TestInvalidEncodings(t *testing.T) {
+ // An invalid point, that also happens to have y > p.
+ invalid := "efffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f"
+ p := NewGeneratorPoint()
+ if out, err := p.SetBytes(decodeHex(invalid)); err == nil {
+ t.Error("expected error for invalid point")
+ } else if out != nil {
+ t.Error("SetBytes did not return nil on an invalid encoding")
+ } else if p.Equal(B) != 1 {
+ t.Error("the Point was modified while decoding an invalid encoding")
+ }
+ checkOnCurve(t, p)
+}
+
+func TestNonCanonicalPoints(t *testing.T) {
+ type test struct {
+ name string
+ encoding, canonical string
+ }
+ tests := []test{
+ // Points with x = 0 and the sign bit set. With x = 0 the curve equation
+ // gives y² = 1, so y = ±1. 1 has two valid encodings.
+ {
+ "y=1,sign-",
+ "0100000000000000000000000000000000000000000000000000000000000080",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+1,sign-",
+ "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p-1,sign-",
+ "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ },
+
+ // Non-canonical y encodings with values 2²⁵⁵-19 (p) to 2²⁵⁵-1 (p+18).
+ {
+ "y=p,sign+",
+ "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p,sign-",
+ "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0000000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+1,sign+",
+ "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ // "y=p+1,sign-" is already tested above.
+ // p+2 is not a valid y-coordinate.
+ {
+ "y=p+3,sign+",
+ "f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0300000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+3,sign-",
+ "f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0300000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+4,sign+",
+ "f1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0400000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+4,sign-",
+ "f1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0400000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+5,sign+",
+ "f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0500000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+5,sign-",
+ "f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0500000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+6,sign+",
+ "f3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0600000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+6,sign-",
+ "f3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0600000000000000000000000000000000000000000000000000000000000080",
+ },
+ // p+7 is not a valid y-coordinate.
+ // p+8 is not a valid y-coordinate.
+ {
+ "y=p+9,sign+",
+ "f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0900000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+9,sign-",
+ "f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0900000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+10,sign+",
+ "f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0a00000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+10,sign-",
+ "f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0a00000000000000000000000000000000000000000000000000000000000080",
+ },
+ // p+11 is not a valid y-coordinate.
+ // p+12 is not a valid y-coordinate.
+ // p+13 is not a valid y-coordinate.
+ {
+ "y=p+14,sign+",
+ "fbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0e00000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+14,sign-",
+ "fbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0e00000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+15,sign+",
+ "fcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0f00000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+15,sign-",
+ "fcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0f00000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+16,sign+",
+ "fdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "1000000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+16,sign-",
+ "fdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "1000000000000000000000000000000000000000000000000000000000000080",
+ },
+ // p+17 is not a valid y-coordinate.
+ {
+ "y=p+18,sign+",
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "1200000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+18,sign-",
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "1200000000000000000000000000000000000000000000000000000000000080",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ p1, err := new(Point).SetBytes(decodeHex(tt.encoding))
+ if err != nil {
+ t.Fatalf("error decoding non-canonical point: %v", err)
+ }
+ p2, err := new(Point).SetBytes(decodeHex(tt.canonical))
+ if err != nil {
+ t.Fatalf("error decoding canonical point: %v", err)
+ }
+ if p1.Equal(p2) != 1 {
+ t.Errorf("equivalent points are not equal: %v, %v", p1, p2)
+ }
+ if encoding := hex.EncodeToString(p1.Bytes()); encoding != tt.canonical {
+ t.Errorf("re-encoding does not match canonical; got %q, expected %q", encoding, tt.canonical)
+ }
+ checkOnCurve(t, p1, p2)
+ })
+ }
+}
+
+var testAllocationsSink byte
+
+func TestAllocations(t *testing.T) {
+ testenv.SkipIfOptimizationOff(t)
+
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := NewIdentityPoint()
+ p.Add(p, NewGeneratorPoint())
+ s := NewScalar()
+ testAllocationsSink ^= s.Bytes()[0]
+ testAllocationsSink ^= p.Bytes()[0]
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1v", allocs)
+ }
+}
+
+func decodeHex(s string) []byte {
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+func BenchmarkEncodingDecoding(b *testing.B) {
+ p := new(Point).Set(dalekScalarBasepoint)
+ for i := 0; i < b.N; i++ {
+ buf := p.Bytes()
+ _, err := p.SetBytes(buf)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go b/src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go
new file mode 100644
index 0000000..411399c
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go
@@ -0,0 +1,294 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+
+ . "github.com/mmcloughlin/avo/build"
+ . "github.com/mmcloughlin/avo/gotypes"
+ . "github.com/mmcloughlin/avo/operand"
+ . "github.com/mmcloughlin/avo/reg"
+)
+
+//go:generate go run . -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field
+
+func main() {
+ Package("crypto/internal/edwards25519/field")
+ ConstraintExpr("amd64,gc,!purego")
+ feMul()
+ feSquare()
+ Generate()
+}
+
+type namedComponent struct {
+ Component
+ name string
+}
+
+func (c namedComponent) String() string { return c.name }
+
+type uint128 struct {
+ name string
+ hi, lo GPVirtual
+}
+
+func (c uint128) String() string { return c.name }
+
+func feSquare() {
+ TEXT("feSquare", NOSPLIT, "func(out, a *Element)")
+ Doc("feSquare sets out = a * a. It works like feSquareGeneric.")
+ Pragma("noescape")
+
+ a := Dereference(Param("a"))
+ l0 := namedComponent{a.Field("l0"), "l0"}
+ l1 := namedComponent{a.Field("l1"), "l1"}
+ l2 := namedComponent{a.Field("l2"), "l2"}
+ l3 := namedComponent{a.Field("l3"), "l3"}
+ l4 := namedComponent{a.Field("l4"), "l4"}
+
+ // r0 = l0×l0 + 19×2×(l1×l4 + l2×l3)
+ r0 := uint128{"r0", GP64(), GP64()}
+ mul64(r0, 1, l0, l0)
+ addMul64(r0, 38, l1, l4)
+ addMul64(r0, 38, l2, l3)
+
+ // r1 = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+ r1 := uint128{"r1", GP64(), GP64()}
+ mul64(r1, 2, l0, l1)
+ addMul64(r1, 38, l2, l4)
+ addMul64(r1, 19, l3, l3)
+
+	// r2 = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+ r2 := uint128{"r2", GP64(), GP64()}
+ mul64(r2, 2, l0, l2)
+ addMul64(r2, 1, l1, l1)
+ addMul64(r2, 38, l3, l4)
+
+	// r3 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+ r3 := uint128{"r3", GP64(), GP64()}
+ mul64(r3, 2, l0, l3)
+ addMul64(r3, 2, l1, l2)
+ addMul64(r3, 19, l4, l4)
+
+	// r4 = 2×l0×l4 + 2×l1×l3 + l2×l2
+ r4 := uint128{"r4", GP64(), GP64()}
+ mul64(r4, 2, l0, l4)
+ addMul64(r4, 2, l1, l3)
+ addMul64(r4, 1, l2, l2)
+
+ Comment("First reduction chain")
+ maskLow51Bits := GP64()
+ MOVQ(Imm((1<<51)-1), maskLow51Bits)
+ c0, r0lo := shiftRightBy51(&r0)
+ c1, r1lo := shiftRightBy51(&r1)
+ c2, r2lo := shiftRightBy51(&r2)
+ c3, r3lo := shiftRightBy51(&r3)
+ c4, r4lo := shiftRightBy51(&r4)
+ maskAndAdd(r0lo, maskLow51Bits, c4, 19)
+ maskAndAdd(r1lo, maskLow51Bits, c0, 1)
+ maskAndAdd(r2lo, maskLow51Bits, c1, 1)
+ maskAndAdd(r3lo, maskLow51Bits, c2, 1)
+ maskAndAdd(r4lo, maskLow51Bits, c3, 1)
+
+ Comment("Second reduction chain (carryPropagate)")
+ // c0 = r0 >> 51
+ MOVQ(r0lo, c0)
+ SHRQ(Imm(51), c0)
+ // c1 = r1 >> 51
+ MOVQ(r1lo, c1)
+ SHRQ(Imm(51), c1)
+ // c2 = r2 >> 51
+ MOVQ(r2lo, c2)
+ SHRQ(Imm(51), c2)
+ // c3 = r3 >> 51
+ MOVQ(r3lo, c3)
+ SHRQ(Imm(51), c3)
+ // c4 = r4 >> 51
+ MOVQ(r4lo, c4)
+ SHRQ(Imm(51), c4)
+ maskAndAdd(r0lo, maskLow51Bits, c4, 19)
+ maskAndAdd(r1lo, maskLow51Bits, c0, 1)
+ maskAndAdd(r2lo, maskLow51Bits, c1, 1)
+ maskAndAdd(r3lo, maskLow51Bits, c2, 1)
+ maskAndAdd(r4lo, maskLow51Bits, c3, 1)
+
+ Comment("Store output")
+ out := Dereference(Param("out"))
+ Store(r0lo, out.Field("l0"))
+ Store(r1lo, out.Field("l1"))
+ Store(r2lo, out.Field("l2"))
+ Store(r3lo, out.Field("l3"))
+ Store(r4lo, out.Field("l4"))
+
+ RET()
+}
+
+func feMul() {
+ TEXT("feMul", NOSPLIT, "func(out, a, b *Element)")
+ Doc("feMul sets out = a * b. It works like feMulGeneric.")
+ Pragma("noescape")
+
+ a := Dereference(Param("a"))
+ a0 := namedComponent{a.Field("l0"), "a0"}
+ a1 := namedComponent{a.Field("l1"), "a1"}
+ a2 := namedComponent{a.Field("l2"), "a2"}
+ a3 := namedComponent{a.Field("l3"), "a3"}
+ a4 := namedComponent{a.Field("l4"), "a4"}
+
+ b := Dereference(Param("b"))
+ b0 := namedComponent{b.Field("l0"), "b0"}
+ b1 := namedComponent{b.Field("l1"), "b1"}
+ b2 := namedComponent{b.Field("l2"), "b2"}
+ b3 := namedComponent{b.Field("l3"), "b3"}
+ b4 := namedComponent{b.Field("l4"), "b4"}
+
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ r0 := uint128{"r0", GP64(), GP64()}
+ mul64(r0, 1, a0, b0)
+ addMul64(r0, 19, a1, b4)
+ addMul64(r0, 19, a2, b3)
+ addMul64(r0, 19, a3, b2)
+ addMul64(r0, 19, a4, b1)
+
+ // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+ r1 := uint128{"r1", GP64(), GP64()}
+ mul64(r1, 1, a0, b1)
+ addMul64(r1, 1, a1, b0)
+ addMul64(r1, 19, a2, b4)
+ addMul64(r1, 19, a3, b3)
+ addMul64(r1, 19, a4, b2)
+
+ // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+ r2 := uint128{"r2", GP64(), GP64()}
+ mul64(r2, 1, a0, b2)
+ addMul64(r2, 1, a1, b1)
+ addMul64(r2, 1, a2, b0)
+ addMul64(r2, 19, a3, b4)
+ addMul64(r2, 19, a4, b3)
+
+ // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+ r3 := uint128{"r3", GP64(), GP64()}
+ mul64(r3, 1, a0, b3)
+ addMul64(r3, 1, a1, b2)
+ addMul64(r3, 1, a2, b1)
+ addMul64(r3, 1, a3, b0)
+ addMul64(r3, 19, a4, b4)
+
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ r4 := uint128{"r4", GP64(), GP64()}
+ mul64(r4, 1, a0, b4)
+ addMul64(r4, 1, a1, b3)
+ addMul64(r4, 1, a2, b2)
+ addMul64(r4, 1, a3, b1)
+ addMul64(r4, 1, a4, b0)
+
+ Comment("First reduction chain")
+ maskLow51Bits := GP64()
+ MOVQ(Imm((1<<51)-1), maskLow51Bits)
+ c0, r0lo := shiftRightBy51(&r0)
+ c1, r1lo := shiftRightBy51(&r1)
+ c2, r2lo := shiftRightBy51(&r2)
+ c3, r3lo := shiftRightBy51(&r3)
+ c4, r4lo := shiftRightBy51(&r4)
+ maskAndAdd(r0lo, maskLow51Bits, c4, 19)
+ maskAndAdd(r1lo, maskLow51Bits, c0, 1)
+ maskAndAdd(r2lo, maskLow51Bits, c1, 1)
+ maskAndAdd(r3lo, maskLow51Bits, c2, 1)
+ maskAndAdd(r4lo, maskLow51Bits, c3, 1)
+
+ Comment("Second reduction chain (carryPropagate)")
+ // c0 = r0 >> 51
+ MOVQ(r0lo, c0)
+ SHRQ(Imm(51), c0)
+ // c1 = r1 >> 51
+ MOVQ(r1lo, c1)
+ SHRQ(Imm(51), c1)
+ // c2 = r2 >> 51
+ MOVQ(r2lo, c2)
+ SHRQ(Imm(51), c2)
+ // c3 = r3 >> 51
+ MOVQ(r3lo, c3)
+ SHRQ(Imm(51), c3)
+ // c4 = r4 >> 51
+ MOVQ(r4lo, c4)
+ SHRQ(Imm(51), c4)
+ maskAndAdd(r0lo, maskLow51Bits, c4, 19)
+ maskAndAdd(r1lo, maskLow51Bits, c0, 1)
+ maskAndAdd(r2lo, maskLow51Bits, c1, 1)
+ maskAndAdd(r3lo, maskLow51Bits, c2, 1)
+ maskAndAdd(r4lo, maskLow51Bits, c3, 1)
+
+ Comment("Store output")
+ out := Dereference(Param("out"))
+ Store(r0lo, out.Field("l0"))
+ Store(r1lo, out.Field("l1"))
+ Store(r2lo, out.Field("l2"))
+ Store(r3lo, out.Field("l3"))
+ Store(r4lo, out.Field("l4"))
+
+ RET()
+}
+
+// mul64 sets r to i * aX * bX.
+func mul64(r uint128, i int, aX, bX namedComponent) {
+ switch i {
+ case 1:
+ Comment(fmt.Sprintf("%s = %s×%s", r, aX, bX))
+ Load(aX, RAX)
+ case 2:
+ Comment(fmt.Sprintf("%s = 2×%s×%s", r, aX, bX))
+ Load(aX, RAX)
+ SHLQ(Imm(1), RAX)
+ default:
+ panic("unsupported i value")
+ }
+ MULQ(mustAddr(bX)) // RDX, RAX = RAX * bX
+ MOVQ(RAX, r.lo)
+ MOVQ(RDX, r.hi)
+}
+
+// addMul64 sets r to r + i * aX * bX.
+func addMul64(r uint128, i uint64, aX, bX namedComponent) {
+ switch i {
+ case 1:
+ Comment(fmt.Sprintf("%s += %s×%s", r, aX, bX))
+ Load(aX, RAX)
+ default:
+ Comment(fmt.Sprintf("%s += %d×%s×%s", r, i, aX, bX))
+ IMUL3Q(Imm(i), Load(aX, GP64()), RAX)
+ }
+ MULQ(mustAddr(bX)) // RDX, RAX = RAX * bX
+ ADDQ(RAX, r.lo)
+ ADCQ(RDX, r.hi)
+}
+
+// shiftRightBy51 returns r >> 51 and r.lo.
+//
+// After this function is called, the uint128 may not be used anymore.
+func shiftRightBy51(r *uint128) (out, lo GPVirtual) {
+ out = r.hi
+ lo = r.lo
+ SHLQ(Imm(64-51), r.lo, r.hi)
+ r.lo, r.hi = nil, nil // make sure the uint128 is unusable
+ return
+}
+
+// maskAndAdd sets r = r&mask + c*i.
+func maskAndAdd(r, mask, c GPVirtual, i uint64) {
+ ANDQ(mask, r)
+ if i != 1 {
+ IMUL3Q(Imm(i), c, c)
+ }
+ ADDQ(c, r)
+}
+
+func mustAddr(c Component) Op {
+ b, err := c.Resolve()
+ if err != nil {
+ panic(err)
+ }
+ return b.Addr
+}
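
For readers cross-checking the generated assembly: each rN above is a 128-bit
accumulator held in a (hi, lo) register pair, and shiftRightBy51 emits a double-width
shift (SHLQ $13, lo, hi) that computes rN >> 51, because limbs carry at bit 51 rather
than 64. A hedged Go rendering of what one reduction chain computes — the real generic
fallback lives in fe_generic.go, also part of this CL:

	// reduceChain folds five (hi, lo) accumulator pairs into 51-bit
	// limbs. The carry out of the top limb wraps around to the bottom
	// limb multiplied by 19, per the identity 2^255 ≡ 19 (mod p).
	func reduceChain(h [5][2]uint64) (l [5]uint64) {
		const mask = (1 << 51) - 1
		var c [5]uint64
		for i, x := range h {
			hi, lo := x[0], x[1]
			c[i] = hi<<13 | lo>>51 // (hi·2^64 + lo) >> 51
			l[i] = lo & mask
		}
		l[0] += 19 * c[4]
		l[1] += c[0]
		l[2] += c[1]
		l[3] += c[2]
		l[4] += c[3]
		return l
	}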
diff --git a/src/crypto/internal/edwards25519/field/_asm/go.mod b/src/crypto/internal/edwards25519/field/_asm/go.mod
new file mode 100644
index 0000000..1ce2b5e
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/_asm/go.mod
@@ -0,0 +1,12 @@
+module asm
+
+go 1.19
+
+require github.com/mmcloughlin/avo v0.4.0
+
+require (
+ golang.org/x/mod v0.4.2 // indirect
+ golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021 // indirect
+ golang.org/x/tools v0.1.7 // indirect
+ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+)
diff --git a/src/crypto/internal/edwards25519/field/_asm/go.sum b/src/crypto/internal/edwards25519/field/_asm/go.sum
new file mode 100644
index 0000000..b4b5914
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/_asm/go.sum
@@ -0,0 +1,32 @@
+github.com/mmcloughlin/avo v0.4.0 h1:jeHDRktVD+578ULxWpQHkilor6pkdLF7u7EiTzDbfcU=
+github.com/mmcloughlin/avo v0.4.0/go.mod h1:RW9BfYA3TgO9uCdNrKU2h6J8cPD8ZLznvfgHAeszb1s=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021 h1:giLT+HuUP/gXYrG2Plg9WTjj4qhfgaW424ZIFog3rlk=
+golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/src/crypto/internal/edwards25519/field/fe.go b/src/crypto/internal/edwards25519/field/fe.go
new file mode 100644
index 0000000..5518ef2
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe.go
@@ -0,0 +1,420 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package field implements fast arithmetic modulo 2^255-19.
+package field
+
+import (
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+// Element represents an element of the field GF(2^255-19). Note that this
+// is not a cryptographically secure group, and should only be used to interact
+// with edwards25519.Point coordinates.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Element struct {
+ // An element t represents the integer
+ // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
+ //
+ // Between operations, all limbs are expected to be lower than 2^52.
+ l0 uint64
+ l1 uint64
+ l2 uint64
+ l3 uint64
+ l4 uint64
+}
+
+const maskLow51Bits uint64 = (1 << 51) - 1
+
+var feZero = &Element{0, 0, 0, 0, 0}
+
+// Zero sets v = 0, and returns v.
+func (v *Element) Zero() *Element {
+ *v = *feZero
+ return v
+}
+
+var feOne = &Element{1, 0, 0, 0, 0}
+
+// One sets v = 1, and returns v.
+func (v *Element) One() *Element {
+ *v = *feOne
+ return v
+}
+
+// reduce reduces v modulo 2^255 - 19 and returns it.
+func (v *Element) reduce() *Element {
+ v.carryPropagate()
+
+ // After the light reduction we now have a field element representation
+ // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
+
+ // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
+ // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
+ c := (v.l0 + 19) >> 51
+ c = (v.l1 + c) >> 51
+ c = (v.l2 + c) >> 51
+ c = (v.l3 + c) >> 51
+ c = (v.l4 + c) >> 51
+
+ // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
+ // effectively applying the reduction identity to the carry.
+ v.l0 += 19 * c
+
+ v.l1 += v.l0 >> 51
+ v.l0 = v.l0 & maskLow51Bits
+ v.l2 += v.l1 >> 51
+ v.l1 = v.l1 & maskLow51Bits
+ v.l3 += v.l2 >> 51
+ v.l2 = v.l2 & maskLow51Bits
+ v.l4 += v.l3 >> 51
+ v.l3 = v.l3 & maskLow51Bits
+ // no additional carry
+ v.l4 = v.l4 & maskLow51Bits
+
+ return v
+}
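
A worked example of the identity this relies on, 2^255 ≡ 19 (mod p): the non-reduced
element whose top limb is 2^51 represents the integer 2^255, and reduce brings it to 19.
An in-package sketch:

	v := &Element{0, 0, 0, 0, 1 << 51} // represents the integer 2^255
	v.reduce()
	// v is now {19, 0, 0, 0, 0}: 2^255 mod (2^255 - 19) = 19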
+
+// Add sets v = a + b, and returns v.
+func (v *Element) Add(a, b *Element) *Element {
+ v.l0 = a.l0 + b.l0
+ v.l1 = a.l1 + b.l1
+ v.l2 = a.l2 + b.l2
+ v.l3 = a.l3 + b.l3
+ v.l4 = a.l4 + b.l4
+ // Using the generic implementation here is actually faster than the
+ // assembly. Probably because the body of this function is so simple that
+ // the compiler can figure out better optimizations by inlining the carry
+ // propagation.
+ return v.carryPropagateGeneric()
+}
+
+// Subtract sets v = a - b, and returns v.
+func (v *Element) Subtract(a, b *Element) *Element {
+ // We first add 2 * p, to guarantee the subtraction won't underflow, and
+ // then subtract b (which can be up to 2^255 + 2^13 * 19).
+ v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
+ v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
+ v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
+ v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
+ v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
+ return v.carryPropagate()
+}
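
A quick check of the magic constants: they spell out 2p in the same radix-2^51 layout,
with 0xFFFFFFFFFFFDA = 2^52 − 38 and 0xFFFFFFFFFFFFE = 2^52 − 2, so the sum telescopes:

	(2^52 − 38) + (2^52 − 2)·(2^51 + 2^102 + 2^153 + 2^204)
	    = 2^256 − 38 = 2·(2^255 − 19) = 2p

Since 2p ≡ 0 (mod p), the result stays congruent to a − b while the per-limb additions
keep the uint64 subtractions from wrapping for operands within their expected bounds.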
+
+// Negate sets v = -a, and returns v.
+func (v *Element) Negate(a *Element) *Element {
+ return v.Subtract(feZero, a)
+}
+
+// Invert sets v = 1/z mod p, and returns v.
+//
+// If z == 0, Invert returns v = 0.
+func (v *Element) Invert(z *Element) *Element {
+ // Inversion is implemented as exponentiation with exponent p − 2. It uses the
+ // same sequence of 255 squarings and 11 multiplications as [Curve25519].
+ var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
+
+ z2.Square(z) // 2
+ t.Square(&z2) // 4
+ t.Square(&t) // 8
+ z9.Multiply(&t, z) // 9
+ z11.Multiply(&z9, &z2) // 11
+ t.Square(&z11) // 22
+ z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
+
+ t.Square(&z2_5_0) // 2^6 - 2^1
+ for i := 0; i < 4; i++ {
+ t.Square(&t) // 2^10 - 2^5
+ }
+ z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
+
+ t.Square(&z2_10_0) // 2^11 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^20 - 2^10
+ }
+ z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
+
+ t.Square(&z2_20_0) // 2^21 - 2^1
+ for i := 0; i < 19; i++ {
+ t.Square(&t) // 2^40 - 2^20
+ }
+ t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
+
+ t.Square(&t) // 2^41 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^50 - 2^10
+ }
+ z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
+
+ t.Square(&z2_50_0) // 2^51 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^100 - 2^50
+ }
+ z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
+
+ t.Square(&z2_100_0) // 2^101 - 2^1
+ for i := 0; i < 99; i++ {
+ t.Square(&t) // 2^200 - 2^100
+ }
+ t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
+
+ t.Square(&t) // 2^201 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^250 - 2^50
+ }
+ t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
+
+ t.Square(&t) // 2^251 - 2^1
+ t.Square(&t) // 2^252 - 2^2
+ t.Square(&t) // 2^253 - 2^3
+ t.Square(&t) // 2^254 - 2^4
+ t.Square(&t) // 2^255 - 2^5
+
+ return v.Multiply(&t, &z11) // 2^255 - 21
+}
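
A usage sketch, relying on Fermat's little theorem (z^(p−2) = z^(−1) for nonzero z):

	var zInv, check Element
	zInv.Invert(z)           // zInv = z^(p-2) = 1/z
	check.Multiply(z, &zInv) // check = z * (1/z)
	// For any nonzero z, check.Equal(feOne) == 1.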
+
+// Set sets v = a, and returns v.
+func (v *Element) Set(a *Element) *Element {
+ *v = *a
+ return v
+}
+
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid field element input size")
+ }
+
+ // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+ v.l0 = binary.LittleEndian.Uint64(x[0:8])
+ v.l0 &= maskLow51Bits
+ // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+ v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+ v.l1 &= maskLow51Bits
+ // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+ v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+ v.l2 &= maskLow51Bits
+ // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+ v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+ v.l3 &= maskLow51Bits
+ // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Note: not bytes 25:33, shift 4, to avoid overread.
+ v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+ v.l4 &= maskLow51Bits
+
+ return v, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [32]byte
+ return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+ t := *v
+ t.reduce()
+
+ var buf [8]byte
+ for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+ bitsOffset := i * 51
+ binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+ for i, bb := range buf {
+ off := bitsOffset/8 + i
+ if off >= len(out) {
+ break
+ }
+ out[off] |= bb
+ }
+ }
+
+ return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+ sa, sv := u.Bytes(), v.Bytes()
+ return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+ m := mask64Bits(cond)
+ v.l0 = (m & a.l0) | (^m & b.l0)
+ v.l1 = (m & a.l1) | (^m & b.l1)
+ v.l2 = (m & a.l2) | (^m & b.l2)
+ v.l3 = (m & a.l3) | (^m & b.l3)
+ v.l4 = (m & a.l4) | (^m & b.l4)
+ return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
+func (v *Element) Swap(u *Element, cond int) {
+ m := mask64Bits(cond)
+ t := m & (v.l0 ^ u.l0)
+ v.l0 ^= t
+ u.l0 ^= t
+ t = m & (v.l1 ^ u.l1)
+ v.l1 ^= t
+ u.l1 ^= t
+ t = m & (v.l2 ^ u.l2)
+ v.l2 ^= t
+ u.l2 ^= t
+ t = m & (v.l3 ^ u.l3)
+ v.l3 ^= t
+ u.l3 ^= t
+ t = m & (v.l4 ^ u.l4)
+ v.l4 ^= t
+ u.l4 ^= t
+}
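
The masked-XOR pattern above is the standard branchless conditional swap: when
cond == 1 the mask m is all ones, so each t equals v.lX ^ u.lX and XORing t into both
sides exchanges them; when cond == 0, m and every t are zero and both elements are
unchanged. The same pattern for a single pair of words:

	m := mask64Bits(cond)
	t := m & (a ^ b)
	a ^= t // a becomes the old b when cond == 1
	b ^= t // b becomes the old a when cond == 1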
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+ return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+ return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+ feMul(v, x, y)
+ return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+ feSquare(v, x)
+ return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element {
+ x0lo, x0hi := mul51(x.l0, y)
+ x1lo, x1hi := mul51(x.l1, y)
+ x2lo, x2hi := mul51(x.l2, y)
+ x3lo, x3hi := mul51(x.l3, y)
+ x4lo, x4hi := mul51(x.l4, y)
+ v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
+ v.l1 = x1lo + x0hi
+ v.l2 = x2lo + x1hi
+ v.l3 = x3lo + x2hi
+ v.l4 = x4lo + x3hi
+ // The hi portions are going to be only 32 bits, plus any previous excess,
+ // so we can skip the carry propagation.
+ return v
+}
+
+// mul51 returns lo + hi * 2⁵¹ = a * b.
+func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
+ mh, ml := bits.Mul64(a, uint64(b))
+ lo = ml & maskLow51Bits
+ hi = (mh << 13) | (ml >> 51)
+ return
+}
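
Why the shift by 13: bits.Mul64 returns the 128-bit product as a·b = mh·2^64 + ml,
and 2^64 = 2^13·2^51, so

	a·b = ((mh << 13) | (ml >> 51))·2^51 + (ml & maskLow51Bits)
	    = hi·2^51 + lo

With a < 2^52 and b < 2^32 the product is below 2^84, so hi fits comfortably in a
uint64.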
+
+// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element {
+ var t0, t1, t2 Element
+
+ t0.Square(x) // x^2
+ t1.Square(&t0) // x^4
+ t1.Square(&t1) // x^8
+ t1.Multiply(x, &t1) // x^9
+ t0.Multiply(&t0, &t1) // x^11
+ t0.Square(&t0) // x^22
+ t0.Multiply(&t1, &t0) // x^31
+ t1.Square(&t0) // x^62
+ for i := 1; i < 5; i++ { // x^992
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
+ t1.Square(&t0) // 2^11 - 2
+ for i := 1; i < 10; i++ { // 2^20 - 2^10
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^20 - 1
+ t2.Square(&t1) // 2^21 - 2
+ for i := 1; i < 20; i++ { // 2^40 - 2^20
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^40 - 1
+ t1.Square(&t1) // 2^41 - 2
+ for i := 1; i < 10; i++ { // 2^50 - 2^10
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^50 - 1
+ t1.Square(&t0) // 2^51 - 2
+ for i := 1; i < 50; i++ { // 2^100 - 2^50
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^100 - 1
+ t2.Square(&t1) // 2^101 - 2
+ for i := 1; i < 100; i++ { // 2^200 - 2^100
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^200 - 1
+ t1.Square(&t1) // 2^201 - 2
+ for i := 1; i < 50; i++ { // 2^250 - 2^50
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^250 - 1
+ t0.Square(&t0) // 2^251 - 2
+ t0.Square(&t0) // 2^252 - 4
+ return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
+}
+
+// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
+var sqrtM1 = &Element{1718705420411056, 234908883556509,
+ 2233514472574048, 2117202627021982, 765476049583133}
+
+// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
+//
+// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
+// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
+// and returns r and 0.
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+ t0 := new(Element)
+
+ // r = (u * v3) * (u * v7)^((p-5)/8)
+ v2 := new(Element).Square(v)
+ uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+ uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+ rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
+
+ check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
+
+ uNeg := new(Element).Negate(u)
+ correctSignSqrt := check.Equal(u)
+ flippedSignSqrt := check.Equal(uNeg)
+ flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
+
+ rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
+ // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
+ rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
+
+ r.Absolute(rr) // Choose the nonnegative square root.
+ return r, correctSignSqrt | flippedSignSqrt
+}
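
A usage sketch mirroring how Point.SetBytes (earlier in this CL) recovers the
x-coordinate during decompression, with u = y² − 1 and vv = d·y² + 1:

	x, wasSquare := new(Element).SqrtRatio(u, vv)
	if wasSquare == 0 {
		// u/vv is not a square: not a valid point encoding
	}
	// Otherwise x is the non-negative root, satisfying vv·x² == u.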
diff --git a/src/crypto/internal/edwards25519/field/fe_alias_test.go b/src/crypto/internal/edwards25519/field/fe_alias_test.go
new file mode 100644
index 0000000..bf1efdc
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_alias_test.go
@@ -0,0 +1,140 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import (
+ "testing"
+ "testing/quick"
+)
+
+func checkAliasingOneArg(f func(v, x *Element) *Element) func(v, x Element) bool {
+ return func(v, x Element) bool {
+ x1, v1 := x, x
+
+ // Calculate a reference f(x) without aliasing.
+ if out := f(&v, &x); out != &v && isInBounds(out) {
+ return false
+ }
+
+ // Test aliasing the argument and the receiver.
+ if out := f(&v1, &v1); out != &v1 || v1 != v {
+ return false
+ }
+
+		// Ensure the argument was not modified.
+ return x == x1
+ }
+}
+
+func checkAliasingTwoArgs(f func(v, x, y *Element) *Element) func(v, x, y Element) bool {
+ return func(v, x, y Element) bool {
+ x1, y1, v1 := x, y, Element{}
+
+ // Calculate a reference f(x, y) without aliasing.
+ if out := f(&v, &x, &y); out != &v && isInBounds(out) {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &y); out != &v1 || v1 != v {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = y
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v {
+ return false
+ }
+
+ // Calculate a reference f(x, x) without aliasing.
+ if out := f(&v, &x, &x); out != &v {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &x); out != &v1 || v1 != v {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = x
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v {
+ return false
+ }
+ // Test aliasing both arguments and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &v1); out != &v1 || v1 != v {
+ return false
+ }
+
+ // Ensure the arguments were not modified.
+ return x == x1 && y == y1
+ }
+}
+
+// TestAliasing checks that receivers and arguments can alias each other without
+// leading to incorrect results. That is, it ensures that it's safe to write
+//
+// v.Invert(v)
+//
+// or
+//
+// v.Add(v, v)
+//
+// without any of the inputs getting clobbered by the output being written.
+func TestAliasing(t *testing.T) {
+ type target struct {
+ name string
+ oneArgF func(v, x *Element) *Element
+ twoArgsF func(v, x, y *Element) *Element
+ }
+ for _, tt := range []target{
+ {name: "Absolute", oneArgF: (*Element).Absolute},
+ {name: "Invert", oneArgF: (*Element).Invert},
+ {name: "Negate", oneArgF: (*Element).Negate},
+ {name: "Set", oneArgF: (*Element).Set},
+ {name: "Square", oneArgF: (*Element).Square},
+ {name: "Pow22523", oneArgF: (*Element).Pow22523},
+ {
+ name: "Mult32",
+ oneArgF: func(v, x *Element) *Element {
+ return v.Mult32(x, 0xffffffff)
+ },
+ },
+ {name: "Multiply", twoArgsF: (*Element).Multiply},
+ {name: "Add", twoArgsF: (*Element).Add},
+ {name: "Subtract", twoArgsF: (*Element).Subtract},
+ {
+ name: "SqrtRatio",
+ twoArgsF: func(v, x, y *Element) *Element {
+ r, _ := v.SqrtRatio(x, y)
+ return r
+ },
+ },
+ {
+ name: "Select0",
+ twoArgsF: func(v, x, y *Element) *Element {
+ return v.Select(x, y, 0)
+ },
+ },
+ {
+ name: "Select1",
+ twoArgsF: func(v, x, y *Element) *Element {
+ return v.Select(x, y, 1)
+ },
+ },
+ } {
+ var err error
+ switch {
+ case tt.oneArgF != nil:
+ err = quick.Check(checkAliasingOneArg(tt.oneArgF), &quick.Config{MaxCountScale: 1 << 8})
+ case tt.twoArgsF != nil:
+ err = quick.Check(checkAliasingTwoArgs(tt.twoArgsF), &quick.Config{MaxCountScale: 1 << 8})
+ }
+ if err != nil {
+ t.Errorf("%v: %v", tt.name, err)
+ }
+ }
+}
diff --git a/src/crypto/internal/edwards25519/field/fe_amd64.go b/src/crypto/internal/edwards25519/field/fe_amd64.go
new file mode 100644
index 0000000..70c5416
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_amd64.go
@@ -0,0 +1,15 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+
+package field
+
+// feMul sets out = a * b. It works like feMulGeneric.
+//
+//go:noescape
+func feMul(out *Element, a *Element, b *Element)
+
+// feSquare sets out = a * a. It works like feSquareGeneric.
+//
+//go:noescape
+func feSquare(out *Element, a *Element)
diff --git a/src/crypto/internal/edwards25519/field/fe_amd64.s b/src/crypto/internal/edwards25519/field/fe_amd64.s
new file mode 100644
index 0000000..60817ac
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_amd64.s
@@ -0,0 +1,378 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+
+#include "textflag.h"
+
+// func feMul(out *Element, a *Element, b *Element)
+TEXT ·feMul(SB), NOSPLIT, $0-24
+ MOVQ a+8(FP), CX
+ MOVQ b+16(FP), BX
+
+ // r0 = a0×b0
+ MOVQ (CX), AX
+ MULQ (BX)
+ MOVQ AX, DI
+ MOVQ DX, SI
+
+ // r0 += 19×a1×b4
+ MOVQ 8(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a2×b3
+ MOVQ 16(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a3×b2
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 16(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a4×b1
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 8(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r1 = a0×b1
+ MOVQ (CX), AX
+ MULQ 8(BX)
+ MOVQ AX, R9
+ MOVQ DX, R8
+
+ // r1 += a1×b0
+ MOVQ 8(CX), AX
+ MULQ (BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a2×b4
+ MOVQ 16(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a3×b3
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a4×b2
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 16(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r2 = a0×b2
+ MOVQ (CX), AX
+ MULQ 16(BX)
+ MOVQ AX, R11
+ MOVQ DX, R10
+
+ // r2 += a1×b1
+ MOVQ 8(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += a2×b0
+ MOVQ 16(CX), AX
+ MULQ (BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += 19×a3×b4
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += 19×a4×b3
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r3 = a0×b3
+ MOVQ (CX), AX
+ MULQ 24(BX)
+ MOVQ AX, R13
+ MOVQ DX, R12
+
+ // r3 += a1×b2
+ MOVQ 8(CX), AX
+ MULQ 16(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += a2×b1
+ MOVQ 16(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += a3×b0
+ MOVQ 24(CX), AX
+ MULQ (BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += 19×a4×b4
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r4 = a0×b4
+ MOVQ (CX), AX
+ MULQ 32(BX)
+ MOVQ AX, R15
+ MOVQ DX, R14
+
+ // r4 += a1×b3
+ MOVQ 8(CX), AX
+ MULQ 24(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a2×b2
+ MOVQ 16(CX), AX
+ MULQ 16(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a3×b1
+ MOVQ 24(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a4×b0
+ MOVQ 32(CX), AX
+ MULQ (BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // First reduction chain
+ MOVQ $0x0007ffffffffffff, AX
+ SHLQ $0x0d, DI, SI
+ SHLQ $0x0d, R9, R8
+ SHLQ $0x0d, R11, R10
+ SHLQ $0x0d, R13, R12
+ SHLQ $0x0d, R15, R14
+ ANDQ AX, DI
+ IMUL3Q $0x13, R14, R14
+ ADDQ R14, DI
+ ANDQ AX, R9
+ ADDQ SI, R9
+ ANDQ AX, R11
+ ADDQ R8, R11
+ ANDQ AX, R13
+ ADDQ R10, R13
+ ANDQ AX, R15
+ ADDQ R12, R15
+
+ // Second reduction chain (carryPropagate)
+ MOVQ DI, SI
+ SHRQ $0x33, SI
+ MOVQ R9, R8
+ SHRQ $0x33, R8
+ MOVQ R11, R10
+ SHRQ $0x33, R10
+ MOVQ R13, R12
+ SHRQ $0x33, R12
+ MOVQ R15, R14
+ SHRQ $0x33, R14
+ ANDQ AX, DI
+ IMUL3Q $0x13, R14, R14
+ ADDQ R14, DI
+ ANDQ AX, R9
+ ADDQ SI, R9
+ ANDQ AX, R11
+ ADDQ R8, R11
+ ANDQ AX, R13
+ ADDQ R10, R13
+ ANDQ AX, R15
+ ADDQ R12, R15
+
+ // Store output
+ MOVQ out+0(FP), AX
+ MOVQ DI, (AX)
+ MOVQ R9, 8(AX)
+ MOVQ R11, 16(AX)
+ MOVQ R13, 24(AX)
+ MOVQ R15, 32(AX)
+ RET
+
+// func feSquare(out *Element, a *Element)
+TEXT ·feSquare(SB), NOSPLIT, $0-16
+ MOVQ a+8(FP), CX
+
+ // r0 = l0×l0
+ MOVQ (CX), AX
+ MULQ (CX)
+ MOVQ AX, SI
+ MOVQ DX, BX
+
+ // r0 += 38×l1×l4
+ MOVQ 8(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, SI
+ ADCQ DX, BX
+
+ // r0 += 38×l2×l3
+ MOVQ 16(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, SI
+ ADCQ DX, BX
+
+ // r1 = 2×l0×l1
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 8(CX)
+ MOVQ AX, R8
+ MOVQ DX, DI
+
+ // r1 += 38×l2×l4
+ MOVQ 16(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R8
+ ADCQ DX, DI
+
+ // r1 += 19×l3×l3
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, R8
+ ADCQ DX, DI
+
+ // r2 = 2×l0×l2
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 16(CX)
+ MOVQ AX, R10
+ MOVQ DX, R9
+
+ // r2 += l1×l1
+ MOVQ 8(CX), AX
+ MULQ 8(CX)
+ ADDQ AX, R10
+ ADCQ DX, R9
+
+ // r2 += 38×l3×l4
+ MOVQ 24(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R10
+ ADCQ DX, R9
+
+ // r3 = 2×l0×l3
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 24(CX)
+ MOVQ AX, R12
+ MOVQ DX, R11
+
+ // r3 += 2×l1×l2
+ MOVQ 8(CX), AX
+ IMUL3Q $0x02, AX, AX
+ MULQ 16(CX)
+ ADDQ AX, R12
+ ADCQ DX, R11
+
+ // r3 += 19×l4×l4
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R12
+ ADCQ DX, R11
+
+ // r4 = 2×l0×l4
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 32(CX)
+ MOVQ AX, R14
+ MOVQ DX, R13
+
+ // r4 += 2×l1×l3
+ MOVQ 8(CX), AX
+ IMUL3Q $0x02, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, R14
+ ADCQ DX, R13
+
+ // r4 += l2×l2
+ MOVQ 16(CX), AX
+ MULQ 16(CX)
+ ADDQ AX, R14
+ ADCQ DX, R13
+
+ // First reduction chain
+ MOVQ $0x0007ffffffffffff, AX
+ SHLQ $0x0d, SI, BX
+ SHLQ $0x0d, R8, DI
+ SHLQ $0x0d, R10, R9
+ SHLQ $0x0d, R12, R11
+ SHLQ $0x0d, R14, R13
+ ANDQ AX, SI
+ IMUL3Q $0x13, R13, R13
+ ADDQ R13, SI
+ ANDQ AX, R8
+ ADDQ BX, R8
+ ANDQ AX, R10
+ ADDQ DI, R10
+ ANDQ AX, R12
+ ADDQ R9, R12
+ ANDQ AX, R14
+ ADDQ R11, R14
+
+ // Second reduction chain (carryPropagate)
+ MOVQ SI, BX
+ SHRQ $0x33, BX
+ MOVQ R8, DI
+ SHRQ $0x33, DI
+ MOVQ R10, R9
+ SHRQ $0x33, R9
+ MOVQ R12, R11
+ SHRQ $0x33, R11
+ MOVQ R14, R13
+ SHRQ $0x33, R13
+ ANDQ AX, SI
+ IMUL3Q $0x13, R13, R13
+ ADDQ R13, SI
+ ANDQ AX, R8
+ ADDQ BX, R8
+ ANDQ AX, R10
+ ADDQ DI, R10
+ ANDQ AX, R12
+ ADDQ R9, R12
+ ANDQ AX, R14
+ ADDQ R11, R14
+
+ // Store output
+ MOVQ out+0(FP), AX
+ MOVQ SI, (AX)
+ MOVQ R8, 8(AX)
+ MOVQ R10, 16(AX)
+ MOVQ R12, 24(AX)
+ MOVQ R14, 32(AX)
+ RET
diff --git a/src/crypto/internal/edwards25519/field/fe_amd64_noasm.go b/src/crypto/internal/edwards25519/field/fe_amd64_noasm.go
new file mode 100644
index 0000000..9da280d
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_amd64_noasm.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || !gc || purego
+
+package field
+
+func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
+
+func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/src/crypto/internal/edwards25519/field/fe_arm64.go b/src/crypto/internal/edwards25519/field/fe_arm64.go
new file mode 100644
index 0000000..075fe9b
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_arm64.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+
+package field
+
+//go:noescape
+func carryPropagate(v *Element)
+
+func (v *Element) carryPropagate() *Element {
+ carryPropagate(v)
+ return v
+}
diff --git a/src/crypto/internal/edwards25519/field/fe_arm64.s b/src/crypto/internal/edwards25519/field/fe_arm64.s
new file mode 100644
index 0000000..751ab2a
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_arm64.s
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+
+#include "textflag.h"
+
+// carryPropagate works exactly like carryPropagateGeneric and uses the
+// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
+// avoids loading R0-R4 twice and uses LDP and STP.
+//
+// See https://golang.org/issues/43145 for the main compiler issue.
+//
+// func carryPropagate(v *Element)
+TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
+ MOVD v+0(FP), R20
+
+ LDP 0(R20), (R0, R1)
+ LDP 16(R20), (R2, R3)
+ MOVD 32(R20), R4
+
+ AND $0x7ffffffffffff, R0, R10
+ AND $0x7ffffffffffff, R1, R11
+ AND $0x7ffffffffffff, R2, R12
+ AND $0x7ffffffffffff, R3, R13
+ AND $0x7ffffffffffff, R4, R14
+
+ ADD R0>>51, R11, R11
+ ADD R1>>51, R12, R12
+ ADD R2>>51, R13, R13
+ ADD R3>>51, R14, R14
+ // R4>>51 * 19 + R10 -> R10
+ LSR $51, R4, R21
+ MOVD $19, R22
+ MADD R22, R10, R21, R10
+
+ STP (R10, R11), 0(R20)
+ STP (R12, R13), 16(R20)
+ MOVD R14, 32(R20)
+
+ RET
diff --git a/src/crypto/internal/edwards25519/field/fe_arm64_noasm.go b/src/crypto/internal/edwards25519/field/fe_arm64_noasm.go
new file mode 100644
index 0000000..fc029ac
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_arm64_noasm.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 || !gc || purego
+
+package field
+
+func (v *Element) carryPropagate() *Element {
+ return v.carryPropagateGeneric()
+}
diff --git a/src/crypto/internal/edwards25519/field/fe_bench_test.go b/src/crypto/internal/edwards25519/field/fe_bench_test.go
new file mode 100644
index 0000000..84fdf05
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_bench_test.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "testing"
+
+func BenchmarkAdd(b *testing.B) {
+ x := new(Element).One()
+ y := new(Element).Add(x, x)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Add(x, y)
+ }
+}
+
+func BenchmarkMultiply(b *testing.B) {
+ x := new(Element).One()
+ y := new(Element).Add(x, x)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Multiply(x, y)
+ }
+}
+
+func BenchmarkSquare(b *testing.B) {
+ x := new(Element).Add(feOne, feOne)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Square(x)
+ }
+}
+
+func BenchmarkInvert(b *testing.B) {
+ x := new(Element).Add(feOne, feOne)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Invert(x)
+ }
+}
+
+func BenchmarkMult32(b *testing.B) {
+ x := new(Element).One()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Mult32(x, 0xaa42aa42)
+ }
+}
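+
+// These benchmarks can be run from a Go source checkout with, for example:
+//
+//	go test -bench=. crypto/internal/edwards25519/field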
diff --git a/src/crypto/internal/edwards25519/field/fe_generic.go b/src/crypto/internal/edwards25519/field/fe_generic.go
new file mode 100644
index 0000000..d6667b2
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_generic.go
@@ -0,0 +1,266 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "math/bits"
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+ lo, hi uint64
+}
+
+// mul64 returns a * b.
+func mul64(a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ return uint128{lo, hi}
+}
+
+// addMul64 returns v + a * b.
+func addMul64(v uint128, a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ lo, c := bits.Add64(lo, v.lo, 0)
+ hi, _ = bits.Add64(hi, v.hi, c)
+ return uint128{lo, hi}
+}
+
+// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
+func shiftRightBy51(a uint128) uint64 {
+ return (a.hi << (64 - 51)) | (a.lo >> 51)
+}
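+
+// Illustrative composition of the helpers above (a sketch with hypothetical
+// 51-bit limbs a0, a1, b0, b1, not code from this file):
+//
+//	acc := mul64(a0, b0)          // acc = a0×b0 as a uint128
+//	acc = addMul64(acc, a1, b1)   // acc += a1×b1
+//	low := acc.lo & maskLow51Bits // the low 51 bits
+//	carry := shiftRightBy51(acc)  // acc >> 51, assuming acc fits in 115 bits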
+
+func feMulGeneric(v, a, b *Element) {
+ a0 := a.l0
+ a1 := a.l1
+ a2 := a.l2
+ a3 := a.l3
+ a4 := a.l4
+
+ b0 := b.l0
+ b1 := b.l1
+ b2 := b.l2
+ b3 := b.l3
+ b4 := b.l4
+
+ // Limb multiplication works like pen-and-paper columnar multiplication, but
+ // with 51-bit limbs instead of digits.
+ //
+ // a4 a3 a2 a1 a0 x
+ // b4 b3 b2 b1 b0 =
+ // ------------------------
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a4b1 a3b1 a2b1 a1b1 a0b1 +
+ // a4b2 a3b2 a2b2 a1b2 a0b2 +
+ // a4b3 a3b3 a2b3 a1b3 a0b3 +
+ // a4b4 a3b4 a2b4 a1b4 a0b4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
+ // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
+ // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
+ //
+ // Reduction can be carried out simultaneously to multiplication. For
+ // example, we do not compute r5: whenever the result of a multiplication
+ // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
+ //
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
+ // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
+ // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
+ // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // Finally we add up the columns into wide, overlapping limbs.
+
+ a1_19 := a1 * 19
+ a2_19 := a2 * 19
+ a3_19 := a3 * 19
+ a4_19 := a4 * 19
+
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ r0 := mul64(a0, b0)
+ r0 = addMul64(r0, a1_19, b4)
+ r0 = addMul64(r0, a2_19, b3)
+ r0 = addMul64(r0, a3_19, b2)
+ r0 = addMul64(r0, a4_19, b1)
+
+ // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+ r1 := mul64(a0, b1)
+ r1 = addMul64(r1, a1, b0)
+ r1 = addMul64(r1, a2_19, b4)
+ r1 = addMul64(r1, a3_19, b3)
+ r1 = addMul64(r1, a4_19, b2)
+
+ // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+ r2 := mul64(a0, b2)
+ r2 = addMul64(r2, a1, b1)
+ r2 = addMul64(r2, a2, b0)
+ r2 = addMul64(r2, a3_19, b4)
+ r2 = addMul64(r2, a4_19, b3)
+
+ // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+ r3 := mul64(a0, b3)
+ r3 = addMul64(r3, a1, b2)
+ r3 = addMul64(r3, a2, b1)
+ r3 = addMul64(r3, a3, b0)
+ r3 = addMul64(r3, a4_19, b4)
+
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ r4 := mul64(a0, b4)
+ r4 = addMul64(r4, a1, b3)
+ r4 = addMul64(r4, a2, b2)
+ r4 = addMul64(r4, a3, b1)
+ r4 = addMul64(r4, a4, b0)
+
+ // After the multiplication, we need to reduce (carry) the five coefficients
+ // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
+ // to respect the Element invariant.
+ //
+ // Overall, the reduction works the same as carryPropagate, except with
+ // wider inputs: we take the carry for each coefficient by shifting it right
+ // by 51, and add it to the limb above it. The top carry is multiplied by 19
+ // according to the reduction identity and added to the lowest limb.
+ //
+ // The largest coefficient (r0) will be at most 111 bits, which guarantees
+ // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
+ //
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
+ // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
+ // r0 < 2⁷ × 2⁵² × 2⁵²
+ // r0 < 2¹¹¹
+ //
+ // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
+ // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
+ // allows us to easily apply the reduction identity.
+ //
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ // r4 < 5 × 2⁵² × 2⁵²
+ // r4 < 2¹⁰⁷
+ //
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ // Now all coefficients fit into 64-bit registers but are still too large to
+// be passed around as an Element. We therefore do one last carry chain,
+ // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
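+
+// Hypothetical spot-check of feMulGeneric against math/big (a sketch; the
+// test-only fromBig/toBig helpers are defined in fe_test.go below):
+//
+//	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
+//	want := new(big.Int).Mod(new(big.Int).Mul(a.toBig(), b.toBig()), p)
+//	got := new(Element)
+//	feMulGeneric(got, a, b)
+//	// got.toBig().Cmp(want) == 0 should hold for any *Element a and b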
+
+func feSquareGeneric(v, a *Element) {
+ l0 := a.l0
+ l1 := a.l1
+ l2 := a.l2
+ l3 := a.l3
+ l4 := a.l4
+
+ // Squaring works precisely like multiplication above, but thanks to its
+ // symmetry we get to group a few terms together.
+ //
+ // l4 l3 l2 l1 l0 x
+ // l4 l3 l2 l1 l0 =
+ // ------------------------
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l4l1 l3l1 l2l1 l1l1 l0l1 +
+ // l4l2 l3l2 l2l2 l1l2 l0l2 +
+ // l4l3 l3l3 l2l3 l1l3 l0l3 +
+ // l4l4 l3l4 l2l4 l1l4 l0l4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
+ // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
+ // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
+ // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
+ // only three Mul64 and four Add64, instead of five and eight.
+
+ l0_2 := l0 * 2
+ l1_2 := l1 * 2
+
+ l1_38 := l1 * 38
+ l2_38 := l2 * 38
+ l3_38 := l3 * 38
+
+ l3_19 := l3 * 19
+ l4_19 := l4 * 19
+
+ // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
+ r0 := mul64(l0, l0)
+ r0 = addMul64(r0, l1_38, l4)
+ r0 = addMul64(r0, l2_38, l3)
+
+ // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+ r1 := mul64(l0_2, l1)
+ r1 = addMul64(r1, l2_38, l4)
+ r1 = addMul64(r1, l3_19, l3)
+
+ // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+ r2 := mul64(l0_2, l2)
+ r2 = addMul64(r2, l1, l1)
+ r2 = addMul64(r2, l3_38, l4)
+
+ // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+ r3 := mul64(l0_2, l3)
+ r3 = addMul64(r3, l1_2, l2)
+ r3 = addMul64(r3, l4_19, l4)
+
+ // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
+ r4 := mul64(l0_2, l4)
+ r4 = addMul64(r4, l1_2, l3)
+ r4 = addMul64(r4, l2, l2)
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+// carryPropagateGeneric brings the limbs below 52 bits by applying the
+// reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
+func (v *Element) carryPropagateGeneric() *Element {
+ c0 := v.l0 >> 51
+ c1 := v.l1 >> 51
+ c2 := v.l2 >> 51
+ c3 := v.l3 >> 51
+ c4 := v.l4 >> 51
+
+ // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+ // the final l0 will be at most 52 bits. Similarly for the rest.
+ v.l0 = v.l0&maskLow51Bits + c4*19
+ v.l1 = v.l1&maskLow51Bits + c0
+ v.l2 = v.l2&maskLow51Bits + c1
+ v.l3 = v.l3&maskLow51Bits + c2
+ v.l4 = v.l4&maskLow51Bits + c3
+
+ return v
+}
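+
+// Worked example (illustrative): for v = {1<<51 + 3, 0, 0, 0, 19}, the only
+// carry is c0 = 1, so the result is {3, 1, 0, 0, 19}. If instead l4 were
+// 1<<51, the top carry would fold back into l0 as 19 per the reduction
+// identity.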
diff --git a/src/crypto/internal/edwards25519/field/fe_test.go b/src/crypto/internal/edwards25519/field/fe_test.go
new file mode 100644
index 0000000..945a024
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_test.go
@@ -0,0 +1,560 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "io"
+ "math/big"
+ "math/bits"
+ mathrand "math/rand"
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+func (v Element) String() string {
+ return hex.EncodeToString(v.Bytes())
+}
+
+// quickCheckConfig1024 will make each quickcheck test run (1024 * -quickchecks)
+// times. The default value of -quickchecks is 100.
+var quickCheckConfig1024 = &quick.Config{MaxCountScale: 1 << 10}
+
+func generateFieldElement(rand *mathrand.Rand) Element {
+ const maskLow52Bits = (1 << 52) - 1
+ return Element{
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ }
+}
+
+// weirdLimbs can be combined to generate a range of edge-case field elements.
+// 0 and -1 are intentionally more weighted, as they combine well.
+var (
+ weirdLimbs51 = []uint64{
+ 0, 0, 0, 0,
+ 1,
+ 19 - 1,
+ 19,
+ 0x2aaaaaaaaaaaa,
+ 0x5555555555555,
+ (1 << 51) - 20,
+ (1 << 51) - 19,
+ (1 << 51) - 1, (1 << 51) - 1,
+ (1 << 51) - 1, (1 << 51) - 1,
+ }
+ weirdLimbs52 = []uint64{
+ 0, 0, 0, 0, 0, 0,
+ 1,
+ 19 - 1,
+ 19,
+ 0x2aaaaaaaaaaaa,
+ 0x5555555555555,
+ (1 << 51) - 20,
+ (1 << 51) - 19,
+ (1 << 51) - 1, (1 << 51) - 1,
+ (1 << 51) - 1, (1 << 51) - 1,
+ (1 << 51) - 1, (1 << 51) - 1,
+ 1 << 51,
+ (1 << 51) + 1,
+ (1 << 52) - 19,
+ (1 << 52) - 1,
+ }
+)
+
+func generateWeirdFieldElement(rand *mathrand.Rand) Element {
+ return Element{
+ weirdLimbs52[rand.Intn(len(weirdLimbs52))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ }
+}
+
+func (Element) Generate(rand *mathrand.Rand, size int) reflect.Value {
+ if rand.Intn(2) == 0 {
+ return reflect.ValueOf(generateWeirdFieldElement(rand))
+ }
+ return reflect.ValueOf(generateFieldElement(rand))
+}
+
+// isInBounds returns whether the element is within the expected bit size bounds
+// after a light reduction.
+func isInBounds(x *Element) bool {
+ return bits.Len64(x.l0) <= 52 &&
+ bits.Len64(x.l1) <= 52 &&
+ bits.Len64(x.l2) <= 52 &&
+ bits.Len64(x.l3) <= 52 &&
+ bits.Len64(x.l4) <= 52
+}
+
+func TestMultiplyDistributesOverAdd(t *testing.T) {
+ multiplyDistributesOverAdd := func(x, y, z Element) bool {
+ // Compute t1 = (x+y)*z
+ t1 := new(Element)
+ t1.Add(&x, &y)
+ t1.Multiply(t1, &z)
+
+ // Compute t2 = x*z + y*z
+ t2 := new(Element)
+ t3 := new(Element)
+ t2.Multiply(&x, &z)
+ t3.Multiply(&y, &z)
+ t2.Add(t2, t3)
+
+ return t1.Equal(t2) == 1 && isInBounds(t1) && isInBounds(t2)
+ }
+
+ if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestMul64to128(t *testing.T) {
+ a := uint64(5)
+ b := uint64(5)
+ r := mul64(a, b)
+ if r.lo != 0x19 || r.hi != 0 {
+ t.Errorf("lo-range wide mult failed, got %d + %d*(2**64)", r.lo, r.hi)
+ }
+
+ a = uint64(18014398509481983) // 2^54 - 1
+ b = uint64(18014398509481983) // 2^54 - 1
+ r = mul64(a, b)
+ if r.lo != 0xff80000000000001 || r.hi != 0xfffffffffff {
+ t.Errorf("hi-range wide mult failed, got %d + %d*(2**64)", r.lo, r.hi)
+ }
+
+ a = uint64(1125899906842661)
+ b = uint64(2097155)
+ r = mul64(a, b)
+ r = addMul64(r, a, b)
+ r = addMul64(r, a, b)
+ r = addMul64(r, a, b)
+ r = addMul64(r, a, b)
+ if r.lo != 16888498990613035 || r.hi != 640 {
+ t.Errorf("wrong answer: %d + %d*(2**64)", r.lo, r.hi)
+ }
+}
+
+func TestSetBytesRoundTrip(t *testing.T) {
+ f1 := func(in [32]byte, fe Element) bool {
+ fe.SetBytes(in[:])
+
+ // Mask the most significant bit as it's ignored by SetBytes. (Now
+ // instead of earlier so we check the masking in SetBytes is working.)
+ in[len(in)-1] &= (1 << 7) - 1
+
+ return bytes.Equal(in[:], fe.Bytes()) && isInBounds(&fe)
+ }
+ if err := quick.Check(f1, nil); err != nil {
+ t.Errorf("failed bytes->FE->bytes round-trip: %v", err)
+ }
+
+ f2 := func(fe, r Element) bool {
+ r.SetBytes(fe.Bytes())
+
+		// Intentionally not using Equal, to avoid going through Bytes again.
+ // Calling reduce because both Generate and SetBytes can produce
+ // non-canonical representations.
+ fe.reduce()
+ r.reduce()
+ return fe == r
+ }
+ if err := quick.Check(f2, nil); err != nil {
+ t.Errorf("failed FE->bytes->FE round-trip: %v", err)
+ }
+
+ // Check some fixed vectors from dalek
+ type feRTTest struct {
+ fe Element
+ b []byte
+ }
+ var tests = []feRTTest{
+ {
+ fe: Element{358744748052810, 1691584618240980, 977650209285361, 1429865912637724, 560044844278676},
+ b: []byte{74, 209, 69, 197, 70, 70, 161, 222, 56, 226, 229, 19, 112, 60, 25, 92, 187, 74, 222, 56, 50, 153, 51, 233, 40, 74, 57, 6, 160, 185, 213, 31},
+ },
+ {
+ fe: Element{84926274344903, 473620666599931, 365590438845504, 1028470286882429, 2146499180330972},
+ b: []byte{199, 23, 106, 112, 61, 77, 216, 79, 186, 60, 11, 118, 13, 16, 103, 15, 42, 32, 83, 250, 44, 57, 204, 198, 78, 199, 253, 119, 146, 172, 3, 122},
+ },
+ }
+
+ for _, tt := range tests {
+ b := tt.fe.Bytes()
+ fe, _ := new(Element).SetBytes(tt.b)
+ if !bytes.Equal(b, tt.b) || fe.Equal(&tt.fe) != 1 {
+ t.Errorf("Failed fixed roundtrip: %v", tt)
+ }
+ }
+}
+
+func swapEndianness(buf []byte) []byte {
+ for i := 0; i < len(buf)/2; i++ {
+ buf[i], buf[len(buf)-i-1] = buf[len(buf)-i-1], buf[i]
+ }
+ return buf
+}
+
+func TestBytesBigEquivalence(t *testing.T) {
+ f1 := func(in [32]byte, fe, fe1 Element) bool {
+ fe.SetBytes(in[:])
+
+ in[len(in)-1] &= (1 << 7) - 1 // mask the most significant bit
+ b := new(big.Int).SetBytes(swapEndianness(in[:]))
+ fe1.fromBig(b)
+
+ if fe != fe1 {
+ return false
+ }
+
+ buf := make([]byte, 32)
+ buf = swapEndianness(fe1.toBig().FillBytes(buf))
+
+ return bytes.Equal(fe.Bytes(), buf) && isInBounds(&fe) && isInBounds(&fe1)
+ }
+ if err := quick.Check(f1, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+// fromBig sets v = n, and returns v. The bit length of n must not exceed 256.
+func (v *Element) fromBig(n *big.Int) *Element {
+ if n.BitLen() > 32*8 {
+ panic("edwards25519: invalid field element input size")
+ }
+
+ buf := make([]byte, 0, 32)
+ for _, word := range n.Bits() {
+ for i := 0; i < bits.UintSize; i += 8 {
+ if len(buf) >= cap(buf) {
+ break
+ }
+ buf = append(buf, byte(word))
+ word >>= 8
+ }
+ }
+
+ v.SetBytes(buf[:32])
+ return v
+}
+
+func (v *Element) fromDecimal(s string) *Element {
+ n, ok := new(big.Int).SetString(s, 10)
+ if !ok {
+ panic("not a valid decimal: " + s)
+ }
+ return v.fromBig(n)
+}
+
+// toBig returns v as a big.Int.
+func (v *Element) toBig() *big.Int {
+ buf := v.Bytes()
+
+ words := make([]big.Word, 32*8/bits.UintSize)
+ for n := range words {
+ for i := 0; i < bits.UintSize; i += 8 {
+ if len(buf) == 0 {
+ break
+ }
+ words[n] |= big.Word(buf[0]) << big.Word(i)
+ buf = buf[1:]
+ }
+ }
+
+ return new(big.Int).SetBits(words)
+}
+
+func TestDecimalConstants(t *testing.T) {
+ sqrtM1String := "19681161376707505956807079304988542015446066515923890162744021073123829784752"
+ if exp := new(Element).fromDecimal(sqrtM1String); sqrtM1.Equal(exp) != 1 {
+ t.Errorf("sqrtM1 is %v, expected %v", sqrtM1, exp)
+ }
+ // d is in the parent package, and we don't want to expose d or fromDecimal.
+ // dString := "37095705934669439343138083508754565189542113879843219016388785533085940283555"
+ // if exp := new(Element).fromDecimal(dString); d.Equal(exp) != 1 {
+ // t.Errorf("d is %v, expected %v", d, exp)
+ // }
+}
+
+func TestSetBytesRoundTripEdgeCases(t *testing.T) {
+ // TODO: values close to 0, close to 2^255-19, between 2^255-19 and 2^255-1,
+ // and between 2^255 and 2^256-1. Test both the documented SetBytes
+ // behavior, and that Bytes reduces them.
+}
+
+// Tests self-consistency between Multiply and Square.
+func TestConsistency(t *testing.T) {
+ var x Element
+ var x2, x2sq Element
+
+ x = Element{1, 1, 1, 1, 1}
+ x2.Multiply(&x, &x)
+ x2sq.Square(&x)
+
+ if x2 != x2sq {
+ t.Fatalf("all ones failed\nmul: %x\nsqr: %x\n", x2, x2sq)
+ }
+
+ var bytes [32]byte
+
+ _, err := io.ReadFull(rand.Reader, bytes[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+ x.SetBytes(bytes[:])
+
+ x2.Multiply(&x, &x)
+ x2sq.Square(&x)
+
+ if x2 != x2sq {
+ t.Fatalf("all ones failed\nmul: %x\nsqr: %x\n", x2, x2sq)
+ }
+}
+
+func TestEqual(t *testing.T) {
+ x := Element{1, 1, 1, 1, 1}
+ y := Element{5, 4, 3, 2, 1}
+
+ eq := x.Equal(&x)
+ if eq != 1 {
+ t.Errorf("wrong about equality")
+ }
+
+ eq = x.Equal(&y)
+ if eq != 0 {
+ t.Errorf("wrong about inequality")
+ }
+}
+
+func TestInvert(t *testing.T) {
+ x := Element{1, 1, 1, 1, 1}
+ one := Element{1, 0, 0, 0, 0}
+ var xinv, r Element
+
+ xinv.Invert(&x)
+ r.Multiply(&x, &xinv)
+ r.reduce()
+
+ if one != r {
+ t.Errorf("inversion identity failed, got: %x", r)
+ }
+
+ var bytes [32]byte
+
+ _, err := io.ReadFull(rand.Reader, bytes[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+ x.SetBytes(bytes[:])
+
+ xinv.Invert(&x)
+ r.Multiply(&x, &xinv)
+ r.reduce()
+
+ if one != r {
+ t.Errorf("random inversion identity failed, got: %x for field element %x", r, x)
+ }
+
+ zero := Element{}
+ x.Set(&zero)
+ if xx := xinv.Invert(&x); xx != &xinv {
+ t.Errorf("inverting zero did not return the receiver")
+ } else if xinv.Equal(&zero) != 1 {
+ t.Errorf("inverting zero did not return zero")
+ }
+}
+
+func TestSelectSwap(t *testing.T) {
+ a := Element{358744748052810, 1691584618240980, 977650209285361, 1429865912637724, 560044844278676}
+ b := Element{84926274344903, 473620666599931, 365590438845504, 1028470286882429, 2146499180330972}
+
+ var c, d Element
+
+ c.Select(&a, &b, 1)
+ d.Select(&a, &b, 0)
+
+ if c.Equal(&a) != 1 || d.Equal(&b) != 1 {
+ t.Errorf("Select failed")
+ }
+
+ c.Swap(&d, 0)
+
+ if c.Equal(&a) != 1 || d.Equal(&b) != 1 {
+ t.Errorf("Swap failed")
+ }
+
+ c.Swap(&d, 1)
+
+ if c.Equal(&b) != 1 || d.Equal(&a) != 1 {
+ t.Errorf("Swap failed")
+ }
+}
+
+func TestMult32(t *testing.T) {
+ mult32EquivalentToMul := func(x Element, y uint32) bool {
+ t1 := new(Element)
+ for i := 0; i < 100; i++ {
+ t1.Mult32(&x, y)
+ }
+
+ ty := new(Element)
+ ty.l0 = uint64(y)
+
+ t2 := new(Element)
+ for i := 0; i < 100; i++ {
+ t2.Multiply(&x, ty)
+ }
+
+ return t1.Equal(t2) == 1 && isInBounds(t1) && isInBounds(t2)
+ }
+
+ if err := quick.Check(mult32EquivalentToMul, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSqrtRatio(t *testing.T) {
+ // From draft-irtf-cfrg-ristretto255-decaf448-00, Appendix A.4.
+ type test struct {
+ u, v string
+ wasSquare int
+ r string
+ }
+ var tests = []test{
+ // If u is 0, the function is defined to return (0, TRUE), even if v
+ // is zero. Note that where used in this package, the denominator v
+ // is never zero.
+ {
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 1, "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ // 0/1 == 0²
+ {
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ 1, "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ // If u is non-zero and v is zero, defined to return (0, FALSE).
+ {
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0, "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ // 2/1 is not square in this field.
+ {
+ "0200000000000000000000000000000000000000000000000000000000000000",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ 0, "3c5ff1b5d8e4113b871bd052f9e7bcd0582804c266ffb2d4f4203eb07fdb7c54",
+ },
+ // 4/1 == 2²
+ {
+ "0400000000000000000000000000000000000000000000000000000000000000",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ 1, "0200000000000000000000000000000000000000000000000000000000000000",
+ },
+ // 1/4 == (2⁻¹)² == (2^(p-2))² per Euler's theorem
+ {
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ "0400000000000000000000000000000000000000000000000000000000000000",
+ 1, "f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3f",
+ },
+ }
+
+ for i, tt := range tests {
+ u, _ := new(Element).SetBytes(decodeHex(tt.u))
+ v, _ := new(Element).SetBytes(decodeHex(tt.v))
+ want, _ := new(Element).SetBytes(decodeHex(tt.r))
+ got, wasSquare := new(Element).SqrtRatio(u, v)
+ if got.Equal(want) == 0 || wasSquare != tt.wasSquare {
+ t.Errorf("%d: got (%v, %v), want (%v, %v)", i, got, wasSquare, want, tt.wasSquare)
+ }
+ }
+}
+
+func TestCarryPropagate(t *testing.T) {
+ asmLikeGeneric := func(a [5]uint64) bool {
+ t1 := &Element{a[0], a[1], a[2], a[3], a[4]}
+ t2 := &Element{a[0], a[1], a[2], a[3], a[4]}
+
+ t1.carryPropagate()
+ t2.carryPropagateGeneric()
+
+ if *t1 != *t2 {
+ t.Logf("got: %#v,\nexpected: %#v", t1, t2)
+ }
+
+ return *t1 == *t2 && isInBounds(t2)
+ }
+
+ if err := quick.Check(asmLikeGeneric, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+
+ if !asmLikeGeneric([5]uint64{0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}) {
+ t.Errorf("failed for {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}")
+ }
+}
+
+func TestFeSquare(t *testing.T) {
+ asmLikeGeneric := func(a Element) bool {
+ t1 := a
+ t2 := a
+
+ feSquareGeneric(&t1, &t1)
+ feSquare(&t2, &t2)
+
+ if t1 != t2 {
+ t.Logf("got: %#v,\nexpected: %#v", t1, t2)
+ }
+
+ return t1 == t2 && isInBounds(&t2)
+ }
+
+ if err := quick.Check(asmLikeGeneric, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestFeMul(t *testing.T) {
+ asmLikeGeneric := func(a, b Element) bool {
+ a1 := a
+ a2 := a
+ b1 := b
+ b2 := b
+
+ feMulGeneric(&a1, &a1, &b1)
+ feMul(&a2, &a2, &b2)
+
+ if a1 != a2 || b1 != b2 {
+ t.Logf("got: %#v,\nexpected: %#v", a1, a2)
+ t.Logf("got: %#v,\nexpected: %#v", b1, b2)
+ }
+
+ return a1 == a2 && isInBounds(&a2) &&
+ b1 == b2 && isInBounds(&b2)
+ }
+
+ if err := quick.Check(asmLikeGeneric, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func decodeHex(s string) []byte {
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
diff --git a/src/crypto/internal/edwards25519/scalar.go b/src/crypto/internal/edwards25519/scalar.go
new file mode 100644
index 0000000..d34ecea
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalar.go
@@ -0,0 +1,343 @@
+// Copyright (c) 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "encoding/binary"
+ "errors"
+)
+
+// A Scalar is an integer modulo
+//
+// l = 2^252 + 27742317777372353535851937790883648493
+//
+// which is the prime order of the edwards25519 group.
+//
+// This type works similarly to math/big.Int, and all arguments and
+// receivers are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Scalar struct {
+ // s is the scalar in the Montgomery domain, in the format of the
+ // fiat-crypto implementation.
+ s fiatScalarMontgomeryDomainFieldElement
+}
+
+// The field implementation in scalar_fiat.go is generated by the fiat-crypto
+// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
+// from a formally verified model.
+//
+// fiat-crypto code comes under the following license.
+//
+// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+// NewScalar returns a new zero Scalar.
+func NewScalar() *Scalar {
+ return &Scalar{}
+}
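+
+// Usage sketch (illustrative, with hypothetical *Scalar values x, y, z):
+//
+//	s := NewScalar().Add(x, y) // s = x + y mod l
+//	s.Multiply(s, z)           // s = (x + y) * z mod l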
+
+// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
+// using Multiply and then Add.
+func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
+ // Make a copy of z in case it aliases s.
+ zCopy := new(Scalar).Set(z)
+ return s.Multiply(x, y).Add(s, zCopy)
+}
+
+// Add sets s = x + y mod l, and returns s.
+func (s *Scalar) Add(x, y *Scalar) *Scalar {
+ // s = 1 * x + y mod l
+ fiatScalarAdd(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Subtract sets s = x - y mod l, and returns s.
+func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
+ // s = -1 * y + x mod l
+ fiatScalarSub(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Negate sets s = -x mod l, and returns s.
+func (s *Scalar) Negate(x *Scalar) *Scalar {
+ // s = -1 * x + 0 mod l
+ fiatScalarOpp(&s.s, &x.s)
+ return s
+}
+
+// Multiply sets s = x * y mod l, and returns s.
+func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
+ // s = x * y + 0 mod l
+ fiatScalarMul(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Set sets s = x, and returns s.
+func (s *Scalar) Set(x *Scalar) *Scalar {
+ *s = *x
+ return s
+}
+
+// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
+// If x is not of the right length, SetUniformBytes returns nil and an error,
+// and the receiver is unchanged.
+//
+// SetUniformBytes can be used to set s to a uniformly distributed value given
+// 64 uniformly distributed random bytes.
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
+ }
+
+ // We have a value x of 512 bits, but our fiatScalarFromBytes function
+ // expects an input lower than l, which is a little over 252 bits.
+ //
+ // Instead of writing a reduction function that operates on wider inputs, we
+ // can interpret x as the sum of three shorter values a, b, and c.
+ //
+ // x = a + b * 2^168 + c * 2^336 mod l
+ //
+ // We then precompute 2^168 and 2^336 modulo l, and perform the reduction
+ // with two multiplications and two additions.
+
+ s.setShortBytes(x[:21])
+ t := new(Scalar).setShortBytes(x[21:42])
+ s.Add(s, t.Multiply(t, scalarTwo168))
+ t.setShortBytes(x[42:])
+ s.Add(s, t.Multiply(t, scalarTwo336))
+
+ return s, nil
+}
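+
+// For example (illustrative, assuming a crypto/rand import), a uniformly
+// distributed scalar can be derived from 64 random bytes:
+//
+//	var buf [64]byte
+//	if _, err := rand.Read(buf[:]); err != nil {
+//		panic(err)
+//	}
+//	s, _ := NewScalar().SetUniformBytes(buf[:])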
+
+// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
+// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
+// in the 2^256 Montgomery domain.
+var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
+ 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
+var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
+ 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
+
+// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
+// than 32 bytes.
+func (s *Scalar) setShortBytes(x []byte) *Scalar {
+ if len(x) >= 32 {
+ panic("edwards25519: internal error: setShortBytes called with a long string")
+ }
+ var buf [32]byte
+ copy(buf[:], x)
+ fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+ return s
+}
+
+// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
+// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
+// returns nil and an error, and the receiver is unchanged.
+func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
+ if len(x) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ if !isReduced(x) {
+ return nil, errors.New("invalid scalar encoding")
+ }
+
+ fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+
+ return s, nil
+}
+
+// scalarMinusOneBytes is l - 1 in little endian.
+var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
+
+// isReduced returns whether the given scalar in 32-byte little endian encoded
+// form is reduced modulo l.
+func isReduced(s []byte) bool {
+ if len(s) != 32 {
+ return false
+ }
+
+ for i := len(s) - 1; i >= 0; i-- {
+ switch {
+ case s[i] > scalarMinusOneBytes[i]:
+ return false
+ case s[i] < scalarMinusOneBytes[i]:
+ return true
+ }
+ }
+ return true
+}
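+
+// For example (illustrative), isReduced(scalarMinusOneBytes[:]) is true, while
+// the encoding of l itself (the same bytes with the first byte incremented to
+// 237) is rejected, since the comparison runs from the most significant byte
+// down.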
+
+// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
+// Section 5.1.5 (also known as clamping) and sets s to the result. The input
+// must be 32 bytes, and it is not modified. If x is not of the right length,
+// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
+//
+// Note that since Scalar values are always reduced modulo the prime order of
+// the curve, the resulting value will not preserve any of the cofactor-clearing
+// properties that clamping is meant to provide. It will however work as
+// expected as long as it is applied to points on the prime order subgroup, like
+// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
+// irrelevant RFC 7748 clamping, but it is now required for compatibility.
+func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
+ // The description above omits the purpose of the high bits of the clamping
+ // for brevity, but those are also lost to reductions, and are also
+ // irrelevant to edwards25519 as they protect against a specific
+ // implementation bug that was once observed in a generic Montgomery ladder.
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
+ }
+
+ // We need to use the wide reduction from SetUniformBytes, since clamping
+ // sets the 2^254 bit, making the value higher than the order.
+ var wideBytes [64]byte
+ copy(wideBytes[:], x[:])
+ wideBytes[0] &= 248
+ wideBytes[31] &= 63
+ wideBytes[31] |= 64
+ return s.SetUniformBytes(wideBytes[:])
+}
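+
+// Usage sketch (illustrative, assuming a crypto/sha512 import), mirroring the
+// RFC 8032 expansion of a 32-byte Ed25519 seed:
+//
+//	h := sha512.Sum512(seed)
+//	s, err := NewScalar().SetBytesWithClamping(h[:32])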
+
+// Bytes returns the canonical 32-byte little-endian encoding of s.
+func (s *Scalar) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var encoded [32]byte
+ return s.bytes(&encoded)
+}
+
+func (s *Scalar) bytes(out *[32]byte) []byte {
+ var ss fiatScalarNonMontgomeryDomainFieldElement
+ fiatScalarFromMontgomery(&ss, &s.s)
+ fiatScalarToBytes(out, (*[4]uint64)(&ss))
+ return out[:]
+}
+
+// Equal returns 1 if s and t are equal, and 0 otherwise.
+func (s *Scalar) Equal(t *Scalar) int {
+ var diff fiatScalarMontgomeryDomainFieldElement
+ fiatScalarSub(&diff, &s.s, &t.s)
+ var nonzero uint64
+ fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
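+	// Smear every bit of the difference into bit 0, so that after
+	// negation the low bit is 1 exactly when s == t. This keeps the
+	// comparison constant time.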
+ nonzero |= nonzero >> 32
+ nonzero |= nonzero >> 16
+ nonzero |= nonzero >> 8
+ nonzero |= nonzero >> 4
+ nonzero |= nonzero >> 2
+ nonzero |= nonzero >> 1
+ return int(^nonzero) & 1
+}
+
+// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
+//
+// w must be between 2 and 8, or nonAdjacentForm will panic.
+func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
+ // This implementation is adapted from the one
+ // in curve25519-dalek and is documented there:
+ // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+ if w < 2 {
+ panic("w must be at least 2 by the definition of NAF")
+ } else if w > 8 {
+ panic("NAF digits must fit in int8")
+ }
+
+ var naf [256]int8
+ var digits [5]uint64
+
+ for i := 0; i < 4; i++ {
+ digits[i] = binary.LittleEndian.Uint64(b[i*8:])
+ }
+
+ width := uint64(1 << w)
+ windowMask := uint64(width - 1)
+
+ pos := uint(0)
+ carry := uint64(0)
+ for pos < 256 {
+ indexU64 := pos / 64
+ indexBit := pos % 64
+ var bitBuf uint64
+ if indexBit < 64-w {
+ // This window's bits are contained in a single u64
+ bitBuf = digits[indexU64] >> indexBit
+ } else {
+ // Combine the current 64 bits with bits from the next 64
+ bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
+ }
+
+ // Add carry into the current window
+ window := carry + (bitBuf & windowMask)
+
+ if window&1 == 0 {
+ // If the window value is even, preserve the carry and continue.
+ // Why is the carry preserved?
+ // If carry == 0 and window & 1 == 0,
+ // then the next carry should be 0
+ // If carry == 1 and window & 1 == 0,
+ // then bit_buf & 1 == 1 so the next carry should be 1
+ pos += 1
+ continue
+ }
+
+ if window < width/2 {
+ carry = 0
+ naf[pos] = int8(window)
+ } else {
+ carry = 1
+ naf[pos] = int8(window) - int8(width)
+ }
+
+ pos += w
+ }
+ return naf
+}
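+
+// Worked example (illustrative): with w = 4, the scalar 15 is recoded as
+// naf[0] = -1 and naf[4] = +1, that is 15 = 2⁴ - 1. Every non-zero digit is
+// odd, and any w consecutive positions contain at most one non-zero digit.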
+
+func (s *Scalar) signedRadix16() [64]int8 {
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+
+ var digits [64]int8
+
+ // Compute unsigned radix-16 digits:
+ for i := 0; i < 32; i++ {
+ digits[2*i] = int8(b[i] & 15)
+ digits[2*i+1] = int8((b[i] >> 4) & 15)
+ }
+
+ // Recenter coefficients:
+ for i := 0; i < 63; i++ {
+ carry := (digits[i] + 8) >> 4
+ digits[i] -= carry << 4
+ digits[i+1] += carry
+ }
+
+ return digits
+}
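+
+// Worked example (illustrative): a low byte of 0xE9 (233) gives unsigned
+// digits (9, 14); recentering maps them to (-7, -1) with a carry of 1 into
+// the next digit, since 233 = -7 + (-1)·16 + 1·256.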
diff --git a/src/crypto/internal/edwards25519/scalar_alias_test.go b/src/crypto/internal/edwards25519/scalar_alias_test.go
new file mode 100644
index 0000000..4d83441
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalar_alias_test.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "testing"
+ "testing/quick"
+)
+
+func TestScalarAliasing(t *testing.T) {
+ checkAliasingOneArg := func(f func(v, x *Scalar) *Scalar, v, x Scalar) bool {
+ x1, v1 := x, x
+
+ // Calculate a reference f(x) without aliasing.
+ if out := f(&v, &x); out != &v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Test aliasing the argument and the receiver.
+ if out := f(&v1, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+		// Ensure the argument was not modified.
+ return x == x1
+ }
+
+ checkAliasingTwoArgs := func(f func(v, x, y *Scalar) *Scalar, v, x, y Scalar) bool {
+ x1, y1, v1 := x, y, Scalar{}
+
+ // Calculate a reference f(x, y) without aliasing.
+ if out := f(&v, &x, &y); out != &v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &y); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = y
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Calculate a reference f(x, x) without aliasing.
+ if out := f(&v, &x, &x); out != &v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &x); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = x
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+ // Test aliasing both arguments and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Ensure the arguments were not modified.
+ return x == x1 && y == y1
+ }
+
+ for name, f := range map[string]interface{}{
+ "Negate": func(v, x Scalar) bool {
+ return checkAliasingOneArg((*Scalar).Negate, v, x)
+ },
+ "Multiply": func(v, x, y Scalar) bool {
+ return checkAliasingTwoArgs((*Scalar).Multiply, v, x, y)
+ },
+ "Add": func(v, x, y Scalar) bool {
+ return checkAliasingTwoArgs((*Scalar).Add, v, x, y)
+ },
+ "Subtract": func(v, x, y Scalar) bool {
+ return checkAliasingTwoArgs((*Scalar).Subtract, v, x, y)
+ },
+ "MultiplyAdd1": func(v, x, y, fixed Scalar) bool {
+ return checkAliasingTwoArgs(func(v, x, y *Scalar) *Scalar {
+ return v.MultiplyAdd(&fixed, x, y)
+ }, v, x, y)
+ },
+ "MultiplyAdd2": func(v, x, y, fixed Scalar) bool {
+ return checkAliasingTwoArgs(func(v, x, y *Scalar) *Scalar {
+ return v.MultiplyAdd(x, &fixed, y)
+ }, v, x, y)
+ },
+ "MultiplyAdd3": func(v, x, y, fixed Scalar) bool {
+ return checkAliasingTwoArgs(func(v, x, y *Scalar) *Scalar {
+ return v.MultiplyAdd(x, y, &fixed)
+ }, v, x, y)
+ },
+ } {
+ err := quick.Check(f, &quick.Config{MaxCountScale: 1 << 5})
+ if err != nil {
+ t.Errorf("%v: %v", name, err)
+ }
+ }
+}
diff --git a/src/crypto/internal/edwards25519/scalar_fiat.go b/src/crypto/internal/edwards25519/scalar_fiat.go
new file mode 100644
index 0000000..2e5782b
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalar_fiat.go
@@ -0,0 +1,1147 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes
+//
+// curve description: Scalar
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes
+//
+// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package edwards25519
+
+import "math/bits"
+
+type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type fiatScalarInt1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarMontgomeryDomainFieldElement [4]uint64
+
+// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarNonMontgomeryDomainFieldElement [4]uint64
+
+// fiatScalarCmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
+
+// fiatScalarMul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ x19 := (uint64(fiatScalarUint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0x1000000000000000)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ x30 := (uint64(fiatScalarUint1(x29)) + x25)
+ var x32 uint64
+ _, x32 = bits.Add64(x11, x26, uint64(0x0))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg2[3])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[2])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[1])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg2[0])
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x48, x45, uint64(0x0))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52)))
+ x55 := (uint64(fiatScalarUint1(x54)) + x42)
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(0x0))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ x76 := (uint64(fiatScalarUint1(x75)) + x71)
+ var x78 uint64
+ _, x78 = bits.Add64(x56, x72, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84)))
+ x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65)))
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg2[3])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg2[2])
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg2[1])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg2[0])
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x95, x92, uint64(0x0))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99)))
+ x102 := (uint64(fiatScalarUint1(x101)) + x89)
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(0x0))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b)
+ var x115 uint64
+ var x116 uint64
+ x116, x115 = bits.Mul64(x113, 0x1000000000000000)
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed)
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x120, x117, uint64(0x0))
+ x123 := (uint64(fiatScalarUint1(x122)) + x118)
+ var x125 uint64
+ _, x125 = bits.Add64(x103, x119, uint64(0x0))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131)))
+ x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112)))
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg2[3])
+ var x137 uint64
+ var x138 uint64
+ x138, x137 = bits.Mul64(x3, arg2[2])
+ var x139 uint64
+ var x140 uint64
+ x140, x139 = bits.Mul64(x3, arg2[1])
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg2[0])
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x142, x139, uint64(0x0))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146)))
+ x149 := (uint64(fiatScalarUint1(x148)) + x136)
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(0x0))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153)))
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155)))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157)))
+ var x160 uint64
+ _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b)
+ var x162 uint64
+ var x163 uint64
+ x163, x162 = bits.Mul64(x160, 0x1000000000000000)
+ var x164 uint64
+ var x165 uint64
+ x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6)
+ var x166 uint64
+ var x167 uint64
+ x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed)
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x167, x164, uint64(0x0))
+ x170 := (uint64(fiatScalarUint1(x169)) + x165)
+ var x172 uint64
+ _, x172 = bits.Add64(x150, x166, uint64(0x0))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178)))
+ x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187)))
+ var x191 uint64
+ _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189)))
+ var x192 uint64
+ fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173)
+ var x193 uint64
+ fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175)
+ var x194 uint64
+ fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177)
+ var x195 uint64
+ fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179)
+ out1[0] = x192
+ out1[1] = x193
+ out1[2] = x194
+ out1[3] = x195
+}
+
+// fiatScalarAdd adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1)
+ var x20 uint64
+ fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3)
+ var x21 uint64
+ fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5)
+ var x22 uint64
+ fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
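+
+// Note that fiatScalarAdd follows the usual constant-time pattern: it computes
+// the full-width sum, speculatively subtracts the group order
+// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed, and
+// uses fiatScalarCmovznzU64 to select the reduced result without branching on
+// the borrow.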
+
+// fiatScalarSub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// fiatScalarOpp negates a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
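+
+// fiatScalarSub and fiatScalarOpp above take the complementary approach to
+// fiatScalarAdd: x9 is an all-ones mask exactly when the limb-wise subtraction
+// borrowed, so ANDing it with the limbs of m adds the modulus back only when
+// the result went negative.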
+
+// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) {
+ x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3])))
+ *out1 = x1
+}
+
+// fiatScalarFromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0xd2b51da312547e1b)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0x1000000000000000)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+ var x13 uint64
+ _, x13 = bits.Add64(x1, x8, uint64(0x0))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0))
+ var x18 uint64
+ _, x18 = bits.Mul64(x16, 0xd2b51da312547e1b)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x18, 0x1000000000000000)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed)
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+ var x29 uint64
+ _, x29 = bits.Add64(x16, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39)))
+ var x42 uint64
+ _, x42 = bits.Mul64(x36, 0xd2b51da312547e1b)
+ var x44 uint64
+ var x45 uint64
+ x45, x44 = bits.Mul64(x42, 0x1000000000000000)
+ var x46 uint64
+ var x47 uint64
+ x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6)
+ var x48 uint64
+ var x49 uint64
+ x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed)
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x49, x46, uint64(0x0))
+ var x53 uint64
+ _, x53 = bits.Add64(x36, x48, uint64(0x0))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53)))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x60, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ var x77 uint64
+ _, x77 = bits.Add64(x60, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77)))
+ var x80 uint64
+ var x81 uint64
+ x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79)))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81)))
+ x84 := (uint64(fiatScalarUint1(x83)) + x69)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90)))
+ var x94 uint64
+ _, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78)
+ var x96 uint64
+ fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80)
+ var x97 uint64
+ fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82)
+ var x98 uint64
+ fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84)
+ out1[0] = x95
+ out1[1] = x96
+ out1[2] = x97
+ out1[3] = x98
+}
+
+// fiatScalarToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ _, x19 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x19, 0x1000000000000000)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed)
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+ var x30 uint64
+ _, x30 = bits.Add64(x11, x25, uint64(0x0))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d)
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01)
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x44, x41, uint64(0x0))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x31, x43, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56)))
+ var x59 uint64
+ _, x59 = bits.Mul64(x51, 0xd2b51da312547e1b)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x59, 0x1000000000000000)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed)
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x66, x63, uint64(0x0))
+ var x70 uint64
+ _, x70 = bits.Add64(x51, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x84, x81, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x71, x83, uint64(0x0))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96)))
+ var x99 uint64
+ _, x99 = bits.Mul64(x91, 0xd2b51da312547e1b)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x99, 0x1000000000000000)
+ var x103 uint64
+ var x104 uint64
+ x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6)
+ var x105 uint64
+ var x106 uint64
+ x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x106, x103, uint64(0x0))
+ var x110 uint64
+ _, x110 = bits.Add64(x91, x105, uint64(0x0))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x3, 0x399411b7c309a3d)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126)))
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128)))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x111, x123, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136)))
+ var x139 uint64
+ _, x139 = bits.Mul64(x131, 0xd2b51da312547e1b)
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x139, 0x1000000000000000)
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6)
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed)
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x146, x143, uint64(0x0))
+ var x150 uint64
+ _, x150 = bits.Add64(x131, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154)))
+ x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142)
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163)))
+ var x167 uint64
+ _, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165)))
+ var x168 uint64
+ fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151)
+ var x169 uint64
+ fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153)
+ var x170 uint64
+ fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155)
+ var x171 uint64
+ fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157)
+ out1[0] = x168
+ out1[1] = x169
+ out1[2] = x170
+ out1[3] = x171
+}
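+
+// Two constants drive the Montgomery arithmetic above: 0xd2b51da312547e1b acts
+// as the Montgomery factor m' = -m⁻¹ mod 2^64 in each reduction step, and the
+// four multipliers in fiatScalarToMontgomery (0xa40611e3449c0f01,
+// 0xd00e1ba768859347, 0xceec73d217f5be65, 0x399411b7c309a3d, least significant
+// first) are the limbs of R² mod m with R = 2^256, so entering the Montgomery
+// domain is a single Montgomery multiplication by R².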
+
+// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := uint8((x58 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x53
+ out1[28] = x55
+ out1[29] = x57
+ out1[30] = x59
+ out1[31] = x60
+}
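+
+// For example, a limb arg1[0] = 0x0807060504030201 serializes to
+// out1[0..7] = 01 02 03 04 05 06 07 08: least significant byte first.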
+
+// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
+ x1 := (uint64(arg1[31]) << 56)
+ x2 := (uint64(arg1[30]) << 48)
+ x3 := (uint64(arg1[29]) << 40)
+ x4 := (uint64(arg1[28]) << 32)
+ x5 := (uint64(arg1[27]) << 24)
+ x6 := (uint64(arg1[26]) << 16)
+ x7 := (uint64(arg1[25]) << 8)
+ x8 := arg1[24]
+ x9 := (uint64(arg1[23]) << 56)
+ x10 := (uint64(arg1[22]) << 48)
+ x11 := (uint64(arg1[21]) << 40)
+ x12 := (uint64(arg1[20]) << 32)
+ x13 := (uint64(arg1[19]) << 24)
+ x14 := (uint64(arg1[18]) << 16)
+ x15 := (uint64(arg1[17]) << 8)
+ x16 := arg1[16]
+ x17 := (uint64(arg1[15]) << 56)
+ x18 := (uint64(arg1[14]) << 48)
+ x19 := (uint64(arg1[13]) << 40)
+ x20 := (uint64(arg1[12]) << 32)
+ x21 := (uint64(arg1[11]) << 24)
+ x22 := (uint64(arg1[10]) << 16)
+ x23 := (uint64(arg1[9]) << 8)
+ x24 := arg1[8]
+ x25 := (uint64(arg1[7]) << 56)
+ x26 := (uint64(arg1[6]) << 48)
+ x27 := (uint64(arg1[5]) << 40)
+ x28 := (uint64(arg1[4]) << 32)
+ x29 := (uint64(arg1[3]) << 24)
+ x30 := (uint64(arg1[2]) << 16)
+ x31 := (uint64(arg1[1]) << 8)
+ x32 := arg1[0]
+ x33 := (x31 + uint64(x32))
+ x34 := (x30 + x33)
+ x35 := (x29 + x34)
+ x36 := (x28 + x35)
+ x37 := (x27 + x36)
+ x38 := (x26 + x37)
+ x39 := (x25 + x38)
+ x40 := (x23 + uint64(x24))
+ x41 := (x22 + x40)
+ x42 := (x21 + x41)
+ x43 := (x20 + x42)
+ x44 := (x19 + x43)
+ x45 := (x18 + x44)
+ x46 := (x17 + x45)
+ x47 := (x15 + uint64(x16))
+ x48 := (x14 + x47)
+ x49 := (x13 + x48)
+ x50 := (x12 + x49)
+ x51 := (x11 + x50)
+ x52 := (x10 + x51)
+ x53 := (x9 + x52)
+ x54 := (x7 + uint64(x8))
+ x55 := (x6 + x54)
+ x56 := (x5 + x55)
+ x57 := (x4 + x56)
+ x58 := (x3 + x57)
+ x59 := (x2 + x58)
+ x60 := (x1 + x59)
+ out1[0] = x39
+ out1[1] = x46
+ out1[2] = x53
+ out1[3] = x60
+}
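+
+// A minimal usage sketch of the functions above (b, out, limbs, mont, and back
+// are illustrative local variables, not part of this file): a reduced scalar
+// round-trips through the Montgomery domain unchanged.
+//
+// var b, out [32]uint8 // b holds the little-endian encoding of a reduced scalar
+// var limbs [4]uint64
+// fiatScalarFromBytes(&limbs, &b)
+// var mont fiatScalarMontgomeryDomainFieldElement
+// fiatScalarToMontgomery(&mont, (*fiatScalarNonMontgomeryDomainFieldElement)(&limbs))
+// var back fiatScalarNonMontgomeryDomainFieldElement
+// fiatScalarFromMontgomery(&back, &mont)
+// fiatScalarToBytes(&out, (*[4]uint64)(&back)) // out == b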
diff --git a/src/crypto/internal/edwards25519/scalar_test.go b/src/crypto/internal/edwards25519/scalar_test.go
new file mode 100644
index 0000000..67bcdaf
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalar_test.go
@@ -0,0 +1,249 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "bytes"
+ "encoding/hex"
+ "math/big"
+ mathrand "math/rand"
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+var scOneBytes = [32]byte{1}
+var scOne, _ = new(Scalar).SetCanonicalBytes(scOneBytes[:])
+var scMinusOne, _ = new(Scalar).SetCanonicalBytes(scalarMinusOneBytes[:])
+
+// Generate returns a valid (reduced modulo l) Scalar with a distribution
+// weighted towards high, low, and edge values.
+func (Scalar) Generate(rand *mathrand.Rand, size int) reflect.Value {
+ var s [32]byte
+ diceRoll := rand.Intn(100)
+ switch {
+ case diceRoll == 0:
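+ // s is left as the all-zero scalar.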
+ case diceRoll == 1:
+ s = scOneBytes
+ case diceRoll == 2:
+ s = scalarMinusOneBytes
+ case diceRoll < 5:
+ // Generate a low scalar in [0, 2^125).
+ rand.Read(s[:16])
+ s[15] &= (1 << 5) - 1
+ case diceRoll < 10:
+ // Generate a high scalar in [2^252, 2^252 + 2^124).
+ s[31] = 1 << 4
+ rand.Read(s[:16])
+ s[15] &= (1 << 4) - 1
+ default:
+ // Generate a valid scalar in [0, l) by returning [0, 2^252) which has a
+ // negligibly different distribution (the former has a 2^-127.6 chance
+ // of being out of the latter range).
+ rand.Read(s[:])
+ s[31] &= (1 << 4) - 1
+ }
+
+ val := Scalar{}
+ fiatScalarFromBytes((*[4]uint64)(&val.s), &s)
+ fiatScalarToMontgomery(&val.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&val.s))
+
+ return reflect.ValueOf(val)
+}
+
+// quickCheckConfig1024 will make each quickcheck test run (1024 * -quickchecks)
+// times. The default value of -quickchecks is 100.
+var quickCheckConfig1024 = &quick.Config{MaxCountScale: 1 << 10}
+
+func TestScalarGenerate(t *testing.T) {
+ f := func(sc Scalar) bool {
+ return isReduced(sc.Bytes())
+ }
+ if err := quick.Check(f, quickCheckConfig1024); err != nil {
+ t.Errorf("generated unreduced scalar: %v", err)
+ }
+}
+
+func TestScalarSetCanonicalBytes(t *testing.T) {
+ f1 := func(in [32]byte, sc Scalar) bool {
+ // Mask out top 4 bits to guarantee value falls in [0, l).
+ in[len(in)-1] &= (1 << 4) - 1
+ if _, err := sc.SetCanonicalBytes(in[:]); err != nil {
+ return false
+ }
+ repr := sc.Bytes()
+ return bytes.Equal(in[:], repr) && isReduced(repr)
+ }
+ if err := quick.Check(f1, quickCheckConfig1024); err != nil {
+ t.Errorf("failed bytes->scalar->bytes round-trip: %v", err)
+ }
+
+ f2 := func(sc1, sc2 Scalar) bool {
+ if _, err := sc2.SetCanonicalBytes(sc1.Bytes()); err != nil {
+ return false
+ }
+ return sc1 == sc2
+ }
+ if err := quick.Check(f2, quickCheckConfig1024); err != nil {
+ t.Errorf("failed scalar->bytes->scalar round-trip: %v", err)
+ }
+
+ b := scalarMinusOneBytes
+ b[31] += 1
+ // Use a copy of the value so the receiver-modification check below
+ // compares scalar values rather than always-equal pointers.
+ s := *scOne
+ if out, err := s.SetCanonicalBytes(b[:]); err == nil {
+ t.Errorf("SetCanonicalBytes worked on a non-canonical value")
+ } else if s != *scOne {
+ t.Errorf("SetCanonicalBytes modified its receiver")
+ } else if out != nil {
+ t.Errorf("SetCanonicalBytes did not return nil with an error")
+ }
+}
+
+func TestScalarSetUniformBytes(t *testing.T) {
+ mod, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
+ mod.Add(mod, new(big.Int).Lsh(big.NewInt(1), 252))
+ f := func(in [64]byte, sc Scalar) bool {
+ sc.SetUniformBytes(in[:])
+ repr := sc.Bytes()
+ if !isReduced(repr) {
+ return false
+ }
+ scBig := bigIntFromLittleEndianBytes(repr[:])
+ inBig := bigIntFromLittleEndianBytes(in[:])
+ return inBig.Mod(inBig, mod).Cmp(scBig) == 0
+ }
+ if err := quick.Check(f, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarSetBytesWithClamping(t *testing.T) {
+ // Generated with libsodium.js 1.0.18 crypto_scalarmult_ed25519_base.
+
+ random := "633d368491364dc9cd4c1bf891b1d59460face1644813240a313e61f2c88216e"
+ s, _ := new(Scalar).SetBytesWithClamping(decodeHex(random))
+ p := new(Point).ScalarBaseMult(s)
+ want := "1d87a9026fd0126a5736fe1628c95dd419172b5b618457e041c9c861b2494a94"
+ if got := hex.EncodeToString(p.Bytes()); got != want {
+ t.Errorf("random: got %q, want %q", got, want)
+ }
+
+ zero := "0000000000000000000000000000000000000000000000000000000000000000"
+ s, _ = new(Scalar).SetBytesWithClamping(decodeHex(zero))
+ p = new(Point).ScalarBaseMult(s)
+ want = "693e47972caf527c7883ad1b39822f026f47db2ab0e1919955b8993aa04411d1"
+ if got := hex.EncodeToString(p.Bytes()); got != want {
+ t.Errorf("zero: got %q, want %q", got, want)
+ }
+
+ one := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+ s, _ = new(Scalar).SetBytesWithClamping(decodeHex(one))
+ p = new(Point).ScalarBaseMult(s)
+ want = "12e9a68b73fd5aacdbcaf3e88c46fea6ebedb1aa84eed1842f07f8edab65e3a7"
+ if got := hex.EncodeToString(p.Bytes()); got != want {
+ t.Errorf("one: got %q, want %q", got, want)
+ }
+}
+
+func bigIntFromLittleEndianBytes(b []byte) *big.Int {
+ bb := make([]byte, len(b))
+ for i := range b {
+ bb[i] = b[len(b)-i-1]
+ }
+ return new(big.Int).SetBytes(bb)
+}
+
+func TestScalarMultiplyDistributesOverAdd(t *testing.T) {
+ multiplyDistributesOverAdd := func(x, y, z Scalar) bool {
+ // Compute t1 = (x+y)*z
+ var t1 Scalar
+ t1.Add(&x, &y)
+ t1.Multiply(&t1, &z)
+
+ // Compute t2 = x*z + y*z
+ var t2 Scalar
+ var t3 Scalar
+ t2.Multiply(&x, &z)
+ t3.Multiply(&y, &z)
+ t2.Add(&t2, &t3)
+
+ reprT1, reprT2 := t1.Bytes(), t2.Bytes()
+
+ return t1 == t2 && isReduced(reprT1) && isReduced(reprT2)
+ }
+
+ if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarAddLikeSubNeg(t *testing.T) {
+ addLikeSubNeg := func(x, y Scalar) bool {
+ // Compute t1 = x - y
+ var t1 Scalar
+ t1.Subtract(&x, &y)
+
+ // Compute t2 = -y + x
+ var t2 Scalar
+ t2.Negate(&y)
+ t2.Add(&t2, &x)
+
+ return t1 == t2 && isReduced(t1.Bytes())
+ }
+
+ if err := quick.Check(addLikeSubNeg, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarNonAdjacentForm(t *testing.T) {
+ s, _ := (&Scalar{}).SetCanonicalBytes([]byte{
+ 0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d,
+ 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8, 0x26, 0x4d,
+ 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1,
+ 0x58, 0x9e, 0x7b, 0x7f, 0x23, 0x76, 0xef, 0x09,
+ })
+
+ expectedNaf := [256]int8{
+ 0, 13, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 9, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 11, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0,
+ -9, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 9, 0,
+ 0, 0, 0, -15, 0, 0, 0, 0, -7, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, -3, 0,
+ 0, 0, 0, -11, 0, 0, 0, 0, -7, 0, 0, 0, 0, -13, 0, 0, 0, 0, 11, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, -15, 0, 0, 0, 0, 1, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 13, 0, 0, 0,
+ 0, 0, 0, 11, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 7,
+ 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ }
+
+ sNaf := s.nonAdjacentForm(5)
+
+ for i := 0; i < 256; i++ {
+ if expectedNaf[i] != sNaf[i] {
+ t.Errorf("Wrong digit at position %d, got %d, expected %d", i, sNaf[i], expectedNaf[i])
+ }
+ }
+}
+
+type notZeroScalar Scalar
+
+func (notZeroScalar) Generate(rand *mathrand.Rand, size int) reflect.Value {
+ var s Scalar
+ var isNonZero uint64
+ for isNonZero == 0 {
+ s = Scalar{}.Generate(rand, size).Interface().(Scalar)
+ fiatScalarNonzero(&isNonZero, (*[4]uint64)(&s.s))
+ }
+ return reflect.ValueOf(notZeroScalar(s))
+}
+
+func TestScalarEqual(t *testing.T) {
+ if scOne.Equal(scMinusOne) == 1 {
+ t.Errorf("scOne.Equal(scMinusOne) is true")
+ }
+ if scMinusOne.Equal(scMinusOne) == 0 {
+ t.Errorf("scMinusOne.Equal(scMinusOne) is false")
+ }
+}
diff --git a/src/crypto/internal/edwards25519/scalarmult.go b/src/crypto/internal/edwards25519/scalarmult.go
new file mode 100644
index 0000000..f7ca3ce
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalarmult.go
@@ -0,0 +1,214 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import "sync"
+
+// basepointTable is a set of 32 affineLookupTables, where table i is generated
+// from 256^i * basepoint. It is precomputed the first time it's used.
+func basepointTable() *[32]affineLookupTable {
+ basepointTablePrecomp.initOnce.Do(func() {
+ p := NewGeneratorPoint()
+ for i := 0; i < 32; i++ {
+ basepointTablePrecomp.table[i].FromP3(p)
+ for j := 0; j < 8; j++ {
+ p.Add(p, p)
+ }
+ }
+ })
+ return &basepointTablePrecomp.table
+}
+
+var basepointTablePrecomp struct {
+ table [32]affineLookupTable
+ initOnce sync.Once
+}
+
+// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
+// returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarBaseMult(x *Scalar) *Point {
+ basepointTable := basepointTable()
+
+ // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i )
+ // as described in the Ed25519 paper
+ //
+ // Group even and odd coefficients
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
+ //
+ // We use a lookup table for each i to get x_i*16^(2*i)*B
+ // and do four doublings to multiply by 16.
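+ //
+ // For example, x = 33 = 2*16 + 1 has digits x_0 = 1 and x_1 = 2: the odd
+ // pass selects 2*B from table 0, the four doublings turn it into 32*B,
+ // and the even pass adds 1*B, giving 33*B.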
+ digits := x.signedRadix16()
+
+ multiple := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+
+ // Accumulate the odd components first
+ v.Set(NewIdentityPoint())
+ for i := 1; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ // Multiply by 16
+ tmp2.FromP3(v) // tmp2 = v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords
+ v.fromP1xP1(tmp1) // now v = 16*(odd components)
+
+ // Accumulate the even components
+ for i := 0; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ return v
+}
+
+// ScalarMult sets v = x * q, and returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
+ checkInitialized(q)
+
+ var table projLookupTable
+ table.FromP3(q)
+
+ // Write x = sum(x_i * 16^i)
+ // so x*Q = sum( Q*x_i*16^i )
+ // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
+ // <------compute inside out---------
+ //
+ // We use the lookup table to get the x_i*Q values
+ // and do four doublings to compute 16*Q
+ digits := x.signedRadix16()
+
+ // Unroll the first loop iteration to save computing 16*identity
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ table.SelectInto(multiple, digits[63])
+
+ v.Set(NewIdentityPoint())
+ tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
+ for i := 62; i >= 0; i-- {
+ tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+ v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+ table.SelectInto(multiple, digits[i])
+ tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
+ }
+ v.fromP1xP1(tmp1)
+ return v
+}
+
+// basepointNafTable is the nafLookupTable8 for the basepoint.
+// It is precomputed the first time it's used.
+func basepointNafTable() *nafLookupTable8 {
+ basepointNafTablePrecomp.initOnce.Do(func() {
+ basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
+ })
+ return &basepointNafTablePrecomp.table
+}
+
+var basepointNafTablePrecomp struct {
+ table nafLookupTable8
+ initOnce sync.Once
+}
+
+// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
+// generator, and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
+ checkInitialized(A)
+
+ // Similarly to the single variable-base approach, we compute
+ // digits and use them with a lookup table. However, because
+ // we are allowed to do variable-time operations, we don't
+ // need constant-time lookups or constant-time digit
+ // computations.
+ //
+ // So we use a non-adjacent form of some width w instead of
+ // radix 16. This is like a binary representation (one digit
+ // for each binary place) but we allow the digits to grow in
+ // magnitude up to 2^{w-1} so that the nonzero digits are as
+ // sparse as possible. Intuitively, this "condenses" the
+ // "mass" of the scalar onto sparse coefficients (meaning
+ // fewer additions).
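+ //
+ // For example, with w = 5 the NAF of 255 = 2^8 - 1 is a digit -1 at
+ // position 0 and a digit 1 at position 8, so two sparse digits replace
+ // eight adjacent bits.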
+
+ basepointNafTable := basepointNafTable()
+ var aTable nafLookupTable5
+ aTable.FromP3(A)
+ // Because the basepoint is fixed, we can use a wider NAF
+ // corresponding to a bigger table.
+ aNaf := a.nonAdjacentForm(5)
+ bNaf := b.nonAdjacentForm(8)
+
+ // Find the first nonzero coefficient.
+ i := 255
+ for ; i >= 0; i-- {
+ if aNaf[i] != 0 || bNaf[i] != 0 {
+ break
+ }
+ }
+
+ multA := &projCached{}
+ multB := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp2.Zero()
+
+ // Move from high to low bits, doubling the accumulator
+ // at each iteration and checking whether there is a nonzero
+ // coefficient to look up a multiple of.
+ for ; i >= 0; i-- {
+ tmp1.Double(tmp2)
+
+ // Only update v if we have a nonzero coeff to add in.
+ if aNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, aNaf[i])
+ tmp1.Add(v, multA)
+ } else if aNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, -aNaf[i])
+ tmp1.Sub(v, multA)
+ }
+
+ if bNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, bNaf[i])
+ tmp1.AddAffine(v, multB)
+ } else if bNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, -bNaf[i])
+ tmp1.SubAffine(v, multB)
+ }
+
+ tmp2.FromP1xP1(tmp1)
+ }
+
+ v.fromP2(tmp2)
+ return v
+}
diff --git a/src/crypto/internal/edwards25519/scalarmult_test.go b/src/crypto/internal/edwards25519/scalarmult_test.go
new file mode 100644
index 0000000..6c92ab3
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalarmult_test.go
@@ -0,0 +1,209 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "testing"
+ "testing/quick"
+)
+
+var (
+ // quickCheckConfig32 will make each quickcheck test run (32 * -quickchecks)
+ // times. The default value of -quickchecks is 100.
+ quickCheckConfig32 = &quick.Config{MaxCountScale: 1 << 5}
+
+ // a random scalar generated using dalek.
+ dalekScalar, _ = (&Scalar{}).SetCanonicalBytes([]byte{219, 106, 114, 9, 174, 249, 155, 89, 69, 203, 201, 93, 92, 116, 234, 187, 78, 115, 103, 172, 182, 98, 62, 103, 187, 136, 13, 100, 248, 110, 12, 4})
+ // the above, times the edwards25519 basepoint.
+ dalekScalarBasepoint, _ = new(Point).SetBytes([]byte{0xf4, 0xef, 0x7c, 0xa, 0x34, 0x55, 0x7b, 0x9f, 0x72, 0x3b, 0xb6, 0x1e, 0xf9, 0x46, 0x9, 0x91, 0x1c, 0xb9, 0xc0, 0x6c, 0x17, 0x28, 0x2d, 0x8b, 0x43, 0x2b, 0x5, 0x18, 0x6a, 0x54, 0x3e, 0x48})
+)
+
+func TestScalarMultSmallScalars(t *testing.T) {
+ var z Scalar
+ var p Point
+ p.ScalarMult(&z, B)
+ if I.Equal(&p) != 1 {
+ t.Error("0*B != 0")
+ }
+ checkOnCurve(t, &p)
+
+ p.ScalarMult(scOne, B)
+ if B.Equal(&p) != 1 {
+ t.Error("1*B != B")
+ }
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestScalarMultVsDalek(t *testing.T) {
+ var p Point
+ p.ScalarMult(dalekScalar, B)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("Scalar mul does not match dalek")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestBaseMultVsDalek(t *testing.T) {
+ var p Point
+ p.ScalarBaseMult(dalekScalar)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("Scalar mul does not match dalek")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestVarTimeDoubleBaseMultVsDalek(t *testing.T) {
+ var p Point
+ var z Scalar
+ p.VarTimeDoubleScalarBaseMult(dalekScalar, B, &z)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("VarTimeDoubleScalarBaseMult fails with b=0")
+ }
+ checkOnCurve(t, &p)
+ p.VarTimeDoubleScalarBaseMult(&z, B, dalekScalar)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("VarTimeDoubleScalarBaseMult fails with a=0")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestScalarMultDistributesOverAdd(t *testing.T) {
+ scalarMultDistributesOverAdd := func(x, y Scalar) bool {
+ var z Scalar
+ z.Add(&x, &y)
+ var p, q, r, check Point
+ p.ScalarMult(&x, B)
+ q.ScalarMult(&y, B)
+ r.ScalarMult(&z, B)
+ check.Add(&p, &q)
+ checkOnCurve(t, &p, &q, &r, &check)
+ return check.Equal(&r) == 1
+ }
+
+ if err := quick.Check(scalarMultDistributesOverAdd, quickCheckConfig32); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarMultNonIdentityPoint(t *testing.T) {
+ // Check whether p.ScalarMult and q.ScalarBaseMult give the same result
+ // when p and q are initially set to the base point.
+
+ scalarMultNonIdentityPoint := func(x Scalar) bool {
+ var p, q Point
+ p.Set(B)
+ q.Set(B)
+
+ p.ScalarMult(&x, B)
+ q.ScalarBaseMult(&x)
+
+ checkOnCurve(t, &p, &q)
+
+ return p.Equal(&q) == 1
+ }
+
+ if err := quick.Check(scalarMultNonIdentityPoint, quickCheckConfig32); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestBasepointTableGeneration(t *testing.T) {
+ // The basepoint table is 32 affineLookupTables,
+ // corresponding to (256^i)*B for table i.
+ basepointTable := basepointTable()
+
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp3 := &Point{}
+ tmp3.Set(B)
+ table := make([]affineLookupTable, 32)
+ for i := 0; i < 32; i++ {
+ // Build the table
+ table[i].FromP3(tmp3)
+ // Assert equality with the hardcoded one
+ if table[i] != basepointTable[i] {
+ t.Errorf("Basepoint table %d does not match", i)
+ }
+
+ // Set p = (16^2)*p = 256*p = 2^8*p
+ tmp2.FromP3(tmp3)
+ for j := 0; j < 7; j++ {
+ tmp1.Double(tmp2)
+ tmp2.FromP1xP1(tmp1)
+ }
+ tmp1.Double(tmp2)
+ tmp3.fromP1xP1(tmp1)
+ checkOnCurve(t, tmp3)
+ }
+}
+
+func TestScalarMultMatchesBaseMult(t *testing.T) {
+ scalarMultMatchesBaseMult := func(x Scalar) bool {
+ var p, q Point
+ p.ScalarMult(&x, B)
+ q.ScalarBaseMult(&x)
+ checkOnCurve(t, &p, &q)
+ return p.Equal(&q) == 1
+ }
+
+ if err := quick.Check(scalarMultMatchesBaseMult, quickCheckConfig32); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestBasepointNafTableGeneration(t *testing.T) {
+ var table nafLookupTable8
+ table.FromP3(B)
+
+ if table != *basepointNafTable() {
+ t.Error("BasepointNafTable does not match")
+ }
+}
+
+func TestVarTimeDoubleBaseMultMatchesBaseMult(t *testing.T) {
+ varTimeDoubleBaseMultMatchesBaseMult := func(x, y Scalar) bool {
+ var p, q1, q2, check Point
+
+ p.VarTimeDoubleScalarBaseMult(&x, B, &y)
+
+ q1.ScalarBaseMult(&x)
+ q2.ScalarBaseMult(&y)
+ check.Add(&q1, &q2)
+
+ checkOnCurve(t, &p, &check, &q1, &q2)
+ return p.Equal(&check) == 1
+ }
+
+ if err := quick.Check(varTimeDoubleBaseMultMatchesBaseMult, quickCheckConfig32); err != nil {
+ t.Error(err)
+ }
+}
+
+// Benchmarks.
+
+func BenchmarkScalarBaseMult(b *testing.B) {
+ var p Point
+
+ for i := 0; i < b.N; i++ {
+ p.ScalarBaseMult(dalekScalar)
+ }
+}
+
+func BenchmarkScalarMult(b *testing.B) {
+ var p Point
+
+ for i := 0; i < b.N; i++ {
+ p.ScalarMult(dalekScalar, B)
+ }
+}
+
+func BenchmarkVarTimeDoubleScalarBaseMult(b *testing.B) {
+ var p Point
+
+ for i := 0; i < b.N; i++ {
+ p.VarTimeDoubleScalarBaseMult(dalekScalar, B, dalekScalar)
+ }
+}
diff --git a/src/crypto/internal/edwards25519/tables.go b/src/crypto/internal/edwards25519/tables.go
new file mode 100644
index 0000000..83234bb
--- /dev/null
+++ b/src/crypto/internal/edwards25519/tables.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/subtle"
+)
+
+// A dynamic lookup table for variable-base, constant-time scalar muls.
+type projLookupTable struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, constant-time scalar muls.
+type affineLookupTable struct {
+ points [8]affineCached
+}
+
+// A dynamic lookup table for variable-base, variable-time scalar muls.
+type nafLookupTable5 struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, variable-time scalar muls.
+type nafLookupTable8 struct {
+ points [64]affineCached
+}
+
+// Constructors.
+
+// Builds a lookup table at runtime. Fast.
+func (v *projLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to a projCached
+ // This is needlessly complicated because the API has explicit
+ // receivers instead of creating stack objects and relying on RVO
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *affineLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to affineCached
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
+ }
+}
+
+// Builds a lookup table at runtime. Fast.
+func (v *nafLookupTable5) FromP3(q *Point) {
+ // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
+ // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *nafLookupTable8) FromP3(q *Point) {
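+ // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 127Q
+ // This allows lookup of -127Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 127Q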
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 63; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
+ }
+}
+
+// Selectors.
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
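+ // For example, x = -3 gives xmask = -1 and xabs = uint8((-4) ^ -1) = 3.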
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Given odd x with 0 < x < 2^4, set dest to x*Q (in variable time).
+func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
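+ // x is odd, so x = 2k+1 selects points[k] = (2k+1)*Q.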
+ *dest = v.points[x/2]
+}
+
+// Given odd x with 0 < x < 2^7, set dest to x*Q (in variable time).
+func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
+ *dest = v.points[x/2]
+}
diff --git a/src/crypto/internal/edwards25519/tables_test.go b/src/crypto/internal/edwards25519/tables_test.go
new file mode 100644
index 0000000..b5d161a
--- /dev/null
+++ b/src/crypto/internal/edwards25519/tables_test.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "testing"
+)
+
+func TestProjLookupTable(t *testing.T) {
+ var table projLookupTable
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3 projCached
+ table.SelectInto(&tmp1, 6)
+ table.SelectInto(&tmp2, -2)
+ table.SelectInto(&tmp3, -4)
+ // Expect T1 + T2 + T3 = identity
+
+ var accP1xP1 projP1xP1
+ accP3 := NewIdentityPoint()
+
+ accP1xP1.Add(accP3, &tmp1)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(accP3, &tmp2)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(accP3, &tmp3)
+ accP3.fromP1xP1(&accP1xP1)
+
+ if accP3.Equal(I) != 1 {
+ t.Errorf("Consistency check on ProjLookupTable.SelectInto failed! %x %x %x", tmp1, tmp2, tmp3)
+ }
+}
+
+func TestAffineLookupTable(t *testing.T) {
+ var table affineLookupTable
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3 affineCached
+ table.SelectInto(&tmp1, 3)
+ table.SelectInto(&tmp2, -7)
+ table.SelectInto(&tmp3, 4)
+ // Expect T1 + T2 + T3 = identity
+
+ var accP1xP1 projP1xP1
+ accP3 := NewIdentityPoint()
+
+ accP1xP1.AddAffine(accP3, &tmp1)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(accP3, &tmp2)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(accP3, &tmp3)
+ accP3.fromP1xP1(&accP1xP1)
+
+ if accP3.Equal(I) != 1 {
+ t.Errorf("Consistency check on ProjLookupTable.SelectInto failed! %x %x %x", tmp1, tmp2, tmp3)
+ }
+}
+
+func TestNafLookupTable5(t *testing.T) {
+ var table nafLookupTable5
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3, tmp4 projCached
+ table.SelectInto(&tmp1, 9)
+ table.SelectInto(&tmp2, 11)
+ table.SelectInto(&tmp3, 7)
+ table.SelectInto(&tmp4, 13)
+ // Expect T1 + T2 = T3 + T4
+
+ var accP1xP1 projP1xP1
+ lhs := NewIdentityPoint()
+ rhs := NewIdentityPoint()
+
+ accP1xP1.Add(lhs, &tmp1)
+ lhs.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(lhs, &tmp2)
+ lhs.fromP1xP1(&accP1xP1)
+
+ accP1xP1.Add(rhs, &tmp3)
+ rhs.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(rhs, &tmp4)
+ rhs.fromP1xP1(&accP1xP1)
+
+ if lhs.Equal(rhs) != 1 {
+ t.Errorf("Consistency check on nafLookupTable5 failed")
+ }
+}
+
+func TestNafLookupTable8(t *testing.T) {
+ var table nafLookupTable8
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3, tmp4 affineCached
+ table.SelectInto(&tmp1, 49)
+ table.SelectInto(&tmp2, 11)
+ table.SelectInto(&tmp3, 35)
+ table.SelectInto(&tmp4, 25)
+ // Expect T1 + T2 = T3 + T4
+
+ var accP1xP1 projP1xP1
+ lhs := NewIdentityPoint()
+ rhs := NewIdentityPoint()
+
+ accP1xP1.AddAffine(lhs, &tmp1)
+ lhs.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(lhs, &tmp2)
+ lhs.fromP1xP1(&accP1xP1)
+
+ accP1xP1.AddAffine(rhs, &tmp3)
+ rhs.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(rhs, &tmp4)
+ rhs.fromP1xP1(&accP1xP1)
+
+ if lhs.Equal(rhs) != 1 {
+ t.Errorf("Consistency check on nafLookupTable8 failed")
+ }
+}