author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-16 19:23:18 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-16 19:23:18 +0000
commit 43a123c1ae6613b3efeed291fa552ecd909d3acf (patch)
tree fd92518b7024bc74031f78a1cf9e454b65e73665 /src/crypto/internal
parent Initial commit. (diff)
Adding upstream version 1.20.14. (tag: upstream/1.20.14, branch: upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/crypto/internal')
-rw-r--r-- src/crypto/internal/alias/alias.go 30
-rw-r--r-- src/crypto/internal/alias/alias_test.go 46
-rw-r--r-- src/crypto/internal/bigmod/_asm/go.mod 12
-rw-r--r-- src/crypto/internal/bigmod/_asm/go.sum 32
-rw-r--r-- src/crypto/internal/bigmod/_asm/nat_amd64_asm.go 131
-rw-r--r-- src/crypto/internal/bigmod/nat.go 703
-rw-r--r-- src/crypto/internal/bigmod/nat_amd64.go 8
-rw-r--r-- src/crypto/internal/bigmod/nat_amd64.s 68
-rw-r--r-- src/crypto/internal/bigmod/nat_noasm.go 11
-rw-r--r-- src/crypto/internal/bigmod/nat_test.go 412
-rw-r--r-- src/crypto/internal/boring/Dockerfile 63
-rw-r--r-- src/crypto/internal/boring/LICENSE 202
-rw-r--r-- src/crypto/internal/boring/README.md 39
-rw-r--r-- src/crypto/internal/boring/aes.go 385
-rw-r--r-- src/crypto/internal/boring/bbig/big.go 33
-rw-r--r-- src/crypto/internal/boring/bcache/cache.go 140
-rw-r--r-- src/crypto/internal/boring/bcache/cache_test.go 122
-rw-r--r-- src/crypto/internal/boring/bcache/stub.s 6
-rw-r--r-- src/crypto/internal/boring/boring.go 126
-rw-r--r-- src/crypto/internal/boring/boring_test.go 34
-rwxr-xr-x src/crypto/internal/boring/build-boring.sh 44
-rwxr-xr-x src/crypto/internal/boring/build-goboring.sh 237
-rwxr-xr-x src/crypto/internal/boring/build.sh 46
-rw-r--r-- src/crypto/internal/boring/div_test.c 83
-rw-r--r-- src/crypto/internal/boring/doc.go 19
-rw-r--r-- src/crypto/internal/boring/ecdh.go 224
-rw-r--r-- src/crypto/internal/boring/ecdsa.go 172
-rw-r--r-- src/crypto/internal/boring/fipstls/stub.s 12
-rw-r--r-- src/crypto/internal/boring/fipstls/tls.go 52
-rw-r--r-- src/crypto/internal/boring/goboringcrypto.h 255
-rw-r--r-- src/crypto/internal/boring/hmac.go 153
-rw-r--r-- src/crypto/internal/boring/notboring.go 122
-rw-r--r-- src/crypto/internal/boring/rand.go 24
-rw-r--r-- src/crypto/internal/boring/rsa.go 379
-rw-r--r-- src/crypto/internal/boring/sha.go 599
-rw-r--r-- src/crypto/internal/boring/sig/sig.go 17
-rw-r--r-- src/crypto/internal/boring/sig/sig_amd64.s 54
-rw-r--r-- src/crypto/internal/boring/sig/sig_other.s 20
-rw-r--r-- src/crypto/internal/boring/syso/goboringcrypto_linux_amd64.syso bin 0 -> 2555664 bytes
-rw-r--r-- src/crypto/internal/boring/syso/goboringcrypto_linux_arm64.syso bin 0 -> 1980296 bytes
-rw-r--r-- src/crypto/internal/boring/syso/syso.go 9
-rw-r--r-- src/crypto/internal/edwards25519/doc.go 22
-rw-r--r-- src/crypto/internal/edwards25519/edwards25519.go 426
-rw-r--r-- src/crypto/internal/edwards25519/edwards25519_test.go 313
-rw-r--r-- src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go 294
-rw-r--r-- src/crypto/internal/edwards25519/field/_asm/go.mod 12
-rw-r--r-- src/crypto/internal/edwards25519/field/_asm/go.sum 32
-rw-r--r-- src/crypto/internal/edwards25519/field/fe.go 420
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_alias_test.go 140
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_amd64.go 15
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_amd64.s 378
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_amd64_noasm.go 11
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_arm64.go 15
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_arm64.s 42
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_arm64_noasm.go 11
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_bench_test.go 49
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_generic.go 266
-rw-r--r-- src/crypto/internal/edwards25519/field/fe_test.go 560
-rw-r--r-- src/crypto/internal/edwards25519/scalar.go 343
-rw-r--r-- src/crypto/internal/edwards25519/scalar_alias_test.go 108
-rw-r--r-- src/crypto/internal/edwards25519/scalar_fiat.go 1147
-rw-r--r-- src/crypto/internal/edwards25519/scalar_test.go 249
-rw-r--r-- src/crypto/internal/edwards25519/scalarmult.go 214
-rw-r--r-- src/crypto/internal/edwards25519/scalarmult_test.go 209
-rw-r--r-- src/crypto/internal/edwards25519/tables.go 129
-rw-r--r-- src/crypto/internal/edwards25519/tables_test.go 119
-rw-r--r-- src/crypto/internal/nistec/fiat/Dockerfile 12
-rw-r--r-- src/crypto/internal/nistec/fiat/README 34
-rw-r--r-- src/crypto/internal/nistec/fiat/fiat_test.go 64
-rw-r--r-- src/crypto/internal/nistec/fiat/generate.go 330
-rw-r--r-- src/crypto/internal/nistec/fiat/p224.go 134
-rw-r--r-- src/crypto/internal/nistec/fiat/p224_fiat64.go 1461
-rw-r--r-- src/crypto/internal/nistec/fiat/p224_invert.go 87
-rw-r--r-- src/crypto/internal/nistec/fiat/p256.go 134
-rw-r--r-- src/crypto/internal/nistec/fiat/p256_fiat64.go 1400
-rw-r--r-- src/crypto/internal/nistec/fiat/p256_invert.go 84
-rw-r--r-- src/crypto/internal/nistec/fiat/p384.go 134
-rw-r--r-- src/crypto/internal/nistec/fiat/p384_fiat64.go 3036
-rw-r--r-- src/crypto/internal/nistec/fiat/p384_invert.go 102
-rw-r--r-- src/crypto/internal/nistec/fiat/p521.go 134
-rw-r--r-- src/crypto/internal/nistec/fiat/p521_fiat64.go 5541
-rw-r--r-- src/crypto/internal/nistec/fiat/p521_invert.go 89
-rw-r--r-- src/crypto/internal/nistec/generate.go 639
-rw-r--r-- src/crypto/internal/nistec/nistec.go 15
-rw-r--r-- src/crypto/internal/nistec/nistec_test.go 297
-rw-r--r-- src/crypto/internal/nistec/p224.go 453
-rw-r--r-- src/crypto/internal/nistec/p224_sqrt.go 132
-rw-r--r-- src/crypto/internal/nistec/p256.go 509
-rw-r--r-- src/crypto/internal/nistec/p256_asm.go 744
-rw-r--r-- src/crypto/internal/nistec/p256_asm_amd64.s 2350
-rw-r--r-- src/crypto/internal/nistec/p256_asm_arm64.s 1533
-rw-r--r-- src/crypto/internal/nistec/p256_asm_ppc64le.s 2208
-rw-r--r-- src/crypto/internal/nistec/p256_asm_s390x.s 2418
-rw-r--r-- src/crypto/internal/nistec/p256_asm_table.bin bin 0 -> 88064 bytes
-rw-r--r-- src/crypto/internal/nistec/p256_asm_table_test.go 49
-rw-r--r-- src/crypto/internal/nistec/p256_ordinv.go 102
-rw-r--r-- src/crypto/internal/nistec/p256_ordinv_noasm.go 13
-rw-r--r-- src/crypto/internal/nistec/p256_ordinv_test.go 94
-rw-r--r-- src/crypto/internal/nistec/p384.go 540
-rw-r--r-- src/crypto/internal/nistec/p521.go 469
-rw-r--r-- src/crypto/internal/randutil/randutil.go 38
101 files changed, 36027 insertions, 0 deletions
diff --git a/src/crypto/internal/alias/alias.go b/src/crypto/internal/alias/alias.go
new file mode 100644
index 0000000..936cc25
--- /dev/null
+++ b/src/crypto/internal/alias/alias.go
@@ -0,0 +1,30 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package alias implements memory aliasing tests.
+// This code also exists as golang.org/x/crypto/internal/alias.
+package alias
+
+import "unsafe"
+
+// AnyOverlap reports whether x and y share memory at any (not necessarily
+// corresponding) index. The memory beyond the slice length is ignored.
+func AnyOverlap(x, y []byte) bool {
+ return len(x) > 0 && len(y) > 0 &&
+ uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
+ uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
+}
+
+// InexactOverlap reports whether x and y share memory at any non-corresponding
+// index. The memory beyond the slice length is ignored. Note that x and y can
+// have different lengths and still not have any inexact overlap.
+//
+// InexactOverlap can be used to implement the requirements of the crypto/cipher
+// AEAD, Block, BlockMode and Stream interfaces.
+func InexactOverlap(x, y []byte) bool {
+ if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
+ return false
+ }
+ return AnyOverlap(x, y)
+}
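
InexactOverlap is the check that lets crypto/cipher implementations permit exact in-place use (dst == src) while rejecting partially overlapping buffers. A minimal sketch of that pattern, assuming a vendored copy of the package (alias is internal to the standard library, and xorKeyStream is an illustrative stand-in, not a real cipher):

package main

import (
	"fmt"

	"crypto/internal/alias" // internal: assumes a vendored copy outside std
)

// xorKeyStream is a toy Stream-style function enforcing the crypto/cipher
// aliasing rules with InexactOverlap.
func xorKeyStream(dst, src []byte) {
	if alias.InexactOverlap(dst, src) {
		panic("invalid buffer overlap")
	}
	for i, b := range src {
		dst[i] = b ^ 0xff // stand-in for a real keystream
	}
}

func main() {
	buf := []byte{1, 2, 3, 4}
	xorKeyStream(buf, buf) // exact overlap is permitted
	fmt.Println(buf)       // [254 253 252 251]
}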
diff --git a/src/crypto/internal/alias/alias_test.go b/src/crypto/internal/alias/alias_test.go
new file mode 100644
index 0000000..a68fb33
--- /dev/null
+++ b/src/crypto/internal/alias/alias_test.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package alias
+
+import "testing"
+
+var a, b [100]byte
+
+var aliasingTests = []struct {
+ x, y []byte
+ anyOverlap, inexactOverlap bool
+}{
+ {a[:], b[:], false, false},
+ {a[:], b[:0], false, false},
+ {a[:], b[:50], false, false},
+ {a[40:50], a[50:60], false, false},
+ {a[40:50], a[60:70], false, false},
+ {a[:51], a[50:], true, true},
+ {a[:], a[:], true, false},
+ {a[:50], a[:60], true, false},
+ {a[:], nil, false, false},
+ {nil, nil, false, false},
+ {a[:], a[:0], false, false},
+ {a[:10], a[:10:20], true, false},
+ {a[:10], a[5:10:20], true, true},
+}
+
+func testAliasing(t *testing.T, i int, x, y []byte, anyOverlap, inexactOverlap bool) {
+ any := AnyOverlap(x, y)
+ if any != anyOverlap {
+ t.Errorf("%d: wrong AnyOverlap result, expected %v, got %v", i, anyOverlap, any)
+ }
+ inexact := InexactOverlap(x, y)
+ if inexact != inexactOverlap {
+		t.Errorf("%d: wrong InexactOverlap result, expected %v, got %v", i, inexactOverlap, inexact)
+ }
+}
+
+func TestAliasing(t *testing.T) {
+ for i, tt := range aliasingTests {
+ testAliasing(t, i, tt.x, tt.y, tt.anyOverlap, tt.inexactOverlap)
+ testAliasing(t, i, tt.y, tt.x, tt.anyOverlap, tt.inexactOverlap)
+ }
+}
diff --git a/src/crypto/internal/bigmod/_asm/go.mod b/src/crypto/internal/bigmod/_asm/go.mod
new file mode 100644
index 0000000..1ce2b5e
--- /dev/null
+++ b/src/crypto/internal/bigmod/_asm/go.mod
@@ -0,0 +1,12 @@
+module asm
+
+go 1.19
+
+require github.com/mmcloughlin/avo v0.4.0
+
+require (
+ golang.org/x/mod v0.4.2 // indirect
+ golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021 // indirect
+ golang.org/x/tools v0.1.7 // indirect
+ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+)
diff --git a/src/crypto/internal/bigmod/_asm/go.sum b/src/crypto/internal/bigmod/_asm/go.sum
new file mode 100644
index 0000000..b4b5914
--- /dev/null
+++ b/src/crypto/internal/bigmod/_asm/go.sum
@@ -0,0 +1,32 @@
+github.com/mmcloughlin/avo v0.4.0 h1:jeHDRktVD+578ULxWpQHkilor6pkdLF7u7EiTzDbfcU=
+github.com/mmcloughlin/avo v0.4.0/go.mod h1:RW9BfYA3TgO9uCdNrKU2h6J8cPD8ZLznvfgHAeszb1s=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021 h1:giLT+HuUP/gXYrG2Plg9WTjj4qhfgaW424ZIFog3rlk=
+golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/src/crypto/internal/bigmod/_asm/nat_amd64_asm.go b/src/crypto/internal/bigmod/_asm/nat_amd64_asm.go
new file mode 100644
index 0000000..5690f04
--- /dev/null
+++ b/src/crypto/internal/bigmod/_asm/nat_amd64_asm.go
@@ -0,0 +1,131 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ . "github.com/mmcloughlin/avo/build"
+ . "github.com/mmcloughlin/avo/operand"
+ . "github.com/mmcloughlin/avo/reg"
+)
+
+//go:generate go run . -out ../nat_amd64.s -stubs ../nat_amd64.go -pkg bigmod
+
+func main() {
+ Package("crypto/internal/bigmod")
+ ConstraintExpr("amd64,gc,!purego")
+
+ Implement("montgomeryLoop")
+ Pragma("noescape")
+
+ size := Load(Param("d").Len(), GP64())
+ d := Mem{Base: Load(Param("d").Base(), GP64())}
+ b := Mem{Base: Load(Param("b").Base(), GP64())}
+ m := Mem{Base: Load(Param("m").Base(), GP64())}
+ m0inv := Load(Param("m0inv"), GP64())
+
+ overflow := zero()
+ i := zero()
+ Label("outerLoop")
+
+ ai := Load(Param("a").Base(), GP64())
+ MOVQ(Mem{Base: ai}.Idx(i, 8), ai)
+
+ z := uint128{GP64(), GP64()}
+ mul64(z, b, ai)
+ add64(z, d)
+ f := GP64()
+ MOVQ(m0inv, f)
+ IMULQ(z.lo, f)
+ _MASK(f)
+ addMul64(z, m, f)
+ carry := shiftBy63(z)
+
+ j := zero()
+ INCQ(j)
+ JMP(LabelRef("innerLoopCondition"))
+ Label("innerLoop")
+
+ // z = d[j] + a[i] * b[j] + f * m[j] + carry
+ z = uint128{GP64(), GP64()}
+ mul64(z, b.Idx(j, 8), ai)
+ addMul64(z, m.Idx(j, 8), f)
+ add64(z, d.Idx(j, 8))
+ add64(z, carry)
+ // d[j-1] = z_lo & _MASK
+ storeMasked(z.lo, d.Idx(j, 8).Offset(-8))
+ // carry = z_hi<<1 | z_lo>>_W
+ MOVQ(shiftBy63(z), carry)
+
+ INCQ(j)
+ Label("innerLoopCondition")
+ CMPQ(size, j)
+ JGT(LabelRef("innerLoop"))
+
+ ADDQ(carry, overflow)
+ storeMasked(overflow, d.Idx(size, 8).Offset(-8))
+ SHRQ(Imm(63), overflow)
+
+ INCQ(i)
+ CMPQ(size, i)
+ JGT(LabelRef("outerLoop"))
+
+ Store(overflow, ReturnIndex(0))
+ RET()
+ Generate()
+}
+
+// zero zeroes a new register and returns it.
+func zero() Register {
+ r := GP64()
+ XORQ(r, r)
+ return r
+}
+
+// _MASK masks out the top bit of r.
+func _MASK(r Register) {
+ BTRQ(Imm(63), r)
+}
+
+type uint128 struct {
+ hi, lo GPVirtual
+}
+
+// storeMasked stores _MASK(src) in dst. It doesn't modify src.
+func storeMasked(src, dst Op) {
+ out := GP64()
+ MOVQ(src, out)
+ _MASK(out)
+ MOVQ(out, dst)
+}
+
+// shiftBy63 returns z >> 63. It reuses z.lo.
+func shiftBy63(z uint128) Register {
+ SHRQ(Imm(63), z.hi, z.lo)
+ result := z.lo
+ z.hi, z.lo = nil, nil
+ return result
+}
+
+// add64 sets r to r + a.
+func add64(r uint128, a Op) {
+ ADDQ(a, r.lo)
+ ADCQ(Imm(0), r.hi)
+}
+
+// mul64 sets r to a * b.
+func mul64(r uint128, a, b Op) {
+ MOVQ(a, RAX)
+ MULQ(b) // RDX, RAX = RAX * b
+ MOVQ(RAX, r.lo)
+ MOVQ(RDX, r.hi)
+}
+
+// addMul64 sets r to r + a * b.
+func addMul64(r uint128, a, b Op) {
+ MOVQ(a, RAX)
+ MULQ(b) // RDX, RAX = RAX * b
+ ADDQ(RAX, r.lo)
+ ADCQ(RDX, r.hi)
+}
diff --git a/src/crypto/internal/bigmod/nat.go b/src/crypto/internal/bigmod/nat.go
new file mode 100644
index 0000000..804316f
--- /dev/null
+++ b/src/crypto/internal/bigmod/nat.go
@@ -0,0 +1,703 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bigmod
+
+import (
+ "errors"
+ "math/big"
+ "math/bits"
+)
+
+const (
+ // _W is the number of bits we use for our limbs.
+ _W = bits.UintSize - 1
+ // _MASK selects _W bits from a full machine word.
+ _MASK = (1 << _W) - 1
+)
+
+// choice represents a constant-time boolean. The value of choice is always
+// either 1 or 0. We use a uint instead of bool in order to make decisions in
+// constant time by turning it into a mask.
+type choice uint
+
+func not(c choice) choice { return 1 ^ c }
+
+const yes = choice(1)
+const no = choice(0)
+
+// ctSelect returns x if on == 1, and y if on == 0. The execution time of this
+// function does not depend on its inputs. If on is any value besides 1 or 0,
+// the result is undefined.
+func ctSelect(on choice, x, y uint) uint {
+ // When on == 1, mask is 0b111..., otherwise mask is 0b000...
+ mask := -uint(on)
+ // When mask is all zeros, we just have y, otherwise, y cancels with itself.
+ return y ^ (mask & (y ^ x))
+}
+
+// ctEq returns 1 if x == y, and 0 otherwise. The execution time of this
+// function does not depend on its inputs.
+func ctEq(x, y uint) choice {
+ // If x != y, then either x - y or y - x will generate a carry.
+ _, c1 := bits.Sub(x, y, 0)
+ _, c2 := bits.Sub(y, x, 0)
+ return not(choice(c1 | c2))
+}
+
+// ctGeq returns 1 if x >= y, and 0 otherwise. The execution time of this
+// function does not depend on its inputs.
+func ctGeq(x, y uint) choice {
+ // If x < y, then x - y generates a carry.
+ _, carry := bits.Sub(x, y, 0)
+ return not(choice(carry))
+}
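+
+// Illustrative traces for the helpers above: with on == 1, x == 7, y == 9,
+// ctSelect computes mask = ^uint(0) and returns 9 ^ (9 ^ 7) == 7; with
+// on == 0, mask == 0 and it returns 9. ctEq(5, 6) == 0 because 5 - 6
+// generates a carry, while ctEq(5, 5) == 1 because neither subtraction does.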
+
+// Nat represents an arbitrary natural number.
+//
+// Each Nat has an announced length, which is the number of limbs it has stored.
+// Operations on this number are allowed to leak this length, but will not leak
+// any information about the values contained in those limbs.
+type Nat struct {
+ // limbs is a little-endian representation in base 2^W with
+ // W = bits.UintSize - 1. The top bit is always unset between operations.
+ //
+ // The top bit is left unset to optimize Montgomery multiplication, in the
+ // inner loop of exponentiation. Using fully saturated limbs would leave us
+ // working with 129-bit numbers on 64-bit platforms, wasting a lot of space,
+ // and thus time.
+ limbs []uint
+}
+
+// preallocTarget is the size in bits of the numbers used to implement the most
+// common and most performant RSA key size. It's also enough to cover some of
+// the operations of key sizes up to 4096.
+const preallocTarget = 2048
+const preallocLimbs = (preallocTarget + _W - 1) / _W
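+// On 64-bit platforms (_W = 63) this works out to ⌈2048/63⌉ = 33 limbs.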
+
+// NewNat returns a new nat with a size of zero, just like new(Nat), but with
+// the preallocated capacity to hold a number of up to preallocTarget bits.
+// NewNat inlines, so the allocation can live on the stack.
+func NewNat() *Nat {
+ limbs := make([]uint, 0, preallocLimbs)
+ return &Nat{limbs}
+}
+
+// expand expands x to n limbs, leaving its value unchanged.
+func (x *Nat) expand(n int) *Nat {
+ if len(x.limbs) > n {
+ panic("bigmod: internal error: shrinking nat")
+ }
+ if cap(x.limbs) < n {
+ newLimbs := make([]uint, n)
+ copy(newLimbs, x.limbs)
+ x.limbs = newLimbs
+ return x
+ }
+ extraLimbs := x.limbs[len(x.limbs):n]
+ for i := range extraLimbs {
+ extraLimbs[i] = 0
+ }
+ x.limbs = x.limbs[:n]
+ return x
+}
+
+// reset returns a zero nat of n limbs, reusing x's storage if n <= cap(x.limbs).
+func (x *Nat) reset(n int) *Nat {
+ if cap(x.limbs) < n {
+ x.limbs = make([]uint, n)
+ return x
+ }
+ for i := range x.limbs {
+ x.limbs[i] = 0
+ }
+ x.limbs = x.limbs[:n]
+ return x
+}
+
+// set assigns x = y, optionally resizing x to the appropriate size.
+func (x *Nat) set(y *Nat) *Nat {
+ x.reset(len(y.limbs))
+ copy(x.limbs, y.limbs)
+ return x
+}
+
+// setBig assigns x = n, optionally resizing x to the appropriate size.
+//
+// The announced length of x is set based on the actual bit size of the input,
+// ignoring leading zeroes.
+func (x *Nat) setBig(n *big.Int) *Nat {
+ requiredLimbs := (n.BitLen() + _W - 1) / _W
+ x.reset(requiredLimbs)
+
+ outI := 0
+ shift := 0
+ limbs := n.Bits()
+ for i := range limbs {
+ xi := uint(limbs[i])
+ x.limbs[outI] |= (xi << shift) & _MASK
+ outI++
+ if outI == requiredLimbs {
+ return x
+ }
+ x.limbs[outI] = xi >> (_W - shift)
+ shift++ // this assumes bits.UintSize - _W = 1
+ if shift == _W {
+ shift = 0
+ outI++
+ }
+ }
+ return x
+}
+
+// Bytes returns x as a zero-extended big-endian byte slice. The size of the
+// slice will match the size of m.
+//
+// x must have the same size as m and it must be reduced modulo m.
+func (x *Nat) Bytes(m *Modulus) []byte {
+ bytes := make([]byte, m.Size())
+ shift := 0
+ outI := len(bytes) - 1
+ for _, limb := range x.limbs {
+ remainingBits := _W
+ for remainingBits >= 8 {
+ bytes[outI] |= byte(limb) << shift
+ consumed := 8 - shift
+ limb >>= consumed
+ remainingBits -= consumed
+ shift = 0
+ outI--
+ if outI < 0 {
+ return bytes
+ }
+ }
+ bytes[outI] = byte(limb)
+ shift = remainingBits
+ }
+ return bytes
+}
+
+// SetBytes assigns x = b, where b is a slice of big-endian bytes.
+// SetBytes returns an error if b >= m.
+//
+// The output will be resized to the size of m and overwritten.
+func (x *Nat) SetBytes(b []byte, m *Modulus) (*Nat, error) {
+ if err := x.setBytes(b, m); err != nil {
+ return nil, err
+ }
+ if x.cmpGeq(m.nat) == yes {
+ return nil, errors.New("input overflows the modulus")
+ }
+ return x, nil
+}
+
+// SetOverflowingBytes assigns x = b, where b is a slice of big-endian bytes. SetOverflowingBytes
+// returns an error if b has a longer bit length than m, but reduces overflowing
+// values up to 2^⌈log2(m)⌉ - 1.
+//
+// The output will be resized to the size of m and overwritten.
+func (x *Nat) SetOverflowingBytes(b []byte, m *Modulus) (*Nat, error) {
+ if err := x.setBytes(b, m); err != nil {
+ return nil, err
+ }
+ leading := _W - bitLen(x.limbs[len(x.limbs)-1])
+ if leading < m.leading {
+ return nil, errors.New("input overflows the modulus")
+ }
+ x.sub(x.cmpGeq(m.nat), m.nat)
+ return x, nil
+}
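+
+// Illustration: with m = 13 (4 bits), SetBytes rejects 14, while
+// SetOverflowingBytes reduces it to 1; both reject 16, whose bit length
+// exceeds that of m.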
+
+func (x *Nat) setBytes(b []byte, m *Modulus) error {
+ outI := 0
+ shift := 0
+ x.resetFor(m)
+ for i := len(b) - 1; i >= 0; i-- {
+ bi := b[i]
+ x.limbs[outI] |= uint(bi) << shift
+ shift += 8
+ if shift >= _W {
+ shift -= _W
+ x.limbs[outI] &= _MASK
+ overflow := bi >> (8 - shift)
+ outI++
+ if outI >= len(x.limbs) {
+ if overflow > 0 || i > 0 {
+ return errors.New("input overflows the modulus")
+ }
+ break
+ }
+ x.limbs[outI] = uint(overflow)
+ }
+ }
+ return nil
+}
+
+// Equal returns 1 if x == y, and 0 otherwise.
+//
+// Both operands must have the same announced length.
+func (x *Nat) Equal(y *Nat) choice {
+ // Eliminate bounds checks in the loop.
+ size := len(x.limbs)
+ xLimbs := x.limbs[:size]
+ yLimbs := y.limbs[:size]
+
+ equal := yes
+ for i := 0; i < size; i++ {
+ equal &= ctEq(xLimbs[i], yLimbs[i])
+ }
+ return equal
+}
+
+// IsZero returns 1 if x == 0, and 0 otherwise.
+func (x *Nat) IsZero() choice {
+ // Eliminate bounds checks in the loop.
+ size := len(x.limbs)
+ xLimbs := x.limbs[:size]
+
+ zero := yes
+ for i := 0; i < size; i++ {
+ zero &= ctEq(xLimbs[i], 0)
+ }
+ return zero
+}
+
+// cmpGeq returns 1 if x >= y, and 0 otherwise.
+//
+// Both operands must have the same announced length.
+func (x *Nat) cmpGeq(y *Nat) choice {
+ // Eliminate bounds checks in the loop.
+ size := len(x.limbs)
+ xLimbs := x.limbs[:size]
+ yLimbs := y.limbs[:size]
+
+ var c uint
+ for i := 0; i < size; i++ {
+ c = (xLimbs[i] - yLimbs[i] - c) >> _W
+ }
+ // If there was a carry, then subtracting y underflowed, so
+ // x is not greater than or equal to y.
+ return not(choice(c))
+}
+
+// assign sets x <- y if on == 1, and does nothing otherwise.
+//
+// Both operands must have the same announced length.
+func (x *Nat) assign(on choice, y *Nat) *Nat {
+ // Eliminate bounds checks in the loop.
+ size := len(x.limbs)
+ xLimbs := x.limbs[:size]
+ yLimbs := y.limbs[:size]
+
+ for i := 0; i < size; i++ {
+ xLimbs[i] = ctSelect(on, yLimbs[i], xLimbs[i])
+ }
+ return x
+}
+
+// add computes x += y if on == 1, and does nothing otherwise. It returns the
+// carry of the addition regardless of on.
+//
+// Both operands must have the same announced length.
+func (x *Nat) add(on choice, y *Nat) (c uint) {
+ // Eliminate bounds checks in the loop.
+ size := len(x.limbs)
+ xLimbs := x.limbs[:size]
+ yLimbs := y.limbs[:size]
+
+ for i := 0; i < size; i++ {
+ res := xLimbs[i] + yLimbs[i] + c
+ xLimbs[i] = ctSelect(on, res&_MASK, xLimbs[i])
+ c = res >> _W
+ }
+ return
+}
+
+// sub computes x -= y if on == 1, and does nothing otherwise. It returns the
+// borrow of the subtraction regardless of on.
+//
+// Both operands must have the same announced length.
+func (x *Nat) sub(on choice, y *Nat) (c uint) {
+ // Eliminate bounds checks in the loop.
+ size := len(x.limbs)
+ xLimbs := x.limbs[:size]
+ yLimbs := y.limbs[:size]
+
+ for i := 0; i < size; i++ {
+ res := xLimbs[i] - yLimbs[i] - c
+ xLimbs[i] = ctSelect(on, res&_MASK, xLimbs[i])
+ c = res >> _W
+ }
+ return
+}
+
+// Modulus is used for modular arithmetic, precomputing relevant constants.
+//
+// Moduli are assumed to be odd numbers. Moduli can also leak the exact
+// number of bits needed to store their value, and are stored without padding.
+//
+// Their actual value is still kept secret.
+type Modulus struct {
+ // The underlying natural number for this modulus.
+ //
+ // This will be stored without any padding, and shouldn't alias with any
+ // other natural number being used.
+ nat *Nat
+ leading int // number of leading zeros in the modulus
+	m0inv uint // -nat.limbs[0]⁻¹ mod 2^_W
+ rr *Nat // R*R for montgomeryRepresentation
+}
+
+// rr returns R*R with R = 2^(_W * n) and n = len(m.nat.limbs).
+func rr(m *Modulus) *Nat {
+ rr := NewNat().ExpandFor(m)
+ // R*R is 2^(2 * _W * n). We can safely get 2^(_W * (n - 1)) by setting the
+	// most significant limb to 1. We then get to R*R by shifting left by _W
+	// a total of n + 1 times.
+ n := len(rr.limbs)
+ rr.limbs[n-1] = 1
+ for i := n - 1; i < 2*n; i++ {
+ rr.shiftIn(0, m) // x = x * 2^_W mod m
+ }
+ return rr
+}
+
+// minusInverseModW computes -x⁻¹ mod 2^_W with x odd.
+//
+// This operation is used to precompute a constant involved in Montgomery
+// multiplication.
+func minusInverseModW(x uint) uint {
+ // Every iteration of this loop doubles the least-significant bits of
+ // correct inverse in y. The first three bits are already correct (1⁻¹ = 1,
+ // 3⁻¹ = 3, 5⁻¹ = 5, and 7⁻¹ = 7 mod 8), so doubling five times is enough
+// for 63 bits (and wastes only one iteration for 31 bits).
+ //
+ // See https://crypto.stackexchange.com/a/47496.
+ y := x
+ for i := 0; i < 5; i++ {
+ y = y * (2 - x*y)
+ }
+ return (1 << _W) - (y & _MASK)
+}
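+
+// Illustrative property: for any odd x, x * minusInverseModW(x) ≡ -1 ≡ _MASK
+// (mod 2^_W). For x = 1 the loop leaves y = 1 and returns (1 << _W) - 1.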
+
+// NewModulusFromBig creates a new Modulus from a [big.Int].
+//
+// The Int must be odd. The number of significant bits must be leakable.
+func NewModulusFromBig(n *big.Int) *Modulus {
+ m := &Modulus{}
+ m.nat = NewNat().setBig(n)
+ m.leading = _W - bitLen(m.nat.limbs[len(m.nat.limbs)-1])
+ m.m0inv = minusInverseModW(m.nat.limbs[0])
+ m.rr = rr(m)
+ return m
+}
+
+// bitLen is a version of bits.Len that only leaks the bit length of n, but not
+// its value. bits.Len and bits.LeadingZeros use a lookup table for the
+// low-order bits on some architectures.
+func bitLen(n uint) int {
+ var len int
+ // We assume, here and elsewhere, that comparison to zero is constant time
+ // with respect to different non-zero values.
+ for n != 0 {
+ len++
+ n >>= 1
+ }
+ return len
+}
+
+// Size returns the size of m in bytes.
+func (m *Modulus) Size() int {
+ return (m.BitLen() + 7) / 8
+}
+
+// BitLen returns the size of m in bits.
+func (m *Modulus) BitLen() int {
+ return len(m.nat.limbs)*_W - int(m.leading)
+}
+
+// Nat returns m as a Nat. The return value must not be written to.
+func (m *Modulus) Nat() *Nat {
+ return m.nat
+}
+
+// shiftIn calculates x = x << _W + y mod m.
+//
+// This assumes that x is already reduced mod m, and that y < 2^_W.
+func (x *Nat) shiftIn(y uint, m *Modulus) *Nat {
+ d := NewNat().resetFor(m)
+
+ // Eliminate bounds checks in the loop.
+ size := len(m.nat.limbs)
+ xLimbs := x.limbs[:size]
+ dLimbs := d.limbs[:size]
+ mLimbs := m.nat.limbs[:size]
+
+ // Each iteration of this loop computes x = 2x + b mod m, where b is a bit
+ // from y. Effectively, it left-shifts x and adds y one bit at a time,
+ // reducing it every time.
+ //
+ // To do the reduction, each iteration computes both 2x + b and 2x + b - m.
+ // The next iteration (and finally the return line) will use either result
+ // based on whether the subtraction underflowed.
+ needSubtraction := no
+ for i := _W - 1; i >= 0; i-- {
+ carry := (y >> i) & 1
+ var borrow uint
+ for i := 0; i < size; i++ {
+ l := ctSelect(needSubtraction, dLimbs[i], xLimbs[i])
+
+ res := l<<1 + carry
+ xLimbs[i] = res & _MASK
+ carry = res >> _W
+
+ res = xLimbs[i] - mLimbs[i] - borrow
+ dLimbs[i] = res & _MASK
+ borrow = res >> _W
+ }
+ // See Add for how carry (aka overflow), borrow (aka underflow), and
+ // needSubtraction relate.
+ needSubtraction = ctEq(carry, borrow)
+ }
+ return x.assign(needSubtraction, d)
+}
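+
+// One-limb illustration (mirroring TestShiftIn): with m = 13, x = 0, and
+// y = 2^63 - 1, the result is 0*2^63 + (2^63 - 1) ≡ 7 (mod 13), since
+// 2^63 ≡ 8 (mod 13).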
+
+// Mod calculates out = x mod m.
+//
+// This works regardless of how large the value of x is.
+//
+// The output will be resized to the size of m and overwritten.
+func (out *Nat) Mod(x *Nat, m *Modulus) *Nat {
+ out.resetFor(m)
+ // Working our way from the most significant to the least significant limb,
+ // we can insert each limb at the least significant position, shifting all
+ // previous limbs left by _W. This way each limb will get shifted by the
+ // correct number of bits. We can insert at least N - 1 limbs without
+ // overflowing m. After that, we need to reduce every time we shift.
+ i := len(x.limbs) - 1
+ // For the first N - 1 limbs we can skip the actual shifting and position
+ // them at the shifted position, which starts at min(N - 2, i).
+ start := len(m.nat.limbs) - 2
+ if i < start {
+ start = i
+ }
+ for j := start; j >= 0; j-- {
+ out.limbs[j] = x.limbs[i]
+ i--
+ }
+ // We shift in the remaining limbs, reducing modulo m each time.
+ for i >= 0 {
+ out.shiftIn(x.limbs[i], m)
+ i--
+ }
+ return out
+}
+
+// ExpandFor ensures out has the right size to work with operations modulo m.
+//
+// The announced size of out must be smaller than or equal to that of m.
+func (out *Nat) ExpandFor(m *Modulus) *Nat {
+ return out.expand(len(m.nat.limbs))
+}
+
+// resetFor ensures out has the right size to work with operations modulo m.
+//
+// out is zeroed and may start at any size.
+func (out *Nat) resetFor(m *Modulus) *Nat {
+ return out.reset(len(m.nat.limbs))
+}
+
+// Sub computes x = x - y mod m.
+//
+// The length of both operands must be the same as the modulus. Both operands
+// must already be reduced modulo m.
+func (x *Nat) Sub(y *Nat, m *Modulus) *Nat {
+ underflow := x.sub(yes, y)
+ // If the subtraction underflowed, add m.
+ x.add(choice(underflow), m.nat)
+ return x
+}
+
+// Add computes x = x + y mod m.
+//
+// The length of both operands must be the same as the modulus. Both operands
+// must already be reduced modulo m.
+func (x *Nat) Add(y *Nat, m *Modulus) *Nat {
+ overflow := x.add(yes, y)
+ underflow := not(x.cmpGeq(m.nat)) // x < m
+
+ // Three cases are possible:
+ //
+ // - overflow = 0, underflow = 0
+ //
+ // In this case, addition fits in our limbs, but we can still subtract away
+ // m without an underflow, so we need to perform the subtraction to reduce
+ // our result.
+ //
+ // - overflow = 0, underflow = 1
+ //
+ // The addition fits in our limbs, but we can't subtract m without
+ // underflowing. The result is already reduced.
+ //
+ // - overflow = 1, underflow = 1
+ //
+ // The addition does not fit in our limbs, and the subtraction's borrow
+ // would cancel out with the addition's carry. We need to subtract m to
+ // reduce our result.
+ //
+ // The overflow = 1, underflow = 0 case is not possible, because y is at
+ // most m - 1, and if adding m - 1 overflows, then subtracting m must
+ // necessarily underflow.
+ needSubtraction := ctEq(overflow, uint(underflow))
+
+ x.sub(needSubtraction, m.nat)
+ return x
+}
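+
+// Single-limb trace (mirroring TestModAdd): with m = 13, x = 6, y = 7, the
+// raw sum 13 produces overflow = 0, and cmpGeq(13, 13) = 1 gives
+// underflow = 0, so needSubtraction = 1 and x becomes 13 - 13 = 0.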
+
+// montgomeryRepresentation calculates x = x * R mod m, with R = 2^(_W * n) and
+// n = len(m.nat.limbs).
+//
+// Faster Montgomery multiplication replaces standard modular multiplication for
+// numbers in this representation.
+//
+// This assumes that x is already reduced mod m.
+func (x *Nat) montgomeryRepresentation(m *Modulus) *Nat {
+ // A Montgomery multiplication (which computes a * b / R) by R * R works out
+ // to a multiplication by R, which takes the value out of the Montgomery domain.
+ return x.montgomeryMul(NewNat().set(x), m.rr, m)
+}
+
+// montgomeryReduction calculates x = x / R mod m, with R = 2^(_W * n) and
+// n = len(m.nat.limbs).
+//
+// This assumes that x is already reduced mod m.
+func (x *Nat) montgomeryReduction(m *Modulus) *Nat {
+ // By Montgomery multiplying with 1 not in Montgomery representation, we
+ // convert out back from Montgomery representation, because it works out to
+ // dividing by R.
+ t0 := NewNat().set(x)
+ t1 := NewNat().ExpandFor(m)
+ t1.limbs[0] = 1
+ return x.montgomeryMul(t0, t1, m)
+}
+
+// montgomeryMul calculates d = a * b / R mod m, with R = 2^(_W * n) and
+// n = len(m.nat.limbs), using the Montgomery Multiplication technique.
+//
+// All inputs should be the same length, not aliasing d, and already
+// reduced modulo m. d will be resized to the size of m and overwritten.
+func (d *Nat) montgomeryMul(a *Nat, b *Nat, m *Modulus) *Nat {
+ d.resetFor(m)
+ if len(a.limbs) != len(m.nat.limbs) || len(b.limbs) != len(m.nat.limbs) {
+ panic("bigmod: invalid montgomeryMul input")
+ }
+
+ // See https://bearssl.org/bigint.html#montgomery-reduction-and-multiplication
+ // for a description of the algorithm implemented mostly in montgomeryLoop.
+ // See Add for how overflow, underflow, and needSubtraction relate.
+ overflow := montgomeryLoop(d.limbs, a.limbs, b.limbs, m.nat.limbs, m.m0inv)
+ underflow := not(d.cmpGeq(m.nat)) // d < m
+ needSubtraction := ctEq(overflow, uint(underflow))
+ d.sub(needSubtraction, m.nat)
+
+ return d
+}
+
+func montgomeryLoopGeneric(d, a, b, m []uint, m0inv uint) (overflow uint) {
+ // Eliminate bounds checks in the loop.
+ size := len(d)
+ a = a[:size]
+ b = b[:size]
+ m = m[:size]
+
+ for _, ai := range a {
+ // This is an unrolled iteration of the loop below with j = 0.
+ hi, lo := bits.Mul(ai, b[0])
+ z_lo, c := bits.Add(d[0], lo, 0)
+ f := (z_lo * m0inv) & _MASK // (d[0] + a[i] * b[0]) * m0inv
+ z_hi, _ := bits.Add(0, hi, c)
+ hi, lo = bits.Mul(f, m[0])
+ z_lo, c = bits.Add(z_lo, lo, 0)
+ z_hi, _ = bits.Add(z_hi, hi, c)
+ carry := z_hi<<1 | z_lo>>_W
+
+ for j := 1; j < size; j++ {
+ // z = d[j] + a[i] * b[j] + f * m[j] + carry <= 2^(2W+1) - 2^(W+1) + 2^W
+ hi, lo := bits.Mul(ai, b[j])
+ z_lo, c := bits.Add(d[j], lo, 0)
+ z_hi, _ := bits.Add(0, hi, c)
+ hi, lo = bits.Mul(f, m[j])
+ z_lo, c = bits.Add(z_lo, lo, 0)
+ z_hi, _ = bits.Add(z_hi, hi, c)
+ z_lo, c = bits.Add(z_lo, carry, 0)
+ z_hi, _ = bits.Add(z_hi, 0, c)
+ d[j-1] = z_lo & _MASK
+ carry = z_hi<<1 | z_lo>>_W // carry <= 2^(W+1) - 2
+ }
+
+ z := overflow + carry // z <= 2^(W+1) - 1
+ d[size-1] = z & _MASK
+ overflow = z >> _W // overflow <= 1
+ }
+ return
+}
+
+// Mul calculates x *= y mod m.
+//
+// x and y must already be reduced modulo m, they must share its announced
+// length, and they may not alias.
+func (x *Nat) Mul(y *Nat, m *Modulus) *Nat {
+ // A Montgomery multiplication by a value out of the Montgomery domain
+ // takes the result out of Montgomery representation.
+ xR := NewNat().set(x).montgomeryRepresentation(m) // xR = x * R mod m
+ return x.montgomeryMul(xR, y, m) // x = xR * y / R mod m
+}
+
+// Exp calculates out = x^e mod m.
+//
+// The exponent e is represented in big-endian order. The output will be resized
+// to the size of m and overwritten. x must already be reduced modulo m.
+func (out *Nat) Exp(x *Nat, e []byte, m *Modulus) *Nat {
+ // We use a 4 bit window. For our RSA workload, 4 bit windows are faster
+ // than 2 bit windows, but use an extra 12 nats worth of scratch space.
+	// Bit sizes that don't divide 8 would be more complex to implement.
+
+ table := [(1 << 4) - 1]*Nat{ // table[i] = x ^ (i+1)
+		// NewNat calls are unrolled so they are allocated on the stack.
+ NewNat(), NewNat(), NewNat(), NewNat(), NewNat(),
+ NewNat(), NewNat(), NewNat(), NewNat(), NewNat(),
+ NewNat(), NewNat(), NewNat(), NewNat(), NewNat(),
+ }
+ table[0].set(x).montgomeryRepresentation(m)
+ for i := 1; i < len(table); i++ {
+ table[i].montgomeryMul(table[i-1], table[0], m)
+ }
+
+ out.resetFor(m)
+ out.limbs[0] = 1
+ out.montgomeryRepresentation(m)
+ t0 := NewNat().ExpandFor(m)
+ t1 := NewNat().ExpandFor(m)
+ for _, b := range e {
+ for _, j := range []int{4, 0} {
+ // Square four times.
+ t1.montgomeryMul(out, out, m)
+ out.montgomeryMul(t1, t1, m)
+ t1.montgomeryMul(out, out, m)
+ out.montgomeryMul(t1, t1, m)
+
+ // Select x^k in constant time from the table.
+ k := uint((b >> j) & 0b1111)
+ for i := range table {
+ t0.assign(ctEq(k, uint(i+1)), table[i])
+ }
+
+ // Multiply by x^k, discarding the result if k = 0.
+ t1.montgomeryMul(out, t0, m)
+ out.assign(not(ctEq(k, 0)), t1)
+ }
+ }
+
+ return out.montgomeryReduction(m)
+}
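
Together, SetBytes, Mod, Mul, and Exp form the small constant-time toolkit that crypto/rsa builds on. A minimal sketch of how the exported API composes, assuming a vendored copy of the package (bigmod is internal to the standard library; the values are illustrative):

package main

import (
	"fmt"
	"math/big"

	"crypto/internal/bigmod" // internal: assumes a vendored copy outside std
)

func main() {
	// The modulus must be odd; 13 keeps the traces easy to check by hand.
	m := bigmod.NewModulusFromBig(big.NewInt(13))

	x, err := bigmod.NewNat().SetBytes([]byte{3}, m)
	if err != nil {
		panic(err) // SetBytes rejects inputs >= m
	}

	// 3^12 mod 13 == 1 by Fermat's little theorem.
	out := bigmod.NewNat().Exp(x, []byte{12}, m)
	fmt.Printf("%x\n", out.Bytes(m)) // 01

	// Mul operates in place on its receiver: 6 * 3 mod 13 == 5.
	y, _ := bigmod.NewNat().SetBytes([]byte{6}, m)
	y.Mul(x, m)
	fmt.Printf("%x\n", y.Bytes(m)) // 05
}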
diff --git a/src/crypto/internal/bigmod/nat_amd64.go b/src/crypto/internal/bigmod/nat_amd64.go
new file mode 100644
index 0000000..e947782
--- /dev/null
+++ b/src/crypto/internal/bigmod/nat_amd64.go
@@ -0,0 +1,8 @@
+// Code generated by command: go run nat_amd64_asm.go -out ../nat_amd64.s -stubs ../nat_amd64.go -pkg bigmod. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+
+package bigmod
+
+//go:noescape
+func montgomeryLoop(d []uint, a []uint, b []uint, m []uint, m0inv uint) uint
diff --git a/src/crypto/internal/bigmod/nat_amd64.s b/src/crypto/internal/bigmod/nat_amd64.s
new file mode 100644
index 0000000..12b7629
--- /dev/null
+++ b/src/crypto/internal/bigmod/nat_amd64.s
@@ -0,0 +1,68 @@
+// Code generated by command: go run nat_amd64_asm.go -out ../nat_amd64.s -stubs ../nat_amd64.go -pkg bigmod. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+
+// func montgomeryLoop(d []uint, a []uint, b []uint, m []uint, m0inv uint) uint
+TEXT ·montgomeryLoop(SB), $8-112
+ MOVQ d_len+8(FP), CX
+ MOVQ d_base+0(FP), BX
+ MOVQ b_base+48(FP), SI
+ MOVQ m_base+72(FP), DI
+ MOVQ m0inv+96(FP), R8
+ XORQ R9, R9
+ XORQ R10, R10
+
+outerLoop:
+ MOVQ a_base+24(FP), R11
+ MOVQ (R11)(R10*8), R11
+ MOVQ (SI), AX
+ MULQ R11
+ MOVQ AX, R13
+ MOVQ DX, R12
+ ADDQ (BX), R13
+ ADCQ $0x00, R12
+ MOVQ R8, R14
+ IMULQ R13, R14
+ BTRQ $0x3f, R14
+ MOVQ (DI), AX
+ MULQ R14
+ ADDQ AX, R13
+ ADCQ DX, R12
+ SHRQ $0x3f, R12, R13
+ XORQ R12, R12
+ INCQ R12
+ JMP innerLoopCondition
+
+innerLoop:
+ MOVQ (SI)(R12*8), AX
+ MULQ R11
+ MOVQ AX, BP
+ MOVQ DX, R15
+ MOVQ (DI)(R12*8), AX
+ MULQ R14
+ ADDQ AX, BP
+ ADCQ DX, R15
+ ADDQ (BX)(R12*8), BP
+ ADCQ $0x00, R15
+ ADDQ R13, BP
+ ADCQ $0x00, R15
+ MOVQ BP, AX
+ BTRQ $0x3f, AX
+ MOVQ AX, -8(BX)(R12*8)
+ SHRQ $0x3f, R15, BP
+ MOVQ BP, R13
+ INCQ R12
+
+innerLoopCondition:
+ CMPQ CX, R12
+ JGT innerLoop
+ ADDQ R13, R9
+ MOVQ R9, AX
+ BTRQ $0x3f, AX
+ MOVQ AX, -8(BX)(CX*8)
+ SHRQ $0x3f, R9
+ INCQ R10
+ CMPQ CX, R10
+ JGT outerLoop
+ MOVQ R9, ret+104(FP)
+ RET
diff --git a/src/crypto/internal/bigmod/nat_noasm.go b/src/crypto/internal/bigmod/nat_noasm.go
new file mode 100644
index 0000000..870b445
--- /dev/null
+++ b/src/crypto/internal/bigmod/nat_noasm.go
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || !gc || purego
+
+package bigmod
+
+func montgomeryLoop(d, a, b, m []uint, m0inv uint) uint {
+ return montgomeryLoopGeneric(d, a, b, m, m0inv)
+}
diff --git a/src/crypto/internal/bigmod/nat_test.go b/src/crypto/internal/bigmod/nat_test.go
new file mode 100644
index 0000000..6431d25
--- /dev/null
+++ b/src/crypto/internal/bigmod/nat_test.go
@@ -0,0 +1,412 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bigmod
+
+import (
+ "math/big"
+ "math/bits"
+ "math/rand"
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+// Generate generates an even Nat. It's used by testing/quick to produce random
+// *Nat values for quick.Check invocations.
+func (*Nat) Generate(r *rand.Rand, size int) reflect.Value {
+ limbs := make([]uint, size)
+ for i := 0; i < size; i++ {
+ limbs[i] = uint(r.Uint64()) & ((1 << _W) - 2)
+ }
+ return reflect.ValueOf(&Nat{limbs})
+}
+
+func testModAddCommutative(a *Nat, b *Nat) bool {
+ m := maxModulus(uint(len(a.limbs)))
+ aPlusB := new(Nat).set(a)
+ aPlusB.Add(b, m)
+ bPlusA := new(Nat).set(b)
+ bPlusA.Add(a, m)
+ return aPlusB.Equal(bPlusA) == 1
+}
+
+func TestModAddCommutative(t *testing.T) {
+ err := quick.Check(testModAddCommutative, &quick.Config{})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func testModSubThenAddIdentity(a *Nat, b *Nat) bool {
+ m := maxModulus(uint(len(a.limbs)))
+ original := new(Nat).set(a)
+ a.Sub(b, m)
+ a.Add(b, m)
+ return a.Equal(original) == 1
+}
+
+func TestModSubThenAddIdentity(t *testing.T) {
+ err := quick.Check(testModSubThenAddIdentity, &quick.Config{})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func testMontgomeryRoundtrip(a *Nat) bool {
+ one := &Nat{make([]uint, len(a.limbs))}
+ one.limbs[0] = 1
+ aPlusOne := new(big.Int).SetBytes(natBytes(a))
+ aPlusOne.Add(aPlusOne, big.NewInt(1))
+ m := NewModulusFromBig(aPlusOne)
+ monty := new(Nat).set(a)
+ monty.montgomeryRepresentation(m)
+ aAgain := new(Nat).set(monty)
+ aAgain.montgomeryMul(monty, one, m)
+ return a.Equal(aAgain) == 1
+}
+
+func TestMontgomeryRoundtrip(t *testing.T) {
+ err := quick.Check(testMontgomeryRoundtrip, &quick.Config{})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestShiftIn(t *testing.T) {
+ if bits.UintSize != 64 {
+		t.Skip("examples are only valid on 64-bit platforms")
+ }
+ examples := []struct {
+ m, x, expected []byte
+ y uint64
+ }{{
+ m: []byte{13},
+ x: []byte{0},
+ y: 0x7FFF_FFFF_FFFF_FFFF,
+ expected: []byte{7},
+ }, {
+ m: []byte{13},
+ x: []byte{7},
+ y: 0x7FFF_FFFF_FFFF_FFFF,
+ expected: []byte{11},
+ }, {
+ m: []byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d},
+ x: make([]byte, 9),
+ y: 0x7FFF_FFFF_FFFF_FFFF,
+ expected: []byte{0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ }, {
+ m: []byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d},
+ x: []byte{0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ y: 0,
+ expected: []byte{0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08},
+ }}
+
+ for i, tt := range examples {
+ m := modulusFromBytes(tt.m)
+ got := natFromBytes(tt.x).ExpandFor(m).shiftIn(uint(tt.y), m)
+ if got.Equal(natFromBytes(tt.expected).ExpandFor(m)) != 1 {
+ t.Errorf("%d: got %x, expected %x", i, got, tt.expected)
+ }
+ }
+}
+
+func TestModulusAndNatSizes(t *testing.T) {
+ // These are 126 bit (2 * _W on 64-bit architectures) values, serialized as
+ // 128 bits worth of bytes. If leading zeroes are stripped, they fit in two
+ // limbs, if they are not, they fit in three. This can be a problem because
+ // modulus strips leading zeroes and nat does not.
+ m := modulusFromBytes([]byte{
+ 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff})
+ xb := []byte{0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}
+ natFromBytes(xb).ExpandFor(m) // must not panic for shrinking
+ NewNat().SetBytes(xb, m)
+}
+
+func TestSetBytes(t *testing.T) {
+ tests := []struct {
+ m, b []byte
+ fail bool
+ }{{
+ m: []byte{0xff, 0xff},
+ b: []byte{0x00, 0x01},
+ }, {
+ m: []byte{0xff, 0xff},
+ b: []byte{0xff, 0xff},
+ fail: true,
+ }, {
+ m: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ b: []byte{0x00, 0x01},
+ }, {
+ m: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ b: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe},
+ }, {
+ m: []byte{0xff, 0xff},
+ b: []byte{0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ fail: true,
+ }, {
+ m: []byte{0xff, 0xff},
+ b: []byte{0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ fail: true,
+ }, {
+ m: []byte{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ b: []byte{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe},
+ }, {
+ m: []byte{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ b: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe},
+ fail: true,
+ }, {
+ m: []byte{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ b: []byte{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ fail: true,
+ }, {
+ m: []byte{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ b: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe},
+ fail: true,
+ }, {
+ m: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd},
+ b: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ fail: true,
+ }}
+
+ for i, tt := range tests {
+ m := modulusFromBytes(tt.m)
+ got, err := NewNat().SetBytes(tt.b, m)
+ if err != nil {
+ if !tt.fail {
+ t.Errorf("%d: unexpected error: %v", i, err)
+ }
+ continue
+ }
+ if err == nil && tt.fail {
+ t.Errorf("%d: unexpected success", i)
+ continue
+ }
+ if expected := natFromBytes(tt.b).ExpandFor(m); got.Equal(expected) != yes {
+ t.Errorf("%d: got %x, expected %x", i, got, expected)
+ }
+ }
+
+ f := func(xBytes []byte) bool {
+ m := maxModulus(uint(len(xBytes)*8/_W + 1))
+ got, err := NewNat().SetBytes(xBytes, m)
+ if err != nil {
+ return false
+ }
+ return got.Equal(natFromBytes(xBytes).ExpandFor(m)) == yes
+ }
+
+ err := quick.Check(f, &quick.Config{})
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestExpand(t *testing.T) {
+ sliced := []uint{1, 2, 3, 4}
+ examples := []struct {
+ in []uint
+ n int
+ out []uint
+ }{{
+ []uint{1, 2},
+ 4,
+ []uint{1, 2, 0, 0},
+ }, {
+ sliced[:2],
+ 4,
+ []uint{1, 2, 0, 0},
+ }, {
+ []uint{1, 2},
+ 2,
+ []uint{1, 2},
+ }}
+
+ for i, tt := range examples {
+ got := (&Nat{tt.in}).expand(tt.n)
+ if len(got.limbs) != len(tt.out) || got.Equal(&Nat{tt.out}) != 1 {
+ t.Errorf("%d: got %x, expected %x", i, got, tt.out)
+ }
+ }
+}
+
+func TestMod(t *testing.T) {
+ m := modulusFromBytes([]byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d})
+ x := natFromBytes([]byte{0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
+ out := new(Nat)
+ out.Mod(x, m)
+ expected := natFromBytes([]byte{0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09})
+ if out.Equal(expected) != 1 {
+ t.Errorf("%+v != %+v", out, expected)
+ }
+}
+
+func TestModSub(t *testing.T) {
+ m := modulusFromBytes([]byte{13})
+ x := &Nat{[]uint{6}}
+ y := &Nat{[]uint{7}}
+ x.Sub(y, m)
+ expected := &Nat{[]uint{12}}
+ if x.Equal(expected) != 1 {
+ t.Errorf("%+v != %+v", x, expected)
+ }
+ x.Sub(y, m)
+ expected = &Nat{[]uint{5}}
+ if x.Equal(expected) != 1 {
+ t.Errorf("%+v != %+v", x, expected)
+ }
+}
+
+func TestModAdd(t *testing.T) {
+ m := modulusFromBytes([]byte{13})
+ x := &Nat{[]uint{6}}
+ y := &Nat{[]uint{7}}
+ x.Add(y, m)
+ expected := &Nat{[]uint{0}}
+ if x.Equal(expected) != 1 {
+ t.Errorf("%+v != %+v", x, expected)
+ }
+ x.Add(y, m)
+ expected = &Nat{[]uint{7}}
+ if x.Equal(expected) != 1 {
+ t.Errorf("%+v != %+v", x, expected)
+ }
+}
+
+func TestExp(t *testing.T) {
+ m := modulusFromBytes([]byte{13})
+ x := &Nat{[]uint{3}}
+ out := &Nat{[]uint{0}}
+ out.Exp(x, []byte{12}, m)
+ expected := &Nat{[]uint{1}}
+ if out.Equal(expected) != 1 {
+ t.Errorf("%+v != %+v", out, expected)
+ }
+}
+
+func natBytes(n *Nat) []byte {
+ return n.Bytes(maxModulus(uint(len(n.limbs))))
+}
+
+func natFromBytes(b []byte) *Nat {
+ bb := new(big.Int).SetBytes(b)
+ return NewNat().setBig(bb)
+}
+
+func modulusFromBytes(b []byte) *Modulus {
+ bb := new(big.Int).SetBytes(b)
+ return NewModulusFromBig(bb)
+}
+
+// maxModulus returns the biggest modulus that can fit in n limbs.
+func maxModulus(n uint) *Modulus {
+ m := big.NewInt(1)
+ m.Lsh(m, n*_W)
+ m.Sub(m, big.NewInt(1))
+ return NewModulusFromBig(m)
+}
+
+func makeBenchmarkModulus() *Modulus {
+ return maxModulus(32)
+}
+
+func makeBenchmarkValue() *Nat {
+ x := make([]uint, 32)
+ for i := 0; i < 32; i++ {
+ x[i] = _MASK - 1
+ }
+ return &Nat{limbs: x}
+}
+
+func makeBenchmarkExponent() []byte {
+ e := make([]byte, 256)
+ for i := 0; i < 32; i++ {
+ e[i] = 0xFF
+ }
+ return e
+}
+
+func BenchmarkModAdd(b *testing.B) {
+ x := makeBenchmarkValue()
+ y := makeBenchmarkValue()
+ m := makeBenchmarkModulus()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Add(y, m)
+ }
+}
+
+func BenchmarkModSub(b *testing.B) {
+ x := makeBenchmarkValue()
+ y := makeBenchmarkValue()
+ m := makeBenchmarkModulus()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Sub(y, m)
+ }
+}
+
+func BenchmarkMontgomeryRepr(b *testing.B) {
+ x := makeBenchmarkValue()
+ m := makeBenchmarkModulus()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.montgomeryRepresentation(m)
+ }
+}
+
+func BenchmarkMontgomeryMul(b *testing.B) {
+ x := makeBenchmarkValue()
+ y := makeBenchmarkValue()
+ out := makeBenchmarkValue()
+ m := makeBenchmarkModulus()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ out.montgomeryMul(x, y, m)
+ }
+}
+
+func BenchmarkModMul(b *testing.B) {
+ x := makeBenchmarkValue()
+ y := makeBenchmarkValue()
+ m := makeBenchmarkModulus()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Mul(y, m)
+ }
+}
+
+func BenchmarkExpBig(b *testing.B) {
+ out := new(big.Int)
+ exponentBytes := makeBenchmarkExponent()
+ x := new(big.Int).SetBytes(exponentBytes)
+ e := new(big.Int).SetBytes(exponentBytes)
+ n := new(big.Int).SetBytes(exponentBytes)
+ one := new(big.Int).SetUint64(1)
+ n.Add(n, one)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ out.Exp(x, e, n)
+ }
+}
+
+func BenchmarkExp(b *testing.B) {
+ x := makeBenchmarkValue()
+ e := makeBenchmarkExponent()
+ out := makeBenchmarkValue()
+ m := makeBenchmarkModulus()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ out.Exp(x, e, m)
+ }
+}
diff --git a/src/crypto/internal/boring/Dockerfile b/src/crypto/internal/boring/Dockerfile
new file mode 100644
index 0000000..58eb028
--- /dev/null
+++ b/src/crypto/internal/boring/Dockerfile
@@ -0,0 +1,63 @@
+# Copyright 2020 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Run this using build.sh.
+
+ARG ubuntu=ubuntu
+FROM $ubuntu:focal
+
+RUN mkdir /boring
+WORKDIR /boring
+
+ENV LANG=C
+ENV LANGUAGE=
+
+# Following NIST submission draft dated July 3, 2021.
+# This corresponds to boringssl.googlesource.com/boringssl tag fips-20210429.
+ENV ClangV=12
+RUN apt-get update && \
+ apt-get install --no-install-recommends -y cmake xz-utils wget unzip ca-certificates clang-$ClangV python
+
+# Download, validate, unpack, build, and install Ninja.
+ENV NinjaV=1.10.2
+ENV NinjaH=ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed
+RUN \
+ wget https://github.com/ninja-build/ninja/archive/refs/tags/v$NinjaV.tar.gz && \
+ echo "$NinjaH v$NinjaV.tar.gz" >sha && sha256sum -c sha && \
+ tar -xzf v$NinjaV.tar.gz && \
+ rm v$NinjaV.tar.gz && \
+ cd ninja-$NinjaV && \
+ CC=clang-$ClangV CXX=clang++-$ClangV ./configure.py --bootstrap && \
+ mv ninja /usr/local/bin/
+
+# Download, validate, unpack, and install Go.
+ARG GOARCH
+ENV GoV=1.16.5
+ENV GoHamd64=b12c23023b68de22f74c0524f10b753e7b08b1504cb7e417eccebdd3fae49061
+ENV GoHarm64=d5446b46ef6f36fdffa852f73dfbbe78c1ddf010b99fa4964944b9ae8b4d6799
+RUN \
+ eval GoH=\${GoH$GOARCH} && \
+ wget https://golang.org/dl/go$GoV.linux-$GOARCH.tar.gz && \
+ echo "$GoH go$GoV.linux-$GOARCH.tar.gz" >sha && sha256sum -c sha && \
+ tar -C /usr/local -xzf go$GoV.linux-$GOARCH.tar.gz && \
+ rm go$GoV.linux-$GOARCH.tar.gz && \
+ ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Download, validate, and unpack BoringCrypto.
+ENV BoringV=853ca1ea1168dff08011e5d42d94609cc0ca2e27
+ENV BoringH=a4d069ccef6f3c7bc0c68de82b91414f05cb817494cd1ab483dcf3368883c7c2
+RUN \
+ wget https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-$BoringV.tar.xz && \
+ echo "$BoringH boringssl-$BoringV.tar.xz" >sha && sha256sum -c sha && \
+ tar xJf boringssl-$BoringV.tar.xz
+
+# Build BoringCrypto.
+ADD build-boring.sh /boring/build-boring.sh
+RUN /boring/build-boring.sh
+
+# Build Go BoringCrypto syso.
+# build.sh copies it back out of the Docker image.
+ADD goboringcrypto.h /boring/godriver/goboringcrypto.h
+ADD build-goboring.sh /boring/build-goboring.sh
+RUN /boring/build-goboring.sh
diff --git a/src/crypto/internal/boring/LICENSE b/src/crypto/internal/boring/LICENSE
new file mode 100644
index 0000000..38990bd
--- /dev/null
+++ b/src/crypto/internal/boring/LICENSE
@@ -0,0 +1,202 @@
+The Go source code and supporting files in this directory
+are covered by the usual Go license (see ../../../../LICENSE).
+
+When building with GOEXPERIMENT=boringcrypto, the following applies.
+
+The goboringcrypto_linux_amd64.syso object file is built
+from BoringSSL source code by build/build.sh and is covered
+by the BoringSSL license reproduced below and also at
+https://boringssl.googlesource.com/boringssl/+/fips-20190808/LICENSE.
+
+BoringSSL is a fork of OpenSSL. As such, large parts of it fall under OpenSSL
+licensing. Files that are completely new have a Google copyright and an ISC
+license. This license is reproduced at the bottom of this file.
+
+Contributors to BoringSSL are required to follow the CLA rules for Chromium:
+https://cla.developers.google.com/clas
+
+Some files from Intel are under yet another license, which is also included
+underneath.
+
+The OpenSSL toolkit stays under a dual license, i.e. both the conditions of the
+OpenSSL License and the original SSLeay license apply to the toolkit. See below
+for the actual license texts. Actually both licenses are BSD-style Open Source
+licenses. In case of any license issues related to OpenSSL please contact
+openssl-core@openssl.org.
+
+The following are Google-internal bug numbers where explicit permission from
+some authors is recorded for use of their work. (This is purely for our own
+record keeping.)
+ 27287199
+ 27287880
+ 27287883
+
+ OpenSSL License
+ ---------------
+
+/* ====================================================================
+ * Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+ Original SSLeay License
+ -----------------------
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+
+ISC license used for completely new code in BoringSSL:
+
+/* Copyright (c) 2015, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+
+Some files from Intel carry the following license:
+
+# Copyright (c) 2012, Intel Corporation
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the
+# distribution.
+#
+# * Neither the name of the Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+#
+# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/crypto/internal/boring/README.md b/src/crypto/internal/boring/README.md
new file mode 100644
index 0000000..ec02786
--- /dev/null
+++ b/src/crypto/internal/boring/README.md
@@ -0,0 +1,39 @@
+We have been working inside Google on a fork of Go that uses
+BoringCrypto (the core of [BoringSSL](https://boringssl.googlesource.com/boringssl/))
+for various crypto primitives, in furtherance of some work related to FIPS 140.
+We have heard that some external users of Go would be
+interested in this code as well, so we have published this code
+here in the main Go repository behind the setting GOEXPERIMENT=boringcrypto.
+
+Use of GOEXPERIMENT=boringcrypto outside Google is _unsupported_.
+This mode is not part of the [Go 1 compatibility rules](https://go.dev/doc/go1compat),
+and it may change incompatibly or break in other ways at any time.
+
+To be clear, we are not making any statements or representations about
+the suitability of this code in relation to the FIPS 140 standard.
+Interested users will have to evaluate for themselves whether the code
+is useful for their own purposes.
+
+---
+
+This directory holds the core of the BoringCrypto implementation
+as well as the build scripts for the module itself: syso/*.syso.
+
+syso/goboringcrypto_linux_amd64.syso is built with:
+
+ GOARCH=amd64 ./build.sh
+
+syso/goboringcrypto_linux_arm64.syso is built with:
+
+ GOARCH=arm64 ./build.sh
+
+Both run on an x86 Debian Linux system using Docker.
+For the arm64 build to run on an x86 system, you need
+
+ apt-get install qemu-user-static qemu-binfmt-support
+
+to allow the x86 kernel to run arm64 binaries via QEMU.
+
+See build.sh for more details about the build.
+
+
diff --git a/src/crypto/internal/boring/aes.go b/src/crypto/internal/boring/aes.go
new file mode 100644
index 0000000..6fae1d5
--- /dev/null
+++ b/src/crypto/internal/boring/aes.go
@@ -0,0 +1,385 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+
+package boring
+
+/*
+
+#include "goboringcrypto.h"
+
+// These wrappers allocate out_len on the C stack, and check that it matches the expected
+// value, to avoid having to pass a pointer from Go, which would escape to the heap.
+
+int EVP_AEAD_CTX_seal_wrapper(const GO_EVP_AEAD_CTX *ctx, uint8_t *out,
+ size_t exp_out_len,
+ const uint8_t *nonce, size_t nonce_len,
+ const uint8_t *in, size_t in_len,
+ const uint8_t *ad, size_t ad_len) {
+ size_t out_len;
+ int ok = _goboringcrypto_EVP_AEAD_CTX_seal(ctx, out, &out_len, exp_out_len,
+ nonce, nonce_len, in, in_len, ad, ad_len);
+ if (out_len != exp_out_len) {
+ return 0;
+ }
+ return ok;
+};
+
+int EVP_AEAD_CTX_open_wrapper(const GO_EVP_AEAD_CTX *ctx, uint8_t *out,
+ size_t exp_out_len,
+ const uint8_t *nonce, size_t nonce_len,
+ const uint8_t *in, size_t in_len,
+ const uint8_t *ad, size_t ad_len) {
+ size_t out_len;
+ int ok = _goboringcrypto_EVP_AEAD_CTX_open(ctx, out, &out_len, exp_out_len,
+ nonce, nonce_len, in, in_len, ad, ad_len);
+ if (out_len != exp_out_len) {
+ return 0;
+ }
+ return ok;
+};
+
+*/
+import "C"
+import (
+ "bytes"
+ "crypto/cipher"
+ "errors"
+ "runtime"
+ "strconv"
+ "unsafe"
+)
+
+type aesKeySizeError int
+
+func (k aesKeySizeError) Error() string {
+ return "crypto/aes: invalid key size " + strconv.Itoa(int(k))
+}
+
+const aesBlockSize = 16
+
+type aesCipher struct {
+ key []byte
+ enc C.GO_AES_KEY
+ dec C.GO_AES_KEY
+}
+
+type extraModes interface {
+ // Copied out of crypto/aes/modes.go.
+ NewCBCEncrypter(iv []byte) cipher.BlockMode
+ NewCBCDecrypter(iv []byte) cipher.BlockMode
+ NewCTR(iv []byte) cipher.Stream
+ NewGCM(nonceSize, tagSize int) (cipher.AEAD, error)
+}
+
+var _ extraModes = (*aesCipher)(nil)
+
+func NewAESCipher(key []byte) (cipher.Block, error) {
+ c := &aesCipher{key: bytes.Clone(key)}
+ // Note: 0 is success, contradicting the usual BoringCrypto convention.
+ if C._goboringcrypto_AES_set_decrypt_key((*C.uint8_t)(unsafe.Pointer(&c.key[0])), C.uint(8*len(c.key)), &c.dec) != 0 ||
+ C._goboringcrypto_AES_set_encrypt_key((*C.uint8_t)(unsafe.Pointer(&c.key[0])), C.uint(8*len(c.key)), &c.enc) != 0 {
+ return nil, aesKeySizeError(len(key))
+ }
+ return c, nil
+}
+
+func (c *aesCipher) BlockSize() int { return aesBlockSize }
+
+func (c *aesCipher) Encrypt(dst, src []byte) {
+ if inexactOverlap(dst, src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(src) < aesBlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < aesBlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ C._goboringcrypto_AES_encrypt(
+ (*C.uint8_t)(unsafe.Pointer(&src[0])),
+ (*C.uint8_t)(unsafe.Pointer(&dst[0])),
+ &c.enc)
+}
+
+func (c *aesCipher) Decrypt(dst, src []byte) {
+ if inexactOverlap(dst, src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(src) < aesBlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < aesBlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ C._goboringcrypto_AES_decrypt(
+ (*C.uint8_t)(unsafe.Pointer(&src[0])),
+ (*C.uint8_t)(unsafe.Pointer(&dst[0])),
+ &c.dec)
+}
+
+type aesCBC struct {
+ key *C.GO_AES_KEY
+ mode C.int
+ iv [aesBlockSize]byte
+}
+
+func (x *aesCBC) BlockSize() int { return aesBlockSize }
+
+func (x *aesCBC) CryptBlocks(dst, src []byte) {
+ if inexactOverlap(dst, src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(src)%aesBlockSize != 0 {
+ panic("crypto/cipher: input not full blocks")
+ }
+ if len(dst) < len(src) {
+ panic("crypto/cipher: output smaller than input")
+ }
+ if len(src) > 0 {
+ C._goboringcrypto_AES_cbc_encrypt(
+ (*C.uint8_t)(unsafe.Pointer(&src[0])),
+ (*C.uint8_t)(unsafe.Pointer(&dst[0])),
+ C.size_t(len(src)), x.key,
+ (*C.uint8_t)(unsafe.Pointer(&x.iv[0])), x.mode)
+ }
+}
+
+func (x *aesCBC) SetIV(iv []byte) {
+ if len(iv) != aesBlockSize {
+ panic("cipher: incorrect length IV")
+ }
+ copy(x.iv[:], iv)
+}
+
+func (c *aesCipher) NewCBCEncrypter(iv []byte) cipher.BlockMode {
+ x := &aesCBC{key: &c.enc, mode: C.GO_AES_ENCRYPT}
+ copy(x.iv[:], iv)
+ return x
+}
+
+func (c *aesCipher) NewCBCDecrypter(iv []byte) cipher.BlockMode {
+ x := &aesCBC{key: &c.dec, mode: C.GO_AES_DECRYPT}
+ copy(x.iv[:], iv)
+ return x
+}
+
+type aesCTR struct {
+ key *C.GO_AES_KEY
+ iv [aesBlockSize]byte
+ num C.uint
+ ecount_buf [16]C.uint8_t
+}
+
+func (x *aesCTR) XORKeyStream(dst, src []byte) {
+ if inexactOverlap(dst, src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(dst) < len(src) {
+ panic("crypto/cipher: output smaller than input")
+ }
+ if len(src) == 0 {
+ return
+ }
+ C._goboringcrypto_AES_ctr128_encrypt(
+ (*C.uint8_t)(unsafe.Pointer(&src[0])),
+ (*C.uint8_t)(unsafe.Pointer(&dst[0])),
+ C.size_t(len(src)), x.key, (*C.uint8_t)(unsafe.Pointer(&x.iv[0])),
+ &x.ecount_buf[0], &x.num)
+}
+
+func (c *aesCipher) NewCTR(iv []byte) cipher.Stream {
+ x := &aesCTR{key: &c.enc}
+ copy(x.iv[:], iv)
+ return x
+}
+
+type aesGCM struct {
+ ctx C.GO_EVP_AEAD_CTX
+ aead *C.GO_EVP_AEAD
+}
+
+const (
+ gcmBlockSize = 16
+ gcmTagSize = 16
+ gcmStandardNonceSize = 12
+)
+
+type aesNonceSizeError int
+
+func (n aesNonceSizeError) Error() string {
+ return "crypto/aes: invalid GCM nonce size " + strconv.Itoa(int(n))
+}
+
+type noGCM struct {
+ cipher.Block
+}
+
+func (c *aesCipher) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
+ if nonceSize != gcmStandardNonceSize && tagSize != gcmTagSize {
+ return nil, errors.New("crypto/aes: GCM tag and nonce sizes can't be non-standard at the same time")
+ }
+ // Fall back to standard library for GCM with non-standard nonce or tag size.
+ if nonceSize != gcmStandardNonceSize {
+ return cipher.NewGCMWithNonceSize(&noGCM{c}, nonceSize)
+ }
+ if tagSize != gcmTagSize {
+ return cipher.NewGCMWithTagSize(&noGCM{c}, tagSize)
+ }
+ return c.newGCM(false)
+}
+
+func NewGCMTLS(c cipher.Block) (cipher.AEAD, error) {
+ return c.(*aesCipher).newGCM(true)
+}
+
+func (c *aesCipher) newGCM(tls bool) (cipher.AEAD, error) {
+ var aead *C.GO_EVP_AEAD
+ switch len(c.key) * 8 {
+ case 128:
+ if tls {
+ aead = C._goboringcrypto_EVP_aead_aes_128_gcm_tls12()
+ } else {
+ aead = C._goboringcrypto_EVP_aead_aes_128_gcm()
+ }
+ case 256:
+ if tls {
+ aead = C._goboringcrypto_EVP_aead_aes_256_gcm_tls12()
+ } else {
+ aead = C._goboringcrypto_EVP_aead_aes_256_gcm()
+ }
+ default:
+ // Fall back to standard library for GCM with non-standard key size.
+ return cipher.NewGCMWithNonceSize(&noGCM{c}, gcmStandardNonceSize)
+ }
+
+ g := &aesGCM{aead: aead}
+ if C._goboringcrypto_EVP_AEAD_CTX_init(&g.ctx, aead, (*C.uint8_t)(unsafe.Pointer(&c.key[0])), C.size_t(len(c.key)), C.GO_EVP_AEAD_DEFAULT_TAG_LENGTH, nil) == 0 {
+ return nil, fail("EVP_AEAD_CTX_init")
+ }
+ // Note: Because of the finalizer, any time g.ctx is passed to cgo,
+ // that call must be followed by a call to runtime.KeepAlive(g),
+ // to make sure g is not collected (and finalized) before the cgo
+ // call returns.
+ runtime.SetFinalizer(g, (*aesGCM).finalize)
+ if g.NonceSize() != gcmStandardNonceSize {
+ panic("boringcrypto: internal confusion about nonce size")
+ }
+ if g.Overhead() != gcmTagSize {
+ panic("boringcrypto: internal confusion about tag size")
+ }
+
+ return g, nil
+}
+
+func (g *aesGCM) finalize() {
+ C._goboringcrypto_EVP_AEAD_CTX_cleanup(&g.ctx)
+}
+
+func (g *aesGCM) NonceSize() int {
+ return int(C._goboringcrypto_EVP_AEAD_nonce_length(g.aead))
+}
+
+func (g *aesGCM) Overhead() int {
+ return int(C._goboringcrypto_EVP_AEAD_max_overhead(g.aead))
+}
+
+// base returns the address of the underlying array in b,
+// being careful not to panic when b has zero length.
+func base(b []byte) *C.uint8_t {
+ if len(b) == 0 {
+ return nil
+ }
+ return (*C.uint8_t)(unsafe.Pointer(&b[0]))
+}
+
+func (g *aesGCM) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
+ if len(nonce) != gcmStandardNonceSize {
+ panic("cipher: incorrect nonce length given to GCM")
+ }
+ if uint64(len(plaintext)) > ((1<<32)-2)*aesBlockSize || len(plaintext)+gcmTagSize < len(plaintext) {
+ panic("cipher: message too large for GCM")
+ }
+ if len(dst)+len(plaintext)+gcmTagSize < len(dst) {
+ panic("cipher: message too large for buffer")
+ }
+
+ // Make room in dst to append plaintext+overhead.
+ n := len(dst)
+ for cap(dst) < n+len(plaintext)+gcmTagSize {
+ dst = append(dst[:cap(dst)], 0)
+ }
+ dst = dst[:n+len(plaintext)+gcmTagSize]
+
+ // Check delayed until now to make sure len(dst) is accurate.
+ if inexactOverlap(dst[n:], plaintext) {
+ panic("cipher: invalid buffer overlap")
+ }
+
+ outLen := C.size_t(len(plaintext) + gcmTagSize)
+ ok := C.EVP_AEAD_CTX_seal_wrapper(
+ &g.ctx,
+ (*C.uint8_t)(unsafe.Pointer(&dst[n])), outLen,
+ base(nonce), C.size_t(len(nonce)),
+ base(plaintext), C.size_t(len(plaintext)),
+ base(additionalData), C.size_t(len(additionalData)))
+ runtime.KeepAlive(g)
+ if ok == 0 {
+ panic(fail("EVP_AEAD_CTX_seal"))
+ }
+ return dst[:n+int(outLen)]
+}
+
+var errOpen = errors.New("cipher: message authentication failed")
+
+func (g *aesGCM) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+ if len(nonce) != gcmStandardNonceSize {
+ panic("cipher: incorrect nonce length given to GCM")
+ }
+ if len(ciphertext) < gcmTagSize {
+ return nil, errOpen
+ }
+ if uint64(len(ciphertext)) > ((1<<32)-2)*aesBlockSize+gcmTagSize {
+ return nil, errOpen
+ }
+
+ // Make room in dst to append ciphertext without tag.
+ n := len(dst)
+ for cap(dst) < n+len(ciphertext)-gcmTagSize {
+ dst = append(dst[:cap(dst)], 0)
+ }
+ dst = dst[:n+len(ciphertext)-gcmTagSize]
+
+ // Check delayed until now to make sure len(dst) is accurate.
+ if inexactOverlap(dst[n:], ciphertext) {
+ panic("cipher: invalid buffer overlap")
+ }
+
+ outLen := C.size_t(len(ciphertext) - gcmTagSize)
+ ok := C.EVP_AEAD_CTX_open_wrapper(
+ &g.ctx,
+ base(dst[n:]), outLen,
+ base(nonce), C.size_t(len(nonce)),
+ base(ciphertext), C.size_t(len(ciphertext)),
+ base(additionalData), C.size_t(len(additionalData)))
+ runtime.KeepAlive(g)
+ if ok == 0 {
+ return nil, errOpen
+ }
+ return dst[:n+int(outLen)], nil
+}
+
+func anyOverlap(x, y []byte) bool {
+ return len(x) > 0 && len(y) > 0 &&
+ uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
+ uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
+}
+
+func inexactOverlap(x, y []byte) bool {
+ if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
+ return false
+ }
+ return anyOverlap(x, y)
+}
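
Seal and Open above share a slice-growing idiom that is easy to misread: the destination is extended in place when capacity allows, and the original length is saved first because the loop temporarily lengthens the slice. A minimal standalone sketch (the name sealAppendRoom is ours, not part of this file):

	// sealAppendRoom returns dst grown by extra bytes, plus the index
	// where the new region starts. Appending to dst[:cap(dst)] forces
	// reallocation whenever the current capacity is insufficient.
	func sealAppendRoom(dst []byte, extra int) (grown []byte, start int) {
		start = len(dst)
		for cap(dst) < start+extra {
			dst = append(dst[:cap(dst)], 0)
		}
		return dst[:start+extra], start
	}

Saving start before the loop matters: inside the loop len(dst) already exceeds the caller's original length.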
diff --git a/src/crypto/internal/boring/bbig/big.go b/src/crypto/internal/boring/bbig/big.go
new file mode 100644
index 0000000..5ce4697
--- /dev/null
+++ b/src/crypto/internal/boring/bbig/big.go
@@ -0,0 +1,33 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bbig
+
+import (
+ "crypto/internal/boring"
+ "math/big"
+ "unsafe"
+)
+
+func Enc(b *big.Int) boring.BigInt {
+ if b == nil {
+ return nil
+ }
+ x := b.Bits()
+ if len(x) == 0 {
+ return boring.BigInt{}
+ }
+ return unsafe.Slice((*uint)(&x[0]), len(x))
+}
+
+func Dec(b boring.BigInt) *big.Int {
+ if b == nil {
+ return nil
+ }
+ if len(b) == 0 {
+ return new(big.Int)
+ }
+ x := unsafe.Slice((*big.Word)(&b[0]), len(b))
+ return new(big.Int).SetBits(x)
+}
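
Enc and Dec are exact inverses: Enc aliases the *big.Int's least-significant-first machine words as a boring.BigInt, and Dec wraps them back into a *big.Int. A hedged round-trip sketch (bbig is internal to the standard library, so this only compiles inside it):

	x := new(big.Int).Lsh(big.NewInt(1), 100) // 2^100
	words := bbig.Enc(x)                      // low word first
	y := bbig.Dec(words)
	fmt.Println(x.Cmp(y) == 0)                // true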
diff --git a/src/crypto/internal/boring/bcache/cache.go b/src/crypto/internal/boring/bcache/cache.go
new file mode 100644
index 0000000..7934d03
--- /dev/null
+++ b/src/crypto/internal/boring/bcache/cache.go
@@ -0,0 +1,140 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcache implements a GC-friendly cache (see [Cache]) for BoringCrypto.
+package bcache
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// A Cache is a GC-friendly concurrent map from unsafe.Pointer to
+// unsafe.Pointer. It is meant to be used for maintaining shadow
+// BoringCrypto state associated with certain allocated structs, in
+// particular public and private RSA and ECDSA keys.
+//
+// The cache is GC-friendly in the sense that the keys do not
+// indefinitely prevent the garbage collector from collecting them.
+// Instead, at the start of each GC, the cache is cleared entirely. That
+// is, the cache is lossy, and the loss happens at the start of each GC.
+// This means that clients need to be able to cope with cache entries
+// disappearing, but it also means that clients don't need to worry about
+// cache entries keeping the keys from being collected.
+type Cache[K, V any] struct {
+ // The runtime atomically stores nil to ptable at the start of each GC.
+ ptable atomic.Pointer[cacheTable[K, V]]
+}
+
+type cacheTable[K, V any] [cacheSize]atomic.Pointer[cacheEntry[K, V]]
+
+// A cacheEntry is a single entry in the linked list for a given hash table entry.
+type cacheEntry[K, V any] struct {
+ k *K // immutable once created
+ v atomic.Pointer[V] // read and written atomically to allow updates
+ next *cacheEntry[K, V] // immutable once linked into table
+}
+
+func registerCache(unsafe.Pointer) // provided by runtime
+
+// Register registers the cache with the runtime,
+// so that c.ptable can be cleared at the start of each GC.
+// Register must be called during package initialization.
+func (c *Cache[K, V]) Register() {
+ registerCache(unsafe.Pointer(&c.ptable))
+}
+
+// cacheSize is the number of entries in the hash table.
+// The hash is the pointer value mod cacheSize, a prime.
+// Collisions are resolved by maintaining a linked list in each hash slot.
+const cacheSize = 1021
+
+// table returns a pointer to the current cache hash table,
+// coping with the possibility of the GC clearing it out from under us.
+func (c *Cache[K, V]) table() *cacheTable[K, V] {
+ for {
+ p := c.ptable.Load()
+ if p == nil {
+ p = new(cacheTable[K, V])
+ if !c.ptable.CompareAndSwap(nil, p) {
+ continue
+ }
+ }
+ return p
+ }
+}
+
+// Clear clears the cache.
+// The runtime does this automatically at each garbage collection;
+// this method is exposed only for testing.
+func (c *Cache[K, V]) Clear() {
+ // The runtime does this at the start of every garbage collection
+ // (itself, not by calling this function).
+ c.ptable.Store(nil)
+}
+
+// Get returns the cached value associated with k,
+// which is either the value v corresponding to the most recent call to Put(k, v)
+// or nil if that cache entry has been dropped.
+func (c *Cache[K, V]) Get(k *K) *V {
+ head := &c.table()[uintptr(unsafe.Pointer(k))%cacheSize]
+ e := head.Load()
+ for ; e != nil; e = e.next {
+ if e.k == k {
+ return e.v.Load()
+ }
+ }
+ return nil
+}
+
+// Put sets the cached value associated with k to v.
+func (c *Cache[K, V]) Put(k *K, v *V) {
+ head := &c.table()[uintptr(unsafe.Pointer(k))%cacheSize]
+
+ // Strategy is to walk the linked list at head,
+ // same as in Get, to look for existing entry.
+ // If we find one, we update v atomically in place.
+ // If not, then we race to replace the start = *head
+ // we observed with a new k, v entry.
+ // If we win that race, we're done.
+ // Otherwise, we try the whole thing again,
+ // with two optimizations:
+ //
+ // 1. We track in noK the start of the section of
+ // the list that we've confirmed has no entry for k.
+ // The next time down the list, we can stop at noK,
+ // because new entries are inserted at the front of the list.
+ // This guarantees we never traverse an entry
+ // multiple times.
+ //
+ // 2. We only allocate the entry to be added once,
+ // saving it in add for the next attempt.
+ var add, noK *cacheEntry[K, V]
+ n := 0
+ for {
+ e := head.Load()
+ start := e
+ for ; e != nil && e != noK; e = e.next {
+ if e.k == k {
+ e.v.Store(v)
+ return
+ }
+ n++
+ }
+ if add == nil {
+ add = &cacheEntry[K, V]{k: k}
+ add.v.Store(v)
+ }
+ add.next = start
+ if n >= 1000 {
+ // If an individual list gets too long, which shouldn't happen,
+ // throw it away to avoid quadratic lookup behavior.
+ add.next = nil
+ }
+ if head.CompareAndSwap(start, add) {
+ return
+ }
+ noK = start
+ }
+}
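
A hedged sketch of the intended usage pattern, with hypothetical types Key and Shadow standing in for the real Go and BoringCrypto key structs: one package-level cache, registered during package init, and transparently repopulated after a GC clears it.

	var shadow bcache.Cache[Key, Shadow] // Key, Shadow: hypothetical types

	func init() { shadow.Register() }

	// shadowOf returns the BoringCrypto state for k, rebuilding it if the
	// entry was dropped at a GC. newShadow is a hypothetical constructor;
	// it must be safe to call repeatedly for the same key.
	func shadowOf(k *Key) *Shadow {
		if v := shadow.Get(k); v != nil {
			return v
		}
		v := newShadow(k)
		shadow.Put(k, v)
		return v
	}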
diff --git a/src/crypto/internal/boring/bcache/cache_test.go b/src/crypto/internal/boring/bcache/cache_test.go
new file mode 100644
index 0000000..19458a1
--- /dev/null
+++ b/src/crypto/internal/boring/bcache/cache_test.go
@@ -0,0 +1,122 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcache
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+)
+
+var registeredCache Cache[int, int32]
+
+func init() {
+ registeredCache.Register()
+}
+
+var seq atomic.Uint32
+
+func next[T int | int32]() *T {
+ x := new(T)
+ *x = T(seq.Add(1))
+ return x
+}
+
+func str[T int | int32](x *T) string {
+ if x == nil {
+ return "nil"
+ }
+ return fmt.Sprint(*x)
+}
+
+func TestCache(t *testing.T) {
+ // Use unregistered cache for functionality tests,
+ // to keep the runtime from clearing behind our backs.
+ c := new(Cache[int, int32])
+
+ // Create many entries.
+ m := make(map[*int]*int32)
+ for i := 0; i < 10000; i++ {
+ k := next[int]()
+ v := next[int32]()
+ m[k] = v
+ c.Put(k, v)
+ }
+
+ // Overwrite a random 20% of those.
+ n := 0
+ for k := range m {
+ v := next[int32]()
+ m[k] = v
+ c.Put(k, v)
+ if n++; n >= 2000 {
+ break
+ }
+ }
+
+ // Check results.
+ for k, v := range m {
+ if cv := c.Get(k); cv != v {
+ t.Fatalf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v))
+ }
+ }
+
+ c.Clear()
+ for k := range m {
+ if cv := c.Get(k); cv != nil {
+			t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv))
+ }
+ }
+
+ // Check that registered cache is cleared at GC.
+ c = &registeredCache
+ for k, v := range m {
+ c.Put(k, v)
+ }
+ runtime.GC()
+ for k := range m {
+ if cv := c.Get(k); cv != nil {
+			t.Fatalf("after GC, c.Get(%v) = %v, want nil", str(k), str(cv))
+ }
+ }
+
+ // Check that cache works for concurrent access.
+ // Lists are discarded if they reach 1000 entries,
+ // and there are cacheSize list heads, so we should be
+ // able to do 100 * cacheSize entries with no problem at all.
+ c = new(Cache[int, int32])
+ var barrier, wg sync.WaitGroup
+ const N = 100
+ barrier.Add(N)
+ wg.Add(N)
+ var lost int32
+ for i := 0; i < N; i++ {
+ go func() {
+ defer wg.Done()
+
+ m := make(map[*int]*int32)
+ for j := 0; j < cacheSize; j++ {
+ k, v := next[int](), next[int32]()
+ m[k] = v
+ c.Put(k, v)
+ }
+ barrier.Done()
+ barrier.Wait()
+
+ for k, v := range m {
+ if cv := c.Get(k); cv != v {
+ t.Errorf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v))
+ atomic.AddInt32(&lost, +1)
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ if lost != 0 {
+ t.Errorf("lost %d entries", lost)
+ }
+}
diff --git a/src/crypto/internal/boring/bcache/stub.s b/src/crypto/internal/boring/bcache/stub.s
new file mode 100644
index 0000000..59f2dee
--- /dev/null
+++ b/src/crypto/internal/boring/bcache/stub.s
@@ -0,0 +1,6 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is here to silence an error about registerCache not having a body.
+// (The body is provided by package runtime.)
diff --git a/src/crypto/internal/boring/boring.go b/src/crypto/internal/boring/boring.go
new file mode 100644
index 0000000..102380a
--- /dev/null
+++ b/src/crypto/internal/boring/boring.go
@@ -0,0 +1,126 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+
+package boring
+
+/*
+// goboringcrypto_linux_amd64.syso references pthread functions.
+#cgo LDFLAGS: "-pthread"
+
+#include "goboringcrypto.h"
+*/
+import "C"
+import (
+ "crypto/internal/boring/sig"
+ _ "crypto/internal/boring/syso"
+ "math/bits"
+ "unsafe"
+)
+
+const available = true
+
+func init() {
+ C._goboringcrypto_BORINGSSL_bcm_power_on_self_test()
+ if C._goboringcrypto_FIPS_mode() != 1 {
+ panic("boringcrypto: not in FIPS mode")
+ }
+ sig.BoringCrypto()
+}
+
+// Unreachable marks code that should be unreachable
+// when BoringCrypto is in use. It panics.
+func Unreachable() {
+ panic("boringcrypto: invalid code execution")
+}
+
+// provided by runtime to avoid os import.
+func runtime_arg0() string
+
+func hasSuffix(s, t string) bool {
+ return len(s) > len(t) && s[len(s)-len(t):] == t
+}
+
+// UnreachableExceptTests marks code that should be unreachable
+// when BoringCrypto is in use. It panics.
+func UnreachableExceptTests() {
+ name := runtime_arg0()
+ // If BoringCrypto ran on Windows we'd need to allow _test.exe and .test.exe as well.
+ if !hasSuffix(name, "_test") && !hasSuffix(name, ".test") {
+ println("boringcrypto: unexpected code execution in", name)
+ panic("boringcrypto: invalid code execution")
+ }
+}
+
+type fail string
+
+func (e fail) Error() string { return "boringcrypto: " + string(e) + " failed" }
+
+func wbase(b BigInt) *C.uint8_t {
+ if len(b) == 0 {
+ return nil
+ }
+ return (*C.uint8_t)(unsafe.Pointer(&b[0]))
+}
+
+const wordBytes = bits.UintSize / 8
+
+func bigToBN(x BigInt) *C.GO_BIGNUM {
+ return C._goboringcrypto_BN_le2bn(wbase(x), C.size_t(len(x)*wordBytes), nil)
+}
+
+func bytesToBN(x []byte) *C.GO_BIGNUM {
+ return C._goboringcrypto_BN_bin2bn((*C.uint8_t)(&x[0]), C.size_t(len(x)), nil)
+}
+
+func bnToBig(bn *C.GO_BIGNUM) BigInt {
+ x := make(BigInt, (C._goboringcrypto_BN_num_bytes(bn)+wordBytes-1)/wordBytes)
+ if C._goboringcrypto_BN_bn2le_padded(wbase(x), C.size_t(len(x)*wordBytes), bn) == 0 {
+ panic("boringcrypto: bignum conversion failed")
+ }
+ return x
+}
+
+func bigToBn(bnp **C.GO_BIGNUM, b BigInt) bool {
+ if *bnp != nil {
+ C._goboringcrypto_BN_free(*bnp)
+ *bnp = nil
+ }
+ if b == nil {
+ return true
+ }
+ bn := bigToBN(b)
+ if bn == nil {
+ return false
+ }
+ *bnp = bn
+ return true
+}
+
+// noescape hides a pointer from escape analysis. noescape is
+// the identity function but escape analysis doesn't think the
+// output depends on the input. noescape is inlined and currently
+// compiles down to zero instructions.
+// USE CAREFULLY!
+//
+//go:nosplit
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+ x := uintptr(p)
+ return unsafe.Pointer(x ^ 0)
+}
+
+var zero byte
+
+// addr converts p to its base addr, including a noescape along the way.
+// If p is nil, addr returns a non-nil pointer, so that the result can always
+// be dereferenced.
+//
+//go:nosplit
+func addr(p []byte) *byte {
+ if len(p) == 0 {
+ return &zero
+ }
+ return (*byte)(noescape(unsafe.Pointer(&p[0])))
+}
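
A hedged illustration of the BigInt layout the BN helpers above assume, on a 64-bit system: words run least significant first, and bigToBN hands BoringSSL len(x)*wordBytes little-endian bytes.

	func exampleLayout() {
		x := BigInt{0, 1 << 6}          // 1<<70: low word 0, high word 2^6
		fmt.Println(len(x) * wordBytes) // 16: byte count passed to BN_le2bn
	}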
diff --git a/src/crypto/internal/boring/boring_test.go b/src/crypto/internal/boring/boring_test.go
new file mode 100644
index 0000000..83bbbd3
--- /dev/null
+++ b/src/crypto/internal/boring/boring_test.go
@@ -0,0 +1,34 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Most functionality in this package is tested by replacing existing code
+// and inheriting that code's tests.
+
+package boring
+
+import "testing"
+
+// Test that func init does not panic.
+func TestInit(t *testing.T) {}
+
+// Test that Unreachable panics.
+func TestUnreachable(t *testing.T) {
+ defer func() {
+ if Enabled {
+ if err := recover(); err == nil {
+ t.Fatal("expected Unreachable to panic")
+ }
+ } else {
+ if err := recover(); err != nil {
+ t.Fatalf("expected Unreachable to be a no-op")
+ }
+ }
+ }()
+ Unreachable()
+}
+
+// Test that UnreachableExceptTests does not panic (this is a test).
+func TestUnreachableExceptTests(t *testing.T) {
+ UnreachableExceptTests()
+}
diff --git a/src/crypto/internal/boring/build-boring.sh b/src/crypto/internal/boring/build-boring.sh
new file mode 100755
index 0000000..db49852
--- /dev/null
+++ b/src/crypto/internal/boring/build-boring.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2020 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Do not run directly; run build.sh, which runs this in Docker.
+# This script builds boringssl, which has already been unpacked in /boring/boringssl.
+
+set -e
+id
+date
+cd /boring
+
+# Go requires -fPIC for linux/amd64 cgo builds.
+# Setting -fPIC only affects the compilation of the non-module code in libcrypto.a,
+# because the FIPS module itself is already built with -fPIC.
+echo '#!/bin/bash
+exec clang-'$ClangV' -DGOBORING -fPIC "$@"
+' >/usr/local/bin/clang
+echo '#!/bin/bash
+exec clang++-'$ClangV' -DGOBORING -fPIC "$@"
+' >/usr/local/bin/clang++
+chmod +x /usr/local/bin/clang /usr/local/bin/clang++
+
+# The BoringSSL tests use Go, and cgo would look for gcc.
+export CGO_ENABLED=0
+
+# Modify the support code crypto/mem.c (outside the FIPS module)
+# to not try to use weak symbols, because they don't work with some
+# Go toolchain / clang toolchain combinations.
+perl -p -i -e 's/defined.*ELF.*defined.*GNUC.*/$0 \&\& !defined(GOBORING)/' boringssl/crypto/mem.c
+
+# Verbatim instructions from BoringCrypto build docs.
+printf "set(CMAKE_C_COMPILER \"clang\")\nset(CMAKE_CXX_COMPILER \"clang++\")\n" >${HOME}/toolchain
+cd boringssl
+mkdir build && cd build && cmake -GNinja -DCMAKE_TOOLCHAIN_FILE=${HOME}/toolchain -DFIPS=1 -DCMAKE_BUILD_TYPE=Release ..
+ninja
+./crypto/crypto_test
+cd ../..
+
+if [ "$(./boringssl/build/tool/bssl isfips)" != 1 ]; then
+ echo "NOT FIPS"
+ exit 2
+fi
diff --git a/src/crypto/internal/boring/build-goboring.sh b/src/crypto/internal/boring/build-goboring.sh
new file mode 100755
index 0000000..4938b5e
--- /dev/null
+++ b/src/crypto/internal/boring/build-goboring.sh
@@ -0,0 +1,237 @@
+#!/bin/bash
+# Copyright 2020 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Do not run directly; run build.sh, which runs this in Docker.
+# This script builds goboringcrypto's syso, after boringssl has been built.
+
+export TERM=dumb
+
+set -e
+set -x
+id
+date
+export LANG=C
+unset LANGUAGE
+
+case $(uname -m) in
+x86_64) export GOARCH=amd64 ;;
+aarch64) export GOARCH=arm64 ;;
+*)
+ echo 'unknown uname -m:' $(uname -m) >&2
+ exit 2
+esac
+
+export CGO_ENABLED=0
+
+# Build and run test C++ program to make sure goboringcrypto.h matches openssl/*.h.
+# Also collect list of checked symbols in syms.txt
+set -e
+cd /boring/godriver
+cat >goboringcrypto.cc <<'EOF'
+#include <cassert>
+#include "goboringcrypto0.h"
+#include "goboringcrypto1.h"
+#define check_size(t) if(sizeof(t) != sizeof(GO_ ## t)) {printf("sizeof(" #t ")=%d, but sizeof(GO_" #t ")=%d\n", (int)sizeof(t), (int)sizeof(GO_ ## t)); ret=1;}
+#define check_func(f) { auto x = f; x = _goboringcrypto_ ## f ; }
+#define check_value(n, v) if(n != v) {printf(#n "=%d, but goboringcrypto.h defines it as %d\n", (int)n, (int)v); ret=1;}
+int main() {
+int ret = 0;
+#include "goboringcrypto.x"
+return ret;
+}
+EOF
+
+cat >boringx.awk <<'EOF'
+BEGIN {
+ exitcode = 0
+}
+
+# Ignore comments, #includes, blank lines.
+/^\/\// || /^#/ || NF == 0 { next }
+
+# Ignore unchecked declarations.
+/\/\*unchecked/ { next }
+
+# Check enum values.
+!enum && ($1 == "enum" || $2 == "enum") && $NF == "{" {
+ enum = 1
+ next
+}
+enum && $1 == "};" {
+ enum = 0
+ next
+}
+enum && /^}.*;$/ {
+ enum = 0
+ next
+}
+enum && NF == 3 && $2 == "=" {
+ name = $1
+ sub(/^GO_/, "", name)
+ val = $3
+ sub(/,$/, "", val)
+ print "check_value(" name ", " val ")" > "goboringcrypto.x"
+ next
+}
+enum {
+ print FILENAME ":" NR ": unexpected line in enum: " $0 > "/dev/stderr"
+ exitcode = 1
+ next
+}
+
+# Check struct sizes.
+/^typedef struct / && $NF ~ /^GO_/ {
+ name = $NF
+ sub(/^GO_/, "", name)
+ sub(/;$/, "", name)
+ print "check_size(" name ")" > "goboringcrypto.x"
+ next
+}
+
+# Check function prototypes.
+/^(const )?[^ ]+ \**_goboringcrypto_.*\(/ {
+ name = $2
+ if($1 == "const")
+ name = $3
+ sub(/^\**_goboringcrypto_/, "", name)
+ sub(/\(.*/, "", name)
+ print "check_func(" name ")" > "goboringcrypto.x"
+ print name > "syms.txt"
+ next
+}
+
+{
+ print FILENAME ":" NR ": unexpected line: " $0 > "/dev/stderr"
+ exitcode = 1
+}
+
+END {
+ exit exitcode
+}
+EOF
+
+cat >boringh.awk <<'EOF'
+/^\/\/ #include/ {sub(/\/\//, ""); print > "goboringcrypto0.h"; next}
+/typedef struct|enum ([a-z_]+ )?{|^[ \t]/ {print >"goboringcrypto1.h";next}
+{gsub(/GO_/, ""); gsub(/enum go_/, "enum "); gsub(/go_point_conv/, "point_conv"); print >"goboringcrypto1.h"}
+EOF
+
+awk -f boringx.awk goboringcrypto.h # writes goboringcrypto.x
+awk -f boringh.awk goboringcrypto.h # writes goboringcrypto[01].h
+
+ls -l ../boringssl/include
+clang++ -std=c++11 -fPIC -I../boringssl/include -O2 -o a.out goboringcrypto.cc
+./a.out || exit 2
+
+# clang implements u128 % u128 -> u128 by calling __umodti3,
+# which is in libgcc. To make the result self-contained even if linking
+# against a different compiler version, link our own __umodti3 into the syso.
+# This one is specialized so it only expects divisors below 2^64,
+# which is all BoringCrypto uses. (Otherwise it will seg fault.)
+cat >umod-amd64.s <<'EOF'
+# tu_int __umodti3(tu_int x, tu_int y)
+# x is rsi:rdi, y is rcx:rdx, return result is rdx:rax.
+.globl __umodti3
+__umodti3:
+ # specialized to u128 % u64, so verify that
+ test %rcx,%rcx
+ jne 1f
+
+ # save divisor
+ movq %rdx, %r8
+
+ # reduce top 64 bits mod divisor
+ movq %rsi, %rax
+ xorl %edx, %edx
+ divq %r8
+
+ # reduce full 128-bit mod divisor
+ # quotient fits in 64 bits because top 64 bits have been reduced < divisor.
+ # (even though we only care about the remainder, divq also computes
+ # the quotient, and it will trap if the quotient is too large.)
+ movq %rdi, %rax
+ divq %r8
+
+ # expand remainder to 128 for return
+ movq %rdx, %rax
+ xorl %edx, %edx
+ ret
+
+1:
+ # crash - only want 64-bit divisor
+ xorl %ecx, %ecx
+ movl %ecx, 0(%ecx)
+ jmp 1b
+
+.section .note.GNU-stack,"",@progbits
+EOF
+
+cat >umod-arm64.c <<'EOF'
+typedef unsigned int u128 __attribute__((mode(TI)));
+
+static u128 div(u128 x, u128 y, u128 *rp) {
+ int n = 0;
+ while((y>>(128-1)) != 1 && y < x) {
+ y<<=1;
+ n++;
+ }
+ u128 q = 0;
+ for(;; n--, y>>=1, q<<=1) {
+ if(x>=y) {
+ x -= y;
+ q |= 1;
+ }
+ if(n == 0)
+ break;
+ }
+ if(rp)
+ *rp = x;
+ return q;
+}
+
+u128 __umodti3(u128 x, u128 y) {
+ u128 r;
+ div(x, y, &r);
+ return r;
+}
+
+u128 __udivti3(u128 x, u128 y) {
+ return div(x, y, 0);
+}
+EOF
+
+extra=""
+case $GOARCH in
+amd64)
+ cp umod-amd64.s umod.s
+ clang -c -o umod.o umod.s
+ extra=umod.o
+ ;;
+arm64)
+ cp umod-arm64.c umod.c
+ clang -c -o umod.o umod.c
+ extra=umod.o
+ ;;
+esac
+
+# Prepare copy of libcrypto.a with only the checked functions renamed and exported.
+# All other symbols are left alone and hidden.
+echo BORINGSSL_bcm_power_on_self_test >>syms.txt
+awk '{print "_goboringcrypto_" $0 }' syms.txt >globals.txt
+awk '{print $0 " _goboringcrypto_" $0 }' syms.txt >renames.txt
+objcopy --globalize-symbol=BORINGSSL_bcm_power_on_self_test \
+ ../boringssl/build/crypto/libcrypto.a libcrypto.a
+
+# Link together bcm.o and libcrypto.a into a single object.
+ld -r -nostdlib --whole-archive -o goboringcrypto.o libcrypto.a $extra
+
+echo __umodti3 _goboringcrypto___umodti3 >>renames.txt
+echo __udivti3 _goboringcrypto___udivti3 >>renames.txt
+objcopy --remove-section=.llvm_addrsig goboringcrypto.o goboringcrypto1.o # b/179161016
+objcopy --redefine-syms=renames.txt goboringcrypto1.o goboringcrypto2.o
+objcopy --keep-global-symbols=globals.txt --strip-unneeded goboringcrypto2.o goboringcrypto_linux_$GOARCH.syso
+
+# Done!
+ls -l goboringcrypto_linux_$GOARCH.syso
diff --git a/src/crypto/internal/boring/build.sh b/src/crypto/internal/boring/build.sh
new file mode 100755
index 0000000..ec960d7
--- /dev/null
+++ b/src/crypto/internal/boring/build.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Copyright 2022 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This shell script uses Docker to run build-boring.sh and build-goboring.sh,
+# which build goboringcrypto_linux_$GOARCH.syso according to the Security Policy.
+# Currently, amd64 and arm64 are permitted.
+
+set -e
+set -o pipefail
+
+GOARCH=${GOARCH:-$(go env GOARCH)}
+echo "# Building goboringcrypto_linux_$GOARCH.syso. Set GOARCH to override." >&2
+
+if ! which docker >/dev/null; then
+ echo "# Docker not found. Inside Google, see go/installdocker." >&2
+ exit 1
+fi
+
+platform=""
+buildargs=""
+case "$GOARCH" in
+amd64)
+ ;;
+arm64)
+ if ! docker run --rm -t arm64v8/ubuntu:focal uname -m >/dev/null 2>&1; then
+ echo "# Docker cannot run arm64 binaries. Try:"
+ echo " sudo apt-get install qemu binfmt-support qemu-user-static"
+ echo " docker run --rm --privileged multiarch/qemu-user-static --reset -p yes"
+ echo " docker run --rm -t arm64v8/ubuntu:focal uname -m"
+ exit 1
+ fi
+ platform="--platform linux/arm64/v8"
+ buildargs="--build-arg ubuntu=arm64v8/ubuntu"
+ ;;
+*)
+ echo unknown GOARCH $GOARCH >&2
+ exit 2
+esac
+
+docker build $platform $buildargs --build-arg GOARCH=$GOARCH -t goboring:$GOARCH .
+id=$(docker create $platform goboring:$GOARCH)
+docker cp $id:/boring/godriver/goboringcrypto_linux_$GOARCH.syso ./syso
+docker rm $id
+ls -l ./syso/goboringcrypto_linux_$GOARCH.syso
diff --git a/src/crypto/internal/boring/div_test.c b/src/crypto/internal/boring/div_test.c
new file mode 100644
index 0000000..f909cc9
--- /dev/null
+++ b/src/crypto/internal/boring/div_test.c
@@ -0,0 +1,83 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a self-contained test for a copy of
+// the division algorithm in build-goboring.sh,
+// to verify that it is correct. The real algorithm uses u128
+// but this copy uses u32 for easier testing.
+// s/32/128/g should be the only difference between the two.
+//
+// This is the dumbest possible division algorithm,
+// but any crypto code that depends on the speed of
+// division is equally dumb.
+
+//go:build ignore
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define nelem(x) (sizeof(x)/sizeof((x)[0]))
+
+typedef uint32_t u32;
+
+static u32 div(u32 x, u32 y, u32 *rp) {
+ int n = 0;
+ while((y>>(32-1)) != 1 && y < x) {
+ y<<=1;
+ n++;
+ }
+ u32 q = 0;
+ for(;; n--, y>>=1, q<<=1) {
+ if(x>=y) {
+ x -= y;
+ q |= 1;
+ }
+ if(n == 0)
+ break;
+ }
+ if(rp)
+ *rp = x;
+ return q;
+}
+
+u32 tests[] = {
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 31,
+ 0xFFF,
+ 0x1000,
+ 0x1001,
+ 0xF0F0F0,
+ 0xFFFFFF,
+ 0x1000000,
+ 0xF0F0F0F0,
+ 0xFFFFFFFF,
+};
+
+int
+main(void)
+{
+ for(int i=0; i<nelem(tests); i++)
+ for(int j=0; j<nelem(tests); j++) {
+ u32 n = tests[i];
+ u32 d = tests[j];
+ if(d == 0)
+ continue;
+ u32 r;
+ u32 q = div(n, d, &r);
+ if(q != n/d || r != n%d)
+ printf("div(%x, %x) = %x, %x, want %x, %x\n", n, d, q, r, n/d, n%d);
+ }
+ return 0;
+}
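
For poking at the same shift-and-subtract algorithm from Go, a hedged transliteration of div above (u32 version, preserving the loop structure):

	// div32 computes x/y and x%y by aligning y with x, then subtracting
	// one shifted copy of y per quotient bit.
	func div32(x, y uint32) (q, r uint32) {
		n := 0
		for y>>(32-1) != 1 && y < x {
			y <<= 1
			n++
		}
		for {
			if x >= y {
				x -= y
				q |= 1
			}
			if n == 0 {
				break
			}
			n--
			y >>= 1
			q <<= 1
		}
		return q, x
	}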
diff --git a/src/crypto/internal/boring/doc.go b/src/crypto/internal/boring/doc.go
new file mode 100644
index 0000000..6060fe5
--- /dev/null
+++ b/src/crypto/internal/boring/doc.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package boring provides access to BoringCrypto implementation functions.
+// Check the constant Enabled to find out whether BoringCrypto is available.
+// If BoringCrypto is not available, the functions in this package all panic.
+package boring
+
+// Enabled reports whether BoringCrypto is available.
+// When enabled is false, all functions in this package panic.
+//
+// BoringCrypto is only available on linux/amd64 and linux/arm64 systems.
+const Enabled = available
+
+// A BigInt is the raw words of a *big.Int, least significant word first.
+// This definition allows us to avoid importing math/big.
+// Conversion between BigInt and *big.Int is in crypto/internal/boring/bbig.
+type BigInt []uint
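
Callers throughout crypto/... guard on Enabled; a hedged sketch of the pattern (newBlock and newGoCipher are hypothetical names, the latter standing in for the pure-Go path):

	func newBlock(key []byte) (cipher.Block, error) {
		if boring.Enabled {
			return boring.NewAESCipher(key)
		}
		return newGoCipher(key) // hypothetical pure-Go fallback
	}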
diff --git a/src/crypto/internal/boring/ecdh.go b/src/crypto/internal/boring/ecdh.go
new file mode 100644
index 0000000..8f46d81
--- /dev/null
+++ b/src/crypto/internal/boring/ecdh.go
@@ -0,0 +1,224 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+
+package boring
+
+// #include "goboringcrypto.h"
+import "C"
+import (
+ "errors"
+ "runtime"
+ "unsafe"
+)
+
+type PublicKeyECDH struct {
+ curve string
+ key *C.GO_EC_POINT
+ group *C.GO_EC_GROUP
+ bytes []byte
+}
+
+func (k *PublicKeyECDH) finalize() {
+ C._goboringcrypto_EC_POINT_free(k.key)
+}
+
+type PrivateKeyECDH struct {
+ curve string
+ key *C.GO_EC_KEY
+}
+
+func (k *PrivateKeyECDH) finalize() {
+ C._goboringcrypto_EC_KEY_free(k.key)
+}
+
+func NewPublicKeyECDH(curve string, bytes []byte) (*PublicKeyECDH, error) {
+ if len(bytes) < 1 {
+ return nil, errors.New("NewPublicKeyECDH: missing key")
+ }
+
+ nid, err := curveNID(curve)
+ if err != nil {
+ return nil, err
+ }
+
+ group := C._goboringcrypto_EC_GROUP_new_by_curve_name(nid)
+ if group == nil {
+ return nil, fail("EC_GROUP_new_by_curve_name")
+ }
+ defer C._goboringcrypto_EC_GROUP_free(group)
+ key := C._goboringcrypto_EC_POINT_new(group)
+ if key == nil {
+ return nil, fail("EC_POINT_new")
+ }
+ ok := C._goboringcrypto_EC_POINT_oct2point(group, key, (*C.uint8_t)(unsafe.Pointer(&bytes[0])), C.size_t(len(bytes)), nil) != 0
+ if !ok {
+ C._goboringcrypto_EC_POINT_free(key)
+ return nil, errors.New("point not on curve")
+ }
+
+ k := &PublicKeyECDH{curve, key, group, append([]byte(nil), bytes...)}
+ // Note: Because of the finalizer, any time k.key is passed to cgo,
+ // that call must be followed by a call to runtime.KeepAlive(k),
+ // to make sure k is not collected (and finalized) before the cgo
+ // call returns.
+ runtime.SetFinalizer(k, (*PublicKeyECDH).finalize)
+ return k, nil
+}
+
+func (k *PublicKeyECDH) Bytes() []byte { return k.bytes }
+
+func NewPrivateKeyECDH(curve string, bytes []byte) (*PrivateKeyECDH, error) {
+ nid, err := curveNID(curve)
+ if err != nil {
+ return nil, err
+ }
+ key := C._goboringcrypto_EC_KEY_new_by_curve_name(nid)
+ if key == nil {
+ return nil, fail("EC_KEY_new_by_curve_name")
+ }
+ b := bytesToBN(bytes)
+ ok := b != nil && C._goboringcrypto_EC_KEY_set_private_key(key, b) != 0
+ if b != nil {
+ C._goboringcrypto_BN_free(b)
+ }
+ if !ok {
+ C._goboringcrypto_EC_KEY_free(key)
+ return nil, fail("EC_KEY_set_private_key")
+ }
+ k := &PrivateKeyECDH{curve, key}
+ // Note: Same as in NewPublicKeyECDH regarding finalizer and KeepAlive.
+ runtime.SetFinalizer(k, (*PrivateKeyECDH).finalize)
+ return k, nil
+}
+
+func (k *PrivateKeyECDH) PublicKey() (*PublicKeyECDH, error) {
+ defer runtime.KeepAlive(k)
+
+ group := C._goboringcrypto_EC_KEY_get0_group(k.key)
+ if group == nil {
+ return nil, fail("EC_KEY_get0_group")
+ }
+ kbig := C._goboringcrypto_EC_KEY_get0_private_key(k.key)
+ if kbig == nil {
+ return nil, fail("EC_KEY_get0_private_key")
+ }
+ pt := C._goboringcrypto_EC_POINT_new(group)
+ if pt == nil {
+ return nil, fail("EC_POINT_new")
+ }
+ if C._goboringcrypto_EC_POINT_mul(group, pt, kbig, nil, nil, nil) == 0 {
+ C._goboringcrypto_EC_POINT_free(pt)
+ return nil, fail("EC_POINT_mul")
+ }
+ bytes, err := pointBytesECDH(k.curve, group, pt)
+ if err != nil {
+ C._goboringcrypto_EC_POINT_free(pt)
+ return nil, err
+ }
+ pub := &PublicKeyECDH{k.curve, pt, group, bytes}
+ // Note: Same as in NewPublicKeyECDH regarding finalizer and KeepAlive.
+ runtime.SetFinalizer(pub, (*PublicKeyECDH).finalize)
+ return pub, nil
+}
+
+func pointBytesECDH(curve string, group *C.GO_EC_GROUP, pt *C.GO_EC_POINT) ([]byte, error) {
+ out := make([]byte, 1+2*curveSize(curve))
+ n := C._goboringcrypto_EC_POINT_point2oct(group, pt, C.GO_POINT_CONVERSION_UNCOMPRESSED, (*C.uint8_t)(unsafe.Pointer(&out[0])), C.size_t(len(out)), nil)
+ if int(n) != len(out) {
+ return nil, fail("EC_POINT_point2oct")
+ }
+ return out, nil
+}
+
+func ECDH(priv *PrivateKeyECDH, pub *PublicKeyECDH) ([]byte, error) {
+ group := C._goboringcrypto_EC_KEY_get0_group(priv.key)
+ if group == nil {
+ return nil, fail("EC_KEY_get0_group")
+ }
+ privBig := C._goboringcrypto_EC_KEY_get0_private_key(priv.key)
+ if privBig == nil {
+ return nil, fail("EC_KEY_get0_private_key")
+ }
+ pt := C._goboringcrypto_EC_POINT_new(group)
+ if pt == nil {
+ return nil, fail("EC_POINT_new")
+ }
+ defer C._goboringcrypto_EC_POINT_free(pt)
+ if C._goboringcrypto_EC_POINT_mul(group, pt, nil, pub.key, privBig, nil) == 0 {
+ return nil, fail("EC_POINT_mul")
+ }
+ out, err := xCoordBytesECDH(priv.curve, group, pt)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func xCoordBytesECDH(curve string, group *C.GO_EC_GROUP, pt *C.GO_EC_POINT) ([]byte, error) {
+ big := C._goboringcrypto_BN_new()
+ defer C._goboringcrypto_BN_free(big)
+ if C._goboringcrypto_EC_POINT_get_affine_coordinates_GFp(group, pt, big, nil, nil) == 0 {
+ return nil, fail("EC_POINT_get_affine_coordinates_GFp")
+ }
+ return bigBytesECDH(curve, big)
+}
+
+func bigBytesECDH(curve string, big *C.GO_BIGNUM) ([]byte, error) {
+ out := make([]byte, curveSize(curve))
+ if C._goboringcrypto_BN_bn2bin_padded((*C.uint8_t)(&out[0]), C.size_t(len(out)), big) == 0 {
+ return nil, fail("BN_bn2bin_padded")
+ }
+ return out, nil
+}
+
+func curveSize(curve string) int {
+ switch curve {
+ default:
+ panic("crypto/internal/boring: unknown curve " + curve)
+ case "P-256":
+ return 256 / 8
+ case "P-384":
+ return 384 / 8
+ case "P-521":
+ return (521 + 7) / 8
+ }
+}
+
+func GenerateKeyECDH(curve string) (*PrivateKeyECDH, []byte, error) {
+ nid, err := curveNID(curve)
+ if err != nil {
+ return nil, nil, err
+ }
+ key := C._goboringcrypto_EC_KEY_new_by_curve_name(nid)
+ if key == nil {
+ return nil, nil, fail("EC_KEY_new_by_curve_name")
+ }
+ if C._goboringcrypto_EC_KEY_generate_key_fips(key) == 0 {
+ C._goboringcrypto_EC_KEY_free(key)
+ return nil, nil, fail("EC_KEY_generate_key_fips")
+ }
+
+ group := C._goboringcrypto_EC_KEY_get0_group(key)
+ if group == nil {
+ C._goboringcrypto_EC_KEY_free(key)
+ return nil, nil, fail("EC_KEY_get0_group")
+ }
+ b := C._goboringcrypto_EC_KEY_get0_private_key(key)
+ if b == nil {
+ C._goboringcrypto_EC_KEY_free(key)
+ return nil, nil, fail("EC_KEY_get0_private_key")
+ }
+ bytes, err := bigBytesECDH(curve, b)
+ if err != nil {
+ C._goboringcrypto_EC_KEY_free(key)
+ return nil, nil, err
+ }
+
+ k := &PrivateKeyECDH{curve, key}
+ // Note: Same as in NewPublicKeyECDH regarding finalizer and KeepAlive.
+ runtime.SetFinalizer(k, (*PrivateKeyECDH).finalize)
+ return k, bytes, nil
+}
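
A hedged sketch of how these helpers compose into a key agreement (compiles only inside this package, under the same build constraints):

	func exampleECDH() ([]byte, error) {
		alice, _, err := GenerateKeyECDH("P-256")
		if err != nil {
			return nil, err
		}
		bob, _, err := GenerateKeyECDH("P-256")
		if err != nil {
			return nil, err
		}
		bobPub, err := bob.PublicKey()
		if err != nil {
			return nil, err
		}
		// Alice combines her private key with Bob's public key; Bob's
		// mirror-image call with alice.PublicKey() yields the same bytes.
		return ECDH(alice, bobPub)
	}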
diff --git a/src/crypto/internal/boring/ecdsa.go b/src/crypto/internal/boring/ecdsa.go
new file mode 100644
index 0000000..e15f368
--- /dev/null
+++ b/src/crypto/internal/boring/ecdsa.go
@@ -0,0 +1,172 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+
+package boring
+
+// #include "goboringcrypto.h"
+import "C"
+import (
+ "errors"
+ "runtime"
+)
+
+type ecdsaSignature struct {
+ R, S BigInt
+}
+
+type PrivateKeyECDSA struct {
+ key *C.GO_EC_KEY
+}
+
+func (k *PrivateKeyECDSA) finalize() {
+ C._goboringcrypto_EC_KEY_free(k.key)
+}
+
+type PublicKeyECDSA struct {
+ key *C.GO_EC_KEY
+}
+
+func (k *PublicKeyECDSA) finalize() {
+ C._goboringcrypto_EC_KEY_free(k.key)
+}
+
+var errUnknownCurve = errors.New("boringcrypto: unknown elliptic curve")
+
+func curveNID(curve string) (C.int, error) {
+ switch curve {
+ case "P-224":
+ return C.GO_NID_secp224r1, nil
+ case "P-256":
+ return C.GO_NID_X9_62_prime256v1, nil
+ case "P-384":
+ return C.GO_NID_secp384r1, nil
+ case "P-521":
+ return C.GO_NID_secp521r1, nil
+ }
+ return 0, errUnknownCurve
+}
+
+func NewPublicKeyECDSA(curve string, X, Y BigInt) (*PublicKeyECDSA, error) {
+ key, err := newECKey(curve, X, Y)
+ if err != nil {
+ return nil, err
+ }
+ k := &PublicKeyECDSA{key}
+ // Note: Because of the finalizer, any time k.key is passed to cgo,
+ // that call must be followed by a call to runtime.KeepAlive(k),
+ // to make sure k is not collected (and finalized) before the cgo
+ // call returns.
+ runtime.SetFinalizer(k, (*PublicKeyECDSA).finalize)
+ return k, nil
+}
+
+func newECKey(curve string, X, Y BigInt) (*C.GO_EC_KEY, error) {
+ nid, err := curveNID(curve)
+ if err != nil {
+ return nil, err
+ }
+ key := C._goboringcrypto_EC_KEY_new_by_curve_name(nid)
+ if key == nil {
+ return nil, fail("EC_KEY_new_by_curve_name")
+ }
+ group := C._goboringcrypto_EC_KEY_get0_group(key)
+ pt := C._goboringcrypto_EC_POINT_new(group)
+ if pt == nil {
+ C._goboringcrypto_EC_KEY_free(key)
+ return nil, fail("EC_POINT_new")
+ }
+ bx := bigToBN(X)
+ by := bigToBN(Y)
+ ok := bx != nil && by != nil && C._goboringcrypto_EC_POINT_set_affine_coordinates_GFp(group, pt, bx, by, nil) != 0 &&
+ C._goboringcrypto_EC_KEY_set_public_key(key, pt) != 0
+ if bx != nil {
+ C._goboringcrypto_BN_free(bx)
+ }
+ if by != nil {
+ C._goboringcrypto_BN_free(by)
+ }
+ C._goboringcrypto_EC_POINT_free(pt)
+ if !ok {
+ C._goboringcrypto_EC_KEY_free(key)
+ return nil, fail("EC_POINT_set_affine_coordinates_GFp")
+ }
+ return key, nil
+}
+
+func NewPrivateKeyECDSA(curve string, X, Y BigInt, D BigInt) (*PrivateKeyECDSA, error) {
+ key, err := newECKey(curve, X, Y)
+ if err != nil {
+ return nil, err
+ }
+ bd := bigToBN(D)
+ ok := bd != nil && C._goboringcrypto_EC_KEY_set_private_key(key, bd) != 0
+ if bd != nil {
+ C._goboringcrypto_BN_free(bd)
+ }
+ if !ok {
+ C._goboringcrypto_EC_KEY_free(key)
+ return nil, fail("EC_KEY_set_private_key")
+ }
+ k := &PrivateKeyECDSA{key}
+ // Note: Because of the finalizer, any time k.key is passed to cgo,
+ // that call must be followed by a call to runtime.KeepAlive(k),
+ // to make sure k is not collected (and finalized) before the cgo
+ // call returns.
+ runtime.SetFinalizer(k, (*PrivateKeyECDSA).finalize)
+ return k, nil
+}
+
+func SignMarshalECDSA(priv *PrivateKeyECDSA, hash []byte) ([]byte, error) {
+ size := C._goboringcrypto_ECDSA_size(priv.key)
+ sig := make([]byte, size)
+ var sigLen C.uint
+ if C._goboringcrypto_ECDSA_sign(0, base(hash), C.size_t(len(hash)), base(sig), &sigLen, priv.key) == 0 {
+ return nil, fail("ECDSA_sign")
+ }
+ runtime.KeepAlive(priv)
+ return sig[:sigLen], nil
+}
+
+func VerifyECDSA(pub *PublicKeyECDSA, hash []byte, sig []byte) bool {
+ ok := C._goboringcrypto_ECDSA_verify(0, base(hash), C.size_t(len(hash)), base(sig), C.size_t(len(sig)), pub.key) != 0
+ runtime.KeepAlive(pub)
+ return ok
+}
+
+func GenerateKeyECDSA(curve string) (X, Y, D BigInt, err error) {
+ nid, err := curveNID(curve)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ key := C._goboringcrypto_EC_KEY_new_by_curve_name(nid)
+ if key == nil {
+ return nil, nil, nil, fail("EC_KEY_new_by_curve_name")
+ }
+ defer C._goboringcrypto_EC_KEY_free(key)
+ if C._goboringcrypto_EC_KEY_generate_key_fips(key) == 0 {
+ return nil, nil, nil, fail("EC_KEY_generate_key_fips")
+ }
+ group := C._goboringcrypto_EC_KEY_get0_group(key)
+ pt := C._goboringcrypto_EC_KEY_get0_public_key(key)
+ bd := C._goboringcrypto_EC_KEY_get0_private_key(key)
+ if pt == nil || bd == nil {
+ return nil, nil, nil, fail("EC_KEY_get0_private_key")
+ }
+ bx := C._goboringcrypto_BN_new()
+ if bx == nil {
+ return nil, nil, nil, fail("BN_new")
+ }
+ defer C._goboringcrypto_BN_free(bx)
+ by := C._goboringcrypto_BN_new()
+ if by == nil {
+ return nil, nil, nil, fail("BN_new")
+ }
+ defer C._goboringcrypto_BN_free(by)
+ if C._goboringcrypto_EC_POINT_get_affine_coordinates_GFp(group, pt, bx, by, nil) == 0 {
+ return nil, nil, nil, fail("EC_POINT_get_affine_coordinates_GFp")
+ }
+ return bnToBig(bx), bnToBig(by), bnToBig(bd), nil
+}
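
A hedged sign-and-verify round trip using the helpers above (same package and build constraints; hash would normally be a digest of the message, e.g. SHA-256):

	func exampleECDSA(hash []byte) (bool, error) {
		x, y, d, err := GenerateKeyECDSA("P-256")
		if err != nil {
			return false, err
		}
		priv, err := NewPrivateKeyECDSA("P-256", x, y, d)
		if err != nil {
			return false, err
		}
		pub, err := NewPublicKeyECDSA("P-256", x, y)
		if err != nil {
			return false, err
		}
		sig, err := SignMarshalECDSA(priv, hash) // ASN.1 DER signature
		if err != nil {
			return false, err
		}
		return VerifyECDSA(pub, hash, sig), nil
	}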
diff --git a/src/crypto/internal/boring/fipstls/stub.s b/src/crypto/internal/boring/fipstls/stub.s
new file mode 100644
index 0000000..f2e5a50
--- /dev/null
+++ b/src/crypto/internal/boring/fipstls/stub.s
@@ -0,0 +1,12 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto
+
+// runtime_arg0 is declared in tls.go without a body.
+// It's provided by package runtime,
+// but the go command doesn't know that.
+// Having this assembly file keeps the go command
+// from complaining about the missing body
+// (because the implementation might be here).
diff --git a/src/crypto/internal/boring/fipstls/tls.go b/src/crypto/internal/boring/fipstls/tls.go
new file mode 100644
index 0000000..3bf1471
--- /dev/null
+++ b/src/crypto/internal/boring/fipstls/tls.go
@@ -0,0 +1,52 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto
+
+// Package fipstls allows control over whether crypto/tls requires FIPS-approved settings.
+// This package only exists with GOEXPERIMENT=boringcrypto, but the effects are independent
+// of the use of BoringCrypto.
+package fipstls
+
+import "sync/atomic"
+
+var required atomic.Bool
+
+// Force forces crypto/tls to restrict TLS configurations to FIPS-approved settings.
+// By design, this call is impossible to undo (except in tests).
+//
+// Note that this call has an effect even in programs using
+// standard crypto (that is, even when Enabled = false).
+func Force() {
+ required.Store(true)
+}
+
+// Abandon allows non-FIPS-approved settings.
+// If called from a non-test binary, it panics.
+func Abandon() {
+ // Note: Not using boring.UnreachableExceptTests because we want
+ // this test to happen even when boring.Enabled = false.
+ name := runtime_arg0()
+ // Allow _test for Go command, .test for Bazel,
+ // NaClMain for NaCl (where all binaries run as NaClMain),
+ // and empty string for Windows (where runtime_arg0 can't easily find the name).
+ // Since this is an internal package, testing that this isn't used on the
+ // other operating systems should suffice to catch any mistakes.
+ if !hasSuffix(name, "_test") && !hasSuffix(name, ".test") && name != "NaClMain" && name != "" {
+ panic("fipstls: invalid use of Abandon in " + name)
+ }
+ required.Store(false)
+}
+
+// provided by runtime
+func runtime_arg0() string
+
+func hasSuffix(s, t string) bool {
+ return len(s) > len(t) && s[len(s)-len(t):] == t
+}
+
+// Required reports whether FIPS-approved settings are required.
+func Required() bool {
+ return required.Load()
+}
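
As a usage sketch (the package is internal, so ordinary programs reach it through crypto/tls/fipsonly; the direct import below is for illustration only):

package main

import "crypto/internal/boring/fipstls" // internal: real code imports crypto/tls/fipsonly instead

func main() {
	fipstls.Force() // one-way switch outside of tests
	if fipstls.Required() {
		// crypto/tls now rejects non-FIPS-approved configurations.
	}
}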
diff --git a/src/crypto/internal/boring/goboringcrypto.h b/src/crypto/internal/boring/goboringcrypto.h
new file mode 100644
index 0000000..2b11049
--- /dev/null
+++ b/src/crypto/internal/boring/goboringcrypto.h
@@ -0,0 +1,255 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This header file describes the BoringCrypto ABI as built for use in Go.
+// The BoringCrypto build for Go (which generates goboringcrypto_*.syso)
+// takes the standard libcrypto.a from BoringCrypto and adds the prefix
+// _goboringcrypto_ to every symbol, to avoid possible conflicts with
+// code wrapping a different BoringCrypto or OpenSSL.
+//
+// To make this header standalone (so that building Go does not require
+// having a full set of BoringCrypto headers), the struct details are not here.
+// Instead, while building the syso, we compile and run a C++ program
+// that checks that the sizes match. The program also checks (during compilation)
+// that all the function prototypes match the BoringCrypto equivalents.
+// The generation of the checking program depends on the declaration
+// forms used below (one line for most, multiline for enums).
+
+#include <stdlib.h> // size_t
+#include <stdint.h> // uint8_t
+
+// This symbol is hidden in BoringCrypto and marked as a constructor,
+// but cmd/link's internal linking mode doesn't handle constructors.
+// Until it does, we've exported the symbol and can call it explicitly.
+// (If using external linking mode, it will therefore be called twice,
+// once explicitly and once as a constructor, but that's OK.)
+/*unchecked*/ void _goboringcrypto_BORINGSSL_bcm_power_on_self_test(void);
+
+// #include <openssl/crypto.h>
+int _goboringcrypto_FIPS_mode(void);
+void* _goboringcrypto_OPENSSL_malloc(size_t);
+
+// #include <openssl/rand.h>
+int _goboringcrypto_RAND_bytes(uint8_t*, size_t);
+
+// #include <openssl/nid.h>
+enum {
+ GO_NID_md5_sha1 = 114,
+
+ GO_NID_secp224r1 = 713,
+ GO_NID_X9_62_prime256v1 = 415,
+ GO_NID_secp384r1 = 715,
+ GO_NID_secp521r1 = 716,
+
+ GO_NID_sha224 = 675,
+ GO_NID_sha256 = 672,
+ GO_NID_sha384 = 673,
+ GO_NID_sha512 = 674,
+};
+
+// #include <openssl/sha.h>
+typedef struct GO_SHA_CTX { char data[96]; } GO_SHA_CTX;
+int _goboringcrypto_SHA1_Init(GO_SHA_CTX*);
+int _goboringcrypto_SHA1_Update(GO_SHA_CTX*, const void*, size_t);
+int _goboringcrypto_SHA1_Final(uint8_t*, GO_SHA_CTX*);
+
+typedef struct GO_SHA256_CTX { char data[48+64]; } GO_SHA256_CTX;
+int _goboringcrypto_SHA224_Init(GO_SHA256_CTX*);
+int _goboringcrypto_SHA224_Update(GO_SHA256_CTX*, const void*, size_t);
+int _goboringcrypto_SHA224_Final(uint8_t*, GO_SHA256_CTX*);
+int _goboringcrypto_SHA256_Init(GO_SHA256_CTX*);
+int _goboringcrypto_SHA256_Update(GO_SHA256_CTX*, const void*, size_t);
+int _goboringcrypto_SHA256_Final(uint8_t*, GO_SHA256_CTX*);
+
+typedef struct GO_SHA512_CTX { char data[88+128]; } GO_SHA512_CTX;
+int _goboringcrypto_SHA384_Init(GO_SHA512_CTX*);
+int _goboringcrypto_SHA384_Update(GO_SHA512_CTX*, const void*, size_t);
+int _goboringcrypto_SHA384_Final(uint8_t*, GO_SHA512_CTX*);
+int _goboringcrypto_SHA512_Init(GO_SHA512_CTX*);
+int _goboringcrypto_SHA512_Update(GO_SHA512_CTX*, const void*, size_t);
+int _goboringcrypto_SHA512_Final(uint8_t*, GO_SHA512_CTX*);
+
+// #include <openssl/digest.h>
+/*unchecked (opaque)*/ typedef struct GO_EVP_MD { char data[1]; } GO_EVP_MD;
+const GO_EVP_MD* _goboringcrypto_EVP_md4(void);
+const GO_EVP_MD* _goboringcrypto_EVP_md5(void);
+const GO_EVP_MD* _goboringcrypto_EVP_md5_sha1(void);
+const GO_EVP_MD* _goboringcrypto_EVP_sha1(void);
+const GO_EVP_MD* _goboringcrypto_EVP_sha224(void);
+const GO_EVP_MD* _goboringcrypto_EVP_sha256(void);
+const GO_EVP_MD* _goboringcrypto_EVP_sha384(void);
+const GO_EVP_MD* _goboringcrypto_EVP_sha512(void);
+int _goboringcrypto_EVP_MD_type(const GO_EVP_MD*);
+size_t _goboringcrypto_EVP_MD_size(const GO_EVP_MD*);
+
+// #include <openssl/hmac.h>
+typedef struct GO_HMAC_CTX { char data[104]; } GO_HMAC_CTX;
+void _goboringcrypto_HMAC_CTX_init(GO_HMAC_CTX*);
+void _goboringcrypto_HMAC_CTX_cleanup(GO_HMAC_CTX*);
+int _goboringcrypto_HMAC_Init(GO_HMAC_CTX*, const void*, int, const GO_EVP_MD*);
+int _goboringcrypto_HMAC_Update(GO_HMAC_CTX*, const uint8_t*, size_t);
+int _goboringcrypto_HMAC_Final(GO_HMAC_CTX*, uint8_t*, unsigned int*);
+size_t _goboringcrypto_HMAC_size(const GO_HMAC_CTX*);
+int _goboringcrypto_HMAC_CTX_copy_ex(GO_HMAC_CTX *dest, const GO_HMAC_CTX *src);
+
+// #include <openssl/aes.h>
+typedef struct GO_AES_KEY { char data[244]; } GO_AES_KEY;
+int _goboringcrypto_AES_set_encrypt_key(const uint8_t*, unsigned int, GO_AES_KEY*);
+int _goboringcrypto_AES_set_decrypt_key(const uint8_t*, unsigned int, GO_AES_KEY*);
+void _goboringcrypto_AES_encrypt(const uint8_t*, uint8_t*, const GO_AES_KEY*);
+void _goboringcrypto_AES_decrypt(const uint8_t*, uint8_t*, const GO_AES_KEY*);
+void _goboringcrypto_AES_ctr128_encrypt(const uint8_t*, uint8_t*, size_t, const GO_AES_KEY*, uint8_t*, uint8_t*, unsigned int*);
+enum {
+ GO_AES_ENCRYPT = 1,
+ GO_AES_DECRYPT = 0
+};
+void _goboringcrypto_AES_cbc_encrypt(const uint8_t*, uint8_t*, size_t, const GO_AES_KEY*, uint8_t*, const int);
+
+// #include <openssl/aead.h>
+/*unchecked (opaque)*/ typedef struct GO_EVP_AEAD { char data[1]; } GO_EVP_AEAD;
+/*unchecked (opaque)*/ typedef struct GO_ENGINE { char data[1]; } GO_ENGINE;
+const GO_EVP_AEAD* _goboringcrypto_EVP_aead_aes_128_gcm(void);
+const GO_EVP_AEAD* _goboringcrypto_EVP_aead_aes_256_gcm(void);
+enum {
+ GO_EVP_AEAD_DEFAULT_TAG_LENGTH = 0
+};
+size_t _goboringcrypto_EVP_AEAD_key_length(const GO_EVP_AEAD*);
+size_t _goboringcrypto_EVP_AEAD_nonce_length(const GO_EVP_AEAD*);
+size_t _goboringcrypto_EVP_AEAD_max_overhead(const GO_EVP_AEAD*);
+size_t _goboringcrypto_EVP_AEAD_max_tag_len(const GO_EVP_AEAD*);
+typedef struct GO_EVP_AEAD_CTX { char data[600]; } GO_EVP_AEAD_CTX;
+void _goboringcrypto_EVP_AEAD_CTX_zero(GO_EVP_AEAD_CTX*);
+int _goboringcrypto_EVP_AEAD_CTX_init(GO_EVP_AEAD_CTX*, const GO_EVP_AEAD*, const uint8_t*, size_t, size_t, GO_ENGINE*);
+void _goboringcrypto_EVP_AEAD_CTX_cleanup(GO_EVP_AEAD_CTX*);
+int _goboringcrypto_EVP_AEAD_CTX_seal(const GO_EVP_AEAD_CTX*, uint8_t*, size_t*, size_t, const uint8_t*, size_t, const uint8_t*, size_t, const uint8_t*, size_t);
+int _goboringcrypto_EVP_AEAD_CTX_open(const GO_EVP_AEAD_CTX*, uint8_t*, size_t*, size_t, const uint8_t*, size_t, const uint8_t*, size_t, const uint8_t*, size_t);
+const GO_EVP_AEAD* _goboringcrypto_EVP_aead_aes_128_gcm_tls12(void);
+const GO_EVP_AEAD* _goboringcrypto_EVP_aead_aes_256_gcm_tls12(void);
+enum go_evp_aead_direction_t {
+ go_evp_aead_open = 0,
+ go_evp_aead_seal = 1
+};
+int _goboringcrypto_EVP_AEAD_CTX_init_with_direction(GO_EVP_AEAD_CTX*, const GO_EVP_AEAD*, const uint8_t*, size_t, size_t, enum go_evp_aead_direction_t);
+
+// #include <openssl/bn.h>
+/*unchecked (opaque)*/ typedef struct GO_BN_CTX { char data[1]; } GO_BN_CTX;
+typedef struct GO_BIGNUM { char data[24]; } GO_BIGNUM;
+GO_BIGNUM* _goboringcrypto_BN_new(void);
+void _goboringcrypto_BN_free(GO_BIGNUM*);
+unsigned _goboringcrypto_BN_num_bits(const GO_BIGNUM*);
+unsigned _goboringcrypto_BN_num_bytes(const GO_BIGNUM*);
+int _goboringcrypto_BN_is_negative(const GO_BIGNUM*);
+GO_BIGNUM* _goboringcrypto_BN_bin2bn(const uint8_t*, size_t, GO_BIGNUM*);
+GO_BIGNUM* _goboringcrypto_BN_le2bn(const uint8_t*, size_t, GO_BIGNUM*);
+size_t _goboringcrypto_BN_bn2bin(const GO_BIGNUM*, uint8_t*);
+int _goboringcrypto_BN_bn2le_padded(uint8_t*, size_t, const GO_BIGNUM*);
+int _goboringcrypto_BN_bn2bin_padded(uint8_t*, size_t, const GO_BIGNUM*);
+
+// #include <openssl/ec.h>
+/*unchecked (opaque)*/ typedef struct GO_EC_GROUP { char data[1]; } GO_EC_GROUP;
+GO_EC_GROUP* _goboringcrypto_EC_GROUP_new_by_curve_name(int);
+void _goboringcrypto_EC_GROUP_free(GO_EC_GROUP*);
+
+/*unchecked (opaque)*/ typedef struct GO_EC_POINT { char data[1]; } GO_EC_POINT;
+GO_EC_POINT* _goboringcrypto_EC_POINT_new(const GO_EC_GROUP*);
+int _goboringcrypto_EC_POINT_mul(const GO_EC_GROUP*, GO_EC_POINT*, const GO_BIGNUM*, const GO_EC_POINT*, const GO_BIGNUM*, GO_BN_CTX*);
+void _goboringcrypto_EC_POINT_free(GO_EC_POINT*);
+int _goboringcrypto_EC_POINT_get_affine_coordinates_GFp(const GO_EC_GROUP*, const GO_EC_POINT*, GO_BIGNUM*, GO_BIGNUM*, GO_BN_CTX*);
+int _goboringcrypto_EC_POINT_set_affine_coordinates_GFp(const GO_EC_GROUP*, GO_EC_POINT*, const GO_BIGNUM*, const GO_BIGNUM*, GO_BN_CTX*);
+int _goboringcrypto_EC_POINT_oct2point(const GO_EC_GROUP*, GO_EC_POINT*, const uint8_t*, size_t, GO_BN_CTX*);
+GO_EC_POINT* _goboringcrypto_EC_POINT_dup(const GO_EC_POINT*, const GO_EC_GROUP*);
+int _goboringcrypto_EC_POINT_is_on_curve(const GO_EC_GROUP*, const GO_EC_POINT*, GO_BN_CTX*);
+#ifndef OPENSSL_HEADER_EC_H
+typedef enum {
+ GO_POINT_CONVERSION_COMPRESSED = 2,
+ GO_POINT_CONVERSION_UNCOMPRESSED = 4,
+ GO_POINT_CONVERSION_HYBRID = 6,
+} go_point_conversion_form_t;
+#endif
+size_t _goboringcrypto_EC_POINT_point2oct(const GO_EC_GROUP*, const GO_EC_POINT*, go_point_conversion_form_t, uint8_t*, size_t, GO_BN_CTX*);
+
+// #include <openssl/ec_key.h>
+/*unchecked (opaque)*/ typedef struct GO_EC_KEY { char data[1]; } GO_EC_KEY;
+GO_EC_KEY* _goboringcrypto_EC_KEY_new(void);
+GO_EC_KEY* _goboringcrypto_EC_KEY_new_by_curve_name(int);
+void _goboringcrypto_EC_KEY_free(GO_EC_KEY*);
+const GO_EC_GROUP* _goboringcrypto_EC_KEY_get0_group(const GO_EC_KEY*);
+int _goboringcrypto_EC_KEY_generate_key_fips(GO_EC_KEY*);
+int _goboringcrypto_EC_KEY_set_private_key(GO_EC_KEY*, const GO_BIGNUM*);
+int _goboringcrypto_EC_KEY_set_public_key(GO_EC_KEY*, const GO_EC_POINT*);
+int _goboringcrypto_EC_KEY_is_opaque(const GO_EC_KEY*);
+const GO_BIGNUM* _goboringcrypto_EC_KEY_get0_private_key(const GO_EC_KEY*);
+const GO_EC_POINT* _goboringcrypto_EC_KEY_get0_public_key(const GO_EC_KEY*);
+// TODO: EC_KEY_check_fips?
+
+// #include <openssl/ecdh.h>
+int _goboringcrypto_ECDH_compute_key_fips(uint8_t*, size_t, const GO_EC_POINT*, const GO_EC_KEY*);
+
+// #include <openssl/ecdsa.h>
+typedef struct GO_ECDSA_SIG { char data[16]; } GO_ECDSA_SIG;
+GO_ECDSA_SIG* _goboringcrypto_ECDSA_SIG_new(void);
+void _goboringcrypto_ECDSA_SIG_free(GO_ECDSA_SIG*);
+GO_ECDSA_SIG* _goboringcrypto_ECDSA_do_sign(const uint8_t*, size_t, const GO_EC_KEY*);
+int _goboringcrypto_ECDSA_do_verify(const uint8_t*, size_t, const GO_ECDSA_SIG*, const GO_EC_KEY*);
+int _goboringcrypto_ECDSA_sign(int, const uint8_t*, size_t, uint8_t*, unsigned int*, const GO_EC_KEY*);
+size_t _goboringcrypto_ECDSA_size(const GO_EC_KEY*);
+int _goboringcrypto_ECDSA_verify(int, const uint8_t*, size_t, const uint8_t*, size_t, const GO_EC_KEY*);
+
+// #include <openssl/rsa.h>
+
+// Note: order of struct fields here is unchecked.
+typedef struct GO_RSA { void *meth; GO_BIGNUM *n, *e, *d, *p, *q, *dmp1, *dmq1, *iqmp; char data[168]; } GO_RSA;
+/*unchecked (opaque)*/ typedef struct GO_BN_GENCB { char data[1]; } GO_BN_GENCB;
+GO_RSA* _goboringcrypto_RSA_new(void);
+void _goboringcrypto_RSA_free(GO_RSA*);
+void _goboringcrypto_RSA_get0_key(const GO_RSA*, const GO_BIGNUM **n, const GO_BIGNUM **e, const GO_BIGNUM **d);
+void _goboringcrypto_RSA_get0_factors(const GO_RSA*, const GO_BIGNUM **p, const GO_BIGNUM **q);
+void _goboringcrypto_RSA_get0_crt_params(const GO_RSA*, const GO_BIGNUM **dmp1, const GO_BIGNUM **dmq1, const GO_BIGNUM **iqmp);
+int _goboringcrypto_RSA_generate_key_ex(GO_RSA*, int, const GO_BIGNUM*, GO_BN_GENCB*);
+int _goboringcrypto_RSA_generate_key_fips(GO_RSA*, int, GO_BN_GENCB*);
+enum {
+ GO_RSA_PKCS1_PADDING = 1,
+ GO_RSA_NO_PADDING = 3,
+ GO_RSA_PKCS1_OAEP_PADDING = 4,
+ GO_RSA_PKCS1_PSS_PADDING = 6,
+};
+int _goboringcrypto_RSA_encrypt(GO_RSA*, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding);
+int _goboringcrypto_RSA_decrypt(GO_RSA*, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding);
+int _goboringcrypto_RSA_sign(int hash_nid, const uint8_t* in, unsigned int in_len, uint8_t *out, unsigned int *out_len, GO_RSA*);
+int _goboringcrypto_RSA_sign_pss_mgf1(GO_RSA*, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, const GO_EVP_MD *md, const GO_EVP_MD *mgf1_md, int salt_len);
+int _goboringcrypto_RSA_sign_raw(GO_RSA*, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding);
+int _goboringcrypto_RSA_verify(int hash_nid, const uint8_t *msg, size_t msg_len, const uint8_t *sig, size_t sig_len, GO_RSA*);
+int _goboringcrypto_RSA_verify_pss_mgf1(GO_RSA*, const uint8_t *msg, size_t msg_len, const GO_EVP_MD *md, const GO_EVP_MD *mgf1_md, int salt_len, const uint8_t *sig, size_t sig_len);
+int _goboringcrypto_RSA_verify_raw(GO_RSA*, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding);
+unsigned _goboringcrypto_RSA_size(const GO_RSA*);
+int _goboringcrypto_RSA_is_opaque(const GO_RSA*);
+int _goboringcrypto_RSA_check_key(const GO_RSA*);
+int _goboringcrypto_RSA_check_fips(GO_RSA*);
+GO_RSA* _goboringcrypto_RSA_public_key_from_bytes(const uint8_t*, size_t);
+GO_RSA* _goboringcrypto_RSA_private_key_from_bytes(const uint8_t*, size_t);
+int _goboringcrypto_RSA_public_key_to_bytes(uint8_t**, size_t*, const GO_RSA*);
+int _goboringcrypto_RSA_private_key_to_bytes(uint8_t**, size_t*, const GO_RSA*);
+
+// #include <openssl/evp.h>
+/*unchecked (opaque)*/ typedef struct GO_EVP_PKEY { char data[1]; } GO_EVP_PKEY;
+GO_EVP_PKEY* _goboringcrypto_EVP_PKEY_new(void);
+void _goboringcrypto_EVP_PKEY_free(GO_EVP_PKEY*);
+int _goboringcrypto_EVP_PKEY_set1_RSA(GO_EVP_PKEY*, GO_RSA*);
+
+/*unchecked (opaque)*/ typedef struct GO_EVP_PKEY_CTX { char data[1]; } GO_EVP_PKEY_CTX;
+
+GO_EVP_PKEY_CTX* _goboringcrypto_EVP_PKEY_CTX_new(GO_EVP_PKEY*, GO_ENGINE*);
+void _goboringcrypto_EVP_PKEY_CTX_free(GO_EVP_PKEY_CTX*);
+int _goboringcrypto_EVP_PKEY_CTX_set0_rsa_oaep_label(GO_EVP_PKEY_CTX*, uint8_t*, size_t);
+int _goboringcrypto_EVP_PKEY_CTX_set_rsa_oaep_md(GO_EVP_PKEY_CTX*, const GO_EVP_MD*);
+int _goboringcrypto_EVP_PKEY_CTX_set_rsa_padding(GO_EVP_PKEY_CTX*, int padding);
+int _goboringcrypto_EVP_PKEY_decrypt(GO_EVP_PKEY_CTX*, uint8_t*, size_t*, const uint8_t*, size_t);
+int _goboringcrypto_EVP_PKEY_encrypt(GO_EVP_PKEY_CTX*, uint8_t*, size_t*, const uint8_t*, size_t);
+int _goboringcrypto_EVP_PKEY_decrypt_init(GO_EVP_PKEY_CTX*);
+int _goboringcrypto_EVP_PKEY_encrypt_init(GO_EVP_PKEY_CTX*);
+int _goboringcrypto_EVP_PKEY_CTX_set_rsa_mgf1_md(GO_EVP_PKEY_CTX*, const GO_EVP_MD*);
+int _goboringcrypto_EVP_PKEY_CTX_set_rsa_pss_saltlen(GO_EVP_PKEY_CTX*, int);
+int _goboringcrypto_EVP_PKEY_sign_init(GO_EVP_PKEY_CTX*);
+int _goboringcrypto_EVP_PKEY_verify_init(GO_EVP_PKEY_CTX*);
+int _goboringcrypto_EVP_PKEY_sign(GO_EVP_PKEY_CTX*, uint8_t*, size_t*, const uint8_t*, size_t);
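
Everything in this header is consumed from Go through cgo. A minimal sketch of the calling pattern used throughout the package (the real self-test and FIPS checks happen during package init in boring.go, not shown here):

package boring

// #include "goboringcrypto.h"
import "C"

// fipsModeEnabled reports whether the linked BoringCrypto module is running
// in FIPS mode, by calling the prefixed symbol declared in the header above.
func fipsModeEnabled() bool {
	return C._goboringcrypto_FIPS_mode() == 1
}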
diff --git a/src/crypto/internal/boring/hmac.go b/src/crypto/internal/boring/hmac.go
new file mode 100644
index 0000000..6241a65
--- /dev/null
+++ b/src/crypto/internal/boring/hmac.go
@@ -0,0 +1,153 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+
+package boring
+
+// #include "goboringcrypto.h"
+import "C"
+import (
+ "bytes"
+ "crypto"
+ "hash"
+ "runtime"
+ "unsafe"
+)
+
+// hashToMD converts a hash.Hash implementation from this package
+// to a BoringCrypto *C.GO_EVP_MD.
+func hashToMD(h hash.Hash) *C.GO_EVP_MD {
+ switch h.(type) {
+ case *sha1Hash:
+ return C._goboringcrypto_EVP_sha1()
+ case *sha224Hash:
+ return C._goboringcrypto_EVP_sha224()
+ case *sha256Hash:
+ return C._goboringcrypto_EVP_sha256()
+ case *sha384Hash:
+ return C._goboringcrypto_EVP_sha384()
+ case *sha512Hash:
+ return C._goboringcrypto_EVP_sha512()
+ }
+ return nil
+}
+
+// cryptoHashToMD converts a crypto.Hash
+// to a BoringCrypto *C.GO_EVP_MD.
+func cryptoHashToMD(ch crypto.Hash) *C.GO_EVP_MD {
+ switch ch {
+ case crypto.MD5:
+ return C._goboringcrypto_EVP_md5()
+ case crypto.MD5SHA1:
+ return C._goboringcrypto_EVP_md5_sha1()
+ case crypto.SHA1:
+ return C._goboringcrypto_EVP_sha1()
+ case crypto.SHA224:
+ return C._goboringcrypto_EVP_sha224()
+ case crypto.SHA256:
+ return C._goboringcrypto_EVP_sha256()
+ case crypto.SHA384:
+ return C._goboringcrypto_EVP_sha384()
+ case crypto.SHA512:
+ return C._goboringcrypto_EVP_sha512()
+ }
+ return nil
+}
+
+// NewHMAC returns a new HMAC using BoringCrypto.
+// The function h must return a hash implemented by
+// BoringCrypto (for example, h could be boring.NewSHA256).
+// If h is not recognized, NewHMAC returns nil.
+func NewHMAC(h func() hash.Hash, key []byte) hash.Hash {
+ ch := h()
+ md := hashToMD(ch)
+ if md == nil {
+ return nil
+ }
+
+ // Note: Could hash down long keys here using EVP_Digest.
+ hkey := bytes.Clone(key)
+ hmac := &boringHMAC{
+ md: md,
+ size: ch.Size(),
+ blockSize: ch.BlockSize(),
+ key: hkey,
+ }
+ hmac.Reset()
+ return hmac
+}
+
+type boringHMAC struct {
+ md *C.GO_EVP_MD
+ ctx C.GO_HMAC_CTX
+ ctx2 C.GO_HMAC_CTX
+ size int
+ blockSize int
+ key []byte
+ sum []byte
+ needCleanup bool
+}
+
+func (h *boringHMAC) Reset() {
+ if h.needCleanup {
+ C._goboringcrypto_HMAC_CTX_cleanup(&h.ctx)
+ } else {
+ h.needCleanup = true
+ // Note: Because of the finalizer, any time h.ctx is passed to cgo,
+ // that call must be followed by a call to runtime.KeepAlive(h),
+ // to make sure h is not collected (and finalized) before the cgo
+ // call returns.
+ runtime.SetFinalizer(h, (*boringHMAC).finalize)
+ }
+ C._goboringcrypto_HMAC_CTX_init(&h.ctx)
+
+ if C._goboringcrypto_HMAC_Init(&h.ctx, unsafe.Pointer(base(h.key)), C.int(len(h.key)), h.md) == 0 {
+ panic("boringcrypto: HMAC_Init failed")
+ }
+ if int(C._goboringcrypto_HMAC_size(&h.ctx)) != h.size {
+ println("boringcrypto: HMAC size:", C._goboringcrypto_HMAC_size(&h.ctx), "!=", h.size)
+ panic("boringcrypto: HMAC size mismatch")
+ }
+ runtime.KeepAlive(h) // Next line will keep h alive too; just making doubly sure.
+ h.sum = nil
+}
+
+func (h *boringHMAC) finalize() {
+ C._goboringcrypto_HMAC_CTX_cleanup(&h.ctx)
+}
+
+func (h *boringHMAC) Write(p []byte) (int, error) {
+ if len(p) > 0 {
+ C._goboringcrypto_HMAC_Update(&h.ctx, (*C.uint8_t)(unsafe.Pointer(&p[0])), C.size_t(len(p)))
+ }
+ runtime.KeepAlive(h)
+ return len(p), nil
+}
+
+func (h *boringHMAC) Size() int {
+ return h.size
+}
+
+func (h *boringHMAC) BlockSize() int {
+ return h.blockSize
+}
+
+func (h *boringHMAC) Sum(in []byte) []byte {
+ if h.sum == nil {
+ size := h.Size()
+ h.sum = make([]byte, size)
+ }
+ // Make copy of context because Go hash.Hash mandates
+ // that Sum has no effect on the underlying stream.
+ // In particular it is OK to Sum, then Write more, then Sum again,
+ // and the second Sum acts as if the first didn't happen.
+ C._goboringcrypto_HMAC_CTX_init(&h.ctx2)
+ if C._goboringcrypto_HMAC_CTX_copy_ex(&h.ctx2, &h.ctx) == 0 {
+ panic("boringcrypto: HMAC_CTX_copy_ex failed")
+ }
+ C._goboringcrypto_HMAC_Final(&h.ctx2, (*C.uint8_t)(unsafe.Pointer(&h.sum[0])), nil)
+ C._goboringcrypto_HMAC_CTX_cleanup(&h.ctx2)
+ return append(in, h.sum...)
+}
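
Driving the type above through the standard hash.Hash interface looks like this (sketch; internal package):

package boring

func hmacSHA256(key, data []byte) []byte {
	h := NewHMAC(NewSHA256, key) // nil if the hash is not BoringCrypto-backed
	if h == nil {
		return nil // a caller would fall back to crypto/hmac here
	}
	h.Write(data)
	return h.Sum(nil) // Sum copies the context, so further Writes stay valid
}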
diff --git a/src/crypto/internal/boring/notboring.go b/src/crypto/internal/boring/notboring.go
new file mode 100644
index 0000000..1c5e4c7
--- /dev/null
+++ b/src/crypto/internal/boring/notboring.go
@@ -0,0 +1,122 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan && cgo)
+
+package boring
+
+import (
+ "crypto"
+ "crypto/cipher"
+ "crypto/internal/boring/sig"
+ "hash"
+)
+
+const available = false
+
+// Unreachable marks code that should be unreachable
+// when BoringCrypto is in use. It is a no-op without BoringCrypto.
+func Unreachable() {
+ // Code that's unreachable when using BoringCrypto
+ // is exactly the code we want to detect for reporting
+ // standard Go crypto.
+ sig.StandardCrypto()
+}
+
+// UnreachableExceptTests marks code that should be unreachable
+// when BoringCrypto is in use. It is a no-op without BoringCrypto.
+func UnreachableExceptTests() {}
+
+type randReader int
+
+func (randReader) Read(b []byte) (int, error) { panic("boringcrypto: not available") }
+
+const RandReader = randReader(0)
+
+func NewSHA1() hash.Hash { panic("boringcrypto: not available") }
+func NewSHA224() hash.Hash { panic("boringcrypto: not available") }
+func NewSHA256() hash.Hash { panic("boringcrypto: not available") }
+func NewSHA384() hash.Hash { panic("boringcrypto: not available") }
+func NewSHA512() hash.Hash { panic("boringcrypto: not available") }
+
+func SHA1([]byte) [20]byte { panic("boringcrypto: not available") }
+func SHA224([]byte) [28]byte { panic("boringcrypto: not available") }
+func SHA256([]byte) [32]byte { panic("boringcrypto: not available") }
+func SHA384([]byte) [48]byte { panic("boringcrypto: not available") }
+func SHA512([]byte) [64]byte { panic("boringcrypto: not available") }
+
+func NewHMAC(h func() hash.Hash, key []byte) hash.Hash { panic("boringcrypto: not available") }
+
+func NewAESCipher(key []byte) (cipher.Block, error) { panic("boringcrypto: not available") }
+func NewGCMTLS(cipher.Block) (cipher.AEAD, error) { panic("boringcrypto: not available") }
+
+type PublicKeyECDSA struct{ _ int }
+type PrivateKeyECDSA struct{ _ int }
+
+func GenerateKeyECDSA(curve string) (X, Y, D BigInt, err error) {
+ panic("boringcrypto: not available")
+}
+func NewPrivateKeyECDSA(curve string, X, Y, D BigInt) (*PrivateKeyECDSA, error) {
+ panic("boringcrypto: not available")
+}
+func NewPublicKeyECDSA(curve string, X, Y BigInt) (*PublicKeyECDSA, error) {
+ panic("boringcrypto: not available")
+}
+func SignMarshalECDSA(priv *PrivateKeyECDSA, hash []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func VerifyECDSA(pub *PublicKeyECDSA, hash []byte, sig []byte) bool {
+ panic("boringcrypto: not available")
+}
+
+type PublicKeyRSA struct{ _ int }
+type PrivateKeyRSA struct{ _ int }
+
+func DecryptRSAOAEP(h, mgfHash hash.Hash, priv *PrivateKeyRSA, ciphertext, label []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func DecryptRSAPKCS1(priv *PrivateKeyRSA, ciphertext []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func DecryptRSANoPadding(priv *PrivateKeyRSA, ciphertext []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func EncryptRSAOAEP(h, mgfHash hash.Hash, pub *PublicKeyRSA, msg, label []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func EncryptRSAPKCS1(pub *PublicKeyRSA, msg []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func EncryptRSANoPadding(pub *PublicKeyRSA, msg []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func GenerateKeyRSA(bits int) (N, E, D, P, Q, Dp, Dq, Qinv BigInt, err error) {
+ panic("boringcrypto: not available")
+}
+func NewPrivateKeyRSA(N, E, D, P, Q, Dp, Dq, Qinv BigInt) (*PrivateKeyRSA, error) {
+ panic("boringcrypto: not available")
+}
+func NewPublicKeyRSA(N, E BigInt) (*PublicKeyRSA, error) { panic("boringcrypto: not available") }
+func SignRSAPKCS1v15(priv *PrivateKeyRSA, h crypto.Hash, hashed []byte) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func SignRSAPSS(priv *PrivateKeyRSA, h crypto.Hash, hashed []byte, saltLen int) ([]byte, error) {
+ panic("boringcrypto: not available")
+}
+func VerifyRSAPKCS1v15(pub *PublicKeyRSA, h crypto.Hash, hashed, sig []byte) error {
+ panic("boringcrypto: not available")
+}
+func VerifyRSAPSS(pub *PublicKeyRSA, h crypto.Hash, hashed, sig []byte, saltLen int) error {
+ panic("boringcrypto: not available")
+}
+
+type PublicKeyECDH struct{}
+type PrivateKeyECDH struct{}
+
+func ECDH(*PrivateKeyECDH, *PublicKeyECDH) ([]byte, error) { panic("boringcrypto: not available") }
+func GenerateKeyECDH(string) (*PrivateKeyECDH, []byte, error) { panic("boringcrypto: not available") }
+func NewPrivateKeyECDH(string, []byte) (*PrivateKeyECDH, error) { panic("boringcrypto: not available") }
+func NewPublicKeyECDH(string, []byte) (*PublicKeyECDH, error) { panic("boringcrypto: not available") }
+func (*PublicKeyECDH) Bytes() []byte { panic("boringcrypto: not available") }
+func (*PrivateKeyECDH) PublicKey() (*PublicKeyECDH, error) { panic("boringcrypto: not available") }
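
The available constant above feeds the package's Enabled flag (doc.go, per the diffstat), which the standard crypto packages use to pick an implementation at build time. A sketch of that caller-side pattern, with newGeneric as a hypothetical stand-in for the pure-Go code path:

package sketch

import (
	"crypto/internal/boring" // importable only inside the standard library
	"hash"
)

func newSHA256() hash.Hash {
	if boring.Enabled { // false whenever this notboring.go build is selected
		return boring.NewSHA256()
	}
	return newGeneric()
}

func newGeneric() hash.Hash { panic("sketch: pure-Go fallback elided") }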
diff --git a/src/crypto/internal/boring/rand.go b/src/crypto/internal/boring/rand.go
new file mode 100644
index 0000000..7639c01
--- /dev/null
+++ b/src/crypto/internal/boring/rand.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+
+package boring
+
+// #include "goboringcrypto.h"
+import "C"
+import "unsafe"
+
+type randReader int
+
+func (randReader) Read(b []byte) (int, error) {
+ // Note: RAND_bytes should never fail; the return value exists only for historical reasons.
+ // We check it even so.
+ if len(b) > 0 && C._goboringcrypto_RAND_bytes((*C.uint8_t)(unsafe.Pointer(&b[0])), C.size_t(len(b))) == 0 {
+ return 0, fail("RAND_bytes")
+ }
+ return len(b), nil
+}
+
+const RandReader = randReader(0)
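
RandReader satisfies io.Reader, which is how crypto/rand swaps it in when BoringCrypto is enabled. A short sketch:

package boring

import "io"

func randomNonce() ([]byte, error) {
	nonce := make([]byte, 12)
	if _, err := io.ReadFull(RandReader, nonce); err != nil {
		return nil, err
	}
	return nonce, nil
}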
diff --git a/src/crypto/internal/boring/rsa.go b/src/crypto/internal/boring/rsa.go
new file mode 100644
index 0000000..fa693ea
--- /dev/null
+++ b/src/crypto/internal/boring/rsa.go
@@ -0,0 +1,379 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+
+package boring
+
+// #include "goboringcrypto.h"
+import "C"
+import (
+ "crypto"
+ "crypto/subtle"
+ "errors"
+ "hash"
+ "runtime"
+ "strconv"
+ "unsafe"
+)
+
+func GenerateKeyRSA(bits int) (N, E, D, P, Q, Dp, Dq, Qinv BigInt, err error) {
+ bad := func(e error) (N, E, D, P, Q, Dp, Dq, Qinv BigInt, err error) {
+ return nil, nil, nil, nil, nil, nil, nil, nil, e
+ }
+
+ key := C._goboringcrypto_RSA_new()
+ if key == nil {
+ return bad(fail("RSA_new"))
+ }
+ defer C._goboringcrypto_RSA_free(key)
+
+ if C._goboringcrypto_RSA_generate_key_fips(key, C.int(bits), nil) == 0 {
+ return bad(fail("RSA_generate_key_fips"))
+ }
+
+ var n, e, d, p, q, dp, dq, qinv *C.GO_BIGNUM
+ C._goboringcrypto_RSA_get0_key(key, &n, &e, &d)
+ C._goboringcrypto_RSA_get0_factors(key, &p, &q)
+ C._goboringcrypto_RSA_get0_crt_params(key, &dp, &dq, &qinv)
+ return bnToBig(n), bnToBig(e), bnToBig(d), bnToBig(p), bnToBig(q), bnToBig(dp), bnToBig(dq), bnToBig(qinv), nil
+}
+
+type PublicKeyRSA struct {
+ // _key MUST NOT be accessed directly. Instead, use the withKey method.
+ _key *C.GO_RSA
+}
+
+func NewPublicKeyRSA(N, E BigInt) (*PublicKeyRSA, error) {
+ key := C._goboringcrypto_RSA_new()
+ if key == nil {
+ return nil, fail("RSA_new")
+ }
+ if !bigToBn(&key.n, N) ||
+ !bigToBn(&key.e, E) {
+ return nil, fail("BN_bin2bn")
+ }
+ k := &PublicKeyRSA{_key: key}
+ runtime.SetFinalizer(k, (*PublicKeyRSA).finalize)
+ return k, nil
+}
+
+func (k *PublicKeyRSA) finalize() {
+ C._goboringcrypto_RSA_free(k._key)
+}
+
+func (k *PublicKeyRSA) withKey(f func(*C.GO_RSA) C.int) C.int {
+ // Because of the finalizer, any time _key is passed to cgo, that call must
+ // be followed by a call to runtime.KeepAlive, to make sure k is not
+ // collected (and finalized) before the cgo call returns.
+ defer runtime.KeepAlive(k)
+ return f(k._key)
+}
+
+type PrivateKeyRSA struct {
+ // _key MUST NOT be accessed directly. Instead, use the withKey method.
+ _key *C.GO_RSA
+}
+
+func NewPrivateKeyRSA(N, E, D, P, Q, Dp, Dq, Qinv BigInt) (*PrivateKeyRSA, error) {
+ key := C._goboringcrypto_RSA_new()
+ if key == nil {
+ return nil, fail("RSA_new")
+ }
+ if !bigToBn(&key.n, N) ||
+ !bigToBn(&key.e, E) ||
+ !bigToBn(&key.d, D) ||
+ !bigToBn(&key.p, P) ||
+ !bigToBn(&key.q, Q) ||
+ !bigToBn(&key.dmp1, Dp) ||
+ !bigToBn(&key.dmq1, Dq) ||
+ !bigToBn(&key.iqmp, Qinv) {
+ return nil, fail("BN_bin2bn")
+ }
+ k := &PrivateKeyRSA{_key: key}
+ runtime.SetFinalizer(k, (*PrivateKeyRSA).finalize)
+ return k, nil
+}
+
+func (k *PrivateKeyRSA) finalize() {
+ C._goboringcrypto_RSA_free(k._key)
+}
+
+func (k *PrivateKeyRSA) withKey(f func(*C.GO_RSA) C.int) C.int {
+ // Because of the finalizer, any time _key is passed to cgo, that call must
+ // be followed by a call to runtime.KeepAlive, to make sure k is not
+ // collected (and finalized) before the cgo call returns.
+ defer runtime.KeepAlive(k)
+ return f(k._key)
+}
+
+func setupRSA(withKey func(func(*C.GO_RSA) C.int) C.int,
+ padding C.int, h, mgfHash hash.Hash, label []byte, saltLen int, ch crypto.Hash,
+ init func(*C.GO_EVP_PKEY_CTX) C.int) (pkey *C.GO_EVP_PKEY, ctx *C.GO_EVP_PKEY_CTX, err error) {
+ defer func() {
+ if err != nil {
+ if pkey != nil {
+ C._goboringcrypto_EVP_PKEY_free(pkey)
+ pkey = nil
+ }
+ if ctx != nil {
+ C._goboringcrypto_EVP_PKEY_CTX_free(ctx)
+ ctx = nil
+ }
+ }
+ }()
+
+ pkey = C._goboringcrypto_EVP_PKEY_new()
+ if pkey == nil {
+ return nil, nil, fail("EVP_PKEY_new")
+ }
+ if withKey(func(key *C.GO_RSA) C.int {
+ return C._goboringcrypto_EVP_PKEY_set1_RSA(pkey, key)
+ }) == 0 {
+ return nil, nil, fail("EVP_PKEY_set1_RSA")
+ }
+ ctx = C._goboringcrypto_EVP_PKEY_CTX_new(pkey, nil)
+ if ctx == nil {
+ return nil, nil, fail("EVP_PKEY_CTX_new")
+ }
+ if init(ctx) == 0 {
+ return nil, nil, fail("EVP_PKEY_operation_init")
+ }
+ if C._goboringcrypto_EVP_PKEY_CTX_set_rsa_padding(ctx, padding) == 0 {
+ return nil, nil, fail("EVP_PKEY_CTX_set_rsa_padding")
+ }
+ if padding == C.GO_RSA_PKCS1_OAEP_PADDING {
+ md := hashToMD(h)
+ if md == nil {
+ return nil, nil, errors.New("crypto/rsa: unsupported hash function")
+ }
+ mgfMD := hashToMD(mgfHash)
+ if mgfMD == nil {
+ return nil, nil, errors.New("crypto/rsa: unsupported hash function")
+ }
+ if C._goboringcrypto_EVP_PKEY_CTX_set_rsa_oaep_md(ctx, md) == 0 {
+ return nil, nil, fail("EVP_PKEY_set_rsa_oaep_md")
+ }
+ if C._goboringcrypto_EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, mgfMD) == 0 {
+ return nil, nil, fail("EVP_PKEY_set_rsa_mgf1_md")
+ }
+ // ctx takes ownership of label, so malloc a copy for BoringCrypto to free.
+ clabel := (*C.uint8_t)(C._goboringcrypto_OPENSSL_malloc(C.size_t(len(label))))
+ if clabel == nil {
+ return nil, nil, fail("OPENSSL_malloc")
+ }
+ copy((*[1 << 30]byte)(unsafe.Pointer(clabel))[:len(label)], label)
+ if C._goboringcrypto_EVP_PKEY_CTX_set0_rsa_oaep_label(ctx, clabel, C.size_t(len(label))) == 0 {
+ return nil, nil, fail("EVP_PKEY_CTX_set0_rsa_oaep_label")
+ }
+ }
+ if padding == C.GO_RSA_PKCS1_PSS_PADDING {
+ if saltLen != 0 {
+ if C._goboringcrypto_EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, C.int(saltLen)) == 0 {
+ return nil, nil, fail("EVP_PKEY_set_rsa_pss_saltlen")
+ }
+ }
+ md := cryptoHashToMD(ch)
+ if md == nil {
+ return nil, nil, errors.New("crypto/rsa: unsupported hash function")
+ }
+ if C._goboringcrypto_EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, md) == 0 {
+ return nil, nil, fail("EVP_PKEY_set_rsa_mgf1_md")
+ }
+ }
+
+ return pkey, ctx, nil
+}
+
+func cryptRSA(withKey func(func(*C.GO_RSA) C.int) C.int,
+ padding C.int, h, mgfHash hash.Hash, label []byte, saltLen int, ch crypto.Hash,
+ init func(*C.GO_EVP_PKEY_CTX) C.int,
+ crypt func(*C.GO_EVP_PKEY_CTX, *C.uint8_t, *C.size_t, *C.uint8_t, C.size_t) C.int,
+ in []byte) ([]byte, error) {
+
+ pkey, ctx, err := setupRSA(withKey, padding, h, mgfHash, label, saltLen, ch, init)
+ if err != nil {
+ return nil, err
+ }
+ defer C._goboringcrypto_EVP_PKEY_free(pkey)
+ defer C._goboringcrypto_EVP_PKEY_CTX_free(ctx)
+
+ var outLen C.size_t
+ if crypt(ctx, nil, &outLen, base(in), C.size_t(len(in))) == 0 {
+ return nil, fail("EVP_PKEY_decrypt/encrypt")
+ }
+ out := make([]byte, outLen)
+ if crypt(ctx, base(out), &outLen, base(in), C.size_t(len(in))) == 0 {
+ return nil, fail("EVP_PKEY_decrypt/encrypt")
+ }
+ return out[:outLen], nil
+}
+
+func DecryptRSAOAEP(h, mgfHash hash.Hash, priv *PrivateKeyRSA, ciphertext, label []byte) ([]byte, error) {
+ return cryptRSA(priv.withKey, C.GO_RSA_PKCS1_OAEP_PADDING, h, mgfHash, label, 0, 0, decryptInit, decrypt, ciphertext)
+}
+
+func EncryptRSAOAEP(h, mgfHash hash.Hash, pub *PublicKeyRSA, msg, label []byte) ([]byte, error) {
+ return cryptRSA(pub.withKey, C.GO_RSA_PKCS1_OAEP_PADDING, h, mgfHash, label, 0, 0, encryptInit, encrypt, msg)
+}
+
+func DecryptRSAPKCS1(priv *PrivateKeyRSA, ciphertext []byte) ([]byte, error) {
+ return cryptRSA(priv.withKey, C.GO_RSA_PKCS1_PADDING, nil, nil, nil, 0, 0, decryptInit, decrypt, ciphertext)
+}
+
+func EncryptRSAPKCS1(pub *PublicKeyRSA, msg []byte) ([]byte, error) {
+ return cryptRSA(pub.withKey, C.GO_RSA_PKCS1_PADDING, nil, nil, nil, 0, 0, encryptInit, encrypt, msg)
+}
+
+func DecryptRSANoPadding(priv *PrivateKeyRSA, ciphertext []byte) ([]byte, error) {
+ return cryptRSA(priv.withKey, C.GO_RSA_NO_PADDING, nil, nil, nil, 0, 0, decryptInit, decrypt, ciphertext)
+}
+
+func EncryptRSANoPadding(pub *PublicKeyRSA, msg []byte) ([]byte, error) {
+ return cryptRSA(pub.withKey, C.GO_RSA_NO_PADDING, nil, nil, nil, 0, 0, encryptInit, encrypt, msg)
+}
+
+// These dumb wrappers work around the fact that cgo functions cannot be used as values directly.
+
+func decryptInit(ctx *C.GO_EVP_PKEY_CTX) C.int {
+ return C._goboringcrypto_EVP_PKEY_decrypt_init(ctx)
+}
+
+func decrypt(ctx *C.GO_EVP_PKEY_CTX, out *C.uint8_t, outLen *C.size_t, in *C.uint8_t, inLen C.size_t) C.int {
+ return C._goboringcrypto_EVP_PKEY_decrypt(ctx, out, outLen, in, inLen)
+}
+
+func encryptInit(ctx *C.GO_EVP_PKEY_CTX) C.int {
+ return C._goboringcrypto_EVP_PKEY_encrypt_init(ctx)
+}
+
+func encrypt(ctx *C.GO_EVP_PKEY_CTX, out *C.uint8_t, outLen *C.size_t, in *C.uint8_t, inLen C.size_t) C.int {
+ return C._goboringcrypto_EVP_PKEY_encrypt(ctx, out, outLen, in, inLen)
+}
+
+var invalidSaltLenErr = errors.New("crypto/rsa: PSSOptions.SaltLength cannot be negative")
+
+func SignRSAPSS(priv *PrivateKeyRSA, h crypto.Hash, hashed []byte, saltLen int) ([]byte, error) {
+ md := cryptoHashToMD(h)
+ if md == nil {
+ return nil, errors.New("crypto/rsa: unsupported hash function")
+ }
+
+ // A salt length of -2 is valid in BoringSSL, but not in crypto/rsa, so reject
+ // it, and lengths < -2, before we convert to the BoringSSL sentinel values.
+ if saltLen <= -2 {
+ return nil, invalidSaltLenErr
+ }
+
+ // BoringSSL uses sentinel salt length values like we do, but the values don't
+ // fully match what we use. We both use -1 for salt length equal to hash length,
+ // but BoringSSL uses -2 to mean maximal size where we use 0. In the latter
+ // case convert to the BoringSSL version.
+ if saltLen == 0 {
+ saltLen = -2
+ }
+
+ var out []byte
+ var outLen C.size_t
+ if priv.withKey(func(key *C.GO_RSA) C.int {
+ out = make([]byte, C._goboringcrypto_RSA_size(key))
+ return C._goboringcrypto_RSA_sign_pss_mgf1(key, &outLen, base(out), C.size_t(len(out)),
+ base(hashed), C.size_t(len(hashed)), md, nil, C.int(saltLen))
+ }) == 0 {
+ return nil, fail("RSA_sign_pss_mgf1")
+ }
+
+ return out[:outLen], nil
+}
+
+func VerifyRSAPSS(pub *PublicKeyRSA, h crypto.Hash, hashed, sig []byte, saltLen int) error {
+ md := cryptoHashToMD(h)
+ if md == nil {
+ return errors.New("crypto/rsa: unsupported hash function")
+ }
+
+ // A salt length of -2 is valid in BoringSSL, but not in crypto/rsa, so reject
+ // it, and lengths < -2, before we convert to the BoringSSL sentinel values.
+ if saltLen <= -2 {
+ return invalidSaltLenErr
+ }
+
+ // BoringSSL uses sentinel salt length values like we do, but the values don't
+ // fully match what we use. We both use -1 for salt length equal to hash length,
+ // but BoringSSL uses -2 to mean maximal size where we use 0. In the latter
+ // case convert to the BoringSSL version.
+ if saltLen == 0 {
+ saltLen = -2
+ }
+
+ if pub.withKey(func(key *C.GO_RSA) C.int {
+ return C._goboringcrypto_RSA_verify_pss_mgf1(key, base(hashed), C.size_t(len(hashed)),
+ md, nil, C.int(saltLen), base(sig), C.size_t(len(sig)))
+ }) == 0 {
+ return fail("RSA_verify_pss_mgf1")
+ }
+ return nil
+}
+
+func SignRSAPKCS1v15(priv *PrivateKeyRSA, h crypto.Hash, hashed []byte) ([]byte, error) {
+ if h == 0 {
+ // No hashing.
+ var out []byte
+ var outLen C.size_t
+ if priv.withKey(func(key *C.GO_RSA) C.int {
+ out = make([]byte, C._goboringcrypto_RSA_size(key))
+ return C._goboringcrypto_RSA_sign_raw(key, &outLen, base(out), C.size_t(len(out)),
+ base(hashed), C.size_t(len(hashed)), C.GO_RSA_PKCS1_PADDING)
+ }) == 0 {
+ return nil, fail("RSA_sign_raw")
+ }
+ return out[:outLen], nil
+ }
+
+ md := cryptoHashToMD(h)
+ if md == nil {
+ return nil, errors.New("crypto/rsa: unsupported hash function: " + strconv.Itoa(int(h)))
+ }
+ nid := C._goboringcrypto_EVP_MD_type(md)
+ var out []byte
+ var outLen C.uint
+ if priv.withKey(func(key *C.GO_RSA) C.int {
+ out = make([]byte, C._goboringcrypto_RSA_size(key))
+ return C._goboringcrypto_RSA_sign(nid, base(hashed), C.uint(len(hashed)),
+ base(out), &outLen, key)
+ }) == 0 {
+ return nil, fail("RSA_sign")
+ }
+ return out[:outLen], nil
+}
+
+func VerifyRSAPKCS1v15(pub *PublicKeyRSA, h crypto.Hash, hashed, sig []byte) error {
+ if h == 0 {
+ var out []byte
+ var outLen C.size_t
+ if pub.withKey(func(key *C.GO_RSA) C.int {
+ out = make([]byte, C._goboringcrypto_RSA_size(key))
+ return C._goboringcrypto_RSA_verify_raw(key, &outLen, base(out),
+ C.size_t(len(out)), base(sig), C.size_t(len(sig)), C.GO_RSA_PKCS1_PADDING)
+ }) == 0 {
+ return fail("RSA_verify")
+ }
+ if subtle.ConstantTimeCompare(hashed, out[:outLen]) != 1 {
+ return fail("RSA_verify")
+ }
+ return nil
+ }
+ md := cryptoHashToMD(h)
+ if md == nil {
+ return errors.New("crypto/rsa: unsupported hash function")
+ }
+ nid := C._goboringcrypto_EVP_MD_type(md)
+ if pub.withKey(func(key *C.GO_RSA) C.int {
+ return C._goboringcrypto_RSA_verify(nid, base(hashed), C.size_t(len(hashed)),
+ base(sig), C.size_t(len(sig)), key)
+ }) == 0 {
+ return fail("RSA_verify")
+ }
+ return nil
+}
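
A round-trip sketch tying the pieces above together (internal package; NewSHA256 comes from sha.go in this directory, so hashToMD recognizes it):

package boring

func oaepRoundTrip(msg []byte) ([]byte, error) {
	n, e, d, p, q, dp, dq, qinv, err := GenerateKeyRSA(2048)
	if err != nil {
		return nil, err
	}
	priv, err := NewPrivateKeyRSA(n, e, d, p, q, dp, dq, qinv)
	if err != nil {
		return nil, err
	}
	pub, err := NewPublicKeyRSA(n, e)
	if err != nil {
		return nil, err
	}
	ct, err := EncryptRSAOAEP(NewSHA256(), NewSHA256(), pub, msg, nil)
	if err != nil {
		return nil, err
	}
	return DecryptRSAOAEP(NewSHA256(), NewSHA256(), priv, ct, nil)
}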
diff --git a/src/crypto/internal/boring/sha.go b/src/crypto/internal/boring/sha.go
new file mode 100644
index 0000000..cf82f3f
--- /dev/null
+++ b/src/crypto/internal/boring/sha.go
@@ -0,0 +1,599 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto && linux && (amd64 || arm64) && !android && !cmd_go_bootstrap && !msan
+
+package boring
+
+/*
+#include "goboringcrypto.h"
+
+int
+_goboringcrypto_gosha1(void *p, size_t n, void *out)
+{
+ GO_SHA_CTX ctx;
+ _goboringcrypto_SHA1_Init(&ctx);
+ return _goboringcrypto_SHA1_Update(&ctx, p, n) &&
+ _goboringcrypto_SHA1_Final(out, &ctx);
+}
+
+int
+_goboringcrypto_gosha224(void *p, size_t n, void *out)
+{
+ GO_SHA256_CTX ctx;
+ _goboringcrypto_SHA224_Init(&ctx);
+ return _goboringcrypto_SHA224_Update(&ctx, p, n) &&
+ _goboringcrypto_SHA224_Final(out, &ctx);
+}
+
+int
+_goboringcrypto_gosha256(void *p, size_t n, void *out)
+{
+ GO_SHA256_CTX ctx;
+ _goboringcrypto_SHA256_Init(&ctx);
+ return _goboringcrypto_SHA256_Update(&ctx, p, n) &&
+ _goboringcrypto_SHA256_Final(out, &ctx);
+}
+
+int
+_goboringcrypto_gosha384(void *p, size_t n, void *out)
+{
+ GO_SHA512_CTX ctx;
+ _goboringcrypto_SHA384_Init(&ctx);
+ return _goboringcrypto_SHA384_Update(&ctx, p, n) &&
+ _goboringcrypto_SHA384_Final(out, &ctx);
+}
+
+int
+_goboringcrypto_gosha512(void *p, size_t n, void *out)
+{
+ GO_SHA512_CTX ctx;
+ _goboringcrypto_SHA512_Init(&ctx);
+ return _goboringcrypto_SHA512_Update(&ctx, p, n) &&
+ _goboringcrypto_SHA512_Final(out, &ctx);
+}
+
+*/
+import "C"
+import (
+ "errors"
+ "hash"
+ "unsafe"
+)
+
+// NOTE: The cgo calls in this file are arranged to avoid marking the parameters as escaping.
+// To do that, we call noescape (including via addr).
+// We must also make sure that the data pointer arguments have the form unsafe.Pointer(&...)
+// so that cgo does not annotate them with cgoCheckPointer calls. If it did that, it might look
+// beyond the byte slice and find Go pointers in unprocessed parts of a larger allocation.
+// To do both of these simultaneously, the idiom is unsafe.Pointer(&*addr(p)),
+// where addr returns the base pointer of p, substituting a non-nil pointer for nil,
+// and applying a noescape along the way.
+// This is all to preserve compatibility with the allocation behavior of the non-boring implementations.
+
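addr and noescape are defined elsewhere in the package (boring.go, per the diffstat); a sketch of their likely shape, stated as an assumption for readers of this file:

var zero byte

// addr returns the base pointer of p, substituting a pointer to a static
// zero byte for empty slices and laundering the result through noescape.
func addr(p []byte) *byte {
	if len(p) == 0 {
		return &zero
	}
	return (*byte)(noescape(unsafe.Pointer(&p[0])))
}

// noescape hides p from escape analysis; the uintptr round trip is a
// run-time no-op.
//
//go:nosplit
//go:nocheckptr
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
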
+func SHA1(p []byte) (sum [20]byte) {
+ if C._goboringcrypto_gosha1(unsafe.Pointer(&*addr(p)), C.size_t(len(p)), unsafe.Pointer(&*addr(sum[:]))) == 0 {
+ panic("boringcrypto: SHA1 failed")
+ }
+ return
+}
+
+func SHA224(p []byte) (sum [28]byte) {
+ if C._goboringcrypto_gosha224(unsafe.Pointer(&*addr(p)), C.size_t(len(p)), unsafe.Pointer(&*addr(sum[:]))) == 0 {
+ panic("boringcrypto: SHA224 failed")
+ }
+ return
+}
+
+func SHA256(p []byte) (sum [32]byte) {
+ if C._goboringcrypto_gosha256(unsafe.Pointer(&*addr(p)), C.size_t(len(p)), unsafe.Pointer(&*addr(sum[:]))) == 0 {
+ panic("boringcrypto: SHA256 failed")
+ }
+ return
+}
+
+func SHA384(p []byte) (sum [48]byte) {
+ if C._goboringcrypto_gosha384(unsafe.Pointer(&*addr(p)), C.size_t(len(p)), unsafe.Pointer(&*addr(sum[:]))) == 0 {
+ panic("boringcrypto: SHA384 failed")
+ }
+ return
+}
+
+func SHA512(p []byte) (sum [64]byte) {
+ if C._goboringcrypto_gosha512(unsafe.Pointer(&*addr(p)), C.size_t(len(p)), unsafe.Pointer(&*addr(sum[:]))) == 0 {
+ panic("boringcrypto: SHA512 failed")
+ }
+ return
+}
+
+// NewSHA1 returns a new SHA1 hash.
+func NewSHA1() hash.Hash {
+ h := new(sha1Hash)
+ h.Reset()
+ return h
+}
+
+type sha1Hash struct {
+ ctx C.GO_SHA_CTX
+ out [20]byte
+}
+
+type sha1Ctx struct {
+ h [5]uint32
+ nl, nh uint32
+ x [64]byte
+ nx uint32
+}
+
+func (h *sha1Hash) noescapeCtx() *C.GO_SHA_CTX {
+ return (*C.GO_SHA_CTX)(noescape(unsafe.Pointer(&h.ctx)))
+}
+
+func (h *sha1Hash) Reset() {
+ C._goboringcrypto_SHA1_Init(h.noescapeCtx())
+}
+
+func (h *sha1Hash) Size() int { return 20 }
+func (h *sha1Hash) BlockSize() int { return 64 }
+func (h *sha1Hash) Sum(dst []byte) []byte { return h.sum(dst) }
+
+func (h *sha1Hash) Write(p []byte) (int, error) {
+ if len(p) > 0 && C._goboringcrypto_SHA1_Update(h.noescapeCtx(), unsafe.Pointer(&*addr(p)), C.size_t(len(p))) == 0 {
+ panic("boringcrypto: SHA1_Update failed")
+ }
+ return len(p), nil
+}
+
+func (h0 *sha1Hash) sum(dst []byte) []byte {
+ h := *h0 // make copy so future Write+Sum is valid
+ if C._goboringcrypto_SHA1_Final((*C.uint8_t)(noescape(unsafe.Pointer(&h.out[0]))), h.noescapeCtx()) == 0 {
+ panic("boringcrypto: SHA1_Final failed")
+ }
+ return append(dst, h.out[:]...)
+}
+
+const (
+ sha1Magic = "sha\x01"
+ sha1MarshaledSize = len(sha1Magic) + 5*4 + 64 + 8
+)
+
+func (h *sha1Hash) MarshalBinary() ([]byte, error) {
+ d := (*sha1Ctx)(unsafe.Pointer(&h.ctx))
+ b := make([]byte, 0, sha1MarshaledSize)
+ b = append(b, sha1Magic...)
+ b = appendUint32(b, d.h[0])
+ b = appendUint32(b, d.h[1])
+ b = appendUint32(b, d.h[2])
+ b = appendUint32(b, d.h[3])
+ b = appendUint32(b, d.h[4])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
+ b = appendUint64(b, uint64(d.nl)>>3|uint64(d.nh)<<29)
+ return b, nil
+}
+
+func (h *sha1Hash) UnmarshalBinary(b []byte) error {
+ if len(b) < len(sha1Magic) || string(b[:len(sha1Magic)]) != sha1Magic {
+ return errors.New("crypto/sha1: invalid hash state identifier")
+ }
+ if len(b) != sha1MarshaledSize {
+ return errors.New("crypto/sha1: invalid hash state size")
+ }
+ d := (*sha1Ctx)(unsafe.Pointer(&h.ctx))
+ b = b[len(sha1Magic):]
+ b, d.h[0] = consumeUint32(b)
+ b, d.h[1] = consumeUint32(b)
+ b, d.h[2] = consumeUint32(b)
+ b, d.h[3] = consumeUint32(b)
+ b, d.h[4] = consumeUint32(b)
+ b = b[copy(d.x[:], b):]
+ b, n := consumeUint64(b)
+ d.nl = uint32(n << 3)
+ d.nh = uint32(n >> 29)
+ d.nx = uint32(n) % 64
+ return nil
+}
+
+// NewSHA224 returns a new SHA224 hash.
+func NewSHA224() hash.Hash {
+ h := new(sha224Hash)
+ h.Reset()
+ return h
+}
+
+type sha224Hash struct {
+ ctx C.GO_SHA256_CTX
+ out [224 / 8]byte
+}
+
+func (h *sha224Hash) noescapeCtx() *C.GO_SHA256_CTX {
+ return (*C.GO_SHA256_CTX)(noescape(unsafe.Pointer(&h.ctx)))
+}
+
+func (h *sha224Hash) Reset() {
+ C._goboringcrypto_SHA224_Init(h.noescapeCtx())
+}
+func (h *sha224Hash) Size() int { return 224 / 8 }
+func (h *sha224Hash) BlockSize() int { return 64 }
+func (h *sha224Hash) Sum(dst []byte) []byte { return h.sum(dst) }
+
+func (h *sha224Hash) Write(p []byte) (int, error) {
+ if len(p) > 0 && C._goboringcrypto_SHA224_Update(h.noescapeCtx(), unsafe.Pointer(&*addr(p)), C.size_t(len(p))) == 0 {
+ panic("boringcrypto: SHA224_Update failed")
+ }
+ return len(p), nil
+}
+
+func (h0 *sha224Hash) sum(dst []byte) []byte {
+ h := *h0 // make copy so future Write+Sum is valid
+ if C._goboringcrypto_SHA224_Final((*C.uint8_t)(noescape(unsafe.Pointer(&h.out[0]))), h.noescapeCtx()) == 0 {
+ panic("boringcrypto: SHA224_Final failed")
+ }
+ return append(dst, h.out[:]...)
+}
+
+// NewSHA256 returns a new SHA256 hash.
+func NewSHA256() hash.Hash {
+ h := new(sha256Hash)
+ h.Reset()
+ return h
+}
+
+type sha256Hash struct {
+ ctx C.GO_SHA256_CTX
+ out [256 / 8]byte
+}
+
+func (h *sha256Hash) noescapeCtx() *C.GO_SHA256_CTX {
+ return (*C.GO_SHA256_CTX)(noescape(unsafe.Pointer(&h.ctx)))
+}
+
+func (h *sha256Hash) Reset() {
+ C._goboringcrypto_SHA256_Init(h.noescapeCtx())
+}
+func (h *sha256Hash) Size() int { return 256 / 8 }
+func (h *sha256Hash) BlockSize() int { return 64 }
+func (h *sha256Hash) Sum(dst []byte) []byte { return h.sum(dst) }
+
+func (h *sha256Hash) Write(p []byte) (int, error) {
+ if len(p) > 0 && C._goboringcrypto_SHA256_Update(h.noescapeCtx(), unsafe.Pointer(&*addr(p)), C.size_t(len(p))) == 0 {
+ panic("boringcrypto: SHA256_Update failed")
+ }
+ return len(p), nil
+}
+
+func (h0 *sha256Hash) sum(dst []byte) []byte {
+ h := *h0 // make copy so future Write+Sum is valid
+ if C._goboringcrypto_SHA256_Final((*C.uint8_t)(noescape(unsafe.Pointer(&h.out[0]))), h.noescapeCtx()) == 0 {
+ panic("boringcrypto: SHA256_Final failed")
+ }
+ return append(dst, h.out[:]...)
+}
+
+const (
+ magic224 = "sha\x02"
+ magic256 = "sha\x03"
+ marshaledSize256 = len(magic256) + 8*4 + 64 + 8
+)
+
+type sha256Ctx struct {
+ h [8]uint32
+ nl, nh uint32
+ x [64]byte
+ nx uint32
+}
+
+func (h *sha224Hash) MarshalBinary() ([]byte, error) {
+ d := (*sha256Ctx)(unsafe.Pointer(&h.ctx))
+ b := make([]byte, 0, marshaledSize256)
+ b = append(b, magic224...)
+ b = appendUint32(b, d.h[0])
+ b = appendUint32(b, d.h[1])
+ b = appendUint32(b, d.h[2])
+ b = appendUint32(b, d.h[3])
+ b = appendUint32(b, d.h[4])
+ b = appendUint32(b, d.h[5])
+ b = appendUint32(b, d.h[6])
+ b = appendUint32(b, d.h[7])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
+ b = appendUint64(b, uint64(d.nl)>>3|uint64(d.nh)<<29)
+ return b, nil
+}
+
+func (h *sha256Hash) MarshalBinary() ([]byte, error) {
+ d := (*sha256Ctx)(unsafe.Pointer(&h.ctx))
+ b := make([]byte, 0, marshaledSize256)
+ b = append(b, magic256...)
+ b = appendUint32(b, d.h[0])
+ b = appendUint32(b, d.h[1])
+ b = appendUint32(b, d.h[2])
+ b = appendUint32(b, d.h[3])
+ b = appendUint32(b, d.h[4])
+ b = appendUint32(b, d.h[5])
+ b = appendUint32(b, d.h[6])
+ b = appendUint32(b, d.h[7])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
+ b = appendUint64(b, uint64(d.nl)>>3|uint64(d.nh)<<29)
+ return b, nil
+}
+
+func (h *sha224Hash) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic224) || string(b[:len(magic224)]) != magic224 {
+ return errors.New("crypto/sha256: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize256 {
+ return errors.New("crypto/sha256: invalid hash state size")
+ }
+ d := (*sha256Ctx)(unsafe.Pointer(&h.ctx))
+ b = b[len(magic224):]
+ b, d.h[0] = consumeUint32(b)
+ b, d.h[1] = consumeUint32(b)
+ b, d.h[2] = consumeUint32(b)
+ b, d.h[3] = consumeUint32(b)
+ b, d.h[4] = consumeUint32(b)
+ b, d.h[5] = consumeUint32(b)
+ b, d.h[6] = consumeUint32(b)
+ b, d.h[7] = consumeUint32(b)
+ b = b[copy(d.x[:], b):]
+ b, n := consumeUint64(b)
+ d.nl = uint32(n << 3)
+ d.nh = uint32(n >> 29)
+ d.nx = uint32(n) % 64
+ return nil
+}
+
+func (h *sha256Hash) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic256) || string(b[:len(magic256)]) != magic256 {
+ return errors.New("crypto/sha256: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize256 {
+ return errors.New("crypto/sha256: invalid hash state size")
+ }
+ d := (*sha256Ctx)(unsafe.Pointer(&h.ctx))
+ b = b[len(magic256):]
+ b, d.h[0] = consumeUint32(b)
+ b, d.h[1] = consumeUint32(b)
+ b, d.h[2] = consumeUint32(b)
+ b, d.h[3] = consumeUint32(b)
+ b, d.h[4] = consumeUint32(b)
+ b, d.h[5] = consumeUint32(b)
+ b, d.h[6] = consumeUint32(b)
+ b, d.h[7] = consumeUint32(b)
+ b = b[copy(d.x[:], b):]
+ b, n := consumeUint64(b)
+ d.nl = uint32(n << 3)
+ d.nh = uint32(n >> 29)
+ d.nx = uint32(n) % 64
+ return nil
+}
+
+// NewSHA384 returns a new SHA384 hash.
+func NewSHA384() hash.Hash {
+ h := new(sha384Hash)
+ h.Reset()
+ return h
+}
+
+type sha384Hash struct {
+ ctx C.GO_SHA512_CTX
+ out [384 / 8]byte
+}
+
+func (h *sha384Hash) noescapeCtx() *C.GO_SHA512_CTX {
+ return (*C.GO_SHA512_CTX)(noescape(unsafe.Pointer(&h.ctx)))
+}
+
+func (h *sha384Hash) Reset() {
+ C._goboringcrypto_SHA384_Init(h.noescapeCtx())
+}
+func (h *sha384Hash) Size() int { return 384 / 8 }
+func (h *sha384Hash) BlockSize() int { return 128 }
+func (h *sha384Hash) Sum(dst []byte) []byte { return h.sum(dst) }
+
+func (h *sha384Hash) Write(p []byte) (int, error) {
+ if len(p) > 0 && C._goboringcrypto_SHA384_Update(h.noescapeCtx(), unsafe.Pointer(&*addr(p)), C.size_t(len(p))) == 0 {
+ panic("boringcrypto: SHA384_Update failed")
+ }
+ return len(p), nil
+}
+
+func (h0 *sha384Hash) sum(dst []byte) []byte {
+ h := *h0 // make copy so future Write+Sum is valid
+ if C._goboringcrypto_SHA384_Final((*C.uint8_t)(noescape(unsafe.Pointer(&h.out[0]))), h.noescapeCtx()) == 0 {
+ panic("boringcrypto: SHA384_Final failed")
+ }
+ return append(dst, h.out[:]...)
+}
+
+// NewSHA512 returns a new SHA512 hash.
+func NewSHA512() hash.Hash {
+ h := new(sha512Hash)
+ h.Reset()
+ return h
+}
+
+type sha512Hash struct {
+ ctx C.GO_SHA512_CTX
+ out [512 / 8]byte
+}
+
+func (h *sha512Hash) noescapeCtx() *C.GO_SHA512_CTX {
+ return (*C.GO_SHA512_CTX)(noescape(unsafe.Pointer(&h.ctx)))
+}
+
+func (h *sha512Hash) Reset() {
+ C._goboringcrypto_SHA512_Init(h.noescapeCtx())
+}
+func (h *sha512Hash) Size() int { return 512 / 8 }
+func (h *sha512Hash) BlockSize() int { return 128 }
+func (h *sha512Hash) Sum(dst []byte) []byte { return h.sum(dst) }
+
+func (h *sha512Hash) Write(p []byte) (int, error) {
+ if len(p) > 0 && C._goboringcrypto_SHA512_Update(h.noescapeCtx(), unsafe.Pointer(&*addr(p)), C.size_t(len(p))) == 0 {
+ panic("boringcrypto: SHA512_Update failed")
+ }
+ return len(p), nil
+}
+
+func (h0 *sha512Hash) sum(dst []byte) []byte {
+ h := *h0 // make copy so future Write+Sum is valid
+ if C._goboringcrypto_SHA512_Final((*C.uint8_t)(noescape(unsafe.Pointer(&h.out[0]))), h.noescapeCtx()) == 0 {
+ panic("boringcrypto: SHA512_Final failed")
+ }
+ return append(dst, h.out[:]...)
+}
+
+type sha512Ctx struct {
+ h [8]uint64
+ nl, nh uint64
+ x [128]byte
+ nx uint32
+}
+
+const (
+ magic384 = "sha\x04"
+ magic512_224 = "sha\x05"
+ magic512_256 = "sha\x06"
+ magic512 = "sha\x07"
+ marshaledSize512 = len(magic512) + 8*8 + 128 + 8
+)
+
+func (h *sha384Hash) MarshalBinary() ([]byte, error) {
+ d := (*sha512Ctx)(unsafe.Pointer(&h.ctx))
+ b := make([]byte, 0, marshaledSize512)
+ b = append(b, magic384...)
+ b = appendUint64(b, d.h[0])
+ b = appendUint64(b, d.h[1])
+ b = appendUint64(b, d.h[2])
+ b = appendUint64(b, d.h[3])
+ b = appendUint64(b, d.h[4])
+ b = appendUint64(b, d.h[5])
+ b = appendUint64(b, d.h[6])
+ b = appendUint64(b, d.h[7])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
+ b = appendUint64(b, d.nl>>3|d.nh<<61)
+ return b, nil
+}
+
+func (h *sha512Hash) MarshalBinary() ([]byte, error) {
+ d := (*sha512Ctx)(unsafe.Pointer(&h.ctx))
+ b := make([]byte, 0, marshaledSize512)
+ b = append(b, magic512...)
+ b = appendUint64(b, d.h[0])
+ b = appendUint64(b, d.h[1])
+ b = appendUint64(b, d.h[2])
+ b = appendUint64(b, d.h[3])
+ b = appendUint64(b, d.h[4])
+ b = appendUint64(b, d.h[5])
+ b = appendUint64(b, d.h[6])
+ b = appendUint64(b, d.h[7])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-int(d.nx)] // already zero
+ b = appendUint64(b, d.nl>>3|d.nh<<61)
+ return b, nil
+}
+
+func (h *sha384Hash) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic512) {
+ return errors.New("crypto/sha512: invalid hash state identifier")
+ }
+ if string(b[:len(magic384)]) != magic384 {
+ return errors.New("crypto/sha512: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize512 {
+ return errors.New("crypto/sha512: invalid hash state size")
+ }
+ d := (*sha512Ctx)(unsafe.Pointer(&h.ctx))
+ b = b[len(magic512):]
+ b, d.h[0] = consumeUint64(b)
+ b, d.h[1] = consumeUint64(b)
+ b, d.h[2] = consumeUint64(b)
+ b, d.h[3] = consumeUint64(b)
+ b, d.h[4] = consumeUint64(b)
+ b, d.h[5] = consumeUint64(b)
+ b, d.h[6] = consumeUint64(b)
+ b, d.h[7] = consumeUint64(b)
+ b = b[copy(d.x[:], b):]
+ b, n := consumeUint64(b)
+ d.nl = n << 3
+ d.nh = n >> 61
+ d.nx = uint32(n) % 128
+ return nil
+}
+
+func (h *sha512Hash) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic512) {
+ return errors.New("crypto/sha512: invalid hash state identifier")
+ }
+ if string(b[:len(magic512)]) != magic512 {
+ return errors.New("crypto/sha512: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize512 {
+ return errors.New("crypto/sha512: invalid hash state size")
+ }
+ d := (*sha512Ctx)(unsafe.Pointer(&h.ctx))
+ b = b[len(magic512):]
+ b, d.h[0] = consumeUint64(b)
+ b, d.h[1] = consumeUint64(b)
+ b, d.h[2] = consumeUint64(b)
+ b, d.h[3] = consumeUint64(b)
+ b, d.h[4] = consumeUint64(b)
+ b, d.h[5] = consumeUint64(b)
+ b, d.h[6] = consumeUint64(b)
+ b, d.h[7] = consumeUint64(b)
+ b = b[copy(d.x[:], b):]
+ b, n := consumeUint64(b)
+ d.nl = n << 3
+ d.nh = n >> 61
+ d.nx = uint32(n) % 128
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ putUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func appendUint32(b []byte, x uint32) []byte {
+ var a [4]byte
+ putUint32(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ _ = b[7]
+ x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ return b[8:], x
+}
+
+func consumeUint32(b []byte) ([]byte, uint32) {
+ _ = b[3]
+ x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ return b[4:], x
+}
+
+func putUint64(x []byte, s uint64) {
+ _ = x[7]
+ x[0] = byte(s >> 56)
+ x[1] = byte(s >> 48)
+ x[2] = byte(s >> 40)
+ x[3] = byte(s >> 32)
+ x[4] = byte(s >> 24)
+ x[5] = byte(s >> 16)
+ x[6] = byte(s >> 8)
+ x[7] = byte(s)
+}
+
+func putUint32(x []byte, s uint32) {
+ _ = x[3]
+ x[0] = byte(s >> 24)
+ x[1] = byte(s >> 16)
+ x[2] = byte(s >> 8)
+ x[3] = byte(s)
+}
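
The marshaled state above is the magic string, the eight 64-bit chaining values, the 128-byte block buffer, and the 64-bit bit count. As a minimal sketch (not part of this patch), here is how callers round-trip that state through the standard encoding interfaces; it behaves the same with or without BoringCrypto:

package main

import (
	"crypto/sha512"
	"encoding"
	"fmt"
)

func main() {
	h := sha512.New()
	h.Write([]byte("hello "))

	// Snapshot the running state via MarshalBinary.
	state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore the snapshot into a fresh hash and continue from there.
	h2 := sha512.New()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}

	h.Write([]byte("world"))
	h2.Write([]byte("world"))
	fmt.Printf("%x\n%x\n", h.Sum(nil), h2.Sum(nil)) // identical digests
}
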
diff --git a/src/crypto/internal/boring/sig/sig.go b/src/crypto/internal/boring/sig/sig.go
new file mode 100644
index 0000000..716c03c
--- /dev/null
+++ b/src/crypto/internal/boring/sig/sig.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sig holds “code signatures” that can be called
+// and will result in certain code sequences being linked into
+// the final binary. The functions themselves are no-ops.
+package sig
+
+// BoringCrypto indicates that the BoringCrypto module is present.
+func BoringCrypto()
+
+// FIPSOnly indicates that package crypto/tls/fipsonly is present.
+func FIPSOnly()
+
+// StandardCrypto indicates that standard Go crypto is present.
+func StandardCrypto()
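
These functions have no Go bodies; the implementations live in the accompanying assembly files. A sketch of the intended call pattern, roughly how crypto/internal/boring and crypto/tls/fipsonly link a signature in:

package boring

import "crypto/internal/boring/sig"

func init() {
	// Calling the no-op from init pins its recognizable byte
	// sequence into any binary that links this package.
	sig.BoringCrypto()
}
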
diff --git a/src/crypto/internal/boring/sig/sig_amd64.s b/src/crypto/internal/boring/sig/sig_amd64.s
new file mode 100644
index 0000000..64e3462
--- /dev/null
+++ b/src/crypto/internal/boring/sig/sig_amd64.s
@@ -0,0 +1,54 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// These functions are no-ops, but you can search for their implementations
+// to find out whether they are linked into a particular binary.
+//
+// Each function consists of a two-byte jump over the next 29 bytes,
+// then a 5-byte indicator sequence unlikely to occur in real x86 instructions,
+// then a randomly-chosen 24-byte sequence, and finally a return instruction
+// (the target of the jump).
+//
+// These sequences are known to rsc.io/goversion.
+
+#define START \
+ BYTE $0xEB; BYTE $0x1D; BYTE $0xF4; BYTE $0x48; BYTE $0xF4; BYTE $0x4B; BYTE $0xF4
+
+#define END \
+ BYTE $0xC3
+
+// BoringCrypto indicates that BoringCrypto (in particular, its func init) is present.
+TEXT ·BoringCrypto(SB),NOSPLIT,$0
+ START
+ BYTE $0xB3; BYTE $0x32; BYTE $0xF5; BYTE $0x28;
+ BYTE $0x13; BYTE $0xA3; BYTE $0xB4; BYTE $0x50;
+ BYTE $0xD4; BYTE $0x41; BYTE $0xCC; BYTE $0x24;
+ BYTE $0x85; BYTE $0xF0; BYTE $0x01; BYTE $0x45;
+ BYTE $0x4E; BYTE $0x92; BYTE $0x10; BYTE $0x1B;
+ BYTE $0x1D; BYTE $0x2F; BYTE $0x19; BYTE $0x50;
+ END
+
+// StandardCrypto indicates that standard Go crypto is present.
+TEXT ·StandardCrypto(SB),NOSPLIT,$0
+ START
+ BYTE $0xba; BYTE $0xee; BYTE $0x4d; BYTE $0xfa;
+ BYTE $0x98; BYTE $0x51; BYTE $0xca; BYTE $0x56;
+ BYTE $0xa9; BYTE $0x11; BYTE $0x45; BYTE $0xe8;
+ BYTE $0x3e; BYTE $0x99; BYTE $0xc5; BYTE $0x9c;
+ BYTE $0xf9; BYTE $0x11; BYTE $0xcb; BYTE $0x8e;
+ BYTE $0x80; BYTE $0xda; BYTE $0xf1; BYTE $0x2f;
+ END
+
+// FIPSOnly indicates that crypto/tls/fipsonly is present.
+TEXT ·FIPSOnly(SB),NOSPLIT,$0
+ START
+ BYTE $0x36; BYTE $0x3C; BYTE $0xB9; BYTE $0xCE;
+ BYTE $0x9D; BYTE $0x68; BYTE $0x04; BYTE $0x7D;
+ BYTE $0x31; BYTE $0xF2; BYTE $0x8D; BYTE $0x32;
+ BYTE $0x5D; BYTE $0x5C; BYTE $0xA5; BYTE $0x87;
+ BYTE $0x3F; BYTE $0x5D; BYTE $0x80; BYTE $0xCA;
+ BYTE $0xF6; BYTE $0xD6; BYTE $0x15; BYTE $0x1B;
+ END
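
Because the indicator and the 24-byte body are fixed, detecting a signature reduces to a substring search over the binary. A sketch (the byte values are copied from ·BoringCrypto above; rsc.io/goversion does this more carefully):

package main

import (
	"bytes"
	"fmt"
	"os"
)

// boringCryptoSig is the 5-byte indicator followed by the 24-byte
// sequence emitted by ·BoringCrypto.
var boringCryptoSig = []byte{
	0xF4, 0x48, 0xF4, 0x4B, 0xF4,
	0xB3, 0x32, 0xF5, 0x28, 0x13, 0xA3, 0xB4, 0x50,
	0xD4, 0x41, 0xCC, 0x24, 0x85, 0xF0, 0x01, 0x45,
	0x4E, 0x92, 0x10, 0x1B, 0x1D, 0x2F, 0x19, 0x50,
}

func main() {
	data, err := os.ReadFile(os.Args[1])
	if err != nil {
		panic(err)
	}
	fmt.Println("BoringCrypto:", bytes.Contains(data, boringCryptoSig))
}
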
diff --git a/src/crypto/internal/boring/sig/sig_other.s b/src/crypto/internal/boring/sig/sig_other.s
new file mode 100644
index 0000000..2bbb1df
--- /dev/null
+++ b/src/crypto/internal/boring/sig/sig_other.s
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// These functions are no-ops.
+// On amd64 they have recognizable implementations, so that you can
+// search a particular binary to see if they are present.
+// On other platforms (those using this source file), they don't.
+
+//go:build !amd64
+// +build !amd64
+
+TEXT ·BoringCrypto(SB),$0
+ RET
+
+TEXT ·FIPSOnly(SB),$0
+ RET
+
+TEXT ·StandardCrypto(SB),$0
+ RET
diff --git a/src/crypto/internal/boring/syso/goboringcrypto_linux_amd64.syso b/src/crypto/internal/boring/syso/goboringcrypto_linux_amd64.syso
new file mode 100644
index 0000000..6cea789
--- /dev/null
+++ b/src/crypto/internal/boring/syso/goboringcrypto_linux_amd64.syso
Binary files differ
diff --git a/src/crypto/internal/boring/syso/goboringcrypto_linux_arm64.syso b/src/crypto/internal/boring/syso/goboringcrypto_linux_arm64.syso
new file mode 100644
index 0000000..9659aa1
--- /dev/null
+++ b/src/crypto/internal/boring/syso/goboringcrypto_linux_arm64.syso
Binary files differ
diff --git a/src/crypto/internal/boring/syso/syso.go b/src/crypto/internal/boring/syso/syso.go
new file mode 100644
index 0000000..b338754
--- /dev/null
+++ b/src/crypto/internal/boring/syso/syso.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build boringcrypto
+
+// This package only exists with GOEXPERIMENT=boringcrypto.
+// It provides the actual syso file.
+package syso
diff --git a/src/crypto/internal/edwards25519/doc.go b/src/crypto/internal/edwards25519/doc.go
new file mode 100644
index 0000000..8cba6fe
--- /dev/null
+++ b/src/crypto/internal/edwards25519/doc.go
@@ -0,0 +1,22 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edwards25519 implements group logic for the twisted Edwards curve
+//
+// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
+//
+// This is better known as the Edwards curve equivalent to Curve25519, and is
+// the curve used by the Ed25519 signature scheme.
+//
+// Most users don't need this package, and should instead use crypto/ed25519 for
+// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
+// github.com/gtank/ristretto255 for prime order group logic.
+//
+// However, developers who do need to interact with low-level edwards25519
+// operations can use filippo.io/edwards25519, an extended version of this
+// package repackaged as an importable module.
+//
+// (Note that filippo.io/edwards25519 and github.com/gtank/ristretto255 are not
+// maintained by the Go team and are not covered by the Go 1 Compatibility Promise.)
+package edwards25519
diff --git a/src/crypto/internal/edwards25519/edwards25519.go b/src/crypto/internal/edwards25519/edwards25519.go
new file mode 100644
index 0000000..71e9c09
--- /dev/null
+++ b/src/crypto/internal/edwards25519/edwards25519.go
@@ -0,0 +1,426 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/internal/edwards25519/field"
+ "errors"
+)
+
+// Point types.
+
+type projP1xP1 struct {
+ X, Y, Z, T field.Element
+}
+
+type projP2 struct {
+ X, Y, Z field.Element
+}
+
+// Point represents a point on the edwards25519 curve.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is NOT valid, and it may be used only as a receiver.
+type Point struct {
+ // The point is internally represented in extended coordinates (X, Y, Z, T)
+ // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
+ x, y, z, t field.Element
+
+ // Make the type not comparable (i.e. not usable with == or as a map key), as
+ // equivalent points can be represented by different Go values.
+ _ incomparable
+}
+
+type incomparable [0]func()
+
+func checkInitialized(points ...*Point) {
+ for _, p := range points {
+ if p.x == (field.Element{}) && p.y == (field.Element{}) {
+ panic("edwards25519: use of uninitialized Point")
+ }
+ }
+}
+
+type projCached struct {
+ YplusX, YminusX, Z, T2d field.Element
+}
+
+type affineCached struct {
+ YplusX, YminusX, T2d field.Element
+}
+
+// Constructors.
+
+func (v *projP2) Zero() *projP2 {
+ v.X.Zero()
+ v.Y.One()
+ v.Z.One()
+ return v
+}
+
+// identity is the identity element of the group, the point (0, 1).
+var identity, _ = new(Point).SetBytes([]byte{
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+
+// NewIdentityPoint returns a new Point set to the identity.
+func NewIdentityPoint() *Point {
+ return new(Point).Set(identity)
+}
+
+// generator is the canonical curve basepoint. See TestGenerator for the
+// correspondence of this encoding with the values in RFC 8032.
+var generator, _ = new(Point).SetBytes([]byte{
+ 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
+
+// NewGeneratorPoint returns a new Point set to the canonical generator.
+func NewGeneratorPoint() *Point {
+ return new(Point).Set(generator)
+}
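
A quick sketch of these constructors together with the group operations defined later in this file; the package's own tests below verify the same identities more thoroughly:

package edwards25519

import "testing"

// TestIdentitySketch checks that B + (-B) is the identity using only
// the exported API.
func TestIdentitySketch(t *testing.T) {
	B := NewGeneratorPoint()
	I := NewIdentityPoint()
	sum := new(Point).Add(B, new(Point).Negate(B))
	if sum.Equal(I) != 1 {
		t.Error("B + (-B) != identity")
	}
}
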
+
+func (v *projCached) Zero() *projCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.Z.One()
+ v.T2d.Zero()
+ return v
+}
+
+func (v *affineCached) Zero() *affineCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.T2d.Zero()
+ return v
+}
+
+// Assignments.
+
+// Set sets v = u, and returns v.
+func (v *Point) Set(u *Point) *Point {
+ *v = *u
+ return v
+}
+
+// Encoding.
+
+// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
+// Section 5.1.2.
+func (v *Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytes(&buf)
+}
+
+func (v *Point) bytes(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ var zInv, x, y field.Element
+ zInv.Invert(&v.z) // zInv = 1 / Z
+ x.Multiply(&v.x, &zInv) // x = X / Z
+ y.Multiply(&v.y, &zInv) // y = Y / Z
+
+ out := copyFieldElement(buf, &y)
+ out[31] |= byte(x.IsNegative() << 7)
+ return out
+}
+
+var feOne = new(field.Element).One()
+
+// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
+// represent a valid point on the curve, SetBytes returns nil and an error and
+// the receiver is unchanged. Otherwise, SetBytes returns v.
+//
+// Note that SetBytes accepts all non-canonical encodings of valid points.
+// That is, it follows decoding rules that match most implementations in
+// the ecosystem rather than RFC 8032.
+func (v *Point) SetBytes(x []byte) (*Point, error) {
+ // Specifically, the non-canonical encodings that are accepted are
+ // 1) the ones where the field element is not reduced (see the
+ // (*field.Element).SetBytes docs) and
+ // 2) the ones where the x-coordinate is zero and the sign bit is set.
+ //
+ // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
+ // specifically the "Canonical A, R" section.
+
+ y, err := new(field.Element).SetBytes(x)
+ if err != nil {
+ return nil, errors.New("edwards25519: invalid point encoding length")
+ }
+
+ // -x² + y² = 1 + dx²y²
+ // x² + dx²y² = x²(dy² + 1) = y² - 1
+ // x² = (y² - 1) / (dy² + 1)
+
+ // u = y² - 1
+ y2 := new(field.Element).Square(y)
+ u := new(field.Element).Subtract(y2, feOne)
+
+ // v = dy² + 1
+ vv := new(field.Element).Multiply(y2, d)
+ vv = vv.Add(vv, feOne)
+
+ // x = +√(u/v)
+ xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
+ if wasSquare == 0 {
+ return nil, errors.New("edwards25519: invalid point encoding")
+ }
+
+ // Select the negative square root if the sign bit is set.
+ xxNeg := new(field.Element).Negate(xx)
+ xx = xx.Select(xxNeg, xx, int(x[31]>>7))
+
+ v.x.Set(xx)
+ v.y.Set(y)
+ v.z.One()
+ v.t.Multiply(xx, y) // xy = T / Z
+
+ return v, nil
+}
+
+func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
+ copy(buf[:], v.Bytes())
+ return buf[:]
+}
+
+// Conversions.
+
+func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
+ v.X.Multiply(&p.X, &p.T)
+ v.Y.Multiply(&p.Y, &p.Z)
+ v.Z.Multiply(&p.Z, &p.T)
+ return v
+}
+
+func (v *projP2) FromP3(p *Point) *projP2 {
+ v.X.Set(&p.x)
+ v.Y.Set(&p.y)
+ v.Z.Set(&p.z)
+ return v
+}
+
+func (v *Point) fromP1xP1(p *projP1xP1) *Point {
+ v.x.Multiply(&p.X, &p.T)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Multiply(&p.Z, &p.T)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+func (v *Point) fromP2(p *projP2) *Point {
+ v.x.Multiply(&p.X, &p.Z)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Square(&p.Z)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+// d is a constant in the curve equation.
+var d, _ = new(field.Element).SetBytes([]byte{
+ 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
+ 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
+ 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
+ 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
+var d2 = new(field.Element).Add(d, d)
+
+func (v *projCached) FromP3(p *Point) *projCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.Z.Set(&p.z)
+ v.T2d.Multiply(&p.t, d2)
+ return v
+}
+
+func (v *affineCached) FromP3(p *Point) *affineCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.T2d.Multiply(&p.t, d2)
+
+ var invZ field.Element
+ invZ.Invert(&p.z)
+ v.YplusX.Multiply(&v.YplusX, &invZ)
+ v.YminusX.Multiply(&v.YminusX, &invZ)
+ v.T2d.Multiply(&v.T2d, &invZ)
+ return v
+}
+
+// (Re)addition and subtraction.
+
+// Add sets v = p + q, and returns v.
+func (v *Point) Add(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Add(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+// Subtract sets v = p - q, and returns v.
+func (v *Point) Subtract(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Sub(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&ZZ2, &TT2d)
+ v.T.Subtract(&ZZ2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
+ v.T.Add(&ZZ2, &TT2d) // flipped sign
+ return v
+}
+
+func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&Z2, &TT2d)
+ v.T.Subtract(&Z2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&Z2, &TT2d) // flipped sign
+ v.T.Add(&Z2, &TT2d) // flipped sign
+ return v
+}
+
+// Doubling.
+
+func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
+ var XX, YY, ZZ2, XplusYsq field.Element
+
+ XX.Square(&p.X)
+ YY.Square(&p.Y)
+ ZZ2.Square(&p.Z)
+ ZZ2.Add(&ZZ2, &ZZ2)
+ XplusYsq.Add(&p.X, &p.Y)
+ XplusYsq.Square(&XplusYsq)
+
+ v.Y.Add(&YY, &XX)
+ v.Z.Subtract(&YY, &XX)
+
+ v.X.Subtract(&XplusYsq, &v.Y)
+ v.T.Subtract(&ZZ2, &v.Z)
+ return v
+}
+
+// Negation.
+
+// Negate sets v = -p, and returns v.
+func (v *Point) Negate(p *Point) *Point {
+ checkInitialized(p)
+ v.x.Negate(&p.x)
+ v.y.Set(&p.y)
+ v.z.Set(&p.z)
+ v.t.Negate(&p.t)
+ return v
+}
+
+// Equal returns 1 if v is equivalent to u, and 0 otherwise.
+func (v *Point) Equal(u *Point) int {
+ checkInitialized(v, u)
+
+ var t1, t2, t3, t4 field.Element
+ t1.Multiply(&v.x, &u.z)
+ t2.Multiply(&u.x, &v.z)
+ t3.Multiply(&v.y, &u.z)
+ t4.Multiply(&u.y, &v.z)
+
+ return t1.Equal(&t2) & t3.Equal(&t4)
+}
+
+// Constant-time operations
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *projCached) Select(a, b *projCached, cond int) *projCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.Z.Select(&a.Z, &b.Z, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *projCached) CondNeg(cond int) *projCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *affineCached) CondNeg(cond int) *affineCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
diff --git a/src/crypto/internal/edwards25519/edwards25519_test.go b/src/crypto/internal/edwards25519/edwards25519_test.go
new file mode 100644
index 0000000..307ae26
--- /dev/null
+++ b/src/crypto/internal/edwards25519/edwards25519_test.go
@@ -0,0 +1,313 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/internal/edwards25519/field"
+ "encoding/hex"
+ "internal/testenv"
+ "reflect"
+ "testing"
+)
+
+var B = NewGeneratorPoint()
+var I = NewIdentityPoint()
+
+func checkOnCurve(t *testing.T, points ...*Point) {
+ t.Helper()
+ for i, p := range points {
+ var XX, YY, ZZ, ZZZZ field.Element
+ XX.Square(&p.x)
+ YY.Square(&p.y)
+ ZZ.Square(&p.z)
+ ZZZZ.Square(&ZZ)
+ // -x² + y² = 1 + dx²y²
+ // -(X/Z)² + (Y/Z)² = 1 + d(X/Z)²(Y/Z)²
+ // (-X² + Y²)/Z² = 1 + (dX²Y²)/Z⁴
+ // (-X² + Y²)*Z² = Z⁴ + dX²Y²
+ var lhs, rhs field.Element
+ lhs.Subtract(&YY, &XX).Multiply(&lhs, &ZZ)
+ rhs.Multiply(d, &XX).Multiply(&rhs, &YY).Add(&rhs, &ZZZZ)
+ if lhs.Equal(&rhs) != 1 {
+ t.Errorf("X, Y, and Z do not specify a point on the curve\nX = %v\nY = %v\nZ = %v", p.x, p.y, p.z)
+ }
+ // xy = T/Z
+ lhs.Multiply(&p.x, &p.y)
+ rhs.Multiply(&p.z, &p.t)
+ if lhs.Equal(&rhs) != 1 {
+ t.Errorf("point %d is not valid\nX = %v\nY = %v\nZ = %v", i, p.x, p.y, p.z)
+ }
+ }
+}
+
+func TestGenerator(t *testing.T) {
+ // These are the coordinates of B from RFC 8032, Section 5.1, converted to
+ // little-endian hex.
+ x := "1ad5258f602d56c9b2a7259560c72c695cdcd6fd31e2a4c0fe536ecdd3366921"
+ y := "5866666666666666666666666666666666666666666666666666666666666666"
+ if got := hex.EncodeToString(B.x.Bytes()); got != x {
+ t.Errorf("wrong B.x: got %s, expected %s", got, x)
+ }
+ if got := hex.EncodeToString(B.y.Bytes()); got != y {
+ t.Errorf("wrong B.y: got %s, expected %s", got, y)
+ }
+ if B.z.Equal(feOne) != 1 {
+ t.Errorf("wrong B.z: got %v, expected 1", B.z)
+ }
+ // Check that t is correct.
+ checkOnCurve(t, B)
+}
+
+func TestAddSubNegOnBasePoint(t *testing.T) {
+ checkLhs, checkRhs := &Point{}, &Point{}
+
+ checkLhs.Add(B, B)
+ tmpP2 := new(projP2).FromP3(B)
+ tmpP1xP1 := new(projP1xP1).Double(tmpP2)
+ checkRhs.fromP1xP1(tmpP1xP1)
+ if checkLhs.Equal(checkRhs) != 1 {
+ t.Error("B + B != [2]B")
+ }
+ checkOnCurve(t, checkLhs, checkRhs)
+
+ checkLhs.Subtract(B, B)
+ Bneg := new(Point).Negate(B)
+ checkRhs.Add(B, Bneg)
+ if checkLhs.Equal(checkRhs) != 1 {
+ t.Error("B - B != B + (-B)")
+ }
+ if I.Equal(checkLhs) != 1 {
+ t.Error("B - B != 0")
+ }
+ if I.Equal(checkRhs) != 1 {
+ t.Error("B + (-B) != 0")
+ }
+ checkOnCurve(t, checkLhs, checkRhs, Bneg)
+}
+
+func TestComparable(t *testing.T) {
+ if reflect.TypeOf(Point{}).Comparable() {
+ t.Error("Point is unexpectedly comparable")
+ }
+}
+
+func TestInvalidEncodings(t *testing.T) {
+ // An invalid point that also happens to have y > p.
+ invalid := "efffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f"
+ p := NewGeneratorPoint()
+ if out, err := p.SetBytes(decodeHex(invalid)); err == nil {
+ t.Error("expected error for invalid point")
+ } else if out != nil {
+ t.Error("SetBytes did not return nil on an invalid encoding")
+ } else if p.Equal(B) != 1 {
+ t.Error("the Point was modified while decoding an invalid encoding")
+ }
+ checkOnCurve(t, p)
+}
+
+func TestNonCanonicalPoints(t *testing.T) {
+ type test struct {
+ name string
+ encoding, canonical string
+ }
+ tests := []test{
+ // Points with x = 0 and the sign bit set. With x = 0 the curve equation
+ // gives y² = 1, so y = ±1. 1 has two valid encodings.
+ {
+ "y=1,sign-",
+ "0100000000000000000000000000000000000000000000000000000000000080",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+1,sign-",
+ "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p-1,sign-",
+ "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ },
+
+ // Non-canonical y encodings with values 2²⁵⁵-19 (p) to 2²⁵⁵-1 (p+18).
+ {
+ "y=p,sign+",
+ "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p,sign-",
+ "edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0000000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+1,sign+",
+ "eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ },
+ // "y=p+1,sign-" is already tested above.
+ // p+2 is not a valid y-coordinate.
+ {
+ "y=p+3,sign+",
+ "f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0300000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+3,sign-",
+ "f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0300000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+4,sign+",
+ "f1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0400000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+4,sign-",
+ "f1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0400000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+5,sign+",
+ "f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0500000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+5,sign-",
+ "f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0500000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+6,sign+",
+ "f3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0600000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+6,sign-",
+ "f3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0600000000000000000000000000000000000000000000000000000000000080",
+ },
+ // p+7 is not a valid y-coordinate.
+ // p+8 is not a valid y-coordinate.
+ {
+ "y=p+9,sign+",
+ "f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0900000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+9,sign-",
+ "f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0900000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+10,sign+",
+ "f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0a00000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+10,sign-",
+ "f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0a00000000000000000000000000000000000000000000000000000000000080",
+ },
+ // p+11 is not a valid y-coordinate.
+ // p+12 is not a valid y-coordinate.
+ // p+13 is not a valid y-coordinate.
+ {
+ "y=p+14,sign+",
+ "fbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0e00000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+14,sign-",
+ "fbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0e00000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+15,sign+",
+ "fcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "0f00000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+15,sign-",
+ "fcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0f00000000000000000000000000000000000000000000000000000000000080",
+ },
+ {
+ "y=p+16,sign+",
+ "fdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "1000000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+16,sign-",
+ "fdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "1000000000000000000000000000000000000000000000000000000000000080",
+ },
+ // p+17 is not a valid y-coordinate.
+ {
+ "y=p+18,sign+",
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f",
+ "1200000000000000000000000000000000000000000000000000000000000000",
+ },
+ {
+ "y=p+18,sign-",
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "1200000000000000000000000000000000000000000000000000000000000080",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ p1, err := new(Point).SetBytes(decodeHex(tt.encoding))
+ if err != nil {
+ t.Fatalf("error decoding non-canonical point: %v", err)
+ }
+ p2, err := new(Point).SetBytes(decodeHex(tt.canonical))
+ if err != nil {
+ t.Fatalf("error decoding canonical point: %v", err)
+ }
+ if p1.Equal(p2) != 1 {
+ t.Errorf("equivalent points are not equal: %v, %v", p1, p2)
+ }
+ if encoding := hex.EncodeToString(p1.Bytes()); encoding != tt.canonical {
+ t.Errorf("re-encoding does not match canonical; got %q, expected %q", encoding, tt.canonical)
+ }
+ checkOnCurve(t, p1, p2)
+ })
+ }
+}
+
+var testAllocationsSink byte
+
+func TestAllocations(t *testing.T) {
+ testenv.SkipIfOptimizationOff(t)
+
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := NewIdentityPoint()
+ p.Add(p, NewGeneratorPoint())
+ s := NewScalar()
+ testAllocationsSink ^= s.Bytes()[0]
+ testAllocationsSink ^= p.Bytes()[0]
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1v", allocs)
+ }
+}
+
+func decodeHex(s string) []byte {
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+func BenchmarkEncodingDecoding(b *testing.B) {
+ p := new(Point).Set(dalekScalarBasepoint)
+ for i := 0; i < b.N; i++ {
+ buf := p.Bytes()
+ _, err := p.SetBytes(buf)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go b/src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go
new file mode 100644
index 0000000..411399c
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/_asm/fe_amd64_asm.go
@@ -0,0 +1,294 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+
+ . "github.com/mmcloughlin/avo/build"
+ . "github.com/mmcloughlin/avo/gotypes"
+ . "github.com/mmcloughlin/avo/operand"
+ . "github.com/mmcloughlin/avo/reg"
+)
+
+//go:generate go run . -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field
+
+func main() {
+ Package("crypto/internal/edwards25519/field")
+ ConstraintExpr("amd64,gc,!purego")
+ feMul()
+ feSquare()
+ Generate()
+}
+
+type namedComponent struct {
+ Component
+ name string
+}
+
+func (c namedComponent) String() string { return c.name }
+
+type uint128 struct {
+ name string
+ hi, lo GPVirtual
+}
+
+func (c uint128) String() string { return c.name }
+
+func feSquare() {
+ TEXT("feSquare", NOSPLIT, "func(out, a *Element)")
+ Doc("feSquare sets out = a * a. It works like feSquareGeneric.")
+ Pragma("noescape")
+
+ a := Dereference(Param("a"))
+ l0 := namedComponent{a.Field("l0"), "l0"}
+ l1 := namedComponent{a.Field("l1"), "l1"}
+ l2 := namedComponent{a.Field("l2"), "l2"}
+ l3 := namedComponent{a.Field("l3"), "l3"}
+ l4 := namedComponent{a.Field("l4"), "l4"}
+
+ // r0 = l0×l0 + 19×2×(l1×l4 + l2×l3)
+ r0 := uint128{"r0", GP64(), GP64()}
+ mul64(r0, 1, l0, l0)
+ addMul64(r0, 38, l1, l4)
+ addMul64(r0, 38, l2, l3)
+
+ // r1 = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+ r1 := uint128{"r1", GP64(), GP64()}
+ mul64(r1, 2, l0, l1)
+ addMul64(r1, 38, l2, l4)
+ addMul64(r1, 19, l3, l3)
+
+ // r2 = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+ r2 := uint128{"r2", GP64(), GP64()}
+ mul64(r2, 2, l0, l2)
+ addMul64(r2, 1, l1, l1)
+ addMul64(r2, 38, l3, l4)
+
+ // r3 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+ r3 := uint128{"r3", GP64(), GP64()}
+ mul64(r3, 2, l0, l3)
+ addMul64(r3, 2, l1, l2)
+ addMul64(r3, 19, l4, l4)
+
+ // r4 = 2×l0×l4 + 2×l1×l3 + l2×l2
+ r4 := uint128{"r4", GP64(), GP64()}
+ mul64(r4, 2, l0, l4)
+ addMul64(r4, 2, l1, l3)
+ addMul64(r4, 1, l2, l2)
+
+ Comment("First reduction chain")
+ maskLow51Bits := GP64()
+ MOVQ(Imm((1<<51)-1), maskLow51Bits)
+ c0, r0lo := shiftRightBy51(&r0)
+ c1, r1lo := shiftRightBy51(&r1)
+ c2, r2lo := shiftRightBy51(&r2)
+ c3, r3lo := shiftRightBy51(&r3)
+ c4, r4lo := shiftRightBy51(&r4)
+ maskAndAdd(r0lo, maskLow51Bits, c4, 19)
+ maskAndAdd(r1lo, maskLow51Bits, c0, 1)
+ maskAndAdd(r2lo, maskLow51Bits, c1, 1)
+ maskAndAdd(r3lo, maskLow51Bits, c2, 1)
+ maskAndAdd(r4lo, maskLow51Bits, c3, 1)
+
+ Comment("Second reduction chain (carryPropagate)")
+ // c0 = r0 >> 51
+ MOVQ(r0lo, c0)
+ SHRQ(Imm(51), c0)
+ // c1 = r1 >> 51
+ MOVQ(r1lo, c1)
+ SHRQ(Imm(51), c1)
+ // c2 = r2 >> 51
+ MOVQ(r2lo, c2)
+ SHRQ(Imm(51), c2)
+ // c3 = r3 >> 51
+ MOVQ(r3lo, c3)
+ SHRQ(Imm(51), c3)
+ // c4 = r4 >> 51
+ MOVQ(r4lo, c4)
+ SHRQ(Imm(51), c4)
+ maskAndAdd(r0lo, maskLow51Bits, c4, 19)
+ maskAndAdd(r1lo, maskLow51Bits, c0, 1)
+ maskAndAdd(r2lo, maskLow51Bits, c1, 1)
+ maskAndAdd(r3lo, maskLow51Bits, c2, 1)
+ maskAndAdd(r4lo, maskLow51Bits, c3, 1)
+
+ Comment("Store output")
+ out := Dereference(Param("out"))
+ Store(r0lo, out.Field("l0"))
+ Store(r1lo, out.Field("l1"))
+ Store(r2lo, out.Field("l2"))
+ Store(r3lo, out.Field("l3"))
+ Store(r4lo, out.Field("l4"))
+
+ RET()
+}
+
+func feMul() {
+ TEXT("feMul", NOSPLIT, "func(out, a, b *Element)")
+ Doc("feMul sets out = a * b. It works like feMulGeneric.")
+ Pragma("noescape")
+
+ a := Dereference(Param("a"))
+ a0 := namedComponent{a.Field("l0"), "a0"}
+ a1 := namedComponent{a.Field("l1"), "a1"}
+ a2 := namedComponent{a.Field("l2"), "a2"}
+ a3 := namedComponent{a.Field("l3"), "a3"}
+ a4 := namedComponent{a.Field("l4"), "a4"}
+
+ b := Dereference(Param("b"))
+ b0 := namedComponent{b.Field("l0"), "b0"}
+ b1 := namedComponent{b.Field("l1"), "b1"}
+ b2 := namedComponent{b.Field("l2"), "b2"}
+ b3 := namedComponent{b.Field("l3"), "b3"}
+ b4 := namedComponent{b.Field("l4"), "b4"}
+
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ r0 := uint128{"r0", GP64(), GP64()}
+ mul64(r0, 1, a0, b0)
+ addMul64(r0, 19, a1, b4)
+ addMul64(r0, 19, a2, b3)
+ addMul64(r0, 19, a3, b2)
+ addMul64(r0, 19, a4, b1)
+
+ // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+ r1 := uint128{"r1", GP64(), GP64()}
+ mul64(r1, 1, a0, b1)
+ addMul64(r1, 1, a1, b0)
+ addMul64(r1, 19, a2, b4)
+ addMul64(r1, 19, a3, b3)
+ addMul64(r1, 19, a4, b2)
+
+ // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+ r2 := uint128{"r2", GP64(), GP64()}
+ mul64(r2, 1, a0, b2)
+ addMul64(r2, 1, a1, b1)
+ addMul64(r2, 1, a2, b0)
+ addMul64(r2, 19, a3, b4)
+ addMul64(r2, 19, a4, b3)
+
+ // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+ r3 := uint128{"r3", GP64(), GP64()}
+ mul64(r3, 1, a0, b3)
+ addMul64(r3, 1, a1, b2)
+ addMul64(r3, 1, a2, b1)
+ addMul64(r3, 1, a3, b0)
+ addMul64(r3, 19, a4, b4)
+
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ r4 := uint128{"r4", GP64(), GP64()}
+ mul64(r4, 1, a0, b4)
+ addMul64(r4, 1, a1, b3)
+ addMul64(r4, 1, a2, b2)
+ addMul64(r4, 1, a3, b1)
+ addMul64(r4, 1, a4, b0)
+
+ Comment("First reduction chain")
+ maskLow51Bits := GP64()
+ MOVQ(Imm((1<<51)-1), maskLow51Bits)
+ c0, r0lo := shiftRightBy51(&r0)
+ c1, r1lo := shiftRightBy51(&r1)
+ c2, r2lo := shiftRightBy51(&r2)
+ c3, r3lo := shiftRightBy51(&r3)
+ c4, r4lo := shiftRightBy51(&r4)
+ maskAndAdd(r0lo, maskLow51Bits, c4, 19)
+ maskAndAdd(r1lo, maskLow51Bits, c0, 1)
+ maskAndAdd(r2lo, maskLow51Bits, c1, 1)
+ maskAndAdd(r3lo, maskLow51Bits, c2, 1)
+ maskAndAdd(r4lo, maskLow51Bits, c3, 1)
+
+ Comment("Second reduction chain (carryPropagate)")
+ // c0 = r0 >> 51
+ MOVQ(r0lo, c0)
+ SHRQ(Imm(51), c0)
+ // c1 = r1 >> 51
+ MOVQ(r1lo, c1)
+ SHRQ(Imm(51), c1)
+ // c2 = r2 >> 51
+ MOVQ(r2lo, c2)
+ SHRQ(Imm(51), c2)
+ // c3 = r3 >> 51
+ MOVQ(r3lo, c3)
+ SHRQ(Imm(51), c3)
+ // c4 = r4 >> 51
+ MOVQ(r4lo, c4)
+ SHRQ(Imm(51), c4)
+ maskAndAdd(r0lo, maskLow51Bits, c4, 19)
+ maskAndAdd(r1lo, maskLow51Bits, c0, 1)
+ maskAndAdd(r2lo, maskLow51Bits, c1, 1)
+ maskAndAdd(r3lo, maskLow51Bits, c2, 1)
+ maskAndAdd(r4lo, maskLow51Bits, c3, 1)
+
+ Comment("Store output")
+ out := Dereference(Param("out"))
+ Store(r0lo, out.Field("l0"))
+ Store(r1lo, out.Field("l1"))
+ Store(r2lo, out.Field("l2"))
+ Store(r3lo, out.Field("l3"))
+ Store(r4lo, out.Field("l4"))
+
+ RET()
+}
+
+// mul64 sets r to i * aX * bX.
+func mul64(r uint128, i int, aX, bX namedComponent) {
+ switch i {
+ case 1:
+ Comment(fmt.Sprintf("%s = %s×%s", r, aX, bX))
+ Load(aX, RAX)
+ case 2:
+ Comment(fmt.Sprintf("%s = 2×%s×%s", r, aX, bX))
+ Load(aX, RAX)
+ SHLQ(Imm(1), RAX)
+ default:
+ panic("unsupported i value")
+ }
+ MULQ(mustAddr(bX)) // RDX, RAX = RAX * bX
+ MOVQ(RAX, r.lo)
+ MOVQ(RDX, r.hi)
+}
+
+// addMul64 sets r to r + i * aX * bX.
+func addMul64(r uint128, i uint64, aX, bX namedComponent) {
+ switch i {
+ case 1:
+ Comment(fmt.Sprintf("%s += %s×%s", r, aX, bX))
+ Load(aX, RAX)
+ default:
+ Comment(fmt.Sprintf("%s += %d×%s×%s", r, i, aX, bX))
+ IMUL3Q(Imm(i), Load(aX, GP64()), RAX)
+ }
+ MULQ(mustAddr(bX)) // RDX, RAX = RAX * bX
+ ADDQ(RAX, r.lo)
+ ADCQ(RDX, r.hi)
+}
+
+// shiftRightBy51 returns r >> 51 and r.lo.
+//
+// After this function is called, the uint128 may not be used anymore.
+func shiftRightBy51(r *uint128) (out, lo GPVirtual) {
+ out = r.hi
+ lo = r.lo
+ SHLQ(Imm(64-51), r.lo, r.hi)
+ r.lo, r.hi = nil, nil // make sure the uint128 is unusable
+ return
+}
+
+// maskAndAdd sets r = r&mask + c*i.
+func maskAndAdd(r, mask, c GPVirtual, i uint64) {
+ ANDQ(mask, r)
+ if i != 1 {
+ IMUL3Q(Imm(i), c, c)
+ }
+ ADDQ(c, r)
+}
+
+func mustAddr(c Component) Op {
+ b, err := c.Resolve()
+ if err != nil {
+ panic(err)
+ }
+ return b.Addr
+}
diff --git a/src/crypto/internal/edwards25519/field/_asm/go.mod b/src/crypto/internal/edwards25519/field/_asm/go.mod
new file mode 100644
index 0000000..1ce2b5e
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/_asm/go.mod
@@ -0,0 +1,12 @@
+module asm
+
+go 1.19
+
+require github.com/mmcloughlin/avo v0.4.0
+
+require (
+ golang.org/x/mod v0.4.2 // indirect
+ golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021 // indirect
+ golang.org/x/tools v0.1.7 // indirect
+ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+)
diff --git a/src/crypto/internal/edwards25519/field/_asm/go.sum b/src/crypto/internal/edwards25519/field/_asm/go.sum
new file mode 100644
index 0000000..b4b5914
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/_asm/go.sum
@@ -0,0 +1,32 @@
+github.com/mmcloughlin/avo v0.4.0 h1:jeHDRktVD+578ULxWpQHkilor6pkdLF7u7EiTzDbfcU=
+github.com/mmcloughlin/avo v0.4.0/go.mod h1:RW9BfYA3TgO9uCdNrKU2h6J8cPD8ZLznvfgHAeszb1s=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021 h1:giLT+HuUP/gXYrG2Plg9WTjj4qhfgaW424ZIFog3rlk=
+golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/src/crypto/internal/edwards25519/field/fe.go b/src/crypto/internal/edwards25519/field/fe.go
new file mode 100644
index 0000000..5518ef2
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe.go
@@ -0,0 +1,420 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package field implements fast arithmetic modulo 2^255-19.
+package field
+
+import (
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+// Element represents an element of the field GF(2^255-19). Note that this
+// is not a cryptographically secure group, and should only be used to interact
+// with edwards25519.Point coordinates.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Element struct {
+ // An element t represents the integer
+ // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
+ //
+ // Between operations, all limbs are expected to be lower than 2^52.
+ l0 uint64
+ l1 uint64
+ l2 uint64
+ l3 uint64
+ l4 uint64
+}
+
+const maskLow51Bits uint64 = (1 << 51) - 1
+
+var feZero = &Element{0, 0, 0, 0, 0}
+
+// Zero sets v = 0, and returns v.
+func (v *Element) Zero() *Element {
+ *v = *feZero
+ return v
+}
+
+var feOne = &Element{1, 0, 0, 0, 0}
+
+// One sets v = 1, and returns v.
+func (v *Element) One() *Element {
+ *v = *feOne
+ return v
+}
+
+// reduce reduces v modulo 2^255 - 19 and returns it.
+func (v *Element) reduce() *Element {
+ v.carryPropagate()
+
+ // After the light reduction we now have a field element representation
+ // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
+
+ // If v >= 2^255 - 19, then v + 19 >= 2^255, which doesn't fit in 255
+ // bits and generates a carry. That is, c will be 0 if v < 2^255 - 19,
+ // and 1 otherwise.
+ c := (v.l0 + 19) >> 51
+ c = (v.l1 + c) >> 51
+ c = (v.l2 + c) >> 51
+ c = (v.l3 + c) >> 51
+ c = (v.l4 + c) >> 51
+
+ // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
+ // effectively applying the reduction identity to the carry.
+ v.l0 += 19 * c
+
+ v.l1 += v.l0 >> 51
+ v.l0 = v.l0 & maskLow51Bits
+ v.l2 += v.l1 >> 51
+ v.l1 = v.l1 & maskLow51Bits
+ v.l3 += v.l2 >> 51
+ v.l2 = v.l2 & maskLow51Bits
+ v.l4 += v.l3 >> 51
+ v.l3 = v.l3 & maskLow51Bits
+ // no additional carry
+ v.l4 = v.l4 & maskLow51Bits
+
+ return v
+}
+
+// Add sets v = a + b, and returns v.
+func (v *Element) Add(a, b *Element) *Element {
+ v.l0 = a.l0 + b.l0
+ v.l1 = a.l1 + b.l1
+ v.l2 = a.l2 + b.l2
+ v.l3 = a.l3 + b.l3
+ v.l4 = a.l4 + b.l4
+ // Using the generic implementation here is actually faster than the
+ // assembly, probably because the body of this function is so simple
+ // that the compiler can figure out better optimizations by inlining
+ // the carry propagation.
+ return v.carryPropagateGeneric()
+}
+
+// Subtract sets v = a - b, and returns v.
+func (v *Element) Subtract(a, b *Element) *Element {
+ // We first add 2 * p, to guarantee the subtraction won't underflow, and
+ // then subtract b (which can be up to 2^255 + 2^13 * 19).
+ v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
+ v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
+ v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
+ v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
+ v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
+ return v.carryPropagate()
+}
+
+// Negate sets v = -a, and returns v.
+func (v *Element) Negate(a *Element) *Element {
+ return v.Subtract(feZero, a)
+}
+
+// Invert sets v = 1/z mod p, and returns v.
+//
+// If z == 0, Invert returns v = 0.
+func (v *Element) Invert(z *Element) *Element {
+ // Inversion is implemented as exponentiation with exponent p − 2. It uses the
+ // same sequence of 255 squarings and 11 multiplications as [Curve25519].
+ var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
+
+ z2.Square(z) // 2
+ t.Square(&z2) // 4
+ t.Square(&t) // 8
+ z9.Multiply(&t, z) // 9
+ z11.Multiply(&z9, &z2) // 11
+ t.Square(&z11) // 22
+ z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
+
+ t.Square(&z2_5_0) // 2^6 - 2^1
+ for i := 0; i < 4; i++ {
+ t.Square(&t) // 2^10 - 2^5
+ }
+ z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
+
+ t.Square(&z2_10_0) // 2^11 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^20 - 2^10
+ }
+ z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
+
+ t.Square(&z2_20_0) // 2^21 - 2^1
+ for i := 0; i < 19; i++ {
+ t.Square(&t) // 2^40 - 2^20
+ }
+ t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
+
+ t.Square(&t) // 2^41 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^50 - 2^10
+ }
+ z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
+
+ t.Square(&z2_50_0) // 2^51 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^100 - 2^50
+ }
+ z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
+
+ t.Square(&z2_100_0) // 2^101 - 2^1
+ for i := 0; i < 99; i++ {
+ t.Square(&t) // 2^200 - 2^100
+ }
+ t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
+
+ t.Square(&t) // 2^201 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^250 - 2^50
+ }
+ t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
+
+ t.Square(&t) // 2^251 - 2^1
+ t.Square(&t) // 2^252 - 2^2
+ t.Square(&t) // 2^253 - 2^3
+ t.Square(&t) // 2^254 - 2^4
+ t.Square(&t) // 2^255 - 2^5
+
+ return v.Multiply(&t, &z11) // 2^255 - 21
+}
+
+// Set sets v = a, and returns v.
+func (v *Element) Set(a *Element) *Element {
+ *v = *a
+ return v
+}
+
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid field element input size")
+ }
+
+ // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+ v.l0 = binary.LittleEndian.Uint64(x[0:8])
+ v.l0 &= maskLow51Bits
+ // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+ v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+ v.l1 &= maskLow51Bits
+ // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+ v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+ v.l2 &= maskLow51Bits
+ // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+ v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+ v.l3 &= maskLow51Bits
+ // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Note: not bytes 25:33, shift 4, to avoid overread.
+ v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+ v.l4 &= maskLow51Bits
+
+ return v, nil
+}
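
As a concrete illustration of the lax decoding rules, a test-style sketch (not part of this patch) showing that the non-canonical encoding of p itself is accepted by SetBytes and reduced to the canonical encoding of 0 by Bytes:

package field

import (
	"encoding/hex"
	"testing"
)

// TestNonCanonicalZero decodes 2^255-19 (little-endian: ed ff .. ff 7f)
// and checks that it re-encodes as zero.
func TestNonCanonicalZero(t *testing.T) {
	p, _ := hex.DecodeString("edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f")
	v, err := new(Element).SetBytes(p)
	if err != nil {
		t.Fatal(err)
	}
	want := "0000000000000000000000000000000000000000000000000000000000000000"
	if got := hex.EncodeToString(v.Bytes()); got != want {
		t.Errorf("got %s, want %s", got, want)
	}
}
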
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [32]byte
+ return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+ t := *v
+ t.reduce()
+
+ var buf [8]byte
+ for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+ bitsOffset := i * 51
+ binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+ for i, bb := range buf {
+ off := bitsOffset/8 + i
+ if off >= len(out) {
+ break
+ }
+ out[off] |= bb
+ }
+ }
+
+ return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+ sa, sv := u.Bytes(), v.Bytes()
+ return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+ m := mask64Bits(cond)
+ v.l0 = (m & a.l0) | (^m & b.l0)
+ v.l1 = (m & a.l1) | (^m & b.l1)
+ v.l2 = (m & a.l2) | (^m & b.l2)
+ v.l3 = (m & a.l3) | (^m & b.l3)
+ v.l4 = (m & a.l4) | (^m & b.l4)
+ return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0.
+func (v *Element) Swap(u *Element, cond int) {
+ m := mask64Bits(cond)
+ t := m & (v.l0 ^ u.l0)
+ v.l0 ^= t
+ u.l0 ^= t
+ t = m & (v.l1 ^ u.l1)
+ v.l1 ^= t
+ u.l1 ^= t
+ t = m & (v.l2 ^ u.l2)
+ v.l2 ^= t
+ u.l2 ^= t
+ t = m & (v.l3 ^ u.l3)
+ v.l3 ^= t
+ u.l3 ^= t
+ t = m & (v.l4 ^ u.l4)
+ v.l4 ^= t
+ u.l4 ^= t
+}
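
A short test-style sketch of Swap's two branches; the cond == 1 path exchanges the elements via masked XORs, without any data-dependent branching:

package field

import "testing"

// TestSwapSketch exercises both branches of the constant-time swap.
func TestSwapSketch(t *testing.T) {
	a, b := new(Element).One(), new(Element).Zero()
	a.Swap(b, 0) // cond == 0: no-op
	if a.Equal(new(Element).One()) != 1 {
		t.Error("Swap with cond == 0 modified its operands")
	}
	a.Swap(b, 1) // cond == 1: contents exchanged
	if a.Equal(new(Element).Zero()) != 1 || b.Equal(new(Element).One()) != 1 {
		t.Error("Swap with cond == 1 did not exchange its operands")
	}
}
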
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+ return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+ return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+ feMul(v, x, y)
+ return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+ feSquare(v, x)
+ return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element {
+ x0lo, x0hi := mul51(x.l0, y)
+ x1lo, x1hi := mul51(x.l1, y)
+ x2lo, x2hi := mul51(x.l2, y)
+ x3lo, x3hi := mul51(x.l3, y)
+ x4lo, x4hi := mul51(x.l4, y)
+ v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
+ v.l1 = x1lo + x0hi
+ v.l2 = x2lo + x1hi
+ v.l3 = x3lo + x2hi
+ v.l4 = x4lo + x3hi
+ // The hi portions are going to be only 32 bits, plus any previous excess,
+ // so we can skip the carry propagation.
+ return v
+}
+
+// mul51 returns lo + hi * 2⁵¹ = a * b.
+func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
+ mh, ml := bits.Mul64(a, uint64(b))
+ lo = ml & maskLow51Bits
+ hi = (mh << 13) | (ml >> 51)
+ return
+}
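
Mult32 relies on mul51 and the reduction identity 2^255 ≡ 19 (mod p) to fold the top carry into l0. A sketch (again not part of this patch) cross-checking it against the general multiplication:

package field

import "testing"

// TestMult32Sketch cross-checks Mult32 against Multiply for a small
// scalar.
func TestMult32Sketch(t *testing.T) {
	x := new(Element).Add(feOne, feOne) // x = 2
	b := make([]byte, 32)
	b[0] = 19
	y, _ := new(Element).SetBytes(b) // y = 19 as a field element
	want := new(Element).Multiply(x, y)
	if got := new(Element).Mult32(x, 19); got.Equal(want) != 1 {
		t.Error("Mult32(x, 19) != x * 19")
	}
}
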
+
+// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element {
+ var t0, t1, t2 Element
+
+ t0.Square(x) // x^2
+ t1.Square(&t0) // x^4
+ t1.Square(&t1) // x^8
+ t1.Multiply(x, &t1) // x^9
+ t0.Multiply(&t0, &t1) // x^11
+ t0.Square(&t0) // x^22
+ t0.Multiply(&t1, &t0) // x^31
+ t1.Square(&t0) // x^62
+ for i := 1; i < 5; i++ { // x^992
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
+ t1.Square(&t0) // 2^11 - 2
+ for i := 1; i < 10; i++ { // 2^20 - 2^10
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^20 - 1
+ t2.Square(&t1) // 2^21 - 2
+ for i := 1; i < 20; i++ { // 2^40 - 2^20
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^40 - 1
+ t1.Square(&t1) // 2^41 - 2
+ for i := 1; i < 10; i++ { // 2^50 - 2^10
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^50 - 1
+ t1.Square(&t0) // 2^51 - 2
+ for i := 1; i < 50; i++ { // 2^100 - 2^50
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^100 - 1
+ t2.Square(&t1) // 2^101 - 2
+ for i := 1; i < 100; i++ { // 2^200 - 2^100
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^200 - 1
+ t1.Square(&t1) // 2^201 - 2
+ for i := 1; i < 50; i++ { // 2^250 - 2^50
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^250 - 1
+ t0.Square(&t0) // 2^251 - 2
+ t0.Square(&t0) // 2^252 - 4
+ return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
+}
+
+// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
+var sqrtM1 = &Element{1718705420411056, 234908883556509,
+ 2233514472574048, 2117202627021982, 765476049583133}
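
The claim about sqrtM1 is easy to verify with the operations above; a test-style sketch:

package field

import "testing"

// TestSqrtM1Squared verifies that sqrtM1² == -1 in GF(2^255-19).
func TestSqrtM1Squared(t *testing.T) {
	sq := new(Element).Square(sqrtM1)
	minusOne := new(Element).Negate(feOne)
	if sq.Equal(minusOne) != 1 {
		t.Error("sqrtM1^2 != -1")
	}
}
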
+
+// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
+//
+// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
+// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
+// and returns r and 0.
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+ t0 := new(Element)
+
+ // r = (u * v3) * (u * v7)^((p-5)/8)
+ v2 := new(Element).Square(v)
+ uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+ uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+ rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
+
+ check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
+
+ uNeg := new(Element).Negate(u)
+ correctSignSqrt := check.Equal(u)
+ flippedSignSqrt := check.Equal(uNeg)
+ flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
+
+ rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
+ // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
+ rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
+
+ r.Absolute(rr) // Choose the nonnegative square root.
+ return r, correctSignSqrt | flippedSignSqrt
+}
diff --git a/src/crypto/internal/edwards25519/field/fe_alias_test.go b/src/crypto/internal/edwards25519/field/fe_alias_test.go
new file mode 100644
index 0000000..bf1efdc
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_alias_test.go
@@ -0,0 +1,140 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import (
+ "testing"
+ "testing/quick"
+)
+
+func checkAliasingOneArg(f func(v, x *Element) *Element) func(v, x Element) bool {
+ return func(v, x Element) bool {
+ x1, v1 := x, x
+
+ // Calculate a reference f(x) without aliasing.
+ if out := f(&v, &x); out != &v && isInBounds(out) {
+ return false
+ }
+
+ // Test aliasing the argument and the receiver.
+ if out := f(&v1, &v1); out != &v1 || v1 != v {
+ return false
+ }
+
+ // Ensure the argument was not modified.
+ return x == x1
+ }
+}
+
+func checkAliasingTwoArgs(f func(v, x, y *Element) *Element) func(v, x, y Element) bool {
+ return func(v, x, y Element) bool {
+ x1, y1, v1 := x, y, Element{}
+
+ // Calculate a reference f(x, y) without aliasing.
+ if out := f(&v, &x, &y); out != &v && isInBounds(out) {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &y); out != &v1 || v1 != v {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = y
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v {
+ return false
+ }
+
+ // Calculate a reference f(x, x) without aliasing.
+ if out := f(&v, &x, &x); out != &v {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &x); out != &v1 || v1 != v {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = x
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v {
+ return false
+ }
+ // Test aliasing both arguments and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &v1); out != &v1 || v1 != v {
+ return false
+ }
+
+ // Ensure the arguments were not modified.
+ return x == x1 && y == y1
+ }
+}
+
+// TestAliasing checks that receivers and arguments can alias each other without
+// leading to incorrect results. That is, it ensures that it's safe to write
+//
+// v.Invert(v)
+//
+// or
+//
+// v.Add(v, v)
+//
+// without any of the inputs getting clobbered by the output being written.
+func TestAliasing(t *testing.T) {
+ type target struct {
+ name string
+ oneArgF func(v, x *Element) *Element
+ twoArgsF func(v, x, y *Element) *Element
+ }
+ for _, tt := range []target{
+ {name: "Absolute", oneArgF: (*Element).Absolute},
+ {name: "Invert", oneArgF: (*Element).Invert},
+ {name: "Negate", oneArgF: (*Element).Negate},
+ {name: "Set", oneArgF: (*Element).Set},
+ {name: "Square", oneArgF: (*Element).Square},
+ {name: "Pow22523", oneArgF: (*Element).Pow22523},
+ {
+ name: "Mult32",
+ oneArgF: func(v, x *Element) *Element {
+ return v.Mult32(x, 0xffffffff)
+ },
+ },
+ {name: "Multiply", twoArgsF: (*Element).Multiply},
+ {name: "Add", twoArgsF: (*Element).Add},
+ {name: "Subtract", twoArgsF: (*Element).Subtract},
+ {
+ name: "SqrtRatio",
+ twoArgsF: func(v, x, y *Element) *Element {
+ r, _ := v.SqrtRatio(x, y)
+ return r
+ },
+ },
+ {
+ name: "Select0",
+ twoArgsF: func(v, x, y *Element) *Element {
+ return v.Select(x, y, 0)
+ },
+ },
+ {
+ name: "Select1",
+ twoArgsF: func(v, x, y *Element) *Element {
+ return v.Select(x, y, 1)
+ },
+ },
+ } {
+ var err error
+ switch {
+ case tt.oneArgF != nil:
+ err = quick.Check(checkAliasingOneArg(tt.oneArgF), &quick.Config{MaxCountScale: 1 << 8})
+ case tt.twoArgsF != nil:
+ err = quick.Check(checkAliasingTwoArgs(tt.twoArgsF), &quick.Config{MaxCountScale: 1 << 8})
+ }
+ if err != nil {
+ t.Errorf("%v: %v", tt.name, err)
+ }
+ }
+}
diff --git a/src/crypto/internal/edwards25519/field/fe_amd64.go b/src/crypto/internal/edwards25519/field/fe_amd64.go
new file mode 100644
index 0000000..70c5416
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_amd64.go
@@ -0,0 +1,15 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+
+package field
+
+// feMul sets out = a * b. It works like feMulGeneric.
+//
+//go:noescape
+func feMul(out *Element, a *Element, b *Element)
+
+// feSquare sets out = a * a. It works like feSquareGeneric.
+//
+//go:noescape
+func feSquare(out *Element, a *Element)
diff --git a/src/crypto/internal/edwards25519/field/fe_amd64.s b/src/crypto/internal/edwards25519/field/fe_amd64.s
new file mode 100644
index 0000000..60817ac
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_amd64.s
@@ -0,0 +1,378 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+
+#include "textflag.h"
+
+// func feMul(out *Element, a *Element, b *Element)
+TEXT ·feMul(SB), NOSPLIT, $0-24
+ MOVQ a+8(FP), CX
+ MOVQ b+16(FP), BX
+
+ // r0 = a0×b0
+ MOVQ (CX), AX
+ MULQ (BX)
+ MOVQ AX, DI
+ MOVQ DX, SI
+
+ // r0 += 19×a1×b4
+ MOVQ 8(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a2×b3
+ MOVQ 16(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a3×b2
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 16(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a4×b1
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 8(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r1 = a0×b1
+ MOVQ (CX), AX
+ MULQ 8(BX)
+ MOVQ AX, R9
+ MOVQ DX, R8
+
+ // r1 += a1×b0
+ MOVQ 8(CX), AX
+ MULQ (BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a2×b4
+ MOVQ 16(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a3×b3
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a4×b2
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 16(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r2 = a0×b2
+ MOVQ (CX), AX
+ MULQ 16(BX)
+ MOVQ AX, R11
+ MOVQ DX, R10
+
+ // r2 += a1×b1
+ MOVQ 8(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += a2×b0
+ MOVQ 16(CX), AX
+ MULQ (BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += 19×a3×b4
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += 19×a4×b3
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r3 = a0×b3
+ MOVQ (CX), AX
+ MULQ 24(BX)
+ MOVQ AX, R13
+ MOVQ DX, R12
+
+ // r3 += a1×b2
+ MOVQ 8(CX), AX
+ MULQ 16(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += a2×b1
+ MOVQ 16(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += a3×b0
+ MOVQ 24(CX), AX
+ MULQ (BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += 19×a4×b4
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r4 = a0×b4
+ MOVQ (CX), AX
+ MULQ 32(BX)
+ MOVQ AX, R15
+ MOVQ DX, R14
+
+ // r4 += a1×b3
+ MOVQ 8(CX), AX
+ MULQ 24(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a2×b2
+ MOVQ 16(CX), AX
+ MULQ 16(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a3×b1
+ MOVQ 24(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a4×b0
+ MOVQ 32(CX), AX
+ MULQ (BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // First reduction chain
+ MOVQ $0x0007ffffffffffff, AX
+ SHLQ $0x0d, DI, SI
+ SHLQ $0x0d, R9, R8
+ SHLQ $0x0d, R11, R10
+ SHLQ $0x0d, R13, R12
+ SHLQ $0x0d, R15, R14
+ ANDQ AX, DI
+ IMUL3Q $0x13, R14, R14
+ ADDQ R14, DI
+ ANDQ AX, R9
+ ADDQ SI, R9
+ ANDQ AX, R11
+ ADDQ R8, R11
+ ANDQ AX, R13
+ ADDQ R10, R13
+ ANDQ AX, R15
+ ADDQ R12, R15
+
+ // Second reduction chain (carryPropagate)
+ MOVQ DI, SI
+ SHRQ $0x33, SI
+ MOVQ R9, R8
+ SHRQ $0x33, R8
+ MOVQ R11, R10
+ SHRQ $0x33, R10
+ MOVQ R13, R12
+ SHRQ $0x33, R12
+ MOVQ R15, R14
+ SHRQ $0x33, R14
+ ANDQ AX, DI
+ IMUL3Q $0x13, R14, R14
+ ADDQ R14, DI
+ ANDQ AX, R9
+ ADDQ SI, R9
+ ANDQ AX, R11
+ ADDQ R8, R11
+ ANDQ AX, R13
+ ADDQ R10, R13
+ ANDQ AX, R15
+ ADDQ R12, R15
+
+ // Store output
+ MOVQ out+0(FP), AX
+ MOVQ DI, (AX)
+ MOVQ R9, 8(AX)
+ MOVQ R11, 16(AX)
+ MOVQ R13, 24(AX)
+ MOVQ R15, 32(AX)
+ RET
+
+// func feSquare(out *Element, a *Element)
+TEXT ·feSquare(SB), NOSPLIT, $0-16
+ MOVQ a+8(FP), CX
+
+ // r0 = l0×l0
+ MOVQ (CX), AX
+ MULQ (CX)
+ MOVQ AX, SI
+ MOVQ DX, BX
+
+ // r0 += 38×l1×l4
+ MOVQ 8(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, SI
+ ADCQ DX, BX
+
+ // r0 += 38×l2×l3
+ MOVQ 16(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, SI
+ ADCQ DX, BX
+
+ // r1 = 2×l0×l1
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 8(CX)
+ MOVQ AX, R8
+ MOVQ DX, DI
+
+ // r1 += 38×l2×l4
+ MOVQ 16(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R8
+ ADCQ DX, DI
+
+ // r1 += 19×l3×l3
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, R8
+ ADCQ DX, DI
+
+ // r2 = 2×l0×l2
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 16(CX)
+ MOVQ AX, R10
+ MOVQ DX, R9
+
+ // r2 += l1×l1
+ MOVQ 8(CX), AX
+ MULQ 8(CX)
+ ADDQ AX, R10
+ ADCQ DX, R9
+
+ // r2 += 38×l3×l4
+ MOVQ 24(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R10
+ ADCQ DX, R9
+
+ // r3 = 2×l0×l3
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 24(CX)
+ MOVQ AX, R12
+ MOVQ DX, R11
+
+ // r3 += 2×l1×l2
+ MOVQ 8(CX), AX
+ IMUL3Q $0x02, AX, AX
+ MULQ 16(CX)
+ ADDQ AX, R12
+ ADCQ DX, R11
+
+ // r3 += 19×l4×l4
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R12
+ ADCQ DX, R11
+
+ // r4 = 2×l0×l4
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 32(CX)
+ MOVQ AX, R14
+ MOVQ DX, R13
+
+ // r4 += 2×l1×l3
+ MOVQ 8(CX), AX
+ IMUL3Q $0x02, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, R14
+ ADCQ DX, R13
+
+ // r4 += l2×l2
+ MOVQ 16(CX), AX
+ MULQ 16(CX)
+ ADDQ AX, R14
+ ADCQ DX, R13
+
+ // First reduction chain
+ MOVQ $0x0007ffffffffffff, AX
+ SHLQ $0x0d, SI, BX
+ SHLQ $0x0d, R8, DI
+ SHLQ $0x0d, R10, R9
+ SHLQ $0x0d, R12, R11
+ SHLQ $0x0d, R14, R13
+ ANDQ AX, SI
+ IMUL3Q $0x13, R13, R13
+ ADDQ R13, SI
+ ANDQ AX, R8
+ ADDQ BX, R8
+ ANDQ AX, R10
+ ADDQ DI, R10
+ ANDQ AX, R12
+ ADDQ R9, R12
+ ANDQ AX, R14
+ ADDQ R11, R14
+
+ // Second reduction chain (carryPropagate)
+ MOVQ SI, BX
+ SHRQ $0x33, BX
+ MOVQ R8, DI
+ SHRQ $0x33, DI
+ MOVQ R10, R9
+ SHRQ $0x33, R9
+ MOVQ R12, R11
+ SHRQ $0x33, R11
+ MOVQ R14, R13
+ SHRQ $0x33, R13
+ ANDQ AX, SI
+ IMUL3Q $0x13, R13, R13
+ ADDQ R13, SI
+ ANDQ AX, R8
+ ADDQ BX, R8
+ ANDQ AX, R10
+ ADDQ DI, R10
+ ANDQ AX, R12
+ ADDQ R9, R12
+ ANDQ AX, R14
+ ADDQ R11, R14
+
+ // Store output
+ MOVQ out+0(FP), AX
+ MOVQ SI, (AX)
+ MOVQ R8, 8(AX)
+ MOVQ R10, 16(AX)
+ MOVQ R12, 24(AX)
+ MOVQ R14, 32(AX)
+ RET
diff --git a/src/crypto/internal/edwards25519/field/fe_amd64_noasm.go b/src/crypto/internal/edwards25519/field/fe_amd64_noasm.go
new file mode 100644
index 0000000..9da280d
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_amd64_noasm.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || !gc || purego
+
+package field
+
+func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
+
+func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/src/crypto/internal/edwards25519/field/fe_arm64.go b/src/crypto/internal/edwards25519/field/fe_arm64.go
new file mode 100644
index 0000000..075fe9b
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_arm64.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+
+package field
+
+//go:noescape
+func carryPropagate(v *Element)
+
+func (v *Element) carryPropagate() *Element {
+ carryPropagate(v)
+ return v
+}
diff --git a/src/crypto/internal/edwards25519/field/fe_arm64.s b/src/crypto/internal/edwards25519/field/fe_arm64.s
new file mode 100644
index 0000000..751ab2a
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_arm64.s
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+
+#include "textflag.h"
+
+// carryPropagate works exactly like carryPropagateGeneric and uses the
+// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
+// avoids loading R0-R4 twice and uses LDP and STP.
+//
+// See https://golang.org/issues/43145 for the main compiler issue.
+//
+// func carryPropagate(v *Element)
+TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
+ MOVD v+0(FP), R20
+
+ LDP 0(R20), (R0, R1)
+ LDP 16(R20), (R2, R3)
+ MOVD 32(R20), R4
+
+ AND $0x7ffffffffffff, R0, R10
+ AND $0x7ffffffffffff, R1, R11
+ AND $0x7ffffffffffff, R2, R12
+ AND $0x7ffffffffffff, R3, R13
+ AND $0x7ffffffffffff, R4, R14
+
+ ADD R0>>51, R11, R11
+ ADD R1>>51, R12, R12
+ ADD R2>>51, R13, R13
+ ADD R3>>51, R14, R14
+ // R4>>51 * 19 + R10 -> R10
+ LSR $51, R4, R21
+ MOVD $19, R22
+ MADD R22, R10, R21, R10
+
+ STP (R10, R11), 0(R20)
+ STP (R12, R13), 16(R20)
+ MOVD R14, 32(R20)
+
+ RET
diff --git a/src/crypto/internal/edwards25519/field/fe_arm64_noasm.go b/src/crypto/internal/edwards25519/field/fe_arm64_noasm.go
new file mode 100644
index 0000000..fc029ac
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_arm64_noasm.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 || !gc || purego
+
+package field
+
+func (v *Element) carryPropagate() *Element {
+ return v.carryPropagateGeneric()
+}
diff --git a/src/crypto/internal/edwards25519/field/fe_bench_test.go b/src/crypto/internal/edwards25519/field/fe_bench_test.go
new file mode 100644
index 0000000..84fdf05
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_bench_test.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "testing"
+
+func BenchmarkAdd(b *testing.B) {
+ x := new(Element).One()
+ y := new(Element).Add(x, x)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Add(x, y)
+ }
+}
+
+func BenchmarkMultiply(b *testing.B) {
+ x := new(Element).One()
+ y := new(Element).Add(x, x)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Multiply(x, y)
+ }
+}
+
+func BenchmarkSquare(b *testing.B) {
+ x := new(Element).Add(feOne, feOne)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Square(x)
+ }
+}
+
+func BenchmarkInvert(b *testing.B) {
+ x := new(Element).Add(feOne, feOne)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Invert(x)
+ }
+}
+
+func BenchmarkMult32(b *testing.B) {
+ x := new(Element).One()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ x.Mult32(x, 0xaa42aa42)
+ }
+}
diff --git a/src/crypto/internal/edwards25519/field/fe_generic.go b/src/crypto/internal/edwards25519/field/fe_generic.go
new file mode 100644
index 0000000..d6667b2
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_generic.go
@@ -0,0 +1,266 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "math/bits"
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+ lo, hi uint64
+}
+
+// mul64 returns a * b.
+func mul64(a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ return uint128{lo, hi}
+}
+
+// addMul64 returns v + a * b.
+func addMul64(v uint128, a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ lo, c := bits.Add64(lo, v.lo, 0)
+ hi, _ = bits.Add64(hi, v.hi, c)
+ return uint128{lo, hi}
+}
+
+// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
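+// (That bound keeps a.hi below 2⁵¹, so the left shift by 64-51 = 13 bits
+// below cannot discard anything.)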
+func shiftRightBy51(a uint128) uint64 {
+ return (a.hi << (64 - 51)) | (a.lo >> 51)
+}
+
+func feMulGeneric(v, a, b *Element) {
+ a0 := a.l0
+ a1 := a.l1
+ a2 := a.l2
+ a3 := a.l3
+ a4 := a.l4
+
+ b0 := b.l0
+ b1 := b.l1
+ b2 := b.l2
+ b3 := b.l3
+ b4 := b.l4
+
+ // Limb multiplication works like pen-and-paper columnar multiplication, but
+ // with 51-bit limbs instead of digits.
+ //
+ // a4 a3 a2 a1 a0 x
+ // b4 b3 b2 b1 b0 =
+ // ------------------------
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a4b1 a3b1 a2b1 a1b1 a0b1 +
+ // a4b2 a3b2 a2b2 a1b2 a0b2 +
+ // a4b3 a3b3 a2b3 a1b3 a0b3 +
+ // a4b4 a3b4 a2b4 a1b4 a0b4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
+ // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
+ // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
+ //
+	// Reduction can be carried out simultaneously with the multiplication. For
+ // example, we do not compute r5: whenever the result of a multiplication
+ // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
+ //
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
+ // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
+ // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
+ // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // Finally we add up the columns into wide, overlapping limbs.
+
+ a1_19 := a1 * 19
+ a2_19 := a2 * 19
+ a3_19 := a3 * 19
+ a4_19 := a4 * 19
+
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ r0 := mul64(a0, b0)
+ r0 = addMul64(r0, a1_19, b4)
+ r0 = addMul64(r0, a2_19, b3)
+ r0 = addMul64(r0, a3_19, b2)
+ r0 = addMul64(r0, a4_19, b1)
+
+ // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+ r1 := mul64(a0, b1)
+ r1 = addMul64(r1, a1, b0)
+ r1 = addMul64(r1, a2_19, b4)
+ r1 = addMul64(r1, a3_19, b3)
+ r1 = addMul64(r1, a4_19, b2)
+
+ // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+ r2 := mul64(a0, b2)
+ r2 = addMul64(r2, a1, b1)
+ r2 = addMul64(r2, a2, b0)
+ r2 = addMul64(r2, a3_19, b4)
+ r2 = addMul64(r2, a4_19, b3)
+
+ // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+ r3 := mul64(a0, b3)
+ r3 = addMul64(r3, a1, b2)
+ r3 = addMul64(r3, a2, b1)
+ r3 = addMul64(r3, a3, b0)
+ r3 = addMul64(r3, a4_19, b4)
+
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ r4 := mul64(a0, b4)
+ r4 = addMul64(r4, a1, b3)
+ r4 = addMul64(r4, a2, b2)
+ r4 = addMul64(r4, a3, b1)
+ r4 = addMul64(r4, a4, b0)
+
+ // After the multiplication, we need to reduce (carry) the five coefficients
+ // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
+ // to respect the Element invariant.
+ //
+ // Overall, the reduction works the same as carryPropagate, except with
+ // wider inputs: we take the carry for each coefficient by shifting it right
+ // by 51, and add it to the limb above it. The top carry is multiplied by 19
+ // according to the reduction identity and added to the lowest limb.
+ //
+ // The largest coefficient (r0) will be at most 111 bits, which guarantees
+ // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
+ //
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
+ // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
+ // r0 < 2⁷ × 2⁵² × 2⁵²
+ // r0 < 2¹¹¹
+ //
+ // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
+ // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
+ // allows us to easily apply the reduction identity.
+ //
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ // r4 < 5 × 2⁵² × 2⁵²
+ // r4 < 2¹⁰⁷
+ //
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ // Now all coefficients fit into 64-bit registers but are still too large to
+	// be passed around as an Element. We therefore do one last carry chain,
+ // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+func feSquareGeneric(v, a *Element) {
+ l0 := a.l0
+ l1 := a.l1
+ l2 := a.l2
+ l3 := a.l3
+ l4 := a.l4
+
+ // Squaring works precisely like multiplication above, but thanks to its
+ // symmetry we get to group a few terms together.
+ //
+ // l4 l3 l2 l1 l0 x
+ // l4 l3 l2 l1 l0 =
+ // ------------------------
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l4l1 l3l1 l2l1 l1l1 l0l1 +
+ // l4l2 l3l2 l2l2 l1l2 l0l2 +
+ // l4l3 l3l3 l2l3 l1l3 l0l3 +
+ // l4l4 l3l4 l2l4 l1l4 l0l4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
+ // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
+ // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
+ // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
+ // only three Mul64 and four Add64, instead of five and eight.
+
+ l0_2 := l0 * 2
+ l1_2 := l1 * 2
+
+ l1_38 := l1 * 38
+ l2_38 := l2 * 38
+ l3_38 := l3 * 38
+
+ l3_19 := l3 * 19
+ l4_19 := l4 * 19
+
+ // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
+ r0 := mul64(l0, l0)
+ r0 = addMul64(r0, l1_38, l4)
+ r0 = addMul64(r0, l2_38, l3)
+
+ // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+ r1 := mul64(l0_2, l1)
+ r1 = addMul64(r1, l2_38, l4)
+ r1 = addMul64(r1, l3_19, l3)
+
+ // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+ r2 := mul64(l0_2, l2)
+ r2 = addMul64(r2, l1, l1)
+ r2 = addMul64(r2, l3_38, l4)
+
+ // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+ r3 := mul64(l0_2, l3)
+ r3 = addMul64(r3, l1_2, l2)
+ r3 = addMul64(r3, l4_19, l4)
+
+ // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
+ r4 := mul64(l0_2, l4)
+ r4 = addMul64(r4, l1_2, l3)
+ r4 = addMul64(r4, l2, l2)
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
+// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
+func (v *Element) carryPropagateGeneric() *Element {
+ c0 := v.l0 >> 51
+ c1 := v.l1 >> 51
+ c2 := v.l2 >> 51
+ c3 := v.l3 >> 51
+ c4 := v.l4 >> 51
+
+ // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+ // the final l0 will be at most 52 bits. Similarly for the rest.
+ v.l0 = v.l0&maskLow51Bits + c4*19
+ v.l1 = v.l1&maskLow51Bits + c0
+ v.l2 = v.l2&maskLow51Bits + c1
+ v.l3 = v.l3&maskLow51Bits + c2
+ v.l4 = v.l4&maskLow51Bits + c3
+
+ return v
+}
diff --git a/src/crypto/internal/edwards25519/field/fe_test.go b/src/crypto/internal/edwards25519/field/fe_test.go
new file mode 100644
index 0000000..945a024
--- /dev/null
+++ b/src/crypto/internal/edwards25519/field/fe_test.go
@@ -0,0 +1,560 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "io"
+ "math/big"
+ "math/bits"
+ mathrand "math/rand"
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+func (v Element) String() string {
+ return hex.EncodeToString(v.Bytes())
+}
+
+// quickCheckConfig1024 will make each quickcheck test run (1024 * -quickchecks)
+// times. The default value of -quickchecks is 100.
+var quickCheckConfig1024 = &quick.Config{MaxCountScale: 1 << 10}
+
+func generateFieldElement(rand *mathrand.Rand) Element {
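+	// Generate limbs of up to 52 bits: the Element invariant allows limbs
+	// slightly above 2⁵¹, so tests should exercise that headroom too.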
+ const maskLow52Bits = (1 << 52) - 1
+ return Element{
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ rand.Uint64() & maskLow52Bits,
+ }
+}
+
+// weirdLimbs can be combined to generate a range of edge-case field elements.
+// 0 and -1 are intentionally weighted more heavily, as they combine well.
+var (
+ weirdLimbs51 = []uint64{
+ 0, 0, 0, 0,
+ 1,
+ 19 - 1,
+ 19,
+ 0x2aaaaaaaaaaaa,
+ 0x5555555555555,
+ (1 << 51) - 20,
+ (1 << 51) - 19,
+ (1 << 51) - 1, (1 << 51) - 1,
+ (1 << 51) - 1, (1 << 51) - 1,
+ }
+ weirdLimbs52 = []uint64{
+ 0, 0, 0, 0, 0, 0,
+ 1,
+ 19 - 1,
+ 19,
+ 0x2aaaaaaaaaaaa,
+ 0x5555555555555,
+ (1 << 51) - 20,
+ (1 << 51) - 19,
+ (1 << 51) - 1, (1 << 51) - 1,
+ (1 << 51) - 1, (1 << 51) - 1,
+ (1 << 51) - 1, (1 << 51) - 1,
+ 1 << 51,
+ (1 << 51) + 1,
+ (1 << 52) - 19,
+ (1 << 52) - 1,
+ }
+)
+
+func generateWeirdFieldElement(rand *mathrand.Rand) Element {
+ return Element{
+ weirdLimbs52[rand.Intn(len(weirdLimbs52))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ weirdLimbs51[rand.Intn(len(weirdLimbs51))],
+ }
+}
+
+func (Element) Generate(rand *mathrand.Rand, size int) reflect.Value {
+ if rand.Intn(2) == 0 {
+ return reflect.ValueOf(generateWeirdFieldElement(rand))
+ }
+ return reflect.ValueOf(generateFieldElement(rand))
+}
+
+// isInBounds returns whether the element is within the expected bit size bounds
+// after a light reduction.
+func isInBounds(x *Element) bool {
+ return bits.Len64(x.l0) <= 52 &&
+ bits.Len64(x.l1) <= 52 &&
+ bits.Len64(x.l2) <= 52 &&
+ bits.Len64(x.l3) <= 52 &&
+ bits.Len64(x.l4) <= 52
+}
+
+func TestMultiplyDistributesOverAdd(t *testing.T) {
+ multiplyDistributesOverAdd := func(x, y, z Element) bool {
+ // Compute t1 = (x+y)*z
+ t1 := new(Element)
+ t1.Add(&x, &y)
+ t1.Multiply(t1, &z)
+
+ // Compute t2 = x*z + y*z
+ t2 := new(Element)
+ t3 := new(Element)
+ t2.Multiply(&x, &z)
+ t3.Multiply(&y, &z)
+ t2.Add(t2, t3)
+
+ return t1.Equal(t2) == 1 && isInBounds(t1) && isInBounds(t2)
+ }
+
+ if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestMul64to128(t *testing.T) {
+ a := uint64(5)
+ b := uint64(5)
+ r := mul64(a, b)
+ if r.lo != 0x19 || r.hi != 0 {
+ t.Errorf("lo-range wide mult failed, got %d + %d*(2**64)", r.lo, r.hi)
+ }
+
+ a = uint64(18014398509481983) // 2^54 - 1
+ b = uint64(18014398509481983) // 2^54 - 1
+ r = mul64(a, b)
+ if r.lo != 0xff80000000000001 || r.hi != 0xfffffffffff {
+ t.Errorf("hi-range wide mult failed, got %d + %d*(2**64)", r.lo, r.hi)
+ }
+
+ a = uint64(1125899906842661)
+ b = uint64(2097155)
+ r = mul64(a, b)
+ r = addMul64(r, a, b)
+ r = addMul64(r, a, b)
+ r = addMul64(r, a, b)
+ r = addMul64(r, a, b)
+ if r.lo != 16888498990613035 || r.hi != 640 {
+ t.Errorf("wrong answer: %d + %d*(2**64)", r.lo, r.hi)
+ }
+}
+
+func TestSetBytesRoundTrip(t *testing.T) {
+ f1 := func(in [32]byte, fe Element) bool {
+ fe.SetBytes(in[:])
+
+		// Mask the most significant bit, as it's ignored by SetBytes. (Done
+		// now instead of earlier so we also check that the masking in
+		// SetBytes is working.)
+ in[len(in)-1] &= (1 << 7) - 1
+
+ return bytes.Equal(in[:], fe.Bytes()) && isInBounds(&fe)
+ }
+ if err := quick.Check(f1, nil); err != nil {
+ t.Errorf("failed bytes->FE->bytes round-trip: %v", err)
+ }
+
+ f2 := func(fe, r Element) bool {
+ r.SetBytes(fe.Bytes())
+
+		// Intentionally not using Equal, to avoid going through Bytes again.
+ // Calling reduce because both Generate and SetBytes can produce
+ // non-canonical representations.
+ fe.reduce()
+ r.reduce()
+ return fe == r
+ }
+ if err := quick.Check(f2, nil); err != nil {
+ t.Errorf("failed FE->bytes->FE round-trip: %v", err)
+ }
+
+ // Check some fixed vectors from dalek
+ type feRTTest struct {
+ fe Element
+ b []byte
+ }
+ var tests = []feRTTest{
+ {
+ fe: Element{358744748052810, 1691584618240980, 977650209285361, 1429865912637724, 560044844278676},
+ b: []byte{74, 209, 69, 197, 70, 70, 161, 222, 56, 226, 229, 19, 112, 60, 25, 92, 187, 74, 222, 56, 50, 153, 51, 233, 40, 74, 57, 6, 160, 185, 213, 31},
+ },
+ {
+ fe: Element{84926274344903, 473620666599931, 365590438845504, 1028470286882429, 2146499180330972},
+ b: []byte{199, 23, 106, 112, 61, 77, 216, 79, 186, 60, 11, 118, 13, 16, 103, 15, 42, 32, 83, 250, 44, 57, 204, 198, 78, 199, 253, 119, 146, 172, 3, 122},
+ },
+ }
+
+ for _, tt := range tests {
+ b := tt.fe.Bytes()
+ fe, _ := new(Element).SetBytes(tt.b)
+ if !bytes.Equal(b, tt.b) || fe.Equal(&tt.fe) != 1 {
+ t.Errorf("Failed fixed roundtrip: %v", tt)
+ }
+ }
+}
+
+func swapEndianness(buf []byte) []byte {
+ for i := 0; i < len(buf)/2; i++ {
+ buf[i], buf[len(buf)-i-1] = buf[len(buf)-i-1], buf[i]
+ }
+ return buf
+}
+
+func TestBytesBigEquivalence(t *testing.T) {
+ f1 := func(in [32]byte, fe, fe1 Element) bool {
+ fe.SetBytes(in[:])
+
+ in[len(in)-1] &= (1 << 7) - 1 // mask the most significant bit
+ b := new(big.Int).SetBytes(swapEndianness(in[:]))
+ fe1.fromBig(b)
+
+ if fe != fe1 {
+ return false
+ }
+
+ buf := make([]byte, 32)
+ buf = swapEndianness(fe1.toBig().FillBytes(buf))
+
+ return bytes.Equal(fe.Bytes(), buf) && isInBounds(&fe) && isInBounds(&fe1)
+ }
+ if err := quick.Check(f1, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+// fromBig sets v = n, and returns v. The bit length of n must not exceed 256.
+func (v *Element) fromBig(n *big.Int) *Element {
+ if n.BitLen() > 32*8 {
+ panic("edwards25519: invalid field element input size")
+ }
+
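+	// Copy n's platform-sized words out as little-endian bytes, stopping
+	// once the 32-byte capacity is reached.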
+ buf := make([]byte, 0, 32)
+ for _, word := range n.Bits() {
+ for i := 0; i < bits.UintSize; i += 8 {
+ if len(buf) >= cap(buf) {
+ break
+ }
+ buf = append(buf, byte(word))
+ word >>= 8
+ }
+ }
+
+ v.SetBytes(buf[:32])
+ return v
+}
+
+func (v *Element) fromDecimal(s string) *Element {
+ n, ok := new(big.Int).SetString(s, 10)
+ if !ok {
+ panic("not a valid decimal: " + s)
+ }
+ return v.fromBig(n)
+}
+
+// toBig returns v as a big.Int.
+func (v *Element) toBig() *big.Int {
+ buf := v.Bytes()
+
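+	// Pack the little-endian bytes into platform-sized big.Word values.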
+ words := make([]big.Word, 32*8/bits.UintSize)
+ for n := range words {
+ for i := 0; i < bits.UintSize; i += 8 {
+ if len(buf) == 0 {
+ break
+ }
+ words[n] |= big.Word(buf[0]) << big.Word(i)
+ buf = buf[1:]
+ }
+ }
+
+ return new(big.Int).SetBits(words)
+}
+
+func TestDecimalConstants(t *testing.T) {
+ sqrtM1String := "19681161376707505956807079304988542015446066515923890162744021073123829784752"
+ if exp := new(Element).fromDecimal(sqrtM1String); sqrtM1.Equal(exp) != 1 {
+ t.Errorf("sqrtM1 is %v, expected %v", sqrtM1, exp)
+ }
+ // d is in the parent package, and we don't want to expose d or fromDecimal.
+ // dString := "37095705934669439343138083508754565189542113879843219016388785533085940283555"
+ // if exp := new(Element).fromDecimal(dString); d.Equal(exp) != 1 {
+ // t.Errorf("d is %v, expected %v", d, exp)
+ // }
+}
+
+func TestSetBytesRoundTripEdgeCases(t *testing.T) {
+ // TODO: values close to 0, close to 2^255-19, between 2^255-19 and 2^255-1,
+ // and between 2^255 and 2^256-1. Test both the documented SetBytes
+ // behavior, and that Bytes reduces them.
+}
+
+// Tests self-consistency between Multiply and Square.
+func TestConsistency(t *testing.T) {
+ var x Element
+ var x2, x2sq Element
+
+ x = Element{1, 1, 1, 1, 1}
+ x2.Multiply(&x, &x)
+ x2sq.Square(&x)
+
+ if x2 != x2sq {
+ t.Fatalf("all ones failed\nmul: %x\nsqr: %x\n", x2, x2sq)
+ }
+
+ var bytes [32]byte
+
+ _, err := io.ReadFull(rand.Reader, bytes[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+ x.SetBytes(bytes[:])
+
+ x2.Multiply(&x, &x)
+ x2sq.Square(&x)
+
+ if x2 != x2sq {
+ t.Fatalf("all ones failed\nmul: %x\nsqr: %x\n", x2, x2sq)
+ }
+}
+
+func TestEqual(t *testing.T) {
+ x := Element{1, 1, 1, 1, 1}
+ y := Element{5, 4, 3, 2, 1}
+
+ eq := x.Equal(&x)
+ if eq != 1 {
+ t.Errorf("wrong about equality")
+ }
+
+ eq = x.Equal(&y)
+ if eq != 0 {
+ t.Errorf("wrong about inequality")
+ }
+}
+
+func TestInvert(t *testing.T) {
+ x := Element{1, 1, 1, 1, 1}
+ one := Element{1, 0, 0, 0, 0}
+ var xinv, r Element
+
+ xinv.Invert(&x)
+ r.Multiply(&x, &xinv)
+ r.reduce()
+
+ if one != r {
+ t.Errorf("inversion identity failed, got: %x", r)
+ }
+
+ var bytes [32]byte
+
+ _, err := io.ReadFull(rand.Reader, bytes[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+ x.SetBytes(bytes[:])
+
+ xinv.Invert(&x)
+ r.Multiply(&x, &xinv)
+ r.reduce()
+
+ if one != r {
+ t.Errorf("random inversion identity failed, got: %x for field element %x", r, x)
+ }
+
+ zero := Element{}
+ x.Set(&zero)
+ if xx := xinv.Invert(&x); xx != &xinv {
+ t.Errorf("inverting zero did not return the receiver")
+ } else if xinv.Equal(&zero) != 1 {
+ t.Errorf("inverting zero did not return zero")
+ }
+}
+
+func TestSelectSwap(t *testing.T) {
+ a := Element{358744748052810, 1691584618240980, 977650209285361, 1429865912637724, 560044844278676}
+ b := Element{84926274344903, 473620666599931, 365590438845504, 1028470286882429, 2146499180330972}
+
+ var c, d Element
+
+ c.Select(&a, &b, 1)
+ d.Select(&a, &b, 0)
+
+ if c.Equal(&a) != 1 || d.Equal(&b) != 1 {
+ t.Errorf("Select failed")
+ }
+
+ c.Swap(&d, 0)
+
+ if c.Equal(&a) != 1 || d.Equal(&b) != 1 {
+ t.Errorf("Swap failed")
+ }
+
+ c.Swap(&d, 1)
+
+ if c.Equal(&b) != 1 || d.Equal(&a) != 1 {
+ t.Errorf("Swap failed")
+ }
+}
+
+func TestMult32(t *testing.T) {
+ mult32EquivalentToMul := func(x Element, y uint32) bool {
+ t1 := new(Element)
+ for i := 0; i < 100; i++ {
+ t1.Mult32(&x, y)
+ }
+
+ ty := new(Element)
+ ty.l0 = uint64(y)
+
+ t2 := new(Element)
+ for i := 0; i < 100; i++ {
+ t2.Multiply(&x, ty)
+ }
+
+ return t1.Equal(t2) == 1 && isInBounds(t1) && isInBounds(t2)
+ }
+
+ if err := quick.Check(mult32EquivalentToMul, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSqrtRatio(t *testing.T) {
+ // From draft-irtf-cfrg-ristretto255-decaf448-00, Appendix A.4.
+ type test struct {
+ u, v string
+ wasSquare int
+ r string
+ }
+ var tests = []test{
+ // If u is 0, the function is defined to return (0, TRUE), even if v
+ // is zero. Note that where used in this package, the denominator v
+ // is never zero.
+ {
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 1, "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ // 0/1 == 0²
+ {
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ 1, "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ // If u is non-zero and v is zero, defined to return (0, FALSE).
+ {
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0, "0000000000000000000000000000000000000000000000000000000000000000",
+ },
+ // 2/1 is not square in this field.
+ {
+ "0200000000000000000000000000000000000000000000000000000000000000",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ 0, "3c5ff1b5d8e4113b871bd052f9e7bcd0582804c266ffb2d4f4203eb07fdb7c54",
+ },
+ // 4/1 == 2²
+ {
+ "0400000000000000000000000000000000000000000000000000000000000000",
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ 1, "0200000000000000000000000000000000000000000000000000000000000000",
+ },
+ // 1/4 == (2⁻¹)² == (2^(p-2))² per Euler's theorem
+ {
+ "0100000000000000000000000000000000000000000000000000000000000000",
+ "0400000000000000000000000000000000000000000000000000000000000000",
+ 1, "f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3f",
+ },
+ }
+
+ for i, tt := range tests {
+ u, _ := new(Element).SetBytes(decodeHex(tt.u))
+ v, _ := new(Element).SetBytes(decodeHex(tt.v))
+ want, _ := new(Element).SetBytes(decodeHex(tt.r))
+ got, wasSquare := new(Element).SqrtRatio(u, v)
+ if got.Equal(want) == 0 || wasSquare != tt.wasSquare {
+ t.Errorf("%d: got (%v, %v), want (%v, %v)", i, got, wasSquare, want, tt.wasSquare)
+ }
+ }
+}
+
+func TestCarryPropagate(t *testing.T) {
+ asmLikeGeneric := func(a [5]uint64) bool {
+ t1 := &Element{a[0], a[1], a[2], a[3], a[4]}
+ t2 := &Element{a[0], a[1], a[2], a[3], a[4]}
+
+ t1.carryPropagate()
+ t2.carryPropagateGeneric()
+
+ if *t1 != *t2 {
+ t.Logf("got: %#v,\nexpected: %#v", t1, t2)
+ }
+
+ return *t1 == *t2 && isInBounds(t2)
+ }
+
+ if err := quick.Check(asmLikeGeneric, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+
+ if !asmLikeGeneric([5]uint64{0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}) {
+ t.Errorf("failed for {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}")
+ }
+}
+
+func TestFeSquare(t *testing.T) {
+ asmLikeGeneric := func(a Element) bool {
+ t1 := a
+ t2 := a
+
+ feSquareGeneric(&t1, &t1)
+ feSquare(&t2, &t2)
+
+ if t1 != t2 {
+ t.Logf("got: %#v,\nexpected: %#v", t1, t2)
+ }
+
+ return t1 == t2 && isInBounds(&t2)
+ }
+
+ if err := quick.Check(asmLikeGeneric, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestFeMul(t *testing.T) {
+ asmLikeGeneric := func(a, b Element) bool {
+ a1 := a
+ a2 := a
+ b1 := b
+ b2 := b
+
+ feMulGeneric(&a1, &a1, &b1)
+ feMul(&a2, &a2, &b2)
+
+ if a1 != a2 || b1 != b2 {
+ t.Logf("got: %#v,\nexpected: %#v", a1, a2)
+ t.Logf("got: %#v,\nexpected: %#v", b1, b2)
+ }
+
+ return a1 == a2 && isInBounds(&a2) &&
+ b1 == b2 && isInBounds(&b2)
+ }
+
+ if err := quick.Check(asmLikeGeneric, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func decodeHex(s string) []byte {
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
diff --git a/src/crypto/internal/edwards25519/scalar.go b/src/crypto/internal/edwards25519/scalar.go
new file mode 100644
index 0000000..d34ecea
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalar.go
@@ -0,0 +1,343 @@
+// Copyright (c) 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "encoding/binary"
+ "errors"
+)
+
+// A Scalar is an integer modulo
+//
+// l = 2^252 + 27742317777372353535851937790883648493
+//
+// which is the prime order of the edwards25519 group.
+//
+// This type works similarly to math/big.Int, and all arguments and
+// receivers are allowed to alias.
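+// For example, s.Multiply(s, s) squares s in place.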
+//
+// The zero value is a valid zero element.
+type Scalar struct {
+ // s is the scalar in the Montgomery domain, in the format of the
+ // fiat-crypto implementation.
+ s fiatScalarMontgomeryDomainFieldElement
+}
+
+// The field implementation in scalar_fiat.go is generated by the fiat-crypto
+// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
+// from a formally verified model.
+//
+// fiat-crypto code comes under the following license.
+//
+// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+// NewScalar returns a new zero Scalar.
+func NewScalar() *Scalar {
+ return &Scalar{}
+}
+
+// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
+// using Multiply and then Add.
+func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
+ // Make a copy of z in case it aliases s.
+ zCopy := new(Scalar).Set(z)
+ return s.Multiply(x, y).Add(s, zCopy)
+}
+
+// Add sets s = x + y mod l, and returns s.
+func (s *Scalar) Add(x, y *Scalar) *Scalar {
+ // s = 1 * x + y mod l
+ fiatScalarAdd(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Subtract sets s = x - y mod l, and returns s.
+func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
+ // s = -1 * y + x mod l
+ fiatScalarSub(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Negate sets s = -x mod l, and returns s.
+func (s *Scalar) Negate(x *Scalar) *Scalar {
+ // s = -1 * x + 0 mod l
+ fiatScalarOpp(&s.s, &x.s)
+ return s
+}
+
+// Multiply sets s = x * y mod l, and returns s.
+func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
+ // s = x * y + 0 mod l
+ fiatScalarMul(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Set sets s = x, and returns s.
+func (s *Scalar) Set(x *Scalar) *Scalar {
+ *s = *x
+ return s
+}
+
+// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
+// If x is not of the right length, SetUniformBytes returns nil and an error,
+// and the receiver is unchanged.
+//
+// SetUniformBytes can be used to set s to a uniformly distributed value given
+// 64 uniformly distributed random bytes.
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
+ }
+
+ // We have a value x of 512 bits, but our fiatScalarFromBytes function
+ // expects an input lower than l, which is a little over 252 bits.
+ //
+ // Instead of writing a reduction function that operates on wider inputs, we
+ // can interpret x as the sum of three shorter values a, b, and c.
+ //
+ // x = a + b * 2^168 + c * 2^336 mod l
+ //
+ // We then precompute 2^168 and 2^336 modulo l, and perform the reduction
+ // with two multiplications and two additions.
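+	//
+	// The split falls on byte boundaries: a is x[:21] (168 bits), b is
+	// x[21:42] (168 bits), and c is x[42:] (176 bits), so each piece is
+	// shorter than 32 bytes and already below l.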
+
+ s.setShortBytes(x[:21])
+ t := new(Scalar).setShortBytes(x[21:42])
+ s.Add(s, t.Multiply(t, scalarTwo168))
+ t.setShortBytes(x[42:])
+ s.Add(s, t.Multiply(t, scalarTwo336))
+
+ return s, nil
+}
+
+// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
+// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
+// in the 2^256 Montgomery domain.
+var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
+ 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
+var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
+ 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
+
+// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
+// than 32 bytes.
+func (s *Scalar) setShortBytes(x []byte) *Scalar {
+ if len(x) >= 32 {
+ panic("edwards25519: internal error: setShortBytes called with a long string")
+ }
+ var buf [32]byte
+ copy(buf[:], x)
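+	// Decode the little-endian bytes, then map the result into the
+	// Montgomery domain.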
+ fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+ return s
+}
+
+// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
+// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
+// returns nil and an error, and the receiver is unchanged.
+func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
+ if len(x) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ if !isReduced(x) {
+ return nil, errors.New("invalid scalar encoding")
+ }
+
+ fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+
+ return s, nil
+}
+
+// scalarMinusOneBytes is l - 1 in little endian.
+var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
+
+// isReduced returns whether the given scalar in 32-byte little endian encoded
+// form is reduced modulo l.
+func isReduced(s []byte) bool {
+ if len(s) != 32 {
+ return false
+ }
+
+ for i := len(s) - 1; i >= 0; i-- {
+ switch {
+ case s[i] > scalarMinusOneBytes[i]:
+ return false
+ case s[i] < scalarMinusOneBytes[i]:
+ return true
+ }
+ }
+ return true
+}
+
+// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
+// Section 5.1.5 (also known as clamping) and sets s to the result. The input
+// must be 32 bytes, and it is not modified. If x is not of the right length,
+// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
+//
+// Note that since Scalar values are always reduced modulo the prime order of
+// the curve, the resulting value will not preserve any of the cofactor-clearing
+// properties that clamping is meant to provide. It will however work as
+// expected as long as it is applied to points on the prime order subgroup, like
+// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
+// irrelevant RFC 7748 clamping, but it is now required for compatibility.
+func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
+ // The description above omits the purpose of the high bits of the clamping
+ // for brevity, but those are also lost to reductions, and are also
+ // irrelevant to edwards25519 as they protect against a specific
+ // implementation bug that was once observed in a generic Montgomery ladder.
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
+ }
+
+ // We need to use the wide reduction from SetUniformBytes, since clamping
+ // sets the 2^254 bit, making the value higher than the order.
+ var wideBytes [64]byte
+ copy(wideBytes[:], x[:])
+ wideBytes[0] &= 248
+ wideBytes[31] &= 63
+ wideBytes[31] |= 64
+ return s.SetUniformBytes(wideBytes[:])
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of s.
+func (s *Scalar) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var encoded [32]byte
+ return s.bytes(&encoded)
+}
+
+func (s *Scalar) bytes(out *[32]byte) []byte {
+ var ss fiatScalarNonMontgomeryDomainFieldElement
+ fiatScalarFromMontgomery(&ss, &s.s)
+ fiatScalarToBytes(out, (*[4]uint64)(&ss))
+ return out[:]
+}
+
+// Equal returns 1 if s and t are equal, and 0 otherwise.
+func (s *Scalar) Equal(t *Scalar) int {
+ var diff fiatScalarMontgomeryDomainFieldElement
+ fiatScalarSub(&diff, &s.s, &t.s)
+ var nonzero uint64
+ fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
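+	// Fold every bit of nonzero into bit 0, so that nonzero&1 is 1 exactly
+	// when diff is non-zero; the final expression inverts that.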
+ nonzero |= nonzero >> 32
+ nonzero |= nonzero >> 16
+ nonzero |= nonzero >> 8
+ nonzero |= nonzero >> 4
+ nonzero |= nonzero >> 2
+ nonzero |= nonzero >> 1
+ return int(^nonzero) & 1
+}
+
+// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
+//
+// w must be between 2 and 8, or nonAdjacentForm will panic.
+func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
+ // This implementation is adapted from the one
+ // in curve25519-dalek and is documented there:
+ // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+ if w < 2 {
+ panic("w must be at least 2 by the definition of NAF")
+ } else if w > 8 {
+ panic("NAF digits must fit in int8")
+ }
+
+ var naf [256]int8
+ var digits [5]uint64
+
+ for i := 0; i < 4; i++ {
+ digits[i] = binary.LittleEndian.Uint64(b[i*8:])
+ }
+
+ width := uint64(1 << w)
+ windowMask := uint64(width - 1)
+
+ pos := uint(0)
+ carry := uint64(0)
+ for pos < 256 {
+ indexU64 := pos / 64
+ indexBit := pos % 64
+ var bitBuf uint64
+ if indexBit < 64-w {
+			// This window's bits are contained in a single uint64
+ bitBuf = digits[indexU64] >> indexBit
+ } else {
+ // Combine the current 64 bits with bits from the next 64
+ bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
+ }
+
+ // Add carry into the current window
+ window := carry + (bitBuf & windowMask)
+
+ if window&1 == 0 {
+ // If the window value is even, preserve the carry and continue.
+ // Why is the carry preserved?
+ // If carry == 0 and window & 1 == 0,
+ // then the next carry should be 0
+ // If carry == 1 and window & 1 == 0,
+			// then bitBuf & 1 == 1, so the next carry should be 1
+ pos += 1
+ continue
+ }
+
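+		// The window is odd: emit it as a signed digit in (-2^(w-1), 2^(w-1)),
+		// carrying 1 into the next window when the digit is negative.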
+ if window < width/2 {
+ carry = 0
+ naf[pos] = int8(window)
+ } else {
+ carry = 1
+ naf[pos] = int8(window) - int8(width)
+ }
+
+ pos += w
+ }
+ return naf
+}
+
+func (s *Scalar) signedRadix16() [64]int8 {
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+
+ var digits [64]int8
+
+ // Compute unsigned radix-16 digits:
+ for i := 0; i < 32; i++ {
+ digits[2*i] = int8(b[i] & 15)
+ digits[2*i+1] = int8((b[i] >> 4) & 15)
+ }
+
+ // Recenter coefficients:
+ for i := 0; i < 63; i++ {
+ carry := (digits[i] + 8) >> 4
+ digits[i] -= carry << 4
+ digits[i+1] += carry
+ }
+
+ return digits
+}
diff --git a/src/crypto/internal/edwards25519/scalar_alias_test.go b/src/crypto/internal/edwards25519/scalar_alias_test.go
new file mode 100644
index 0000000..4d83441
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalar_alias_test.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "testing"
+ "testing/quick"
+)
+
+func TestScalarAliasing(t *testing.T) {
+ checkAliasingOneArg := func(f func(v, x *Scalar) *Scalar, v, x Scalar) bool {
+ x1, v1 := x, x
+
+ // Calculate a reference f(x) without aliasing.
+ if out := f(&v, &x); out != &v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Test aliasing the argument and the receiver.
+ if out := f(&v1, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+		// Ensure the argument was not modified.
+ return x == x1
+ }
+
+ checkAliasingTwoArgs := func(f func(v, x, y *Scalar) *Scalar, v, x, y Scalar) bool {
+ x1, y1, v1 := x, y, Scalar{}
+
+ // Calculate a reference f(x, y) without aliasing.
+ if out := f(&v, &x, &y); out != &v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &y); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = y
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Calculate a reference f(x, x) without aliasing.
+ if out := f(&v, &x, &x); out != &v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Test aliasing the first argument and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &x); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+ // Test aliasing the second argument and the receiver.
+ v1 = x
+ if out := f(&v1, &x, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+ // Test aliasing both arguments and the receiver.
+ v1 = x
+ if out := f(&v1, &v1, &v1); out != &v1 || v1 != v || !isReduced(out.Bytes()) {
+ return false
+ }
+
+ // Ensure the arguments were not modified.
+ return x == x1 && y == y1
+ }
+
+ for name, f := range map[string]interface{}{
+ "Negate": func(v, x Scalar) bool {
+ return checkAliasingOneArg((*Scalar).Negate, v, x)
+ },
+ "Multiply": func(v, x, y Scalar) bool {
+ return checkAliasingTwoArgs((*Scalar).Multiply, v, x, y)
+ },
+ "Add": func(v, x, y Scalar) bool {
+ return checkAliasingTwoArgs((*Scalar).Add, v, x, y)
+ },
+ "Subtract": func(v, x, y Scalar) bool {
+ return checkAliasingTwoArgs((*Scalar).Subtract, v, x, y)
+ },
+ "MultiplyAdd1": func(v, x, y, fixed Scalar) bool {
+ return checkAliasingTwoArgs(func(v, x, y *Scalar) *Scalar {
+ return v.MultiplyAdd(&fixed, x, y)
+ }, v, x, y)
+ },
+ "MultiplyAdd2": func(v, x, y, fixed Scalar) bool {
+ return checkAliasingTwoArgs(func(v, x, y *Scalar) *Scalar {
+ return v.MultiplyAdd(x, &fixed, y)
+ }, v, x, y)
+ },
+ "MultiplyAdd3": func(v, x, y, fixed Scalar) bool {
+ return checkAliasingTwoArgs(func(v, x, y *Scalar) *Scalar {
+ return v.MultiplyAdd(x, y, &fixed)
+ }, v, x, y)
+ },
+ } {
+ err := quick.Check(f, &quick.Config{MaxCountScale: 1 << 5})
+ if err != nil {
+ t.Errorf("%v: %v", name, err)
+ }
+ }
+}
diff --git a/src/crypto/internal/edwards25519/scalar_fiat.go b/src/crypto/internal/edwards25519/scalar_fiat.go
new file mode 100644
index 0000000..2e5782b
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalar_fiat.go
@@ -0,0 +1,1147 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes
+//
+// curve description: Scalar
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes
+//
+// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package edwards25519
+
+import "math/bits"
+
+type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type fiatScalarInt1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarMontgomeryDomainFieldElement [4]uint64
+
+// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarNonMontgomeryDomainFieldElement [4]uint64
+
+// fiatScalarCmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
+
+// fiatScalarMul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ x19 := (uint64(fiatScalarUint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0x1000000000000000)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ x30 := (uint64(fiatScalarUint1(x29)) + x25)
+ var x32 uint64
+ _, x32 = bits.Add64(x11, x26, uint64(0x0))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg2[3])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[2])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[1])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg2[0])
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x48, x45, uint64(0x0))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52)))
+ x55 := (uint64(fiatScalarUint1(x54)) + x42)
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(0x0))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ x76 := (uint64(fiatScalarUint1(x75)) + x71)
+ var x78 uint64
+ _, x78 = bits.Add64(x56, x72, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84)))
+ x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65)))
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg2[3])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg2[2])
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg2[1])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg2[0])
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x95, x92, uint64(0x0))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99)))
+ x102 := (uint64(fiatScalarUint1(x101)) + x89)
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(0x0))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b)
+ var x115 uint64
+ var x116 uint64
+ x116, x115 = bits.Mul64(x113, 0x1000000000000000)
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed)
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x120, x117, uint64(0x0))
+ x123 := (uint64(fiatScalarUint1(x122)) + x118)
+ var x125 uint64
+ _, x125 = bits.Add64(x103, x119, uint64(0x0))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131)))
+ x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112)))
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg2[3])
+ var x137 uint64
+ var x138 uint64
+ x138, x137 = bits.Mul64(x3, arg2[2])
+ var x139 uint64
+ var x140 uint64
+ x140, x139 = bits.Mul64(x3, arg2[1])
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg2[0])
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x142, x139, uint64(0x0))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146)))
+ x149 := (uint64(fiatScalarUint1(x148)) + x136)
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(0x0))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153)))
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155)))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157)))
+ var x160 uint64
+ _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b)
+ var x162 uint64
+ var x163 uint64
+ x163, x162 = bits.Mul64(x160, 0x1000000000000000)
+ var x164 uint64
+ var x165 uint64
+ x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6)
+ var x166 uint64
+ var x167 uint64
+ x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed)
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x167, x164, uint64(0x0))
+ x170 := (uint64(fiatScalarUint1(x169)) + x165)
+ var x172 uint64
+ _, x172 = bits.Add64(x150, x166, uint64(0x0))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178)))
+ x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187)))
+ var x191 uint64
+ _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189)))
+ var x192 uint64
+ fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173)
+ var x193 uint64
+ fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175)
+ var x194 uint64
+ fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177)
+ var x195 uint64
+ fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179)
+ out1[0] = x192
+ out1[1] = x193
+ out1[2] = x194
+ out1[3] = x195
+}
+
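The reduction rounds in fiatScalarMul above all hinge on the constant 0xd2b51da312547e1b: each round multiplies the running low limb by it before adding back multiples of the modulus limbs (0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0x0, 0x1000000000000000). That is the shape of word-by-word Montgomery reduction, which requires m' = -l⁻¹ mod 2^64. A quick math/big check of that property (an editorial sketch, not part of the generated file):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        two64 := new(big.Int).Lsh(big.NewInt(1), 64)
        l0 := new(big.Int).SetUint64(0x5812631a5cf5d3ed)     // lowest limb of the group order l
        mPrime := new(big.Int).SetUint64(0xd2b51da312547e1b) // candidate m' = -l⁻¹ mod 2^64

        // m' is correct iff l0 * m' ≡ -1 (mod 2^64).
        prod := new(big.Int).Mul(l0, mPrime)
        prod.Mod(prod, two64)
        minusOne := new(big.Int).Sub(two64, big.NewInt(1))
        fmt.Println(prod.Cmp(minusOne) == 0) // true
    }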
+// fiatScalarAdd adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1)
+ var x20 uint64
+ fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3)
+ var x21 uint64
+ fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5)
+ var x22 uint64
+ fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
+
+// fiatScalarSub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// fiatScalarOpp negates a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) {
+ x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3])))
+ *out1 = x1
+}
+
+// fiatScalarFromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0xd2b51da312547e1b)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0x1000000000000000)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+ var x13 uint64
+ _, x13 = bits.Add64(x1, x8, uint64(0x0))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0))
+ var x18 uint64
+ _, x18 = bits.Mul64(x16, 0xd2b51da312547e1b)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x18, 0x1000000000000000)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed)
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+ var x29 uint64
+ _, x29 = bits.Add64(x16, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39)))
+ var x42 uint64
+ _, x42 = bits.Mul64(x36, 0xd2b51da312547e1b)
+ var x44 uint64
+ var x45 uint64
+ x45, x44 = bits.Mul64(x42, 0x1000000000000000)
+ var x46 uint64
+ var x47 uint64
+ x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6)
+ var x48 uint64
+ var x49 uint64
+ x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed)
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x49, x46, uint64(0x0))
+ var x53 uint64
+ _, x53 = bits.Add64(x36, x48, uint64(0x0))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53)))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x60, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ var x77 uint64
+ _, x77 = bits.Add64(x60, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77)))
+ var x80 uint64
+ var x81 uint64
+ x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79)))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81)))
+ x84 := (uint64(fiatScalarUint1(x83)) + x69)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90)))
+ var x94 uint64
+ _, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78)
+ var x96 uint64
+ fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80)
+ var x97 uint64
+ fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82)
+ var x98 uint64
+ fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84)
+ out1[0] = x95
+ out1[1] = x96
+ out1[2] = x97
+ out1[3] = x98
+}
+
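Each round of fiatScalarFromMontgomery has the same shape: compute q = t₀·m' mod 2^64, add q·l to the accumulator, and let the now-zero low limb drop off. A minimal sketch of that low-limb cancellation, assuming the m' property checked earlier:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        const l0 = uint64(0x5812631a5cf5d3ed)     // lowest limb of l
        const mPrime = uint64(0xd2b51da312547e1b) // assumed: -l⁻¹ mod 2^64

        t0 := uint64(0x123456789abcdef0) // an arbitrary accumulator low limb
        _, q := bits.Mul64(t0, mPrime)   // q = t0 * m' mod 2^64
        _, lo := bits.Mul64(q, l0)       // low 64 bits of q * l0
        fmt.Println(t0+lo == 0)          // true: the low limb cancels mod 2^64
    }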
+// fiatScalarToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ _, x19 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x19, 0x1000000000000000)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed)
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+ var x30 uint64
+ _, x30 = bits.Add64(x11, x25, uint64(0x0))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d)
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01)
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x44, x41, uint64(0x0))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x31, x43, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56)))
+ var x59 uint64
+ _, x59 = bits.Mul64(x51, 0xd2b51da312547e1b)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x59, 0x1000000000000000)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed)
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x66, x63, uint64(0x0))
+ var x70 uint64
+ _, x70 = bits.Add64(x51, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x84, x81, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x71, x83, uint64(0x0))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96)))
+ var x99 uint64
+ _, x99 = bits.Mul64(x91, 0xd2b51da312547e1b)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x99, 0x1000000000000000)
+ var x103 uint64
+ var x104 uint64
+ x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6)
+ var x105 uint64
+ var x106 uint64
+ x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x106, x103, uint64(0x0))
+ var x110 uint64
+ _, x110 = bits.Add64(x91, x105, uint64(0x0))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x3, 0x399411b7c309a3d)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126)))
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128)))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x111, x123, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136)))
+ var x139 uint64
+ _, x139 = bits.Mul64(x131, 0xd2b51da312547e1b)
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x139, 0x1000000000000000)
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6)
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed)
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x146, x143, uint64(0x0))
+ var x150 uint64
+ _, x150 = bits.Add64(x131, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154)))
+ x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142)
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163)))
+ var x167 uint64
+ _, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165)))
+ var x168 uint64
+ fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151)
+ var x169 uint64
+ fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153)
+ var x170 uint64
+ fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155)
+ var x171 uint64
+ fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157)
+ out1[0] = x168
+ out1[1] = x169
+ out1[2] = x170
+ out1[3] = x171
+}
+
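fiatScalarToMontgomery multiplies the input limbs by the constant with limbs 0xa40611e3449c0f01, 0xd00e1ba768859347, 0xceec73d217f5be65, 0x399411b7c309a3d and then reduces. By the usual Montgomery construction that constant should be R² mod l with R = 2^256, so that a·R²·R⁻¹ = a·R. A hedged cross-check of that reading (the limbs are copied from the code above; that they equal R² mod l is an inference the program verifies rather than assumes):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // l = 2^252 + 27742317777372353535851937790883648493, written in hex.
        l, _ := new(big.Int).SetString("1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed", 16)

        r2 := new(big.Int).Lsh(big.NewInt(1), 512) // R² = (2^256)²
        r2.Mod(r2, l)

        limbs := []uint64{0xa40611e3449c0f01, 0xd00e1ba768859347, 0xceec73d217f5be65, 0x399411b7c309a3d}
        acc := new(big.Int)
        for i := len(limbs) - 1; i >= 0; i-- {
            acc.Lsh(acc, 64)
            acc.Or(acc, new(big.Int).SetUint64(limbs[i]))
        }

        fmt.Println(acc.Cmp(r2) == 0) // expected true, if the R² reading is right
    }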
+// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := uint8((x58 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x53
+ out1[28] = x55
+ out1[29] = x57
+ out1[30] = x59
+ out1[31] = x60
+}
+
+// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
+ x1 := (uint64(arg1[31]) << 56)
+ x2 := (uint64(arg1[30]) << 48)
+ x3 := (uint64(arg1[29]) << 40)
+ x4 := (uint64(arg1[28]) << 32)
+ x5 := (uint64(arg1[27]) << 24)
+ x6 := (uint64(arg1[26]) << 16)
+ x7 := (uint64(arg1[25]) << 8)
+ x8 := arg1[24]
+ x9 := (uint64(arg1[23]) << 56)
+ x10 := (uint64(arg1[22]) << 48)
+ x11 := (uint64(arg1[21]) << 40)
+ x12 := (uint64(arg1[20]) << 32)
+ x13 := (uint64(arg1[19]) << 24)
+ x14 := (uint64(arg1[18]) << 16)
+ x15 := (uint64(arg1[17]) << 8)
+ x16 := arg1[16]
+ x17 := (uint64(arg1[15]) << 56)
+ x18 := (uint64(arg1[14]) << 48)
+ x19 := (uint64(arg1[13]) << 40)
+ x20 := (uint64(arg1[12]) << 32)
+ x21 := (uint64(arg1[11]) << 24)
+ x22 := (uint64(arg1[10]) << 16)
+ x23 := (uint64(arg1[9]) << 8)
+ x24 := arg1[8]
+ x25 := (uint64(arg1[7]) << 56)
+ x26 := (uint64(arg1[6]) << 48)
+ x27 := (uint64(arg1[5]) << 40)
+ x28 := (uint64(arg1[4]) << 32)
+ x29 := (uint64(arg1[3]) << 24)
+ x30 := (uint64(arg1[2]) << 16)
+ x31 := (uint64(arg1[1]) << 8)
+ x32 := arg1[0]
+ x33 := (x31 + uint64(x32))
+ x34 := (x30 + x33)
+ x35 := (x29 + x34)
+ x36 := (x28 + x35)
+ x37 := (x27 + x36)
+ x38 := (x26 + x37)
+ x39 := (x25 + x38)
+ x40 := (x23 + uint64(x24))
+ x41 := (x22 + x40)
+ x42 := (x21 + x41)
+ x43 := (x20 + x42)
+ x44 := (x19 + x43)
+ x45 := (x18 + x44)
+ x46 := (x17 + x45)
+ x47 := (x15 + uint64(x16))
+ x48 := (x14 + x47)
+ x49 := (x13 + x48)
+ x50 := (x12 + x49)
+ x51 := (x11 + x50)
+ x52 := (x10 + x51)
+ x53 := (x9 + x52)
+ x54 := (x7 + uint64(x8))
+ x55 := (x6 + x54)
+ x56 := (x5 + x55)
+ x57 := (x4 + x56)
+ x58 := (x3 + x57)
+ x59 := (x2 + x58)
+ x60 := (x1 + x59)
+ out1[0] = x39
+ out1[1] = x46
+ out1[2] = x53
+ out1[3] = x60
+}
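For reference, the two unrolled serialization routines above are equivalent to plain little-endian packing of four 64-bit limbs. A loop-based sketch (the generated code avoids loops and data-dependent control flow by construction; this is only an editorial restatement):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func toBytes(limbs [4]uint64) (out [32]byte) {
        for i, l := range limbs {
            binary.LittleEndian.PutUint64(out[8*i:], l)
        }
        return
    }

    func fromBytes(b [32]byte) (limbs [4]uint64) {
        for i := range limbs {
            limbs[i] = binary.LittleEndian.Uint64(b[8*i:])
        }
        return
    }

    func main() {
        limbs := [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000}
        fmt.Println(fromBytes(toBytes(limbs)) == limbs) // true
    }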
diff --git a/src/crypto/internal/edwards25519/scalar_test.go b/src/crypto/internal/edwards25519/scalar_test.go
new file mode 100644
index 0000000..67bcdaf
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalar_test.go
@@ -0,0 +1,249 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "bytes"
+ "encoding/hex"
+ "math/big"
+ mathrand "math/rand"
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+var scOneBytes = [32]byte{1}
+var scOne, _ = new(Scalar).SetCanonicalBytes(scOneBytes[:])
+var scMinusOne, _ = new(Scalar).SetCanonicalBytes(scalarMinusOneBytes[:])
+
+// Generate returns a valid (reduced modulo l) Scalar with a distribution
+// weighted towards high, low, and edge values.
+func (Scalar) Generate(rand *mathrand.Rand, size int) reflect.Value {
+ var s [32]byte
+ diceRoll := rand.Intn(100)
+ switch {
+ case diceRoll == 0:
+ case diceRoll == 1:
+ s = scOneBytes
+ case diceRoll == 2:
+ s = scalarMinusOneBytes
+ case diceRoll < 5:
+ // Generate a low scalar in [0, 2^125).
+ rand.Read(s[:16])
+ s[15] &= (1 << 5) - 1
+ case diceRoll < 10:
+ // Generate a high scalar in [2^252, 2^252 + 2^124).
+ s[31] = 1 << 4
+ rand.Read(s[:16])
+ s[15] &= (1 << 4) - 1
+ default:
+ // Generate a valid scalar in [0, l) by returning [0, 2^252) which has a
+ // negligibly different distribution (the former has a 2^-127.6 chance
+ // of being out of the latter range).
+ rand.Read(s[:])
+ s[31] &= (1 << 4) - 1
+ }
+
+ val := Scalar{}
+ fiatScalarFromBytes((*[4]uint64)(&val.s), &s)
+ fiatScalarToMontgomery(&val.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&val.s))
+
+ return reflect.ValueOf(val)
+}
+
+// quickCheckConfig1024 will make each quickcheck test run (1024 * -quickchecks)
+// times. The default value of -quickchecks is 100.
+var quickCheckConfig1024 = &quick.Config{MaxCountScale: 1 << 10}
+
+func TestScalarGenerate(t *testing.T) {
+ f := func(sc Scalar) bool {
+ return isReduced(sc.Bytes())
+ }
+ if err := quick.Check(f, quickCheckConfig1024); err != nil {
+ t.Errorf("generated unreduced scalar: %v", err)
+ }
+}
+
+func TestScalarSetCanonicalBytes(t *testing.T) {
+ f1 := func(in [32]byte, sc Scalar) bool {
+ // Mask out top 4 bits to guarantee value falls in [0, l).
+ in[len(in)-1] &= (1 << 4) - 1
+ if _, err := sc.SetCanonicalBytes(in[:]); err != nil {
+ return false
+ }
+ repr := sc.Bytes()
+ return bytes.Equal(in[:], repr) && isReduced(repr)
+ }
+ if err := quick.Check(f1, quickCheckConfig1024); err != nil {
+ t.Errorf("failed bytes->scalar->bytes round-trip: %v", err)
+ }
+
+ f2 := func(sc1, sc2 Scalar) bool {
+ if _, err := sc2.SetCanonicalBytes(sc1.Bytes()); err != nil {
+ return false
+ }
+ return sc1 == sc2
+ }
+ if err := quick.Check(f2, quickCheckConfig1024); err != nil {
+ t.Errorf("failed scalar->bytes->scalar round-trip: %v", err)
+ }
+
+ b := scalarMinusOneBytes
+ b[31] += 1
+	s := *scOne
+	if out, err := s.SetCanonicalBytes(b[:]); err == nil {
+		t.Errorf("SetCanonicalBytes worked on a non-canonical value")
+	} else if s != *scOne {
+ t.Errorf("SetCanonicalBytes modified its receiver")
+ } else if out != nil {
+ t.Errorf("SetCanonicalBytes did not return nil with an error")
+ }
+}
+
+func TestScalarSetUniformBytes(t *testing.T) {
+ mod, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
+ mod.Add(mod, new(big.Int).Lsh(big.NewInt(1), 252))
+ f := func(in [64]byte, sc Scalar) bool {
+ sc.SetUniformBytes(in[:])
+ repr := sc.Bytes()
+ if !isReduced(repr) {
+ return false
+ }
+ scBig := bigIntFromLittleEndianBytes(repr[:])
+ inBig := bigIntFromLittleEndianBytes(in[:])
+ return inBig.Mod(inBig, mod).Cmp(scBig) == 0
+ }
+ if err := quick.Check(f, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarSetBytesWithClamping(t *testing.T) {
+ // Generated with libsodium.js 1.0.18 crypto_scalarmult_ed25519_base.
+
+ random := "633d368491364dc9cd4c1bf891b1d59460face1644813240a313e61f2c88216e"
+ s, _ := new(Scalar).SetBytesWithClamping(decodeHex(random))
+ p := new(Point).ScalarBaseMult(s)
+ want := "1d87a9026fd0126a5736fe1628c95dd419172b5b618457e041c9c861b2494a94"
+ if got := hex.EncodeToString(p.Bytes()); got != want {
+ t.Errorf("random: got %q, want %q", got, want)
+ }
+
+ zero := "0000000000000000000000000000000000000000000000000000000000000000"
+ s, _ = new(Scalar).SetBytesWithClamping(decodeHex(zero))
+ p = new(Point).ScalarBaseMult(s)
+ want = "693e47972caf527c7883ad1b39822f026f47db2ab0e1919955b8993aa04411d1"
+ if got := hex.EncodeToString(p.Bytes()); got != want {
+ t.Errorf("zero: got %q, want %q", got, want)
+ }
+
+	allOnes := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+	s, _ = new(Scalar).SetBytesWithClamping(decodeHex(allOnes))
+	p = new(Point).ScalarBaseMult(s)
+	want = "12e9a68b73fd5aacdbcaf3e88c46fea6ebedb1aa84eed1842f07f8edab65e3a7"
+	if got := hex.EncodeToString(p.Bytes()); got != want {
+		t.Errorf("all ones: got %q, want %q", got, want)
+ }
+}
+
+func bigIntFromLittleEndianBytes(b []byte) *big.Int {
+ bb := make([]byte, len(b))
+ for i := range b {
+ bb[i] = b[len(b)-i-1]
+ }
+ return new(big.Int).SetBytes(bb)
+}
+
+func TestScalarMultiplyDistributesOverAdd(t *testing.T) {
+ multiplyDistributesOverAdd := func(x, y, z Scalar) bool {
+ // Compute t1 = (x+y)*z
+ var t1 Scalar
+ t1.Add(&x, &y)
+ t1.Multiply(&t1, &z)
+
+ // Compute t2 = x*z + y*z
+ var t2 Scalar
+ var t3 Scalar
+ t2.Multiply(&x, &z)
+ t3.Multiply(&y, &z)
+ t2.Add(&t2, &t3)
+
+ reprT1, reprT2 := t1.Bytes(), t2.Bytes()
+
+ return t1 == t2 && isReduced(reprT1) && isReduced(reprT2)
+ }
+
+ if err := quick.Check(multiplyDistributesOverAdd, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarAddLikeSubNeg(t *testing.T) {
+ addLikeSubNeg := func(x, y Scalar) bool {
+ // Compute t1 = x - y
+ var t1 Scalar
+ t1.Subtract(&x, &y)
+
+ // Compute t2 = -y + x
+ var t2 Scalar
+ t2.Negate(&y)
+ t2.Add(&t2, &x)
+
+ return t1 == t2 && isReduced(t1.Bytes())
+ }
+
+ if err := quick.Check(addLikeSubNeg, quickCheckConfig1024); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarNonAdjacentForm(t *testing.T) {
+ s, _ := (&Scalar{}).SetCanonicalBytes([]byte{
+ 0x1a, 0x0e, 0x97, 0x8a, 0x90, 0xf6, 0x62, 0x2d,
+ 0x37, 0x47, 0x02, 0x3f, 0x8a, 0xd8, 0x26, 0x4d,
+ 0xa7, 0x58, 0xaa, 0x1b, 0x88, 0xe0, 0x40, 0xd1,
+ 0x58, 0x9e, 0x7b, 0x7f, 0x23, 0x76, 0xef, 0x09,
+ })
+
+ expectedNaf := [256]int8{
+ 0, 13, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, -11, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1,
+ 0, 0, 0, 0, 9, 0, 0, 0, 0, -5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 11, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0,
+ -9, 0, 0, 0, 0, 0, -3, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 9, 0,
+ 0, 0, 0, -15, 0, 0, 0, 0, -7, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, -3, 0,
+ 0, 0, 0, -11, 0, 0, 0, 0, -7, 0, 0, 0, 0, -13, 0, 0, 0, 0, 11, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, -15, 0, 0, 0, 0, 1, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 13, 0, 0, 0,
+ 0, 0, 0, 11, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, -9, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 7,
+ 0, 0, 0, 0, 0, -15, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ }
+
+ sNaf := s.nonAdjacentForm(5)
+
+ for i := 0; i < 256; i++ {
+ if expectedNaf[i] != sNaf[i] {
+ t.Errorf("Wrong digit at position %d, got %d, expected %d", i, sNaf[i], expectedNaf[i])
+ }
+ }
+}
+
+type notZeroScalar Scalar
+
+func (notZeroScalar) Generate(rand *mathrand.Rand, size int) reflect.Value {
+ var s Scalar
+ var isNonZero uint64
+ for isNonZero == 0 {
+ s = Scalar{}.Generate(rand, size).Interface().(Scalar)
+ fiatScalarNonzero(&isNonZero, (*[4]uint64)(&s.s))
+ }
+ return reflect.ValueOf(notZeroScalar(s))
+}
+
+func TestScalarEqual(t *testing.T) {
+ if scOne.Equal(scMinusOne) == 1 {
+ t.Errorf("scOne.Equal(&scMinusOne) is true")
+ }
+ if scMinusOne.Equal(scMinusOne) == 0 {
+ t.Errorf("scMinusOne.Equal(&scMinusOne) is false")
+ }
+}
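TestScalarSetUniformBytes above rebuilds the group order as l = 2^252 + 27742317777372353535851937790883648493. That decimal is the same number as the limb constants hardcoded throughout scalar_fiat.go; a standalone cross-check (an editorial sketch, not part of the test suite):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        l, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
        l.Add(l, new(big.Int).Lsh(big.NewInt(1), 252))

        limbs := []uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0x0, 0x1000000000000000}
        acc := new(big.Int)
        for i := len(limbs) - 1; i >= 0; i-- {
            acc.Lsh(acc, 64)
            acc.Or(acc, new(big.Int).SetUint64(limbs[i]))
        }

        fmt.Println(acc.Cmp(l) == 0) // true
    }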
diff --git a/src/crypto/internal/edwards25519/scalarmult.go b/src/crypto/internal/edwards25519/scalarmult.go
new file mode 100644
index 0000000..f7ca3ce
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalarmult.go
@@ -0,0 +1,214 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import "sync"
+
+// basepointTable is a set of 32 affineLookupTables, where table i is generated
+// from 256^i * basepoint. It is precomputed the first time it's used.
+func basepointTable() *[32]affineLookupTable {
+ basepointTablePrecomp.initOnce.Do(func() {
+ p := NewGeneratorPoint()
+ for i := 0; i < 32; i++ {
+ basepointTablePrecomp.table[i].FromP3(p)
+ for j := 0; j < 8; j++ {
+ p.Add(p, p)
+ }
+ }
+ })
+ return &basepointTablePrecomp.table
+}
+
+var basepointTablePrecomp struct {
+ table [32]affineLookupTable
+ initOnce sync.Once
+}
+
+// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
+// returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarBaseMult(x *Scalar) *Point {
+ basepointTable := basepointTable()
+
+ // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i )
+ // as described in the Ed25519 paper
+ //
+ // Group even and odd coefficients
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
+ //
+ // We use a lookup table for each i to get x_i*16^(2*i)*B
+ // and do four doublings to multiply by 16.
+ digits := x.signedRadix16()
+
+ multiple := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+
+ // Accumulate the odd components first
+ v.Set(NewIdentityPoint())
+ for i := 1; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ // Multiply by 16
+ tmp2.FromP3(v) // tmp2 = v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords
+ v.fromP1xP1(tmp1) // now v = 16*(odd components)
+
+ // Accumulate the even components
+ for i := 0; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ return v
+}
+
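The radix-16 decomposition described in the comment above can be reproduced independently. In this toy version for small unsigned values (a hypothetical helper, not the Scalar.signedRadix16 method itself), digits lie in [-8, 8) and recombine as x = Σ d[i]·16^i:

    package main

    import "fmt"

    // toSignedRadix16 splits x into signed base-16 digits in [-8, 8).
    func toSignedRadix16(x uint64) []int8 {
        var d []int8
        carry := int8(0)
        for x > 0 || carry != 0 {
            v := int8(x&15) + carry
            carry = 0
            if v >= 8 {
                v -= 16 // borrow into the next digit
                carry = 1
            }
            d = append(d, v)
            x >>= 4
        }
        return d
    }

    func main() {
        x := uint64(0xdead)
        acc, mul := int64(0), int64(1)
        for _, v := range toSignedRadix16(x) {
            acc += int64(v) * mul
            mul *= 16
        }
        fmt.Println(acc == int64(x)) // true
    }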
+// ScalarMult sets v = x * q, and returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
+ checkInitialized(q)
+
+ var table projLookupTable
+ table.FromP3(q)
+
+ // Write x = sum(x_i * 16^i)
+ // so x*Q = sum( Q*x_i*16^i )
+ // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
+ // <------compute inside out---------
+ //
+ // We use the lookup table to get the x_i*Q values
+ // and do four doublings to compute 16*Q
+ digits := x.signedRadix16()
+
+ // Unwrap first loop iteration to save computing 16*identity
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ table.SelectInto(multiple, digits[63])
+
+ v.Set(NewIdentityPoint())
+ tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
+ for i := 62; i >= 0; i-- {
+ tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+ v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+ table.SelectInto(multiple, digits[i])
+ tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
+ }
+ v.fromP1xP1(tmp1)
+ return v
+}
+
+// basepointNafTable is the nafLookupTable8 for the basepoint.
+// It is precomputed the first time it's used.
+func basepointNafTable() *nafLookupTable8 {
+ basepointNafTablePrecomp.initOnce.Do(func() {
+ basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
+ })
+ return &basepointNafTablePrecomp.table
+}
+
+var basepointNafTablePrecomp struct {
+ table nafLookupTable8
+ initOnce sync.Once
+}
+
+// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
+// generator, and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
+ checkInitialized(A)
+
+ // Similarly to the single variable-base approach, we compute
+ // digits and use them with a lookup table. However, because
+ // we are allowed to do variable-time operations, we don't
+ // need constant-time lookups or constant-time digit
+ // computations.
+ //
+ // So we use a non-adjacent form of some width w instead of
+ // radix 16. This is like a binary representation (one digit
+ // for each binary place) but we allow the digits to grow in
+ // magnitude up to 2^{w-1} so that the nonzero digits are as
+ // sparse as possible. Intuitively, this "condenses" the
+ // "mass" of the scalar onto sparse coefficients (meaning
+ // fewer additions).
+
+ basepointNafTable := basepointNafTable()
+ var aTable nafLookupTable5
+ aTable.FromP3(A)
+ // Because the basepoint is fixed, we can use a wider NAF
+ // corresponding to a bigger table.
+ aNaf := a.nonAdjacentForm(5)
+ bNaf := b.nonAdjacentForm(8)
+
+	// Find the first nonzero coefficient.
+	i := 255
+	for ; i >= 0; i-- {
+		if aNaf[i] != 0 || bNaf[i] != 0 {
+			break
+		}
+	}
+
+ multA := &projCached{}
+ multB := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp2.Zero()
+
+ // Move from high to low bits, doubling the accumulator
+ // at each iteration and checking whether there is a nonzero
+ // coefficient to look up a multiple of.
+ for ; i >= 0; i-- {
+ tmp1.Double(tmp2)
+
+ // Only update v if we have a nonzero coeff to add in.
+ if aNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, aNaf[i])
+ tmp1.Add(v, multA)
+ } else if aNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, -aNaf[i])
+ tmp1.Sub(v, multA)
+ }
+
+ if bNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, bNaf[i])
+ tmp1.AddAffine(v, multB)
+ } else if bNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, -bNaf[i])
+ tmp1.SubAffine(v, multB)
+ }
+
+ tmp2.FromP1xP1(tmp1)
+ }
+
+ v.fromP2(tmp2)
+ return v
+}
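The width-w NAF sketched in the comment inside VarTimeDoubleScalarBaseMult can likewise be illustrated on plain integers. In this toy version (a hypothetical helper, not the Scalar.nonAdjacentForm method), every nonzero digit is odd with |d| < 2^(w-1), and at least w-1 zeros follow it:

    package main

    import "fmt"

    // naf returns the width-w non-adjacent form of a small x.
    func naf(x uint64, w uint) []int64 {
        var digits []int64
        for x > 0 {
            if x&1 == 0 {
                digits = append(digits, 0)
                x >>= 1
                continue
            }
            d := int64(x & (1<<w - 1)) // x mod 2^w ...
            if d >= 1<<(w-1) {
                d -= 1 << w // ... taken as a signed residue
            }
            x = uint64(int64(x) - d) // now x ≡ 0 (mod 2^w), forcing w-1 zero digits
            digits = append(digits, d)
            x >>= 1
        }
        return digits
    }

    func main() {
        x := uint64(0xfedcba987654321)
        acc, mul := int64(0), int64(1)
        for _, d := range naf(x, 5) {
            acc += d * mul
            mul *= 2
        }
        fmt.Println(uint64(acc) == x) // true: Σ d[i]·2^i = x
    }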
diff --git a/src/crypto/internal/edwards25519/scalarmult_test.go b/src/crypto/internal/edwards25519/scalarmult_test.go
new file mode 100644
index 0000000..6c92ab3
--- /dev/null
+++ b/src/crypto/internal/edwards25519/scalarmult_test.go
@@ -0,0 +1,209 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "testing"
+ "testing/quick"
+)
+
+var (
+ // quickCheckConfig32 will make each quickcheck test run (32 * -quickchecks)
+ // times. The default value of -quickchecks is 100.
+ quickCheckConfig32 = &quick.Config{MaxCountScale: 1 << 5}
+
+ // a random scalar generated using dalek.
+ dalekScalar, _ = (&Scalar{}).SetCanonicalBytes([]byte{219, 106, 114, 9, 174, 249, 155, 89, 69, 203, 201, 93, 92, 116, 234, 187, 78, 115, 103, 172, 182, 98, 62, 103, 187, 136, 13, 100, 248, 110, 12, 4})
+ // the above, times the edwards25519 basepoint.
+ dalekScalarBasepoint, _ = new(Point).SetBytes([]byte{0xf4, 0xef, 0x7c, 0xa, 0x34, 0x55, 0x7b, 0x9f, 0x72, 0x3b, 0xb6, 0x1e, 0xf9, 0x46, 0x9, 0x91, 0x1c, 0xb9, 0xc0, 0x6c, 0x17, 0x28, 0x2d, 0x8b, 0x43, 0x2b, 0x5, 0x18, 0x6a, 0x54, 0x3e, 0x48})
+)
+
+func TestScalarMultSmallScalars(t *testing.T) {
+ var z Scalar
+ var p Point
+ p.ScalarMult(&z, B)
+ if I.Equal(&p) != 1 {
+ t.Error("0*B != 0")
+ }
+ checkOnCurve(t, &p)
+
+	// scOne is the scalar one, shared from scalar_test.go (same package).
+	p.ScalarMult(scOne, B)
+	if B.Equal(&p) != 1 {
+		t.Error("1*B != B")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestScalarMultVsDalek(t *testing.T) {
+ var p Point
+ p.ScalarMult(dalekScalar, B)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("Scalar mul does not match dalek")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestBaseMultVsDalek(t *testing.T) {
+ var p Point
+ p.ScalarBaseMult(dalekScalar)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("Scalar mul does not match dalek")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestVarTimeDoubleBaseMultVsDalek(t *testing.T) {
+ var p Point
+ var z Scalar
+ p.VarTimeDoubleScalarBaseMult(dalekScalar, B, &z)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("VarTimeDoubleScalarBaseMult fails with b=0")
+ }
+ checkOnCurve(t, &p)
+ p.VarTimeDoubleScalarBaseMult(&z, B, dalekScalar)
+ if dalekScalarBasepoint.Equal(&p) != 1 {
+ t.Error("VarTimeDoubleScalarBaseMult fails with a=0")
+ }
+ checkOnCurve(t, &p)
+}
+
+func TestScalarMultDistributesOverAdd(t *testing.T) {
+ scalarMultDistributesOverAdd := func(x, y Scalar) bool {
+ var z Scalar
+ z.Add(&x, &y)
+ var p, q, r, check Point
+ p.ScalarMult(&x, B)
+ q.ScalarMult(&y, B)
+ r.ScalarMult(&z, B)
+ check.Add(&p, &q)
+ checkOnCurve(t, &p, &q, &r, &check)
+ return check.Equal(&r) == 1
+ }
+
+ if err := quick.Check(scalarMultDistributesOverAdd, quickCheckConfig32); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestScalarMultNonIdentityPoint(t *testing.T) {
+	// Check whether p.ScalarMult and q.ScalarBaseMult give the same result
+	// when p and q are originally set to the base point.
+
+ scalarMultNonIdentityPoint := func(x Scalar) bool {
+ var p, q Point
+ p.Set(B)
+ q.Set(B)
+
+ p.ScalarMult(&x, B)
+ q.ScalarBaseMult(&x)
+
+ checkOnCurve(t, &p, &q)
+
+ return p.Equal(&q) == 1
+ }
+
+ if err := quick.Check(scalarMultNonIdentityPoint, quickCheckConfig32); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestBasepointTableGeneration(t *testing.T) {
+ // The basepoint table is 32 affineLookupTables,
+ // corresponding to (16^2i)*B for table i.
+ basepointTable := basepointTable()
+
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp3 := &Point{}
+ tmp3.Set(B)
+ table := make([]affineLookupTable, 32)
+ for i := 0; i < 32; i++ {
+ // Build the table
+ table[i].FromP3(tmp3)
+ // Assert equality with the hardcoded one
+ if table[i] != basepointTable[i] {
+ t.Errorf("Basepoint table %d does not match", i)
+ }
+
+ // Set p = (16^2)*p = 256*p = 2^8*p
+ tmp2.FromP3(tmp3)
+ for j := 0; j < 7; j++ {
+ tmp1.Double(tmp2)
+ tmp2.FromP1xP1(tmp1)
+ }
+ tmp1.Double(tmp2)
+ tmp3.fromP1xP1(tmp1)
+ checkOnCurve(t, tmp3)
+ }
+}
+
+func TestScalarMultMatchesBaseMult(t *testing.T) {
+ scalarMultMatchesBaseMult := func(x Scalar) bool {
+ var p, q Point
+ p.ScalarMult(&x, B)
+ q.ScalarBaseMult(&x)
+ checkOnCurve(t, &p, &q)
+ return p.Equal(&q) == 1
+ }
+
+ if err := quick.Check(scalarMultMatchesBaseMult, quickCheckConfig32); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestBasepointNafTableGeneration(t *testing.T) {
+ var table nafLookupTable8
+ table.FromP3(B)
+
+ if table != *basepointNafTable() {
+ t.Error("BasepointNafTable does not match")
+ }
+}
+
+func TestVarTimeDoubleBaseMultMatchesBaseMult(t *testing.T) {
+ varTimeDoubleBaseMultMatchesBaseMult := func(x, y Scalar) bool {
+ var p, q1, q2, check Point
+
+ p.VarTimeDoubleScalarBaseMult(&x, B, &y)
+
+ q1.ScalarBaseMult(&x)
+ q2.ScalarBaseMult(&y)
+ check.Add(&q1, &q2)
+
+ checkOnCurve(t, &p, &check, &q1, &q2)
+ return p.Equal(&check) == 1
+ }
+
+ if err := quick.Check(varTimeDoubleBaseMultMatchesBaseMult, quickCheckConfig32); err != nil {
+ t.Error(err)
+ }
+}
+
+// Benchmarks.
+
+func BenchmarkScalarBaseMult(b *testing.B) {
+ var p Point
+
+ for i := 0; i < b.N; i++ {
+ p.ScalarBaseMult(dalekScalar)
+ }
+}
+
+func BenchmarkScalarMult(b *testing.B) {
+ var p Point
+
+ for i := 0; i < b.N; i++ {
+ p.ScalarMult(dalekScalar, B)
+ }
+}
+
+func BenchmarkVarTimeDoubleScalarBaseMult(b *testing.B) {
+ var p Point
+
+ for i := 0; i < b.N; i++ {
+ p.VarTimeDoubleScalarBaseMult(dalekScalar, B, dalekScalar)
+ }
+}
diff --git a/src/crypto/internal/edwards25519/tables.go b/src/crypto/internal/edwards25519/tables.go
new file mode 100644
index 0000000..83234bb
--- /dev/null
+++ b/src/crypto/internal/edwards25519/tables.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/subtle"
+)
+
+// A dynamic lookup table for variable-base, constant-time scalar muls.
+type projLookupTable struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, constant-time scalar muls.
+type affineLookupTable struct {
+ points [8]affineCached
+}
+
+// A dynamic lookup table for variable-base, variable-time scalar muls.
+type nafLookupTable5 struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, variable-time scalar muls.
+type nafLookupTable8 struct {
+ points [64]affineCached
+}
+
+// Constructors.
+
+// Builds a lookup table at runtime. Fast.
+func (v *projLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to a projCached
+ // This is needlessly complicated because the API has explicit
+ // receivers instead of creating stack objects and relying on RVO
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *affineLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to affineCached
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
+ }
+}
+
+// Builds a lookup table at runtime. Fast.
+func (v *nafLookupTable5) FromP3(q *Point) {
+ // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
+ // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *nafLookupTable8) FromP3(q *Point) {
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 63; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
+ }
+}
+
+// Selectors.
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Given odd x with 0 < x < 2^4, return x*Q (in variable time).
+func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
+ *dest = v.points[x/2]
+}
+
+// Given odd x with 0 < x < 2^7, return x*Q (in variable time).
+func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
+ *dest = v.points[x/2]
+}
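The branchless absolute value used by both constant-time SelectInto implementations above relies on a standard two's-complement identity: with xmask = x >> 7 (0 for non-negative x, all ones for negative, since Go's right shift on signed integers is arithmetic), (x + xmask) ^ xmask equals |x|. A small self-check, together with the subtle primitive that drives the constant-time table walk:

    package main

    import (
        "crypto/subtle"
        "fmt"
    )

    // abs8 mirrors the |x| computation in SelectInto.
    func abs8(x int8) uint8 {
        xmask := x >> 7 // arithmetic shift: 0 or -1
        return uint8((x + xmask) ^ xmask)
    }

    func main() {
        ok := true
        for x := int8(-8); x <= 8; x++ {
            want := x
            if x < 0 {
                want = -x
            }
            ok = ok && abs8(x) == uint8(want)
        }
        fmt.Println(ok) // true over the digit range used here

        // ConstantTimeByteEq returns 1 on equality; the table walk uses it
        // to select entry j without branching on secret data.
        fmt.Println(subtle.ConstantTimeByteEq(3, 3), subtle.ConstantTimeByteEq(3, 4)) // 1 0
    }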
diff --git a/src/crypto/internal/edwards25519/tables_test.go b/src/crypto/internal/edwards25519/tables_test.go
new file mode 100644
index 0000000..b5d161a
--- /dev/null
+++ b/src/crypto/internal/edwards25519/tables_test.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "testing"
+)
+
+func TestProjLookupTable(t *testing.T) {
+ var table projLookupTable
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3 projCached
+ table.SelectInto(&tmp1, 6)
+ table.SelectInto(&tmp2, -2)
+ table.SelectInto(&tmp3, -4)
+ // Expect T1 + T2 + T3 = identity
+
+ var accP1xP1 projP1xP1
+ accP3 := NewIdentityPoint()
+
+ accP1xP1.Add(accP3, &tmp1)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(accP3, &tmp2)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(accP3, &tmp3)
+ accP3.fromP1xP1(&accP1xP1)
+
+ if accP3.Equal(I) != 1 {
+ t.Errorf("Consistency check on ProjLookupTable.SelectInto failed! %x %x %x", tmp1, tmp2, tmp3)
+ }
+}
+
+func TestAffineLookupTable(t *testing.T) {
+ var table affineLookupTable
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3 affineCached
+ table.SelectInto(&tmp1, 3)
+ table.SelectInto(&tmp2, -7)
+ table.SelectInto(&tmp3, 4)
+ // Expect T1 + T2 + T3 = identity
+
+ var accP1xP1 projP1xP1
+ accP3 := NewIdentityPoint()
+
+ accP1xP1.AddAffine(accP3, &tmp1)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(accP3, &tmp2)
+ accP3.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(accP3, &tmp3)
+ accP3.fromP1xP1(&accP1xP1)
+
+ if accP3.Equal(I) != 1 {
+		t.Errorf("Consistency check on AffineLookupTable.SelectInto failed! %x %x %x", tmp1, tmp2, tmp3)
+ }
+}
+
+func TestNafLookupTable5(t *testing.T) {
+ var table nafLookupTable5
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3, tmp4 projCached
+ table.SelectInto(&tmp1, 9)
+ table.SelectInto(&tmp2, 11)
+ table.SelectInto(&tmp3, 7)
+ table.SelectInto(&tmp4, 13)
+ // Expect T1 + T2 = T3 + T4
+
+ var accP1xP1 projP1xP1
+ lhs := NewIdentityPoint()
+ rhs := NewIdentityPoint()
+
+ accP1xP1.Add(lhs, &tmp1)
+ lhs.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(lhs, &tmp2)
+ lhs.fromP1xP1(&accP1xP1)
+
+ accP1xP1.Add(rhs, &tmp3)
+ rhs.fromP1xP1(&accP1xP1)
+ accP1xP1.Add(rhs, &tmp4)
+ rhs.fromP1xP1(&accP1xP1)
+
+ if lhs.Equal(rhs) != 1 {
+ t.Errorf("Consistency check on nafLookupTable5 failed")
+ }
+}
+
+func TestNafLookupTable8(t *testing.T) {
+ var table nafLookupTable8
+ table.FromP3(B)
+
+ var tmp1, tmp2, tmp3, tmp4 affineCached
+ table.SelectInto(&tmp1, 49)
+ table.SelectInto(&tmp2, 11)
+ table.SelectInto(&tmp3, 35)
+ table.SelectInto(&tmp4, 25)
+ // Expect T1 + T2 = T3 + T4
+
+ var accP1xP1 projP1xP1
+ lhs := NewIdentityPoint()
+ rhs := NewIdentityPoint()
+
+ accP1xP1.AddAffine(lhs, &tmp1)
+ lhs.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(lhs, &tmp2)
+ lhs.fromP1xP1(&accP1xP1)
+
+ accP1xP1.AddAffine(rhs, &tmp3)
+ rhs.fromP1xP1(&accP1xP1)
+ accP1xP1.AddAffine(rhs, &tmp4)
+ rhs.fromP1xP1(&accP1xP1)
+
+ if lhs.Equal(rhs) != 1 {
+ t.Errorf("Consistency check on nafLookupTable8 failed")
+ }
+}
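
The scalars in these tests are chosen so that each check reduces to plain
arithmetic: 6 + (-2) + (-4) = 0 and 3 + (-7) + 4 = 0, so the selected multiples
of B must sum to the identity, while 9 + 11 = 7 + 13 = 20 and
49 + 11 = 35 + 25 = 60 force the two sides of the NAF comparisons to be the
same point.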
diff --git a/src/crypto/internal/nistec/fiat/Dockerfile b/src/crypto/internal/nistec/fiat/Dockerfile
new file mode 100644
index 0000000..2877e0b
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/Dockerfile
@@ -0,0 +1,12 @@
+# Copyright 2021 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+FROM coqorg/coq:8.13.2
+
+RUN git clone https://github.com/mit-plv/fiat-crypto && cd fiat-crypto && \
+ git checkout 23d2dbc4ab897d14bde4404f70cd6991635f9c01 && \
+ git submodule update --init --recursive
+RUN cd fiat-crypto && eval $(opam env) && make -j4 standalone-ocaml SKIP_BEDROCK2=1
+
+ENV PATH /home/coq/fiat-crypto/src/ExtractionOCaml:$PATH
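
The image built here is the one generate.go consumes: the README below tags it
fiat-crypto:v0.0.9, and generate.go invokes the extracted word_by_word_montgomery
binary from it via docker run.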
diff --git a/src/crypto/internal/nistec/fiat/README b/src/crypto/internal/nistec/fiat/README
new file mode 100644
index 0000000..916ebc1
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/README
@@ -0,0 +1,34 @@
+The code in this package was autogenerated by the fiat-crypto project
+at version v0.0.9 from a formally verified model, and by the addchain
+project at the pinned pre-release version used in the commands below.
+
+ docker build -t fiat-crypto:v0.0.9 .
+ go install github.com/mmcloughlin/addchain/cmd/addchain@v0.3.1-0.20211027081849-6a7d3decbe08
+ ../../../../../bin/go run generate.go
+
+fiat-crypto code comes under the following license.
+
+ Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+ Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The authors are listed at
+
+ https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS
diff --git a/src/crypto/internal/nistec/fiat/fiat_test.go b/src/crypto/internal/nistec/fiat/fiat_test.go
new file mode 100644
index 0000000..dee9f68
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/fiat_test.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fiat_test
+
+import (
+ "crypto/internal/nistec/fiat"
+ "testing"
+)
+
+func BenchmarkMul(b *testing.B) {
+ b.Run("P224", func(b *testing.B) {
+ v := new(fiat.P224Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Mul(v, v)
+ }
+ })
+ b.Run("P384", func(b *testing.B) {
+ v := new(fiat.P384Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Mul(v, v)
+ }
+ })
+ b.Run("P521", func(b *testing.B) {
+ v := new(fiat.P521Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Mul(v, v)
+ }
+ })
+}
+
+func BenchmarkSquare(b *testing.B) {
+ b.Run("P224", func(b *testing.B) {
+ v := new(fiat.P224Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Square(v)
+ }
+ })
+ b.Run("P384", func(b *testing.B) {
+ v := new(fiat.P384Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Square(v)
+ }
+ })
+ b.Run("P521", func(b *testing.B) {
+ v := new(fiat.P521Element).One()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.Square(v)
+ }
+ })
+}
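
These are ordinary testing benchmarks; from a Go source tree they can be run
with the usual tooling, for example:

    go test -bench 'Mul|Square' crypto/internal/nistec/fiat

(an example invocation; any -bench pattern selecting the sub-benchmarks works).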
diff --git a/src/crypto/internal/nistec/fiat/generate.go b/src/crypto/internal/nistec/fiat/generate.go
new file mode 100644
index 0000000..db57021
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/generate.go
@@ -0,0 +1,330 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+ "bytes"
+ "go/format"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "text/template"
+)
+
+var curves = []struct {
+ Element string
+ Prime string
+ Prefix string
+ FiatType string
+ BytesLen int
+}{
+ {
+ Element: "P224Element",
+ Prime: "2^224 - 2^96 + 1",
+ Prefix: "p224",
+ FiatType: "[4]uint64",
+ BytesLen: 28,
+ },
+ // The P-256 fiat implementation is used only on 32-bit architectures, but
+ // the uint32 fiat code is for some reason slower than the uint64 one. That
+ // suggests there is a wide margin for improvement.
+ {
+ Element: "P256Element",
+ Prime: "2^256 - 2^224 + 2^192 + 2^96 - 1",
+ Prefix: "p256",
+ FiatType: "[4]uint64",
+ BytesLen: 32,
+ },
+ {
+ Element: "P384Element",
+ Prime: "2^384 - 2^128 - 2^96 + 2^32 - 1",
+ Prefix: "p384",
+ FiatType: "[6]uint64",
+ BytesLen: 48,
+ },
+ // Note that unsaturated_solinas would be about 2x faster than
+ // word_by_word_montgomery for P-521, but this curve is used rarely enough
+ // that it's not worth carrying unsaturated_solinas support for it.
+ {
+ Element: "P521Element",
+ Prime: "2^521 - 1",
+ Prefix: "p521",
+ FiatType: "[9]uint64",
+ BytesLen: 66,
+ },
+}
+
+func main() {
+ t := template.Must(template.New("montgomery").Parse(tmplWrapper))
+
+ tmplAddchainFile, err := os.CreateTemp("", "addchain-template")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.Remove(tmplAddchainFile.Name())
+ if _, err := io.WriteString(tmplAddchainFile, tmplAddchain); err != nil {
+ log.Fatal(err)
+ }
+ if err := tmplAddchainFile.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ for _, c := range curves {
+ log.Printf("Generating %s.go...", c.Prefix)
+ f, err := os.Create(c.Prefix + ".go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := t.Execute(f, c); err != nil {
+ log.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ log.Printf("Generating %s_fiat64.go...", c.Prefix)
+ cmd := exec.Command("docker", "run", "--rm", "--entrypoint", "word_by_word_montgomery",
+ "fiat-crypto:v0.0.9", "--lang", "Go", "--no-wide-int", "--cmovznz-by-mul",
+ "--relax-primitive-carry-to-bitwidth", "32,64", "--internal-static",
+ "--public-function-case", "camelCase", "--public-type-case", "camelCase",
+ "--private-function-case", "camelCase", "--private-type-case", "camelCase",
+ "--doc-text-before-function-name", "", "--doc-newline-before-package-declaration",
+ "--doc-prepend-header", "Code generated by Fiat Cryptography. DO NOT EDIT.",
+ "--package-name", "fiat", "--no-prefix-fiat", c.Prefix, "64", c.Prime,
+ "mul", "square", "add", "sub", "one", "from_montgomery", "to_montgomery",
+ "selectznz", "to_bytes", "from_bytes")
+ cmd.Stderr = os.Stderr
+ out, err := cmd.Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+ out, err = format.Source(out)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := os.WriteFile(c.Prefix+"_fiat64.go", out, 0644); err != nil {
+ log.Fatal(err)
+ }
+
+ log.Printf("Generating %s_invert.go...", c.Prefix)
+ f, err = os.CreateTemp("", "addchain-"+c.Prefix)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.Remove(f.Name())
+ cmd = exec.Command("addchain", "search", c.Prime+" - 2")
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = f
+ if err := cmd.Run(); err != nil {
+ log.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatal(err)
+ }
+ cmd = exec.Command("addchain", "gen", "-tmpl", tmplAddchainFile.Name(), f.Name())
+ cmd.Stderr = os.Stderr
+ out, err = cmd.Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+ out = bytes.Replace(out, []byte("Element"), []byte(c.Element), -1)
+ out, err = format.Source(out)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := os.WriteFile(c.Prefix+"_invert.go", out, 0644); err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+const tmplWrapper = `// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// {{ .Element }} is an integer modulo {{ .Prime }}.
+//
+// The zero value is a valid zero element.
+type {{ .Element }} struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x {{ .Prefix }}MontgomeryDomainFieldElement
+}
+
+const {{ .Prefix }}ElementLen = {{ .BytesLen }}
+
+type {{ .Prefix }}UntypedFieldElement = {{ .FiatType }}
+
+// One sets e = 1, and returns e.
+func (e *{{ .Element }}) One() *{{ .Element }} {
+ {{ .Prefix }}SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *{{ .Element }}) Equal(t *{{ .Element }}) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *{{ .Element }}) IsZero() int {
+ zero := make([]byte, {{ .Prefix }}ElementLen)
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, zero)
+}
+
+// Set sets e = t, and returns e.
+func (e *{{ .Element }}) Set(t *{{ .Element }}) *{{ .Element }} {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the {{ .BytesLen }}-byte big-endian encoding of e.
+func (e *{{ .Element }}) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [{{ .Prefix }}ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *{{ .Element }}) bytes(out *[{{ .Prefix }}ElementLen]byte) []byte {
+ var tmp {{ .Prefix }}NonMontgomeryDomainFieldElement
+ {{ .Prefix }}FromMontgomery(&tmp, &e.x)
+ {{ .Prefix }}ToBytes(out, (*{{ .Prefix }}UntypedFieldElement)(&tmp))
+ {{ .Prefix }}InvertEndianness(out[:])
+ return out[:]
+}
+
+// SetBytes sets e = v, where v is a big-endian {{ .BytesLen }}-byte encoding, and returns e.
+// If v is not {{ .BytesLen }} bytes or it encodes a value higher than {{ .Prime }},
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *{{ .Element }}) SetBytes(v []byte) (*{{ .Element }}, error) {
+ if len(v) != {{ .Prefix }}ElementLen {
+ return nil, errors.New("invalid {{ .Element }} encoding")
+ }
+
+ // Check for non-canonical encodings (p + k, 2p + k, etc.) by comparing to
+ // the encoding of -1 mod p, so p - 1, the highest canonical encoding.
+ var minusOneEncoding = new({{ .Element }}).Sub(
+ new({{ .Element }}), new({{ .Element }}).One()).Bytes()
+ for i := range v {
+ if v[i] < minusOneEncoding[i] {
+ break
+ }
+ if v[i] > minusOneEncoding[i] {
+ return nil, errors.New("invalid {{ .Element }} encoding")
+ }
+ }
+
+ var in [{{ .Prefix }}ElementLen]byte
+ copy(in[:], v)
+ {{ .Prefix }}InvertEndianness(in[:])
+ var tmp {{ .Prefix }}NonMontgomeryDomainFieldElement
+ {{ .Prefix }}FromBytes((*{{ .Prefix }}UntypedFieldElement)(&tmp), &in)
+ {{ .Prefix }}ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
+
+// Add sets e = t1 + t2, and returns e.
+func (e *{{ .Element }}) Add(t1, t2 *{{ .Element }}) *{{ .Element }} {
+ {{ .Prefix }}Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *{{ .Element }}) Sub(t1, t2 *{{ .Element }}) *{{ .Element }} {
+ {{ .Prefix }}Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *{{ .Element }}) Mul(t1, t2 *{{ .Element }}) *{{ .Element }} {
+ {{ .Prefix }}Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *{{ .Element }}) Square(t *{{ .Element }}) *{{ .Element }} {
+ {{ .Prefix }}Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *{{ .Element }}) Select(a, b *{{ .Element }}, cond int) *{{ .Element }} {
+ {{ .Prefix }}Selectznz((*{{ .Prefix }}UntypedFieldElement)(&v.x), {{ .Prefix }}Uint1(cond),
+ (*{{ .Prefix }}UntypedFieldElement)(&b.x), (*{{ .Prefix }}UntypedFieldElement)(&a.x))
+ return v
+}
+
+func {{ .Prefix }}InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
+`
+
+const tmplAddchain = `// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by {{ .Meta.Name }}. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *Element) Invert(x *Element) *Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of {{ .Ops.Adds }} multiplications and {{ .Ops.Doubles }} squarings is derived from the
+ // following addition chain generated with {{ .Meta.Module }} {{ .Meta.ReleaseTag }}.
+ //
+ {{- range lines (format .Script) }}
+ // {{ . }}
+ {{- end }}
+ //
+
+ var z = new(Element).Set(e)
+ {{- range .Program.Temporaries }}
+ var {{ . }} = new(Element)
+ {{- end }}
+ {{ range $i := .Program.Instructions -}}
+ {{- with add $i.Op }}
+ {{ $i.Output }}.Mul({{ .X }}, {{ .Y }})
+ {{- end -}}
+
+ {{- with double $i.Op }}
+ {{ $i.Output }}.Square({{ .X }})
+ {{- end -}}
+
+ {{- with shift $i.Op -}}
+ {{- $first := 0 -}}
+ {{- if ne $i.Output.Identifier .X.Identifier }}
+ {{ $i.Output }}.Square({{ .X }})
+ {{- $first = 1 -}}
+ {{- end }}
+ for s := {{ $first }}; s < {{ .S }}; s++ {
+ {{ $i.Output }}.Square({{ $i.Output }})
+ }
+ {{- end -}}
+ {{- end }}
+
+ return e.Set(z)
+}
+`
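
The generated Invert methods work by Fermat's little theorem: for a prime p and
x not divisible by p, x^(p-1) ≡ 1 (mod p), so x^(p-2) ≡ x^-1 (mod p), and the
addchain tool merely finds a short fixed sequence of multiplications and
squarings that evaluates that power. A self-contained math/big illustration of
the identity (toy prime and value chosen only for the demo):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        p := big.NewInt(10007) // a small prime stand-in for the curve primes
        x := big.NewInt(1234)
        // inv = x^(p-2) mod p, the power the generated addition chains compute.
        inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)
        // x * inv ≡ 1 (mod p) confirms inv is the modular inverse.
        check := new(big.Int).Mod(new(big.Int).Mul(x, inv), p)
        fmt.Println(inv, check)
    }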
diff --git a/src/crypto/internal/nistec/fiat/p224.go b/src/crypto/internal/nistec/fiat/p224.go
new file mode 100644
index 0000000..e1a78db
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p224.go
@@ -0,0 +1,134 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// P224Element is an integer modulo 2^224 - 2^96 + 1.
+//
+// The zero value is a valid zero element.
+type P224Element struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x p224MontgomeryDomainFieldElement
+}
+
+const p224ElementLen = 28
+
+type p224UntypedFieldElement = [4]uint64
+
+// One sets e = 1, and returns e.
+func (e *P224Element) One() *P224Element {
+ p224SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *P224Element) Equal(t *P224Element) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *P224Element) IsZero() int {
+ zero := make([]byte, p224ElementLen)
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, zero)
+}
+
+// Set sets e = t, and returns e.
+func (e *P224Element) Set(t *P224Element) *P224Element {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the 28-byte big-endian encoding of e.
+func (e *P224Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p224ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *P224Element) bytes(out *[p224ElementLen]byte) []byte {
+ var tmp p224NonMontgomeryDomainFieldElement
+ p224FromMontgomery(&tmp, &e.x)
+ p224ToBytes(out, (*p224UntypedFieldElement)(&tmp))
+ p224InvertEndianness(out[:])
+ return out[:]
+}
+
+// SetBytes sets e = v, where v is a big-endian 28-byte encoding, and returns e.
+// If v is not 28 bytes or it encodes a value higher than 2^224 - 2^96 + 1,
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *P224Element) SetBytes(v []byte) (*P224Element, error) {
+ if len(v) != p224ElementLen {
+ return nil, errors.New("invalid P224Element encoding")
+ }
+
+ // Check for non-canonical encodings (p + k, 2p + k, etc.) by comparing to
+ // the encoding of -1 mod p, so p - 1, the highest canonical encoding.
+ var minusOneEncoding = new(P224Element).Sub(
+ new(P224Element), new(P224Element).One()).Bytes()
+ for i := range v {
+ if v[i] < minusOneEncoding[i] {
+ break
+ }
+ if v[i] > minusOneEncoding[i] {
+ return nil, errors.New("invalid P224Element encoding")
+ }
+ }
+
+ var in [p224ElementLen]byte
+ copy(in[:], v)
+ p224InvertEndianness(in[:])
+ var tmp p224NonMontgomeryDomainFieldElement
+ p224FromBytes((*p224UntypedFieldElement)(&tmp), &in)
+ p224ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
+
+// Add sets e = t1 + t2, and returns e.
+func (e *P224Element) Add(t1, t2 *P224Element) *P224Element {
+ p224Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *P224Element) Sub(t1, t2 *P224Element) *P224Element {
+ p224Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *P224Element) Mul(t1, t2 *P224Element) *P224Element {
+ p224Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *P224Element) Square(t *P224Element) *P224Element {
+ p224Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *P224Element) Select(a, b *P224Element, cond int) *P224Element {
+ p224Selectznz((*p224UntypedFieldElement)(&v.x), p224Uint1(cond),
+ (*p224UntypedFieldElement)(&b.x), (*p224UntypedFieldElement)(&a.x))
+ return v
+}
+
+func p224InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
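
The SetBytes rejection loop above is a byte-wise lexicographic comparison, which
for fixed-length big-endian encodings agrees with numeric order, so v is
accepted exactly when v <= p - 1. A standalone sketch of the equivalent check
(hypothetical helper built on bytes.Compare; the package itself keeps the
explicit loop):

    package main

    import (
        "bytes"
        "fmt"
    )

    // canonical reports whether the fixed-length big-endian encoding v is in
    // [0, p-1], given minusOne, the encoding of p - 1.
    func canonical(v, minusOne []byte) bool {
        return len(v) == len(minusOne) && bytes.Compare(v, minusOne) <= 0
    }

    func main() {
        minusOne := []byte{0x01, 0xff} // toy 2-byte field with p - 1 = 0x01ff
        fmt.Println(canonical([]byte{0x01, 0xfe}, minusOne)) // true: below p - 1
        fmt.Println(canonical([]byte{0x02, 0x00}, minusOne)) // false: >= p
    }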
diff --git a/src/crypto/internal/nistec/fiat/p224_fiat64.go b/src/crypto/internal/nistec/fiat/p224_fiat64.go
new file mode 100644
index 0000000..9337bfe
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p224_fiat64.go
@@ -0,0 +1,1461 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p224 64 '2^224 - 2^96 + 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p224
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0xffffffffffffffffffffffffffffffff000000000000000000000001 (from "2^224 - 2^96 + 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package fiat
+
+import "math/bits"
+
+type p224Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p224Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p224MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p224MontgomeryDomainFieldElement [4]uint64
+
+// The type p224NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p224NonMontgomeryDomainFieldElement [4]uint64
+
+// p224CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p224CmovznzU64(out1 *uint64, arg1 p224Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
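+
+// Note: multiplying the 0/1 condition arg1 by 0xffffffffffffffff expands it
+// into an all-zeros or all-ones mask, so the select above is a branch-free
+// mask-and-combine: (mask & arg3) | (^mask & arg2).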
+
+// p224Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p224Mul(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
+ x19 := (uint64(p224Uint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0xffffffff00000000)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p224Uint1(x29)))
+ x32 := (uint64(p224Uint1(x31)) + x23)
+ var x34 uint64
+ _, x34 = bits.Add64(x11, x20, uint64(0x0))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x13, x26, uint64(p224Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x15, x28, uint64(p224Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x17, x30, uint64(p224Uint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x41, x42 = bits.Add64(x19, x32, uint64(p224Uint1(x40)))
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[3])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[2])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg2[1])
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x1, arg2[0])
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x50, x47, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x48, x45, uint64(p224Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x46, x43, uint64(p224Uint1(x54)))
+ x57 := (uint64(p224Uint1(x56)) + x44)
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(0x0))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p224Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(p224Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x41, x55, uint64(p224Uint1(x63)))
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(uint64(p224Uint1(x42)), x57, uint64(p224Uint1(x65)))
+ var x68 uint64
+ _, x68 = bits.Mul64(x58, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x68, 0xffffffff)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x68, 0xffffffffffffffff)
+ var x74 uint64
+ var x75 uint64
+ x75, x74 = bits.Mul64(x68, 0xffffffff00000000)
+ var x76 uint64
+ var x77 uint64
+ x76, x77 = bits.Add64(x75, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x73, x70, uint64(p224Uint1(x77)))
+ x80 := (uint64(p224Uint1(x79)) + x71)
+ var x82 uint64
+ _, x82 = bits.Add64(x58, x68, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x60, x74, uint64(p224Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x62, x76, uint64(p224Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x64, x78, uint64(p224Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x66, x80, uint64(p224Uint1(x88)))
+ x91 := (uint64(p224Uint1(x90)) + uint64(p224Uint1(x67)))
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg2[3])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg2[2])
+ var x96 uint64
+ var x97 uint64
+ x97, x96 = bits.Mul64(x2, arg2[1])
+ var x98 uint64
+ var x99 uint64
+ x99, x98 = bits.Mul64(x2, arg2[0])
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x99, x96, uint64(0x0))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x97, x94, uint64(p224Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(x95, x92, uint64(p224Uint1(x103)))
+ x106 := (uint64(p224Uint1(x105)) + x93)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(0x0))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(p224Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(p224Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x89, x104, uint64(p224Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x91, x106, uint64(p224Uint1(x114)))
+ var x117 uint64
+ _, x117 = bits.Mul64(x107, 0xffffffffffffffff)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x117, 0xffffffff)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x117, 0xffffffffffffffff)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x117, 0xffffffff00000000)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(p224Uint1(x126)))
+ x129 := (uint64(p224Uint1(x128)) + x120)
+ var x131 uint64
+ _, x131 = bits.Add64(x107, x117, uint64(0x0))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x109, x123, uint64(p224Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x111, x125, uint64(p224Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x113, x127, uint64(p224Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x138, x139 = bits.Add64(x115, x129, uint64(p224Uint1(x137)))
+ x140 := (uint64(p224Uint1(x139)) + uint64(p224Uint1(x116)))
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg2[3])
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x3, arg2[2])
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x3, arg2[1])
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x3, arg2[0])
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x148, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x146, x143, uint64(p224Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x144, x141, uint64(p224Uint1(x152)))
+ x155 := (uint64(p224Uint1(x154)) + x142)
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(0x0))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(p224Uint1(x157)))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x136, x151, uint64(p224Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x138, x153, uint64(p224Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x140, x155, uint64(p224Uint1(x163)))
+ var x166 uint64
+ _, x166 = bits.Mul64(x156, 0xffffffffffffffff)
+ var x168 uint64
+ var x169 uint64
+ x169, x168 = bits.Mul64(x166, 0xffffffff)
+ var x170 uint64
+ var x171 uint64
+ x171, x170 = bits.Mul64(x166, 0xffffffffffffffff)
+ var x172 uint64
+ var x173 uint64
+ x173, x172 = bits.Mul64(x166, 0xffffffff00000000)
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x173, x170, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Add64(x171, x168, uint64(p224Uint1(x175)))
+ x178 := (uint64(p224Uint1(x177)) + x169)
+ var x180 uint64
+ _, x180 = bits.Add64(x156, x166, uint64(0x0))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x158, x172, uint64(p224Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x160, x174, uint64(p224Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x162, x176, uint64(p224Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x164, x178, uint64(p224Uint1(x186)))
+ x189 := (uint64(p224Uint1(x188)) + uint64(p224Uint1(x165)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Sub64(x181, uint64(0x1), uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Sub64(x183, 0xffffffff00000000, uint64(p224Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Sub64(x185, 0xffffffffffffffff, uint64(p224Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Sub64(x187, 0xffffffff, uint64(p224Uint1(x195)))
+ var x199 uint64
+ _, x199 = bits.Sub64(x189, uint64(0x0), uint64(p224Uint1(x197)))
+ var x200 uint64
+ p224CmovznzU64(&x200, p224Uint1(x199), x190, x181)
+ var x201 uint64
+ p224CmovznzU64(&x201, p224Uint1(x199), x192, x183)
+ var x202 uint64
+ p224CmovznzU64(&x202, p224Uint1(x199), x194, x185)
+ var x203 uint64
+ p224CmovznzU64(&x203, p224Uint1(x199), x196, x187)
+ out1[0] = x200
+ out1[1] = x201
+ out1[2] = x202
+ out1[3] = x203
+}
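+
+// Note: p224Mul implements word-by-word Montgomery multiplication. For each
+// limb of arg1 it accumulates arg1[i]*arg2 into a running total t, computes
+// q = t[0] * m' mod 2^64 with m' = -m^-1 mod 2^64 (0xffffffffffffffff for this
+// prime, hence the bits.Mul64(..., 0xffffffffffffffff) steps), adds q*m so the
+// low limb cancels, and drops that limb. The Sub64/p224CmovznzU64 tail is a
+// conditional subtraction bringing the result into [0, m).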
+
+// p224Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+// 0 ≤ eval out1 < m
+func p224Square(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg1[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg1[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg1[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg1[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
+ x19 := (uint64(p224Uint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0xffffffff00000000)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p224Uint1(x29)))
+ x32 := (uint64(p224Uint1(x31)) + x23)
+ var x34 uint64
+ _, x34 = bits.Add64(x11, x20, uint64(0x0))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x13, x26, uint64(p224Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x15, x28, uint64(p224Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x17, x30, uint64(p224Uint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x41, x42 = bits.Add64(x19, x32, uint64(p224Uint1(x40)))
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg1[3])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg1[2])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg1[1])
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x1, arg1[0])
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x50, x47, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x48, x45, uint64(p224Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x46, x43, uint64(p224Uint1(x54)))
+ x57 := (uint64(p224Uint1(x56)) + x44)
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(0x0))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p224Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(p224Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x41, x55, uint64(p224Uint1(x63)))
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(uint64(p224Uint1(x42)), x57, uint64(p224Uint1(x65)))
+ var x68 uint64
+ _, x68 = bits.Mul64(x58, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x68, 0xffffffff)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x68, 0xffffffffffffffff)
+ var x74 uint64
+ var x75 uint64
+ x75, x74 = bits.Mul64(x68, 0xffffffff00000000)
+ var x76 uint64
+ var x77 uint64
+ x76, x77 = bits.Add64(x75, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x73, x70, uint64(p224Uint1(x77)))
+ x80 := (uint64(p224Uint1(x79)) + x71)
+ var x82 uint64
+ _, x82 = bits.Add64(x58, x68, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x60, x74, uint64(p224Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x62, x76, uint64(p224Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x64, x78, uint64(p224Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x66, x80, uint64(p224Uint1(x88)))
+ x91 := (uint64(p224Uint1(x90)) + uint64(p224Uint1(x67)))
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg1[3])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg1[2])
+ var x96 uint64
+ var x97 uint64
+ x97, x96 = bits.Mul64(x2, arg1[1])
+ var x98 uint64
+ var x99 uint64
+ x99, x98 = bits.Mul64(x2, arg1[0])
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x99, x96, uint64(0x0))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x97, x94, uint64(p224Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(x95, x92, uint64(p224Uint1(x103)))
+ x106 := (uint64(p224Uint1(x105)) + x93)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(0x0))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(p224Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(p224Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x89, x104, uint64(p224Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x91, x106, uint64(p224Uint1(x114)))
+ var x117 uint64
+ _, x117 = bits.Mul64(x107, 0xffffffffffffffff)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x117, 0xffffffff)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x117, 0xffffffffffffffff)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x117, 0xffffffff00000000)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(p224Uint1(x126)))
+ x129 := (uint64(p224Uint1(x128)) + x120)
+ var x131 uint64
+ _, x131 = bits.Add64(x107, x117, uint64(0x0))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x109, x123, uint64(p224Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x111, x125, uint64(p224Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x113, x127, uint64(p224Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x138, x139 = bits.Add64(x115, x129, uint64(p224Uint1(x137)))
+ x140 := (uint64(p224Uint1(x139)) + uint64(p224Uint1(x116)))
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg1[3])
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x3, arg1[2])
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x3, arg1[1])
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x3, arg1[0])
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x148, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x146, x143, uint64(p224Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x144, x141, uint64(p224Uint1(x152)))
+ x155 := (uint64(p224Uint1(x154)) + x142)
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(0x0))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(p224Uint1(x157)))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x136, x151, uint64(p224Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x138, x153, uint64(p224Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x140, x155, uint64(p224Uint1(x163)))
+ var x166 uint64
+ _, x166 = bits.Mul64(x156, 0xffffffffffffffff)
+ var x168 uint64
+ var x169 uint64
+ x169, x168 = bits.Mul64(x166, 0xffffffff)
+ var x170 uint64
+ var x171 uint64
+ x171, x170 = bits.Mul64(x166, 0xffffffffffffffff)
+ var x172 uint64
+ var x173 uint64
+ x173, x172 = bits.Mul64(x166, 0xffffffff00000000)
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x173, x170, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Add64(x171, x168, uint64(p224Uint1(x175)))
+ x178 := (uint64(p224Uint1(x177)) + x169)
+ var x180 uint64
+ _, x180 = bits.Add64(x156, x166, uint64(0x0))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x158, x172, uint64(p224Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x160, x174, uint64(p224Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x162, x176, uint64(p224Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x164, x178, uint64(p224Uint1(x186)))
+ x189 := (uint64(p224Uint1(x188)) + uint64(p224Uint1(x165)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Sub64(x181, uint64(0x1), uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Sub64(x183, 0xffffffff00000000, uint64(p224Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Sub64(x185, 0xffffffffffffffff, uint64(p224Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Sub64(x187, 0xffffffff, uint64(p224Uint1(x195)))
+ var x199 uint64
+ _, x199 = bits.Sub64(x189, uint64(0x0), uint64(p224Uint1(x197)))
+ var x200 uint64
+ p224CmovznzU64(&x200, p224Uint1(x199), x190, x181)
+ var x201 uint64
+ p224CmovznzU64(&x201, p224Uint1(x199), x192, x183)
+ var x202 uint64
+ p224CmovznzU64(&x202, p224Uint1(x199), x194, x185)
+ var x203 uint64
+ p224CmovznzU64(&x203, p224Uint1(x199), x196, x187)
+ out1[0] = x200
+ out1[1] = x201
+ out1[2] = x202
+ out1[3] = x203
+}
+
+// p224Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p224Add(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p224Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p224Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p224Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, uint64(0x1), uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0xffffffff00000000, uint64(p224Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, 0xffffffffffffffff, uint64(p224Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0xffffffff, uint64(p224Uint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(p224Uint1(x8)), uint64(0x0), uint64(p224Uint1(x16)))
+ var x19 uint64
+ p224CmovznzU64(&x19, p224Uint1(x18), x9, x1)
+ var x20 uint64
+ p224CmovznzU64(&x20, p224Uint1(x18), x11, x3)
+ var x21 uint64
+ p224CmovznzU64(&x21, p224Uint1(x18), x13, x5)
+ var x22 uint64
+ p224CmovznzU64(&x22, p224Uint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
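+
+// Note: the Sub64/p224CmovznzU64 tail above computes both the raw sum and
+// sum - m and selects on the final borrow, so the reduction into [0, m) never
+// branches on secret data.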
+
+// p224Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p224Sub(out1 *p224MontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement, arg2 *p224MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p224Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p224Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p224Uint1(x6)))
+ var x9 uint64
+ p224CmovznzU64(&x9, p224Uint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, uint64((p224Uint1(x9) & 0x1)), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0xffffffff00000000), uint64(p224Uint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, x9, uint64(p224Uint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0xffffffff), uint64(p224Uint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
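+
+// Note: x9 is an all-ones mask exactly when the limb-wise subtraction
+// borrowed; the masked additions then add back the modulus m, whose
+// little-endian limbs are [0x1, 0xffffffff00000000, 0xffffffffffffffff,
+// 0xffffffff], restoring the result to [0, m).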
+
+// p224SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = 1 mod m
+// 0 ≤ eval out1 < m
+func p224SetOne(out1 *p224MontgomeryDomainFieldElement) {
+ out1[0] = 0xffffffff00000000
+ out1[1] = 0xffffffffffffffff
+ out1[2] = uint64(0x0)
+ out1[3] = uint64(0x0)
+}
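+
+// Note: these limbs are R mod m for R = 2^256: since 2^224 ≡ 2^96 - 1 (mod m),
+// 2^256 = 2^32 * 2^224 ≡ 2^128 - 2^32, whose little-endian 64-bit limbs are
+// exactly [0xffffffff00000000, 0xffffffffffffffff, 0, 0].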
+
+// p224FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func p224FromMontgomery(out1 *p224NonMontgomeryDomainFieldElement, arg1 *p224MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0xffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0xffffffff00000000)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x7, x4, uint64(p224Uint1(x11)))
+ var x15 uint64
+ _, x15 = bits.Add64(x1, x2, uint64(0x0))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(uint64(0x0), x8, uint64(p224Uint1(x15)))
+ var x18 uint64
+ var x19 uint64
+ x18, x19 = bits.Add64(uint64(0x0), x10, uint64(p224Uint1(x17)))
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(uint64(0x0), x12, uint64(p224Uint1(x19)))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x16, arg1[1], uint64(0x0))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x18, uint64(0x0), uint64(p224Uint1(x23)))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x20, uint64(0x0), uint64(p224Uint1(x25)))
+ var x28 uint64
+ _, x28 = bits.Mul64(x22, 0xffffffffffffffff)
+ var x30 uint64
+ var x31 uint64
+ x31, x30 = bits.Mul64(x28, 0xffffffff)
+ var x32 uint64
+ var x33 uint64
+ x33, x32 = bits.Mul64(x28, 0xffffffffffffffff)
+ var x34 uint64
+ var x35 uint64
+ x35, x34 = bits.Mul64(x28, 0xffffffff00000000)
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x35, x32, uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x33, x30, uint64(p224Uint1(x37)))
+ var x41 uint64
+ _, x41 = bits.Add64(x22, x28, uint64(0x0))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x24, x34, uint64(p224Uint1(x41)))
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x26, x36, uint64(p224Uint1(x43)))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64((uint64(p224Uint1(x27)) + (uint64(p224Uint1(x21)) + (uint64(p224Uint1(x13)) + x5))), x38, uint64(p224Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x42, arg1[2], uint64(0x0))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x44, uint64(0x0), uint64(p224Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(x46, uint64(0x0), uint64(p224Uint1(x51)))
+ var x54 uint64
+ _, x54 = bits.Mul64(x48, 0xffffffffffffffff)
+ var x56 uint64
+ var x57 uint64
+ x57, x56 = bits.Mul64(x54, 0xffffffff)
+ var x58 uint64
+ var x59 uint64
+ x59, x58 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x60 uint64
+ var x61 uint64
+ x61, x60 = bits.Mul64(x54, 0xffffffff00000000)
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x61, x58, uint64(0x0))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x59, x56, uint64(p224Uint1(x63)))
+ var x67 uint64
+ _, x67 = bits.Add64(x48, x54, uint64(0x0))
+ var x68 uint64
+ var x69 uint64
+ x68, x69 = bits.Add64(x50, x60, uint64(p224Uint1(x67)))
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x52, x62, uint64(p224Uint1(x69)))
+ var x72 uint64
+ var x73 uint64
+ x72, x73 = bits.Add64((uint64(p224Uint1(x53)) + (uint64(p224Uint1(x47)) + (uint64(p224Uint1(x39)) + x31))), x64, uint64(p224Uint1(x71)))
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x68, arg1[3], uint64(0x0))
+ var x76 uint64
+ var x77 uint64
+ x76, x77 = bits.Add64(x70, uint64(0x0), uint64(p224Uint1(x75)))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x72, uint64(0x0), uint64(p224Uint1(x77)))
+ var x80 uint64
+ _, x80 = bits.Mul64(x74, 0xffffffffffffffff)
+ var x82 uint64
+ var x83 uint64
+ x83, x82 = bits.Mul64(x80, 0xffffffff)
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x80, 0xffffffffffffffff)
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x80, 0xffffffff00000000)
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x87, x84, uint64(0x0))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x85, x82, uint64(p224Uint1(x89)))
+ var x93 uint64
+ _, x93 = bits.Add64(x74, x80, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x76, x86, uint64(p224Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x78, x88, uint64(p224Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64((uint64(p224Uint1(x79)) + (uint64(p224Uint1(x73)) + (uint64(p224Uint1(x65)) + x57))), x90, uint64(p224Uint1(x97)))
+ x100 := (uint64(p224Uint1(x99)) + (uint64(p224Uint1(x91)) + x83))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Sub64(x94, uint64(0x1), uint64(0x0))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Sub64(x96, 0xffffffff00000000, uint64(p224Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Sub64(x98, 0xffffffffffffffff, uint64(p224Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Sub64(x100, 0xffffffff, uint64(p224Uint1(x106)))
+ var x110 uint64
+ _, x110 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p224Uint1(x108)))
+ var x111 uint64
+ p224CmovznzU64(&x111, p224Uint1(x110), x101, x94)
+ var x112 uint64
+ p224CmovznzU64(&x112, p224Uint1(x110), x103, x96)
+ var x113 uint64
+ p224CmovznzU64(&x113, p224Uint1(x110), x105, x98)
+ var x114 uint64
+ p224CmovznzU64(&x114, p224Uint1(x110), x107, x100)
+ out1[0] = x111
+ out1[1] = x112
+ out1[2] = x113
+ out1[3] = x114
+}
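+
+// Note: p224FromMontgomery is a plain Montgomery reduction of the 4-limb
+// input: each round cancels the low limb with a q*m addition and shifts down
+// one limb, so the result is arg1 * R^-1 mod m with R = 2^256.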
+
+// p224ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func p224ToMontgomery(out1 *p224MontgomeryDomainFieldElement, arg1 *p224NonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0xffffffff)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xfffffffe00000000)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xffffffff00000000)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0xffffffff00000001)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p224Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p224Uint1(x16)))
+ var x19 uint64
+ _, x19 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x19, 0xffffffff)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x19, 0xffffffffffffffff)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x19, 0xffffffff00000000)
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Add64(x24, x21, uint64(p224Uint1(x28)))
+ var x32 uint64
+ _, x32 = bits.Add64(x11, x19, uint64(0x0))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x13, x25, uint64(p224Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x15, x27, uint64(p224Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x17, x29, uint64(p224Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xffffffff)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xfffffffe00000000)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0xffffffff00000000)
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, 0xffffffff00000001)
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x46, x43, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x44, x41, uint64(p224Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x42, x39, uint64(p224Uint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x33, x45, uint64(0x0))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x35, x47, uint64(p224Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x37, x49, uint64(p224Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(((uint64(p224Uint1(x38)) + (uint64(p224Uint1(x18)) + x6)) + (uint64(p224Uint1(x30)) + x22)), x51, uint64(p224Uint1(x58)))
+ var x61 uint64
+ _, x61 = bits.Mul64(x53, 0xffffffffffffffff)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x61, 0xffffffff)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x61, 0xffffffffffffffff)
+ var x67 uint64
+ var x68 uint64
+ x68, x67 = bits.Mul64(x61, 0xffffffff00000000)
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x68, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x66, x63, uint64(p224Uint1(x70)))
+ var x74 uint64
+ _, x74 = bits.Add64(x53, x61, uint64(0x0))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x55, x67, uint64(p224Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x57, x69, uint64(p224Uint1(x76)))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x59, x71, uint64(p224Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xffffffff)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0xfffffffe00000000)
+ var x85 uint64
+ var x86 uint64
+ x86, x85 = bits.Mul64(x2, 0xffffffff00000000)
+ var x87 uint64
+ var x88 uint64
+ x88, x87 = bits.Mul64(x2, 0xffffffff00000001)
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x88, x85, uint64(0x0))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x86, x83, uint64(p224Uint1(x90)))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x84, x81, uint64(p224Uint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x75, x87, uint64(0x0))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(x77, x89, uint64(p224Uint1(x96)))
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x79, x91, uint64(p224Uint1(x98)))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(((uint64(p224Uint1(x80)) + (uint64(p224Uint1(x60)) + (uint64(p224Uint1(x52)) + x40))) + (uint64(p224Uint1(x72)) + x64)), x93, uint64(p224Uint1(x100)))
+ var x103 uint64
+ _, x103 = bits.Mul64(x95, 0xffffffffffffffff)
+ var x105 uint64
+ var x106 uint64
+ x106, x105 = bits.Mul64(x103, 0xffffffff)
+ var x107 uint64
+ var x108 uint64
+ x108, x107 = bits.Mul64(x103, 0xffffffffffffffff)
+ var x109 uint64
+ var x110 uint64
+ x110, x109 = bits.Mul64(x103, 0xffffffff00000000)
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x110, x107, uint64(0x0))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x108, x105, uint64(p224Uint1(x112)))
+ var x116 uint64
+ _, x116 = bits.Add64(x95, x103, uint64(0x0))
+ var x117 uint64
+ var x118 uint64
+ x117, x118 = bits.Add64(x97, x109, uint64(p224Uint1(x116)))
+ var x119 uint64
+ var x120 uint64
+ x119, x120 = bits.Add64(x99, x111, uint64(p224Uint1(x118)))
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x101, x113, uint64(p224Uint1(x120)))
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0xffffffff)
+ var x125 uint64
+ var x126 uint64
+ x126, x125 = bits.Mul64(x3, 0xfffffffe00000000)
+ var x127 uint64
+ var x128 uint64
+ x128, x127 = bits.Mul64(x3, 0xffffffff00000000)
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x3, 0xffffffff00000001)
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x130, x127, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x128, x125, uint64(p224Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x126, x123, uint64(p224Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x117, x129, uint64(0x0))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x119, x131, uint64(p224Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x121, x133, uint64(p224Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(((uint64(p224Uint1(x122)) + (uint64(p224Uint1(x102)) + (uint64(p224Uint1(x94)) + x82))) + (uint64(p224Uint1(x114)) + x106)), x135, uint64(p224Uint1(x142)))
+ var x145 uint64
+ _, x145 = bits.Mul64(x137, 0xffffffffffffffff)
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x145, 0xffffffff)
+ var x149 uint64
+ var x150 uint64
+ x150, x149 = bits.Mul64(x145, 0xffffffffffffffff)
+ var x151 uint64
+ var x152 uint64
+ x152, x151 = bits.Mul64(x145, 0xffffffff00000000)
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x152, x149, uint64(0x0))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(x150, x147, uint64(p224Uint1(x154)))
+ var x158 uint64
+ _, x158 = bits.Add64(x137, x145, uint64(0x0))
+ var x159 uint64
+ var x160 uint64
+ x159, x160 = bits.Add64(x139, x151, uint64(p224Uint1(x158)))
+ var x161 uint64
+ var x162 uint64
+ x161, x162 = bits.Add64(x141, x153, uint64(p224Uint1(x160)))
+ var x163 uint64
+ var x164 uint64
+ x163, x164 = bits.Add64(x143, x155, uint64(p224Uint1(x162)))
+ x165 := ((uint64(p224Uint1(x164)) + (uint64(p224Uint1(x144)) + (uint64(p224Uint1(x136)) + x124))) + (uint64(p224Uint1(x156)) + x148))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Sub64(x159, uint64(0x1), uint64(0x0))
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Sub64(x161, 0xffffffff00000000, uint64(p224Uint1(x167)))
+ var x170 uint64
+ var x171 uint64
+ x170, x171 = bits.Sub64(x163, 0xffffffffffffffff, uint64(p224Uint1(x169)))
+ var x172 uint64
+ var x173 uint64
+ x172, x173 = bits.Sub64(x165, 0xffffffff, uint64(p224Uint1(x171)))
+ var x175 uint64
+ _, x175 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p224Uint1(x173)))
+ var x176 uint64
+ p224CmovznzU64(&x176, p224Uint1(x175), x166, x159)
+ var x177 uint64
+ p224CmovznzU64(&x177, p224Uint1(x175), x168, x161)
+ var x178 uint64
+ p224CmovznzU64(&x178, p224Uint1(x175), x170, x163)
+ var x179 uint64
+ p224CmovznzU64(&x179, p224Uint1(x175), x172, x165)
+ out1[0] = x176
+ out1[1] = x177
+ out1[2] = x178
+ out1[3] = x179
+}
+
+// p224Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+//
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p224Selectznz(out1 *[4]uint64, arg1 p224Uint1, arg2 *[4]uint64, arg3 *[4]uint64) {
+ var x1 uint64
+ p224CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p224CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p224CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p224CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+}
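+
+// Editorial sketch, not part of the generated file: p224Selectznz is the
+// limb-wise analogue of p224CmovznzU64, letting a caller choose between two
+// whole field elements without branching. A hypothetical call:
+//
+//	var out [4]uint64
+//	a := [4]uint64{1, 2, 3, 4}
+//	b := [4]uint64{5, 6, 7, 8}
+//	p224Selectznz(&out, 0, &a, &b) // out == a
+//	p224Selectznz(&out, 1, &a, &b) // out == b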
+
+// p224ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..27]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+func p224ToBytes(out1 *[28]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := uint8((x50 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x52
+}
+
+// p224FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffff]]
+func p224FromBytes(out1 *[4]uint64, arg1 *[28]uint8) {
+ x1 := (uint64(arg1[27]) << 24)
+ x2 := (uint64(arg1[26]) << 16)
+ x3 := (uint64(arg1[25]) << 8)
+ x4 := arg1[24]
+ x5 := (uint64(arg1[23]) << 56)
+ x6 := (uint64(arg1[22]) << 48)
+ x7 := (uint64(arg1[21]) << 40)
+ x8 := (uint64(arg1[20]) << 32)
+ x9 := (uint64(arg1[19]) << 24)
+ x10 := (uint64(arg1[18]) << 16)
+ x11 := (uint64(arg1[17]) << 8)
+ x12 := arg1[16]
+ x13 := (uint64(arg1[15]) << 56)
+ x14 := (uint64(arg1[14]) << 48)
+ x15 := (uint64(arg1[13]) << 40)
+ x16 := (uint64(arg1[12]) << 32)
+ x17 := (uint64(arg1[11]) << 24)
+ x18 := (uint64(arg1[10]) << 16)
+ x19 := (uint64(arg1[9]) << 8)
+ x20 := arg1[8]
+ x21 := (uint64(arg1[7]) << 56)
+ x22 := (uint64(arg1[6]) << 48)
+ x23 := (uint64(arg1[5]) << 40)
+ x24 := (uint64(arg1[4]) << 32)
+ x25 := (uint64(arg1[3]) << 24)
+ x26 := (uint64(arg1[2]) << 16)
+ x27 := (uint64(arg1[1]) << 8)
+ x28 := arg1[0]
+ x29 := (x27 + uint64(x28))
+ x30 := (x26 + x29)
+ x31 := (x25 + x30)
+ x32 := (x24 + x31)
+ x33 := (x23 + x32)
+ x34 := (x22 + x33)
+ x35 := (x21 + x34)
+ x36 := (x19 + uint64(x20))
+ x37 := (x18 + x36)
+ x38 := (x17 + x37)
+ x39 := (x16 + x38)
+ x40 := (x15 + x39)
+ x41 := (x14 + x40)
+ x42 := (x13 + x41)
+ x43 := (x11 + uint64(x12))
+ x44 := (x10 + x43)
+ x45 := (x9 + x44)
+ x46 := (x8 + x45)
+ x47 := (x7 + x46)
+ x48 := (x6 + x47)
+ x49 := (x5 + x48)
+ x50 := (x3 + uint64(x4))
+ x51 := (x2 + x50)
+ x52 := (x1 + x51)
+ out1[0] = x35
+ out1[1] = x42
+ out1[2] = x49
+ out1[3] = x52
+}
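+
+// Editorial sketch, not part of the generated file: p224ToBytes and
+// p224FromBytes are inverses on canonical encodings, with limb i covering
+// little-endian bytes 8*i through 8*i+7 (the top limb spans only four
+// bytes). A hypothetical round trip, for any limbs with eval limbs < m:
+//
+//	var buf [28]uint8
+//	var back [4]uint64
+//	p224ToBytes(&buf, &limbs)
+//	p224FromBytes(&back, &buf)
+//	// back == limbs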
diff --git a/src/crypto/internal/nistec/fiat/p224_invert.go b/src/crypto/internal/nistec/fiat/p224_invert.go
new file mode 100644
index 0000000..3cf5286
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p224_invert.go
@@ -0,0 +1,87 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by addchain. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *P224Element) Invert(x *P224Element) *P224Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of 11 multiplications and 223 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // x12 = _111111 << 6 + _111111
+ // x14 = x12 << 2 + _11
+ // x17 = x14 << 3 + _111
+ // x31 = x17 << 14 + x14
+ // x48 = x31 << 17 + x17
+ // x96 = x48 << 48 + x48
+ // x127 = x96 << 31 + x31
+ // return x127 << 97 + x96
+ //
+
+ var z = new(P224Element).Set(e)
+ var t0 = new(P224Element)
+ var t1 = new(P224Element)
+ var t2 = new(P224Element)
+
+ z.Square(x)
+ t0.Mul(x, z)
+ z.Square(t0)
+ z.Mul(x, z)
+ t1.Square(z)
+ for s := 1; s < 3; s++ {
+ t1.Square(t1)
+ }
+ t1.Mul(z, t1)
+ t2.Square(t1)
+ for s := 1; s < 6; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ for s := 0; s < 2; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 3; s++ {
+ t1.Square(t1)
+ }
+ z.Mul(z, t1)
+ t1.Square(z)
+ for s := 1; s < 14; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 17; s++ {
+ t1.Square(t1)
+ }
+ z.Mul(z, t1)
+ t1.Square(z)
+ for s := 1; s < 48; s++ {
+ t1.Square(t1)
+ }
+ z.Mul(z, t1)
+ t1.Square(z)
+ for s := 1; s < 31; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 97; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+
+ return e.Set(z)
+}
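+
+// Editorial sketch, not part of the generated file: Invert computes
+// x^(p-2) mod p, so for any nonzero x the defining property x * (1/x) == 1
+// can be checked through the exported element API (One, Mul, and Equal,
+// assuming P224Element mirrors the P256Element API below):
+//
+//	inv := new(P224Element).Invert(x)
+//	one := new(P224Element).One()
+//	got := new(P224Element).Mul(x, inv)
+//	_ = got.Equal(one) // 1 for every x != 0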
diff --git a/src/crypto/internal/nistec/fiat/p256.go b/src/crypto/internal/nistec/fiat/p256.go
new file mode 100644
index 0000000..7705904
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p256.go
@@ -0,0 +1,134 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// P256Element is an integer modulo 2^256 - 2^224 + 2^192 + 2^96 - 1.
+//
+// The zero value is a valid zero element.
+type P256Element struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x p256MontgomeryDomainFieldElement
+}
+
+const p256ElementLen = 32
+
+type p256UntypedFieldElement = [4]uint64
+
+// One sets e = 1, and returns e.
+func (e *P256Element) One() *P256Element {
+ p256SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *P256Element) Equal(t *P256Element) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *P256Element) IsZero() int {
+ zero := make([]byte, p256ElementLen)
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, zero)
+}
+
+// Set sets e = t, and returns e.
+func (e *P256Element) Set(t *P256Element) *P256Element {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the 32-byte big-endian encoding of e.
+func (e *P256Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+	// rather than happening on the heap.
+ var out [p256ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *P256Element) bytes(out *[p256ElementLen]byte) []byte {
+ var tmp p256NonMontgomeryDomainFieldElement
+ p256FromMontgomery(&tmp, &e.x)
+ p256ToBytes(out, (*p256UntypedFieldElement)(&tmp))
+ p256InvertEndianness(out[:])
+ return out[:]
+}
+
+// SetBytes sets e = v, where v is a big-endian 32-byte encoding, and returns e.
+// If v is not 32 bytes or it encodes a value equal to or higher than 2^256 - 2^224 + 2^192 + 2^96 - 1,
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *P256Element) SetBytes(v []byte) (*P256Element, error) {
+ if len(v) != p256ElementLen {
+ return nil, errors.New("invalid P256Element encoding")
+ }
+
+ // Check for non-canonical encodings (p + k, 2p + k, etc.) by comparing to
+	// the encoding of -1 mod p, that is, p - 1, the highest canonical encoding.
+ var minusOneEncoding = new(P256Element).Sub(
+ new(P256Element), new(P256Element).One()).Bytes()
+ for i := range v {
+ if v[i] < minusOneEncoding[i] {
+ break
+ }
+ if v[i] > minusOneEncoding[i] {
+ return nil, errors.New("invalid P256Element encoding")
+ }
+ }
+
+ var in [p256ElementLen]byte
+ copy(in[:], v)
+ p256InvertEndianness(in[:])
+ var tmp p256NonMontgomeryDomainFieldElement
+ p256FromBytes((*p256UntypedFieldElement)(&tmp), &in)
+ p256ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
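+
+// Editorial sketch, not part of the generated file: SetBytes and Bytes
+// round-trip canonical encodings, and SetBytes rejects everything else, so
+// a typical caller looks like this (encoded being a hypothetical []byte):
+//
+//	e, err := new(P256Element).SetBytes(encoded)
+//	if err != nil {
+//		// encoded was not a canonical 32-byte big-endian field element
+//	}
+//	_ = e.Bytes() // equal to encoded on success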
+
+// Add sets e = t1 + t2, and returns e.
+func (e *P256Element) Add(t1, t2 *P256Element) *P256Element {
+ p256Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *P256Element) Sub(t1, t2 *P256Element) *P256Element {
+ p256Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *P256Element) Mul(t1, t2 *P256Element) *P256Element {
+ p256Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *P256Element) Square(t *P256Element) *P256Element {
+ p256Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *P256Element) Select(a, b *P256Element, cond int) *P256Element {
+ p256Selectznz((*p256UntypedFieldElement)(&v.x), p256Uint1(cond),
+ (*p256UntypedFieldElement)(&b.x), (*p256UntypedFieldElement)(&a.x))
+ return v
+}
+
+func p256InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
diff --git a/src/crypto/internal/nistec/fiat/p256_fiat64.go b/src/crypto/internal/nistec/fiat/p256_fiat64.go
new file mode 100644
index 0000000..75352d5
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p256_fiat64.go
@@ -0,0 +1,1400 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p256 64 '2^256 - 2^224 + 2^192 + 2^96 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p256
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff (from "2^256 - 2^224 + 2^192 + 2^96 - 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package fiat
+
+import "math/bits"
+
+type p256Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p256Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p256MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p256MontgomeryDomainFieldElement [4]uint64
+
+// The type p256NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p256NonMontgomeryDomainFieldElement [4]uint64
+
+// p256CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p256CmovznzU64(out1 *uint64, arg1 p256Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
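+
+// Editorial note, not part of the generated file: multiplying the 0/1
+// condition by 0xffffffffffffffff yields an all-zeros or all-ones mask, so
+// the select above is branch-free and constant-time:
+//
+//	var r uint64
+//	p256CmovznzU64(&r, 0, 10, 20) // mask 0x000...0, r == 10
+//	p256CmovznzU64(&r, 1, 10, 20) // mask 0xfff...f, r == 20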
+
+// p256Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p256Mul(out1 *p256MontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement, arg2 *p256MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p256Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p256Uint1(x16)))
+ x19 := (uint64(p256Uint1(x18)) + x6)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x11, 0xffffffff00000001)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x11, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+ x28 := (uint64(p256Uint1(x27)) + x23)
+ var x30 uint64
+ _, x30 = bits.Add64(x11, x24, uint64(0x0))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x13, x26, uint64(p256Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x15, x28, uint64(p256Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x17, x20, uint64(p256Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x19, x21, uint64(p256Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, arg2[3])
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg2[2])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[1])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[0])
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x46, x43, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x44, x41, uint64(p256Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x42, x39, uint64(p256Uint1(x50)))
+ x53 := (uint64(p256Uint1(x52)) + x40)
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x31, x45, uint64(0x0))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(p256Uint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(p256Uint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p256Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(uint64(p256Uint1(x38)), x53, uint64(p256Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x65, x64 = bits.Mul64(x54, 0xffffffff00000001)
+ var x66 uint64
+ var x67 uint64
+ x67, x66 = bits.Mul64(x54, 0xffffffff)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x69, x66, uint64(0x0))
+ x72 := (uint64(p256Uint1(x71)) + x67)
+ var x74 uint64
+ _, x74 = bits.Add64(x54, x68, uint64(0x0))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x56, x70, uint64(p256Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x58, x72, uint64(p256Uint1(x76)))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x60, x64, uint64(p256Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x62, x65, uint64(p256Uint1(x80)))
+ x83 := (uint64(p256Uint1(x82)) + uint64(p256Uint1(x63)))
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x2, arg2[3])
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x2, arg2[2])
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg2[1])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg2[0])
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x91, x88, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x89, x86, uint64(p256Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x87, x84, uint64(p256Uint1(x95)))
+ x98 := (uint64(p256Uint1(x97)) + x85)
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x75, x90, uint64(0x0))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(x77, x92, uint64(p256Uint1(x100)))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(p256Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(p256Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(p256Uint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x110, x109 = bits.Mul64(x99, 0xffffffff00000001)
+ var x111 uint64
+ var x112 uint64
+ x112, x111 = bits.Mul64(x99, 0xffffffff)
+ var x113 uint64
+ var x114 uint64
+ x114, x113 = bits.Mul64(x99, 0xffffffffffffffff)
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x114, x111, uint64(0x0))
+ x117 := (uint64(p256Uint1(x116)) + x112)
+ var x119 uint64
+ _, x119 = bits.Add64(x99, x113, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x101, x115, uint64(p256Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x103, x117, uint64(p256Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x105, x109, uint64(p256Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x107, x110, uint64(p256Uint1(x125)))
+ x128 := (uint64(p256Uint1(x127)) + uint64(p256Uint1(x108)))
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x3, arg2[3])
+ var x131 uint64
+ var x132 uint64
+ x132, x131 = bits.Mul64(x3, arg2[2])
+ var x133 uint64
+ var x134 uint64
+ x134, x133 = bits.Mul64(x3, arg2[1])
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg2[0])
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x136, x133, uint64(0x0))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x134, x131, uint64(p256Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x132, x129, uint64(p256Uint1(x140)))
+ x143 := (uint64(p256Uint1(x142)) + x130)
+ var x144 uint64
+ var x145 uint64
+ x144, x145 = bits.Add64(x120, x135, uint64(0x0))
+ var x146 uint64
+ var x147 uint64
+ x146, x147 = bits.Add64(x122, x137, uint64(p256Uint1(x145)))
+ var x148 uint64
+ var x149 uint64
+ x148, x149 = bits.Add64(x124, x139, uint64(p256Uint1(x147)))
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(p256Uint1(x149)))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(p256Uint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x144, 0xffffffff00000001)
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x144, 0xffffffff)
+ var x158 uint64
+ var x159 uint64
+ x159, x158 = bits.Mul64(x144, 0xffffffffffffffff)
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x159, x156, uint64(0x0))
+ x162 := (uint64(p256Uint1(x161)) + x157)
+ var x164 uint64
+ _, x164 = bits.Add64(x144, x158, uint64(0x0))
+ var x165 uint64
+ var x166 uint64
+ x165, x166 = bits.Add64(x146, x160, uint64(p256Uint1(x164)))
+ var x167 uint64
+ var x168 uint64
+ x167, x168 = bits.Add64(x148, x162, uint64(p256Uint1(x166)))
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x150, x154, uint64(p256Uint1(x168)))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x152, x155, uint64(p256Uint1(x170)))
+ x173 := (uint64(p256Uint1(x172)) + uint64(p256Uint1(x153)))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Sub64(x165, 0xffffffffffffffff, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Sub64(x167, 0xffffffff, uint64(p256Uint1(x175)))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Sub64(x169, uint64(0x0), uint64(p256Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Sub64(x171, 0xffffffff00000001, uint64(p256Uint1(x179)))
+ var x183 uint64
+ _, x183 = bits.Sub64(x173, uint64(0x0), uint64(p256Uint1(x181)))
+ var x184 uint64
+ p256CmovznzU64(&x184, p256Uint1(x183), x174, x165)
+ var x185 uint64
+ p256CmovznzU64(&x185, p256Uint1(x183), x176, x167)
+ var x186 uint64
+ p256CmovznzU64(&x186, p256Uint1(x183), x178, x169)
+ var x187 uint64
+ p256CmovznzU64(&x187, p256Uint1(x183), x180, x171)
+ out1[0] = x184
+ out1[1] = x185
+ out1[2] = x186
+ out1[3] = x187
+}
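+
+// Editorial sketch, not part of the generated file: each of the four rounds
+// above accumulates one limb product arg1[i]*arg2 and then adds the multiple
+// of m that cancels the lowest accumulator limb (word-by-word Montgomery
+// reduction); a final conditional subtraction brings the result below m. A
+// hypothetical math/big cross-check of the postcondition, with aInt, bInt,
+// and mInt standing for the operands and modulus as big.Int values:
+//
+//	R := new(big.Int).Lsh(big.NewInt(1), 256)
+//	Rinv := new(big.Int).ModInverse(R, mInt)
+//	want := new(big.Int).Mul(aInt, bInt)
+//	want.Mul(want, Rinv).Mod(want, mInt) // equals eval out1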
+
+// p256Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+// 0 ≤ eval out1 < m
+func p256Square(out1 *p256MontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg1[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg1[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg1[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg1[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p256Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p256Uint1(x16)))
+ x19 := (uint64(p256Uint1(x18)) + x6)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x11, 0xffffffff00000001)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x11, 0xffffffff)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+ x28 := (uint64(p256Uint1(x27)) + x23)
+ var x30 uint64
+ _, x30 = bits.Add64(x11, x24, uint64(0x0))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x13, x26, uint64(p256Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x15, x28, uint64(p256Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x17, x20, uint64(p256Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x19, x21, uint64(p256Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, arg1[3])
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg1[2])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg1[1])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg1[0])
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x46, x43, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x44, x41, uint64(p256Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x42, x39, uint64(p256Uint1(x50)))
+ x53 := (uint64(p256Uint1(x52)) + x40)
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x31, x45, uint64(0x0))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(p256Uint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(p256Uint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(p256Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(uint64(p256Uint1(x38)), x53, uint64(p256Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x65, x64 = bits.Mul64(x54, 0xffffffff00000001)
+ var x66 uint64
+ var x67 uint64
+ x67, x66 = bits.Mul64(x54, 0xffffffff)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x69, x66, uint64(0x0))
+ x72 := (uint64(p256Uint1(x71)) + x67)
+ var x74 uint64
+ _, x74 = bits.Add64(x54, x68, uint64(0x0))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x56, x70, uint64(p256Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x58, x72, uint64(p256Uint1(x76)))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x60, x64, uint64(p256Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x62, x65, uint64(p256Uint1(x80)))
+ x83 := (uint64(p256Uint1(x82)) + uint64(p256Uint1(x63)))
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x2, arg1[3])
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x2, arg1[2])
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg1[1])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg1[0])
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x91, x88, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x89, x86, uint64(p256Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x87, x84, uint64(p256Uint1(x95)))
+ x98 := (uint64(p256Uint1(x97)) + x85)
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x75, x90, uint64(0x0))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(x77, x92, uint64(p256Uint1(x100)))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(p256Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(p256Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(p256Uint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x110, x109 = bits.Mul64(x99, 0xffffffff00000001)
+ var x111 uint64
+ var x112 uint64
+ x112, x111 = bits.Mul64(x99, 0xffffffff)
+ var x113 uint64
+ var x114 uint64
+ x114, x113 = bits.Mul64(x99, 0xffffffffffffffff)
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x114, x111, uint64(0x0))
+ x117 := (uint64(p256Uint1(x116)) + x112)
+ var x119 uint64
+ _, x119 = bits.Add64(x99, x113, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x101, x115, uint64(p256Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x103, x117, uint64(p256Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x105, x109, uint64(p256Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x107, x110, uint64(p256Uint1(x125)))
+ x128 := (uint64(p256Uint1(x127)) + uint64(p256Uint1(x108)))
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x3, arg1[3])
+ var x131 uint64
+ var x132 uint64
+ x132, x131 = bits.Mul64(x3, arg1[2])
+ var x133 uint64
+ var x134 uint64
+ x134, x133 = bits.Mul64(x3, arg1[1])
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg1[0])
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x136, x133, uint64(0x0))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x134, x131, uint64(p256Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x132, x129, uint64(p256Uint1(x140)))
+ x143 := (uint64(p256Uint1(x142)) + x130)
+ var x144 uint64
+ var x145 uint64
+ x144, x145 = bits.Add64(x120, x135, uint64(0x0))
+ var x146 uint64
+ var x147 uint64
+ x146, x147 = bits.Add64(x122, x137, uint64(p256Uint1(x145)))
+ var x148 uint64
+ var x149 uint64
+ x148, x149 = bits.Add64(x124, x139, uint64(p256Uint1(x147)))
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(p256Uint1(x149)))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(p256Uint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x144, 0xffffffff00000001)
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x144, 0xffffffff)
+ var x158 uint64
+ var x159 uint64
+ x159, x158 = bits.Mul64(x144, 0xffffffffffffffff)
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x159, x156, uint64(0x0))
+ x162 := (uint64(p256Uint1(x161)) + x157)
+ var x164 uint64
+ _, x164 = bits.Add64(x144, x158, uint64(0x0))
+ var x165 uint64
+ var x166 uint64
+ x165, x166 = bits.Add64(x146, x160, uint64(p256Uint1(x164)))
+ var x167 uint64
+ var x168 uint64
+ x167, x168 = bits.Add64(x148, x162, uint64(p256Uint1(x166)))
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x150, x154, uint64(p256Uint1(x168)))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x152, x155, uint64(p256Uint1(x170)))
+ x173 := (uint64(p256Uint1(x172)) + uint64(p256Uint1(x153)))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Sub64(x165, 0xffffffffffffffff, uint64(0x0))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Sub64(x167, 0xffffffff, uint64(p256Uint1(x175)))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Sub64(x169, uint64(0x0), uint64(p256Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Sub64(x171, 0xffffffff00000001, uint64(p256Uint1(x179)))
+ var x183 uint64
+ _, x183 = bits.Sub64(x173, uint64(0x0), uint64(p256Uint1(x181)))
+ var x184 uint64
+ p256CmovznzU64(&x184, p256Uint1(x183), x174, x165)
+ var x185 uint64
+ p256CmovznzU64(&x185, p256Uint1(x183), x176, x167)
+ var x186 uint64
+ p256CmovznzU64(&x186, p256Uint1(x183), x178, x169)
+ var x187 uint64
+ p256CmovznzU64(&x187, p256Uint1(x183), x180, x171)
+ out1[0] = x184
+ out1[1] = x185
+ out1[2] = x186
+ out1[3] = x187
+}
+
+// p256Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p256Add(out1 *p256MontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement, arg2 *p256MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p256Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p256Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p256Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, 0xffffffffffffffff, uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0xffffffff, uint64(p256Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(p256Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0xffffffff00000001, uint64(p256Uint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(p256Uint1(x8)), uint64(0x0), uint64(p256Uint1(x16)))
+ var x19 uint64
+ p256CmovznzU64(&x19, p256Uint1(x18), x9, x1)
+ var x20 uint64
+ p256CmovznzU64(&x20, p256Uint1(x18), x11, x3)
+ var x21 uint64
+ p256CmovznzU64(&x21, p256Uint1(x18), x13, x5)
+ var x22 uint64
+ p256CmovznzU64(&x22, p256Uint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
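+
+// Editorial note, not part of the generated file: p256Add computes the raw
+// sum, speculatively subtracts m, and lets the final borrow (x18) pick the
+// in-range value, keeping the whole operation branch-free:
+//
+//	sum := a + b    // at most 2m - 2; the carry lands in x8
+//	diff := sum - m // borrows exactly when sum < m
+//	// p256CmovznzU64 keeps sum when the borrow fired, diff otherwise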
+
+// p256Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p256Sub(out1 *p256MontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement, arg2 *p256MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p256Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p256Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p256Uint1(x6)))
+ var x9 uint64
+ p256CmovznzU64(&x9, p256Uint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, x9, uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0xffffffff), uint64(p256Uint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(p256Uint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0xffffffff00000001), uint64(p256Uint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
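+
+// Editorial note, not part of the generated file: the subtraction above may
+// borrow (x8 == 1 exactly when arg1 < arg2); x9 then becomes an all-ones
+// mask, and the masked addends are precisely the limbs of m
+// (0xffffffffffffffff, 0xffffffff, 0, 0xffffffff00000001), so the function
+// computes arg1 - arg2 + m to land back in [0, m):
+//
+//	out := a - b
+//	// if the subtraction borrowed, m is added back via masks, not a branch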
+
+// p256SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = 1 mod m
+// 0 ≤ eval out1 < m
+func p256SetOne(out1 *p256MontgomeryDomainFieldElement) {
+ out1[0] = uint64(0x1)
+ out1[1] = 0xffffffff00000000
+ out1[2] = 0xffffffffffffffff
+ out1[3] = 0xfffffffe
+}
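+
+// Editorial note, not part of the generated file: these limbs are R mod m
+// with R = 2^256, i.e. the Montgomery form of 1 (limbs 3..0):
+//
+//	2^256 mod m = 2^224 - 2^192 - 2^96 + 1
+//	            = 0x00000000fffffffe ffffffffffffffff ffffffff00000000 0000000000000001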
+
+// p256FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func p256FromMontgomery(out1 *p256NonMontgomeryDomainFieldElement, arg1 *p256MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ var x3 uint64
+ x3, x2 = bits.Mul64(x1, 0xffffffff00000001)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x1, 0xffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x8, x9 = bits.Add64(x7, x4, uint64(0x0))
+ var x11 uint64
+ _, x11 = bits.Add64(x1, x6, uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(uint64(0x0), x8, uint64(p256Uint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x12, arg1[1], uint64(0x0))
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x14, 0xffffffff00000001)
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x14, 0xffffffff)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x14, 0xffffffffffffffff)
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x21, x18, uint64(0x0))
+ var x25 uint64
+ _, x25 = bits.Add64(x14, x20, uint64(0x0))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64((uint64(p256Uint1(x15)) + (uint64(p256Uint1(x13)) + (uint64(p256Uint1(x9)) + x5))), x22, uint64(p256Uint1(x25)))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x2, (uint64(p256Uint1(x23)) + x19), uint64(p256Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x3, x16, uint64(p256Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x26, arg1[2], uint64(0x0))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x28, uint64(0x0), uint64(p256Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x30, uint64(0x0), uint64(p256Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x39, x38 = bits.Mul64(x32, 0xffffffff00000001)
+ var x40 uint64
+ var x41 uint64
+ x41, x40 = bits.Mul64(x32, 0xffffffff)
+ var x42 uint64
+ var x43 uint64
+ x43, x42 = bits.Mul64(x32, 0xffffffffffffffff)
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x43, x40, uint64(0x0))
+ var x47 uint64
+ _, x47 = bits.Add64(x32, x42, uint64(0x0))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x34, x44, uint64(p256Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x36, (uint64(p256Uint1(x45)) + x41), uint64(p256Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64((uint64(p256Uint1(x37)) + (uint64(p256Uint1(x31)) + x17)), x38, uint64(p256Uint1(x51)))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x48, arg1[3], uint64(0x0))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x50, uint64(0x0), uint64(p256Uint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x52, uint64(0x0), uint64(p256Uint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x61, x60 = bits.Mul64(x54, 0xffffffff00000001)
+ var x62 uint64
+ var x63 uint64
+ x63, x62 = bits.Mul64(x54, 0xffffffff)
+ var x64 uint64
+ var x65 uint64
+ x65, x64 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(x65, x62, uint64(0x0))
+ var x69 uint64
+ _, x69 = bits.Add64(x54, x64, uint64(0x0))
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x56, x66, uint64(p256Uint1(x69)))
+ var x72 uint64
+ var x73 uint64
+ x72, x73 = bits.Add64(x58, (uint64(p256Uint1(x67)) + x63), uint64(p256Uint1(x71)))
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64((uint64(p256Uint1(x59)) + (uint64(p256Uint1(x53)) + x39)), x60, uint64(p256Uint1(x73)))
+ x76 := (uint64(p256Uint1(x75)) + x61)
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Sub64(x70, 0xffffffffffffffff, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Sub64(x72, 0xffffffff, uint64(p256Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Sub64(x74, uint64(0x0), uint64(p256Uint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Sub64(x76, 0xffffffff00000001, uint64(p256Uint1(x82)))
+ var x86 uint64
+ _, x86 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p256Uint1(x84)))
+ var x87 uint64
+ p256CmovznzU64(&x87, p256Uint1(x86), x77, x70)
+ var x88 uint64
+ p256CmovznzU64(&x88, p256Uint1(x86), x79, x72)
+ var x89 uint64
+ p256CmovznzU64(&x89, p256Uint1(x86), x81, x74)
+ var x90 uint64
+ p256CmovznzU64(&x90, p256Uint1(x86), x83, x76)
+ out1[0] = x87
+ out1[1] = x88
+ out1[2] = x89
+ out1[3] = x90
+}
+
+// p256ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func p256ToMontgomery(out1 *p256MontgomeryDomainFieldElement, arg1 *p256NonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0x4fffffffd)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xfffffffffffffffe)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xfffffffbffffffff)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0x3)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(p256Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(p256Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x20, x19 = bits.Mul64(x11, 0xffffffff00000001)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x11, 0xffffffff)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x11, 0xffffffffffffffff)
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x24, x21, uint64(0x0))
+ var x28 uint64
+ _, x28 = bits.Add64(x11, x23, uint64(0x0))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Add64(x13, x25, uint64(p256Uint1(x28)))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x15, (uint64(p256Uint1(x26)) + x22), uint64(p256Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x17, x19, uint64(p256Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64((uint64(p256Uint1(x18)) + x6), x20, uint64(p256Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x38, x37 = bits.Mul64(x1, 0x4fffffffd)
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xfffffffffffffffe)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xfffffffbffffffff)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0x3)
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x44, x41, uint64(0x0))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x42, x39, uint64(p256Uint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x40, x37, uint64(p256Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x29, x43, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x31, x45, uint64(p256Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x33, x47, uint64(p256Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x35, x49, uint64(p256Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(x51, 0xffffffff00000001)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x51, 0xffffffff)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x51, 0xffffffffffffffff)
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x64, x61, uint64(0x0))
+ var x68 uint64
+ _, x68 = bits.Add64(x51, x63, uint64(0x0))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x53, x65, uint64(p256Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x55, (uint64(p256Uint1(x66)) + x62), uint64(p256Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x57, x59, uint64(p256Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(((uint64(p256Uint1(x58)) + uint64(p256Uint1(x36))) + (uint64(p256Uint1(x50)) + x38)), x60, uint64(p256Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x2, 0x4fffffffd)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x2, 0xfffffffffffffffe)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xfffffffbffffffff)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0x3)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x84, x81, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x82, x79, uint64(p256Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x80, x77, uint64(p256Uint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x69, x83, uint64(0x0))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x71, x85, uint64(p256Uint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x73, x87, uint64(p256Uint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(x75, x89, uint64(p256Uint1(x96)))
+ var x99 uint64
+ var x100 uint64
+ x100, x99 = bits.Mul64(x91, 0xffffffff00000001)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x91, 0xffffffff)
+ var x103 uint64
+ var x104 uint64
+ x104, x103 = bits.Mul64(x91, 0xffffffffffffffff)
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x104, x101, uint64(0x0))
+ var x108 uint64
+ _, x108 = bits.Add64(x91, x103, uint64(0x0))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x93, x105, uint64(p256Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x95, (uint64(p256Uint1(x106)) + x102), uint64(p256Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x97, x99, uint64(p256Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(((uint64(p256Uint1(x98)) + uint64(p256Uint1(x76))) + (uint64(p256Uint1(x90)) + x78)), x100, uint64(p256Uint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x3, 0x4fffffffd)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x3, 0xfffffffffffffffe)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x3, 0xfffffffbffffffff)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0x3)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(p256Uint1(x126)))
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x120, x117, uint64(p256Uint1(x128)))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x109, x123, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x111, x125, uint64(p256Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x113, x127, uint64(p256Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x115, x129, uint64(p256Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x140, x139 = bits.Mul64(x131, 0xffffffff00000001)
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x131, 0xffffffff)
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x131, 0xffffffffffffffff)
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x144, x141, uint64(0x0))
+ var x148 uint64
+ _, x148 = bits.Add64(x131, x143, uint64(0x0))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x133, x145, uint64(p256Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x135, (uint64(p256Uint1(x146)) + x142), uint64(p256Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x137, x139, uint64(p256Uint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(((uint64(p256Uint1(x138)) + uint64(p256Uint1(x116))) + (uint64(p256Uint1(x130)) + x118)), x140, uint64(p256Uint1(x154)))
+ var x157 uint64
+ var x158 uint64
+ x157, x158 = bits.Sub64(x149, 0xffffffffffffffff, uint64(0x0))
+ var x159 uint64
+ var x160 uint64
+ x159, x160 = bits.Sub64(x151, 0xffffffff, uint64(p256Uint1(x158)))
+ var x161 uint64
+ var x162 uint64
+ x161, x162 = bits.Sub64(x153, uint64(0x0), uint64(p256Uint1(x160)))
+ var x163 uint64
+ var x164 uint64
+ x163, x164 = bits.Sub64(x155, 0xffffffff00000001, uint64(p256Uint1(x162)))
+ var x166 uint64
+ _, x166 = bits.Sub64(uint64(p256Uint1(x156)), uint64(0x0), uint64(p256Uint1(x164)))
+ var x167 uint64
+ p256CmovznzU64(&x167, p256Uint1(x166), x157, x149)
+ var x168 uint64
+ p256CmovznzU64(&x168, p256Uint1(x166), x159, x151)
+ var x169 uint64
+ p256CmovznzU64(&x169, p256Uint1(x166), x161, x153)
+ var x170 uint64
+ p256CmovznzU64(&x170, p256Uint1(x166), x163, x155)
+ out1[0] = x167
+ out1[1] = x168
+ out1[2] = x169
+ out1[3] = x170
+}
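+
+// Editorial sketch, not part of the generated file: the constants 0x3,
+// 0xfffffffbffffffff, 0xfffffffffffffffe, and 0x4fffffffd above are the
+// limbs of R^2 mod m (R = 2^256), so translating into the Montgomery domain
+// is simply a Montgomery multiplication by R^2. It round-trips with
+// p256FromMontgomery, plain being a hypothetical in-range input:
+//
+//	var mont p256MontgomeryDomainFieldElement
+//	var back p256NonMontgomeryDomainFieldElement
+//	p256ToMontgomery(&mont, &plain)
+//	p256FromMontgomery(&back, &mont)
+//	// back == plain whenever eval plain < m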
+
+// p256Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+//
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p256Selectznz(out1 *[4]uint64, arg1 p256Uint1, arg2 *[4]uint64, arg3 *[4]uint64) {
+ var x1 uint64
+ p256CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p256CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p256CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p256CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+}
+
+// p256ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+func p256ToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := uint8((x58 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x53
+ out1[28] = x55
+ out1[29] = x57
+ out1[30] = x59
+ out1[31] = x60
+}
+
+// p256FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p256FromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
+ x1 := (uint64(arg1[31]) << 56)
+ x2 := (uint64(arg1[30]) << 48)
+ x3 := (uint64(arg1[29]) << 40)
+ x4 := (uint64(arg1[28]) << 32)
+ x5 := (uint64(arg1[27]) << 24)
+ x6 := (uint64(arg1[26]) << 16)
+ x7 := (uint64(arg1[25]) << 8)
+ x8 := arg1[24]
+ x9 := (uint64(arg1[23]) << 56)
+ x10 := (uint64(arg1[22]) << 48)
+ x11 := (uint64(arg1[21]) << 40)
+ x12 := (uint64(arg1[20]) << 32)
+ x13 := (uint64(arg1[19]) << 24)
+ x14 := (uint64(arg1[18]) << 16)
+ x15 := (uint64(arg1[17]) << 8)
+ x16 := arg1[16]
+ x17 := (uint64(arg1[15]) << 56)
+ x18 := (uint64(arg1[14]) << 48)
+ x19 := (uint64(arg1[13]) << 40)
+ x20 := (uint64(arg1[12]) << 32)
+ x21 := (uint64(arg1[11]) << 24)
+ x22 := (uint64(arg1[10]) << 16)
+ x23 := (uint64(arg1[9]) << 8)
+ x24 := arg1[8]
+ x25 := (uint64(arg1[7]) << 56)
+ x26 := (uint64(arg1[6]) << 48)
+ x27 := (uint64(arg1[5]) << 40)
+ x28 := (uint64(arg1[4]) << 32)
+ x29 := (uint64(arg1[3]) << 24)
+ x30 := (uint64(arg1[2]) << 16)
+ x31 := (uint64(arg1[1]) << 8)
+ x32 := arg1[0]
+ x33 := (x31 + uint64(x32))
+ x34 := (x30 + x33)
+ x35 := (x29 + x34)
+ x36 := (x28 + x35)
+ x37 := (x27 + x36)
+ x38 := (x26 + x37)
+ x39 := (x25 + x38)
+ x40 := (x23 + uint64(x24))
+ x41 := (x22 + x40)
+ x42 := (x21 + x41)
+ x43 := (x20 + x42)
+ x44 := (x19 + x43)
+ x45 := (x18 + x44)
+ x46 := (x17 + x45)
+ x47 := (x15 + uint64(x16))
+ x48 := (x14 + x47)
+ x49 := (x13 + x48)
+ x50 := (x12 + x49)
+ x51 := (x11 + x50)
+ x52 := (x10 + x51)
+ x53 := (x9 + x52)
+ x54 := (x7 + uint64(x8))
+ x55 := (x6 + x54)
+ x56 := (x5 + x55)
+ x57 := (x4 + x56)
+ x58 := (x3 + x57)
+ x59 := (x2 + x58)
+ x60 := (x1 + x59)
+ out1[0] = x39
+ out1[1] = x46
+ out1[2] = x53
+ out1[3] = x60
+}
diff --git a/src/crypto/internal/nistec/fiat/p256_invert.go b/src/crypto/internal/nistec/fiat/p256_invert.go
new file mode 100644
index 0000000..d0101e1
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p256_invert.go
@@ -0,0 +1,84 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by addchain. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *P256Element) Invert(x *P256Element) *P256Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of 12 multiplications and 255 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // x12 = _111111 << 6 + _111111
+ // x15 = x12 << 3 + _111
+ // x16 = 2*x15 + 1
+ // x32 = x16 << 16 + x16
+ // i53 = x32 << 15
+ // x47 = x15 + i53
+ // i263 = ((i53 << 17 + 1) << 143 + x47) << 47
+ // return (x47 + i263) << 2 + 1
+ //
+
+ var z = new(P256Element).Set(e)
+ var t0 = new(P256Element)
+ var t1 = new(P256Element)
+
+ z.Square(x)
+ z.Mul(x, z)
+ z.Square(z)
+ z.Mul(x, z)
+ t0.Square(z)
+ for s := 1; s < 3; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(z, t0)
+ t1.Square(t0)
+ for s := 1; s < 6; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 3; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t0.Mul(x, t0)
+ t1.Square(t0)
+ for s := 1; s < 16; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 15; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 17; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(x, t0)
+ for s := 0; s < 143; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(z, t0)
+ for s := 0; s < 47; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 2; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+
+ return e.Set(z)
+}
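
Invert relies on Fermat's little theorem: for prime p and x ≠ 0, x^(p−2) ≡ x⁻¹ (mod p). A quick cross-check of that identity with math/big (a sketch outside this package, with the P-256 prime hard-coded):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p is the P-256 base field prime: 2^256 - 2^224 + 2^192 + 2^96 - 1.
	p, _ := new(big.Int).SetString(
		"ffffffff00000001000000000000000000000000ffffffffffffffffffffffff", 16)
	x := big.NewInt(12345)

	// Fermat inversion, the strategy the addition chain implements.
	exp := new(big.Int).Sub(p, big.NewInt(2))
	fermat := new(big.Int).Exp(x, exp, p)

	// Extended-Euclid inversion for comparison.
	euclid := new(big.Int).ModInverse(x, p)

	fmt.Println(fermat.Cmp(euclid) == 0) // true
}
```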
diff --git a/src/crypto/internal/nistec/fiat/p384.go b/src/crypto/internal/nistec/fiat/p384.go
new file mode 100644
index 0000000..aed0c01
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p384.go
@@ -0,0 +1,134 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// P384Element is an integer modulo 2^384 - 2^128 - 2^96 + 2^32 - 1.
+//
+// The zero value is a valid zero element.
+type P384Element struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x p384MontgomeryDomainFieldElement
+}
+
+const p384ElementLen = 48
+
+type p384UntypedFieldElement = [6]uint64
+
+// One sets e = 1, and returns e.
+func (e *P384Element) One() *P384Element {
+ p384SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *P384Element) Equal(t *P384Element) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *P384Element) IsZero() int {
+ zero := make([]byte, p384ElementLen)
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, zero)
+}
+
+// Set sets e = t, and returns e.
+func (e *P384Element) Set(t *P384Element) *P384Element {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the 48-byte big-endian encoding of e.
+func (e *P384Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p384ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *P384Element) bytes(out *[p384ElementLen]byte) []byte {
+ var tmp p384NonMontgomeryDomainFieldElement
+ p384FromMontgomery(&tmp, &e.x)
+ p384ToBytes(out, (*p384UntypedFieldElement)(&tmp))
+ p384InvertEndianness(out[:])
+ return out[:]
+}
+
+// SetBytes sets e = v, where v is a big-endian 48-byte encoding, and returns e.
+// If v is not 48 bytes or it encodes a value higher than or equal to 2^384 - 2^128 - 2^96 + 2^32 - 1,
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *P384Element) SetBytes(v []byte) (*P384Element, error) {
+ if len(v) != p384ElementLen {
+ return nil, errors.New("invalid P384Element encoding")
+ }
+
+ // Check for non-canonical encodings (p + k, 2p + k, etc.) by comparing to
+ // the encoding of -1 mod p, so p - 1, the highest canonical encoding.
+ var minusOneEncoding = new(P384Element).Sub(
+ new(P384Element), new(P384Element).One()).Bytes()
+ for i := range v {
+ if v[i] < minusOneEncoding[i] {
+ break
+ }
+ if v[i] > minusOneEncoding[i] {
+ return nil, errors.New("invalid P384Element encoding")
+ }
+ }
+
+ var in [p384ElementLen]byte
+ copy(in[:], v)
+ p384InvertEndianness(in[:])
+ var tmp p384NonMontgomeryDomainFieldElement
+ p384FromBytes((*p384UntypedFieldElement)(&tmp), &in)
+ p384ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
+
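The byte-by-byte loop in SetBytes is a constant-time big-endian comparison against the encoding of p − 1. The same range check, written variable-time with math/big (an illustrative sketch; isCanonical is not part of the package API):

```go
package main

import (
	"fmt"
	"math/big"
)

// isCanonical is a variable-time sketch of the range check SetBytes
// performs byte by byte: a 48-byte big-endian encoding is canonical
// iff its value is strictly less than p.
func isCanonical(v []byte, p *big.Int) bool {
	return len(v) == 48 && new(big.Int).SetBytes(v).Cmp(p) < 0
}

func main() {
	p, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff", 16)
	enc := make([]byte, 48) // the zero element, trivially canonical
	fmt.Println(isCanonical(enc, p)) // true
}
```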
+// Add sets e = t1 + t2, and returns e.
+func (e *P384Element) Add(t1, t2 *P384Element) *P384Element {
+ p384Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *P384Element) Sub(t1, t2 *P384Element) *P384Element {
+ p384Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *P384Element) Mul(t1, t2 *P384Element) *P384Element {
+ p384Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *P384Element) Square(t *P384Element) *P384Element {
+ p384Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *P384Element) Select(a, b *P384Element, cond int) *P384Element {
+ p384Selectznz((*p384UntypedFieldElement)(&v.x), p384Uint1(cond),
+ (*p384UntypedFieldElement)(&b.x), (*p384UntypedFieldElement)(&a.x))
+ return v
+}
+
+func p384InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
diff --git a/src/crypto/internal/nistec/fiat/p384_fiat64.go b/src/crypto/internal/nistec/fiat/p384_fiat64.go
new file mode 100644
index 0000000..979eadd
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p384_fiat64.go
@@ -0,0 +1,3036 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p384 64 '2^384 - 2^128 - 2^96 + 2^32 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p384
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff (from "2^384 - 2^128 - 2^96 + 2^32 - 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) + (z[32] << 256) + (z[33] << 0x108) + (z[34] << 0x110) + (z[35] << 0x118) + (z[36] << 0x120) + (z[37] << 0x128) + (z[38] << 0x130) + (z[39] << 0x138) + (z[40] << 0x140) + (z[41] << 0x148) + (z[42] << 0x150) + (z[43] << 0x158) + (z[44] << 0x160) + (z[45] << 0x168) + (z[46] << 0x170) + (z[47] << 0x178)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) in
+//
+// if x1 & (2^384-1) < 2^383 then x1 & (2^384-1) else (x1 & (2^384-1)) - 2^384
+
+package fiat
+
+import "math/bits"
+
+type p384Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p384Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p384MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p384MontgomeryDomainFieldElement [6]uint64
+
+// The type p384NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p384NonMontgomeryDomainFieldElement [6]uint64
+
+// p384CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p384CmovznzU64(out1 *uint64, arg1 p384Uint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
+
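p384CmovznzU64 avoids a branch by multiplying the 0/1 condition into an all-zeros or all-ones mask and blending the two candidate words with it. A minimal standalone sketch of the trick:

```go
package main

import "fmt"

// cmov is a branch-free select, the same mask trick used by
// p384CmovznzU64: cond must be 0 or 1; the multiplication turns it
// into an all-zeros or all-ones mask.
func cmov(cond, ifZero, ifOne uint64) uint64 {
	mask := cond * 0xffffffffffffffff // 0x00...0 or 0xff...f
	return (mask & ifOne) | (^mask & ifZero)
}

func main() {
	fmt.Printf("%#x\n", cmov(0, 0xaaaa, 0xbbbb)) // 0xaaaa
	fmt.Printf("%#x\n", cmov(1, 0xaaaa, 0xbbbb)) // 0xbbbb
}
```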
+// p384Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p384Mul(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[0]
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x6, arg2[5])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x6, arg2[4])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x6, arg2[3])
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x6, arg2[2])
+ var x15 uint64
+ var x16 uint64
+ x16, x15 = bits.Mul64(x6, arg2[1])
+ var x17 uint64
+ var x18 uint64
+ x18, x17 = bits.Mul64(x6, arg2[0])
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Add64(x18, x15, uint64(0x0))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Add64(x16, x13, uint64(p384Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Add64(x14, x11, uint64(p384Uint1(x22)))
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x12, x9, uint64(p384Uint1(x24)))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x10, x7, uint64(p384Uint1(x26)))
+ x29 := (uint64(p384Uint1(x28)) + x8)
+ var x30 uint64
+ _, x30 = bits.Mul64(x17, 0x100000001)
+ var x32 uint64
+ var x33 uint64
+ x33, x32 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x34 uint64
+ var x35 uint64
+ x35, x34 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x36 uint64
+ var x37 uint64
+ x37, x36 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x38 uint64
+ var x39 uint64
+ x39, x38 = bits.Mul64(x30, 0xfffffffffffffffe)
+ var x40 uint64
+ var x41 uint64
+ x41, x40 = bits.Mul64(x30, 0xffffffff00000000)
+ var x42 uint64
+ var x43 uint64
+ x43, x42 = bits.Mul64(x30, 0xffffffff)
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x43, x40, uint64(0x0))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(x41, x38, uint64(p384Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x39, x36, uint64(p384Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x37, x34, uint64(p384Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(x35, x32, uint64(p384Uint1(x51)))
+ x54 := (uint64(p384Uint1(x53)) + x33)
+ var x56 uint64
+ _, x56 = bits.Add64(x17, x42, uint64(0x0))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x19, x44, uint64(p384Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(x21, x46, uint64(p384Uint1(x58)))
+ var x61 uint64
+ var x62 uint64
+ x61, x62 = bits.Add64(x23, x48, uint64(p384Uint1(x60)))
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x25, x50, uint64(p384Uint1(x62)))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x27, x52, uint64(p384Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x29, x54, uint64(p384Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x70, x69 = bits.Mul64(x1, arg2[5])
+ var x71 uint64
+ var x72 uint64
+ x72, x71 = bits.Mul64(x1, arg2[4])
+ var x73 uint64
+ var x74 uint64
+ x74, x73 = bits.Mul64(x1, arg2[3])
+ var x75 uint64
+ var x76 uint64
+ x76, x75 = bits.Mul64(x1, arg2[2])
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x1, arg2[1])
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x1, arg2[0])
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x80, x77, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x78, x75, uint64(p384Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x76, x73, uint64(p384Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x74, x71, uint64(p384Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x72, x69, uint64(p384Uint1(x88)))
+ x91 := (uint64(p384Uint1(x90)) + x70)
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x57, x79, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x59, x81, uint64(p384Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x61, x83, uint64(p384Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x63, x85, uint64(p384Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x65, x87, uint64(p384Uint1(x99)))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x67, x89, uint64(p384Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(uint64(p384Uint1(x68)), x91, uint64(p384Uint1(x103)))
+ var x106 uint64
+ _, x106 = bits.Mul64(x92, 0x100000001)
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x106, 0xfffffffffffffffe)
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x106, 0xffffffff00000000)
+ var x118 uint64
+ var x119 uint64
+ x119, x118 = bits.Mul64(x106, 0xffffffff)
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x119, x116, uint64(0x0))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x117, x114, uint64(p384Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x115, x112, uint64(p384Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x113, x110, uint64(p384Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x111, x108, uint64(p384Uint1(x127)))
+ x130 := (uint64(p384Uint1(x129)) + x109)
+ var x132 uint64
+ _, x132 = bits.Add64(x92, x118, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x94, x120, uint64(p384Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x96, x122, uint64(p384Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x98, x124, uint64(p384Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x100, x126, uint64(p384Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x102, x128, uint64(p384Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x104, x130, uint64(p384Uint1(x142)))
+ x145 := (uint64(p384Uint1(x144)) + uint64(p384Uint1(x105)))
+ var x146 uint64
+ var x147 uint64
+ x147, x146 = bits.Mul64(x2, arg2[5])
+ var x148 uint64
+ var x149 uint64
+ x149, x148 = bits.Mul64(x2, arg2[4])
+ var x150 uint64
+ var x151 uint64
+ x151, x150 = bits.Mul64(x2, arg2[3])
+ var x152 uint64
+ var x153 uint64
+ x153, x152 = bits.Mul64(x2, arg2[2])
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x2, arg2[1])
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x2, arg2[0])
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x157, x154, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x155, x152, uint64(p384Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x153, x150, uint64(p384Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x151, x148, uint64(p384Uint1(x163)))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x149, x146, uint64(p384Uint1(x165)))
+ x168 := (uint64(p384Uint1(x167)) + x147)
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x133, x156, uint64(0x0))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x135, x158, uint64(p384Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x137, x160, uint64(p384Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x139, x162, uint64(p384Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x141, x164, uint64(p384Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x143, x166, uint64(p384Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x145, x168, uint64(p384Uint1(x180)))
+ var x183 uint64
+ _, x183 = bits.Mul64(x169, 0x100000001)
+ var x185 uint64
+ var x186 uint64
+ x186, x185 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x187 uint64
+ var x188 uint64
+ x188, x187 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x189 uint64
+ var x190 uint64
+ x190, x189 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x191 uint64
+ var x192 uint64
+ x192, x191 = bits.Mul64(x183, 0xfffffffffffffffe)
+ var x193 uint64
+ var x194 uint64
+ x194, x193 = bits.Mul64(x183, 0xffffffff00000000)
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(x183, 0xffffffff)
+ var x197 uint64
+ var x198 uint64
+ x197, x198 = bits.Add64(x196, x193, uint64(0x0))
+ var x199 uint64
+ var x200 uint64
+ x199, x200 = bits.Add64(x194, x191, uint64(p384Uint1(x198)))
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x192, x189, uint64(p384Uint1(x200)))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x190, x187, uint64(p384Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x188, x185, uint64(p384Uint1(x204)))
+ x207 := (uint64(p384Uint1(x206)) + x186)
+ var x209 uint64
+ _, x209 = bits.Add64(x169, x195, uint64(0x0))
+ var x210 uint64
+ var x211 uint64
+ x210, x211 = bits.Add64(x171, x197, uint64(p384Uint1(x209)))
+ var x212 uint64
+ var x213 uint64
+ x212, x213 = bits.Add64(x173, x199, uint64(p384Uint1(x211)))
+ var x214 uint64
+ var x215 uint64
+ x214, x215 = bits.Add64(x175, x201, uint64(p384Uint1(x213)))
+ var x216 uint64
+ var x217 uint64
+ x216, x217 = bits.Add64(x177, x203, uint64(p384Uint1(x215)))
+ var x218 uint64
+ var x219 uint64
+ x218, x219 = bits.Add64(x179, x205, uint64(p384Uint1(x217)))
+ var x220 uint64
+ var x221 uint64
+ x220, x221 = bits.Add64(x181, x207, uint64(p384Uint1(x219)))
+ x222 := (uint64(p384Uint1(x221)) + uint64(p384Uint1(x182)))
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x3, arg2[5])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x3, arg2[4])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x3, arg2[3])
+ var x229 uint64
+ var x230 uint64
+ x230, x229 = bits.Mul64(x3, arg2[2])
+ var x231 uint64
+ var x232 uint64
+ x232, x231 = bits.Mul64(x3, arg2[1])
+ var x233 uint64
+ var x234 uint64
+ x234, x233 = bits.Mul64(x3, arg2[0])
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x234, x231, uint64(0x0))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
+ x245 := (uint64(p384Uint1(x244)) + x224)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x210, x233, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x212, x235, uint64(p384Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x214, x237, uint64(p384Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x216, x239, uint64(p384Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x218, x241, uint64(p384Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x220, x243, uint64(p384Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x222, x245, uint64(p384Uint1(x257)))
+ var x260 uint64
+ _, x260 = bits.Mul64(x246, 0x100000001)
+ var x262 uint64
+ var x263 uint64
+ x263, x262 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x264 uint64
+ var x265 uint64
+ x265, x264 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x260, 0xfffffffffffffffe)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x260, 0xffffffff00000000)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x260, 0xffffffff)
+ var x274 uint64
+ var x275 uint64
+ x274, x275 = bits.Add64(x273, x270, uint64(0x0))
+ var x276 uint64
+ var x277 uint64
+ x276, x277 = bits.Add64(x271, x268, uint64(p384Uint1(x275)))
+ var x278 uint64
+ var x279 uint64
+ x278, x279 = bits.Add64(x269, x266, uint64(p384Uint1(x277)))
+ var x280 uint64
+ var x281 uint64
+ x280, x281 = bits.Add64(x267, x264, uint64(p384Uint1(x279)))
+ var x282 uint64
+ var x283 uint64
+ x282, x283 = bits.Add64(x265, x262, uint64(p384Uint1(x281)))
+ x284 := (uint64(p384Uint1(x283)) + x263)
+ var x286 uint64
+ _, x286 = bits.Add64(x246, x272, uint64(0x0))
+ var x287 uint64
+ var x288 uint64
+ x287, x288 = bits.Add64(x248, x274, uint64(p384Uint1(x286)))
+ var x289 uint64
+ var x290 uint64
+ x289, x290 = bits.Add64(x250, x276, uint64(p384Uint1(x288)))
+ var x291 uint64
+ var x292 uint64
+ x291, x292 = bits.Add64(x252, x278, uint64(p384Uint1(x290)))
+ var x293 uint64
+ var x294 uint64
+ x293, x294 = bits.Add64(x254, x280, uint64(p384Uint1(x292)))
+ var x295 uint64
+ var x296 uint64
+ x295, x296 = bits.Add64(x256, x282, uint64(p384Uint1(x294)))
+ var x297 uint64
+ var x298 uint64
+ x297, x298 = bits.Add64(x258, x284, uint64(p384Uint1(x296)))
+ x299 := (uint64(p384Uint1(x298)) + uint64(p384Uint1(x259)))
+ var x300 uint64
+ var x301 uint64
+ x301, x300 = bits.Mul64(x4, arg2[5])
+ var x302 uint64
+ var x303 uint64
+ x303, x302 = bits.Mul64(x4, arg2[4])
+ var x304 uint64
+ var x305 uint64
+ x305, x304 = bits.Mul64(x4, arg2[3])
+ var x306 uint64
+ var x307 uint64
+ x307, x306 = bits.Mul64(x4, arg2[2])
+ var x308 uint64
+ var x309 uint64
+ x309, x308 = bits.Mul64(x4, arg2[1])
+ var x310 uint64
+ var x311 uint64
+ x311, x310 = bits.Mul64(x4, arg2[0])
+ var x312 uint64
+ var x313 uint64
+ x312, x313 = bits.Add64(x311, x308, uint64(0x0))
+ var x314 uint64
+ var x315 uint64
+ x314, x315 = bits.Add64(x309, x306, uint64(p384Uint1(x313)))
+ var x316 uint64
+ var x317 uint64
+ x316, x317 = bits.Add64(x307, x304, uint64(p384Uint1(x315)))
+ var x318 uint64
+ var x319 uint64
+ x318, x319 = bits.Add64(x305, x302, uint64(p384Uint1(x317)))
+ var x320 uint64
+ var x321 uint64
+ x320, x321 = bits.Add64(x303, x300, uint64(p384Uint1(x319)))
+ x322 := (uint64(p384Uint1(x321)) + x301)
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64(x287, x310, uint64(0x0))
+ var x325 uint64
+ var x326 uint64
+ x325, x326 = bits.Add64(x289, x312, uint64(p384Uint1(x324)))
+ var x327 uint64
+ var x328 uint64
+ x327, x328 = bits.Add64(x291, x314, uint64(p384Uint1(x326)))
+ var x329 uint64
+ var x330 uint64
+ x329, x330 = bits.Add64(x293, x316, uint64(p384Uint1(x328)))
+ var x331 uint64
+ var x332 uint64
+ x331, x332 = bits.Add64(x295, x318, uint64(p384Uint1(x330)))
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x297, x320, uint64(p384Uint1(x332)))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x299, x322, uint64(p384Uint1(x334)))
+ var x337 uint64
+ _, x337 = bits.Mul64(x323, 0x100000001)
+ var x339 uint64
+ var x340 uint64
+ x340, x339 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x341 uint64
+ var x342 uint64
+ x342, x341 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x343 uint64
+ var x344 uint64
+ x344, x343 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x345 uint64
+ var x346 uint64
+ x346, x345 = bits.Mul64(x337, 0xfffffffffffffffe)
+ var x347 uint64
+ var x348 uint64
+ x348, x347 = bits.Mul64(x337, 0xffffffff00000000)
+ var x349 uint64
+ var x350 uint64
+ x350, x349 = bits.Mul64(x337, 0xffffffff)
+ var x351 uint64
+ var x352 uint64
+ x351, x352 = bits.Add64(x350, x347, uint64(0x0))
+ var x353 uint64
+ var x354 uint64
+ x353, x354 = bits.Add64(x348, x345, uint64(p384Uint1(x352)))
+ var x355 uint64
+ var x356 uint64
+ x355, x356 = bits.Add64(x346, x343, uint64(p384Uint1(x354)))
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x344, x341, uint64(p384Uint1(x356)))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x342, x339, uint64(p384Uint1(x358)))
+ x361 := (uint64(p384Uint1(x360)) + x340)
+ var x363 uint64
+ _, x363 = bits.Add64(x323, x349, uint64(0x0))
+ var x364 uint64
+ var x365 uint64
+ x364, x365 = bits.Add64(x325, x351, uint64(p384Uint1(x363)))
+ var x366 uint64
+ var x367 uint64
+ x366, x367 = bits.Add64(x327, x353, uint64(p384Uint1(x365)))
+ var x368 uint64
+ var x369 uint64
+ x368, x369 = bits.Add64(x329, x355, uint64(p384Uint1(x367)))
+ var x370 uint64
+ var x371 uint64
+ x370, x371 = bits.Add64(x331, x357, uint64(p384Uint1(x369)))
+ var x372 uint64
+ var x373 uint64
+ x372, x373 = bits.Add64(x333, x359, uint64(p384Uint1(x371)))
+ var x374 uint64
+ var x375 uint64
+ x374, x375 = bits.Add64(x335, x361, uint64(p384Uint1(x373)))
+ x376 := (uint64(p384Uint1(x375)) + uint64(p384Uint1(x336)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x5, arg2[5])
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x5, arg2[4])
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x5, arg2[3])
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x5, arg2[2])
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x5, arg2[1])
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x5, arg2[0])
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64(x388, x385, uint64(0x0))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Add64(x386, x383, uint64(p384Uint1(x390)))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Add64(x384, x381, uint64(p384Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x382, x379, uint64(p384Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x380, x377, uint64(p384Uint1(x396)))
+ x399 := (uint64(p384Uint1(x398)) + x378)
+ var x400 uint64
+ var x401 uint64
+ x400, x401 = bits.Add64(x364, x387, uint64(0x0))
+ var x402 uint64
+ var x403 uint64
+ x402, x403 = bits.Add64(x366, x389, uint64(p384Uint1(x401)))
+ var x404 uint64
+ var x405 uint64
+ x404, x405 = bits.Add64(x368, x391, uint64(p384Uint1(x403)))
+ var x406 uint64
+ var x407 uint64
+ x406, x407 = bits.Add64(x370, x393, uint64(p384Uint1(x405)))
+ var x408 uint64
+ var x409 uint64
+ x408, x409 = bits.Add64(x372, x395, uint64(p384Uint1(x407)))
+ var x410 uint64
+ var x411 uint64
+ x410, x411 = bits.Add64(x374, x397, uint64(p384Uint1(x409)))
+ var x412 uint64
+ var x413 uint64
+ x412, x413 = bits.Add64(x376, x399, uint64(p384Uint1(x411)))
+ var x414 uint64
+ _, x414 = bits.Mul64(x400, 0x100000001)
+ var x416 uint64
+ var x417 uint64
+ x417, x416 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x418 uint64
+ var x419 uint64
+ x419, x418 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x420 uint64
+ var x421 uint64
+ x421, x420 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x422 uint64
+ var x423 uint64
+ x423, x422 = bits.Mul64(x414, 0xfffffffffffffffe)
+ var x424 uint64
+ var x425 uint64
+ x425, x424 = bits.Mul64(x414, 0xffffffff00000000)
+ var x426 uint64
+ var x427 uint64
+ x427, x426 = bits.Mul64(x414, 0xffffffff)
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x427, x424, uint64(0x0))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x425, x422, uint64(p384Uint1(x429)))
+ var x432 uint64
+ var x433 uint64
+ x432, x433 = bits.Add64(x423, x420, uint64(p384Uint1(x431)))
+ var x434 uint64
+ var x435 uint64
+ x434, x435 = bits.Add64(x421, x418, uint64(p384Uint1(x433)))
+ var x436 uint64
+ var x437 uint64
+ x436, x437 = bits.Add64(x419, x416, uint64(p384Uint1(x435)))
+ x438 := (uint64(p384Uint1(x437)) + x417)
+ var x440 uint64
+ _, x440 = bits.Add64(x400, x426, uint64(0x0))
+ var x441 uint64
+ var x442 uint64
+ x441, x442 = bits.Add64(x402, x428, uint64(p384Uint1(x440)))
+ var x443 uint64
+ var x444 uint64
+ x443, x444 = bits.Add64(x404, x430, uint64(p384Uint1(x442)))
+ var x445 uint64
+ var x446 uint64
+ x445, x446 = bits.Add64(x406, x432, uint64(p384Uint1(x444)))
+ var x447 uint64
+ var x448 uint64
+ x447, x448 = bits.Add64(x408, x434, uint64(p384Uint1(x446)))
+ var x449 uint64
+ var x450 uint64
+ x449, x450 = bits.Add64(x410, x436, uint64(p384Uint1(x448)))
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x412, x438, uint64(p384Uint1(x450)))
+ x453 := (uint64(p384Uint1(x452)) + uint64(p384Uint1(x413)))
+ var x454 uint64
+ var x455 uint64
+ x454, x455 = bits.Sub64(x441, 0xffffffff, uint64(0x0))
+ var x456 uint64
+ var x457 uint64
+ x456, x457 = bits.Sub64(x443, 0xffffffff00000000, uint64(p384Uint1(x455)))
+ var x458 uint64
+ var x459 uint64
+ x458, x459 = bits.Sub64(x445, 0xfffffffffffffffe, uint64(p384Uint1(x457)))
+ var x460 uint64
+ var x461 uint64
+ x460, x461 = bits.Sub64(x447, 0xffffffffffffffff, uint64(p384Uint1(x459)))
+ var x462 uint64
+ var x463 uint64
+ x462, x463 = bits.Sub64(x449, 0xffffffffffffffff, uint64(p384Uint1(x461)))
+ var x464 uint64
+ var x465 uint64
+ x464, x465 = bits.Sub64(x451, 0xffffffffffffffff, uint64(p384Uint1(x463)))
+ var x467 uint64
+ _, x467 = bits.Sub64(x453, uint64(0x0), uint64(p384Uint1(x465)))
+ var x468 uint64
+ p384CmovznzU64(&x468, p384Uint1(x467), x454, x441)
+ var x469 uint64
+ p384CmovznzU64(&x469, p384Uint1(x467), x456, x443)
+ var x470 uint64
+ p384CmovznzU64(&x470, p384Uint1(x467), x458, x445)
+ var x471 uint64
+ p384CmovznzU64(&x471, p384Uint1(x467), x460, x447)
+ var x472 uint64
+ p384CmovznzU64(&x472, p384Uint1(x467), x462, x449)
+ var x473 uint64
+ p384CmovznzU64(&x473, p384Uint1(x467), x464, x451)
+ out1[0] = x468
+ out1[1] = x469
+ out1[2] = x470
+ out1[3] = x471
+ out1[4] = x472
+ out1[5] = x473
+}
+
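p384Mul is a fully unrolled word-by-word (CIOS) Montgomery multiplication: each round accumulates one partial product, then adds u·m with u = t[0]·m′ mod 2^64 (m′ = 0x100000001 = −m⁻¹ mod 2^64 for this prime) so the low word cancels and can be shifted away; a final conditional subtraction brings the result below m. A loop-based sketch of the same algorithm (illustrative only; unlike the generated code, the final subtraction here is variable-time):

```go
package main

import (
	"fmt"
	"math/bits"
)

// montMul is a loop-based sketch of the word-by-word Montgomery
// multiplication that p384Mul unrolls. Limbs are little-endian
// uint64 words, m is the odd modulus, and mprime = -m^-1 mod 2^64.
func montMul(a, b, m []uint64, mprime uint64) []uint64 {
	n := len(m)
	t := make([]uint64, n+2)
	for i := 0; i < n; i++ {
		// t += a[i] * b
		var c, cc uint64
		for j := 0; j < n; j++ {
			hi, lo := bits.Mul64(a[i], b[j])
			lo, cc = bits.Add64(lo, c, 0)
			hi += cc
			t[j], cc = bits.Add64(t[j], lo, 0)
			c = hi + cc
		}
		t[n], cc = bits.Add64(t[n], c, 0)
		t[n+1] = cc

		// Add u*m so the low word of t becomes zero, then shift t
		// down by one word while accumulating.
		u := t[0] * mprime
		hi, lo := bits.Mul64(u, m[0])
		_, cc = bits.Add64(t[0], lo, 0)
		c = hi + cc
		for j := 1; j < n; j++ {
			hi, lo := bits.Mul64(u, m[j])
			lo, cc = bits.Add64(lo, c, 0)
			hi += cc
			t[j-1], cc = bits.Add64(t[j], lo, 0)
			c = hi + cc
		}
		t[n-1], cc = bits.Add64(t[n], c, 0)
		t[n] = t[n+1] + cc
	}
	// Conditional final subtraction: t < 2m, so subtracting m at
	// most once yields the canonical result.
	res := t[:n]
	if t[n] != 0 || !lessThan(res, m) {
		var borrow uint64
		for j := 0; j < n; j++ {
			res[j], borrow = bits.Sub64(res[j], m[j], borrow)
		}
	}
	return res
}

func lessThan(a, b []uint64) bool {
	for j := len(a) - 1; j >= 0; j-- {
		if a[j] != b[j] {
			return a[j] < b[j]
		}
	}
	return false
}

func main() {
	// Smoke test: one is R mod m (see p384SetOne), the Montgomery
	// form of 1, so montMul(one, one) = R*R*R^-1 = R = one again.
	m := []uint64{0xffffffff, 0xffffffff00000000, 0xfffffffffffffffe,
		0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff}
	one := []uint64{0xffffffff00000001, 0xffffffff, 1, 0, 0, 0}
	fmt.Printf("%#x\n", montMul(one, one, m, 0x100000001))
}
```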
+// p384Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+// 0 ≤ eval out1 < m
+func p384Square(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[0]
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x6, arg1[5])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x6, arg1[4])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x6, arg1[3])
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x6, arg1[2])
+ var x15 uint64
+ var x16 uint64
+ x16, x15 = bits.Mul64(x6, arg1[1])
+ var x17 uint64
+ var x18 uint64
+ x18, x17 = bits.Mul64(x6, arg1[0])
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Add64(x18, x15, uint64(0x0))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Add64(x16, x13, uint64(p384Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Add64(x14, x11, uint64(p384Uint1(x22)))
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x12, x9, uint64(p384Uint1(x24)))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x10, x7, uint64(p384Uint1(x26)))
+ x29 := (uint64(p384Uint1(x28)) + x8)
+ var x30 uint64
+ _, x30 = bits.Mul64(x17, 0x100000001)
+ var x32 uint64
+ var x33 uint64
+ x33, x32 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x34 uint64
+ var x35 uint64
+ x35, x34 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x36 uint64
+ var x37 uint64
+ x37, x36 = bits.Mul64(x30, 0xffffffffffffffff)
+ var x38 uint64
+ var x39 uint64
+ x39, x38 = bits.Mul64(x30, 0xfffffffffffffffe)
+ var x40 uint64
+ var x41 uint64
+ x41, x40 = bits.Mul64(x30, 0xffffffff00000000)
+ var x42 uint64
+ var x43 uint64
+ x43, x42 = bits.Mul64(x30, 0xffffffff)
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x43, x40, uint64(0x0))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(x41, x38, uint64(p384Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x39, x36, uint64(p384Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x37, x34, uint64(p384Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(x35, x32, uint64(p384Uint1(x51)))
+ x54 := (uint64(p384Uint1(x53)) + x33)
+ var x56 uint64
+ _, x56 = bits.Add64(x17, x42, uint64(0x0))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(x19, x44, uint64(p384Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(x21, x46, uint64(p384Uint1(x58)))
+ var x61 uint64
+ var x62 uint64
+ x61, x62 = bits.Add64(x23, x48, uint64(p384Uint1(x60)))
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x25, x50, uint64(p384Uint1(x62)))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x27, x52, uint64(p384Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x29, x54, uint64(p384Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x70, x69 = bits.Mul64(x1, arg1[5])
+ var x71 uint64
+ var x72 uint64
+ x72, x71 = bits.Mul64(x1, arg1[4])
+ var x73 uint64
+ var x74 uint64
+ x74, x73 = bits.Mul64(x1, arg1[3])
+ var x75 uint64
+ var x76 uint64
+ x76, x75 = bits.Mul64(x1, arg1[2])
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x1, arg1[1])
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x1, arg1[0])
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x80, x77, uint64(0x0))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x78, x75, uint64(p384Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x76, x73, uint64(p384Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x74, x71, uint64(p384Uint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x72, x69, uint64(p384Uint1(x88)))
+ x91 := (uint64(p384Uint1(x90)) + x70)
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x57, x79, uint64(0x0))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x59, x81, uint64(p384Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x61, x83, uint64(p384Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x63, x85, uint64(p384Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x65, x87, uint64(p384Uint1(x99)))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x67, x89, uint64(p384Uint1(x101)))
+ var x104 uint64
+ var x105 uint64
+ x104, x105 = bits.Add64(uint64(p384Uint1(x68)), x91, uint64(p384Uint1(x103)))
+ var x106 uint64
+ _, x106 = bits.Mul64(x92, 0x100000001)
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x106, 0xffffffffffffffff)
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x106, 0xfffffffffffffffe)
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x106, 0xffffffff00000000)
+ var x118 uint64
+ var x119 uint64
+ x119, x118 = bits.Mul64(x106, 0xffffffff)
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x119, x116, uint64(0x0))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x117, x114, uint64(p384Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x115, x112, uint64(p384Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x113, x110, uint64(p384Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x111, x108, uint64(p384Uint1(x127)))
+ x130 := (uint64(p384Uint1(x129)) + x109)
+ var x132 uint64
+ _, x132 = bits.Add64(x92, x118, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x94, x120, uint64(p384Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x96, x122, uint64(p384Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x98, x124, uint64(p384Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x100, x126, uint64(p384Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x102, x128, uint64(p384Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x104, x130, uint64(p384Uint1(x142)))
+ x145 := (uint64(p384Uint1(x144)) + uint64(p384Uint1(x105)))
+ var x146 uint64
+ var x147 uint64
+ x147, x146 = bits.Mul64(x2, arg1[5])
+ var x148 uint64
+ var x149 uint64
+ x149, x148 = bits.Mul64(x2, arg1[4])
+ var x150 uint64
+ var x151 uint64
+ x151, x150 = bits.Mul64(x2, arg1[3])
+ var x152 uint64
+ var x153 uint64
+ x153, x152 = bits.Mul64(x2, arg1[2])
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x2, arg1[1])
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x2, arg1[0])
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x157, x154, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x155, x152, uint64(p384Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x153, x150, uint64(p384Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x151, x148, uint64(p384Uint1(x163)))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x149, x146, uint64(p384Uint1(x165)))
+ x168 := (uint64(p384Uint1(x167)) + x147)
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x133, x156, uint64(0x0))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x135, x158, uint64(p384Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x137, x160, uint64(p384Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x139, x162, uint64(p384Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x141, x164, uint64(p384Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x143, x166, uint64(p384Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x145, x168, uint64(p384Uint1(x180)))
+ var x183 uint64
+ _, x183 = bits.Mul64(x169, 0x100000001)
+ var x185 uint64
+ var x186 uint64
+ x186, x185 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x187 uint64
+ var x188 uint64
+ x188, x187 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x189 uint64
+ var x190 uint64
+ x190, x189 = bits.Mul64(x183, 0xffffffffffffffff)
+ var x191 uint64
+ var x192 uint64
+ x192, x191 = bits.Mul64(x183, 0xfffffffffffffffe)
+ var x193 uint64
+ var x194 uint64
+ x194, x193 = bits.Mul64(x183, 0xffffffff00000000)
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(x183, 0xffffffff)
+ var x197 uint64
+ var x198 uint64
+ x197, x198 = bits.Add64(x196, x193, uint64(0x0))
+ var x199 uint64
+ var x200 uint64
+ x199, x200 = bits.Add64(x194, x191, uint64(p384Uint1(x198)))
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x192, x189, uint64(p384Uint1(x200)))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x190, x187, uint64(p384Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x188, x185, uint64(p384Uint1(x204)))
+ x207 := (uint64(p384Uint1(x206)) + x186)
+ var x209 uint64
+ _, x209 = bits.Add64(x169, x195, uint64(0x0))
+ var x210 uint64
+ var x211 uint64
+ x210, x211 = bits.Add64(x171, x197, uint64(p384Uint1(x209)))
+ var x212 uint64
+ var x213 uint64
+ x212, x213 = bits.Add64(x173, x199, uint64(p384Uint1(x211)))
+ var x214 uint64
+ var x215 uint64
+ x214, x215 = bits.Add64(x175, x201, uint64(p384Uint1(x213)))
+ var x216 uint64
+ var x217 uint64
+ x216, x217 = bits.Add64(x177, x203, uint64(p384Uint1(x215)))
+ var x218 uint64
+ var x219 uint64
+ x218, x219 = bits.Add64(x179, x205, uint64(p384Uint1(x217)))
+ var x220 uint64
+ var x221 uint64
+ x220, x221 = bits.Add64(x181, x207, uint64(p384Uint1(x219)))
+ x222 := (uint64(p384Uint1(x221)) + uint64(p384Uint1(x182)))
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x3, arg1[5])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x3, arg1[4])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x3, arg1[3])
+ var x229 uint64
+ var x230 uint64
+ x230, x229 = bits.Mul64(x3, arg1[2])
+ var x231 uint64
+ var x232 uint64
+ x232, x231 = bits.Mul64(x3, arg1[1])
+ var x233 uint64
+ var x234 uint64
+ x234, x233 = bits.Mul64(x3, arg1[0])
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x234, x231, uint64(0x0))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
+ x245 := (uint64(p384Uint1(x244)) + x224)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x210, x233, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x212, x235, uint64(p384Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x214, x237, uint64(p384Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x216, x239, uint64(p384Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x218, x241, uint64(p384Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x220, x243, uint64(p384Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x222, x245, uint64(p384Uint1(x257)))
+ var x260 uint64
+ _, x260 = bits.Mul64(x246, 0x100000001)
+ var x262 uint64
+ var x263 uint64
+ x263, x262 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x264 uint64
+ var x265 uint64
+ x265, x264 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x260, 0xffffffffffffffff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x260, 0xfffffffffffffffe)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x260, 0xffffffff00000000)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x260, 0xffffffff)
+ var x274 uint64
+ var x275 uint64
+ x274, x275 = bits.Add64(x273, x270, uint64(0x0))
+ var x276 uint64
+ var x277 uint64
+ x276, x277 = bits.Add64(x271, x268, uint64(p384Uint1(x275)))
+ var x278 uint64
+ var x279 uint64
+ x278, x279 = bits.Add64(x269, x266, uint64(p384Uint1(x277)))
+ var x280 uint64
+ var x281 uint64
+ x280, x281 = bits.Add64(x267, x264, uint64(p384Uint1(x279)))
+ var x282 uint64
+ var x283 uint64
+ x282, x283 = bits.Add64(x265, x262, uint64(p384Uint1(x281)))
+ x284 := (uint64(p384Uint1(x283)) + x263)
+ var x286 uint64
+ _, x286 = bits.Add64(x246, x272, uint64(0x0))
+ var x287 uint64
+ var x288 uint64
+ x287, x288 = bits.Add64(x248, x274, uint64(p384Uint1(x286)))
+ var x289 uint64
+ var x290 uint64
+ x289, x290 = bits.Add64(x250, x276, uint64(p384Uint1(x288)))
+ var x291 uint64
+ var x292 uint64
+ x291, x292 = bits.Add64(x252, x278, uint64(p384Uint1(x290)))
+ var x293 uint64
+ var x294 uint64
+ x293, x294 = bits.Add64(x254, x280, uint64(p384Uint1(x292)))
+ var x295 uint64
+ var x296 uint64
+ x295, x296 = bits.Add64(x256, x282, uint64(p384Uint1(x294)))
+ var x297 uint64
+ var x298 uint64
+ x297, x298 = bits.Add64(x258, x284, uint64(p384Uint1(x296)))
+ x299 := (uint64(p384Uint1(x298)) + uint64(p384Uint1(x259)))
+ var x300 uint64
+ var x301 uint64
+ x301, x300 = bits.Mul64(x4, arg1[5])
+ var x302 uint64
+ var x303 uint64
+ x303, x302 = bits.Mul64(x4, arg1[4])
+ var x304 uint64
+ var x305 uint64
+ x305, x304 = bits.Mul64(x4, arg1[3])
+ var x306 uint64
+ var x307 uint64
+ x307, x306 = bits.Mul64(x4, arg1[2])
+ var x308 uint64
+ var x309 uint64
+ x309, x308 = bits.Mul64(x4, arg1[1])
+ var x310 uint64
+ var x311 uint64
+ x311, x310 = bits.Mul64(x4, arg1[0])
+ var x312 uint64
+ var x313 uint64
+ x312, x313 = bits.Add64(x311, x308, uint64(0x0))
+ var x314 uint64
+ var x315 uint64
+ x314, x315 = bits.Add64(x309, x306, uint64(p384Uint1(x313)))
+ var x316 uint64
+ var x317 uint64
+ x316, x317 = bits.Add64(x307, x304, uint64(p384Uint1(x315)))
+ var x318 uint64
+ var x319 uint64
+ x318, x319 = bits.Add64(x305, x302, uint64(p384Uint1(x317)))
+ var x320 uint64
+ var x321 uint64
+ x320, x321 = bits.Add64(x303, x300, uint64(p384Uint1(x319)))
+ x322 := (uint64(p384Uint1(x321)) + x301)
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64(x287, x310, uint64(0x0))
+ var x325 uint64
+ var x326 uint64
+ x325, x326 = bits.Add64(x289, x312, uint64(p384Uint1(x324)))
+ var x327 uint64
+ var x328 uint64
+ x327, x328 = bits.Add64(x291, x314, uint64(p384Uint1(x326)))
+ var x329 uint64
+ var x330 uint64
+ x329, x330 = bits.Add64(x293, x316, uint64(p384Uint1(x328)))
+ var x331 uint64
+ var x332 uint64
+ x331, x332 = bits.Add64(x295, x318, uint64(p384Uint1(x330)))
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x297, x320, uint64(p384Uint1(x332)))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x299, x322, uint64(p384Uint1(x334)))
+ var x337 uint64
+ _, x337 = bits.Mul64(x323, 0x100000001)
+ var x339 uint64
+ var x340 uint64
+ x340, x339 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x341 uint64
+ var x342 uint64
+ x342, x341 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x343 uint64
+ var x344 uint64
+ x344, x343 = bits.Mul64(x337, 0xffffffffffffffff)
+ var x345 uint64
+ var x346 uint64
+ x346, x345 = bits.Mul64(x337, 0xfffffffffffffffe)
+ var x347 uint64
+ var x348 uint64
+ x348, x347 = bits.Mul64(x337, 0xffffffff00000000)
+ var x349 uint64
+ var x350 uint64
+ x350, x349 = bits.Mul64(x337, 0xffffffff)
+ var x351 uint64
+ var x352 uint64
+ x351, x352 = bits.Add64(x350, x347, uint64(0x0))
+ var x353 uint64
+ var x354 uint64
+ x353, x354 = bits.Add64(x348, x345, uint64(p384Uint1(x352)))
+ var x355 uint64
+ var x356 uint64
+ x355, x356 = bits.Add64(x346, x343, uint64(p384Uint1(x354)))
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x344, x341, uint64(p384Uint1(x356)))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x342, x339, uint64(p384Uint1(x358)))
+ x361 := (uint64(p384Uint1(x360)) + x340)
+ var x363 uint64
+ _, x363 = bits.Add64(x323, x349, uint64(0x0))
+ var x364 uint64
+ var x365 uint64
+ x364, x365 = bits.Add64(x325, x351, uint64(p384Uint1(x363)))
+ var x366 uint64
+ var x367 uint64
+ x366, x367 = bits.Add64(x327, x353, uint64(p384Uint1(x365)))
+ var x368 uint64
+ var x369 uint64
+ x368, x369 = bits.Add64(x329, x355, uint64(p384Uint1(x367)))
+ var x370 uint64
+ var x371 uint64
+ x370, x371 = bits.Add64(x331, x357, uint64(p384Uint1(x369)))
+ var x372 uint64
+ var x373 uint64
+ x372, x373 = bits.Add64(x333, x359, uint64(p384Uint1(x371)))
+ var x374 uint64
+ var x375 uint64
+ x374, x375 = bits.Add64(x335, x361, uint64(p384Uint1(x373)))
+ x376 := (uint64(p384Uint1(x375)) + uint64(p384Uint1(x336)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x5, arg1[5])
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x5, arg1[4])
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x5, arg1[3])
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x5, arg1[2])
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x5, arg1[1])
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x5, arg1[0])
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64(x388, x385, uint64(0x0))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Add64(x386, x383, uint64(p384Uint1(x390)))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Add64(x384, x381, uint64(p384Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x382, x379, uint64(p384Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x380, x377, uint64(p384Uint1(x396)))
+ x399 := (uint64(p384Uint1(x398)) + x378)
+ var x400 uint64
+ var x401 uint64
+ x400, x401 = bits.Add64(x364, x387, uint64(0x0))
+ var x402 uint64
+ var x403 uint64
+ x402, x403 = bits.Add64(x366, x389, uint64(p384Uint1(x401)))
+ var x404 uint64
+ var x405 uint64
+ x404, x405 = bits.Add64(x368, x391, uint64(p384Uint1(x403)))
+ var x406 uint64
+ var x407 uint64
+ x406, x407 = bits.Add64(x370, x393, uint64(p384Uint1(x405)))
+ var x408 uint64
+ var x409 uint64
+ x408, x409 = bits.Add64(x372, x395, uint64(p384Uint1(x407)))
+ var x410 uint64
+ var x411 uint64
+ x410, x411 = bits.Add64(x374, x397, uint64(p384Uint1(x409)))
+ var x412 uint64
+ var x413 uint64
+ x412, x413 = bits.Add64(x376, x399, uint64(p384Uint1(x411)))
+ var x414 uint64
+ _, x414 = bits.Mul64(x400, 0x100000001)
+ var x416 uint64
+ var x417 uint64
+ x417, x416 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x418 uint64
+ var x419 uint64
+ x419, x418 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x420 uint64
+ var x421 uint64
+ x421, x420 = bits.Mul64(x414, 0xffffffffffffffff)
+ var x422 uint64
+ var x423 uint64
+ x423, x422 = bits.Mul64(x414, 0xfffffffffffffffe)
+ var x424 uint64
+ var x425 uint64
+ x425, x424 = bits.Mul64(x414, 0xffffffff00000000)
+ var x426 uint64
+ var x427 uint64
+ x427, x426 = bits.Mul64(x414, 0xffffffff)
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x427, x424, uint64(0x0))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x425, x422, uint64(p384Uint1(x429)))
+ var x432 uint64
+ var x433 uint64
+ x432, x433 = bits.Add64(x423, x420, uint64(p384Uint1(x431)))
+ var x434 uint64
+ var x435 uint64
+ x434, x435 = bits.Add64(x421, x418, uint64(p384Uint1(x433)))
+ var x436 uint64
+ var x437 uint64
+ x436, x437 = bits.Add64(x419, x416, uint64(p384Uint1(x435)))
+ x438 := (uint64(p384Uint1(x437)) + x417)
+ var x440 uint64
+ _, x440 = bits.Add64(x400, x426, uint64(0x0))
+ var x441 uint64
+ var x442 uint64
+ x441, x442 = bits.Add64(x402, x428, uint64(p384Uint1(x440)))
+ var x443 uint64
+ var x444 uint64
+ x443, x444 = bits.Add64(x404, x430, uint64(p384Uint1(x442)))
+ var x445 uint64
+ var x446 uint64
+ x445, x446 = bits.Add64(x406, x432, uint64(p384Uint1(x444)))
+ var x447 uint64
+ var x448 uint64
+ x447, x448 = bits.Add64(x408, x434, uint64(p384Uint1(x446)))
+ var x449 uint64
+ var x450 uint64
+ x449, x450 = bits.Add64(x410, x436, uint64(p384Uint1(x448)))
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x412, x438, uint64(p384Uint1(x450)))
+ x453 := (uint64(p384Uint1(x452)) + uint64(p384Uint1(x413)))
+ var x454 uint64
+ var x455 uint64
+ x454, x455 = bits.Sub64(x441, 0xffffffff, uint64(0x0))
+ var x456 uint64
+ var x457 uint64
+ x456, x457 = bits.Sub64(x443, 0xffffffff00000000, uint64(p384Uint1(x455)))
+ var x458 uint64
+ var x459 uint64
+ x458, x459 = bits.Sub64(x445, 0xfffffffffffffffe, uint64(p384Uint1(x457)))
+ var x460 uint64
+ var x461 uint64
+ x460, x461 = bits.Sub64(x447, 0xffffffffffffffff, uint64(p384Uint1(x459)))
+ var x462 uint64
+ var x463 uint64
+ x462, x463 = bits.Sub64(x449, 0xffffffffffffffff, uint64(p384Uint1(x461)))
+ var x464 uint64
+ var x465 uint64
+ x464, x465 = bits.Sub64(x451, 0xffffffffffffffff, uint64(p384Uint1(x463)))
+ var x467 uint64
+ _, x467 = bits.Sub64(x453, uint64(0x0), uint64(p384Uint1(x465)))
+ var x468 uint64
+ p384CmovznzU64(&x468, p384Uint1(x467), x454, x441)
+ var x469 uint64
+ p384CmovznzU64(&x469, p384Uint1(x467), x456, x443)
+ var x470 uint64
+ p384CmovznzU64(&x470, p384Uint1(x467), x458, x445)
+ var x471 uint64
+ p384CmovznzU64(&x471, p384Uint1(x467), x460, x447)
+ var x472 uint64
+ p384CmovznzU64(&x472, p384Uint1(x467), x462, x449)
+ var x473 uint64
+ p384CmovznzU64(&x473, p384Uint1(x467), x464, x451)
+ out1[0] = x468
+ out1[1] = x469
+ out1[2] = x470
+ out1[3] = x471
+ out1[4] = x472
+ out1[5] = x473
+}
+
+// p384Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p384Add(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p384Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p384Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p384Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Add64(arg1[4], arg2[4], uint64(p384Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Add64(arg1[5], arg2[5], uint64(p384Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x1, 0xffffffff, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x3, 0xffffffff00000000, uint64(p384Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Sub64(x5, 0xfffffffffffffffe, uint64(p384Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Sub64(x7, 0xffffffffffffffff, uint64(p384Uint1(x18)))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Sub64(x9, 0xffffffffffffffff, uint64(p384Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Sub64(x11, 0xffffffffffffffff, uint64(p384Uint1(x22)))
+ var x26 uint64
+ _, x26 = bits.Sub64(uint64(p384Uint1(x12)), uint64(0x0), uint64(p384Uint1(x24)))
+ var x27 uint64
+ p384CmovznzU64(&x27, p384Uint1(x26), x13, x1)
+ var x28 uint64
+ p384CmovznzU64(&x28, p384Uint1(x26), x15, x3)
+ var x29 uint64
+ p384CmovznzU64(&x29, p384Uint1(x26), x17, x5)
+ var x30 uint64
+ p384CmovznzU64(&x30, p384Uint1(x26), x19, x7)
+ var x31 uint64
+ p384CmovznzU64(&x31, p384Uint1(x26), x21, x9)
+ var x32 uint64
+ p384CmovznzU64(&x32, p384Uint1(x26), x23, x11)
+ out1[0] = x27
+ out1[1] = x28
+ out1[2] = x29
+ out1[3] = x30
+ out1[4] = x31
+ out1[5] = x32
+}
+
+// p384Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p384Sub(out1 *p384MontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement, arg2 *p384MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p384Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p384Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p384Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(arg1[4], arg2[4], uint64(p384Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(arg1[5], arg2[5], uint64(p384Uint1(x10)))
+ var x13 uint64
+ p384CmovznzU64(&x13, p384Uint1(x12), uint64(0x0), 0xffffffffffffffff)
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x1, (x13 & 0xffffffff), uint64(0x0))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x3, (x13 & 0xffffffff00000000), uint64(p384Uint1(x15)))
+ var x18 uint64
+ var x19 uint64
+ x18, x19 = bits.Add64(x5, (x13 & 0xfffffffffffffffe), uint64(p384Uint1(x17)))
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x7, x13, uint64(p384Uint1(x19)))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x9, x13, uint64(p384Uint1(x21)))
+ var x24 uint64
+ x24, _ = bits.Add64(x11, x13, uint64(p384Uint1(x23)))
+ out1[0] = x14
+ out1[1] = x16
+ out1[2] = x18
+ out1[3] = x20
+ out1[4] = x22
+ out1[5] = x24
+}
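+
+// p384AddSubRoundTrip is a hypothetical sketch, not part of the generated
+// code: for reduced inputs, subtracting b from a+b should give back a,
+// exercising both the conditional-subtract and add-back paths above.
+func p384AddSubRoundTrip(a, b *p384MontgomeryDomainFieldElement) bool {
+	var sum, diff p384MontgomeryDomainFieldElement
+	p384Add(&sum, a, b)
+	p384Sub(&diff, &sum, b)
+	return diff == *a
+}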
+
+// p384SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = 1 mod m
+// 0 ≤ eval out1 < m
+func p384SetOne(out1 *p384MontgomeryDomainFieldElement) {
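+	// These limbs encode R mod m, where R = 2^384, i.e. the Montgomery
+	// representation of 1: 2^384 mod m = 2^128 + 2^96 - 2^32 + 1.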
+ out1[0] = 0xffffffff00000001
+ out1[1] = 0xffffffff
+ out1[2] = uint64(0x1)
+ out1[3] = uint64(0x0)
+ out1[4] = uint64(0x0)
+ out1[5] = uint64(0x0)
+}
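+
+// p384OneSanity is a hypothetical sketch, not part of the generated code:
+// converting the Montgomery one back out of the domain should yield the
+// plain integer 1 in the lowest limb.
+func p384OneSanity() bool {
+	var one p384MontgomeryDomainFieldElement
+	p384SetOne(&one)
+	var plain p384NonMontgomeryDomainFieldElement
+	p384FromMontgomery(&plain, &one)
+	return plain == p384NonMontgomeryDomainFieldElement{1}
+}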
+
+// p384FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^6) mod m
+// 0 ≤ eval out1 < m
+func p384FromMontgomery(out1 *p384NonMontgomeryDomainFieldElement, arg1 *p384MontgomeryDomainFieldElement) {
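+	// Word-by-word Montgomery reduction: each round multiplies the current
+	// low limb by m' = 0x100000001 = -m⁻¹ mod 2^64, adds that multiple of
+	// m so the low limb cancels, and shifts right by one word; six rounds
+	// divide by R = 2^384, followed by a conditional subtraction of m.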
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0x100000001)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x2, 0xfffffffffffffffe)
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x2, 0xffffffff00000000)
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x2, 0xffffffff)
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x15, x12, uint64(0x0))
+ var x18 uint64
+ var x19 uint64
+ x18, x19 = bits.Add64(x13, x10, uint64(p384Uint1(x17)))
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x11, x8, uint64(p384Uint1(x19)))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x9, x6, uint64(p384Uint1(x21)))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x7, x4, uint64(p384Uint1(x23)))
+ var x27 uint64
+ _, x27 = bits.Add64(x1, x14, uint64(0x0))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(uint64(0x0), x16, uint64(p384Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(uint64(0x0), x18, uint64(p384Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(uint64(0x0), x20, uint64(p384Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(uint64(0x0), x22, uint64(p384Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(uint64(0x0), x24, uint64(p384Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(uint64(0x0), (uint64(p384Uint1(x25)) + x5), uint64(p384Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x28, arg1[1], uint64(0x0))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x30, uint64(0x0), uint64(p384Uint1(x41)))
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(x32, uint64(0x0), uint64(p384Uint1(x43)))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(x34, uint64(0x0), uint64(p384Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(x36, uint64(0x0), uint64(p384Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x38, uint64(0x0), uint64(p384Uint1(x49)))
+ var x52 uint64
+ _, x52 = bits.Mul64(x40, 0x100000001)
+ var x54 uint64
+ var x55 uint64
+ x55, x54 = bits.Mul64(x52, 0xffffffffffffffff)
+ var x56 uint64
+ var x57 uint64
+ x57, x56 = bits.Mul64(x52, 0xffffffffffffffff)
+ var x58 uint64
+ var x59 uint64
+ x59, x58 = bits.Mul64(x52, 0xffffffffffffffff)
+ var x60 uint64
+ var x61 uint64
+ x61, x60 = bits.Mul64(x52, 0xfffffffffffffffe)
+ var x62 uint64
+ var x63 uint64
+ x63, x62 = bits.Mul64(x52, 0xffffffff00000000)
+ var x64 uint64
+ var x65 uint64
+ x65, x64 = bits.Mul64(x52, 0xffffffff)
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(x65, x62, uint64(0x0))
+ var x68 uint64
+ var x69 uint64
+ x68, x69 = bits.Add64(x63, x60, uint64(p384Uint1(x67)))
+ var x70 uint64
+ var x71 uint64
+ x70, x71 = bits.Add64(x61, x58, uint64(p384Uint1(x69)))
+ var x72 uint64
+ var x73 uint64
+ x72, x73 = bits.Add64(x59, x56, uint64(p384Uint1(x71)))
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x57, x54, uint64(p384Uint1(x73)))
+ var x77 uint64
+ _, x77 = bits.Add64(x40, x64, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x42, x66, uint64(p384Uint1(x77)))
+ var x80 uint64
+ var x81 uint64
+ x80, x81 = bits.Add64(x44, x68, uint64(p384Uint1(x79)))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64(x46, x70, uint64(p384Uint1(x81)))
+ var x84 uint64
+ var x85 uint64
+ x84, x85 = bits.Add64(x48, x72, uint64(p384Uint1(x83)))
+ var x86 uint64
+ var x87 uint64
+ x86, x87 = bits.Add64(x50, x74, uint64(p384Uint1(x85)))
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64((uint64(p384Uint1(x51)) + uint64(p384Uint1(x39))), (uint64(p384Uint1(x75)) + x55), uint64(p384Uint1(x87)))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x78, arg1[2], uint64(0x0))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x80, uint64(0x0), uint64(p384Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x82, uint64(0x0), uint64(p384Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x84, uint64(0x0), uint64(p384Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x86, uint64(0x0), uint64(p384Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x88, uint64(0x0), uint64(p384Uint1(x99)))
+ var x102 uint64
+ _, x102 = bits.Mul64(x90, 0x100000001)
+ var x104 uint64
+ var x105 uint64
+ x105, x104 = bits.Mul64(x102, 0xffffffffffffffff)
+ var x106 uint64
+ var x107 uint64
+ x107, x106 = bits.Mul64(x102, 0xffffffffffffffff)
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x102, 0xffffffffffffffff)
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x102, 0xfffffffffffffffe)
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x102, 0xffffffff00000000)
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x102, 0xffffffff)
+ var x116 uint64
+ var x117 uint64
+ x116, x117 = bits.Add64(x115, x112, uint64(0x0))
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x113, x110, uint64(p384Uint1(x117)))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x111, x108, uint64(p384Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x109, x106, uint64(p384Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x107, x104, uint64(p384Uint1(x123)))
+ var x127 uint64
+ _, x127 = bits.Add64(x90, x114, uint64(0x0))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x92, x116, uint64(p384Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x94, x118, uint64(p384Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x96, x120, uint64(p384Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x98, x122, uint64(p384Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x100, x124, uint64(p384Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x138, x139 = bits.Add64((uint64(p384Uint1(x101)) + uint64(p384Uint1(x89))), (uint64(p384Uint1(x125)) + x105), uint64(p384Uint1(x137)))
+ var x140 uint64
+ var x141 uint64
+ x140, x141 = bits.Add64(x128, arg1[3], uint64(0x0))
+ var x142 uint64
+ var x143 uint64
+ x142, x143 = bits.Add64(x130, uint64(0x0), uint64(p384Uint1(x141)))
+ var x144 uint64
+ var x145 uint64
+ x144, x145 = bits.Add64(x132, uint64(0x0), uint64(p384Uint1(x143)))
+ var x146 uint64
+ var x147 uint64
+ x146, x147 = bits.Add64(x134, uint64(0x0), uint64(p384Uint1(x145)))
+ var x148 uint64
+ var x149 uint64
+ x148, x149 = bits.Add64(x136, uint64(0x0), uint64(p384Uint1(x147)))
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x138, uint64(0x0), uint64(p384Uint1(x149)))
+ var x152 uint64
+ _, x152 = bits.Mul64(x140, 0x100000001)
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x152, 0xffffffffffffffff)
+ var x156 uint64
+ var x157 uint64
+ x157, x156 = bits.Mul64(x152, 0xffffffffffffffff)
+ var x158 uint64
+ var x159 uint64
+ x159, x158 = bits.Mul64(x152, 0xffffffffffffffff)
+ var x160 uint64
+ var x161 uint64
+ x161, x160 = bits.Mul64(x152, 0xfffffffffffffffe)
+ var x162 uint64
+ var x163 uint64
+ x163, x162 = bits.Mul64(x152, 0xffffffff00000000)
+ var x164 uint64
+ var x165 uint64
+ x165, x164 = bits.Mul64(x152, 0xffffffff)
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x165, x162, uint64(0x0))
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x163, x160, uint64(p384Uint1(x167)))
+ var x170 uint64
+ var x171 uint64
+ x170, x171 = bits.Add64(x161, x158, uint64(p384Uint1(x169)))
+ var x172 uint64
+ var x173 uint64
+ x172, x173 = bits.Add64(x159, x156, uint64(p384Uint1(x171)))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x157, x154, uint64(p384Uint1(x173)))
+ var x177 uint64
+ _, x177 = bits.Add64(x140, x164, uint64(0x0))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Add64(x142, x166, uint64(p384Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Add64(x144, x168, uint64(p384Uint1(x179)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Add64(x146, x170, uint64(p384Uint1(x181)))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Add64(x148, x172, uint64(p384Uint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Add64(x150, x174, uint64(p384Uint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Add64((uint64(p384Uint1(x151)) + uint64(p384Uint1(x139))), (uint64(p384Uint1(x175)) + x155), uint64(p384Uint1(x187)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Add64(x178, arg1[4], uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x180, uint64(0x0), uint64(p384Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x182, uint64(0x0), uint64(p384Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x184, uint64(0x0), uint64(p384Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x186, uint64(0x0), uint64(p384Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x188, uint64(0x0), uint64(p384Uint1(x199)))
+ var x202 uint64
+ _, x202 = bits.Mul64(x190, 0x100000001)
+ var x204 uint64
+ var x205 uint64
+ x205, x204 = bits.Mul64(x202, 0xffffffffffffffff)
+ var x206 uint64
+ var x207 uint64
+ x207, x206 = bits.Mul64(x202, 0xffffffffffffffff)
+ var x208 uint64
+ var x209 uint64
+ x209, x208 = bits.Mul64(x202, 0xffffffffffffffff)
+ var x210 uint64
+ var x211 uint64
+ x211, x210 = bits.Mul64(x202, 0xfffffffffffffffe)
+ var x212 uint64
+ var x213 uint64
+ x213, x212 = bits.Mul64(x202, 0xffffffff00000000)
+ var x214 uint64
+ var x215 uint64
+ x215, x214 = bits.Mul64(x202, 0xffffffff)
+ var x216 uint64
+ var x217 uint64
+ x216, x217 = bits.Add64(x215, x212, uint64(0x0))
+ var x218 uint64
+ var x219 uint64
+ x218, x219 = bits.Add64(x213, x210, uint64(p384Uint1(x217)))
+ var x220 uint64
+ var x221 uint64
+ x220, x221 = bits.Add64(x211, x208, uint64(p384Uint1(x219)))
+ var x222 uint64
+ var x223 uint64
+ x222, x223 = bits.Add64(x209, x206, uint64(p384Uint1(x221)))
+ var x224 uint64
+ var x225 uint64
+ x224, x225 = bits.Add64(x207, x204, uint64(p384Uint1(x223)))
+ var x227 uint64
+ _, x227 = bits.Add64(x190, x214, uint64(0x0))
+ var x228 uint64
+ var x229 uint64
+ x228, x229 = bits.Add64(x192, x216, uint64(p384Uint1(x227)))
+ var x230 uint64
+ var x231 uint64
+ x230, x231 = bits.Add64(x194, x218, uint64(p384Uint1(x229)))
+ var x232 uint64
+ var x233 uint64
+ x232, x233 = bits.Add64(x196, x220, uint64(p384Uint1(x231)))
+ var x234 uint64
+ var x235 uint64
+ x234, x235 = bits.Add64(x198, x222, uint64(p384Uint1(x233)))
+ var x236 uint64
+ var x237 uint64
+ x236, x237 = bits.Add64(x200, x224, uint64(p384Uint1(x235)))
+ var x238 uint64
+ var x239 uint64
+ x238, x239 = bits.Add64((uint64(p384Uint1(x201)) + uint64(p384Uint1(x189))), (uint64(p384Uint1(x225)) + x205), uint64(p384Uint1(x237)))
+ var x240 uint64
+ var x241 uint64
+ x240, x241 = bits.Add64(x228, arg1[5], uint64(0x0))
+ var x242 uint64
+ var x243 uint64
+ x242, x243 = bits.Add64(x230, uint64(0x0), uint64(p384Uint1(x241)))
+ var x244 uint64
+ var x245 uint64
+ x244, x245 = bits.Add64(x232, uint64(0x0), uint64(p384Uint1(x243)))
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x234, uint64(0x0), uint64(p384Uint1(x245)))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x236, uint64(0x0), uint64(p384Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x238, uint64(0x0), uint64(p384Uint1(x249)))
+ var x252 uint64
+ _, x252 = bits.Mul64(x240, 0x100000001)
+ var x254 uint64
+ var x255 uint64
+ x255, x254 = bits.Mul64(x252, 0xffffffffffffffff)
+ var x256 uint64
+ var x257 uint64
+ x257, x256 = bits.Mul64(x252, 0xffffffffffffffff)
+ var x258 uint64
+ var x259 uint64
+ x259, x258 = bits.Mul64(x252, 0xffffffffffffffff)
+ var x260 uint64
+ var x261 uint64
+ x261, x260 = bits.Mul64(x252, 0xfffffffffffffffe)
+ var x262 uint64
+ var x263 uint64
+ x263, x262 = bits.Mul64(x252, 0xffffffff00000000)
+ var x264 uint64
+ var x265 uint64
+ x265, x264 = bits.Mul64(x252, 0xffffffff)
+ var x266 uint64
+ var x267 uint64
+ x266, x267 = bits.Add64(x265, x262, uint64(0x0))
+ var x268 uint64
+ var x269 uint64
+ x268, x269 = bits.Add64(x263, x260, uint64(p384Uint1(x267)))
+ var x270 uint64
+ var x271 uint64
+ x270, x271 = bits.Add64(x261, x258, uint64(p384Uint1(x269)))
+ var x272 uint64
+ var x273 uint64
+ x272, x273 = bits.Add64(x259, x256, uint64(p384Uint1(x271)))
+ var x274 uint64
+ var x275 uint64
+ x274, x275 = bits.Add64(x257, x254, uint64(p384Uint1(x273)))
+ var x277 uint64
+ _, x277 = bits.Add64(x240, x264, uint64(0x0))
+ var x278 uint64
+ var x279 uint64
+ x278, x279 = bits.Add64(x242, x266, uint64(p384Uint1(x277)))
+ var x280 uint64
+ var x281 uint64
+ x280, x281 = bits.Add64(x244, x268, uint64(p384Uint1(x279)))
+ var x282 uint64
+ var x283 uint64
+ x282, x283 = bits.Add64(x246, x270, uint64(p384Uint1(x281)))
+ var x284 uint64
+ var x285 uint64
+ x284, x285 = bits.Add64(x248, x272, uint64(p384Uint1(x283)))
+ var x286 uint64
+ var x287 uint64
+ x286, x287 = bits.Add64(x250, x274, uint64(p384Uint1(x285)))
+ var x288 uint64
+ var x289 uint64
+ x288, x289 = bits.Add64((uint64(p384Uint1(x251)) + uint64(p384Uint1(x239))), (uint64(p384Uint1(x275)) + x255), uint64(p384Uint1(x287)))
+ var x290 uint64
+ var x291 uint64
+ x290, x291 = bits.Sub64(x278, 0xffffffff, uint64(0x0))
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Sub64(x280, 0xffffffff00000000, uint64(p384Uint1(x291)))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Sub64(x282, 0xfffffffffffffffe, uint64(p384Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Sub64(x284, 0xffffffffffffffff, uint64(p384Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Sub64(x286, 0xffffffffffffffff, uint64(p384Uint1(x297)))
+ var x300 uint64
+ var x301 uint64
+ x300, x301 = bits.Sub64(x288, 0xffffffffffffffff, uint64(p384Uint1(x299)))
+ var x303 uint64
+ _, x303 = bits.Sub64(uint64(p384Uint1(x289)), uint64(0x0), uint64(p384Uint1(x301)))
+ var x304 uint64
+ p384CmovznzU64(&x304, p384Uint1(x303), x290, x278)
+ var x305 uint64
+ p384CmovznzU64(&x305, p384Uint1(x303), x292, x280)
+ var x306 uint64
+ p384CmovznzU64(&x306, p384Uint1(x303), x294, x282)
+ var x307 uint64
+ p384CmovznzU64(&x307, p384Uint1(x303), x296, x284)
+ var x308 uint64
+ p384CmovznzU64(&x308, p384Uint1(x303), x298, x286)
+ var x309 uint64
+ p384CmovznzU64(&x309, p384Uint1(x303), x300, x288)
+ out1[0] = x304
+ out1[1] = x305
+ out1[2] = x306
+ out1[3] = x307
+ out1[4] = x308
+ out1[5] = x309
+}
+
+// p384ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func p384ToMontgomery(out1 *p384MontgomeryDomainFieldElement, arg1 *p384NonMontgomeryDomainFieldElement) {
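+	// Montgomery multiplication of arg1 by R² mod m, whose nonzero limbs
+	// appear below as the constants 0xfffffffe00000001, 0x200000000,
+	// 0xfffffffe00000000, 0x200000000 and an implicit 1; the product is
+	// arg1·R²·R⁻¹ = arg1·R mod m.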
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[0]
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x6, 0x200000000)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x6, 0xfffffffe00000000)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x6, 0x200000000)
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x6, 0xfffffffe00000001)
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x14, x11, uint64(0x0))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x12, x9, uint64(p384Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Add64(x10, x7, uint64(p384Uint1(x18)))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Add64(x8, x6, uint64(p384Uint1(x20)))
+ var x23 uint64
+ _, x23 = bits.Mul64(x13, 0x100000001)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x23, 0xffffffffffffffff)
+ var x27 uint64
+ var x28 uint64
+ x28, x27 = bits.Mul64(x23, 0xffffffffffffffff)
+ var x29 uint64
+ var x30 uint64
+ x30, x29 = bits.Mul64(x23, 0xffffffffffffffff)
+ var x31 uint64
+ var x32 uint64
+ x32, x31 = bits.Mul64(x23, 0xfffffffffffffffe)
+ var x33 uint64
+ var x34 uint64
+ x34, x33 = bits.Mul64(x23, 0xffffffff00000000)
+ var x35 uint64
+ var x36 uint64
+ x36, x35 = bits.Mul64(x23, 0xffffffff)
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x36, x33, uint64(0x0))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x34, x31, uint64(p384Uint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x41, x42 = bits.Add64(x32, x29, uint64(p384Uint1(x40)))
+ var x43 uint64
+ var x44 uint64
+ x43, x44 = bits.Add64(x30, x27, uint64(p384Uint1(x42)))
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x28, x25, uint64(p384Uint1(x44)))
+ var x48 uint64
+ _, x48 = bits.Add64(x13, x35, uint64(0x0))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x15, x37, uint64(p384Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x17, x39, uint64(p384Uint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x19, x41, uint64(p384Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x21, x43, uint64(p384Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(uint64(p384Uint1(x22)), x45, uint64(p384Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x59, x60 = bits.Add64(uint64(0x0), (uint64(p384Uint1(x46)) + x26), uint64(p384Uint1(x58)))
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x1, 0x200000000)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x1, 0xfffffffe00000000)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x1, 0x200000000)
+ var x67 uint64
+ var x68 uint64
+ x68, x67 = bits.Mul64(x1, 0xfffffffe00000001)
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x68, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x66, x63, uint64(p384Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x64, x61, uint64(p384Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x62, x1, uint64(p384Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x49, x67, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x51, x69, uint64(p384Uint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x53, x71, uint64(p384Uint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x55, x73, uint64(p384Uint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x57, x75, uint64(p384Uint1(x84)))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x59, uint64(p384Uint1(x76)), uint64(p384Uint1(x86)))
+ var x89 uint64
+ _, x89 = bits.Mul64(x77, 0x100000001)
+ var x91 uint64
+ var x92 uint64
+ x92, x91 = bits.Mul64(x89, 0xffffffffffffffff)
+ var x93 uint64
+ var x94 uint64
+ x94, x93 = bits.Mul64(x89, 0xffffffffffffffff)
+ var x95 uint64
+ var x96 uint64
+ x96, x95 = bits.Mul64(x89, 0xffffffffffffffff)
+ var x97 uint64
+ var x98 uint64
+ x98, x97 = bits.Mul64(x89, 0xfffffffffffffffe)
+ var x99 uint64
+ var x100 uint64
+ x100, x99 = bits.Mul64(x89, 0xffffffff00000000)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x89, 0xffffffff)
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x102, x99, uint64(0x0))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x100, x97, uint64(p384Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x98, x95, uint64(p384Uint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x96, x93, uint64(p384Uint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x94, x91, uint64(p384Uint1(x110)))
+ var x114 uint64
+ _, x114 = bits.Add64(x77, x101, uint64(0x0))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x79, x103, uint64(p384Uint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x117, x118 = bits.Add64(x81, x105, uint64(p384Uint1(x116)))
+ var x119 uint64
+ var x120 uint64
+ x119, x120 = bits.Add64(x83, x107, uint64(p384Uint1(x118)))
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x85, x109, uint64(p384Uint1(x120)))
+ var x123 uint64
+ var x124 uint64
+ x123, x124 = bits.Add64(x87, x111, uint64(p384Uint1(x122)))
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64((uint64(p384Uint1(x88)) + uint64(p384Uint1(x60))), (uint64(p384Uint1(x112)) + x92), uint64(p384Uint1(x124)))
+ var x127 uint64
+ var x128 uint64
+ x128, x127 = bits.Mul64(x2, 0x200000000)
+ var x129 uint64
+ var x130 uint64
+ x130, x129 = bits.Mul64(x2, 0xfffffffe00000000)
+ var x131 uint64
+ var x132 uint64
+ x132, x131 = bits.Mul64(x2, 0x200000000)
+ var x133 uint64
+ var x134 uint64
+ x134, x133 = bits.Mul64(x2, 0xfffffffe00000001)
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x134, x131, uint64(0x0))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x132, x129, uint64(p384Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x130, x127, uint64(p384Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x128, x2, uint64(p384Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x115, x133, uint64(0x0))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x117, x135, uint64(p384Uint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x119, x137, uint64(p384Uint1(x146)))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x121, x139, uint64(p384Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x123, x141, uint64(p384Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x125, uint64(p384Uint1(x142)), uint64(p384Uint1(x152)))
+ var x155 uint64
+ _, x155 = bits.Mul64(x143, 0x100000001)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x155, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x155, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x162, x161 = bits.Mul64(x155, 0xffffffffffffffff)
+ var x163 uint64
+ var x164 uint64
+ x164, x163 = bits.Mul64(x155, 0xfffffffffffffffe)
+ var x165 uint64
+ var x166 uint64
+ x166, x165 = bits.Mul64(x155, 0xffffffff00000000)
+ var x167 uint64
+ var x168 uint64
+ x168, x167 = bits.Mul64(x155, 0xffffffff)
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x168, x165, uint64(0x0))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x166, x163, uint64(p384Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x164, x161, uint64(p384Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x162, x159, uint64(p384Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x160, x157, uint64(p384Uint1(x176)))
+ var x180 uint64
+ _, x180 = bits.Add64(x143, x167, uint64(0x0))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x145, x169, uint64(p384Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x147, x171, uint64(p384Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x149, x173, uint64(p384Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x151, x175, uint64(p384Uint1(x186)))
+ var x189 uint64
+ var x190 uint64
+ x189, x190 = bits.Add64(x153, x177, uint64(p384Uint1(x188)))
+ var x191 uint64
+ var x192 uint64
+ x191, x192 = bits.Add64((uint64(p384Uint1(x154)) + uint64(p384Uint1(x126))), (uint64(p384Uint1(x178)) + x158), uint64(p384Uint1(x190)))
+ var x193 uint64
+ var x194 uint64
+ x194, x193 = bits.Mul64(x3, 0x200000000)
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(x3, 0xfffffffe00000000)
+ var x197 uint64
+ var x198 uint64
+ x198, x197 = bits.Mul64(x3, 0x200000000)
+ var x199 uint64
+ var x200 uint64
+ x200, x199 = bits.Mul64(x3, 0xfffffffe00000001)
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x200, x197, uint64(0x0))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x198, x195, uint64(p384Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x196, x193, uint64(p384Uint1(x204)))
+ var x207 uint64
+ var x208 uint64
+ x207, x208 = bits.Add64(x194, x3, uint64(p384Uint1(x206)))
+ var x209 uint64
+ var x210 uint64
+ x209, x210 = bits.Add64(x181, x199, uint64(0x0))
+ var x211 uint64
+ var x212 uint64
+ x211, x212 = bits.Add64(x183, x201, uint64(p384Uint1(x210)))
+ var x213 uint64
+ var x214 uint64
+ x213, x214 = bits.Add64(x185, x203, uint64(p384Uint1(x212)))
+ var x215 uint64
+ var x216 uint64
+ x215, x216 = bits.Add64(x187, x205, uint64(p384Uint1(x214)))
+ var x217 uint64
+ var x218 uint64
+ x217, x218 = bits.Add64(x189, x207, uint64(p384Uint1(x216)))
+ var x219 uint64
+ var x220 uint64
+ x219, x220 = bits.Add64(x191, uint64(p384Uint1(x208)), uint64(p384Uint1(x218)))
+ var x221 uint64
+ _, x221 = bits.Mul64(x209, 0x100000001)
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x221, 0xffffffffffffffff)
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x221, 0xffffffffffffffff)
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x221, 0xffffffffffffffff)
+ var x229 uint64
+ var x230 uint64
+ x230, x229 = bits.Mul64(x221, 0xfffffffffffffffe)
+ var x231 uint64
+ var x232 uint64
+ x232, x231 = bits.Mul64(x221, 0xffffffff00000000)
+ var x233 uint64
+ var x234 uint64
+ x234, x233 = bits.Mul64(x221, 0xffffffff)
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x234, x231, uint64(0x0))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x232, x229, uint64(p384Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x230, x227, uint64(p384Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x228, x225, uint64(p384Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x226, x223, uint64(p384Uint1(x242)))
+ var x246 uint64
+ _, x246 = bits.Add64(x209, x233, uint64(0x0))
+ var x247 uint64
+ var x248 uint64
+ x247, x248 = bits.Add64(x211, x235, uint64(p384Uint1(x246)))
+ var x249 uint64
+ var x250 uint64
+ x249, x250 = bits.Add64(x213, x237, uint64(p384Uint1(x248)))
+ var x251 uint64
+ var x252 uint64
+ x251, x252 = bits.Add64(x215, x239, uint64(p384Uint1(x250)))
+ var x253 uint64
+ var x254 uint64
+ x253, x254 = bits.Add64(x217, x241, uint64(p384Uint1(x252)))
+ var x255 uint64
+ var x256 uint64
+ x255, x256 = bits.Add64(x219, x243, uint64(p384Uint1(x254)))
+ var x257 uint64
+ var x258 uint64
+ x257, x258 = bits.Add64((uint64(p384Uint1(x220)) + uint64(p384Uint1(x192))), (uint64(p384Uint1(x244)) + x224), uint64(p384Uint1(x256)))
+ var x259 uint64
+ var x260 uint64
+ x260, x259 = bits.Mul64(x4, 0x200000000)
+ var x261 uint64
+ var x262 uint64
+ x262, x261 = bits.Mul64(x4, 0xfffffffe00000000)
+ var x263 uint64
+ var x264 uint64
+ x264, x263 = bits.Mul64(x4, 0x200000000)
+ var x265 uint64
+ var x266 uint64
+ x266, x265 = bits.Mul64(x4, 0xfffffffe00000001)
+ var x267 uint64
+ var x268 uint64
+ x267, x268 = bits.Add64(x266, x263, uint64(0x0))
+ var x269 uint64
+ var x270 uint64
+ x269, x270 = bits.Add64(x264, x261, uint64(p384Uint1(x268)))
+ var x271 uint64
+ var x272 uint64
+ x271, x272 = bits.Add64(x262, x259, uint64(p384Uint1(x270)))
+ var x273 uint64
+ var x274 uint64
+ x273, x274 = bits.Add64(x260, x4, uint64(p384Uint1(x272)))
+ var x275 uint64
+ var x276 uint64
+ x275, x276 = bits.Add64(x247, x265, uint64(0x0))
+ var x277 uint64
+ var x278 uint64
+ x277, x278 = bits.Add64(x249, x267, uint64(p384Uint1(x276)))
+ var x279 uint64
+ var x280 uint64
+ x279, x280 = bits.Add64(x251, x269, uint64(p384Uint1(x278)))
+ var x281 uint64
+ var x282 uint64
+ x281, x282 = bits.Add64(x253, x271, uint64(p384Uint1(x280)))
+ var x283 uint64
+ var x284 uint64
+ x283, x284 = bits.Add64(x255, x273, uint64(p384Uint1(x282)))
+ var x285 uint64
+ var x286 uint64
+ x285, x286 = bits.Add64(x257, uint64(p384Uint1(x274)), uint64(p384Uint1(x284)))
+ var x287 uint64
+ _, x287 = bits.Mul64(x275, 0x100000001)
+ var x289 uint64
+ var x290 uint64
+ x290, x289 = bits.Mul64(x287, 0xffffffffffffffff)
+ var x291 uint64
+ var x292 uint64
+ x292, x291 = bits.Mul64(x287, 0xffffffffffffffff)
+ var x293 uint64
+ var x294 uint64
+ x294, x293 = bits.Mul64(x287, 0xffffffffffffffff)
+ var x295 uint64
+ var x296 uint64
+ x296, x295 = bits.Mul64(x287, 0xfffffffffffffffe)
+ var x297 uint64
+ var x298 uint64
+ x298, x297 = bits.Mul64(x287, 0xffffffff00000000)
+ var x299 uint64
+ var x300 uint64
+ x300, x299 = bits.Mul64(x287, 0xffffffff)
+ var x301 uint64
+ var x302 uint64
+ x301, x302 = bits.Add64(x300, x297, uint64(0x0))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x298, x295, uint64(p384Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x296, x293, uint64(p384Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x294, x291, uint64(p384Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x292, x289, uint64(p384Uint1(x308)))
+ var x312 uint64
+ _, x312 = bits.Add64(x275, x299, uint64(0x0))
+ var x313 uint64
+ var x314 uint64
+ x313, x314 = bits.Add64(x277, x301, uint64(p384Uint1(x312)))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x279, x303, uint64(p384Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x281, x305, uint64(p384Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x283, x307, uint64(p384Uint1(x318)))
+ var x321 uint64
+ var x322 uint64
+ x321, x322 = bits.Add64(x285, x309, uint64(p384Uint1(x320)))
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64((uint64(p384Uint1(x286)) + uint64(p384Uint1(x258))), (uint64(p384Uint1(x310)) + x290), uint64(p384Uint1(x322)))
+ var x325 uint64
+ var x326 uint64
+ x326, x325 = bits.Mul64(x5, 0x200000000)
+ var x327 uint64
+ var x328 uint64
+ x328, x327 = bits.Mul64(x5, 0xfffffffe00000000)
+ var x329 uint64
+ var x330 uint64
+ x330, x329 = bits.Mul64(x5, 0x200000000)
+ var x331 uint64
+ var x332 uint64
+ x332, x331 = bits.Mul64(x5, 0xfffffffe00000001)
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x332, x329, uint64(0x0))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x330, x327, uint64(p384Uint1(x334)))
+ var x337 uint64
+ var x338 uint64
+ x337, x338 = bits.Add64(x328, x325, uint64(p384Uint1(x336)))
+ var x339 uint64
+ var x340 uint64
+ x339, x340 = bits.Add64(x326, x5, uint64(p384Uint1(x338)))
+ var x341 uint64
+ var x342 uint64
+ x341, x342 = bits.Add64(x313, x331, uint64(0x0))
+ var x343 uint64
+ var x344 uint64
+ x343, x344 = bits.Add64(x315, x333, uint64(p384Uint1(x342)))
+ var x345 uint64
+ var x346 uint64
+ x345, x346 = bits.Add64(x317, x335, uint64(p384Uint1(x344)))
+ var x347 uint64
+ var x348 uint64
+ x347, x348 = bits.Add64(x319, x337, uint64(p384Uint1(x346)))
+ var x349 uint64
+ var x350 uint64
+ x349, x350 = bits.Add64(x321, x339, uint64(p384Uint1(x348)))
+ var x351 uint64
+ var x352 uint64
+ x351, x352 = bits.Add64(x323, uint64(p384Uint1(x340)), uint64(p384Uint1(x350)))
+ var x353 uint64
+ _, x353 = bits.Mul64(x341, 0x100000001)
+ var x355 uint64
+ var x356 uint64
+ x356, x355 = bits.Mul64(x353, 0xffffffffffffffff)
+ var x357 uint64
+ var x358 uint64
+ x358, x357 = bits.Mul64(x353, 0xffffffffffffffff)
+ var x359 uint64
+ var x360 uint64
+ x360, x359 = bits.Mul64(x353, 0xffffffffffffffff)
+ var x361 uint64
+ var x362 uint64
+ x362, x361 = bits.Mul64(x353, 0xfffffffffffffffe)
+ var x363 uint64
+ var x364 uint64
+ x364, x363 = bits.Mul64(x353, 0xffffffff00000000)
+ var x365 uint64
+ var x366 uint64
+ x366, x365 = bits.Mul64(x353, 0xffffffff)
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x366, x363, uint64(0x0))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x364, x361, uint64(p384Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x362, x359, uint64(p384Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x360, x357, uint64(p384Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x358, x355, uint64(p384Uint1(x374)))
+ var x378 uint64
+ _, x378 = bits.Add64(x341, x365, uint64(0x0))
+ var x379 uint64
+ var x380 uint64
+ x379, x380 = bits.Add64(x343, x367, uint64(p384Uint1(x378)))
+ var x381 uint64
+ var x382 uint64
+ x381, x382 = bits.Add64(x345, x369, uint64(p384Uint1(x380)))
+ var x383 uint64
+ var x384 uint64
+ x383, x384 = bits.Add64(x347, x371, uint64(p384Uint1(x382)))
+ var x385 uint64
+ var x386 uint64
+ x385, x386 = bits.Add64(x349, x373, uint64(p384Uint1(x384)))
+ var x387 uint64
+ var x388 uint64
+ x387, x388 = bits.Add64(x351, x375, uint64(p384Uint1(x386)))
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64((uint64(p384Uint1(x352)) + uint64(p384Uint1(x324))), (uint64(p384Uint1(x376)) + x356), uint64(p384Uint1(x388)))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Sub64(x379, 0xffffffff, uint64(0x0))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Sub64(x381, 0xffffffff00000000, uint64(p384Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Sub64(x383, 0xfffffffffffffffe, uint64(p384Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Sub64(x385, 0xffffffffffffffff, uint64(p384Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x399, x400 = bits.Sub64(x387, 0xffffffffffffffff, uint64(p384Uint1(x398)))
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Sub64(x389, 0xffffffffffffffff, uint64(p384Uint1(x400)))
+ var x404 uint64
+ _, x404 = bits.Sub64(uint64(p384Uint1(x390)), uint64(0x0), uint64(p384Uint1(x402)))
+ var x405 uint64
+ p384CmovznzU64(&x405, p384Uint1(x404), x391, x379)
+ var x406 uint64
+ p384CmovznzU64(&x406, p384Uint1(x404), x393, x381)
+ var x407 uint64
+ p384CmovznzU64(&x407, p384Uint1(x404), x395, x383)
+ var x408 uint64
+ p384CmovznzU64(&x408, p384Uint1(x404), x397, x385)
+ var x409 uint64
+ p384CmovznzU64(&x409, p384Uint1(x404), x399, x387)
+ var x410 uint64
+ p384CmovznzU64(&x410, p384Uint1(x404), x401, x389)
+ out1[0] = x405
+ out1[1] = x406
+ out1[2] = x407
+ out1[3] = x408
+ out1[4] = x409
+ out1[5] = x410
+}
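+
+// p384MontgomeryRoundTrip is a hypothetical sketch, not part of the
+// generated code: for a reduced element, entering and leaving the
+// Montgomery domain is the identity.
+func p384MontgomeryRoundTrip(x *p384NonMontgomeryDomainFieldElement) bool {
+	var mont p384MontgomeryDomainFieldElement
+	p384ToMontgomery(&mont, x)
+	var back p384NonMontgomeryDomainFieldElement
+	p384FromMontgomery(&back, &mont)
+	return back == *x
+}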
+
+// p384Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+//
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p384Selectznz(out1 *[6]uint64, arg1 p384Uint1, arg2 *[6]uint64, arg3 *[6]uint64) {
+ var x1 uint64
+ p384CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p384CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p384CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p384CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ var x5 uint64
+ p384CmovznzU64(&x5, arg1, arg2[4], arg3[4])
+ var x6 uint64
+ p384CmovznzU64(&x6, arg1, arg2[5], arg3[5])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+ out1[4] = x5
+ out1[5] = x6
+}
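+
+// p384SelectDemo is a hypothetical sketch, not part of the generated code:
+// out is a when cond == 0 and b when cond == 1, with no branch on cond, as
+// needed for constant-time table lookups.
+func p384SelectDemo(cond p384Uint1, a, b *[6]uint64) [6]uint64 {
+	var out [6]uint64
+	p384Selectznz(&out, cond, a, b)
+	return out
+}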
+
+// p384ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..47]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+func p384ToBytes(out1 *[48]uint8, arg1 *[6]uint64) {
+ x1 := arg1[5]
+ x2 := arg1[4]
+ x3 := arg1[3]
+ x4 := arg1[2]
+ x5 := arg1[1]
+ x6 := arg1[0]
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := (x16 >> 8)
+ x19 := (uint8(x18) & 0xff)
+ x20 := uint8((x18 >> 8))
+ x21 := (uint8(x5) & 0xff)
+ x22 := (x5 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := (x30 >> 8)
+ x33 := (uint8(x32) & 0xff)
+ x34 := uint8((x32 >> 8))
+ x35 := (uint8(x4) & 0xff)
+ x36 := (x4 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := (x44 >> 8)
+ x47 := (uint8(x46) & 0xff)
+ x48 := uint8((x46 >> 8))
+ x49 := (uint8(x3) & 0xff)
+ x50 := (x3 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := (x58 >> 8)
+ x61 := (uint8(x60) & 0xff)
+ x62 := uint8((x60 >> 8))
+ x63 := (uint8(x2) & 0xff)
+ x64 := (x2 >> 8)
+ x65 := (uint8(x64) & 0xff)
+ x66 := (x64 >> 8)
+ x67 := (uint8(x66) & 0xff)
+ x68 := (x66 >> 8)
+ x69 := (uint8(x68) & 0xff)
+ x70 := (x68 >> 8)
+ x71 := (uint8(x70) & 0xff)
+ x72 := (x70 >> 8)
+ x73 := (uint8(x72) & 0xff)
+ x74 := (x72 >> 8)
+ x75 := (uint8(x74) & 0xff)
+ x76 := uint8((x74 >> 8))
+ x77 := (uint8(x1) & 0xff)
+ x78 := (x1 >> 8)
+ x79 := (uint8(x78) & 0xff)
+ x80 := (x78 >> 8)
+ x81 := (uint8(x80) & 0xff)
+ x82 := (x80 >> 8)
+ x83 := (uint8(x82) & 0xff)
+ x84 := (x82 >> 8)
+ x85 := (uint8(x84) & 0xff)
+ x86 := (x84 >> 8)
+ x87 := (uint8(x86) & 0xff)
+ x88 := (x86 >> 8)
+ x89 := (uint8(x88) & 0xff)
+ x90 := uint8((x88 >> 8))
+ out1[0] = x7
+ out1[1] = x9
+ out1[2] = x11
+ out1[3] = x13
+ out1[4] = x15
+ out1[5] = x17
+ out1[6] = x19
+ out1[7] = x20
+ out1[8] = x21
+ out1[9] = x23
+ out1[10] = x25
+ out1[11] = x27
+ out1[12] = x29
+ out1[13] = x31
+ out1[14] = x33
+ out1[15] = x34
+ out1[16] = x35
+ out1[17] = x37
+ out1[18] = x39
+ out1[19] = x41
+ out1[20] = x43
+ out1[21] = x45
+ out1[22] = x47
+ out1[23] = x48
+ out1[24] = x49
+ out1[25] = x51
+ out1[26] = x53
+ out1[27] = x55
+ out1[28] = x57
+ out1[29] = x59
+ out1[30] = x61
+ out1[31] = x62
+ out1[32] = x63
+ out1[33] = x65
+ out1[34] = x67
+ out1[35] = x69
+ out1[36] = x71
+ out1[37] = x73
+ out1[38] = x75
+ out1[39] = x76
+ out1[40] = x77
+ out1[41] = x79
+ out1[42] = x81
+ out1[43] = x83
+ out1[44] = x85
+ out1[45] = x87
+ out1[46] = x89
+ out1[47] = x90
+}
+
+// p384FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p384FromBytes(out1 *[6]uint64, arg1 *[48]uint8) {
+ x1 := (uint64(arg1[47]) << 56)
+ x2 := (uint64(arg1[46]) << 48)
+ x3 := (uint64(arg1[45]) << 40)
+ x4 := (uint64(arg1[44]) << 32)
+ x5 := (uint64(arg1[43]) << 24)
+ x6 := (uint64(arg1[42]) << 16)
+ x7 := (uint64(arg1[41]) << 8)
+ x8 := arg1[40]
+ x9 := (uint64(arg1[39]) << 56)
+ x10 := (uint64(arg1[38]) << 48)
+ x11 := (uint64(arg1[37]) << 40)
+ x12 := (uint64(arg1[36]) << 32)
+ x13 := (uint64(arg1[35]) << 24)
+ x14 := (uint64(arg1[34]) << 16)
+ x15 := (uint64(arg1[33]) << 8)
+ x16 := arg1[32]
+ x17 := (uint64(arg1[31]) << 56)
+ x18 := (uint64(arg1[30]) << 48)
+ x19 := (uint64(arg1[29]) << 40)
+ x20 := (uint64(arg1[28]) << 32)
+ x21 := (uint64(arg1[27]) << 24)
+ x22 := (uint64(arg1[26]) << 16)
+ x23 := (uint64(arg1[25]) << 8)
+ x24 := arg1[24]
+ x25 := (uint64(arg1[23]) << 56)
+ x26 := (uint64(arg1[22]) << 48)
+ x27 := (uint64(arg1[21]) << 40)
+ x28 := (uint64(arg1[20]) << 32)
+ x29 := (uint64(arg1[19]) << 24)
+ x30 := (uint64(arg1[18]) << 16)
+ x31 := (uint64(arg1[17]) << 8)
+ x32 := arg1[16]
+ x33 := (uint64(arg1[15]) << 56)
+ x34 := (uint64(arg1[14]) << 48)
+ x35 := (uint64(arg1[13]) << 40)
+ x36 := (uint64(arg1[12]) << 32)
+ x37 := (uint64(arg1[11]) << 24)
+ x38 := (uint64(arg1[10]) << 16)
+ x39 := (uint64(arg1[9]) << 8)
+ x40 := arg1[8]
+ x41 := (uint64(arg1[7]) << 56)
+ x42 := (uint64(arg1[6]) << 48)
+ x43 := (uint64(arg1[5]) << 40)
+ x44 := (uint64(arg1[4]) << 32)
+ x45 := (uint64(arg1[3]) << 24)
+ x46 := (uint64(arg1[2]) << 16)
+ x47 := (uint64(arg1[1]) << 8)
+ x48 := arg1[0]
+ x49 := (x47 + uint64(x48))
+ x50 := (x46 + x49)
+ x51 := (x45 + x50)
+ x52 := (x44 + x51)
+ x53 := (x43 + x52)
+ x54 := (x42 + x53)
+ x55 := (x41 + x54)
+ x56 := (x39 + uint64(x40))
+ x57 := (x38 + x56)
+ x58 := (x37 + x57)
+ x59 := (x36 + x58)
+ x60 := (x35 + x59)
+ x61 := (x34 + x60)
+ x62 := (x33 + x61)
+ x63 := (x31 + uint64(x32))
+ x64 := (x30 + x63)
+ x65 := (x29 + x64)
+ x66 := (x28 + x65)
+ x67 := (x27 + x66)
+ x68 := (x26 + x67)
+ x69 := (x25 + x68)
+ x70 := (x23 + uint64(x24))
+ x71 := (x22 + x70)
+ x72 := (x21 + x71)
+ x73 := (x20 + x72)
+ x74 := (x19 + x73)
+ x75 := (x18 + x74)
+ x76 := (x17 + x75)
+ x77 := (x15 + uint64(x16))
+ x78 := (x14 + x77)
+ x79 := (x13 + x78)
+ x80 := (x12 + x79)
+ x81 := (x11 + x80)
+ x82 := (x10 + x81)
+ x83 := (x9 + x82)
+ x84 := (x7 + uint64(x8))
+ x85 := (x6 + x84)
+ x86 := (x5 + x85)
+ x87 := (x4 + x86)
+ x88 := (x3 + x87)
+ x89 := (x2 + x88)
+ x90 := (x1 + x89)
+ out1[0] = x55
+ out1[1] = x62
+ out1[2] = x69
+ out1[3] = x76
+ out1[4] = x83
+ out1[5] = x90
+}
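+
+// p384BytesRoundTrip is a hypothetical sketch, not part of the generated
+// code: serializing a reduced element and deserializing the 48 bytes gives
+// back the original limbs.
+func p384BytesRoundTrip(x *[6]uint64) bool {
+	var buf [48]uint8
+	p384ToBytes(&buf, x)
+	var back [6]uint64
+	p384FromBytes(&back, &buf)
+	return back == *x
+}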
diff --git a/src/crypto/internal/nistec/fiat/p384_invert.go b/src/crypto/internal/nistec/fiat/p384_invert.go
new file mode 100644
index 0000000..31591ac
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p384_invert.go
@@ -0,0 +1,102 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by addchain. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *P384Element) Invert(x *P384Element) *P384Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of 15 multiplications and 383 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // x12 = _111111 << 6 + _111111
+ // x24 = x12 << 12 + x12
+ // x30 = x24 << 6 + _111111
+ // x31 = 2*x30 + 1
+ // x32 = 2*x31 + 1
+ // x63 = x32 << 31 + x31
+ // x126 = x63 << 63 + x63
+ // x252 = x126 << 126 + x126
+ // x255 = x252 << 3 + _111
+ // i397 = ((x255 << 33 + x32) << 94 + x30) << 2
+ // return 1 + i397
+ //
+
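+	// In the chain above, "+" is a field multiplication, "2*" a squaring,
+	// "a << n" n successive squarings of a, and _-prefixed labels are
+	// small exponents written in binary (_111 = x^7); the loops below
+	// execute the chain one step at a time.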
+ var z = new(P384Element).Set(e)
+ var t0 = new(P384Element)
+ var t1 = new(P384Element)
+ var t2 = new(P384Element)
+ var t3 = new(P384Element)
+
+ z.Square(x)
+ z.Mul(x, z)
+ z.Square(z)
+ t1.Mul(x, z)
+ z.Square(t1)
+ for s := 1; s < 3; s++ {
+ z.Square(z)
+ }
+ z.Mul(t1, z)
+ t0.Square(z)
+ for s := 1; s < 6; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(z, t0)
+ t2.Square(t0)
+ for s := 1; s < 12; s++ {
+ t2.Square(t2)
+ }
+ t0.Mul(t0, t2)
+ for s := 0; s < 6; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t2.Mul(x, t0)
+ t0.Square(t2)
+ t0.Mul(x, t0)
+ t3.Square(t0)
+ for s := 1; s < 31; s++ {
+ t3.Square(t3)
+ }
+ t2.Mul(t2, t3)
+ t3.Square(t2)
+ for s := 1; s < 63; s++ {
+ t3.Square(t3)
+ }
+ t2.Mul(t2, t3)
+ t3.Square(t2)
+ for s := 1; s < 126; s++ {
+ t3.Square(t3)
+ }
+ t2.Mul(t2, t3)
+ for s := 0; s < 3; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ for s := 0; s < 33; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 94; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 2; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+
+ return e.Set(z)
+}
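+
+// p384DivSketch is a hypothetical sketch, not part of the generated code:
+// Invert composes with Mul to give field division. If den == 0 the result
+// is 0, matching Invert's convention for zero inputs.
+func p384DivSketch(num, den *P384Element) *P384Element {
+	inv := new(P384Element).Invert(den)
+	return new(P384Element).Mul(num, inv)
+}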
diff --git a/src/crypto/internal/nistec/fiat/p521.go b/src/crypto/internal/nistec/fiat/p521.go
new file mode 100644
index 0000000..43ac7d0
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p521.go
@@ -0,0 +1,134 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package fiat
+
+import (
+ "crypto/subtle"
+ "errors"
+)
+
+// P521Element is an integer modulo 2^521 - 1.
+//
+// The zero value is a valid zero element.
+type P521Element struct {
+ // Values are represented internally always in the Montgomery domain, and
+ // converted in Bytes and SetBytes.
+ x p521MontgomeryDomainFieldElement
+}
+
+const p521ElementLen = 66
+
+type p521UntypedFieldElement = [9]uint64
+
+// One sets e = 1, and returns e.
+func (e *P521Element) One() *P521Element {
+ p521SetOne(&e.x)
+ return e
+}
+
+// Equal returns 1 if e == t, and zero otherwise.
+func (e *P521Element) Equal(t *P521Element) int {
+ eBytes := e.Bytes()
+ tBytes := t.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, tBytes)
+}
+
+// IsZero returns 1 if e == 0, and zero otherwise.
+func (e *P521Element) IsZero() int {
+ zero := make([]byte, p521ElementLen)
+ eBytes := e.Bytes()
+ return subtle.ConstantTimeCompare(eBytes, zero)
+}
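+
+// Note that Equal and IsZero above compare full encodings with
+// subtle.ConstantTimeCompare, so their timing does not depend on the
+// values of the elements.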
+
+// Set sets e = t, and returns e.
+func (e *P521Element) Set(t *P521Element) *P521Element {
+ e.x = t.x
+ return e
+}
+
+// Bytes returns the 66-byte big-endian encoding of e.
+func (e *P521Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p521ElementLen]byte
+ return e.bytes(&out)
+}
+
+func (e *P521Element) bytes(out *[p521ElementLen]byte) []byte {
+ var tmp p521NonMontgomeryDomainFieldElement
+ p521FromMontgomery(&tmp, &e.x)
+ p521ToBytes(out, (*p521UntypedFieldElement)(&tmp))
+ p521InvertEndianness(out[:])
+ return out[:]
+}
+
+// SetBytes sets e = v, where v is a big-endian 66-byte encoding, and returns e.
+// If v is not 66 bytes or it encodes a value higher than 2^521 - 2,
+// SetBytes returns nil and an error, and e is unchanged.
+func (e *P521Element) SetBytes(v []byte) (*P521Element, error) {
+ if len(v) != p521ElementLen {
+ return nil, errors.New("invalid P521Element encoding")
+ }
+
+ // Check for non-canonical encodings (p + k, 2p + k, etc.) by comparing to
+ // the encoding of -1 mod p, so p - 1, the highest canonical encoding.
+ var minusOneEncoding = new(P521Element).Sub(
+ new(P521Element), new(P521Element).One()).Bytes()
+ for i := range v {
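+		// Big-endian lexicographic compare with p - 1: the first
+		// differing byte decides, and full equality falls through
+		// as a valid (canonical) encoding.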
+ if v[i] < minusOneEncoding[i] {
+ break
+ }
+ if v[i] > minusOneEncoding[i] {
+ return nil, errors.New("invalid P521Element encoding")
+ }
+ }
+
+ var in [p521ElementLen]byte
+ copy(in[:], v)
+ p521InvertEndianness(in[:])
+ var tmp p521NonMontgomeryDomainFieldElement
+ p521FromBytes((*p521UntypedFieldElement)(&tmp), &in)
+ p521ToMontgomery(&e.x, &tmp)
+ return e, nil
+}
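+
+// p521RejectSketch is a hypothetical sketch, not part of the generated
+// code: 66 bytes of 0xff encode a value above p and must be rejected.
+func p521RejectSketch() bool {
+	var v [p521ElementLen]byte
+	for i := range v {
+		v[i] = 0xff
+	}
+	_, err := new(P521Element).SetBytes(v[:])
+	return err != nil
+}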
+
+// Add sets e = t1 + t2, and returns e.
+func (e *P521Element) Add(t1, t2 *P521Element) *P521Element {
+ p521Add(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Sub sets e = t1 - t2, and returns e.
+func (e *P521Element) Sub(t1, t2 *P521Element) *P521Element {
+ p521Sub(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Mul sets e = t1 * t2, and returns e.
+func (e *P521Element) Mul(t1, t2 *P521Element) *P521Element {
+ p521Mul(&e.x, &t1.x, &t2.x)
+ return e
+}
+
+// Square sets e = t * t, and returns e.
+func (e *P521Element) Square(t *P521Element) *P521Element {
+ p521Square(&e.x, &t.x)
+ return e
+}
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *P521Element) Select(a, b *P521Element, cond int) *P521Element {
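+	// p521Selectznz picks its third operand when cond == 1, so b and a are
+	// passed in swapped order to implement "a if cond == 1".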
+ p521Selectznz((*p521UntypedFieldElement)(&v.x), p521Uint1(cond),
+ (*p521UntypedFieldElement)(&b.x), (*p521UntypedFieldElement)(&a.x))
+ return v
+}
+
+func p521InvertEndianness(v []byte) {
+ for i := 0; i < len(v)/2; i++ {
+ v[i], v[len(v)-1-i] = v[len(v)-1-i], v[i]
+ }
+}
diff --git a/src/crypto/internal/nistec/fiat/p521_fiat64.go b/src/crypto/internal/nistec/fiat/p521_fiat64.go
new file mode 100644
index 0000000..87a359e
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p521_fiat64.go
@@ -0,0 +1,5541 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --no-wide-int --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --internal-static --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name fiat --no-prefix-fiat p521 64 '2^521 - 1' mul square add sub one from_montgomery to_montgomery selectznz to_bytes from_bytes
+//
+// curve description: p521
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, square, add, sub, one, from_montgomery, to_montgomery, selectznz, to_bytes, from_bytes
+//
+// m = 0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff (from "2^521 - 1")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) + (z[6] << 0x180) + (z[7] << 0x1c0) + (z[8] << 2^9)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) + (z[32] << 256) + (z[33] << 0x108) + (z[34] << 0x110) + (z[35] << 0x118) + (z[36] << 0x120) + (z[37] << 0x128) + (z[38] << 0x130) + (z[39] << 0x138) + (z[40] << 0x140) + (z[41] << 0x148) + (z[42] << 0x150) + (z[43] << 0x158) + (z[44] << 0x160) + (z[45] << 0x168) + (z[46] << 0x170) + (z[47] << 0x178) + (z[48] << 0x180) + (z[49] << 0x188) + (z[50] << 0x190) + (z[51] << 0x198) + (z[52] << 0x1a0) + (z[53] << 0x1a8) + (z[54] << 0x1b0) + (z[55] << 0x1b8) + (z[56] << 0x1c0) + (z[57] << 0x1c8) + (z[58] << 0x1d0) + (z[59] << 0x1d8) + (z[60] << 0x1e0) + (z[61] << 0x1e8) + (z[62] << 0x1f0) + (z[63] << 0x1f8) + (z[64] << 2^9) + (z[65] << 0x208)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) + (z[4] << 256) + (z[5] << 0x140) + (z[6] << 0x180) + (z[7] << 0x1c0) + (z[8] << 2^9) in
+//
+// if x1 & (2^576-1) < 2^575 then x1 & (2^576-1) else (x1 & (2^576-1)) - 2^576
+
+package fiat
+
+import "math/bits"
+
+type p521Uint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type p521Int1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type p521MontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p521MontgomeryDomainFieldElement [9]uint64
+
+// The type p521NonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type p521NonMontgomeryDomainFieldElement [9]uint64
+
+// p521CmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func p521CmovznzU64(out1 *uint64, arg1 p521Uint1, arg2 uint64, arg3 uint64) {
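+	// Multiplying the 0/1 condition by 2^64 - 1 produces an all-zeros or
+	// all-ones mask, turning the selection into a branch-free AND/OR.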
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
+
+// p521Mul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p521Mul(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
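+	// Reader's note (not part of the fiat-crypto output): this is word-by-word
+	// Montgomery multiplication with R = 2^576 over nine 64-bit limbs. Each of
+	// the nine rounds multiplies one limb of arg1 (starting with arg1[0], bound
+	// to x9 below) by all of arg2, accumulates, and then adds a multiple of
+	// m = 2^521 - 1 chosen to zero the low accumulator word. Since m ≡ -1
+	// (mod 2^64), the Montgomery constant m' = -m^-1 mod 2^64 is 1, so that
+	// multiple is simply (low accumulator word) * m, with m's limbs being eight
+	// words of 0xffffffffffffffff and a top word of 0x1ff.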
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[6]
+ x7 := arg1[7]
+ x8 := arg1[8]
+ x9 := arg1[0]
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x9, arg2[8])
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x9, arg2[7])
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x9, arg2[6])
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x9, arg2[5])
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x9, arg2[4])
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x9, arg2[3])
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x9, arg2[2])
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x9, arg2[1])
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x9, arg2[0])
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x23, x20, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x21, x18, uint64(p521Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x19, x16, uint64(p521Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x17, x14, uint64(p521Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x15, x12, uint64(p521Uint1(x39)))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x13, x10, uint64(p521Uint1(x41)))
+ x44 := (uint64(p521Uint1(x43)) + x11)
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x26, 0x1ff)
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x51 uint64
+ var x52 uint64
+ x52, x51 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x53 uint64
+ var x54 uint64
+ x54, x53 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x55 uint64
+ var x56 uint64
+ x56, x55 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x57 uint64
+ var x58 uint64
+ x58, x57 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x62, x59, uint64(0x0))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x60, x57, uint64(p521Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x58, x55, uint64(p521Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x56, x53, uint64(p521Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x54, x51, uint64(p521Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x52, x49, uint64(p521Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x50, x47, uint64(p521Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x48, x45, uint64(p521Uint1(x76)))
+ x79 := (uint64(p521Uint1(x78)) + x46)
+ var x81 uint64
+ _, x81 = bits.Add64(x26, x61, uint64(0x0))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64(x28, x63, uint64(p521Uint1(x81)))
+ var x84 uint64
+ var x85 uint64
+ x84, x85 = bits.Add64(x30, x65, uint64(p521Uint1(x83)))
+ var x86 uint64
+ var x87 uint64
+ x86, x87 = bits.Add64(x32, x67, uint64(p521Uint1(x85)))
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x34, x69, uint64(p521Uint1(x87)))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x36, x71, uint64(p521Uint1(x89)))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x38, x73, uint64(p521Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x40, x75, uint64(p521Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x42, x77, uint64(p521Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x44, x79, uint64(p521Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x101, x100 = bits.Mul64(x1, arg2[8])
+ var x102 uint64
+ var x103 uint64
+ x103, x102 = bits.Mul64(x1, arg2[7])
+ var x104 uint64
+ var x105 uint64
+ x105, x104 = bits.Mul64(x1, arg2[6])
+ var x106 uint64
+ var x107 uint64
+ x107, x106 = bits.Mul64(x1, arg2[5])
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x1, arg2[4])
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x1, arg2[3])
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x1, arg2[2])
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x1, arg2[1])
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x1, arg2[0])
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x117, x114, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x115, x112, uint64(p521Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x113, x110, uint64(p521Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x111, x108, uint64(p521Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x109, x106, uint64(p521Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x104, uint64(p521Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x105, x102, uint64(p521Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x103, x100, uint64(p521Uint1(x131)))
+ x134 := (uint64(p521Uint1(x133)) + x101)
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x82, x116, uint64(0x0))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x84, x118, uint64(p521Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x86, x120, uint64(p521Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x88, x122, uint64(p521Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x90, x124, uint64(p521Uint1(x142)))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x92, x126, uint64(p521Uint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x94, x128, uint64(p521Uint1(x146)))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x96, x130, uint64(p521Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x98, x132, uint64(p521Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(uint64(p521Uint1(x99)), x134, uint64(p521Uint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x156, x155 = bits.Mul64(x135, 0x1ff)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x162, x161 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x163 uint64
+ var x164 uint64
+ x164, x163 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x165 uint64
+ var x166 uint64
+ x166, x165 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x167 uint64
+ var x168 uint64
+ x168, x167 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x169 uint64
+ var x170 uint64
+ x170, x169 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x171 uint64
+ var x172 uint64
+ x172, x171 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x172, x169, uint64(0x0))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x170, x167, uint64(p521Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x168, x165, uint64(p521Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x166, x163, uint64(p521Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x164, x161, uint64(p521Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x162, x159, uint64(p521Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x160, x157, uint64(p521Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x158, x155, uint64(p521Uint1(x186)))
+ x189 := (uint64(p521Uint1(x188)) + x156)
+ var x191 uint64
+ _, x191 = bits.Add64(x135, x171, uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x137, x173, uint64(p521Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x139, x175, uint64(p521Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x141, x177, uint64(p521Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x143, x179, uint64(p521Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x145, x181, uint64(p521Uint1(x199)))
+ var x202 uint64
+ var x203 uint64
+ x202, x203 = bits.Add64(x147, x183, uint64(p521Uint1(x201)))
+ var x204 uint64
+ var x205 uint64
+ x204, x205 = bits.Add64(x149, x185, uint64(p521Uint1(x203)))
+ var x206 uint64
+ var x207 uint64
+ x206, x207 = bits.Add64(x151, x187, uint64(p521Uint1(x205)))
+ var x208 uint64
+ var x209 uint64
+ x208, x209 = bits.Add64(x153, x189, uint64(p521Uint1(x207)))
+ x210 := (uint64(p521Uint1(x209)) + uint64(p521Uint1(x154)))
+ var x211 uint64
+ var x212 uint64
+ x212, x211 = bits.Mul64(x2, arg2[8])
+ var x213 uint64
+ var x214 uint64
+ x214, x213 = bits.Mul64(x2, arg2[7])
+ var x215 uint64
+ var x216 uint64
+ x216, x215 = bits.Mul64(x2, arg2[6])
+ var x217 uint64
+ var x218 uint64
+ x218, x217 = bits.Mul64(x2, arg2[5])
+ var x219 uint64
+ var x220 uint64
+ x220, x219 = bits.Mul64(x2, arg2[4])
+ var x221 uint64
+ var x222 uint64
+ x222, x221 = bits.Mul64(x2, arg2[3])
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x2, arg2[2])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x2, arg2[1])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x2, arg2[0])
+ var x229 uint64
+ var x230 uint64
+ x229, x230 = bits.Add64(x228, x225, uint64(0x0))
+ var x231 uint64
+ var x232 uint64
+ x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
+ var x233 uint64
+ var x234 uint64
+ x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
+ x245 := (uint64(p521Uint1(x244)) + x212)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x192, x227, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x194, x229, uint64(p521Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x196, x231, uint64(p521Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x198, x233, uint64(p521Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x200, x235, uint64(p521Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x202, x237, uint64(p521Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x204, x239, uint64(p521Uint1(x257)))
+ var x260 uint64
+ var x261 uint64
+ x260, x261 = bits.Add64(x206, x241, uint64(p521Uint1(x259)))
+ var x262 uint64
+ var x263 uint64
+ x262, x263 = bits.Add64(x208, x243, uint64(p521Uint1(x261)))
+ var x264 uint64
+ var x265 uint64
+ x264, x265 = bits.Add64(x210, x245, uint64(p521Uint1(x263)))
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x246, 0x1ff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x274 uint64
+ var x275 uint64
+ x275, x274 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x276 uint64
+ var x277 uint64
+ x277, x276 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x278 uint64
+ var x279 uint64
+ x279, x278 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x280 uint64
+ var x281 uint64
+ x281, x280 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x282 uint64
+ var x283 uint64
+ x283, x282 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x284 uint64
+ var x285 uint64
+ x284, x285 = bits.Add64(x283, x280, uint64(0x0))
+ var x286 uint64
+ var x287 uint64
+ x286, x287 = bits.Add64(x281, x278, uint64(p521Uint1(x285)))
+ var x288 uint64
+ var x289 uint64
+ x288, x289 = bits.Add64(x279, x276, uint64(p521Uint1(x287)))
+ var x290 uint64
+ var x291 uint64
+ x290, x291 = bits.Add64(x277, x274, uint64(p521Uint1(x289)))
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Add64(x275, x272, uint64(p521Uint1(x291)))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Add64(x273, x270, uint64(p521Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Add64(x271, x268, uint64(p521Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Add64(x269, x266, uint64(p521Uint1(x297)))
+ x300 := (uint64(p521Uint1(x299)) + x267)
+ var x302 uint64
+ _, x302 = bits.Add64(x246, x282, uint64(0x0))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x248, x284, uint64(p521Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x250, x286, uint64(p521Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x252, x288, uint64(p521Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x254, x290, uint64(p521Uint1(x308)))
+ var x311 uint64
+ var x312 uint64
+ x311, x312 = bits.Add64(x256, x292, uint64(p521Uint1(x310)))
+ var x313 uint64
+ var x314 uint64
+ x313, x314 = bits.Add64(x258, x294, uint64(p521Uint1(x312)))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x260, x296, uint64(p521Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x262, x298, uint64(p521Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x264, x300, uint64(p521Uint1(x318)))
+ x321 := (uint64(p521Uint1(x320)) + uint64(p521Uint1(x265)))
+ var x322 uint64
+ var x323 uint64
+ x323, x322 = bits.Mul64(x3, arg2[8])
+ var x324 uint64
+ var x325 uint64
+ x325, x324 = bits.Mul64(x3, arg2[7])
+ var x326 uint64
+ var x327 uint64
+ x327, x326 = bits.Mul64(x3, arg2[6])
+ var x328 uint64
+ var x329 uint64
+ x329, x328 = bits.Mul64(x3, arg2[5])
+ var x330 uint64
+ var x331 uint64
+ x331, x330 = bits.Mul64(x3, arg2[4])
+ var x332 uint64
+ var x333 uint64
+ x333, x332 = bits.Mul64(x3, arg2[3])
+ var x334 uint64
+ var x335 uint64
+ x335, x334 = bits.Mul64(x3, arg2[2])
+ var x336 uint64
+ var x337 uint64
+ x337, x336 = bits.Mul64(x3, arg2[1])
+ var x338 uint64
+ var x339 uint64
+ x339, x338 = bits.Mul64(x3, arg2[0])
+ var x340 uint64
+ var x341 uint64
+ x340, x341 = bits.Add64(x339, x336, uint64(0x0))
+ var x342 uint64
+ var x343 uint64
+ x342, x343 = bits.Add64(x337, x334, uint64(p521Uint1(x341)))
+ var x344 uint64
+ var x345 uint64
+ x344, x345 = bits.Add64(x335, x332, uint64(p521Uint1(x343)))
+ var x346 uint64
+ var x347 uint64
+ x346, x347 = bits.Add64(x333, x330, uint64(p521Uint1(x345)))
+ var x348 uint64
+ var x349 uint64
+ x348, x349 = bits.Add64(x331, x328, uint64(p521Uint1(x347)))
+ var x350 uint64
+ var x351 uint64
+ x350, x351 = bits.Add64(x329, x326, uint64(p521Uint1(x349)))
+ var x352 uint64
+ var x353 uint64
+ x352, x353 = bits.Add64(x327, x324, uint64(p521Uint1(x351)))
+ var x354 uint64
+ var x355 uint64
+ x354, x355 = bits.Add64(x325, x322, uint64(p521Uint1(x353)))
+ x356 := (uint64(p521Uint1(x355)) + x323)
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x303, x338, uint64(0x0))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x305, x340, uint64(p521Uint1(x358)))
+ var x361 uint64
+ var x362 uint64
+ x361, x362 = bits.Add64(x307, x342, uint64(p521Uint1(x360)))
+ var x363 uint64
+ var x364 uint64
+ x363, x364 = bits.Add64(x309, x344, uint64(p521Uint1(x362)))
+ var x365 uint64
+ var x366 uint64
+ x365, x366 = bits.Add64(x311, x346, uint64(p521Uint1(x364)))
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x313, x348, uint64(p521Uint1(x366)))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x315, x350, uint64(p521Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x317, x352, uint64(p521Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x319, x354, uint64(p521Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x321, x356, uint64(p521Uint1(x374)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x357, 0x1ff)
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x389 uint64
+ var x390 uint64
+ x390, x389 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x391 uint64
+ var x392 uint64
+ x392, x391 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x393 uint64
+ var x394 uint64
+ x394, x393 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x394, x391, uint64(0x0))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x392, x389, uint64(p521Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x399, x400 = bits.Add64(x390, x387, uint64(p521Uint1(x398)))
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Add64(x388, x385, uint64(p521Uint1(x400)))
+ var x403 uint64
+ var x404 uint64
+ x403, x404 = bits.Add64(x386, x383, uint64(p521Uint1(x402)))
+ var x405 uint64
+ var x406 uint64
+ x405, x406 = bits.Add64(x384, x381, uint64(p521Uint1(x404)))
+ var x407 uint64
+ var x408 uint64
+ x407, x408 = bits.Add64(x382, x379, uint64(p521Uint1(x406)))
+ var x409 uint64
+ var x410 uint64
+ x409, x410 = bits.Add64(x380, x377, uint64(p521Uint1(x408)))
+ x411 := (uint64(p521Uint1(x410)) + x378)
+ var x413 uint64
+ _, x413 = bits.Add64(x357, x393, uint64(0x0))
+ var x414 uint64
+ var x415 uint64
+ x414, x415 = bits.Add64(x359, x395, uint64(p521Uint1(x413)))
+ var x416 uint64
+ var x417 uint64
+ x416, x417 = bits.Add64(x361, x397, uint64(p521Uint1(x415)))
+ var x418 uint64
+ var x419 uint64
+ x418, x419 = bits.Add64(x363, x399, uint64(p521Uint1(x417)))
+ var x420 uint64
+ var x421 uint64
+ x420, x421 = bits.Add64(x365, x401, uint64(p521Uint1(x419)))
+ var x422 uint64
+ var x423 uint64
+ x422, x423 = bits.Add64(x367, x403, uint64(p521Uint1(x421)))
+ var x424 uint64
+ var x425 uint64
+ x424, x425 = bits.Add64(x369, x405, uint64(p521Uint1(x423)))
+ var x426 uint64
+ var x427 uint64
+ x426, x427 = bits.Add64(x371, x407, uint64(p521Uint1(x425)))
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x373, x409, uint64(p521Uint1(x427)))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x375, x411, uint64(p521Uint1(x429)))
+ x432 := (uint64(p521Uint1(x431)) + uint64(p521Uint1(x376)))
+ var x433 uint64
+ var x434 uint64
+ x434, x433 = bits.Mul64(x4, arg2[8])
+ var x435 uint64
+ var x436 uint64
+ x436, x435 = bits.Mul64(x4, arg2[7])
+ var x437 uint64
+ var x438 uint64
+ x438, x437 = bits.Mul64(x4, arg2[6])
+ var x439 uint64
+ var x440 uint64
+ x440, x439 = bits.Mul64(x4, arg2[5])
+ var x441 uint64
+ var x442 uint64
+ x442, x441 = bits.Mul64(x4, arg2[4])
+ var x443 uint64
+ var x444 uint64
+ x444, x443 = bits.Mul64(x4, arg2[3])
+ var x445 uint64
+ var x446 uint64
+ x446, x445 = bits.Mul64(x4, arg2[2])
+ var x447 uint64
+ var x448 uint64
+ x448, x447 = bits.Mul64(x4, arg2[1])
+ var x449 uint64
+ var x450 uint64
+ x450, x449 = bits.Mul64(x4, arg2[0])
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x450, x447, uint64(0x0))
+ var x453 uint64
+ var x454 uint64
+ x453, x454 = bits.Add64(x448, x445, uint64(p521Uint1(x452)))
+ var x455 uint64
+ var x456 uint64
+ x455, x456 = bits.Add64(x446, x443, uint64(p521Uint1(x454)))
+ var x457 uint64
+ var x458 uint64
+ x457, x458 = bits.Add64(x444, x441, uint64(p521Uint1(x456)))
+ var x459 uint64
+ var x460 uint64
+ x459, x460 = bits.Add64(x442, x439, uint64(p521Uint1(x458)))
+ var x461 uint64
+ var x462 uint64
+ x461, x462 = bits.Add64(x440, x437, uint64(p521Uint1(x460)))
+ var x463 uint64
+ var x464 uint64
+ x463, x464 = bits.Add64(x438, x435, uint64(p521Uint1(x462)))
+ var x465 uint64
+ var x466 uint64
+ x465, x466 = bits.Add64(x436, x433, uint64(p521Uint1(x464)))
+ x467 := (uint64(p521Uint1(x466)) + x434)
+ var x468 uint64
+ var x469 uint64
+ x468, x469 = bits.Add64(x414, x449, uint64(0x0))
+ var x470 uint64
+ var x471 uint64
+ x470, x471 = bits.Add64(x416, x451, uint64(p521Uint1(x469)))
+ var x472 uint64
+ var x473 uint64
+ x472, x473 = bits.Add64(x418, x453, uint64(p521Uint1(x471)))
+ var x474 uint64
+ var x475 uint64
+ x474, x475 = bits.Add64(x420, x455, uint64(p521Uint1(x473)))
+ var x476 uint64
+ var x477 uint64
+ x476, x477 = bits.Add64(x422, x457, uint64(p521Uint1(x475)))
+ var x478 uint64
+ var x479 uint64
+ x478, x479 = bits.Add64(x424, x459, uint64(p521Uint1(x477)))
+ var x480 uint64
+ var x481 uint64
+ x480, x481 = bits.Add64(x426, x461, uint64(p521Uint1(x479)))
+ var x482 uint64
+ var x483 uint64
+ x482, x483 = bits.Add64(x428, x463, uint64(p521Uint1(x481)))
+ var x484 uint64
+ var x485 uint64
+ x484, x485 = bits.Add64(x430, x465, uint64(p521Uint1(x483)))
+ var x486 uint64
+ var x487 uint64
+ x486, x487 = bits.Add64(x432, x467, uint64(p521Uint1(x485)))
+ var x488 uint64
+ var x489 uint64
+ x489, x488 = bits.Mul64(x468, 0x1ff)
+ var x490 uint64
+ var x491 uint64
+ x491, x490 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x492 uint64
+ var x493 uint64
+ x493, x492 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x494 uint64
+ var x495 uint64
+ x495, x494 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x496 uint64
+ var x497 uint64
+ x497, x496 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x498 uint64
+ var x499 uint64
+ x499, x498 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x500 uint64
+ var x501 uint64
+ x501, x500 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x502 uint64
+ var x503 uint64
+ x503, x502 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x504 uint64
+ var x505 uint64
+ x505, x504 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x506 uint64
+ var x507 uint64
+ x506, x507 = bits.Add64(x505, x502, uint64(0x0))
+ var x508 uint64
+ var x509 uint64
+ x508, x509 = bits.Add64(x503, x500, uint64(p521Uint1(x507)))
+ var x510 uint64
+ var x511 uint64
+ x510, x511 = bits.Add64(x501, x498, uint64(p521Uint1(x509)))
+ var x512 uint64
+ var x513 uint64
+ x512, x513 = bits.Add64(x499, x496, uint64(p521Uint1(x511)))
+ var x514 uint64
+ var x515 uint64
+ x514, x515 = bits.Add64(x497, x494, uint64(p521Uint1(x513)))
+ var x516 uint64
+ var x517 uint64
+ x516, x517 = bits.Add64(x495, x492, uint64(p521Uint1(x515)))
+ var x518 uint64
+ var x519 uint64
+ x518, x519 = bits.Add64(x493, x490, uint64(p521Uint1(x517)))
+ var x520 uint64
+ var x521 uint64
+ x520, x521 = bits.Add64(x491, x488, uint64(p521Uint1(x519)))
+ x522 := (uint64(p521Uint1(x521)) + x489)
+ var x524 uint64
+ _, x524 = bits.Add64(x468, x504, uint64(0x0))
+ var x525 uint64
+ var x526 uint64
+ x525, x526 = bits.Add64(x470, x506, uint64(p521Uint1(x524)))
+ var x527 uint64
+ var x528 uint64
+ x527, x528 = bits.Add64(x472, x508, uint64(p521Uint1(x526)))
+ var x529 uint64
+ var x530 uint64
+ x529, x530 = bits.Add64(x474, x510, uint64(p521Uint1(x528)))
+ var x531 uint64
+ var x532 uint64
+ x531, x532 = bits.Add64(x476, x512, uint64(p521Uint1(x530)))
+ var x533 uint64
+ var x534 uint64
+ x533, x534 = bits.Add64(x478, x514, uint64(p521Uint1(x532)))
+ var x535 uint64
+ var x536 uint64
+ x535, x536 = bits.Add64(x480, x516, uint64(p521Uint1(x534)))
+ var x537 uint64
+ var x538 uint64
+ x537, x538 = bits.Add64(x482, x518, uint64(p521Uint1(x536)))
+ var x539 uint64
+ var x540 uint64
+ x539, x540 = bits.Add64(x484, x520, uint64(p521Uint1(x538)))
+ var x541 uint64
+ var x542 uint64
+ x541, x542 = bits.Add64(x486, x522, uint64(p521Uint1(x540)))
+ x543 := (uint64(p521Uint1(x542)) + uint64(p521Uint1(x487)))
+ var x544 uint64
+ var x545 uint64
+ x545, x544 = bits.Mul64(x5, arg2[8])
+ var x546 uint64
+ var x547 uint64
+ x547, x546 = bits.Mul64(x5, arg2[7])
+ var x548 uint64
+ var x549 uint64
+ x549, x548 = bits.Mul64(x5, arg2[6])
+ var x550 uint64
+ var x551 uint64
+ x551, x550 = bits.Mul64(x5, arg2[5])
+ var x552 uint64
+ var x553 uint64
+ x553, x552 = bits.Mul64(x5, arg2[4])
+ var x554 uint64
+ var x555 uint64
+ x555, x554 = bits.Mul64(x5, arg2[3])
+ var x556 uint64
+ var x557 uint64
+ x557, x556 = bits.Mul64(x5, arg2[2])
+ var x558 uint64
+ var x559 uint64
+ x559, x558 = bits.Mul64(x5, arg2[1])
+ var x560 uint64
+ var x561 uint64
+ x561, x560 = bits.Mul64(x5, arg2[0])
+ var x562 uint64
+ var x563 uint64
+ x562, x563 = bits.Add64(x561, x558, uint64(0x0))
+ var x564 uint64
+ var x565 uint64
+ x564, x565 = bits.Add64(x559, x556, uint64(p521Uint1(x563)))
+ var x566 uint64
+ var x567 uint64
+ x566, x567 = bits.Add64(x557, x554, uint64(p521Uint1(x565)))
+ var x568 uint64
+ var x569 uint64
+ x568, x569 = bits.Add64(x555, x552, uint64(p521Uint1(x567)))
+ var x570 uint64
+ var x571 uint64
+ x570, x571 = bits.Add64(x553, x550, uint64(p521Uint1(x569)))
+ var x572 uint64
+ var x573 uint64
+ x572, x573 = bits.Add64(x551, x548, uint64(p521Uint1(x571)))
+ var x574 uint64
+ var x575 uint64
+ x574, x575 = bits.Add64(x549, x546, uint64(p521Uint1(x573)))
+ var x576 uint64
+ var x577 uint64
+ x576, x577 = bits.Add64(x547, x544, uint64(p521Uint1(x575)))
+ x578 := (uint64(p521Uint1(x577)) + x545)
+ var x579 uint64
+ var x580 uint64
+ x579, x580 = bits.Add64(x525, x560, uint64(0x0))
+ var x581 uint64
+ var x582 uint64
+ x581, x582 = bits.Add64(x527, x562, uint64(p521Uint1(x580)))
+ var x583 uint64
+ var x584 uint64
+ x583, x584 = bits.Add64(x529, x564, uint64(p521Uint1(x582)))
+ var x585 uint64
+ var x586 uint64
+ x585, x586 = bits.Add64(x531, x566, uint64(p521Uint1(x584)))
+ var x587 uint64
+ var x588 uint64
+ x587, x588 = bits.Add64(x533, x568, uint64(p521Uint1(x586)))
+ var x589 uint64
+ var x590 uint64
+ x589, x590 = bits.Add64(x535, x570, uint64(p521Uint1(x588)))
+ var x591 uint64
+ var x592 uint64
+ x591, x592 = bits.Add64(x537, x572, uint64(p521Uint1(x590)))
+ var x593 uint64
+ var x594 uint64
+ x593, x594 = bits.Add64(x539, x574, uint64(p521Uint1(x592)))
+ var x595 uint64
+ var x596 uint64
+ x595, x596 = bits.Add64(x541, x576, uint64(p521Uint1(x594)))
+ var x597 uint64
+ var x598 uint64
+ x597, x598 = bits.Add64(x543, x578, uint64(p521Uint1(x596)))
+ var x599 uint64
+ var x600 uint64
+ x600, x599 = bits.Mul64(x579, 0x1ff)
+ var x601 uint64
+ var x602 uint64
+ x602, x601 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x603 uint64
+ var x604 uint64
+ x604, x603 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x605 uint64
+ var x606 uint64
+ x606, x605 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x607 uint64
+ var x608 uint64
+ x608, x607 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x609 uint64
+ var x610 uint64
+ x610, x609 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x611 uint64
+ var x612 uint64
+ x612, x611 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x613 uint64
+ var x614 uint64
+ x614, x613 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x615 uint64
+ var x616 uint64
+ x616, x615 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x617 uint64
+ var x618 uint64
+ x617, x618 = bits.Add64(x616, x613, uint64(0x0))
+ var x619 uint64
+ var x620 uint64
+ x619, x620 = bits.Add64(x614, x611, uint64(p521Uint1(x618)))
+ var x621 uint64
+ var x622 uint64
+ x621, x622 = bits.Add64(x612, x609, uint64(p521Uint1(x620)))
+ var x623 uint64
+ var x624 uint64
+ x623, x624 = bits.Add64(x610, x607, uint64(p521Uint1(x622)))
+ var x625 uint64
+ var x626 uint64
+ x625, x626 = bits.Add64(x608, x605, uint64(p521Uint1(x624)))
+ var x627 uint64
+ var x628 uint64
+ x627, x628 = bits.Add64(x606, x603, uint64(p521Uint1(x626)))
+ var x629 uint64
+ var x630 uint64
+ x629, x630 = bits.Add64(x604, x601, uint64(p521Uint1(x628)))
+ var x631 uint64
+ var x632 uint64
+ x631, x632 = bits.Add64(x602, x599, uint64(p521Uint1(x630)))
+ x633 := (uint64(p521Uint1(x632)) + x600)
+ var x635 uint64
+ _, x635 = bits.Add64(x579, x615, uint64(0x0))
+ var x636 uint64
+ var x637 uint64
+ x636, x637 = bits.Add64(x581, x617, uint64(p521Uint1(x635)))
+ var x638 uint64
+ var x639 uint64
+ x638, x639 = bits.Add64(x583, x619, uint64(p521Uint1(x637)))
+ var x640 uint64
+ var x641 uint64
+ x640, x641 = bits.Add64(x585, x621, uint64(p521Uint1(x639)))
+ var x642 uint64
+ var x643 uint64
+ x642, x643 = bits.Add64(x587, x623, uint64(p521Uint1(x641)))
+ var x644 uint64
+ var x645 uint64
+ x644, x645 = bits.Add64(x589, x625, uint64(p521Uint1(x643)))
+ var x646 uint64
+ var x647 uint64
+ x646, x647 = bits.Add64(x591, x627, uint64(p521Uint1(x645)))
+ var x648 uint64
+ var x649 uint64
+ x648, x649 = bits.Add64(x593, x629, uint64(p521Uint1(x647)))
+ var x650 uint64
+ var x651 uint64
+ x650, x651 = bits.Add64(x595, x631, uint64(p521Uint1(x649)))
+ var x652 uint64
+ var x653 uint64
+ x652, x653 = bits.Add64(x597, x633, uint64(p521Uint1(x651)))
+ x654 := (uint64(p521Uint1(x653)) + uint64(p521Uint1(x598)))
+ var x655 uint64
+ var x656 uint64
+ x656, x655 = bits.Mul64(x6, arg2[8])
+ var x657 uint64
+ var x658 uint64
+ x658, x657 = bits.Mul64(x6, arg2[7])
+ var x659 uint64
+ var x660 uint64
+ x660, x659 = bits.Mul64(x6, arg2[6])
+ var x661 uint64
+ var x662 uint64
+ x662, x661 = bits.Mul64(x6, arg2[5])
+ var x663 uint64
+ var x664 uint64
+ x664, x663 = bits.Mul64(x6, arg2[4])
+ var x665 uint64
+ var x666 uint64
+ x666, x665 = bits.Mul64(x6, arg2[3])
+ var x667 uint64
+ var x668 uint64
+ x668, x667 = bits.Mul64(x6, arg2[2])
+ var x669 uint64
+ var x670 uint64
+ x670, x669 = bits.Mul64(x6, arg2[1])
+ var x671 uint64
+ var x672 uint64
+ x672, x671 = bits.Mul64(x6, arg2[0])
+ var x673 uint64
+ var x674 uint64
+ x673, x674 = bits.Add64(x672, x669, uint64(0x0))
+ var x675 uint64
+ var x676 uint64
+ x675, x676 = bits.Add64(x670, x667, uint64(p521Uint1(x674)))
+ var x677 uint64
+ var x678 uint64
+ x677, x678 = bits.Add64(x668, x665, uint64(p521Uint1(x676)))
+ var x679 uint64
+ var x680 uint64
+ x679, x680 = bits.Add64(x666, x663, uint64(p521Uint1(x678)))
+ var x681 uint64
+ var x682 uint64
+ x681, x682 = bits.Add64(x664, x661, uint64(p521Uint1(x680)))
+ var x683 uint64
+ var x684 uint64
+ x683, x684 = bits.Add64(x662, x659, uint64(p521Uint1(x682)))
+ var x685 uint64
+ var x686 uint64
+ x685, x686 = bits.Add64(x660, x657, uint64(p521Uint1(x684)))
+ var x687 uint64
+ var x688 uint64
+ x687, x688 = bits.Add64(x658, x655, uint64(p521Uint1(x686)))
+ x689 := (uint64(p521Uint1(x688)) + x656)
+ var x690 uint64
+ var x691 uint64
+ x690, x691 = bits.Add64(x636, x671, uint64(0x0))
+ var x692 uint64
+ var x693 uint64
+ x692, x693 = bits.Add64(x638, x673, uint64(p521Uint1(x691)))
+ var x694 uint64
+ var x695 uint64
+ x694, x695 = bits.Add64(x640, x675, uint64(p521Uint1(x693)))
+ var x696 uint64
+ var x697 uint64
+ x696, x697 = bits.Add64(x642, x677, uint64(p521Uint1(x695)))
+ var x698 uint64
+ var x699 uint64
+ x698, x699 = bits.Add64(x644, x679, uint64(p521Uint1(x697)))
+ var x700 uint64
+ var x701 uint64
+ x700, x701 = bits.Add64(x646, x681, uint64(p521Uint1(x699)))
+ var x702 uint64
+ var x703 uint64
+ x702, x703 = bits.Add64(x648, x683, uint64(p521Uint1(x701)))
+ var x704 uint64
+ var x705 uint64
+ x704, x705 = bits.Add64(x650, x685, uint64(p521Uint1(x703)))
+ var x706 uint64
+ var x707 uint64
+ x706, x707 = bits.Add64(x652, x687, uint64(p521Uint1(x705)))
+ var x708 uint64
+ var x709 uint64
+ x708, x709 = bits.Add64(x654, x689, uint64(p521Uint1(x707)))
+ var x710 uint64
+ var x711 uint64
+ x711, x710 = bits.Mul64(x690, 0x1ff)
+ var x712 uint64
+ var x713 uint64
+ x713, x712 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x714 uint64
+ var x715 uint64
+ x715, x714 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x716 uint64
+ var x717 uint64
+ x717, x716 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x718 uint64
+ var x719 uint64
+ x719, x718 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x720 uint64
+ var x721 uint64
+ x721, x720 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x722 uint64
+ var x723 uint64
+ x723, x722 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x724 uint64
+ var x725 uint64
+ x725, x724 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x726 uint64
+ var x727 uint64
+ x727, x726 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x728 uint64
+ var x729 uint64
+ x728, x729 = bits.Add64(x727, x724, uint64(0x0))
+ var x730 uint64
+ var x731 uint64
+ x730, x731 = bits.Add64(x725, x722, uint64(p521Uint1(x729)))
+ var x732 uint64
+ var x733 uint64
+ x732, x733 = bits.Add64(x723, x720, uint64(p521Uint1(x731)))
+ var x734 uint64
+ var x735 uint64
+ x734, x735 = bits.Add64(x721, x718, uint64(p521Uint1(x733)))
+ var x736 uint64
+ var x737 uint64
+ x736, x737 = bits.Add64(x719, x716, uint64(p521Uint1(x735)))
+ var x738 uint64
+ var x739 uint64
+ x738, x739 = bits.Add64(x717, x714, uint64(p521Uint1(x737)))
+ var x740 uint64
+ var x741 uint64
+ x740, x741 = bits.Add64(x715, x712, uint64(p521Uint1(x739)))
+ var x742 uint64
+ var x743 uint64
+ x742, x743 = bits.Add64(x713, x710, uint64(p521Uint1(x741)))
+ x744 := (uint64(p521Uint1(x743)) + x711)
+ var x746 uint64
+ _, x746 = bits.Add64(x690, x726, uint64(0x0))
+ var x747 uint64
+ var x748 uint64
+ x747, x748 = bits.Add64(x692, x728, uint64(p521Uint1(x746)))
+ var x749 uint64
+ var x750 uint64
+ x749, x750 = bits.Add64(x694, x730, uint64(p521Uint1(x748)))
+ var x751 uint64
+ var x752 uint64
+ x751, x752 = bits.Add64(x696, x732, uint64(p521Uint1(x750)))
+ var x753 uint64
+ var x754 uint64
+ x753, x754 = bits.Add64(x698, x734, uint64(p521Uint1(x752)))
+ var x755 uint64
+ var x756 uint64
+ x755, x756 = bits.Add64(x700, x736, uint64(p521Uint1(x754)))
+ var x757 uint64
+ var x758 uint64
+ x757, x758 = bits.Add64(x702, x738, uint64(p521Uint1(x756)))
+ var x759 uint64
+ var x760 uint64
+ x759, x760 = bits.Add64(x704, x740, uint64(p521Uint1(x758)))
+ var x761 uint64
+ var x762 uint64
+ x761, x762 = bits.Add64(x706, x742, uint64(p521Uint1(x760)))
+ var x763 uint64
+ var x764 uint64
+ x763, x764 = bits.Add64(x708, x744, uint64(p521Uint1(x762)))
+ x765 := (uint64(p521Uint1(x764)) + uint64(p521Uint1(x709)))
+ var x766 uint64
+ var x767 uint64
+ x767, x766 = bits.Mul64(x7, arg2[8])
+ var x768 uint64
+ var x769 uint64
+ x769, x768 = bits.Mul64(x7, arg2[7])
+ var x770 uint64
+ var x771 uint64
+ x771, x770 = bits.Mul64(x7, arg2[6])
+ var x772 uint64
+ var x773 uint64
+ x773, x772 = bits.Mul64(x7, arg2[5])
+ var x774 uint64
+ var x775 uint64
+ x775, x774 = bits.Mul64(x7, arg2[4])
+ var x776 uint64
+ var x777 uint64
+ x777, x776 = bits.Mul64(x7, arg2[3])
+ var x778 uint64
+ var x779 uint64
+ x779, x778 = bits.Mul64(x7, arg2[2])
+ var x780 uint64
+ var x781 uint64
+ x781, x780 = bits.Mul64(x7, arg2[1])
+ var x782 uint64
+ var x783 uint64
+ x783, x782 = bits.Mul64(x7, arg2[0])
+ var x784 uint64
+ var x785 uint64
+ x784, x785 = bits.Add64(x783, x780, uint64(0x0))
+ var x786 uint64
+ var x787 uint64
+ x786, x787 = bits.Add64(x781, x778, uint64(p521Uint1(x785)))
+ var x788 uint64
+ var x789 uint64
+ x788, x789 = bits.Add64(x779, x776, uint64(p521Uint1(x787)))
+ var x790 uint64
+ var x791 uint64
+ x790, x791 = bits.Add64(x777, x774, uint64(p521Uint1(x789)))
+ var x792 uint64
+ var x793 uint64
+ x792, x793 = bits.Add64(x775, x772, uint64(p521Uint1(x791)))
+ var x794 uint64
+ var x795 uint64
+ x794, x795 = bits.Add64(x773, x770, uint64(p521Uint1(x793)))
+ var x796 uint64
+ var x797 uint64
+ x796, x797 = bits.Add64(x771, x768, uint64(p521Uint1(x795)))
+ var x798 uint64
+ var x799 uint64
+ x798, x799 = bits.Add64(x769, x766, uint64(p521Uint1(x797)))
+ x800 := (uint64(p521Uint1(x799)) + x767)
+ var x801 uint64
+ var x802 uint64
+ x801, x802 = bits.Add64(x747, x782, uint64(0x0))
+ var x803 uint64
+ var x804 uint64
+ x803, x804 = bits.Add64(x749, x784, uint64(p521Uint1(x802)))
+ var x805 uint64
+ var x806 uint64
+ x805, x806 = bits.Add64(x751, x786, uint64(p521Uint1(x804)))
+ var x807 uint64
+ var x808 uint64
+ x807, x808 = bits.Add64(x753, x788, uint64(p521Uint1(x806)))
+ var x809 uint64
+ var x810 uint64
+ x809, x810 = bits.Add64(x755, x790, uint64(p521Uint1(x808)))
+ var x811 uint64
+ var x812 uint64
+ x811, x812 = bits.Add64(x757, x792, uint64(p521Uint1(x810)))
+ var x813 uint64
+ var x814 uint64
+ x813, x814 = bits.Add64(x759, x794, uint64(p521Uint1(x812)))
+ var x815 uint64
+ var x816 uint64
+ x815, x816 = bits.Add64(x761, x796, uint64(p521Uint1(x814)))
+ var x817 uint64
+ var x818 uint64
+ x817, x818 = bits.Add64(x763, x798, uint64(p521Uint1(x816)))
+ var x819 uint64
+ var x820 uint64
+ x819, x820 = bits.Add64(x765, x800, uint64(p521Uint1(x818)))
+ var x821 uint64
+ var x822 uint64
+ x822, x821 = bits.Mul64(x801, 0x1ff)
+ var x823 uint64
+ var x824 uint64
+ x824, x823 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x825 uint64
+ var x826 uint64
+ x826, x825 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x827 uint64
+ var x828 uint64
+ x828, x827 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x829 uint64
+ var x830 uint64
+ x830, x829 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x831 uint64
+ var x832 uint64
+ x832, x831 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x833 uint64
+ var x834 uint64
+ x834, x833 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x835 uint64
+ var x836 uint64
+ x836, x835 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x837 uint64
+ var x838 uint64
+ x838, x837 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x839 uint64
+ var x840 uint64
+ x839, x840 = bits.Add64(x838, x835, uint64(0x0))
+ var x841 uint64
+ var x842 uint64
+ x841, x842 = bits.Add64(x836, x833, uint64(p521Uint1(x840)))
+ var x843 uint64
+ var x844 uint64
+ x843, x844 = bits.Add64(x834, x831, uint64(p521Uint1(x842)))
+ var x845 uint64
+ var x846 uint64
+ x845, x846 = bits.Add64(x832, x829, uint64(p521Uint1(x844)))
+ var x847 uint64
+ var x848 uint64
+ x847, x848 = bits.Add64(x830, x827, uint64(p521Uint1(x846)))
+ var x849 uint64
+ var x850 uint64
+ x849, x850 = bits.Add64(x828, x825, uint64(p521Uint1(x848)))
+ var x851 uint64
+ var x852 uint64
+ x851, x852 = bits.Add64(x826, x823, uint64(p521Uint1(x850)))
+ var x853 uint64
+ var x854 uint64
+ x853, x854 = bits.Add64(x824, x821, uint64(p521Uint1(x852)))
+ x855 := (uint64(p521Uint1(x854)) + x822)
+ var x857 uint64
+ _, x857 = bits.Add64(x801, x837, uint64(0x0))
+ var x858 uint64
+ var x859 uint64
+ x858, x859 = bits.Add64(x803, x839, uint64(p521Uint1(x857)))
+ var x860 uint64
+ var x861 uint64
+ x860, x861 = bits.Add64(x805, x841, uint64(p521Uint1(x859)))
+ var x862 uint64
+ var x863 uint64
+ x862, x863 = bits.Add64(x807, x843, uint64(p521Uint1(x861)))
+ var x864 uint64
+ var x865 uint64
+ x864, x865 = bits.Add64(x809, x845, uint64(p521Uint1(x863)))
+ var x866 uint64
+ var x867 uint64
+ x866, x867 = bits.Add64(x811, x847, uint64(p521Uint1(x865)))
+ var x868 uint64
+ var x869 uint64
+ x868, x869 = bits.Add64(x813, x849, uint64(p521Uint1(x867)))
+ var x870 uint64
+ var x871 uint64
+ x870, x871 = bits.Add64(x815, x851, uint64(p521Uint1(x869)))
+ var x872 uint64
+ var x873 uint64
+ x872, x873 = bits.Add64(x817, x853, uint64(p521Uint1(x871)))
+ var x874 uint64
+ var x875 uint64
+ x874, x875 = bits.Add64(x819, x855, uint64(p521Uint1(x873)))
+ x876 := (uint64(p521Uint1(x875)) + uint64(p521Uint1(x820)))
+ var x877 uint64
+ var x878 uint64
+ x878, x877 = bits.Mul64(x8, arg2[8])
+ var x879 uint64
+ var x880 uint64
+ x880, x879 = bits.Mul64(x8, arg2[7])
+ var x881 uint64
+ var x882 uint64
+ x882, x881 = bits.Mul64(x8, arg2[6])
+ var x883 uint64
+ var x884 uint64
+ x884, x883 = bits.Mul64(x8, arg2[5])
+ var x885 uint64
+ var x886 uint64
+ x886, x885 = bits.Mul64(x8, arg2[4])
+ var x887 uint64
+ var x888 uint64
+ x888, x887 = bits.Mul64(x8, arg2[3])
+ var x889 uint64
+ var x890 uint64
+ x890, x889 = bits.Mul64(x8, arg2[2])
+ var x891 uint64
+ var x892 uint64
+ x892, x891 = bits.Mul64(x8, arg2[1])
+ var x893 uint64
+ var x894 uint64
+ x894, x893 = bits.Mul64(x8, arg2[0])
+ var x895 uint64
+ var x896 uint64
+ x895, x896 = bits.Add64(x894, x891, uint64(0x0))
+ var x897 uint64
+ var x898 uint64
+ x897, x898 = bits.Add64(x892, x889, uint64(p521Uint1(x896)))
+ var x899 uint64
+ var x900 uint64
+ x899, x900 = bits.Add64(x890, x887, uint64(p521Uint1(x898)))
+ var x901 uint64
+ var x902 uint64
+ x901, x902 = bits.Add64(x888, x885, uint64(p521Uint1(x900)))
+ var x903 uint64
+ var x904 uint64
+ x903, x904 = bits.Add64(x886, x883, uint64(p521Uint1(x902)))
+ var x905 uint64
+ var x906 uint64
+ x905, x906 = bits.Add64(x884, x881, uint64(p521Uint1(x904)))
+ var x907 uint64
+ var x908 uint64
+ x907, x908 = bits.Add64(x882, x879, uint64(p521Uint1(x906)))
+ var x909 uint64
+ var x910 uint64
+ x909, x910 = bits.Add64(x880, x877, uint64(p521Uint1(x908)))
+ x911 := (uint64(p521Uint1(x910)) + x878)
+ var x912 uint64
+ var x913 uint64
+ x912, x913 = bits.Add64(x858, x893, uint64(0x0))
+ var x914 uint64
+ var x915 uint64
+ x914, x915 = bits.Add64(x860, x895, uint64(p521Uint1(x913)))
+ var x916 uint64
+ var x917 uint64
+ x916, x917 = bits.Add64(x862, x897, uint64(p521Uint1(x915)))
+ var x918 uint64
+ var x919 uint64
+ x918, x919 = bits.Add64(x864, x899, uint64(p521Uint1(x917)))
+ var x920 uint64
+ var x921 uint64
+ x920, x921 = bits.Add64(x866, x901, uint64(p521Uint1(x919)))
+ var x922 uint64
+ var x923 uint64
+ x922, x923 = bits.Add64(x868, x903, uint64(p521Uint1(x921)))
+ var x924 uint64
+ var x925 uint64
+ x924, x925 = bits.Add64(x870, x905, uint64(p521Uint1(x923)))
+ var x926 uint64
+ var x927 uint64
+ x926, x927 = bits.Add64(x872, x907, uint64(p521Uint1(x925)))
+ var x928 uint64
+ var x929 uint64
+ x928, x929 = bits.Add64(x874, x909, uint64(p521Uint1(x927)))
+ var x930 uint64
+ var x931 uint64
+ x930, x931 = bits.Add64(x876, x911, uint64(p521Uint1(x929)))
+ var x932 uint64
+ var x933 uint64
+ x933, x932 = bits.Mul64(x912, 0x1ff)
+ var x934 uint64
+ var x935 uint64
+ x935, x934 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x936 uint64
+ var x937 uint64
+ x937, x936 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x938 uint64
+ var x939 uint64
+ x939, x938 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x940 uint64
+ var x941 uint64
+ x941, x940 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x942 uint64
+ var x943 uint64
+ x943, x942 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x944 uint64
+ var x945 uint64
+ x945, x944 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x946 uint64
+ var x947 uint64
+ x947, x946 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x948 uint64
+ var x949 uint64
+ x949, x948 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x950 uint64
+ var x951 uint64
+ x950, x951 = bits.Add64(x949, x946, uint64(0x0))
+ var x952 uint64
+ var x953 uint64
+ x952, x953 = bits.Add64(x947, x944, uint64(p521Uint1(x951)))
+ var x954 uint64
+ var x955 uint64
+ x954, x955 = bits.Add64(x945, x942, uint64(p521Uint1(x953)))
+ var x956 uint64
+ var x957 uint64
+ x956, x957 = bits.Add64(x943, x940, uint64(p521Uint1(x955)))
+ var x958 uint64
+ var x959 uint64
+ x958, x959 = bits.Add64(x941, x938, uint64(p521Uint1(x957)))
+ var x960 uint64
+ var x961 uint64
+ x960, x961 = bits.Add64(x939, x936, uint64(p521Uint1(x959)))
+ var x962 uint64
+ var x963 uint64
+ x962, x963 = bits.Add64(x937, x934, uint64(p521Uint1(x961)))
+ var x964 uint64
+ var x965 uint64
+ x964, x965 = bits.Add64(x935, x932, uint64(p521Uint1(x963)))
+ x966 := (uint64(p521Uint1(x965)) + x933)
+ var x968 uint64
+ _, x968 = bits.Add64(x912, x948, uint64(0x0))
+ var x969 uint64
+ var x970 uint64
+ x969, x970 = bits.Add64(x914, x950, uint64(p521Uint1(x968)))
+ var x971 uint64
+ var x972 uint64
+ x971, x972 = bits.Add64(x916, x952, uint64(p521Uint1(x970)))
+ var x973 uint64
+ var x974 uint64
+ x973, x974 = bits.Add64(x918, x954, uint64(p521Uint1(x972)))
+ var x975 uint64
+ var x976 uint64
+ x975, x976 = bits.Add64(x920, x956, uint64(p521Uint1(x974)))
+ var x977 uint64
+ var x978 uint64
+ x977, x978 = bits.Add64(x922, x958, uint64(p521Uint1(x976)))
+ var x979 uint64
+ var x980 uint64
+ x979, x980 = bits.Add64(x924, x960, uint64(p521Uint1(x978)))
+ var x981 uint64
+ var x982 uint64
+ x981, x982 = bits.Add64(x926, x962, uint64(p521Uint1(x980)))
+ var x983 uint64
+ var x984 uint64
+ x983, x984 = bits.Add64(x928, x964, uint64(p521Uint1(x982)))
+ var x985 uint64
+ var x986 uint64
+ x985, x986 = bits.Add64(x930, x966, uint64(p521Uint1(x984)))
+ x987 := (uint64(p521Uint1(x986)) + uint64(p521Uint1(x931)))
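+	// Final conditional subtraction: compute acc - m limb by limb; if the
+	// subtraction borrows (acc < m), keep acc, otherwise keep the difference,
+	// selecting branch-free via p521CmovznzU64 on the final borrow x1007.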
+ var x988 uint64
+ var x989 uint64
+ x988, x989 = bits.Sub64(x969, 0xffffffffffffffff, uint64(0x0))
+ var x990 uint64
+ var x991 uint64
+ x990, x991 = bits.Sub64(x971, 0xffffffffffffffff, uint64(p521Uint1(x989)))
+ var x992 uint64
+ var x993 uint64
+ x992, x993 = bits.Sub64(x973, 0xffffffffffffffff, uint64(p521Uint1(x991)))
+ var x994 uint64
+ var x995 uint64
+ x994, x995 = bits.Sub64(x975, 0xffffffffffffffff, uint64(p521Uint1(x993)))
+ var x996 uint64
+ var x997 uint64
+ x996, x997 = bits.Sub64(x977, 0xffffffffffffffff, uint64(p521Uint1(x995)))
+ var x998 uint64
+ var x999 uint64
+ x998, x999 = bits.Sub64(x979, 0xffffffffffffffff, uint64(p521Uint1(x997)))
+ var x1000 uint64
+ var x1001 uint64
+ x1000, x1001 = bits.Sub64(x981, 0xffffffffffffffff, uint64(p521Uint1(x999)))
+ var x1002 uint64
+ var x1003 uint64
+ x1002, x1003 = bits.Sub64(x983, 0xffffffffffffffff, uint64(p521Uint1(x1001)))
+ var x1004 uint64
+ var x1005 uint64
+ x1004, x1005 = bits.Sub64(x985, 0x1ff, uint64(p521Uint1(x1003)))
+ var x1007 uint64
+ _, x1007 = bits.Sub64(x987, uint64(0x0), uint64(p521Uint1(x1005)))
+ var x1008 uint64
+ p521CmovznzU64(&x1008, p521Uint1(x1007), x988, x969)
+ var x1009 uint64
+ p521CmovznzU64(&x1009, p521Uint1(x1007), x990, x971)
+ var x1010 uint64
+ p521CmovznzU64(&x1010, p521Uint1(x1007), x992, x973)
+ var x1011 uint64
+ p521CmovznzU64(&x1011, p521Uint1(x1007), x994, x975)
+ var x1012 uint64
+ p521CmovznzU64(&x1012, p521Uint1(x1007), x996, x977)
+ var x1013 uint64
+ p521CmovznzU64(&x1013, p521Uint1(x1007), x998, x979)
+ var x1014 uint64
+ p521CmovznzU64(&x1014, p521Uint1(x1007), x1000, x981)
+ var x1015 uint64
+ p521CmovznzU64(&x1015, p521Uint1(x1007), x1002, x983)
+ var x1016 uint64
+ p521CmovznzU64(&x1016, p521Uint1(x1007), x1004, x985)
+ out1[0] = x1008
+ out1[1] = x1009
+ out1[2] = x1010
+ out1[3] = x1011
+ out1[4] = x1012
+ out1[5] = x1013
+ out1[6] = x1014
+ out1[7] = x1015
+ out1[8] = x1016
+}
+
+// p521Square squares a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg1)) mod m
+// 0 ≤ eval out1 < m
+func p521Square(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement) {
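+	// Reader's note (not part of the fiat-crypto output): the generated
+	// squaring follows exactly the same Montgomery ladder as p521Mul with both
+	// operands equal to arg1; it does not exploit the symmetry of the partial
+	// products.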
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[4]
+ x5 := arg1[5]
+ x6 := arg1[6]
+ x7 := arg1[7]
+ x8 := arg1[8]
+ x9 := arg1[0]
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x9, arg1[8])
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x9, arg1[7])
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x9, arg1[6])
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x9, arg1[5])
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x9, arg1[4])
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x9, arg1[3])
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x9, arg1[2])
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x9, arg1[1])
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x9, arg1[0])
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x25, x22, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x23, x20, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x21, x18, uint64(p521Uint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x19, x16, uint64(p521Uint1(x35)))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x17, x14, uint64(p521Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x15, x12, uint64(p521Uint1(x39)))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(x13, x10, uint64(p521Uint1(x41)))
+ x44 := (uint64(p521Uint1(x43)) + x11)
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x26, 0x1ff)
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x49 uint64
+ var x50 uint64
+ x50, x49 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x51 uint64
+ var x52 uint64
+ x52, x51 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x53 uint64
+ var x54 uint64
+ x54, x53 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x55 uint64
+ var x56 uint64
+ x56, x55 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x57 uint64
+ var x58 uint64
+ x58, x57 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x26, 0xffffffffffffffff)
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x62, x59, uint64(0x0))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x60, x57, uint64(p521Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x58, x55, uint64(p521Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x56, x53, uint64(p521Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x54, x51, uint64(p521Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x52, x49, uint64(p521Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x50, x47, uint64(p521Uint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x77, x78 = bits.Add64(x48, x45, uint64(p521Uint1(x76)))
+ x79 := (uint64(p521Uint1(x78)) + x46)
+ var x81 uint64
+ _, x81 = bits.Add64(x26, x61, uint64(0x0))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64(x28, x63, uint64(p521Uint1(x81)))
+ var x84 uint64
+ var x85 uint64
+ x84, x85 = bits.Add64(x30, x65, uint64(p521Uint1(x83)))
+ var x86 uint64
+ var x87 uint64
+ x86, x87 = bits.Add64(x32, x67, uint64(p521Uint1(x85)))
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x34, x69, uint64(p521Uint1(x87)))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x36, x71, uint64(p521Uint1(x89)))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x38, x73, uint64(p521Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x40, x75, uint64(p521Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x42, x77, uint64(p521Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x44, x79, uint64(p521Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x101, x100 = bits.Mul64(x1, arg1[8])
+ var x102 uint64
+ var x103 uint64
+ x103, x102 = bits.Mul64(x1, arg1[7])
+ var x104 uint64
+ var x105 uint64
+ x105, x104 = bits.Mul64(x1, arg1[6])
+ var x106 uint64
+ var x107 uint64
+ x107, x106 = bits.Mul64(x1, arg1[5])
+ var x108 uint64
+ var x109 uint64
+ x109, x108 = bits.Mul64(x1, arg1[4])
+ var x110 uint64
+ var x111 uint64
+ x111, x110 = bits.Mul64(x1, arg1[3])
+ var x112 uint64
+ var x113 uint64
+ x113, x112 = bits.Mul64(x1, arg1[2])
+ var x114 uint64
+ var x115 uint64
+ x115, x114 = bits.Mul64(x1, arg1[1])
+ var x116 uint64
+ var x117 uint64
+ x117, x116 = bits.Mul64(x1, arg1[0])
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x117, x114, uint64(0x0))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64(x115, x112, uint64(p521Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x113, x110, uint64(p521Uint1(x121)))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x111, x108, uint64(p521Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x109, x106, uint64(p521Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x104, uint64(p521Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x105, x102, uint64(p521Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x103, x100, uint64(p521Uint1(x131)))
+ x134 := (uint64(p521Uint1(x133)) + x101)
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x82, x116, uint64(0x0))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x84, x118, uint64(p521Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x86, x120, uint64(p521Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x88, x122, uint64(p521Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x90, x124, uint64(p521Uint1(x142)))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x92, x126, uint64(p521Uint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x94, x128, uint64(p521Uint1(x146)))
+ var x149 uint64
+ var x150 uint64
+ x149, x150 = bits.Add64(x96, x130, uint64(p521Uint1(x148)))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x98, x132, uint64(p521Uint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(uint64(p521Uint1(x99)), x134, uint64(p521Uint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x156, x155 = bits.Mul64(x135, 0x1ff)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x162, x161 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x163 uint64
+ var x164 uint64
+ x164, x163 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x165 uint64
+ var x166 uint64
+ x166, x165 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x167 uint64
+ var x168 uint64
+ x168, x167 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x169 uint64
+ var x170 uint64
+ x170, x169 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x171 uint64
+ var x172 uint64
+ x172, x171 = bits.Mul64(x135, 0xffffffffffffffff)
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x172, x169, uint64(0x0))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x170, x167, uint64(p521Uint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x168, x165, uint64(p521Uint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x166, x163, uint64(p521Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x164, x161, uint64(p521Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x162, x159, uint64(p521Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x160, x157, uint64(p521Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x158, x155, uint64(p521Uint1(x186)))
+ x189 := (uint64(p521Uint1(x188)) + x156)
+ var x191 uint64
+ _, x191 = bits.Add64(x135, x171, uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x137, x173, uint64(p521Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x139, x175, uint64(p521Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x141, x177, uint64(p521Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x143, x179, uint64(p521Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x145, x181, uint64(p521Uint1(x199)))
+ var x202 uint64
+ var x203 uint64
+ x202, x203 = bits.Add64(x147, x183, uint64(p521Uint1(x201)))
+ var x204 uint64
+ var x205 uint64
+ x204, x205 = bits.Add64(x149, x185, uint64(p521Uint1(x203)))
+ var x206 uint64
+ var x207 uint64
+ x206, x207 = bits.Add64(x151, x187, uint64(p521Uint1(x205)))
+ var x208 uint64
+ var x209 uint64
+ x208, x209 = bits.Add64(x153, x189, uint64(p521Uint1(x207)))
+ x210 := (uint64(p521Uint1(x209)) + uint64(p521Uint1(x154)))
+ var x211 uint64
+ var x212 uint64
+ x212, x211 = bits.Mul64(x2, arg1[8])
+ var x213 uint64
+ var x214 uint64
+ x214, x213 = bits.Mul64(x2, arg1[7])
+ var x215 uint64
+ var x216 uint64
+ x216, x215 = bits.Mul64(x2, arg1[6])
+ var x217 uint64
+ var x218 uint64
+ x218, x217 = bits.Mul64(x2, arg1[5])
+ var x219 uint64
+ var x220 uint64
+ x220, x219 = bits.Mul64(x2, arg1[4])
+ var x221 uint64
+ var x222 uint64
+ x222, x221 = bits.Mul64(x2, arg1[3])
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x2, arg1[2])
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x2, arg1[1])
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x2, arg1[0])
+ var x229 uint64
+ var x230 uint64
+ x229, x230 = bits.Add64(x228, x225, uint64(0x0))
+ var x231 uint64
+ var x232 uint64
+ x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
+ var x233 uint64
+ var x234 uint64
+ x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
+ x245 := (uint64(p521Uint1(x244)) + x212)
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x192, x227, uint64(0x0))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x194, x229, uint64(p521Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x196, x231, uint64(p521Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x198, x233, uint64(p521Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x200, x235, uint64(p521Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64(x202, x237, uint64(p521Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x204, x239, uint64(p521Uint1(x257)))
+ var x260 uint64
+ var x261 uint64
+ x260, x261 = bits.Add64(x206, x241, uint64(p521Uint1(x259)))
+ var x262 uint64
+ var x263 uint64
+ x262, x263 = bits.Add64(x208, x243, uint64(p521Uint1(x261)))
+ var x264 uint64
+ var x265 uint64
+ x264, x265 = bits.Add64(x210, x245, uint64(p521Uint1(x263)))
+ var x266 uint64
+ var x267 uint64
+ x267, x266 = bits.Mul64(x246, 0x1ff)
+ var x268 uint64
+ var x269 uint64
+ x269, x268 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x270 uint64
+ var x271 uint64
+ x271, x270 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x272 uint64
+ var x273 uint64
+ x273, x272 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x274 uint64
+ var x275 uint64
+ x275, x274 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x276 uint64
+ var x277 uint64
+ x277, x276 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x278 uint64
+ var x279 uint64
+ x279, x278 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x280 uint64
+ var x281 uint64
+ x281, x280 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x282 uint64
+ var x283 uint64
+ x283, x282 = bits.Mul64(x246, 0xffffffffffffffff)
+ var x284 uint64
+ var x285 uint64
+ x284, x285 = bits.Add64(x283, x280, uint64(0x0))
+ var x286 uint64
+ var x287 uint64
+ x286, x287 = bits.Add64(x281, x278, uint64(p521Uint1(x285)))
+ var x288 uint64
+ var x289 uint64
+ x288, x289 = bits.Add64(x279, x276, uint64(p521Uint1(x287)))
+ var x290 uint64
+ var x291 uint64
+ x290, x291 = bits.Add64(x277, x274, uint64(p521Uint1(x289)))
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Add64(x275, x272, uint64(p521Uint1(x291)))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Add64(x273, x270, uint64(p521Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Add64(x271, x268, uint64(p521Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Add64(x269, x266, uint64(p521Uint1(x297)))
+ x300 := (uint64(p521Uint1(x299)) + x267)
+ var x302 uint64
+ _, x302 = bits.Add64(x246, x282, uint64(0x0))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x248, x284, uint64(p521Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x250, x286, uint64(p521Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x252, x288, uint64(p521Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x254, x290, uint64(p521Uint1(x308)))
+ var x311 uint64
+ var x312 uint64
+ x311, x312 = bits.Add64(x256, x292, uint64(p521Uint1(x310)))
+ var x313 uint64
+ var x314 uint64
+ x313, x314 = bits.Add64(x258, x294, uint64(p521Uint1(x312)))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x260, x296, uint64(p521Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x262, x298, uint64(p521Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x264, x300, uint64(p521Uint1(x318)))
+ x321 := (uint64(p521Uint1(x320)) + uint64(p521Uint1(x265)))
+ var x322 uint64
+ var x323 uint64
+ x323, x322 = bits.Mul64(x3, arg1[8])
+ var x324 uint64
+ var x325 uint64
+ x325, x324 = bits.Mul64(x3, arg1[7])
+ var x326 uint64
+ var x327 uint64
+ x327, x326 = bits.Mul64(x3, arg1[6])
+ var x328 uint64
+ var x329 uint64
+ x329, x328 = bits.Mul64(x3, arg1[5])
+ var x330 uint64
+ var x331 uint64
+ x331, x330 = bits.Mul64(x3, arg1[4])
+ var x332 uint64
+ var x333 uint64
+ x333, x332 = bits.Mul64(x3, arg1[3])
+ var x334 uint64
+ var x335 uint64
+ x335, x334 = bits.Mul64(x3, arg1[2])
+ var x336 uint64
+ var x337 uint64
+ x337, x336 = bits.Mul64(x3, arg1[1])
+ var x338 uint64
+ var x339 uint64
+ x339, x338 = bits.Mul64(x3, arg1[0])
+ var x340 uint64
+ var x341 uint64
+ x340, x341 = bits.Add64(x339, x336, uint64(0x0))
+ var x342 uint64
+ var x343 uint64
+ x342, x343 = bits.Add64(x337, x334, uint64(p521Uint1(x341)))
+ var x344 uint64
+ var x345 uint64
+ x344, x345 = bits.Add64(x335, x332, uint64(p521Uint1(x343)))
+ var x346 uint64
+ var x347 uint64
+ x346, x347 = bits.Add64(x333, x330, uint64(p521Uint1(x345)))
+ var x348 uint64
+ var x349 uint64
+ x348, x349 = bits.Add64(x331, x328, uint64(p521Uint1(x347)))
+ var x350 uint64
+ var x351 uint64
+ x350, x351 = bits.Add64(x329, x326, uint64(p521Uint1(x349)))
+ var x352 uint64
+ var x353 uint64
+ x352, x353 = bits.Add64(x327, x324, uint64(p521Uint1(x351)))
+ var x354 uint64
+ var x355 uint64
+ x354, x355 = bits.Add64(x325, x322, uint64(p521Uint1(x353)))
+ x356 := (uint64(p521Uint1(x355)) + x323)
+ var x357 uint64
+ var x358 uint64
+ x357, x358 = bits.Add64(x303, x338, uint64(0x0))
+ var x359 uint64
+ var x360 uint64
+ x359, x360 = bits.Add64(x305, x340, uint64(p521Uint1(x358)))
+ var x361 uint64
+ var x362 uint64
+ x361, x362 = bits.Add64(x307, x342, uint64(p521Uint1(x360)))
+ var x363 uint64
+ var x364 uint64
+ x363, x364 = bits.Add64(x309, x344, uint64(p521Uint1(x362)))
+ var x365 uint64
+ var x366 uint64
+ x365, x366 = bits.Add64(x311, x346, uint64(p521Uint1(x364)))
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x313, x348, uint64(p521Uint1(x366)))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x315, x350, uint64(p521Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x317, x352, uint64(p521Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x319, x354, uint64(p521Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x321, x356, uint64(p521Uint1(x374)))
+ var x377 uint64
+ var x378 uint64
+ x378, x377 = bits.Mul64(x357, 0x1ff)
+ var x379 uint64
+ var x380 uint64
+ x380, x379 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x381 uint64
+ var x382 uint64
+ x382, x381 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x383 uint64
+ var x384 uint64
+ x384, x383 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x385 uint64
+ var x386 uint64
+ x386, x385 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x387 uint64
+ var x388 uint64
+ x388, x387 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x389 uint64
+ var x390 uint64
+ x390, x389 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x391 uint64
+ var x392 uint64
+ x392, x391 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x393 uint64
+ var x394 uint64
+ x394, x393 = bits.Mul64(x357, 0xffffffffffffffff)
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x394, x391, uint64(0x0))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64(x392, x389, uint64(p521Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x399, x400 = bits.Add64(x390, x387, uint64(p521Uint1(x398)))
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Add64(x388, x385, uint64(p521Uint1(x400)))
+ var x403 uint64
+ var x404 uint64
+ x403, x404 = bits.Add64(x386, x383, uint64(p521Uint1(x402)))
+ var x405 uint64
+ var x406 uint64
+ x405, x406 = bits.Add64(x384, x381, uint64(p521Uint1(x404)))
+ var x407 uint64
+ var x408 uint64
+ x407, x408 = bits.Add64(x382, x379, uint64(p521Uint1(x406)))
+ var x409 uint64
+ var x410 uint64
+ x409, x410 = bits.Add64(x380, x377, uint64(p521Uint1(x408)))
+ x411 := (uint64(p521Uint1(x410)) + x378)
+ var x413 uint64
+ _, x413 = bits.Add64(x357, x393, uint64(0x0))
+ var x414 uint64
+ var x415 uint64
+ x414, x415 = bits.Add64(x359, x395, uint64(p521Uint1(x413)))
+ var x416 uint64
+ var x417 uint64
+ x416, x417 = bits.Add64(x361, x397, uint64(p521Uint1(x415)))
+ var x418 uint64
+ var x419 uint64
+ x418, x419 = bits.Add64(x363, x399, uint64(p521Uint1(x417)))
+ var x420 uint64
+ var x421 uint64
+ x420, x421 = bits.Add64(x365, x401, uint64(p521Uint1(x419)))
+ var x422 uint64
+ var x423 uint64
+ x422, x423 = bits.Add64(x367, x403, uint64(p521Uint1(x421)))
+ var x424 uint64
+ var x425 uint64
+ x424, x425 = bits.Add64(x369, x405, uint64(p521Uint1(x423)))
+ var x426 uint64
+ var x427 uint64
+ x426, x427 = bits.Add64(x371, x407, uint64(p521Uint1(x425)))
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x373, x409, uint64(p521Uint1(x427)))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x375, x411, uint64(p521Uint1(x429)))
+ x432 := (uint64(p521Uint1(x431)) + uint64(p521Uint1(x376)))
+ var x433 uint64
+ var x434 uint64
+ x434, x433 = bits.Mul64(x4, arg1[8])
+ var x435 uint64
+ var x436 uint64
+ x436, x435 = bits.Mul64(x4, arg1[7])
+ var x437 uint64
+ var x438 uint64
+ x438, x437 = bits.Mul64(x4, arg1[6])
+ var x439 uint64
+ var x440 uint64
+ x440, x439 = bits.Mul64(x4, arg1[5])
+ var x441 uint64
+ var x442 uint64
+ x442, x441 = bits.Mul64(x4, arg1[4])
+ var x443 uint64
+ var x444 uint64
+ x444, x443 = bits.Mul64(x4, arg1[3])
+ var x445 uint64
+ var x446 uint64
+ x446, x445 = bits.Mul64(x4, arg1[2])
+ var x447 uint64
+ var x448 uint64
+ x448, x447 = bits.Mul64(x4, arg1[1])
+ var x449 uint64
+ var x450 uint64
+ x450, x449 = bits.Mul64(x4, arg1[0])
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x450, x447, uint64(0x0))
+ var x453 uint64
+ var x454 uint64
+ x453, x454 = bits.Add64(x448, x445, uint64(p521Uint1(x452)))
+ var x455 uint64
+ var x456 uint64
+ x455, x456 = bits.Add64(x446, x443, uint64(p521Uint1(x454)))
+ var x457 uint64
+ var x458 uint64
+ x457, x458 = bits.Add64(x444, x441, uint64(p521Uint1(x456)))
+ var x459 uint64
+ var x460 uint64
+ x459, x460 = bits.Add64(x442, x439, uint64(p521Uint1(x458)))
+ var x461 uint64
+ var x462 uint64
+ x461, x462 = bits.Add64(x440, x437, uint64(p521Uint1(x460)))
+ var x463 uint64
+ var x464 uint64
+ x463, x464 = bits.Add64(x438, x435, uint64(p521Uint1(x462)))
+ var x465 uint64
+ var x466 uint64
+ x465, x466 = bits.Add64(x436, x433, uint64(p521Uint1(x464)))
+ x467 := (uint64(p521Uint1(x466)) + x434)
+ var x468 uint64
+ var x469 uint64
+ x468, x469 = bits.Add64(x414, x449, uint64(0x0))
+ var x470 uint64
+ var x471 uint64
+ x470, x471 = bits.Add64(x416, x451, uint64(p521Uint1(x469)))
+ var x472 uint64
+ var x473 uint64
+ x472, x473 = bits.Add64(x418, x453, uint64(p521Uint1(x471)))
+ var x474 uint64
+ var x475 uint64
+ x474, x475 = bits.Add64(x420, x455, uint64(p521Uint1(x473)))
+ var x476 uint64
+ var x477 uint64
+ x476, x477 = bits.Add64(x422, x457, uint64(p521Uint1(x475)))
+ var x478 uint64
+ var x479 uint64
+ x478, x479 = bits.Add64(x424, x459, uint64(p521Uint1(x477)))
+ var x480 uint64
+ var x481 uint64
+ x480, x481 = bits.Add64(x426, x461, uint64(p521Uint1(x479)))
+ var x482 uint64
+ var x483 uint64
+ x482, x483 = bits.Add64(x428, x463, uint64(p521Uint1(x481)))
+ var x484 uint64
+ var x485 uint64
+ x484, x485 = bits.Add64(x430, x465, uint64(p521Uint1(x483)))
+ var x486 uint64
+ var x487 uint64
+ x486, x487 = bits.Add64(x432, x467, uint64(p521Uint1(x485)))
+ var x488 uint64
+ var x489 uint64
+ x489, x488 = bits.Mul64(x468, 0x1ff)
+ var x490 uint64
+ var x491 uint64
+ x491, x490 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x492 uint64
+ var x493 uint64
+ x493, x492 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x494 uint64
+ var x495 uint64
+ x495, x494 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x496 uint64
+ var x497 uint64
+ x497, x496 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x498 uint64
+ var x499 uint64
+ x499, x498 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x500 uint64
+ var x501 uint64
+ x501, x500 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x502 uint64
+ var x503 uint64
+ x503, x502 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x504 uint64
+ var x505 uint64
+ x505, x504 = bits.Mul64(x468, 0xffffffffffffffff)
+ var x506 uint64
+ var x507 uint64
+ x506, x507 = bits.Add64(x505, x502, uint64(0x0))
+ var x508 uint64
+ var x509 uint64
+ x508, x509 = bits.Add64(x503, x500, uint64(p521Uint1(x507)))
+ var x510 uint64
+ var x511 uint64
+ x510, x511 = bits.Add64(x501, x498, uint64(p521Uint1(x509)))
+ var x512 uint64
+ var x513 uint64
+ x512, x513 = bits.Add64(x499, x496, uint64(p521Uint1(x511)))
+ var x514 uint64
+ var x515 uint64
+ x514, x515 = bits.Add64(x497, x494, uint64(p521Uint1(x513)))
+ var x516 uint64
+ var x517 uint64
+ x516, x517 = bits.Add64(x495, x492, uint64(p521Uint1(x515)))
+ var x518 uint64
+ var x519 uint64
+ x518, x519 = bits.Add64(x493, x490, uint64(p521Uint1(x517)))
+ var x520 uint64
+ var x521 uint64
+ x520, x521 = bits.Add64(x491, x488, uint64(p521Uint1(x519)))
+ x522 := (uint64(p521Uint1(x521)) + x489)
+ var x524 uint64
+ _, x524 = bits.Add64(x468, x504, uint64(0x0))
+ var x525 uint64
+ var x526 uint64
+ x525, x526 = bits.Add64(x470, x506, uint64(p521Uint1(x524)))
+ var x527 uint64
+ var x528 uint64
+ x527, x528 = bits.Add64(x472, x508, uint64(p521Uint1(x526)))
+ var x529 uint64
+ var x530 uint64
+ x529, x530 = bits.Add64(x474, x510, uint64(p521Uint1(x528)))
+ var x531 uint64
+ var x532 uint64
+ x531, x532 = bits.Add64(x476, x512, uint64(p521Uint1(x530)))
+ var x533 uint64
+ var x534 uint64
+ x533, x534 = bits.Add64(x478, x514, uint64(p521Uint1(x532)))
+ var x535 uint64
+ var x536 uint64
+ x535, x536 = bits.Add64(x480, x516, uint64(p521Uint1(x534)))
+ var x537 uint64
+ var x538 uint64
+ x537, x538 = bits.Add64(x482, x518, uint64(p521Uint1(x536)))
+ var x539 uint64
+ var x540 uint64
+ x539, x540 = bits.Add64(x484, x520, uint64(p521Uint1(x538)))
+ var x541 uint64
+ var x542 uint64
+ x541, x542 = bits.Add64(x486, x522, uint64(p521Uint1(x540)))
+ x543 := (uint64(p521Uint1(x542)) + uint64(p521Uint1(x487)))
+ var x544 uint64
+ var x545 uint64
+ x545, x544 = bits.Mul64(x5, arg1[8])
+ var x546 uint64
+ var x547 uint64
+ x547, x546 = bits.Mul64(x5, arg1[7])
+ var x548 uint64
+ var x549 uint64
+ x549, x548 = bits.Mul64(x5, arg1[6])
+ var x550 uint64
+ var x551 uint64
+ x551, x550 = bits.Mul64(x5, arg1[5])
+ var x552 uint64
+ var x553 uint64
+ x553, x552 = bits.Mul64(x5, arg1[4])
+ var x554 uint64
+ var x555 uint64
+ x555, x554 = bits.Mul64(x5, arg1[3])
+ var x556 uint64
+ var x557 uint64
+ x557, x556 = bits.Mul64(x5, arg1[2])
+ var x558 uint64
+ var x559 uint64
+ x559, x558 = bits.Mul64(x5, arg1[1])
+ var x560 uint64
+ var x561 uint64
+ x561, x560 = bits.Mul64(x5, arg1[0])
+ var x562 uint64
+ var x563 uint64
+ x562, x563 = bits.Add64(x561, x558, uint64(0x0))
+ var x564 uint64
+ var x565 uint64
+ x564, x565 = bits.Add64(x559, x556, uint64(p521Uint1(x563)))
+ var x566 uint64
+ var x567 uint64
+ x566, x567 = bits.Add64(x557, x554, uint64(p521Uint1(x565)))
+ var x568 uint64
+ var x569 uint64
+ x568, x569 = bits.Add64(x555, x552, uint64(p521Uint1(x567)))
+ var x570 uint64
+ var x571 uint64
+ x570, x571 = bits.Add64(x553, x550, uint64(p521Uint1(x569)))
+ var x572 uint64
+ var x573 uint64
+ x572, x573 = bits.Add64(x551, x548, uint64(p521Uint1(x571)))
+ var x574 uint64
+ var x575 uint64
+ x574, x575 = bits.Add64(x549, x546, uint64(p521Uint1(x573)))
+ var x576 uint64
+ var x577 uint64
+ x576, x577 = bits.Add64(x547, x544, uint64(p521Uint1(x575)))
+ x578 := (uint64(p521Uint1(x577)) + x545)
+ var x579 uint64
+ var x580 uint64
+ x579, x580 = bits.Add64(x525, x560, uint64(0x0))
+ var x581 uint64
+ var x582 uint64
+ x581, x582 = bits.Add64(x527, x562, uint64(p521Uint1(x580)))
+ var x583 uint64
+ var x584 uint64
+ x583, x584 = bits.Add64(x529, x564, uint64(p521Uint1(x582)))
+ var x585 uint64
+ var x586 uint64
+ x585, x586 = bits.Add64(x531, x566, uint64(p521Uint1(x584)))
+ var x587 uint64
+ var x588 uint64
+ x587, x588 = bits.Add64(x533, x568, uint64(p521Uint1(x586)))
+ var x589 uint64
+ var x590 uint64
+ x589, x590 = bits.Add64(x535, x570, uint64(p521Uint1(x588)))
+ var x591 uint64
+ var x592 uint64
+ x591, x592 = bits.Add64(x537, x572, uint64(p521Uint1(x590)))
+ var x593 uint64
+ var x594 uint64
+ x593, x594 = bits.Add64(x539, x574, uint64(p521Uint1(x592)))
+ var x595 uint64
+ var x596 uint64
+ x595, x596 = bits.Add64(x541, x576, uint64(p521Uint1(x594)))
+ var x597 uint64
+ var x598 uint64
+ x597, x598 = bits.Add64(x543, x578, uint64(p521Uint1(x596)))
+ var x599 uint64
+ var x600 uint64
+ x600, x599 = bits.Mul64(x579, 0x1ff)
+ var x601 uint64
+ var x602 uint64
+ x602, x601 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x603 uint64
+ var x604 uint64
+ x604, x603 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x605 uint64
+ var x606 uint64
+ x606, x605 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x607 uint64
+ var x608 uint64
+ x608, x607 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x609 uint64
+ var x610 uint64
+ x610, x609 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x611 uint64
+ var x612 uint64
+ x612, x611 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x613 uint64
+ var x614 uint64
+ x614, x613 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x615 uint64
+ var x616 uint64
+ x616, x615 = bits.Mul64(x579, 0xffffffffffffffff)
+ var x617 uint64
+ var x618 uint64
+ x617, x618 = bits.Add64(x616, x613, uint64(0x0))
+ var x619 uint64
+ var x620 uint64
+ x619, x620 = bits.Add64(x614, x611, uint64(p521Uint1(x618)))
+ var x621 uint64
+ var x622 uint64
+ x621, x622 = bits.Add64(x612, x609, uint64(p521Uint1(x620)))
+ var x623 uint64
+ var x624 uint64
+ x623, x624 = bits.Add64(x610, x607, uint64(p521Uint1(x622)))
+ var x625 uint64
+ var x626 uint64
+ x625, x626 = bits.Add64(x608, x605, uint64(p521Uint1(x624)))
+ var x627 uint64
+ var x628 uint64
+ x627, x628 = bits.Add64(x606, x603, uint64(p521Uint1(x626)))
+ var x629 uint64
+ var x630 uint64
+ x629, x630 = bits.Add64(x604, x601, uint64(p521Uint1(x628)))
+ var x631 uint64
+ var x632 uint64
+ x631, x632 = bits.Add64(x602, x599, uint64(p521Uint1(x630)))
+ x633 := (uint64(p521Uint1(x632)) + x600)
+ var x635 uint64
+ _, x635 = bits.Add64(x579, x615, uint64(0x0))
+ var x636 uint64
+ var x637 uint64
+ x636, x637 = bits.Add64(x581, x617, uint64(p521Uint1(x635)))
+ var x638 uint64
+ var x639 uint64
+ x638, x639 = bits.Add64(x583, x619, uint64(p521Uint1(x637)))
+ var x640 uint64
+ var x641 uint64
+ x640, x641 = bits.Add64(x585, x621, uint64(p521Uint1(x639)))
+ var x642 uint64
+ var x643 uint64
+ x642, x643 = bits.Add64(x587, x623, uint64(p521Uint1(x641)))
+ var x644 uint64
+ var x645 uint64
+ x644, x645 = bits.Add64(x589, x625, uint64(p521Uint1(x643)))
+ var x646 uint64
+ var x647 uint64
+ x646, x647 = bits.Add64(x591, x627, uint64(p521Uint1(x645)))
+ var x648 uint64
+ var x649 uint64
+ x648, x649 = bits.Add64(x593, x629, uint64(p521Uint1(x647)))
+ var x650 uint64
+ var x651 uint64
+ x650, x651 = bits.Add64(x595, x631, uint64(p521Uint1(x649)))
+ var x652 uint64
+ var x653 uint64
+ x652, x653 = bits.Add64(x597, x633, uint64(p521Uint1(x651)))
+ x654 := (uint64(p521Uint1(x653)) + uint64(p521Uint1(x598)))
+ var x655 uint64
+ var x656 uint64
+ x656, x655 = bits.Mul64(x6, arg1[8])
+ var x657 uint64
+ var x658 uint64
+ x658, x657 = bits.Mul64(x6, arg1[7])
+ var x659 uint64
+ var x660 uint64
+ x660, x659 = bits.Mul64(x6, arg1[6])
+ var x661 uint64
+ var x662 uint64
+ x662, x661 = bits.Mul64(x6, arg1[5])
+ var x663 uint64
+ var x664 uint64
+ x664, x663 = bits.Mul64(x6, arg1[4])
+ var x665 uint64
+ var x666 uint64
+ x666, x665 = bits.Mul64(x6, arg1[3])
+ var x667 uint64
+ var x668 uint64
+ x668, x667 = bits.Mul64(x6, arg1[2])
+ var x669 uint64
+ var x670 uint64
+ x670, x669 = bits.Mul64(x6, arg1[1])
+ var x671 uint64
+ var x672 uint64
+ x672, x671 = bits.Mul64(x6, arg1[0])
+ var x673 uint64
+ var x674 uint64
+ x673, x674 = bits.Add64(x672, x669, uint64(0x0))
+ var x675 uint64
+ var x676 uint64
+ x675, x676 = bits.Add64(x670, x667, uint64(p521Uint1(x674)))
+ var x677 uint64
+ var x678 uint64
+ x677, x678 = bits.Add64(x668, x665, uint64(p521Uint1(x676)))
+ var x679 uint64
+ var x680 uint64
+ x679, x680 = bits.Add64(x666, x663, uint64(p521Uint1(x678)))
+ var x681 uint64
+ var x682 uint64
+ x681, x682 = bits.Add64(x664, x661, uint64(p521Uint1(x680)))
+ var x683 uint64
+ var x684 uint64
+ x683, x684 = bits.Add64(x662, x659, uint64(p521Uint1(x682)))
+ var x685 uint64
+ var x686 uint64
+ x685, x686 = bits.Add64(x660, x657, uint64(p521Uint1(x684)))
+ var x687 uint64
+ var x688 uint64
+ x687, x688 = bits.Add64(x658, x655, uint64(p521Uint1(x686)))
+ x689 := (uint64(p521Uint1(x688)) + x656)
+ var x690 uint64
+ var x691 uint64
+ x690, x691 = bits.Add64(x636, x671, uint64(0x0))
+ var x692 uint64
+ var x693 uint64
+ x692, x693 = bits.Add64(x638, x673, uint64(p521Uint1(x691)))
+ var x694 uint64
+ var x695 uint64
+ x694, x695 = bits.Add64(x640, x675, uint64(p521Uint1(x693)))
+ var x696 uint64
+ var x697 uint64
+ x696, x697 = bits.Add64(x642, x677, uint64(p521Uint1(x695)))
+ var x698 uint64
+ var x699 uint64
+ x698, x699 = bits.Add64(x644, x679, uint64(p521Uint1(x697)))
+ var x700 uint64
+ var x701 uint64
+ x700, x701 = bits.Add64(x646, x681, uint64(p521Uint1(x699)))
+ var x702 uint64
+ var x703 uint64
+ x702, x703 = bits.Add64(x648, x683, uint64(p521Uint1(x701)))
+ var x704 uint64
+ var x705 uint64
+ x704, x705 = bits.Add64(x650, x685, uint64(p521Uint1(x703)))
+ var x706 uint64
+ var x707 uint64
+ x706, x707 = bits.Add64(x652, x687, uint64(p521Uint1(x705)))
+ var x708 uint64
+ var x709 uint64
+ x708, x709 = bits.Add64(x654, x689, uint64(p521Uint1(x707)))
+ var x710 uint64
+ var x711 uint64
+ x711, x710 = bits.Mul64(x690, 0x1ff)
+ var x712 uint64
+ var x713 uint64
+ x713, x712 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x714 uint64
+ var x715 uint64
+ x715, x714 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x716 uint64
+ var x717 uint64
+ x717, x716 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x718 uint64
+ var x719 uint64
+ x719, x718 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x720 uint64
+ var x721 uint64
+ x721, x720 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x722 uint64
+ var x723 uint64
+ x723, x722 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x724 uint64
+ var x725 uint64
+ x725, x724 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x726 uint64
+ var x727 uint64
+ x727, x726 = bits.Mul64(x690, 0xffffffffffffffff)
+ var x728 uint64
+ var x729 uint64
+ x728, x729 = bits.Add64(x727, x724, uint64(0x0))
+ var x730 uint64
+ var x731 uint64
+ x730, x731 = bits.Add64(x725, x722, uint64(p521Uint1(x729)))
+ var x732 uint64
+ var x733 uint64
+ x732, x733 = bits.Add64(x723, x720, uint64(p521Uint1(x731)))
+ var x734 uint64
+ var x735 uint64
+ x734, x735 = bits.Add64(x721, x718, uint64(p521Uint1(x733)))
+ var x736 uint64
+ var x737 uint64
+ x736, x737 = bits.Add64(x719, x716, uint64(p521Uint1(x735)))
+ var x738 uint64
+ var x739 uint64
+ x738, x739 = bits.Add64(x717, x714, uint64(p521Uint1(x737)))
+ var x740 uint64
+ var x741 uint64
+ x740, x741 = bits.Add64(x715, x712, uint64(p521Uint1(x739)))
+ var x742 uint64
+ var x743 uint64
+ x742, x743 = bits.Add64(x713, x710, uint64(p521Uint1(x741)))
+ x744 := (uint64(p521Uint1(x743)) + x711)
+ var x746 uint64
+ _, x746 = bits.Add64(x690, x726, uint64(0x0))
+ var x747 uint64
+ var x748 uint64
+ x747, x748 = bits.Add64(x692, x728, uint64(p521Uint1(x746)))
+ var x749 uint64
+ var x750 uint64
+ x749, x750 = bits.Add64(x694, x730, uint64(p521Uint1(x748)))
+ var x751 uint64
+ var x752 uint64
+ x751, x752 = bits.Add64(x696, x732, uint64(p521Uint1(x750)))
+ var x753 uint64
+ var x754 uint64
+ x753, x754 = bits.Add64(x698, x734, uint64(p521Uint1(x752)))
+ var x755 uint64
+ var x756 uint64
+ x755, x756 = bits.Add64(x700, x736, uint64(p521Uint1(x754)))
+ var x757 uint64
+ var x758 uint64
+ x757, x758 = bits.Add64(x702, x738, uint64(p521Uint1(x756)))
+ var x759 uint64
+ var x760 uint64
+ x759, x760 = bits.Add64(x704, x740, uint64(p521Uint1(x758)))
+ var x761 uint64
+ var x762 uint64
+ x761, x762 = bits.Add64(x706, x742, uint64(p521Uint1(x760)))
+ var x763 uint64
+ var x764 uint64
+ x763, x764 = bits.Add64(x708, x744, uint64(p521Uint1(x762)))
+ x765 := (uint64(p521Uint1(x764)) + uint64(p521Uint1(x709)))
+ var x766 uint64
+ var x767 uint64
+ x767, x766 = bits.Mul64(x7, arg1[8])
+ var x768 uint64
+ var x769 uint64
+ x769, x768 = bits.Mul64(x7, arg1[7])
+ var x770 uint64
+ var x771 uint64
+ x771, x770 = bits.Mul64(x7, arg1[6])
+ var x772 uint64
+ var x773 uint64
+ x773, x772 = bits.Mul64(x7, arg1[5])
+ var x774 uint64
+ var x775 uint64
+ x775, x774 = bits.Mul64(x7, arg1[4])
+ var x776 uint64
+ var x777 uint64
+ x777, x776 = bits.Mul64(x7, arg1[3])
+ var x778 uint64
+ var x779 uint64
+ x779, x778 = bits.Mul64(x7, arg1[2])
+ var x780 uint64
+ var x781 uint64
+ x781, x780 = bits.Mul64(x7, arg1[1])
+ var x782 uint64
+ var x783 uint64
+ x783, x782 = bits.Mul64(x7, arg1[0])
+ var x784 uint64
+ var x785 uint64
+ x784, x785 = bits.Add64(x783, x780, uint64(0x0))
+ var x786 uint64
+ var x787 uint64
+ x786, x787 = bits.Add64(x781, x778, uint64(p521Uint1(x785)))
+ var x788 uint64
+ var x789 uint64
+ x788, x789 = bits.Add64(x779, x776, uint64(p521Uint1(x787)))
+ var x790 uint64
+ var x791 uint64
+ x790, x791 = bits.Add64(x777, x774, uint64(p521Uint1(x789)))
+ var x792 uint64
+ var x793 uint64
+ x792, x793 = bits.Add64(x775, x772, uint64(p521Uint1(x791)))
+ var x794 uint64
+ var x795 uint64
+ x794, x795 = bits.Add64(x773, x770, uint64(p521Uint1(x793)))
+ var x796 uint64
+ var x797 uint64
+ x796, x797 = bits.Add64(x771, x768, uint64(p521Uint1(x795)))
+ var x798 uint64
+ var x799 uint64
+ x798, x799 = bits.Add64(x769, x766, uint64(p521Uint1(x797)))
+ x800 := (uint64(p521Uint1(x799)) + x767)
+ var x801 uint64
+ var x802 uint64
+ x801, x802 = bits.Add64(x747, x782, uint64(0x0))
+ var x803 uint64
+ var x804 uint64
+ x803, x804 = bits.Add64(x749, x784, uint64(p521Uint1(x802)))
+ var x805 uint64
+ var x806 uint64
+ x805, x806 = bits.Add64(x751, x786, uint64(p521Uint1(x804)))
+ var x807 uint64
+ var x808 uint64
+ x807, x808 = bits.Add64(x753, x788, uint64(p521Uint1(x806)))
+ var x809 uint64
+ var x810 uint64
+ x809, x810 = bits.Add64(x755, x790, uint64(p521Uint1(x808)))
+ var x811 uint64
+ var x812 uint64
+ x811, x812 = bits.Add64(x757, x792, uint64(p521Uint1(x810)))
+ var x813 uint64
+ var x814 uint64
+ x813, x814 = bits.Add64(x759, x794, uint64(p521Uint1(x812)))
+ var x815 uint64
+ var x816 uint64
+ x815, x816 = bits.Add64(x761, x796, uint64(p521Uint1(x814)))
+ var x817 uint64
+ var x818 uint64
+ x817, x818 = bits.Add64(x763, x798, uint64(p521Uint1(x816)))
+ var x819 uint64
+ var x820 uint64
+ x819, x820 = bits.Add64(x765, x800, uint64(p521Uint1(x818)))
+ var x821 uint64
+ var x822 uint64
+ x822, x821 = bits.Mul64(x801, 0x1ff)
+ var x823 uint64
+ var x824 uint64
+ x824, x823 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x825 uint64
+ var x826 uint64
+ x826, x825 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x827 uint64
+ var x828 uint64
+ x828, x827 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x829 uint64
+ var x830 uint64
+ x830, x829 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x831 uint64
+ var x832 uint64
+ x832, x831 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x833 uint64
+ var x834 uint64
+ x834, x833 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x835 uint64
+ var x836 uint64
+ x836, x835 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x837 uint64
+ var x838 uint64
+ x838, x837 = bits.Mul64(x801, 0xffffffffffffffff)
+ var x839 uint64
+ var x840 uint64
+ x839, x840 = bits.Add64(x838, x835, uint64(0x0))
+ var x841 uint64
+ var x842 uint64
+ x841, x842 = bits.Add64(x836, x833, uint64(p521Uint1(x840)))
+ var x843 uint64
+ var x844 uint64
+ x843, x844 = bits.Add64(x834, x831, uint64(p521Uint1(x842)))
+ var x845 uint64
+ var x846 uint64
+ x845, x846 = bits.Add64(x832, x829, uint64(p521Uint1(x844)))
+ var x847 uint64
+ var x848 uint64
+ x847, x848 = bits.Add64(x830, x827, uint64(p521Uint1(x846)))
+ var x849 uint64
+ var x850 uint64
+ x849, x850 = bits.Add64(x828, x825, uint64(p521Uint1(x848)))
+ var x851 uint64
+ var x852 uint64
+ x851, x852 = bits.Add64(x826, x823, uint64(p521Uint1(x850)))
+ var x853 uint64
+ var x854 uint64
+ x853, x854 = bits.Add64(x824, x821, uint64(p521Uint1(x852)))
+ x855 := (uint64(p521Uint1(x854)) + x822)
+ var x857 uint64
+ _, x857 = bits.Add64(x801, x837, uint64(0x0))
+ var x858 uint64
+ var x859 uint64
+ x858, x859 = bits.Add64(x803, x839, uint64(p521Uint1(x857)))
+ var x860 uint64
+ var x861 uint64
+ x860, x861 = bits.Add64(x805, x841, uint64(p521Uint1(x859)))
+ var x862 uint64
+ var x863 uint64
+ x862, x863 = bits.Add64(x807, x843, uint64(p521Uint1(x861)))
+ var x864 uint64
+ var x865 uint64
+ x864, x865 = bits.Add64(x809, x845, uint64(p521Uint1(x863)))
+ var x866 uint64
+ var x867 uint64
+ x866, x867 = bits.Add64(x811, x847, uint64(p521Uint1(x865)))
+ var x868 uint64
+ var x869 uint64
+ x868, x869 = bits.Add64(x813, x849, uint64(p521Uint1(x867)))
+ var x870 uint64
+ var x871 uint64
+ x870, x871 = bits.Add64(x815, x851, uint64(p521Uint1(x869)))
+ var x872 uint64
+ var x873 uint64
+ x872, x873 = bits.Add64(x817, x853, uint64(p521Uint1(x871)))
+ var x874 uint64
+ var x875 uint64
+ x874, x875 = bits.Add64(x819, x855, uint64(p521Uint1(x873)))
+ x876 := (uint64(p521Uint1(x875)) + uint64(p521Uint1(x820)))
+ var x877 uint64
+ var x878 uint64
+ x878, x877 = bits.Mul64(x8, arg1[8])
+ var x879 uint64
+ var x880 uint64
+ x880, x879 = bits.Mul64(x8, arg1[7])
+ var x881 uint64
+ var x882 uint64
+ x882, x881 = bits.Mul64(x8, arg1[6])
+ var x883 uint64
+ var x884 uint64
+ x884, x883 = bits.Mul64(x8, arg1[5])
+ var x885 uint64
+ var x886 uint64
+ x886, x885 = bits.Mul64(x8, arg1[4])
+ var x887 uint64
+ var x888 uint64
+ x888, x887 = bits.Mul64(x8, arg1[3])
+ var x889 uint64
+ var x890 uint64
+ x890, x889 = bits.Mul64(x8, arg1[2])
+ var x891 uint64
+ var x892 uint64
+ x892, x891 = bits.Mul64(x8, arg1[1])
+ var x893 uint64
+ var x894 uint64
+ x894, x893 = bits.Mul64(x8, arg1[0])
+ var x895 uint64
+ var x896 uint64
+ x895, x896 = bits.Add64(x894, x891, uint64(0x0))
+ var x897 uint64
+ var x898 uint64
+ x897, x898 = bits.Add64(x892, x889, uint64(p521Uint1(x896)))
+ var x899 uint64
+ var x900 uint64
+ x899, x900 = bits.Add64(x890, x887, uint64(p521Uint1(x898)))
+ var x901 uint64
+ var x902 uint64
+ x901, x902 = bits.Add64(x888, x885, uint64(p521Uint1(x900)))
+ var x903 uint64
+ var x904 uint64
+ x903, x904 = bits.Add64(x886, x883, uint64(p521Uint1(x902)))
+ var x905 uint64
+ var x906 uint64
+ x905, x906 = bits.Add64(x884, x881, uint64(p521Uint1(x904)))
+ var x907 uint64
+ var x908 uint64
+ x907, x908 = bits.Add64(x882, x879, uint64(p521Uint1(x906)))
+ var x909 uint64
+ var x910 uint64
+ x909, x910 = bits.Add64(x880, x877, uint64(p521Uint1(x908)))
+ x911 := (uint64(p521Uint1(x910)) + x878)
+ var x912 uint64
+ var x913 uint64
+ x912, x913 = bits.Add64(x858, x893, uint64(0x0))
+ var x914 uint64
+ var x915 uint64
+ x914, x915 = bits.Add64(x860, x895, uint64(p521Uint1(x913)))
+ var x916 uint64
+ var x917 uint64
+ x916, x917 = bits.Add64(x862, x897, uint64(p521Uint1(x915)))
+ var x918 uint64
+ var x919 uint64
+ x918, x919 = bits.Add64(x864, x899, uint64(p521Uint1(x917)))
+ var x920 uint64
+ var x921 uint64
+ x920, x921 = bits.Add64(x866, x901, uint64(p521Uint1(x919)))
+ var x922 uint64
+ var x923 uint64
+ x922, x923 = bits.Add64(x868, x903, uint64(p521Uint1(x921)))
+ var x924 uint64
+ var x925 uint64
+ x924, x925 = bits.Add64(x870, x905, uint64(p521Uint1(x923)))
+ var x926 uint64
+ var x927 uint64
+ x926, x927 = bits.Add64(x872, x907, uint64(p521Uint1(x925)))
+ var x928 uint64
+ var x929 uint64
+ x928, x929 = bits.Add64(x874, x909, uint64(p521Uint1(x927)))
+ var x930 uint64
+ var x931 uint64
+ x930, x931 = bits.Add64(x876, x911, uint64(p521Uint1(x929)))
+ var x932 uint64
+ var x933 uint64
+ x933, x932 = bits.Mul64(x912, 0x1ff)
+ var x934 uint64
+ var x935 uint64
+ x935, x934 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x936 uint64
+ var x937 uint64
+ x937, x936 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x938 uint64
+ var x939 uint64
+ x939, x938 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x940 uint64
+ var x941 uint64
+ x941, x940 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x942 uint64
+ var x943 uint64
+ x943, x942 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x944 uint64
+ var x945 uint64
+ x945, x944 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x946 uint64
+ var x947 uint64
+ x947, x946 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x948 uint64
+ var x949 uint64
+ x949, x948 = bits.Mul64(x912, 0xffffffffffffffff)
+ var x950 uint64
+ var x951 uint64
+ x950, x951 = bits.Add64(x949, x946, uint64(0x0))
+ var x952 uint64
+ var x953 uint64
+ x952, x953 = bits.Add64(x947, x944, uint64(p521Uint1(x951)))
+ var x954 uint64
+ var x955 uint64
+ x954, x955 = bits.Add64(x945, x942, uint64(p521Uint1(x953)))
+ var x956 uint64
+ var x957 uint64
+ x956, x957 = bits.Add64(x943, x940, uint64(p521Uint1(x955)))
+ var x958 uint64
+ var x959 uint64
+ x958, x959 = bits.Add64(x941, x938, uint64(p521Uint1(x957)))
+ var x960 uint64
+ var x961 uint64
+ x960, x961 = bits.Add64(x939, x936, uint64(p521Uint1(x959)))
+ var x962 uint64
+ var x963 uint64
+ x962, x963 = bits.Add64(x937, x934, uint64(p521Uint1(x961)))
+ var x964 uint64
+ var x965 uint64
+ x964, x965 = bits.Add64(x935, x932, uint64(p521Uint1(x963)))
+ x966 := (uint64(p521Uint1(x965)) + x933)
+ var x968 uint64
+ _, x968 = bits.Add64(x912, x948, uint64(0x0))
+ var x969 uint64
+ var x970 uint64
+ x969, x970 = bits.Add64(x914, x950, uint64(p521Uint1(x968)))
+ var x971 uint64
+ var x972 uint64
+ x971, x972 = bits.Add64(x916, x952, uint64(p521Uint1(x970)))
+ var x973 uint64
+ var x974 uint64
+ x973, x974 = bits.Add64(x918, x954, uint64(p521Uint1(x972)))
+ var x975 uint64
+ var x976 uint64
+ x975, x976 = bits.Add64(x920, x956, uint64(p521Uint1(x974)))
+ var x977 uint64
+ var x978 uint64
+ x977, x978 = bits.Add64(x922, x958, uint64(p521Uint1(x976)))
+ var x979 uint64
+ var x980 uint64
+ x979, x980 = bits.Add64(x924, x960, uint64(p521Uint1(x978)))
+ var x981 uint64
+ var x982 uint64
+ x981, x982 = bits.Add64(x926, x962, uint64(p521Uint1(x980)))
+ var x983 uint64
+ var x984 uint64
+ x983, x984 = bits.Add64(x928, x964, uint64(p521Uint1(x982)))
+ var x985 uint64
+ var x986 uint64
+ x985, x986 = bits.Add64(x930, x966, uint64(p521Uint1(x984)))
+ x987 := (uint64(p521Uint1(x986)) + uint64(p521Uint1(x931)))
+ var x988 uint64
+ var x989 uint64
+ x988, x989 = bits.Sub64(x969, 0xffffffffffffffff, uint64(0x0))
+ var x990 uint64
+ var x991 uint64
+ x990, x991 = bits.Sub64(x971, 0xffffffffffffffff, uint64(p521Uint1(x989)))
+ var x992 uint64
+ var x993 uint64
+ x992, x993 = bits.Sub64(x973, 0xffffffffffffffff, uint64(p521Uint1(x991)))
+ var x994 uint64
+ var x995 uint64
+ x994, x995 = bits.Sub64(x975, 0xffffffffffffffff, uint64(p521Uint1(x993)))
+ var x996 uint64
+ var x997 uint64
+ x996, x997 = bits.Sub64(x977, 0xffffffffffffffff, uint64(p521Uint1(x995)))
+ var x998 uint64
+ var x999 uint64
+ x998, x999 = bits.Sub64(x979, 0xffffffffffffffff, uint64(p521Uint1(x997)))
+ var x1000 uint64
+ var x1001 uint64
+ x1000, x1001 = bits.Sub64(x981, 0xffffffffffffffff, uint64(p521Uint1(x999)))
+ var x1002 uint64
+ var x1003 uint64
+ x1002, x1003 = bits.Sub64(x983, 0xffffffffffffffff, uint64(p521Uint1(x1001)))
+ var x1004 uint64
+ var x1005 uint64
+ x1004, x1005 = bits.Sub64(x985, 0x1ff, uint64(p521Uint1(x1003)))
+ var x1007 uint64
+ _, x1007 = bits.Sub64(x987, uint64(0x0), uint64(p521Uint1(x1005)))
+ var x1008 uint64
+ p521CmovznzU64(&x1008, p521Uint1(x1007), x988, x969)
+ var x1009 uint64
+ p521CmovznzU64(&x1009, p521Uint1(x1007), x990, x971)
+ var x1010 uint64
+ p521CmovznzU64(&x1010, p521Uint1(x1007), x992, x973)
+ var x1011 uint64
+ p521CmovznzU64(&x1011, p521Uint1(x1007), x994, x975)
+ var x1012 uint64
+ p521CmovznzU64(&x1012, p521Uint1(x1007), x996, x977)
+ var x1013 uint64
+ p521CmovznzU64(&x1013, p521Uint1(x1007), x998, x979)
+ var x1014 uint64
+ p521CmovznzU64(&x1014, p521Uint1(x1007), x1000, x981)
+ var x1015 uint64
+ p521CmovznzU64(&x1015, p521Uint1(x1007), x1002, x983)
+ var x1016 uint64
+ p521CmovznzU64(&x1016, p521Uint1(x1007), x1004, x985)
+ out1[0] = x1008
+ out1[1] = x1009
+ out1[2] = x1010
+ out1[3] = x1011
+ out1[4] = x1012
+ out1[5] = x1013
+ out1[6] = x1014
+ out1[7] = x1015
+ out1[8] = x1016
+}
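
In the routine above, each run of bits.Mul64(xN, arg1[...]) followed by an Add64 carry chain is one row of schoolbook multiplication, and each run that multiplies the accumulator's low word (x135, x246, x357, x468, x579, x690, x801, x912) by eight 0xffffffffffffffff words and one 0x1ff word is one step of word-by-word Montgomery reduction by p = 2^521 - 1. Because the low limb of p is 2^64 - 1, p ≡ -1 (mod 2^64), so the Montgomery constant m' = -p^(-1) mod 2^64 is exactly 1 and the quotient word is just the low accumulator word; the closing Sub64/p521CmovznzU64 run is the usual conditional subtraction of p. The math/big sketch below (an illustration, not part of the generated file) checks the divisibility fact that makes each per-word step sound:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 521), one) // p = 2^521 - 1
	word := new(big.Int).Lsh(one, 64)                      // limb size 2^64

	// Arbitrary value standing in for the running accumulator.
	acc, _ := new(big.Int).SetString("123456789abcdef0fedcba9876543210", 16)

	// Quotient word q = (acc * m') mod 2^64, and m' = 1 for this prime.
	q := new(big.Int).Mod(acc, word)

	// acc + q*p ≡ q + q*(-1) ≡ 0 (mod 2^64): the sum is divisible by the
	// limb size, so the carry chain can shift the accumulator down a limb.
	t := new(big.Int).Add(acc, new(big.Int).Mul(q, p))
	fmt.Println(new(big.Int).Mod(t, word).Sign() == 0) // prints: true
}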
+
+// p521Add adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p521Add(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(p521Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(p521Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(p521Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Add64(arg1[4], arg2[4], uint64(p521Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Add64(arg1[5], arg2[5], uint64(p521Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(arg1[6], arg2[6], uint64(p521Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(arg1[7], arg2[7], uint64(p521Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(arg1[8], arg2[8], uint64(p521Uint1(x16)))
+ var x19 uint64
+ var x20 uint64
+ x19, x20 = bits.Sub64(x1, 0xffffffffffffffff, uint64(0x0))
+ var x21 uint64
+ var x22 uint64
+ x21, x22 = bits.Sub64(x3, 0xffffffffffffffff, uint64(p521Uint1(x20)))
+ var x23 uint64
+ var x24 uint64
+ x23, x24 = bits.Sub64(x5, 0xffffffffffffffff, uint64(p521Uint1(x22)))
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Sub64(x7, 0xffffffffffffffff, uint64(p521Uint1(x24)))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Sub64(x9, 0xffffffffffffffff, uint64(p521Uint1(x26)))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Sub64(x11, 0xffffffffffffffff, uint64(p521Uint1(x28)))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Sub64(x13, 0xffffffffffffffff, uint64(p521Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Sub64(x15, 0xffffffffffffffff, uint64(p521Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Sub64(x17, 0x1ff, uint64(p521Uint1(x34)))
+ var x38 uint64
+ _, x38 = bits.Sub64(uint64(p521Uint1(x18)), uint64(0x0), uint64(p521Uint1(x36)))
+ var x39 uint64
+ p521CmovznzU64(&x39, p521Uint1(x38), x19, x1)
+ var x40 uint64
+ p521CmovznzU64(&x40, p521Uint1(x38), x21, x3)
+ var x41 uint64
+ p521CmovznzU64(&x41, p521Uint1(x38), x23, x5)
+ var x42 uint64
+ p521CmovznzU64(&x42, p521Uint1(x38), x25, x7)
+ var x43 uint64
+ p521CmovznzU64(&x43, p521Uint1(x38), x27, x9)
+ var x44 uint64
+ p521CmovznzU64(&x44, p521Uint1(x38), x29, x11)
+ var x45 uint64
+ p521CmovznzU64(&x45, p521Uint1(x38), x31, x13)
+ var x46 uint64
+ p521CmovznzU64(&x46, p521Uint1(x38), x33, x15)
+ var x47 uint64
+ p521CmovznzU64(&x47, p521Uint1(x38), x35, x17)
+ out1[0] = x39
+ out1[1] = x40
+ out1[2] = x41
+ out1[3] = x42
+ out1[4] = x43
+ out1[5] = x44
+ out1[6] = x45
+ out1[7] = x46
+ out1[8] = x47
+}
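
The addition above follows the standard add-then-conditionally-subtract shape: x1..x18 compute the nine-limb sum with carries, x19..x36 speculatively subtract p = 2^521 - 1, and the final borrow x38 drives p521CmovznzU64 to keep whichever result is in range, branch-free. A hypothetical branchy restatement with math/big (an illustration, not the upstream code):

package sketch

import "math/big"

// modAdd is a hypothetical restatement of p521Add's math; the real code is
// branch-free over nine 64-bit limbs.
func modAdd(a, b, p *big.Int) *big.Int {
	s := new(big.Int).Add(a, b) // x1..x18: nine-limb add with carries
	if s.Cmp(p) >= 0 {          // x19..x38: borrow of the trial s - p
		s.Sub(s, p)             // x39..x47: keep s - p only when s >= p
	}
	return s
}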
+
+// p521Sub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func p521Sub(out1 *p521MontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement, arg2 *p521MontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(p521Uint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(p521Uint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(p521Uint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(arg1[4], arg2[4], uint64(p521Uint1(x8)))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(arg1[5], arg2[5], uint64(p521Uint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(arg1[6], arg2[6], uint64(p521Uint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(arg1[7], arg2[7], uint64(p521Uint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Sub64(arg1[8], arg2[8], uint64(p521Uint1(x16)))
+ var x19 uint64
+ p521CmovznzU64(&x19, p521Uint1(x18), uint64(0x0), 0xffffffffffffffff)
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x1, x19, uint64(0x0))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x3, x19, uint64(p521Uint1(x21)))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x5, x19, uint64(p521Uint1(x23)))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x7, x19, uint64(p521Uint1(x25)))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x9, x19, uint64(p521Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x11, x19, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x13, x19, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x15, x19, uint64(p521Uint1(x33)))
+ var x36 uint64
+ x36, _ = bits.Add64(x17, (x19 & 0x1ff), uint64(p521Uint1(x35)))
+ out1[0] = x20
+ out1[1] = x22
+ out1[2] = x24
+ out1[3] = x26
+ out1[4] = x28
+ out1[5] = x30
+ out1[6] = x32
+ out1[7] = x34
+ out1[8] = x36
+}
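
Subtraction runs the mirror-image pattern: x1..x18 subtract with borrows, and the final borrow x18 is stretched into the mask x19 (zero or all ones) by p521CmovznzU64. Adding x19 to the low eight limbs and (x19 & 0x1ff) to the top limb adds exactly (2^512 - 1) + 0x1ff·2^512 = 2^521 - 1 = p when a borrow occurred, and nothing otherwise. A hypothetical branchy equivalent (illustration only, not the upstream code):

package sketch

import "math/big"

// modSub is a hypothetical restatement of p521Sub: subtract, then add p back
// exactly when the subtraction borrowed (the mask x19 above, branch-free).
func modSub(a, b, p *big.Int) *big.Int {
	d := new(big.Int).Sub(a, b)
	if d.Sign() < 0 { // stands in for the borrow-derived all-ones mask
		d.Add(d, p)
	}
	return d
}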
+
+// p521SetOne returns the field element one in the Montgomery domain.
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = 1 mod m
+// 0 ≤ eval out1 < m
+func p521SetOne(out1 *p521MontgomeryDomainFieldElement) {
+ out1[0] = 0x80000000000000
+ out1[1] = uint64(0x0)
+ out1[2] = uint64(0x0)
+ out1[3] = uint64(0x0)
+ out1[4] = uint64(0x0)
+ out1[5] = uint64(0x0)
+ out1[6] = uint64(0x0)
+ out1[7] = uint64(0x0)
+ out1[8] = uint64(0x0)
+}
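
The constant 0x80000000000000 is 2^55, the Montgomery form of 1: the radix here is R = 2^(9·64) = 2^576, and since 2^576 = 2^55·(p + 1) for p = 2^521 - 1, R mod p = 2^55. A quick math/big check (a side calculation, not in the upstream file):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 521), one) // p = 2^521 - 1
	r := new(big.Int).Lsh(one, 576)                        // R = 2^(9*64)
	fmt.Printf("%#x\n", new(big.Int).Mod(r, p))            // 0x80000000000000
}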
+
+// p521FromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^9) mod m
+// 0 ≤ eval out1 < m
+func p521FromMontgomery(out1 *p521NonMontgomeryDomainFieldElement, arg1 *p521MontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ var x3 uint64
+ x3, x2 = bits.Mul64(x1, 0x1ff)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x11, x10 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x12 uint64
+ var x13 uint64
+ x13, x12 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x14 uint64
+ var x15 uint64
+ x15, x14 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x16 uint64
+ var x17 uint64
+ x17, x16 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x18 uint64
+ var x19 uint64
+ x19, x18 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x20 uint64
+ var x21 uint64
+ x20, x21 = bits.Add64(x19, x16, uint64(0x0))
+ var x22 uint64
+ var x23 uint64
+ x22, x23 = bits.Add64(x17, x14, uint64(p521Uint1(x21)))
+ var x24 uint64
+ var x25 uint64
+ x24, x25 = bits.Add64(x15, x12, uint64(p521Uint1(x23)))
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x13, x10, uint64(p521Uint1(x25)))
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x11, x8, uint64(p521Uint1(x27)))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64(x9, x6, uint64(p521Uint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x7, x4, uint64(p521Uint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x5, x2, uint64(p521Uint1(x33)))
+ var x37 uint64
+ _, x37 = bits.Add64(x1, x18, uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(uint64(0x0), x20, uint64(p521Uint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(uint64(0x0), x22, uint64(p521Uint1(x39)))
+ var x42 uint64
+ var x43 uint64
+ x42, x43 = bits.Add64(uint64(0x0), x24, uint64(p521Uint1(x41)))
+ var x44 uint64
+ var x45 uint64
+ x44, x45 = bits.Add64(uint64(0x0), x26, uint64(p521Uint1(x43)))
+ var x46 uint64
+ var x47 uint64
+ x46, x47 = bits.Add64(uint64(0x0), x28, uint64(p521Uint1(x45)))
+ var x48 uint64
+ var x49 uint64
+ x48, x49 = bits.Add64(uint64(0x0), x30, uint64(p521Uint1(x47)))
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(uint64(0x0), x32, uint64(p521Uint1(x49)))
+ var x52 uint64
+ var x53 uint64
+ x52, x53 = bits.Add64(uint64(0x0), x34, uint64(p521Uint1(x51)))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x38, arg1[1], uint64(0x0))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x40, uint64(0x0), uint64(p521Uint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x42, uint64(0x0), uint64(p521Uint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x44, uint64(0x0), uint64(p521Uint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x46, uint64(0x0), uint64(p521Uint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x48, uint64(0x0), uint64(p521Uint1(x63)))
+ var x66 uint64
+ var x67 uint64
+ x66, x67 = bits.Add64(x50, uint64(0x0), uint64(p521Uint1(x65)))
+ var x68 uint64
+ var x69 uint64
+ x68, x69 = bits.Add64(x52, uint64(0x0), uint64(p521Uint1(x67)))
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x54, 0x1ff)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x74 uint64
+ var x75 uint64
+ x75, x74 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x76 uint64
+ var x77 uint64
+ x77, x76 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x78 uint64
+ var x79 uint64
+ x79, x78 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x80 uint64
+ var x81 uint64
+ x81, x80 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x82 uint64
+ var x83 uint64
+ x83, x82 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x84 uint64
+ var x85 uint64
+ x85, x84 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x86 uint64
+ var x87 uint64
+ x87, x86 = bits.Mul64(x54, 0xffffffffffffffff)
+ var x88 uint64
+ var x89 uint64
+ x88, x89 = bits.Add64(x87, x84, uint64(0x0))
+ var x90 uint64
+ var x91 uint64
+ x90, x91 = bits.Add64(x85, x82, uint64(p521Uint1(x89)))
+ var x92 uint64
+ var x93 uint64
+ x92, x93 = bits.Add64(x83, x80, uint64(p521Uint1(x91)))
+ var x94 uint64
+ var x95 uint64
+ x94, x95 = bits.Add64(x81, x78, uint64(p521Uint1(x93)))
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x79, x76, uint64(p521Uint1(x95)))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x77, x74, uint64(p521Uint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x75, x72, uint64(p521Uint1(x99)))
+ var x102 uint64
+ var x103 uint64
+ x102, x103 = bits.Add64(x73, x70, uint64(p521Uint1(x101)))
+ var x105 uint64
+ _, x105 = bits.Add64(x54, x86, uint64(0x0))
+ var x106 uint64
+ var x107 uint64
+ x106, x107 = bits.Add64(x56, x88, uint64(p521Uint1(x105)))
+ var x108 uint64
+ var x109 uint64
+ x108, x109 = bits.Add64(x58, x90, uint64(p521Uint1(x107)))
+ var x110 uint64
+ var x111 uint64
+ x110, x111 = bits.Add64(x60, x92, uint64(p521Uint1(x109)))
+ var x112 uint64
+ var x113 uint64
+ x112, x113 = bits.Add64(x62, x94, uint64(p521Uint1(x111)))
+ var x114 uint64
+ var x115 uint64
+ x114, x115 = bits.Add64(x64, x96, uint64(p521Uint1(x113)))
+ var x116 uint64
+ var x117 uint64
+ x116, x117 = bits.Add64(x66, x98, uint64(p521Uint1(x115)))
+ var x118 uint64
+ var x119 uint64
+ x118, x119 = bits.Add64(x68, x100, uint64(p521Uint1(x117)))
+ var x120 uint64
+ var x121 uint64
+ x120, x121 = bits.Add64((uint64(p521Uint1(x69)) + (uint64(p521Uint1(x53)) + (uint64(p521Uint1(x35)) + x3))), x102, uint64(p521Uint1(x119)))
+ var x122 uint64
+ var x123 uint64
+ x122, x123 = bits.Add64(x106, arg1[2], uint64(0x0))
+ var x124 uint64
+ var x125 uint64
+ x124, x125 = bits.Add64(x108, uint64(0x0), uint64(p521Uint1(x123)))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x110, uint64(0x0), uint64(p521Uint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x112, uint64(0x0), uint64(p521Uint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x114, uint64(0x0), uint64(p521Uint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x116, uint64(0x0), uint64(p521Uint1(x131)))
+ var x134 uint64
+ var x135 uint64
+ x134, x135 = bits.Add64(x118, uint64(0x0), uint64(p521Uint1(x133)))
+ var x136 uint64
+ var x137 uint64
+ x136, x137 = bits.Add64(x120, uint64(0x0), uint64(p521Uint1(x135)))
+ var x138 uint64
+ var x139 uint64
+ x139, x138 = bits.Mul64(x122, 0x1ff)
+ var x140 uint64
+ var x141 uint64
+ x141, x140 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x142 uint64
+ var x143 uint64
+ x143, x142 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x144 uint64
+ var x145 uint64
+ x145, x144 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x146 uint64
+ var x147 uint64
+ x147, x146 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x148 uint64
+ var x149 uint64
+ x149, x148 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x150 uint64
+ var x151 uint64
+ x151, x150 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x152 uint64
+ var x153 uint64
+ x153, x152 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x154 uint64
+ var x155 uint64
+ x155, x154 = bits.Mul64(x122, 0xffffffffffffffff)
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x155, x152, uint64(0x0))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x153, x150, uint64(p521Uint1(x157)))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Add64(x151, x148, uint64(p521Uint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Add64(x149, x146, uint64(p521Uint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Add64(x147, x144, uint64(p521Uint1(x163)))
+ var x166 uint64
+ var x167 uint64
+ x166, x167 = bits.Add64(x145, x142, uint64(p521Uint1(x165)))
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x143, x140, uint64(p521Uint1(x167)))
+ var x170 uint64
+ var x171 uint64
+ x170, x171 = bits.Add64(x141, x138, uint64(p521Uint1(x169)))
+ var x173 uint64
+ _, x173 = bits.Add64(x122, x154, uint64(0x0))
+ var x174 uint64
+ var x175 uint64
+ x174, x175 = bits.Add64(x124, x156, uint64(p521Uint1(x173)))
+ var x176 uint64
+ var x177 uint64
+ x176, x177 = bits.Add64(x126, x158, uint64(p521Uint1(x175)))
+ var x178 uint64
+ var x179 uint64
+ x178, x179 = bits.Add64(x128, x160, uint64(p521Uint1(x177)))
+ var x180 uint64
+ var x181 uint64
+ x180, x181 = bits.Add64(x130, x162, uint64(p521Uint1(x179)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Add64(x132, x164, uint64(p521Uint1(x181)))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Add64(x134, x166, uint64(p521Uint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Add64(x136, x168, uint64(p521Uint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Add64((uint64(p521Uint1(x137)) + (uint64(p521Uint1(x121)) + (uint64(p521Uint1(x103)) + x71))), x170, uint64(p521Uint1(x187)))
+ var x190 uint64
+ var x191 uint64
+ x190, x191 = bits.Add64(x174, arg1[3], uint64(0x0))
+ var x192 uint64
+ var x193 uint64
+ x192, x193 = bits.Add64(x176, uint64(0x0), uint64(p521Uint1(x191)))
+ var x194 uint64
+ var x195 uint64
+ x194, x195 = bits.Add64(x178, uint64(0x0), uint64(p521Uint1(x193)))
+ var x196 uint64
+ var x197 uint64
+ x196, x197 = bits.Add64(x180, uint64(0x0), uint64(p521Uint1(x195)))
+ var x198 uint64
+ var x199 uint64
+ x198, x199 = bits.Add64(x182, uint64(0x0), uint64(p521Uint1(x197)))
+ var x200 uint64
+ var x201 uint64
+ x200, x201 = bits.Add64(x184, uint64(0x0), uint64(p521Uint1(x199)))
+ var x202 uint64
+ var x203 uint64
+ x202, x203 = bits.Add64(x186, uint64(0x0), uint64(p521Uint1(x201)))
+ var x204 uint64
+ var x205 uint64
+ x204, x205 = bits.Add64(x188, uint64(0x0), uint64(p521Uint1(x203)))
+ var x206 uint64
+ var x207 uint64
+ x207, x206 = bits.Mul64(x190, 0x1ff)
+ var x208 uint64
+ var x209 uint64
+ x209, x208 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x210 uint64
+ var x211 uint64
+ x211, x210 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x212 uint64
+ var x213 uint64
+ x213, x212 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x214 uint64
+ var x215 uint64
+ x215, x214 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x216 uint64
+ var x217 uint64
+ x217, x216 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x218 uint64
+ var x219 uint64
+ x219, x218 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x220 uint64
+ var x221 uint64
+ x221, x220 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x222 uint64
+ var x223 uint64
+ x223, x222 = bits.Mul64(x190, 0xffffffffffffffff)
+ var x224 uint64
+ var x225 uint64
+ x224, x225 = bits.Add64(x223, x220, uint64(0x0))
+ var x226 uint64
+ var x227 uint64
+ x226, x227 = bits.Add64(x221, x218, uint64(p521Uint1(x225)))
+ var x228 uint64
+ var x229 uint64
+ x228, x229 = bits.Add64(x219, x216, uint64(p521Uint1(x227)))
+ var x230 uint64
+ var x231 uint64
+ x230, x231 = bits.Add64(x217, x214, uint64(p521Uint1(x229)))
+ var x232 uint64
+ var x233 uint64
+ x232, x233 = bits.Add64(x215, x212, uint64(p521Uint1(x231)))
+ var x234 uint64
+ var x235 uint64
+ x234, x235 = bits.Add64(x213, x210, uint64(p521Uint1(x233)))
+ var x236 uint64
+ var x237 uint64
+ x236, x237 = bits.Add64(x211, x208, uint64(p521Uint1(x235)))
+ var x238 uint64
+ var x239 uint64
+ x238, x239 = bits.Add64(x209, x206, uint64(p521Uint1(x237)))
+ var x241 uint64
+ _, x241 = bits.Add64(x190, x222, uint64(0x0))
+ var x242 uint64
+ var x243 uint64
+ x242, x243 = bits.Add64(x192, x224, uint64(p521Uint1(x241)))
+ var x244 uint64
+ var x245 uint64
+ x244, x245 = bits.Add64(x194, x226, uint64(p521Uint1(x243)))
+ var x246 uint64
+ var x247 uint64
+ x246, x247 = bits.Add64(x196, x228, uint64(p521Uint1(x245)))
+ var x248 uint64
+ var x249 uint64
+ x248, x249 = bits.Add64(x198, x230, uint64(p521Uint1(x247)))
+ var x250 uint64
+ var x251 uint64
+ x250, x251 = bits.Add64(x200, x232, uint64(p521Uint1(x249)))
+ var x252 uint64
+ var x253 uint64
+ x252, x253 = bits.Add64(x202, x234, uint64(p521Uint1(x251)))
+ var x254 uint64
+ var x255 uint64
+ x254, x255 = bits.Add64(x204, x236, uint64(p521Uint1(x253)))
+ var x256 uint64
+ var x257 uint64
+ x256, x257 = bits.Add64((uint64(p521Uint1(x205)) + (uint64(p521Uint1(x189)) + (uint64(p521Uint1(x171)) + x139))), x238, uint64(p521Uint1(x255)))
+ var x258 uint64
+ var x259 uint64
+ x258, x259 = bits.Add64(x242, arg1[4], uint64(0x0))
+ var x260 uint64
+ var x261 uint64
+ x260, x261 = bits.Add64(x244, uint64(0x0), uint64(p521Uint1(x259)))
+ var x262 uint64
+ var x263 uint64
+ x262, x263 = bits.Add64(x246, uint64(0x0), uint64(p521Uint1(x261)))
+ var x264 uint64
+ var x265 uint64
+ x264, x265 = bits.Add64(x248, uint64(0x0), uint64(p521Uint1(x263)))
+ var x266 uint64
+ var x267 uint64
+ x266, x267 = bits.Add64(x250, uint64(0x0), uint64(p521Uint1(x265)))
+ var x268 uint64
+ var x269 uint64
+ x268, x269 = bits.Add64(x252, uint64(0x0), uint64(p521Uint1(x267)))
+ var x270 uint64
+ var x271 uint64
+ x270, x271 = bits.Add64(x254, uint64(0x0), uint64(p521Uint1(x269)))
+ var x272 uint64
+ var x273 uint64
+ x272, x273 = bits.Add64(x256, uint64(0x0), uint64(p521Uint1(x271)))
+ var x274 uint64
+ var x275 uint64
+ x275, x274 = bits.Mul64(x258, 0x1ff)
+ var x276 uint64
+ var x277 uint64
+ x277, x276 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x278 uint64
+ var x279 uint64
+ x279, x278 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x280 uint64
+ var x281 uint64
+ x281, x280 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x282 uint64
+ var x283 uint64
+ x283, x282 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x284 uint64
+ var x285 uint64
+ x285, x284 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x286 uint64
+ var x287 uint64
+ x287, x286 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x288 uint64
+ var x289 uint64
+ x289, x288 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x290 uint64
+ var x291 uint64
+ x291, x290 = bits.Mul64(x258, 0xffffffffffffffff)
+ var x292 uint64
+ var x293 uint64
+ x292, x293 = bits.Add64(x291, x288, uint64(0x0))
+ var x294 uint64
+ var x295 uint64
+ x294, x295 = bits.Add64(x289, x286, uint64(p521Uint1(x293)))
+ var x296 uint64
+ var x297 uint64
+ x296, x297 = bits.Add64(x287, x284, uint64(p521Uint1(x295)))
+ var x298 uint64
+ var x299 uint64
+ x298, x299 = bits.Add64(x285, x282, uint64(p521Uint1(x297)))
+ var x300 uint64
+ var x301 uint64
+ x300, x301 = bits.Add64(x283, x280, uint64(p521Uint1(x299)))
+ var x302 uint64
+ var x303 uint64
+ x302, x303 = bits.Add64(x281, x278, uint64(p521Uint1(x301)))
+ var x304 uint64
+ var x305 uint64
+ x304, x305 = bits.Add64(x279, x276, uint64(p521Uint1(x303)))
+ var x306 uint64
+ var x307 uint64
+ x306, x307 = bits.Add64(x277, x274, uint64(p521Uint1(x305)))
+ var x309 uint64
+ _, x309 = bits.Add64(x258, x290, uint64(0x0))
+ var x310 uint64
+ var x311 uint64
+ x310, x311 = bits.Add64(x260, x292, uint64(p521Uint1(x309)))
+ var x312 uint64
+ var x313 uint64
+ x312, x313 = bits.Add64(x262, x294, uint64(p521Uint1(x311)))
+ var x314 uint64
+ var x315 uint64
+ x314, x315 = bits.Add64(x264, x296, uint64(p521Uint1(x313)))
+ var x316 uint64
+ var x317 uint64
+ x316, x317 = bits.Add64(x266, x298, uint64(p521Uint1(x315)))
+ var x318 uint64
+ var x319 uint64
+ x318, x319 = bits.Add64(x268, x300, uint64(p521Uint1(x317)))
+ var x320 uint64
+ var x321 uint64
+ x320, x321 = bits.Add64(x270, x302, uint64(p521Uint1(x319)))
+ var x322 uint64
+ var x323 uint64
+ x322, x323 = bits.Add64(x272, x304, uint64(p521Uint1(x321)))
+ var x324 uint64
+ var x325 uint64
+ x324, x325 = bits.Add64((uint64(p521Uint1(x273)) + (uint64(p521Uint1(x257)) + (uint64(p521Uint1(x239)) + x207))), x306, uint64(p521Uint1(x323)))
+ var x326 uint64
+ var x327 uint64
+ x326, x327 = bits.Add64(x310, arg1[5], uint64(0x0))
+ var x328 uint64
+ var x329 uint64
+ x328, x329 = bits.Add64(x312, uint64(0x0), uint64(p521Uint1(x327)))
+ var x330 uint64
+ var x331 uint64
+ x330, x331 = bits.Add64(x314, uint64(0x0), uint64(p521Uint1(x329)))
+ var x332 uint64
+ var x333 uint64
+ x332, x333 = bits.Add64(x316, uint64(0x0), uint64(p521Uint1(x331)))
+ var x334 uint64
+ var x335 uint64
+ x334, x335 = bits.Add64(x318, uint64(0x0), uint64(p521Uint1(x333)))
+ var x336 uint64
+ var x337 uint64
+ x336, x337 = bits.Add64(x320, uint64(0x0), uint64(p521Uint1(x335)))
+ var x338 uint64
+ var x339 uint64
+ x338, x339 = bits.Add64(x322, uint64(0x0), uint64(p521Uint1(x337)))
+ var x340 uint64
+ var x341 uint64
+ x340, x341 = bits.Add64(x324, uint64(0x0), uint64(p521Uint1(x339)))
+ var x342 uint64
+ var x343 uint64
+ x343, x342 = bits.Mul64(x326, 0x1ff)
+ var x344 uint64
+ var x345 uint64
+ x345, x344 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x346 uint64
+ var x347 uint64
+ x347, x346 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x348 uint64
+ var x349 uint64
+ x349, x348 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x350 uint64
+ var x351 uint64
+ x351, x350 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x352 uint64
+ var x353 uint64
+ x353, x352 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x354 uint64
+ var x355 uint64
+ x355, x354 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x356 uint64
+ var x357 uint64
+ x357, x356 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x358 uint64
+ var x359 uint64
+ x359, x358 = bits.Mul64(x326, 0xffffffffffffffff)
+ var x360 uint64
+ var x361 uint64
+ x360, x361 = bits.Add64(x359, x356, uint64(0x0))
+ var x362 uint64
+ var x363 uint64
+ x362, x363 = bits.Add64(x357, x354, uint64(p521Uint1(x361)))
+ var x364 uint64
+ var x365 uint64
+ x364, x365 = bits.Add64(x355, x352, uint64(p521Uint1(x363)))
+ var x366 uint64
+ var x367 uint64
+ x366, x367 = bits.Add64(x353, x350, uint64(p521Uint1(x365)))
+ var x368 uint64
+ var x369 uint64
+ x368, x369 = bits.Add64(x351, x348, uint64(p521Uint1(x367)))
+ var x370 uint64
+ var x371 uint64
+ x370, x371 = bits.Add64(x349, x346, uint64(p521Uint1(x369)))
+ var x372 uint64
+ var x373 uint64
+ x372, x373 = bits.Add64(x347, x344, uint64(p521Uint1(x371)))
+ var x374 uint64
+ var x375 uint64
+ x374, x375 = bits.Add64(x345, x342, uint64(p521Uint1(x373)))
+ var x377 uint64
+ _, x377 = bits.Add64(x326, x358, uint64(0x0))
+ var x378 uint64
+ var x379 uint64
+ x378, x379 = bits.Add64(x328, x360, uint64(p521Uint1(x377)))
+ var x380 uint64
+ var x381 uint64
+ x380, x381 = bits.Add64(x330, x362, uint64(p521Uint1(x379)))
+ var x382 uint64
+ var x383 uint64
+ x382, x383 = bits.Add64(x332, x364, uint64(p521Uint1(x381)))
+ var x384 uint64
+ var x385 uint64
+ x384, x385 = bits.Add64(x334, x366, uint64(p521Uint1(x383)))
+ var x386 uint64
+ var x387 uint64
+ x386, x387 = bits.Add64(x336, x368, uint64(p521Uint1(x385)))
+ var x388 uint64
+ var x389 uint64
+ x388, x389 = bits.Add64(x338, x370, uint64(p521Uint1(x387)))
+ var x390 uint64
+ var x391 uint64
+ x390, x391 = bits.Add64(x340, x372, uint64(p521Uint1(x389)))
+ var x392 uint64
+ var x393 uint64
+ x392, x393 = bits.Add64((uint64(p521Uint1(x341)) + (uint64(p521Uint1(x325)) + (uint64(p521Uint1(x307)) + x275))), x374, uint64(p521Uint1(x391)))
+ var x394 uint64
+ var x395 uint64
+ x394, x395 = bits.Add64(x378, arg1[6], uint64(0x0))
+ var x396 uint64
+ var x397 uint64
+ x396, x397 = bits.Add64(x380, uint64(0x0), uint64(p521Uint1(x395)))
+ var x398 uint64
+ var x399 uint64
+ x398, x399 = bits.Add64(x382, uint64(0x0), uint64(p521Uint1(x397)))
+ var x400 uint64
+ var x401 uint64
+ x400, x401 = bits.Add64(x384, uint64(0x0), uint64(p521Uint1(x399)))
+ var x402 uint64
+ var x403 uint64
+ x402, x403 = bits.Add64(x386, uint64(0x0), uint64(p521Uint1(x401)))
+ var x404 uint64
+ var x405 uint64
+ x404, x405 = bits.Add64(x388, uint64(0x0), uint64(p521Uint1(x403)))
+ var x406 uint64
+ var x407 uint64
+ x406, x407 = bits.Add64(x390, uint64(0x0), uint64(p521Uint1(x405)))
+ var x408 uint64
+ var x409 uint64
+ x408, x409 = bits.Add64(x392, uint64(0x0), uint64(p521Uint1(x407)))
+ var x410 uint64
+ var x411 uint64
+ x411, x410 = bits.Mul64(x394, 0x1ff)
+ var x412 uint64
+ var x413 uint64
+ x413, x412 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x414 uint64
+ var x415 uint64
+ x415, x414 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x416 uint64
+ var x417 uint64
+ x417, x416 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x418 uint64
+ var x419 uint64
+ x419, x418 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x420 uint64
+ var x421 uint64
+ x421, x420 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x422 uint64
+ var x423 uint64
+ x423, x422 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x424 uint64
+ var x425 uint64
+ x425, x424 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x426 uint64
+ var x427 uint64
+ x427, x426 = bits.Mul64(x394, 0xffffffffffffffff)
+ var x428 uint64
+ var x429 uint64
+ x428, x429 = bits.Add64(x427, x424, uint64(0x0))
+ var x430 uint64
+ var x431 uint64
+ x430, x431 = bits.Add64(x425, x422, uint64(p521Uint1(x429)))
+ var x432 uint64
+ var x433 uint64
+ x432, x433 = bits.Add64(x423, x420, uint64(p521Uint1(x431)))
+ var x434 uint64
+ var x435 uint64
+ x434, x435 = bits.Add64(x421, x418, uint64(p521Uint1(x433)))
+ var x436 uint64
+ var x437 uint64
+ x436, x437 = bits.Add64(x419, x416, uint64(p521Uint1(x435)))
+ var x438 uint64
+ var x439 uint64
+ x438, x439 = bits.Add64(x417, x414, uint64(p521Uint1(x437)))
+ var x440 uint64
+ var x441 uint64
+ x440, x441 = bits.Add64(x415, x412, uint64(p521Uint1(x439)))
+ var x442 uint64
+ var x443 uint64
+ x442, x443 = bits.Add64(x413, x410, uint64(p521Uint1(x441)))
+ var x445 uint64
+ _, x445 = bits.Add64(x394, x426, uint64(0x0))
+ var x446 uint64
+ var x447 uint64
+ x446, x447 = bits.Add64(x396, x428, uint64(p521Uint1(x445)))
+ var x448 uint64
+ var x449 uint64
+ x448, x449 = bits.Add64(x398, x430, uint64(p521Uint1(x447)))
+ var x450 uint64
+ var x451 uint64
+ x450, x451 = bits.Add64(x400, x432, uint64(p521Uint1(x449)))
+ var x452 uint64
+ var x453 uint64
+ x452, x453 = bits.Add64(x402, x434, uint64(p521Uint1(x451)))
+ var x454 uint64
+ var x455 uint64
+ x454, x455 = bits.Add64(x404, x436, uint64(p521Uint1(x453)))
+ var x456 uint64
+ var x457 uint64
+ x456, x457 = bits.Add64(x406, x438, uint64(p521Uint1(x455)))
+ var x458 uint64
+ var x459 uint64
+ x458, x459 = bits.Add64(x408, x440, uint64(p521Uint1(x457)))
+ var x460 uint64
+ var x461 uint64
+ x460, x461 = bits.Add64((uint64(p521Uint1(x409)) + (uint64(p521Uint1(x393)) + (uint64(p521Uint1(x375)) + x343))), x442, uint64(p521Uint1(x459)))
+ var x462 uint64
+ var x463 uint64
+ x462, x463 = bits.Add64(x446, arg1[7], uint64(0x0))
+ var x464 uint64
+ var x465 uint64
+ x464, x465 = bits.Add64(x448, uint64(0x0), uint64(p521Uint1(x463)))
+ var x466 uint64
+ var x467 uint64
+ x466, x467 = bits.Add64(x450, uint64(0x0), uint64(p521Uint1(x465)))
+ var x468 uint64
+ var x469 uint64
+ x468, x469 = bits.Add64(x452, uint64(0x0), uint64(p521Uint1(x467)))
+ var x470 uint64
+ var x471 uint64
+ x470, x471 = bits.Add64(x454, uint64(0x0), uint64(p521Uint1(x469)))
+ var x472 uint64
+ var x473 uint64
+ x472, x473 = bits.Add64(x456, uint64(0x0), uint64(p521Uint1(x471)))
+ var x474 uint64
+ var x475 uint64
+ x474, x475 = bits.Add64(x458, uint64(0x0), uint64(p521Uint1(x473)))
+ var x476 uint64
+ var x477 uint64
+ x476, x477 = bits.Add64(x460, uint64(0x0), uint64(p521Uint1(x475)))
+ var x478 uint64
+ var x479 uint64
+ x479, x478 = bits.Mul64(x462, 0x1ff)
+ var x480 uint64
+ var x481 uint64
+ x481, x480 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x482 uint64
+ var x483 uint64
+ x483, x482 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x484 uint64
+ var x485 uint64
+ x485, x484 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x486 uint64
+ var x487 uint64
+ x487, x486 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x488 uint64
+ var x489 uint64
+ x489, x488 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x490 uint64
+ var x491 uint64
+ x491, x490 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x492 uint64
+ var x493 uint64
+ x493, x492 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x494 uint64
+ var x495 uint64
+ x495, x494 = bits.Mul64(x462, 0xffffffffffffffff)
+ var x496 uint64
+ var x497 uint64
+ x496, x497 = bits.Add64(x495, x492, uint64(0x0))
+ var x498 uint64
+ var x499 uint64
+ x498, x499 = bits.Add64(x493, x490, uint64(p521Uint1(x497)))
+ var x500 uint64
+ var x501 uint64
+ x500, x501 = bits.Add64(x491, x488, uint64(p521Uint1(x499)))
+ var x502 uint64
+ var x503 uint64
+ x502, x503 = bits.Add64(x489, x486, uint64(p521Uint1(x501)))
+ var x504 uint64
+ var x505 uint64
+ x504, x505 = bits.Add64(x487, x484, uint64(p521Uint1(x503)))
+ var x506 uint64
+ var x507 uint64
+ x506, x507 = bits.Add64(x485, x482, uint64(p521Uint1(x505)))
+ var x508 uint64
+ var x509 uint64
+ x508, x509 = bits.Add64(x483, x480, uint64(p521Uint1(x507)))
+ var x510 uint64
+ var x511 uint64
+ x510, x511 = bits.Add64(x481, x478, uint64(p521Uint1(x509)))
+ var x513 uint64
+ _, x513 = bits.Add64(x462, x494, uint64(0x0))
+ var x514 uint64
+ var x515 uint64
+ x514, x515 = bits.Add64(x464, x496, uint64(p521Uint1(x513)))
+ var x516 uint64
+ var x517 uint64
+ x516, x517 = bits.Add64(x466, x498, uint64(p521Uint1(x515)))
+ var x518 uint64
+ var x519 uint64
+ x518, x519 = bits.Add64(x468, x500, uint64(p521Uint1(x517)))
+ var x520 uint64
+ var x521 uint64
+ x520, x521 = bits.Add64(x470, x502, uint64(p521Uint1(x519)))
+ var x522 uint64
+ var x523 uint64
+ x522, x523 = bits.Add64(x472, x504, uint64(p521Uint1(x521)))
+ var x524 uint64
+ var x525 uint64
+ x524, x525 = bits.Add64(x474, x506, uint64(p521Uint1(x523)))
+ var x526 uint64
+ var x527 uint64
+ x526, x527 = bits.Add64(x476, x508, uint64(p521Uint1(x525)))
+ var x528 uint64
+ var x529 uint64
+ x528, x529 = bits.Add64((uint64(p521Uint1(x477)) + (uint64(p521Uint1(x461)) + (uint64(p521Uint1(x443)) + x411))), x510, uint64(p521Uint1(x527)))
+ var x530 uint64
+ var x531 uint64
+ x530, x531 = bits.Add64(x514, arg1[8], uint64(0x0))
+ var x532 uint64
+ var x533 uint64
+ x532, x533 = bits.Add64(x516, uint64(0x0), uint64(p521Uint1(x531)))
+ var x534 uint64
+ var x535 uint64
+ x534, x535 = bits.Add64(x518, uint64(0x0), uint64(p521Uint1(x533)))
+ var x536 uint64
+ var x537 uint64
+ x536, x537 = bits.Add64(x520, uint64(0x0), uint64(p521Uint1(x535)))
+ var x538 uint64
+ var x539 uint64
+ x538, x539 = bits.Add64(x522, uint64(0x0), uint64(p521Uint1(x537)))
+ var x540 uint64
+ var x541 uint64
+ x540, x541 = bits.Add64(x524, uint64(0x0), uint64(p521Uint1(x539)))
+ var x542 uint64
+ var x543 uint64
+ x542, x543 = bits.Add64(x526, uint64(0x0), uint64(p521Uint1(x541)))
+ var x544 uint64
+ var x545 uint64
+ x544, x545 = bits.Add64(x528, uint64(0x0), uint64(p521Uint1(x543)))
+ var x546 uint64
+ var x547 uint64
+ x547, x546 = bits.Mul64(x530, 0x1ff)
+ var x548 uint64
+ var x549 uint64
+ x549, x548 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x550 uint64
+ var x551 uint64
+ x551, x550 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x552 uint64
+ var x553 uint64
+ x553, x552 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x554 uint64
+ var x555 uint64
+ x555, x554 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x556 uint64
+ var x557 uint64
+ x557, x556 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x558 uint64
+ var x559 uint64
+ x559, x558 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x560 uint64
+ var x561 uint64
+ x561, x560 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x562 uint64
+ var x563 uint64
+ x563, x562 = bits.Mul64(x530, 0xffffffffffffffff)
+ var x564 uint64
+ var x565 uint64
+ x564, x565 = bits.Add64(x563, x560, uint64(0x0))
+ var x566 uint64
+ var x567 uint64
+ x566, x567 = bits.Add64(x561, x558, uint64(p521Uint1(x565)))
+ var x568 uint64
+ var x569 uint64
+ x568, x569 = bits.Add64(x559, x556, uint64(p521Uint1(x567)))
+ var x570 uint64
+ var x571 uint64
+ x570, x571 = bits.Add64(x557, x554, uint64(p521Uint1(x569)))
+ var x572 uint64
+ var x573 uint64
+ x572, x573 = bits.Add64(x555, x552, uint64(p521Uint1(x571)))
+ var x574 uint64
+ var x575 uint64
+ x574, x575 = bits.Add64(x553, x550, uint64(p521Uint1(x573)))
+ var x576 uint64
+ var x577 uint64
+ x576, x577 = bits.Add64(x551, x548, uint64(p521Uint1(x575)))
+ var x578 uint64
+ var x579 uint64
+ x578, x579 = bits.Add64(x549, x546, uint64(p521Uint1(x577)))
+ var x581 uint64
+ _, x581 = bits.Add64(x530, x562, uint64(0x0))
+ var x582 uint64
+ var x583 uint64
+ x582, x583 = bits.Add64(x532, x564, uint64(p521Uint1(x581)))
+ var x584 uint64
+ var x585 uint64
+ x584, x585 = bits.Add64(x534, x566, uint64(p521Uint1(x583)))
+ var x586 uint64
+ var x587 uint64
+ x586, x587 = bits.Add64(x536, x568, uint64(p521Uint1(x585)))
+ var x588 uint64
+ var x589 uint64
+ x588, x589 = bits.Add64(x538, x570, uint64(p521Uint1(x587)))
+ var x590 uint64
+ var x591 uint64
+ x590, x591 = bits.Add64(x540, x572, uint64(p521Uint1(x589)))
+ var x592 uint64
+ var x593 uint64
+ x592, x593 = bits.Add64(x542, x574, uint64(p521Uint1(x591)))
+ var x594 uint64
+ var x595 uint64
+ x594, x595 = bits.Add64(x544, x576, uint64(p521Uint1(x593)))
+ var x596 uint64
+ var x597 uint64
+ x596, x597 = bits.Add64((uint64(p521Uint1(x545)) + (uint64(p521Uint1(x529)) + (uint64(p521Uint1(x511)) + x479))), x578, uint64(p521Uint1(x595)))
+ x598 := (uint64(p521Uint1(x597)) + (uint64(p521Uint1(x579)) + x547))
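+ // The Montgomery reduction above leaves a value that may exceed m by at
+ // most one multiple, so subtract m = 2^521 - 1 (eight all-ones limbs and a
+ // 0x1ff top limb); the final borrow then selects the subtracted limbs only
+ // when no underflow occurred.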
+ var x599 uint64
+ var x600 uint64
+ x599, x600 = bits.Sub64(x582, 0xffffffffffffffff, uint64(0x0))
+ var x601 uint64
+ var x602 uint64
+ x601, x602 = bits.Sub64(x584, 0xffffffffffffffff, uint64(p521Uint1(x600)))
+ var x603 uint64
+ var x604 uint64
+ x603, x604 = bits.Sub64(x586, 0xffffffffffffffff, uint64(p521Uint1(x602)))
+ var x605 uint64
+ var x606 uint64
+ x605, x606 = bits.Sub64(x588, 0xffffffffffffffff, uint64(p521Uint1(x604)))
+ var x607 uint64
+ var x608 uint64
+ x607, x608 = bits.Sub64(x590, 0xffffffffffffffff, uint64(p521Uint1(x606)))
+ var x609 uint64
+ var x610 uint64
+ x609, x610 = bits.Sub64(x592, 0xffffffffffffffff, uint64(p521Uint1(x608)))
+ var x611 uint64
+ var x612 uint64
+ x611, x612 = bits.Sub64(x594, 0xffffffffffffffff, uint64(p521Uint1(x610)))
+ var x613 uint64
+ var x614 uint64
+ x613, x614 = bits.Sub64(x596, 0xffffffffffffffff, uint64(p521Uint1(x612)))
+ var x615 uint64
+ var x616 uint64
+ x615, x616 = bits.Sub64(x598, 0x1ff, uint64(p521Uint1(x614)))
+ var x618 uint64
+ _, x618 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p521Uint1(x616)))
+ var x619 uint64
+ p521CmovznzU64(&x619, p521Uint1(x618), x599, x582)
+ var x620 uint64
+ p521CmovznzU64(&x620, p521Uint1(x618), x601, x584)
+ var x621 uint64
+ p521CmovznzU64(&x621, p521Uint1(x618), x603, x586)
+ var x622 uint64
+ p521CmovznzU64(&x622, p521Uint1(x618), x605, x588)
+ var x623 uint64
+ p521CmovznzU64(&x623, p521Uint1(x618), x607, x590)
+ var x624 uint64
+ p521CmovznzU64(&x624, p521Uint1(x618), x609, x592)
+ var x625 uint64
+ p521CmovznzU64(&x625, p521Uint1(x618), x611, x594)
+ var x626 uint64
+ p521CmovznzU64(&x626, p521Uint1(x618), x613, x596)
+ var x627 uint64
+ p521CmovznzU64(&x627, p521Uint1(x618), x615, x598)
+ out1[0] = x619
+ out1[1] = x620
+ out1[2] = x621
+ out1[3] = x622
+ out1[4] = x623
+ out1[5] = x624
+ out1[6] = x625
+ out1[7] = x626
+ out1[8] = x627
+}
+
+// p521ToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func p521ToMontgomery(out1 *p521MontgomeryDomainFieldElement, arg1 *p521NonMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x2, x1 = bits.Mul64(arg1[0], 0x400000000000)
+ var x3 uint64
+ var x4 uint64
+ x4, x3 = bits.Mul64(arg1[1], 0x400000000000)
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(x2, x3, uint64(0x0))
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x1, 0x1ff)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x13 uint64
+ var x14 uint64
+ x14, x13 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x15 uint64
+ var x16 uint64
+ x16, x15 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x17 uint64
+ var x18 uint64
+ x18, x17 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x19 uint64
+ var x20 uint64
+ x20, x19 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x1, 0xffffffffffffffff)
+ var x25 uint64
+ var x26 uint64
+ x25, x26 = bits.Add64(x24, x21, uint64(0x0))
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x22, x19, uint64(p521Uint1(x26)))
+ var x29 uint64
+ var x30 uint64
+ x29, x30 = bits.Add64(x20, x17, uint64(p521Uint1(x28)))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x18, x15, uint64(p521Uint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x16, x13, uint64(p521Uint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x14, x11, uint64(p521Uint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x12, x9, uint64(p521Uint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x10, x7, uint64(p521Uint1(x38)))
+ var x42 uint64
+ _, x42 = bits.Add64(x1, x23, uint64(0x0))
+ var x43 uint64
+ var x44 uint64
+ x43, x44 = bits.Add64(x5, x25, uint64(p521Uint1(x42)))
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64((uint64(p521Uint1(x6)) + x4), x27, uint64(p521Uint1(x44)))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(uint64(0x0), x29, uint64(p521Uint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(uint64(0x0), x31, uint64(p521Uint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(uint64(0x0), x33, uint64(p521Uint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(uint64(0x0), x35, uint64(p521Uint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(uint64(0x0), x37, uint64(p521Uint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(uint64(0x0), x39, uint64(p521Uint1(x56)))
+ var x59 uint64
+ var x60 uint64
+ x60, x59 = bits.Mul64(arg1[2], 0x400000000000)
+ var x61 uint64
+ var x62 uint64
+ x61, x62 = bits.Add64(x45, x59, uint64(0x0))
+ var x63 uint64
+ var x64 uint64
+ x63, x64 = bits.Add64(x47, x60, uint64(p521Uint1(x62)))
+ var x65 uint64
+ var x66 uint64
+ x65, x66 = bits.Add64(x49, uint64(0x0), uint64(p521Uint1(x64)))
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x51, uint64(0x0), uint64(p521Uint1(x66)))
+ var x69 uint64
+ var x70 uint64
+ x69, x70 = bits.Add64(x53, uint64(0x0), uint64(p521Uint1(x68)))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x55, uint64(0x0), uint64(p521Uint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x57, uint64(0x0), uint64(p521Uint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x76, x75 = bits.Mul64(x43, 0x1ff)
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x85 uint64
+ var x86 uint64
+ x86, x85 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x87 uint64
+ var x88 uint64
+ x88, x87 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x89 uint64
+ var x90 uint64
+ x90, x89 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x91 uint64
+ var x92 uint64
+ x92, x91 = bits.Mul64(x43, 0xffffffffffffffff)
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x92, x89, uint64(0x0))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x90, x87, uint64(p521Uint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(x88, x85, uint64(p521Uint1(x96)))
+ var x99 uint64
+ var x100 uint64
+ x99, x100 = bits.Add64(x86, x83, uint64(p521Uint1(x98)))
+ var x101 uint64
+ var x102 uint64
+ x101, x102 = bits.Add64(x84, x81, uint64(p521Uint1(x100)))
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x82, x79, uint64(p521Uint1(x102)))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x80, x77, uint64(p521Uint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x78, x75, uint64(p521Uint1(x106)))
+ var x110 uint64
+ _, x110 = bits.Add64(x43, x91, uint64(0x0))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x61, x93, uint64(p521Uint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x63, x95, uint64(p521Uint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x65, x97, uint64(p521Uint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x117, x118 = bits.Add64(x67, x99, uint64(p521Uint1(x116)))
+ var x119 uint64
+ var x120 uint64
+ x119, x120 = bits.Add64(x69, x101, uint64(p521Uint1(x118)))
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x71, x103, uint64(p521Uint1(x120)))
+ var x123 uint64
+ var x124 uint64
+ x123, x124 = bits.Add64(x73, x105, uint64(p521Uint1(x122)))
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64((uint64(p521Uint1(x74)) + (uint64(p521Uint1(x58)) + (uint64(p521Uint1(x40)) + x8))), x107, uint64(p521Uint1(x124)))
+ var x127 uint64
+ var x128 uint64
+ x128, x127 = bits.Mul64(arg1[3], 0x400000000000)
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x113, x127, uint64(0x0))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x115, x128, uint64(p521Uint1(x130)))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x117, uint64(0x0), uint64(p521Uint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x119, uint64(0x0), uint64(p521Uint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(x121, uint64(0x0), uint64(p521Uint1(x136)))
+ var x139 uint64
+ var x140 uint64
+ x139, x140 = bits.Add64(x123, uint64(0x0), uint64(p521Uint1(x138)))
+ var x141 uint64
+ var x142 uint64
+ x141, x142 = bits.Add64(x125, uint64(0x0), uint64(p521Uint1(x140)))
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x111, 0x1ff)
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x147 uint64
+ var x148 uint64
+ x148, x147 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x149 uint64
+ var x150 uint64
+ x150, x149 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x151 uint64
+ var x152 uint64
+ x152, x151 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x153 uint64
+ var x154 uint64
+ x154, x153 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x155 uint64
+ var x156 uint64
+ x156, x155 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x157 uint64
+ var x158 uint64
+ x158, x157 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x159 uint64
+ var x160 uint64
+ x160, x159 = bits.Mul64(x111, 0xffffffffffffffff)
+ var x161 uint64
+ var x162 uint64
+ x161, x162 = bits.Add64(x160, x157, uint64(0x0))
+ var x163 uint64
+ var x164 uint64
+ x163, x164 = bits.Add64(x158, x155, uint64(p521Uint1(x162)))
+ var x165 uint64
+ var x166 uint64
+ x165, x166 = bits.Add64(x156, x153, uint64(p521Uint1(x164)))
+ var x167 uint64
+ var x168 uint64
+ x167, x168 = bits.Add64(x154, x151, uint64(p521Uint1(x166)))
+ var x169 uint64
+ var x170 uint64
+ x169, x170 = bits.Add64(x152, x149, uint64(p521Uint1(x168)))
+ var x171 uint64
+ var x172 uint64
+ x171, x172 = bits.Add64(x150, x147, uint64(p521Uint1(x170)))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x148, x145, uint64(p521Uint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x146, x143, uint64(p521Uint1(x174)))
+ var x178 uint64
+ _, x178 = bits.Add64(x111, x159, uint64(0x0))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x129, x161, uint64(p521Uint1(x178)))
+ var x181 uint64
+ var x182 uint64
+ x181, x182 = bits.Add64(x131, x163, uint64(p521Uint1(x180)))
+ var x183 uint64
+ var x184 uint64
+ x183, x184 = bits.Add64(x133, x165, uint64(p521Uint1(x182)))
+ var x185 uint64
+ var x186 uint64
+ x185, x186 = bits.Add64(x135, x167, uint64(p521Uint1(x184)))
+ var x187 uint64
+ var x188 uint64
+ x187, x188 = bits.Add64(x137, x169, uint64(p521Uint1(x186)))
+ var x189 uint64
+ var x190 uint64
+ x189, x190 = bits.Add64(x139, x171, uint64(p521Uint1(x188)))
+ var x191 uint64
+ var x192 uint64
+ x191, x192 = bits.Add64(x141, x173, uint64(p521Uint1(x190)))
+ var x193 uint64
+ var x194 uint64
+ x193, x194 = bits.Add64((uint64(p521Uint1(x142)) + (uint64(p521Uint1(x126)) + (uint64(p521Uint1(x108)) + x76))), x175, uint64(p521Uint1(x192)))
+ var x195 uint64
+ var x196 uint64
+ x196, x195 = bits.Mul64(arg1[4], 0x400000000000)
+ var x197 uint64
+ var x198 uint64
+ x197, x198 = bits.Add64(x181, x195, uint64(0x0))
+ var x199 uint64
+ var x200 uint64
+ x199, x200 = bits.Add64(x183, x196, uint64(p521Uint1(x198)))
+ var x201 uint64
+ var x202 uint64
+ x201, x202 = bits.Add64(x185, uint64(0x0), uint64(p521Uint1(x200)))
+ var x203 uint64
+ var x204 uint64
+ x203, x204 = bits.Add64(x187, uint64(0x0), uint64(p521Uint1(x202)))
+ var x205 uint64
+ var x206 uint64
+ x205, x206 = bits.Add64(x189, uint64(0x0), uint64(p521Uint1(x204)))
+ var x207 uint64
+ var x208 uint64
+ x207, x208 = bits.Add64(x191, uint64(0x0), uint64(p521Uint1(x206)))
+ var x209 uint64
+ var x210 uint64
+ x209, x210 = bits.Add64(x193, uint64(0x0), uint64(p521Uint1(x208)))
+ var x211 uint64
+ var x212 uint64
+ x212, x211 = bits.Mul64(x179, 0x1ff)
+ var x213 uint64
+ var x214 uint64
+ x214, x213 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x215 uint64
+ var x216 uint64
+ x216, x215 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x217 uint64
+ var x218 uint64
+ x218, x217 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x219 uint64
+ var x220 uint64
+ x220, x219 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x221 uint64
+ var x222 uint64
+ x222, x221 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x223 uint64
+ var x224 uint64
+ x224, x223 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x225 uint64
+ var x226 uint64
+ x226, x225 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x227 uint64
+ var x228 uint64
+ x228, x227 = bits.Mul64(x179, 0xffffffffffffffff)
+ var x229 uint64
+ var x230 uint64
+ x229, x230 = bits.Add64(x228, x225, uint64(0x0))
+ var x231 uint64
+ var x232 uint64
+ x231, x232 = bits.Add64(x226, x223, uint64(p521Uint1(x230)))
+ var x233 uint64
+ var x234 uint64
+ x233, x234 = bits.Add64(x224, x221, uint64(p521Uint1(x232)))
+ var x235 uint64
+ var x236 uint64
+ x235, x236 = bits.Add64(x222, x219, uint64(p521Uint1(x234)))
+ var x237 uint64
+ var x238 uint64
+ x237, x238 = bits.Add64(x220, x217, uint64(p521Uint1(x236)))
+ var x239 uint64
+ var x240 uint64
+ x239, x240 = bits.Add64(x218, x215, uint64(p521Uint1(x238)))
+ var x241 uint64
+ var x242 uint64
+ x241, x242 = bits.Add64(x216, x213, uint64(p521Uint1(x240)))
+ var x243 uint64
+ var x244 uint64
+ x243, x244 = bits.Add64(x214, x211, uint64(p521Uint1(x242)))
+ var x246 uint64
+ _, x246 = bits.Add64(x179, x227, uint64(0x0))
+ var x247 uint64
+ var x248 uint64
+ x247, x248 = bits.Add64(x197, x229, uint64(p521Uint1(x246)))
+ var x249 uint64
+ var x250 uint64
+ x249, x250 = bits.Add64(x199, x231, uint64(p521Uint1(x248)))
+ var x251 uint64
+ var x252 uint64
+ x251, x252 = bits.Add64(x201, x233, uint64(p521Uint1(x250)))
+ var x253 uint64
+ var x254 uint64
+ x253, x254 = bits.Add64(x203, x235, uint64(p521Uint1(x252)))
+ var x255 uint64
+ var x256 uint64
+ x255, x256 = bits.Add64(x205, x237, uint64(p521Uint1(x254)))
+ var x257 uint64
+ var x258 uint64
+ x257, x258 = bits.Add64(x207, x239, uint64(p521Uint1(x256)))
+ var x259 uint64
+ var x260 uint64
+ x259, x260 = bits.Add64(x209, x241, uint64(p521Uint1(x258)))
+ var x261 uint64
+ var x262 uint64
+ x261, x262 = bits.Add64((uint64(p521Uint1(x210)) + (uint64(p521Uint1(x194)) + (uint64(p521Uint1(x176)) + x144))), x243, uint64(p521Uint1(x260)))
+ var x263 uint64
+ var x264 uint64
+ x264, x263 = bits.Mul64(arg1[5], 0x400000000000)
+ var x265 uint64
+ var x266 uint64
+ x265, x266 = bits.Add64(x249, x263, uint64(0x0))
+ var x267 uint64
+ var x268 uint64
+ x267, x268 = bits.Add64(x251, x264, uint64(p521Uint1(x266)))
+ var x269 uint64
+ var x270 uint64
+ x269, x270 = bits.Add64(x253, uint64(0x0), uint64(p521Uint1(x268)))
+ var x271 uint64
+ var x272 uint64
+ x271, x272 = bits.Add64(x255, uint64(0x0), uint64(p521Uint1(x270)))
+ var x273 uint64
+ var x274 uint64
+ x273, x274 = bits.Add64(x257, uint64(0x0), uint64(p521Uint1(x272)))
+ var x275 uint64
+ var x276 uint64
+ x275, x276 = bits.Add64(x259, uint64(0x0), uint64(p521Uint1(x274)))
+ var x277 uint64
+ var x278 uint64
+ x277, x278 = bits.Add64(x261, uint64(0x0), uint64(p521Uint1(x276)))
+ var x279 uint64
+ var x280 uint64
+ x280, x279 = bits.Mul64(x247, 0x1ff)
+ var x281 uint64
+ var x282 uint64
+ x282, x281 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x283 uint64
+ var x284 uint64
+ x284, x283 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x285 uint64
+ var x286 uint64
+ x286, x285 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x287 uint64
+ var x288 uint64
+ x288, x287 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x289 uint64
+ var x290 uint64
+ x290, x289 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x291 uint64
+ var x292 uint64
+ x292, x291 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x293 uint64
+ var x294 uint64
+ x294, x293 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x295 uint64
+ var x296 uint64
+ x296, x295 = bits.Mul64(x247, 0xffffffffffffffff)
+ var x297 uint64
+ var x298 uint64
+ x297, x298 = bits.Add64(x296, x293, uint64(0x0))
+ var x299 uint64
+ var x300 uint64
+ x299, x300 = bits.Add64(x294, x291, uint64(p521Uint1(x298)))
+ var x301 uint64
+ var x302 uint64
+ x301, x302 = bits.Add64(x292, x289, uint64(p521Uint1(x300)))
+ var x303 uint64
+ var x304 uint64
+ x303, x304 = bits.Add64(x290, x287, uint64(p521Uint1(x302)))
+ var x305 uint64
+ var x306 uint64
+ x305, x306 = bits.Add64(x288, x285, uint64(p521Uint1(x304)))
+ var x307 uint64
+ var x308 uint64
+ x307, x308 = bits.Add64(x286, x283, uint64(p521Uint1(x306)))
+ var x309 uint64
+ var x310 uint64
+ x309, x310 = bits.Add64(x284, x281, uint64(p521Uint1(x308)))
+ var x311 uint64
+ var x312 uint64
+ x311, x312 = bits.Add64(x282, x279, uint64(p521Uint1(x310)))
+ var x314 uint64
+ _, x314 = bits.Add64(x247, x295, uint64(0x0))
+ var x315 uint64
+ var x316 uint64
+ x315, x316 = bits.Add64(x265, x297, uint64(p521Uint1(x314)))
+ var x317 uint64
+ var x318 uint64
+ x317, x318 = bits.Add64(x267, x299, uint64(p521Uint1(x316)))
+ var x319 uint64
+ var x320 uint64
+ x319, x320 = bits.Add64(x269, x301, uint64(p521Uint1(x318)))
+ var x321 uint64
+ var x322 uint64
+ x321, x322 = bits.Add64(x271, x303, uint64(p521Uint1(x320)))
+ var x323 uint64
+ var x324 uint64
+ x323, x324 = bits.Add64(x273, x305, uint64(p521Uint1(x322)))
+ var x325 uint64
+ var x326 uint64
+ x325, x326 = bits.Add64(x275, x307, uint64(p521Uint1(x324)))
+ var x327 uint64
+ var x328 uint64
+ x327, x328 = bits.Add64(x277, x309, uint64(p521Uint1(x326)))
+ var x329 uint64
+ var x330 uint64
+ x329, x330 = bits.Add64((uint64(p521Uint1(x278)) + (uint64(p521Uint1(x262)) + (uint64(p521Uint1(x244)) + x212))), x311, uint64(p521Uint1(x328)))
+ var x331 uint64
+ var x332 uint64
+ x332, x331 = bits.Mul64(arg1[6], 0x400000000000)
+ var x333 uint64
+ var x334 uint64
+ x333, x334 = bits.Add64(x317, x331, uint64(0x0))
+ var x335 uint64
+ var x336 uint64
+ x335, x336 = bits.Add64(x319, x332, uint64(p521Uint1(x334)))
+ var x337 uint64
+ var x338 uint64
+ x337, x338 = bits.Add64(x321, uint64(0x0), uint64(p521Uint1(x336)))
+ var x339 uint64
+ var x340 uint64
+ x339, x340 = bits.Add64(x323, uint64(0x0), uint64(p521Uint1(x338)))
+ var x341 uint64
+ var x342 uint64
+ x341, x342 = bits.Add64(x325, uint64(0x0), uint64(p521Uint1(x340)))
+ var x343 uint64
+ var x344 uint64
+ x343, x344 = bits.Add64(x327, uint64(0x0), uint64(p521Uint1(x342)))
+ var x345 uint64
+ var x346 uint64
+ x345, x346 = bits.Add64(x329, uint64(0x0), uint64(p521Uint1(x344)))
+ var x347 uint64
+ var x348 uint64
+ x348, x347 = bits.Mul64(x315, 0x1ff)
+ var x349 uint64
+ var x350 uint64
+ x350, x349 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x351 uint64
+ var x352 uint64
+ x352, x351 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x353 uint64
+ var x354 uint64
+ x354, x353 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x355 uint64
+ var x356 uint64
+ x356, x355 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x357 uint64
+ var x358 uint64
+ x358, x357 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x359 uint64
+ var x360 uint64
+ x360, x359 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x361 uint64
+ var x362 uint64
+ x362, x361 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x363 uint64
+ var x364 uint64
+ x364, x363 = bits.Mul64(x315, 0xffffffffffffffff)
+ var x365 uint64
+ var x366 uint64
+ x365, x366 = bits.Add64(x364, x361, uint64(0x0))
+ var x367 uint64
+ var x368 uint64
+ x367, x368 = bits.Add64(x362, x359, uint64(p521Uint1(x366)))
+ var x369 uint64
+ var x370 uint64
+ x369, x370 = bits.Add64(x360, x357, uint64(p521Uint1(x368)))
+ var x371 uint64
+ var x372 uint64
+ x371, x372 = bits.Add64(x358, x355, uint64(p521Uint1(x370)))
+ var x373 uint64
+ var x374 uint64
+ x373, x374 = bits.Add64(x356, x353, uint64(p521Uint1(x372)))
+ var x375 uint64
+ var x376 uint64
+ x375, x376 = bits.Add64(x354, x351, uint64(p521Uint1(x374)))
+ var x377 uint64
+ var x378 uint64
+ x377, x378 = bits.Add64(x352, x349, uint64(p521Uint1(x376)))
+ var x379 uint64
+ var x380 uint64
+ x379, x380 = bits.Add64(x350, x347, uint64(p521Uint1(x378)))
+ var x382 uint64
+ _, x382 = bits.Add64(x315, x363, uint64(0x0))
+ var x383 uint64
+ var x384 uint64
+ x383, x384 = bits.Add64(x333, x365, uint64(p521Uint1(x382)))
+ var x385 uint64
+ var x386 uint64
+ x385, x386 = bits.Add64(x335, x367, uint64(p521Uint1(x384)))
+ var x387 uint64
+ var x388 uint64
+ x387, x388 = bits.Add64(x337, x369, uint64(p521Uint1(x386)))
+ var x389 uint64
+ var x390 uint64
+ x389, x390 = bits.Add64(x339, x371, uint64(p521Uint1(x388)))
+ var x391 uint64
+ var x392 uint64
+ x391, x392 = bits.Add64(x341, x373, uint64(p521Uint1(x390)))
+ var x393 uint64
+ var x394 uint64
+ x393, x394 = bits.Add64(x343, x375, uint64(p521Uint1(x392)))
+ var x395 uint64
+ var x396 uint64
+ x395, x396 = bits.Add64(x345, x377, uint64(p521Uint1(x394)))
+ var x397 uint64
+ var x398 uint64
+ x397, x398 = bits.Add64((uint64(p521Uint1(x346)) + (uint64(p521Uint1(x330)) + (uint64(p521Uint1(x312)) + x280))), x379, uint64(p521Uint1(x396)))
+ var x399 uint64
+ var x400 uint64
+ x400, x399 = bits.Mul64(arg1[7], 0x400000000000)
+ var x401 uint64
+ var x402 uint64
+ x401, x402 = bits.Add64(x385, x399, uint64(0x0))
+ var x403 uint64
+ var x404 uint64
+ x403, x404 = bits.Add64(x387, x400, uint64(p521Uint1(x402)))
+ var x405 uint64
+ var x406 uint64
+ x405, x406 = bits.Add64(x389, uint64(0x0), uint64(p521Uint1(x404)))
+ var x407 uint64
+ var x408 uint64
+ x407, x408 = bits.Add64(x391, uint64(0x0), uint64(p521Uint1(x406)))
+ var x409 uint64
+ var x410 uint64
+ x409, x410 = bits.Add64(x393, uint64(0x0), uint64(p521Uint1(x408)))
+ var x411 uint64
+ var x412 uint64
+ x411, x412 = bits.Add64(x395, uint64(0x0), uint64(p521Uint1(x410)))
+ var x413 uint64
+ var x414 uint64
+ x413, x414 = bits.Add64(x397, uint64(0x0), uint64(p521Uint1(x412)))
+ var x415 uint64
+ var x416 uint64
+ x416, x415 = bits.Mul64(x383, 0x1ff)
+ var x417 uint64
+ var x418 uint64
+ x418, x417 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x419 uint64
+ var x420 uint64
+ x420, x419 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x421 uint64
+ var x422 uint64
+ x422, x421 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x423 uint64
+ var x424 uint64
+ x424, x423 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x425 uint64
+ var x426 uint64
+ x426, x425 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x427 uint64
+ var x428 uint64
+ x428, x427 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x429 uint64
+ var x430 uint64
+ x430, x429 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x431 uint64
+ var x432 uint64
+ x432, x431 = bits.Mul64(x383, 0xffffffffffffffff)
+ var x433 uint64
+ var x434 uint64
+ x433, x434 = bits.Add64(x432, x429, uint64(0x0))
+ var x435 uint64
+ var x436 uint64
+ x435, x436 = bits.Add64(x430, x427, uint64(p521Uint1(x434)))
+ var x437 uint64
+ var x438 uint64
+ x437, x438 = bits.Add64(x428, x425, uint64(p521Uint1(x436)))
+ var x439 uint64
+ var x440 uint64
+ x439, x440 = bits.Add64(x426, x423, uint64(p521Uint1(x438)))
+ var x441 uint64
+ var x442 uint64
+ x441, x442 = bits.Add64(x424, x421, uint64(p521Uint1(x440)))
+ var x443 uint64
+ var x444 uint64
+ x443, x444 = bits.Add64(x422, x419, uint64(p521Uint1(x442)))
+ var x445 uint64
+ var x446 uint64
+ x445, x446 = bits.Add64(x420, x417, uint64(p521Uint1(x444)))
+ var x447 uint64
+ var x448 uint64
+ x447, x448 = bits.Add64(x418, x415, uint64(p521Uint1(x446)))
+ var x450 uint64
+ _, x450 = bits.Add64(x383, x431, uint64(0x0))
+ var x451 uint64
+ var x452 uint64
+ x451, x452 = bits.Add64(x401, x433, uint64(p521Uint1(x450)))
+ var x453 uint64
+ var x454 uint64
+ x453, x454 = bits.Add64(x403, x435, uint64(p521Uint1(x452)))
+ var x455 uint64
+ var x456 uint64
+ x455, x456 = bits.Add64(x405, x437, uint64(p521Uint1(x454)))
+ var x457 uint64
+ var x458 uint64
+ x457, x458 = bits.Add64(x407, x439, uint64(p521Uint1(x456)))
+ var x459 uint64
+ var x460 uint64
+ x459, x460 = bits.Add64(x409, x441, uint64(p521Uint1(x458)))
+ var x461 uint64
+ var x462 uint64
+ x461, x462 = bits.Add64(x411, x443, uint64(p521Uint1(x460)))
+ var x463 uint64
+ var x464 uint64
+ x463, x464 = bits.Add64(x413, x445, uint64(p521Uint1(x462)))
+ var x465 uint64
+ var x466 uint64
+ x465, x466 = bits.Add64((uint64(p521Uint1(x414)) + (uint64(p521Uint1(x398)) + (uint64(p521Uint1(x380)) + x348))), x447, uint64(p521Uint1(x464)))
+ var x467 uint64
+ var x468 uint64
+ x468, x467 = bits.Mul64(arg1[8], 0x400000000000)
+ var x469 uint64
+ var x470 uint64
+ x469, x470 = bits.Add64(x453, x467, uint64(0x0))
+ var x471 uint64
+ var x472 uint64
+ x471, x472 = bits.Add64(x455, x468, uint64(p521Uint1(x470)))
+ var x473 uint64
+ var x474 uint64
+ x473, x474 = bits.Add64(x457, uint64(0x0), uint64(p521Uint1(x472)))
+ var x475 uint64
+ var x476 uint64
+ x475, x476 = bits.Add64(x459, uint64(0x0), uint64(p521Uint1(x474)))
+ var x477 uint64
+ var x478 uint64
+ x477, x478 = bits.Add64(x461, uint64(0x0), uint64(p521Uint1(x476)))
+ var x479 uint64
+ var x480 uint64
+ x479, x480 = bits.Add64(x463, uint64(0x0), uint64(p521Uint1(x478)))
+ var x481 uint64
+ var x482 uint64
+ x481, x482 = bits.Add64(x465, uint64(0x0), uint64(p521Uint1(x480)))
+ var x483 uint64
+ var x484 uint64
+ x484, x483 = bits.Mul64(x451, 0x1ff)
+ var x485 uint64
+ var x486 uint64
+ x486, x485 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x487 uint64
+ var x488 uint64
+ x488, x487 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x489 uint64
+ var x490 uint64
+ x490, x489 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x491 uint64
+ var x492 uint64
+ x492, x491 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x493 uint64
+ var x494 uint64
+ x494, x493 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x495 uint64
+ var x496 uint64
+ x496, x495 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x497 uint64
+ var x498 uint64
+ x498, x497 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x499 uint64
+ var x500 uint64
+ x500, x499 = bits.Mul64(x451, 0xffffffffffffffff)
+ var x501 uint64
+ var x502 uint64
+ x501, x502 = bits.Add64(x500, x497, uint64(0x0))
+ var x503 uint64
+ var x504 uint64
+ x503, x504 = bits.Add64(x498, x495, uint64(p521Uint1(x502)))
+ var x505 uint64
+ var x506 uint64
+ x505, x506 = bits.Add64(x496, x493, uint64(p521Uint1(x504)))
+ var x507 uint64
+ var x508 uint64
+ x507, x508 = bits.Add64(x494, x491, uint64(p521Uint1(x506)))
+ var x509 uint64
+ var x510 uint64
+ x509, x510 = bits.Add64(x492, x489, uint64(p521Uint1(x508)))
+ var x511 uint64
+ var x512 uint64
+ x511, x512 = bits.Add64(x490, x487, uint64(p521Uint1(x510)))
+ var x513 uint64
+ var x514 uint64
+ x513, x514 = bits.Add64(x488, x485, uint64(p521Uint1(x512)))
+ var x515 uint64
+ var x516 uint64
+ x515, x516 = bits.Add64(x486, x483, uint64(p521Uint1(x514)))
+ var x518 uint64
+ _, x518 = bits.Add64(x451, x499, uint64(0x0))
+ var x519 uint64
+ var x520 uint64
+ x519, x520 = bits.Add64(x469, x501, uint64(p521Uint1(x518)))
+ var x521 uint64
+ var x522 uint64
+ x521, x522 = bits.Add64(x471, x503, uint64(p521Uint1(x520)))
+ var x523 uint64
+ var x524 uint64
+ x523, x524 = bits.Add64(x473, x505, uint64(p521Uint1(x522)))
+ var x525 uint64
+ var x526 uint64
+ x525, x526 = bits.Add64(x475, x507, uint64(p521Uint1(x524)))
+ var x527 uint64
+ var x528 uint64
+ x527, x528 = bits.Add64(x477, x509, uint64(p521Uint1(x526)))
+ var x529 uint64
+ var x530 uint64
+ x529, x530 = bits.Add64(x479, x511, uint64(p521Uint1(x528)))
+ var x531 uint64
+ var x532 uint64
+ x531, x532 = bits.Add64(x481, x513, uint64(p521Uint1(x530)))
+ var x533 uint64
+ var x534 uint64
+ x533, x534 = bits.Add64((uint64(p521Uint1(x482)) + (uint64(p521Uint1(x466)) + (uint64(p521Uint1(x448)) + x416))), x515, uint64(p521Uint1(x532)))
+ x535 := (uint64(p521Uint1(x534)) + (uint64(p521Uint1(x516)) + x484))
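+ // As in p521FromMontgomery, conditionally subtract m = 2^521 - 1 to fully
+ // reduce the result; the borrow out of the chain below picks between the
+ // subtracted and unsubtracted limbs.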
+ var x536 uint64
+ var x537 uint64
+ x536, x537 = bits.Sub64(x519, 0xffffffffffffffff, uint64(0x0))
+ var x538 uint64
+ var x539 uint64
+ x538, x539 = bits.Sub64(x521, 0xffffffffffffffff, uint64(p521Uint1(x537)))
+ var x540 uint64
+ var x541 uint64
+ x540, x541 = bits.Sub64(x523, 0xffffffffffffffff, uint64(p521Uint1(x539)))
+ var x542 uint64
+ var x543 uint64
+ x542, x543 = bits.Sub64(x525, 0xffffffffffffffff, uint64(p521Uint1(x541)))
+ var x544 uint64
+ var x545 uint64
+ x544, x545 = bits.Sub64(x527, 0xffffffffffffffff, uint64(p521Uint1(x543)))
+ var x546 uint64
+ var x547 uint64
+ x546, x547 = bits.Sub64(x529, 0xffffffffffffffff, uint64(p521Uint1(x545)))
+ var x548 uint64
+ var x549 uint64
+ x548, x549 = bits.Sub64(x531, 0xffffffffffffffff, uint64(p521Uint1(x547)))
+ var x550 uint64
+ var x551 uint64
+ x550, x551 = bits.Sub64(x533, 0xffffffffffffffff, uint64(p521Uint1(x549)))
+ var x552 uint64
+ var x553 uint64
+ x552, x553 = bits.Sub64(x535, 0x1ff, uint64(p521Uint1(x551)))
+ var x555 uint64
+ _, x555 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(p521Uint1(x553)))
+ var x556 uint64
+ p521CmovznzU64(&x556, p521Uint1(x555), x536, x519)
+ var x557 uint64
+ p521CmovznzU64(&x557, p521Uint1(x555), x538, x521)
+ var x558 uint64
+ p521CmovznzU64(&x558, p521Uint1(x555), x540, x523)
+ var x559 uint64
+ p521CmovznzU64(&x559, p521Uint1(x555), x542, x525)
+ var x560 uint64
+ p521CmovznzU64(&x560, p521Uint1(x555), x544, x527)
+ var x561 uint64
+ p521CmovznzU64(&x561, p521Uint1(x555), x546, x529)
+ var x562 uint64
+ p521CmovznzU64(&x562, p521Uint1(x555), x548, x531)
+ var x563 uint64
+ p521CmovznzU64(&x563, p521Uint1(x555), x550, x533)
+ var x564 uint64
+ p521CmovznzU64(&x564, p521Uint1(x555), x552, x535)
+ out1[0] = x556
+ out1[1] = x557
+ out1[2] = x558
+ out1[3] = x559
+ out1[4] = x560
+ out1[5] = x561
+ out1[6] = x562
+ out1[7] = x563
+ out1[8] = x564
+}
+
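+// p521MontgomeryRoundTrip is an illustrative sketch, not part of the
+// fiat-crypto generated output: the postconditions of p521ToMontgomery and
+// p521FromMontgomery together imply that converting into and back out of the
+// Montgomery domain preserves any value x with 0 ≤ eval x < m.
+func p521MontgomeryRoundTrip(x *p521NonMontgomeryDomainFieldElement) bool {
+ var mont p521MontgomeryDomainFieldElement
+ p521ToMontgomery(&mont, x)
+ var back p521NonMontgomeryDomainFieldElement
+ p521FromMontgomery(&back, &mont)
+ return back == *x // fixed-size arrays compare element-wise in Go
+}
+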
+// p521Selectznz is a multi-limb conditional select.
+//
+// Postconditions:
+//
+// eval out1 = (if arg1 = 0 then eval arg2 else eval arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+// arg3: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+func p521Selectznz(out1 *[9]uint64, arg1 p521Uint1, arg2 *[9]uint64, arg3 *[9]uint64) {
+ var x1 uint64
+ p521CmovznzU64(&x1, arg1, arg2[0], arg3[0])
+ var x2 uint64
+ p521CmovznzU64(&x2, arg1, arg2[1], arg3[1])
+ var x3 uint64
+ p521CmovznzU64(&x3, arg1, arg2[2], arg3[2])
+ var x4 uint64
+ p521CmovznzU64(&x4, arg1, arg2[3], arg3[3])
+ var x5 uint64
+ p521CmovznzU64(&x5, arg1, arg2[4], arg3[4])
+ var x6 uint64
+ p521CmovznzU64(&x6, arg1, arg2[5], arg3[5])
+ var x7 uint64
+ p521CmovznzU64(&x7, arg1, arg2[6], arg3[6])
+ var x8 uint64
+ p521CmovznzU64(&x8, arg1, arg2[7], arg3[7])
+ var x9 uint64
+ p521CmovznzU64(&x9, arg1, arg2[8], arg3[8])
+ out1[0] = x1
+ out1[1] = x2
+ out1[2] = x3
+ out1[3] = x4
+ out1[4] = x5
+ out1[5] = x6
+ out1[6] = x7
+ out1[7] = x8
+ out1[8] = x9
+}
+
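+// p521SelectznzExample is an illustrative sketch, not part of the generated
+// output: it shows the postcondition above concretely, namely that the output
+// limbs are arg2 when the condition bit is 0 and arg3 when it is 1, with no
+// branch on the (possibly secret) bit.
+func p521SelectznzExample(cond p521Uint1, a, b *[9]uint64) [9]uint64 {
+ var out [9]uint64
+ p521Selectznz(&out, cond, a, b) // out = *a if cond == 0, else *b
+ return out
+}
+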
+// p521ToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..65]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1ff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1]]
+func p521ToBytes(out1 *[66]uint8, arg1 *[9]uint64) {
+ x1 := arg1[8]
+ x2 := arg1[7]
+ x3 := arg1[6]
+ x4 := arg1[5]
+ x5 := arg1[4]
+ x6 := arg1[3]
+ x7 := arg1[2]
+ x8 := arg1[1]
+ x9 := arg1[0]
+ x10 := (uint8(x9) & 0xff)
+ x11 := (x9 >> 8)
+ x12 := (uint8(x11) & 0xff)
+ x13 := (x11 >> 8)
+ x14 := (uint8(x13) & 0xff)
+ x15 := (x13 >> 8)
+ x16 := (uint8(x15) & 0xff)
+ x17 := (x15 >> 8)
+ x18 := (uint8(x17) & 0xff)
+ x19 := (x17 >> 8)
+ x20 := (uint8(x19) & 0xff)
+ x21 := (x19 >> 8)
+ x22 := (uint8(x21) & 0xff)
+ x23 := uint8((x21 >> 8))
+ x24 := (uint8(x8) & 0xff)
+ x25 := (x8 >> 8)
+ x26 := (uint8(x25) & 0xff)
+ x27 := (x25 >> 8)
+ x28 := (uint8(x27) & 0xff)
+ x29 := (x27 >> 8)
+ x30 := (uint8(x29) & 0xff)
+ x31 := (x29 >> 8)
+ x32 := (uint8(x31) & 0xff)
+ x33 := (x31 >> 8)
+ x34 := (uint8(x33) & 0xff)
+ x35 := (x33 >> 8)
+ x36 := (uint8(x35) & 0xff)
+ x37 := uint8((x35 >> 8))
+ x38 := (uint8(x7) & 0xff)
+ x39 := (x7 >> 8)
+ x40 := (uint8(x39) & 0xff)
+ x41 := (x39 >> 8)
+ x42 := (uint8(x41) & 0xff)
+ x43 := (x41 >> 8)
+ x44 := (uint8(x43) & 0xff)
+ x45 := (x43 >> 8)
+ x46 := (uint8(x45) & 0xff)
+ x47 := (x45 >> 8)
+ x48 := (uint8(x47) & 0xff)
+ x49 := (x47 >> 8)
+ x50 := (uint8(x49) & 0xff)
+ x51 := uint8((x49 >> 8))
+ x52 := (uint8(x6) & 0xff)
+ x53 := (x6 >> 8)
+ x54 := (uint8(x53) & 0xff)
+ x55 := (x53 >> 8)
+ x56 := (uint8(x55) & 0xff)
+ x57 := (x55 >> 8)
+ x58 := (uint8(x57) & 0xff)
+ x59 := (x57 >> 8)
+ x60 := (uint8(x59) & 0xff)
+ x61 := (x59 >> 8)
+ x62 := (uint8(x61) & 0xff)
+ x63 := (x61 >> 8)
+ x64 := (uint8(x63) & 0xff)
+ x65 := uint8((x63 >> 8))
+ x66 := (uint8(x5) & 0xff)
+ x67 := (x5 >> 8)
+ x68 := (uint8(x67) & 0xff)
+ x69 := (x67 >> 8)
+ x70 := (uint8(x69) & 0xff)
+ x71 := (x69 >> 8)
+ x72 := (uint8(x71) & 0xff)
+ x73 := (x71 >> 8)
+ x74 := (uint8(x73) & 0xff)
+ x75 := (x73 >> 8)
+ x76 := (uint8(x75) & 0xff)
+ x77 := (x75 >> 8)
+ x78 := (uint8(x77) & 0xff)
+ x79 := uint8((x77 >> 8))
+ x80 := (uint8(x4) & 0xff)
+ x81 := (x4 >> 8)
+ x82 := (uint8(x81) & 0xff)
+ x83 := (x81 >> 8)
+ x84 := (uint8(x83) & 0xff)
+ x85 := (x83 >> 8)
+ x86 := (uint8(x85) & 0xff)
+ x87 := (x85 >> 8)
+ x88 := (uint8(x87) & 0xff)
+ x89 := (x87 >> 8)
+ x90 := (uint8(x89) & 0xff)
+ x91 := (x89 >> 8)
+ x92 := (uint8(x91) & 0xff)
+ x93 := uint8((x91 >> 8))
+ x94 := (uint8(x3) & 0xff)
+ x95 := (x3 >> 8)
+ x96 := (uint8(x95) & 0xff)
+ x97 := (x95 >> 8)
+ x98 := (uint8(x97) & 0xff)
+ x99 := (x97 >> 8)
+ x100 := (uint8(x99) & 0xff)
+ x101 := (x99 >> 8)
+ x102 := (uint8(x101) & 0xff)
+ x103 := (x101 >> 8)
+ x104 := (uint8(x103) & 0xff)
+ x105 := (x103 >> 8)
+ x106 := (uint8(x105) & 0xff)
+ x107 := uint8((x105 >> 8))
+ x108 := (uint8(x2) & 0xff)
+ x109 := (x2 >> 8)
+ x110 := (uint8(x109) & 0xff)
+ x111 := (x109 >> 8)
+ x112 := (uint8(x111) & 0xff)
+ x113 := (x111 >> 8)
+ x114 := (uint8(x113) & 0xff)
+ x115 := (x113 >> 8)
+ x116 := (uint8(x115) & 0xff)
+ x117 := (x115 >> 8)
+ x118 := (uint8(x117) & 0xff)
+ x119 := (x117 >> 8)
+ x120 := (uint8(x119) & 0xff)
+ x121 := uint8((x119 >> 8))
+ x122 := (uint8(x1) & 0xff)
+ x123 := p521Uint1((x1 >> 8))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+ out1[4] = x18
+ out1[5] = x20
+ out1[6] = x22
+ out1[7] = x23
+ out1[8] = x24
+ out1[9] = x26
+ out1[10] = x28
+ out1[11] = x30
+ out1[12] = x32
+ out1[13] = x34
+ out1[14] = x36
+ out1[15] = x37
+ out1[16] = x38
+ out1[17] = x40
+ out1[18] = x42
+ out1[19] = x44
+ out1[20] = x46
+ out1[21] = x48
+ out1[22] = x50
+ out1[23] = x51
+ out1[24] = x52
+ out1[25] = x54
+ out1[26] = x56
+ out1[27] = x58
+ out1[28] = x60
+ out1[29] = x62
+ out1[30] = x64
+ out1[31] = x65
+ out1[32] = x66
+ out1[33] = x68
+ out1[34] = x70
+ out1[35] = x72
+ out1[36] = x74
+ out1[37] = x76
+ out1[38] = x78
+ out1[39] = x79
+ out1[40] = x80
+ out1[41] = x82
+ out1[42] = x84
+ out1[43] = x86
+ out1[44] = x88
+ out1[45] = x90
+ out1[46] = x92
+ out1[47] = x93
+ out1[48] = x94
+ out1[49] = x96
+ out1[50] = x98
+ out1[51] = x100
+ out1[52] = x102
+ out1[53] = x104
+ out1[54] = x106
+ out1[55] = x107
+ out1[56] = x108
+ out1[57] = x110
+ out1[58] = x112
+ out1[59] = x114
+ out1[60] = x116
+ out1[61] = x118
+ out1[62] = x120
+ out1[63] = x121
+ out1[64] = x122
+ out1[65] = uint8(x123)
+}
+
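+// p521ToBytesExample is an illustrative sketch, not part of the generated
+// output: serializing the canonical element 1 yields a 66-byte little-endian
+// encoding whose first byte is 0x01 and whose remaining 65 bytes are zero.
+func p521ToBytesExample() [66]uint8 {
+ one := [9]uint64{1} // limbs beyond the first default to zero
+ var out [66]uint8
+ p521ToBytes(&out, &one)
+ return out
+}
+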
+// p521FromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1ff]]
+func p521FromBytes(out1 *[9]uint64, arg1 *[66]uint8) {
+ x1 := (uint64(p521Uint1(arg1[65])) << 8)
+ x2 := arg1[64]
+ x3 := (uint64(arg1[63]) << 56)
+ x4 := (uint64(arg1[62]) << 48)
+ x5 := (uint64(arg1[61]) << 40)
+ x6 := (uint64(arg1[60]) << 32)
+ x7 := (uint64(arg1[59]) << 24)
+ x8 := (uint64(arg1[58]) << 16)
+ x9 := (uint64(arg1[57]) << 8)
+ x10 := arg1[56]
+ x11 := (uint64(arg1[55]) << 56)
+ x12 := (uint64(arg1[54]) << 48)
+ x13 := (uint64(arg1[53]) << 40)
+ x14 := (uint64(arg1[52]) << 32)
+ x15 := (uint64(arg1[51]) << 24)
+ x16 := (uint64(arg1[50]) << 16)
+ x17 := (uint64(arg1[49]) << 8)
+ x18 := arg1[48]
+ x19 := (uint64(arg1[47]) << 56)
+ x20 := (uint64(arg1[46]) << 48)
+ x21 := (uint64(arg1[45]) << 40)
+ x22 := (uint64(arg1[44]) << 32)
+ x23 := (uint64(arg1[43]) << 24)
+ x24 := (uint64(arg1[42]) << 16)
+ x25 := (uint64(arg1[41]) << 8)
+ x26 := arg1[40]
+ x27 := (uint64(arg1[39]) << 56)
+ x28 := (uint64(arg1[38]) << 48)
+ x29 := (uint64(arg1[37]) << 40)
+ x30 := (uint64(arg1[36]) << 32)
+ x31 := (uint64(arg1[35]) << 24)
+ x32 := (uint64(arg1[34]) << 16)
+ x33 := (uint64(arg1[33]) << 8)
+ x34 := arg1[32]
+ x35 := (uint64(arg1[31]) << 56)
+ x36 := (uint64(arg1[30]) << 48)
+ x37 := (uint64(arg1[29]) << 40)
+ x38 := (uint64(arg1[28]) << 32)
+ x39 := (uint64(arg1[27]) << 24)
+ x40 := (uint64(arg1[26]) << 16)
+ x41 := (uint64(arg1[25]) << 8)
+ x42 := arg1[24]
+ x43 := (uint64(arg1[23]) << 56)
+ x44 := (uint64(arg1[22]) << 48)
+ x45 := (uint64(arg1[21]) << 40)
+ x46 := (uint64(arg1[20]) << 32)
+ x47 := (uint64(arg1[19]) << 24)
+ x48 := (uint64(arg1[18]) << 16)
+ x49 := (uint64(arg1[17]) << 8)
+ x50 := arg1[16]
+ x51 := (uint64(arg1[15]) << 56)
+ x52 := (uint64(arg1[14]) << 48)
+ x53 := (uint64(arg1[13]) << 40)
+ x54 := (uint64(arg1[12]) << 32)
+ x55 := (uint64(arg1[11]) << 24)
+ x56 := (uint64(arg1[10]) << 16)
+ x57 := (uint64(arg1[9]) << 8)
+ x58 := arg1[8]
+ x59 := (uint64(arg1[7]) << 56)
+ x60 := (uint64(arg1[6]) << 48)
+ x61 := (uint64(arg1[5]) << 40)
+ x62 := (uint64(arg1[4]) << 32)
+ x63 := (uint64(arg1[3]) << 24)
+ x64 := (uint64(arg1[2]) << 16)
+ x65 := (uint64(arg1[1]) << 8)
+ x66 := arg1[0]
+ x67 := (x65 + uint64(x66))
+ x68 := (x64 + x67)
+ x69 := (x63 + x68)
+ x70 := (x62 + x69)
+ x71 := (x61 + x70)
+ x72 := (x60 + x71)
+ x73 := (x59 + x72)
+ x74 := (x57 + uint64(x58))
+ x75 := (x56 + x74)
+ x76 := (x55 + x75)
+ x77 := (x54 + x76)
+ x78 := (x53 + x77)
+ x79 := (x52 + x78)
+ x80 := (x51 + x79)
+ x81 := (x49 + uint64(x50))
+ x82 := (x48 + x81)
+ x83 := (x47 + x82)
+ x84 := (x46 + x83)
+ x85 := (x45 + x84)
+ x86 := (x44 + x85)
+ x87 := (x43 + x86)
+ x88 := (x41 + uint64(x42))
+ x89 := (x40 + x88)
+ x90 := (x39 + x89)
+ x91 := (x38 + x90)
+ x92 := (x37 + x91)
+ x93 := (x36 + x92)
+ x94 := (x35 + x93)
+ x95 := (x33 + uint64(x34))
+ x96 := (x32 + x95)
+ x97 := (x31 + x96)
+ x98 := (x30 + x97)
+ x99 := (x29 + x98)
+ x100 := (x28 + x99)
+ x101 := (x27 + x100)
+ x102 := (x25 + uint64(x26))
+ x103 := (x24 + x102)
+ x104 := (x23 + x103)
+ x105 := (x22 + x104)
+ x106 := (x21 + x105)
+ x107 := (x20 + x106)
+ x108 := (x19 + x107)
+ x109 := (x17 + uint64(x18))
+ x110 := (x16 + x109)
+ x111 := (x15 + x110)
+ x112 := (x14 + x111)
+ x113 := (x13 + x112)
+ x114 := (x12 + x113)
+ x115 := (x11 + x114)
+ x116 := (x9 + uint64(x10))
+ x117 := (x8 + x116)
+ x118 := (x7 + x117)
+ x119 := (x6 + x118)
+ x120 := (x5 + x119)
+ x121 := (x4 + x120)
+ x122 := (x3 + x121)
+ x123 := (x1 + uint64(x2))
+ out1[0] = x73
+ out1[1] = x80
+ out1[2] = x87
+ out1[3] = x94
+ out1[4] = x101
+ out1[5] = x108
+ out1[6] = x115
+ out1[7] = x122
+ out1[8] = x123
+}
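
Aside: p521FromBytes above packs the 66-byte little-endian encoding into eight
full 64-bit limbs plus a 9-bit top limb (per the input bounds, arg1[65] is at
most 0x1). A standalone sketch of the same packing, illustrative only and not
part of this patch, using encoding/binary instead of the generated
shift-and-add form:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// fromBytes mirrors what p521FromBytes computes: eight little-endian
	// 64-bit limbs, then the remaining 9 bits in the last limb.
	func fromBytes(b *[66]uint8) [9]uint64 {
		var out [9]uint64
		for i := 0; i < 8; i++ {
			out[i] = binary.LittleEndian.Uint64(b[i*8 : i*8+8])
		}
		out[8] = uint64(b[64]) | uint64(b[65])<<8
		return out
	}

	func main() {
		var b [66]uint8
		b[0], b[65] = 0x01, 0x01   // low bit and top (521st) bit set
		fmt.Println(fromBytes(&b)) // [1 0 0 0 0 0 0 0 256]
	}
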
diff --git a/src/crypto/internal/nistec/fiat/p521_invert.go b/src/crypto/internal/nistec/fiat/p521_invert.go
new file mode 100644
index 0000000..16c53e1
--- /dev/null
+++ b/src/crypto/internal/nistec/fiat/p521_invert.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by addchain. DO NOT EDIT.
+
+package fiat
+
+// Invert sets e = 1/x, and returns e.
+//
+// If x == 0, Invert returns e = 0.
+func (e *P521Element) Invert(x *P521Element) *P521Element {
+ // Inversion is implemented as exponentiation with exponent p − 2.
+ // The sequence of 13 multiplications and 520 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _1100 = _11 << 2
+ // _1111 = _11 + _1100
+ // _11110000 = _1111 << 4
+ // _11111111 = _1111 + _11110000
+ // x16 = _11111111 << 8 + _11111111
+ // x32 = x16 << 16 + x16
+ // x64 = x32 << 32 + x32
+ // x65 = 2*x64 + 1
+ // x129 = x65 << 64 + x64
+ // x130 = 2*x129 + 1
+ // x259 = x130 << 129 + x129
+ // x260 = 2*x259 + 1
+ // x519 = x260 << 259 + x259
+ // return x519 << 2 + 1
+ //
+
+ var z = new(P521Element).Set(e)
+ var t0 = new(P521Element)
+
+ z.Square(x)
+ z.Mul(x, z)
+ t0.Square(z)
+ for s := 1; s < 2; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 4; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 8; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 16; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 32; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t0.Mul(x, t0)
+ for s := 0; s < 64; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t0.Mul(x, t0)
+ for s := 0; s < 129; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ t0.Mul(x, t0)
+ for s := 0; s < 259; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 2; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+
+ return e.Set(z)
+}
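
The chain above is a fixed, constant-time schedule for computing x^(p-2) mod p.
A quick variable-time cross-check of that identity, illustrative only, using
math/big and the exported curve parameters:

	package main

	import (
		"crypto/elliptic"
		"fmt"
		"math/big"
	)

	func main() {
		// Fermat: x^(p-2) is the inverse of x mod p for prime p and x != 0.
		p := elliptic.P521().Params().P
		x := big.NewInt(12345)
		inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)
		check := new(big.Int).Mul(x, inv)
		fmt.Println(check.Mod(check, p)) // 1
	}
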
diff --git a/src/crypto/internal/nistec/generate.go b/src/crypto/internal/nistec/generate.go
new file mode 100644
index 0000000..0204bc1
--- /dev/null
+++ b/src/crypto/internal/nistec/generate.go
@@ -0,0 +1,639 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+// Running this generator requires addchain v0.4.0, which can be installed with
+//
+// go install github.com/mmcloughlin/addchain/cmd/addchain@v0.4.0
+//
+
+import (
+ "bytes"
+ "crypto/elliptic"
+ "fmt"
+ "go/format"
+ "io"
+ "log"
+ "math/big"
+ "os"
+ "os/exec"
+ "strings"
+ "text/template"
+)
+
+var curves = []struct {
+ P string
+ Element string
+ Params *elliptic.CurveParams
+ BuildTags string
+}{
+ {
+ P: "P224",
+ Element: "fiat.P224Element",
+ Params: elliptic.P224().Params(),
+ },
+ {
+ P: "P256",
+ Element: "fiat.P256Element",
+ Params: elliptic.P256().Params(),
+		BuildTags: "!amd64 && !arm64 && !ppc64le && !s390x",
+ },
+ {
+ P: "P384",
+ Element: "fiat.P384Element",
+ Params: elliptic.P384().Params(),
+ },
+ {
+ P: "P521",
+ Element: "fiat.P521Element",
+ Params: elliptic.P521().Params(),
+ },
+}
+
+func main() {
+ t := template.Must(template.New("tmplNISTEC").Parse(tmplNISTEC))
+
+ tmplAddchainFile, err := os.CreateTemp("", "addchain-template")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.Remove(tmplAddchainFile.Name())
+ if _, err := io.WriteString(tmplAddchainFile, tmplAddchain); err != nil {
+ log.Fatal(err)
+ }
+ if err := tmplAddchainFile.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ for _, c := range curves {
+ p := strings.ToLower(c.P)
+ elementLen := (c.Params.BitSize + 7) / 8
+ B := fmt.Sprintf("%#v", c.Params.B.FillBytes(make([]byte, elementLen)))
+ Gx := fmt.Sprintf("%#v", c.Params.Gx.FillBytes(make([]byte, elementLen)))
+ Gy := fmt.Sprintf("%#v", c.Params.Gy.FillBytes(make([]byte, elementLen)))
+
+ log.Printf("Generating %s.go...", p)
+ f, err := os.Create(p + ".go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer f.Close()
+ buf := &bytes.Buffer{}
+ if err := t.Execute(buf, map[string]interface{}{
+ "P": c.P, "p": p, "B": B, "Gx": Gx, "Gy": Gy,
+ "Element": c.Element, "ElementLen": elementLen,
+ "BuildTags": c.BuildTags,
+ }); err != nil {
+ log.Fatal(err)
+ }
+ out, err := format.Source(buf.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ if _, err := f.Write(out); err != nil {
+ log.Fatal(err)
+ }
+
+ // If p = 3 mod 4, implement modular square root by exponentiation.
+ mod4 := new(big.Int).Mod(c.Params.P, big.NewInt(4))
+ if mod4.Cmp(big.NewInt(3)) != 0 {
+ continue
+ }
+
+ exp := new(big.Int).Add(c.Params.P, big.NewInt(1))
+ exp.Div(exp, big.NewInt(4))
+
+ tmp, err := os.CreateTemp("", "addchain-"+p)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.Remove(tmp.Name())
+ cmd := exec.Command("addchain", "search", fmt.Sprintf("%d", exp))
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = tmp
+ if err := cmd.Run(); err != nil {
+ log.Fatal(err)
+ }
+ if err := tmp.Close(); err != nil {
+ log.Fatal(err)
+ }
+ cmd = exec.Command("addchain", "gen", "-tmpl", tmplAddchainFile.Name(), tmp.Name())
+ cmd.Stderr = os.Stderr
+ out, err = cmd.Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+ out = bytes.Replace(out, []byte("Element"), []byte(c.Element), -1)
+ out = bytes.Replace(out, []byte("sqrtCandidate"), []byte(p+"SqrtCandidate"), -1)
+ out, err = format.Source(out)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if _, err := f.Write(out); err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+const tmplNISTEC = `// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+{{ if .BuildTags }}
+//go:build {{ .BuildTags }}
+{{ end }}
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "crypto/subtle"
+ "errors"
+ "sync"
+)
+
+// {{.p}}ElementLength is the length of an element of the base or scalar field,
+// which have the same byte length for all NIST P curves.
+const {{.p}}ElementLength = {{ .ElementLen }}
+
+// {{.P}}Point is a {{.P}} point. The zero value is NOT valid.
+type {{.P}}Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *{{.Element}}
+}
+
+// New{{.P}}Point returns a new {{.P}}Point representing the point at infinity.
+func New{{.P}}Point() *{{.P}}Point {
+ return &{{.P}}Point{
+ x: new({{.Element}}),
+ y: new({{.Element}}).One(),
+ z: new({{.Element}}),
+ }
+}
+
+// SetGenerator sets p to the canonical generator and returns p.
+func (p *{{.P}}Point) SetGenerator() *{{.P}}Point {
+ p.x.SetBytes({{.Gx}})
+ p.y.SetBytes({{.Gy}})
+ p.z.One()
+ return p
+}
+
+// Set sets p = q and returns p.
+func (p *{{.P}}Point) Set(q *{{.P}}Point) *{{.P}}Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *{{.P}}Point) SetBytes(b []byte) (*{{.P}}Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(New{{.P}}Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*{{.p}}ElementLength && b[0] == 4:
+ x, err := new({{.Element}}).SetBytes(b[1 : 1+{{.p}}ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new({{.Element}}).SetBytes(b[1+{{.p}}ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := {{.p}}CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+{{.p}}ElementLength && (b[0] == 2 || b[0] == 3):
+ x, err := new({{.Element}}).SetBytes(b[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ // y² = x³ - 3x + b
+ y := {{.p}}Polynomial(new({{.Element}}), x)
+ if !{{.p}}Sqrt(y, y) {
+ return nil, errors.New("invalid {{.P}} compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ otherRoot := new({{.Element}})
+ otherRoot.Sub(otherRoot, y)
+ cond := y.Bytes()[{{.p}}ElementLength-1]&1 ^ b[0]&1
+ y.Select(otherRoot, y, int(cond))
+
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ default:
+ return nil, errors.New("invalid {{.P}} point encoding")
+ }
+}
+
+var _{{.p}}B *{{.Element}}
+var _{{.p}}BOnce sync.Once
+
+func {{.p}}B() *{{.Element}} {
+ _{{.p}}BOnce.Do(func() {
+ _{{.p}}B, _ = new({{.Element}}).SetBytes({{.B}})
+ })
+ return _{{.p}}B
+}
+
+// {{.p}}Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func {{.p}}Polynomial(y2, x *{{.Element}}) *{{.Element}} {
+ y2.Square(x)
+ y2.Mul(y2, x)
+
+ threeX := new({{.Element}}).Add(x, x)
+ threeX.Add(threeX, x)
+ y2.Sub(y2, threeX)
+
+ return y2.Add(y2, {{.p}}B())
+}
+
+func {{.p}}CheckOnCurve(x, y *{{.Element}}) error {
+ // y² = x³ - 3x + b
+ rhs := {{.p}}Polynomial(new({{.Element}}), x)
+ lhs := new({{.Element}}).Square(y)
+ if rhs.Equal(lhs) != 1 {
+ return errors.New("{{.P}} point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *{{.P}}Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+	var out [1 + 2*{{.p}}ElementLength]byte
+ return p.bytes(&out)
+}
+
+func (p *{{.P}}Point) bytes(out *[1 + 2*{{.p}}ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new({{.Element}}).Invert(p.z)
+ x := new({{.Element}}).Mul(p.x, zinv)
+ y := new({{.Element}}).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, x.Bytes()...)
+ buf = append(buf, y.Bytes()...)
+ return buf
+}
+
+// BytesX returns the encoding of the x-coordinate of p, as specified in SEC 1,
+// Version 2.0, Section 2.3.5, or an error if p is the point at infinity.
+func (p *{{.P}}Point) BytesX() ([]byte, error) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [{{.p}}ElementLength]byte
+ return p.bytesX(&out)
+}
+
+func (p *{{.P}}Point) bytesX(out *[{{.p}}ElementLength]byte) ([]byte, error) {
+ if p.z.IsZero() == 1 {
+ return nil, errors.New("{{.P}} point is the point at infinity")
+ }
+
+ zinv := new({{.Element}}).Invert(p.z)
+ x := new({{.Element}}).Mul(p.x, zinv)
+
+ return append(out[:0], x.Bytes()...), nil
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *{{.P}}Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + {{.p}}ElementLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *{{.P}}Point) bytesCompressed(out *[1 + {{.p}}ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new({{.Element}}).Invert(p.z)
+ x := new({{.Element}}).Mul(p.x, zinv)
+ y := new({{.Element}}).Mul(p.y, zinv)
+
+ // Encode the sign of the y coordinate (indicated by the least significant
+ // bit) as the encoding type (2 or 3).
+ buf := append(out[:0], 2)
+ buf[0] |= y.Bytes()[{{.p}}ElementLength-1] & 1
+ buf = append(buf, x.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *{{.P}}Point) Add(p1, p2 *{{.P}}Point) *{{.P}}Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new({{.Element}}).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new({{.Element}}).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new({{.Element}}).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new({{.Element}}).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new({{.Element}}).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new({{.Element}}).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new({{.Element}}).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new({{.Element}}).Mul({{.p}}B(), t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul({{.p}}B(), y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *{{.P}}Point) Double(p *{{.P}}Point) *{{.P}}Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new({{.Element}}).Square(p.x) // t0 := X ^ 2
+ t1 := new({{.Element}}).Square(p.y) // t1 := Y ^ 2
+ t2 := new({{.Element}}).Square(p.z) // t2 := Z ^ 2
+ t3 := new({{.Element}}).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new({{.Element}}).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new({{.Element}}).Mul({{.p}}B(), t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new({{.Element}}).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul({{.p}}B(), z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *{{.P}}Point) Select(p1, p2 *{{.P}}Point, cond int) *{{.P}}Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// A {{.p}}Table holds the first 15 multiples of a point at offset -1, so [1]P
+// is at table[0], [15]P is at table[14], and [0]P is implicitly the identity
+// point.
+type {{.p}}Table [15]*{{.P}}Point
+
+// Select selects the n-th multiple of the table base point into p. It works in
+// constant time by iterating over every entry of the table. n must be in [0, 15].
+func (table *{{.p}}Table) Select(p *{{.P}}Point, n uint8) {
+ if n >= 16 {
+ panic("nistec: internal error: {{.p}}Table called with out-of-bounds value")
+ }
+ p.Set(New{{.P}}Point())
+ for i := uint8(1); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(i, n)
+ p.Select(table[i-1], p, cond)
+ }
+}
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *{{.P}}Point) ScalarMult(q *{{.P}}Point, scalar []byte) (*{{.P}}Point, error) {
+ // Compute a {{.p}}Table for the base point q. The explicit New{{.P}}Point
+ // calls get inlined, letting the allocations live on the stack.
+ var table = {{.p}}Table{New{{.P}}Point(), New{{.P}}Point(), New{{.P}}Point(),
+ New{{.P}}Point(), New{{.P}}Point(), New{{.P}}Point(), New{{.P}}Point(),
+ New{{.P}}Point(), New{{.P}}Point(), New{{.P}}Point(), New{{.P}}Point(),
+ New{{.P}}Point(), New{{.P}}Point(), New{{.P}}Point(), New{{.P}}Point()}
+ table[0].Set(q)
+ for i := 1; i < 15; i += 2 {
+ table[i].Double(table[i/2])
+ table[i+1].Add(table[i], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := New{{.P}}Point()
+ p.Set(New{{.P}}Point())
+ for i, byte := range scalar {
+ // No need to double on the first iteration, as p is the identity at
+ // this point, and [N]∞ = ∞.
+ if i != 0 {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ }
+
+ windowValue := byte >> 4
+ table.Select(t, windowValue)
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ windowValue = byte & 0b1111
+ table.Select(t, windowValue)
+ p.Add(p, t)
+ }
+
+ return p, nil
+}
+
+var {{.p}}GeneratorTable *[{{.p}}ElementLength * 2]{{.p}}Table
+var {{.p}}GeneratorTableOnce sync.Once
+
+// generatorTable returns a sequence of {{.p}}Tables. The first table contains
+// multiples of G. Each successive table is the previous table doubled four
+// times.
+func (p *{{.P}}Point) generatorTable() *[{{.p}}ElementLength * 2]{{.p}}Table {
+ {{.p}}GeneratorTableOnce.Do(func() {
+ {{.p}}GeneratorTable = new([{{.p}}ElementLength * 2]{{.p}}Table)
+ base := New{{.P}}Point().SetGenerator()
+ for i := 0; i < {{.p}}ElementLength*2; i++ {
+ {{.p}}GeneratorTable[i][0] = New{{.P}}Point().Set(base)
+ for j := 1; j < 15; j++ {
+ {{.p}}GeneratorTable[i][j] = New{{.P}}Point().Add({{.p}}GeneratorTable[i][j-1], base)
+ }
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ }
+ })
+ return {{.p}}GeneratorTable
+}
+
+// ScalarBaseMult sets p = scalar * B, where B is the canonical generator, and
+// returns p.
+func (p *{{.P}}Point) ScalarBaseMult(scalar []byte) (*{{.P}}Point, error) {
+ if len(scalar) != {{.p}}ElementLength {
+ return nil, errors.New("invalid scalar length")
+ }
+ tables := p.generatorTable()
+
+ // This is also a scalar multiplication with a four-bit window like in
+ // ScalarMult, but in this case the doublings are precomputed. The value
+ // [windowValue]G added at iteration k would normally get doubled
+ // (totIterations-k)×4 times, but with a larger precomputation we can
+ // instead add [2^((totIterations-k)×4)][windowValue]G and avoid the
+ // doublings between iterations.
+ t := New{{.P}}Point()
+ p.Set(New{{.P}}Point())
+ tableIndex := len(tables) - 1
+ for _, byte := range scalar {
+ windowValue := byte >> 4
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+
+ windowValue = byte & 0b1111
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+ }
+
+ return p, nil
+}
+
+// {{.p}}Sqrt sets e to a square root of x. If x is not a square, {{.p}}Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func {{.p}}Sqrt(e, x *{{ .Element }}) (isSquare bool) {
+ candidate := new({{ .Element }})
+ {{.p}}SqrtCandidate(candidate, x)
+ square := new({{ .Element }}).Square(candidate)
+ if square.Equal(x) != 1 {
+ return false
+ }
+ e.Set(candidate)
+ return true
+}
+`
+
+const tmplAddchain = `
+// sqrtCandidate sets z to a square root candidate for x. z and x must not overlap.
+func sqrtCandidate(z, x *Element) {
+ // Since p = 3 mod 4, exponentiation by (p + 1) / 4 yields a square root candidate.
+ //
+ // The sequence of {{ .Ops.Adds }} multiplications and {{ .Ops.Doubles }} squarings is derived from the
+ // following addition chain generated with {{ .Meta.Module }} {{ .Meta.ReleaseTag }}.
+ //
+ {{- range lines (format .Script) }}
+ // {{ . }}
+ {{- end }}
+ //
+
+ {{- range .Program.Temporaries }}
+ var {{ . }} = new(Element)
+ {{- end }}
+ {{ range $i := .Program.Instructions -}}
+ {{- with add $i.Op }}
+ {{ $i.Output }}.Mul({{ .X }}, {{ .Y }})
+ {{- end -}}
+
+ {{- with double $i.Op }}
+ {{ $i.Output }}.Square({{ .X }})
+ {{- end -}}
+
+ {{- with shift $i.Op -}}
+ {{- $first := 0 -}}
+ {{- if ne $i.Output.Identifier .X.Identifier }}
+ {{ $i.Output }}.Square({{ .X }})
+ {{- $first = 1 -}}
+ {{- end }}
+ for s := {{ $first }}; s < {{ .S }}; s++ {
+ {{ $i.Output }}.Square({{ $i.Output }})
+ }
+ {{- end -}}
+ {{- end }}
+}
+`
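
In the template above, an addchain "shift" op (x << s in the chain comments)
lowers to one Square followed by a loop, i.e. s squarings in total. A small
variable-time sketch of the identity this relies on, namely that s squarings
compute x^(2^s):

	package main

	import (
		"fmt"
		"math/big"
	)

	// shift computes x^(2^s) mod p by s repeated squarings, the same
	// operation the generated Square loops perform on field elements.
	func shift(x *big.Int, s int, p *big.Int) *big.Int {
		z := new(big.Int).Set(x)
		for i := 0; i < s; i++ {
			z.Mul(z, z)
			z.Mod(z, p)
		}
		return z
	}

	func main() {
		p := big.NewInt(97) // any odd prime works for this identity
		x := big.NewInt(5)
		got := shift(x, 4, p)
		want := new(big.Int).Exp(x, big.NewInt(16), p) // 2^4 = 16
		fmt.Println(got.Cmp(want) == 0)                // true
	}
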
diff --git a/src/crypto/internal/nistec/nistec.go b/src/crypto/internal/nistec/nistec.go
new file mode 100644
index 0000000..d898d40
--- /dev/null
+++ b/src/crypto/internal/nistec/nistec.go
@@ -0,0 +1,15 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package nistec implements the NIST P elliptic curves from FIPS 186-4.
+//
+// This package uses fiat-crypto or specialized assembly and Go code for its
+// backend field arithmetic (not math/big) and exposes constant-time, heap
+// allocation-free, byte slice-based safe APIs. Group operations use modern and
+// safe complete addition formulas where possible. The point at infinity is
+// handled and encoded according to SEC 1, Version 2.0, and invalid curve points
+// can't be represented.
+package nistec
+
+//go:generate go run generate.go
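
A sketch of the intended call pattern follows; this is hypothetical usage, and
since the package is internal it only compiles inside the standard library
tree:

	package main

	import (
		"crypto/internal/nistec"
		"crypto/rand"
		"fmt"
	)

	func main() {
		scalar := make([]byte, 32) // ScalarBaseMult requires a 32-byte scalar for P-256
		if _, err := rand.Read(scalar); err != nil {
			panic(err)
		}
		p, err := nistec.NewP256Point().ScalarBaseMult(scalar)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%x\n", p.Bytes()) // SEC 1 uncompressed encoding: 04 || X || Y
	}
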
diff --git a/src/crypto/internal/nistec/nistec_test.go b/src/crypto/internal/nistec/nistec_test.go
new file mode 100644
index 0000000..9103608
--- /dev/null
+++ b/src/crypto/internal/nistec/nistec_test.go
@@ -0,0 +1,297 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nistec_test
+
+import (
+ "bytes"
+ "crypto/elliptic"
+ "crypto/internal/nistec"
+ "fmt"
+ "internal/testenv"
+ "math/big"
+ "math/rand"
+ "testing"
+)
+
+func TestAllocations(t *testing.T) {
+ testenv.SkipIfOptimizationOff(t)
+
+ t.Run("P224", func(t *testing.T) {
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := nistec.NewP224Point().SetGenerator()
+ scalar := make([]byte, 28)
+ rand.Read(scalar)
+ p.ScalarBaseMult(scalar)
+ p.ScalarMult(p, scalar)
+ out := p.Bytes()
+ if _, err := nistec.NewP224Point().SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ out = p.BytesCompressed()
+ if _, err := p.SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1f", allocs)
+ }
+ })
+ t.Run("P256", func(t *testing.T) {
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := nistec.NewP256Point().SetGenerator()
+ scalar := make([]byte, 32)
+ rand.Read(scalar)
+ p.ScalarBaseMult(scalar)
+ p.ScalarMult(p, scalar)
+ out := p.Bytes()
+ if _, err := nistec.NewP256Point().SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ out = p.BytesCompressed()
+ if _, err := p.SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1f", allocs)
+ }
+ })
+ t.Run("P384", func(t *testing.T) {
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := nistec.NewP384Point().SetGenerator()
+ scalar := make([]byte, 48)
+ rand.Read(scalar)
+ p.ScalarBaseMult(scalar)
+ p.ScalarMult(p, scalar)
+ out := p.Bytes()
+ if _, err := nistec.NewP384Point().SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ out = p.BytesCompressed()
+ if _, err := p.SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1f", allocs)
+ }
+ })
+ t.Run("P521", func(t *testing.T) {
+ if allocs := testing.AllocsPerRun(100, func() {
+ p := nistec.NewP521Point().SetGenerator()
+ scalar := make([]byte, 66)
+ rand.Read(scalar)
+ p.ScalarBaseMult(scalar)
+ p.ScalarMult(p, scalar)
+ out := p.Bytes()
+ if _, err := nistec.NewP521Point().SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ out = p.BytesCompressed()
+ if _, err := p.SetBytes(out); err != nil {
+ t.Fatal(err)
+ }
+ }); allocs > 0 {
+ t.Errorf("expected zero allocations, got %0.1f", allocs)
+ }
+ })
+}
+
+type nistPoint[T any] interface {
+ Bytes() []byte
+ SetGenerator() T
+ SetBytes([]byte) (T, error)
+ Add(T, T) T
+ Double(T) T
+ ScalarMult(T, []byte) (T, error)
+ ScalarBaseMult([]byte) (T, error)
+}
+
+func TestEquivalents(t *testing.T) {
+ t.Run("P224", func(t *testing.T) {
+ testEquivalents(t, nistec.NewP224Point, elliptic.P224())
+ })
+ t.Run("P256", func(t *testing.T) {
+ testEquivalents(t, nistec.NewP256Point, elliptic.P256())
+ })
+ t.Run("P384", func(t *testing.T) {
+ testEquivalents(t, nistec.NewP384Point, elliptic.P384())
+ })
+ t.Run("P521", func(t *testing.T) {
+ testEquivalents(t, nistec.NewP521Point, elliptic.P521())
+ })
+}
+
+func testEquivalents[P nistPoint[P]](t *testing.T, newPoint func() P, c elliptic.Curve) {
+ p := newPoint().SetGenerator()
+
+ elementSize := (c.Params().BitSize + 7) / 8
+ two := make([]byte, elementSize)
+ two[len(two)-1] = 2
+ nPlusTwo := make([]byte, elementSize)
+ new(big.Int).Add(c.Params().N, big.NewInt(2)).FillBytes(nPlusTwo)
+
+ p1 := newPoint().Double(p)
+ p2 := newPoint().Add(p, p)
+ p3, err := newPoint().ScalarMult(p, two)
+ if err != nil {
+ t.Fatal(err)
+ }
+ p4, err := newPoint().ScalarBaseMult(two)
+ if err != nil {
+ t.Fatal(err)
+ }
+ p5, err := newPoint().ScalarMult(p, nPlusTwo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ p6, err := newPoint().ScalarBaseMult(nPlusTwo)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(p1.Bytes(), p2.Bytes()) {
+ t.Error("P+P != 2*P")
+ }
+ if !bytes.Equal(p1.Bytes(), p3.Bytes()) {
+ t.Error("P+P != [2]P")
+ }
+ if !bytes.Equal(p1.Bytes(), p4.Bytes()) {
+ t.Error("G+G != [2]G")
+ }
+ if !bytes.Equal(p1.Bytes(), p5.Bytes()) {
+ t.Error("P+P != [N+2]P")
+ }
+ if !bytes.Equal(p1.Bytes(), p6.Bytes()) {
+ t.Error("G+G != [N+2]G")
+ }
+}
+
+func TestScalarMult(t *testing.T) {
+ t.Run("P224", func(t *testing.T) {
+ testScalarMult(t, nistec.NewP224Point, elliptic.P224())
+ })
+ t.Run("P256", func(t *testing.T) {
+ testScalarMult(t, nistec.NewP256Point, elliptic.P256())
+ })
+ t.Run("P384", func(t *testing.T) {
+ testScalarMult(t, nistec.NewP384Point, elliptic.P384())
+ })
+ t.Run("P521", func(t *testing.T) {
+ testScalarMult(t, nistec.NewP521Point, elliptic.P521())
+ })
+}
+
+func testScalarMult[P nistPoint[P]](t *testing.T, newPoint func() P, c elliptic.Curve) {
+ G := newPoint().SetGenerator()
+ checkScalar := func(t *testing.T, scalar []byte) {
+ p1, err := newPoint().ScalarBaseMult(scalar)
+ fatalIfErr(t, err)
+ p2, err := newPoint().ScalarMult(G, scalar)
+ fatalIfErr(t, err)
+ if !bytes.Equal(p1.Bytes(), p2.Bytes()) {
+ t.Error("[k]G != ScalarBaseMult(k)")
+ }
+
+ d := new(big.Int).SetBytes(scalar)
+ d.Sub(c.Params().N, d)
+ d.Mod(d, c.Params().N)
+ g1, err := newPoint().ScalarBaseMult(d.FillBytes(make([]byte, len(scalar))))
+ fatalIfErr(t, err)
+ g1.Add(g1, p1)
+ if !bytes.Equal(g1.Bytes(), newPoint().Bytes()) {
+ t.Error("[N - k]G + [k]G != ∞")
+ }
+ }
+
+ byteLen := len(c.Params().N.Bytes())
+ bitLen := c.Params().N.BitLen()
+ t.Run("0", func(t *testing.T) { checkScalar(t, make([]byte, byteLen)) })
+ t.Run("1", func(t *testing.T) {
+ checkScalar(t, big.NewInt(1).FillBytes(make([]byte, byteLen)))
+ })
+ t.Run("N-1", func(t *testing.T) {
+ checkScalar(t, new(big.Int).Sub(c.Params().N, big.NewInt(1)).Bytes())
+ })
+ t.Run("N", func(t *testing.T) { checkScalar(t, c.Params().N.Bytes()) })
+ t.Run("N+1", func(t *testing.T) {
+ checkScalar(t, new(big.Int).Add(c.Params().N, big.NewInt(1)).Bytes())
+ })
+ t.Run("all1s", func(t *testing.T) {
+ s := new(big.Int).Lsh(big.NewInt(1), uint(bitLen))
+ s.Sub(s, big.NewInt(1))
+ checkScalar(t, s.Bytes())
+ })
+ if testing.Short() {
+ return
+ }
+ for i := 0; i < bitLen; i++ {
+ t.Run(fmt.Sprintf("1<<%d", i), func(t *testing.T) {
+ s := new(big.Int).Lsh(big.NewInt(1), uint(i))
+ checkScalar(t, s.FillBytes(make([]byte, byteLen)))
+ })
+ }
+ // Test N+1...N+32 since they risk overlapping with precomputed table values
+ // in the final additions.
+ for i := int64(2); i <= 32; i++ {
+ t.Run(fmt.Sprintf("N+%d", i), func(t *testing.T) {
+ checkScalar(t, new(big.Int).Add(c.Params().N, big.NewInt(i)).Bytes())
+ })
+ }
+}
+
+func fatalIfErr(t *testing.T, err error) {
+ t.Helper()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func BenchmarkScalarMult(b *testing.B) {
+ b.Run("P224", func(b *testing.B) {
+ benchmarkScalarMult(b, nistec.NewP224Point().SetGenerator(), 28)
+ })
+ b.Run("P256", func(b *testing.B) {
+ benchmarkScalarMult(b, nistec.NewP256Point().SetGenerator(), 32)
+ })
+ b.Run("P384", func(b *testing.B) {
+ benchmarkScalarMult(b, nistec.NewP384Point().SetGenerator(), 48)
+ })
+ b.Run("P521", func(b *testing.B) {
+ benchmarkScalarMult(b, nistec.NewP521Point().SetGenerator(), 66)
+ })
+}
+
+func benchmarkScalarMult[P nistPoint[P]](b *testing.B, p P, scalarSize int) {
+ scalar := make([]byte, scalarSize)
+ rand.Read(scalar)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ p.ScalarMult(p, scalar)
+ }
+}
+
+func BenchmarkScalarBaseMult(b *testing.B) {
+ b.Run("P224", func(b *testing.B) {
+ benchmarkScalarBaseMult(b, nistec.NewP224Point().SetGenerator(), 28)
+ })
+ b.Run("P256", func(b *testing.B) {
+ benchmarkScalarBaseMult(b, nistec.NewP256Point().SetGenerator(), 32)
+ })
+ b.Run("P384", func(b *testing.B) {
+ benchmarkScalarBaseMult(b, nistec.NewP384Point().SetGenerator(), 48)
+ })
+ b.Run("P521", func(b *testing.B) {
+ benchmarkScalarBaseMult(b, nistec.NewP521Point().SetGenerator(), 66)
+ })
+}
+
+func benchmarkScalarBaseMult[P nistPoint[P]](b *testing.B, p P, scalarSize int) {
+ scalar := make([]byte, scalarSize)
+ rand.Read(scalar)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ p.ScalarBaseMult(scalar)
+ }
+}
diff --git a/src/crypto/internal/nistec/p224.go b/src/crypto/internal/nistec/p224.go
new file mode 100644
index 0000000..faa971d
--- /dev/null
+++ b/src/crypto/internal/nistec/p224.go
@@ -0,0 +1,453 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "crypto/subtle"
+ "errors"
+ "sync"
+)
+
+// p224ElementLength is the length of an element of the base or scalar field,
+// which have the same byte length for all NIST P curves.
+const p224ElementLength = 28
+
+// P224Point is a P224 point. The zero value is NOT valid.
+type P224Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P224Element
+}
+
+// NewP224Point returns a new P224Point representing the point at infinity.
+func NewP224Point() *P224Point {
+ return &P224Point{
+ x: new(fiat.P224Element),
+ y: new(fiat.P224Element).One(),
+ z: new(fiat.P224Element),
+ }
+}
+
+// SetGenerator sets p to the canonical generator and returns p.
+func (p *P224Point) SetGenerator() *P224Point {
+ p.x.SetBytes([]byte{0xb7, 0xe, 0xc, 0xbd, 0x6b, 0xb4, 0xbf, 0x7f, 0x32, 0x13, 0x90, 0xb9, 0x4a, 0x3, 0xc1, 0xd3, 0x56, 0xc2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xd6, 0x11, 0x5c, 0x1d, 0x21})
+ p.y.SetBytes([]byte{0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, 0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x7, 0x47, 0x64, 0x44, 0xd5, 0x81, 0x99, 0x85, 0x0, 0x7e, 0x34})
+ p.z.One()
+ return p
+}
+
+// Set sets p = q and returns p.
+func (p *P224Point) Set(q *P224Point) *P224Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P224Point) SetBytes(b []byte) (*P224Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP224Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p224ElementLength && b[0] == 4:
+ x, err := new(fiat.P224Element).SetBytes(b[1 : 1+p224ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P224Element).SetBytes(b[1+p224ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p224CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+p224ElementLength && (b[0] == 2 || b[0] == 3):
+ x, err := new(fiat.P224Element).SetBytes(b[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ // y² = x³ - 3x + b
+ y := p224Polynomial(new(fiat.P224Element), x)
+ if !p224Sqrt(y, y) {
+ return nil, errors.New("invalid P224 compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ otherRoot := new(fiat.P224Element)
+ otherRoot.Sub(otherRoot, y)
+ cond := y.Bytes()[p224ElementLength-1]&1 ^ b[0]&1
+ y.Select(otherRoot, y, int(cond))
+
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ default:
+ return nil, errors.New("invalid P224 point encoding")
+ }
+}
+
+var _p224B *fiat.P224Element
+var _p224BOnce sync.Once
+
+func p224B() *fiat.P224Element {
+ _p224BOnce.Do(func() {
+ _p224B, _ = new(fiat.P224Element).SetBytes([]byte{0xb4, 0x5, 0xa, 0x85, 0xc, 0x4, 0xb3, 0xab, 0xf5, 0x41, 0x32, 0x56, 0x50, 0x44, 0xb0, 0xb7, 0xd7, 0xbf, 0xd8, 0xba, 0x27, 0xb, 0x39, 0x43, 0x23, 0x55, 0xff, 0xb4})
+ })
+ return _p224B
+}
+
+// p224Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func p224Polynomial(y2, x *fiat.P224Element) *fiat.P224Element {
+ y2.Square(x)
+ y2.Mul(y2, x)
+
+ threeX := new(fiat.P224Element).Add(x, x)
+ threeX.Add(threeX, x)
+ y2.Sub(y2, threeX)
+
+ return y2.Add(y2, p224B())
+}
+
+func p224CheckOnCurve(x, y *fiat.P224Element) error {
+ // y² = x³ - 3x + b
+ rhs := p224Polynomial(new(fiat.P224Element), x)
+ lhs := new(fiat.P224Element).Square(y)
+ if rhs.Equal(lhs) != 1 {
+ return errors.New("P224 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P224Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + 2*p224ElementLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P224Point) bytes(out *[1 + 2*p224ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P224Element).Invert(p.z)
+ x := new(fiat.P224Element).Mul(p.x, zinv)
+ y := new(fiat.P224Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, x.Bytes()...)
+ buf = append(buf, y.Bytes()...)
+ return buf
+}
+
+// BytesX returns the encoding of the x-coordinate of p, as specified in SEC 1,
+// Version 2.0, Section 2.3.5, or an error if p is the point at infinity.
+func (p *P224Point) BytesX() ([]byte, error) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p224ElementLength]byte
+ return p.bytesX(&out)
+}
+
+func (p *P224Point) bytesX(out *[p224ElementLength]byte) ([]byte, error) {
+ if p.z.IsZero() == 1 {
+ return nil, errors.New("P224 point is the point at infinity")
+ }
+
+ zinv := new(fiat.P224Element).Invert(p.z)
+ x := new(fiat.P224Element).Mul(p.x, zinv)
+
+ return append(out[:0], x.Bytes()...), nil
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *P224Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + p224ElementLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *P224Point) bytesCompressed(out *[1 + p224ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P224Element).Invert(p.z)
+ x := new(fiat.P224Element).Mul(p.x, zinv)
+ y := new(fiat.P224Element).Mul(p.y, zinv)
+
+ // Encode the sign of the y coordinate (indicated by the least significant
+ // bit) as the encoding type (2 or 3).
+ buf := append(out[:0], 2)
+ buf[0] |= y.Bytes()[p224ElementLength-1] & 1
+ buf = append(buf, x.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P224Point) Add(p1, p2 *P224Point) *P224Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P224Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P224Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P224Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P224Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P224Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P224Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P224Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P224Element).Mul(p224B(), t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p224B(), y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P224Point) Double(p *P224Point) *P224Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P224Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P224Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P224Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P224Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P224Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P224Element).Mul(p224B(), t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P224Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p224B(), z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P224Point) Select(p1, p2 *P224Point, cond int) *P224Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// A p224Table holds the first 15 multiples of a point at offset -1, so [1]P
+// is at table[0], [15]P is at table[14], and [0]P is implicitly the identity
+// point.
+type p224Table [15]*P224Point
+
+// Select selects the n-th multiple of the table base point into p. It works in
+// constant time by iterating over every entry of the table. n must be in [0, 15].
+func (table *p224Table) Select(p *P224Point, n uint8) {
+ if n >= 16 {
+ panic("nistec: internal error: p224Table called with out-of-bounds value")
+ }
+ p.Set(NewP224Point())
+ for i := uint8(1); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(i, n)
+ p.Select(table[i-1], p, cond)
+ }
+}
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P224Point) ScalarMult(q *P224Point, scalar []byte) (*P224Point, error) {
+ // Compute a p224Table for the base point q. The explicit NewP224Point
+ // calls get inlined, letting the allocations live on the stack.
+ var table = p224Table{NewP224Point(), NewP224Point(), NewP224Point(),
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point(),
+ NewP224Point(), NewP224Point(), NewP224Point(), NewP224Point()}
+ table[0].Set(q)
+ for i := 1; i < 15; i += 2 {
+ table[i].Double(table[i/2])
+ table[i+1].Add(table[i], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := NewP224Point()
+ p.Set(NewP224Point())
+ for i, byte := range scalar {
+ // No need to double on the first iteration, as p is the identity at
+ // this point, and [N]∞ = ∞.
+ if i != 0 {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ }
+
+ windowValue := byte >> 4
+ table.Select(t, windowValue)
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ windowValue = byte & 0b1111
+ table.Select(t, windowValue)
+ p.Add(p, t)
+ }
+
+ return p, nil
+}
+
+var p224GeneratorTable *[p224ElementLength * 2]p224Table
+var p224GeneratorTableOnce sync.Once
+
+// generatorTable returns a sequence of p224Tables. The first table contains
+// multiples of G. Each successive table is the previous table doubled four
+// times.
+func (p *P224Point) generatorTable() *[p224ElementLength * 2]p224Table {
+ p224GeneratorTableOnce.Do(func() {
+ p224GeneratorTable = new([p224ElementLength * 2]p224Table)
+ base := NewP224Point().SetGenerator()
+ for i := 0; i < p224ElementLength*2; i++ {
+ p224GeneratorTable[i][0] = NewP224Point().Set(base)
+ for j := 1; j < 15; j++ {
+ p224GeneratorTable[i][j] = NewP224Point().Add(p224GeneratorTable[i][j-1], base)
+ }
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ }
+ })
+ return p224GeneratorTable
+}
+
+// ScalarBaseMult sets p = scalar * B, where B is the canonical generator, and
+// returns p.
+func (p *P224Point) ScalarBaseMult(scalar []byte) (*P224Point, error) {
+ if len(scalar) != p224ElementLength {
+ return nil, errors.New("invalid scalar length")
+ }
+ tables := p.generatorTable()
+
+ // This is also a scalar multiplication with a four-bit window like in
+ // ScalarMult, but in this case the doublings are precomputed. The value
+ // [windowValue]G added at iteration k would normally get doubled
+ // (totIterations-k)×4 times, but with a larger precomputation we can
+ // instead add [2^((totIterations-k)×4)][windowValue]G and avoid the
+ // doublings between iterations.
+ t := NewP224Point()
+ p.Set(NewP224Point())
+ tableIndex := len(tables) - 1
+ for _, byte := range scalar {
+ windowValue := byte >> 4
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+
+ windowValue = byte & 0b1111
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+ }
+
+ return p, nil
+}
+
+// p224Sqrt sets e to a square root of x. If x is not a square, p224Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func p224Sqrt(e, x *fiat.P224Element) (isSquare bool) {
+ candidate := new(fiat.P224Element)
+ p224SqrtCandidate(candidate, x)
+ square := new(fiat.P224Element).Square(candidate)
+ if square.Equal(x) != 1 {
+ return false
+ }
+ e.Set(candidate)
+ return true
+}
diff --git a/src/crypto/internal/nistec/p224_sqrt.go b/src/crypto/internal/nistec/p224_sqrt.go
new file mode 100644
index 0000000..0c77579
--- /dev/null
+++ b/src/crypto/internal/nistec/p224_sqrt.go
@@ -0,0 +1,132 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "sync"
+)
+
+var p224GG *[96]fiat.P224Element
+var p224GGOnce sync.Once
+
+// p224SqrtCandidate sets r to a square root candidate for x. r and x must not overlap.
+func p224SqrtCandidate(r, x *fiat.P224Element) {
+	// Since p = 1 mod 4, we can't use exponentiation by (p + 1) / 4 as we do
+	// for the other primes. Instead, we implement a variation of Tonelli–Shanks.
+ // The constant-time implementation is adapted from Thomas Pornin's ecGFp5.
+ //
+ // https://github.com/pornin/ecgfp5/blob/82325b965/rust/src/field.rs#L337-L385
+
+ // p = q*2^n + 1 with q odd -> q = 2^128 - 1 and n = 96
+ // g^(2^n) = 1 -> g = 11 ^ q (where 11 is the smallest non-square)
+ // GG[j] = g^(2^j) for j = 0 to n-1
+
+ p224GGOnce.Do(func() {
+ p224GG = new([96]fiat.P224Element)
+ for i := range p224GG {
+ if i == 0 {
+ p224GG[i].SetBytes([]byte{0x6a, 0x0f, 0xec, 0x67,
+ 0x85, 0x98, 0xa7, 0x92, 0x0c, 0x55, 0xb2, 0xd4,
+ 0x0b, 0x2d, 0x6f, 0xfb, 0xbe, 0xa3, 0xd8, 0xce,
+ 0xf3, 0xfb, 0x36, 0x32, 0xdc, 0x69, 0x1b, 0x74})
+ } else {
+ p224GG[i].Square(&p224GG[i-1])
+ }
+ }
+ })
+
+ // r <- x^((q+1)/2) = x^(2^127)
+ // v <- x^q = x^(2^128-1)
+
+ // Compute x^(2^127-1) first.
+ //
+ // The sequence of 10 multiplications and 126 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // _1111110 = 2*_111111
+ // _1111111 = 1 + _1111110
+ // x12 = _1111110 << 5 + _111111
+ // x24 = x12 << 12 + x12
+ // i36 = x24 << 7
+ // x31 = _1111111 + i36
+ // x48 = i36 << 17 + x24
+ // x96 = x48 << 48 + x48
+ // return x96 << 31 + x31
+ //
+ var t0 = new(fiat.P224Element)
+ var t1 = new(fiat.P224Element)
+
+ r.Square(x)
+ r.Mul(x, r)
+ r.Square(r)
+ r.Mul(x, r)
+ t0.Square(r)
+ for s := 1; s < 3; s++ {
+ t0.Square(t0)
+ }
+ t0.Mul(r, t0)
+ t1.Square(t0)
+ r.Mul(x, t1)
+ for s := 0; s < 5; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 12; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 7; s++ {
+ t1.Square(t1)
+ }
+ r.Mul(r, t1)
+ for s := 0; s < 17; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ t1.Square(t0)
+ for s := 1; s < 48; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 31; s++ {
+ t0.Square(t0)
+ }
+ r.Mul(r, t0)
+
+ // v = x^(2^127-1)^2 * x
+ v := new(fiat.P224Element).Square(r)
+ v.Mul(v, x)
+
+ // r = x^(2^127-1) * x
+ r.Mul(r, x)
+
+ // for i = n-1 down to 1:
+ // w = v^(2^(i-1))
+ // if w == -1 then:
+ // v <- v*GG[n-i]
+ // r <- r*GG[n-i-1]
+
+ var p224MinusOne = new(fiat.P224Element).Sub(
+ new(fiat.P224Element), new(fiat.P224Element).One())
+
+ for i := 96 - 1; i >= 1; i-- {
+ w := new(fiat.P224Element).Set(v)
+ for j := 0; j < i-1; j++ {
+ w.Square(w)
+ }
+ cond := w.Equal(p224MinusOne)
+ v.Select(t0.Mul(v, &p224GG[96-i]), v, cond)
+ r.Select(t0.Mul(r, &p224GG[96-i-1]), r, cond)
+ }
+}
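
For contrast, the textbook variable-time shape of this computation can be seen
with math/big, whose ModSqrt also uses Tonelli–Shanks when p = 1 mod 4; the
in-tree code instead runs a fixed 96-iteration loop with Select so the
execution trace is independent of the input. An illustrative sketch:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// p = 2^224 - 2^96 + 1, so p-1 = q * 2^n with q = 2^128 - 1 odd and
		// n = 96, the decomposition described in the comments above.
		p := new(big.Int).Lsh(big.NewInt(1), 224)
		p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 96))
		p.Add(p, big.NewInt(1))

		x := big.NewInt(9)              // 9 = 3^2, trivially a quadratic residue
		r := new(big.Int).ModSqrt(x, p) // variable-time Tonelli–Shanks
		check := new(big.Int).Mul(r, r)
		check.Mod(check, p)
		fmt.Println(check.Cmp(x) == 0) // true
	}
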
diff --git a/src/crypto/internal/nistec/p256.go b/src/crypto/internal/nistec/p256.go
new file mode 100644
index 0000000..3cfa5fb
--- /dev/null
+++ b/src/crypto/internal/nistec/p256.go
@@ -0,0 +1,509 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+//go:build !amd64 && !arm64 && !ppc64le && !s390x
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "crypto/subtle"
+ "errors"
+ "sync"
+)
+
+// p256ElementLength is the length of an element of the base or scalar field,
+// which have the same byte length for all NIST P curves.
+const p256ElementLength = 32
+
+// P256Point is a P256 point. The zero value is NOT valid.
+type P256Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P256Element
+}
+
+// NewP256Point returns a new P256Point representing the point at infinity.
+func NewP256Point() *P256Point {
+ return &P256Point{
+ x: new(fiat.P256Element),
+ y: new(fiat.P256Element).One(),
+ z: new(fiat.P256Element),
+ }
+}
+
+// SetGenerator sets p to the canonical generator and returns p.
+func (p *P256Point) SetGenerator() *P256Point {
+ p.x.SetBytes([]byte{0x6b, 0x17, 0xd1, 0xf2, 0xe1, 0x2c, 0x42, 0x47, 0xf8, 0xbc, 0xe6, 0xe5, 0x63, 0xa4, 0x40, 0xf2, 0x77, 0x3, 0x7d, 0x81, 0x2d, 0xeb, 0x33, 0xa0, 0xf4, 0xa1, 0x39, 0x45, 0xd8, 0x98, 0xc2, 0x96})
+ p.y.SetBytes([]byte{0x4f, 0xe3, 0x42, 0xe2, 0xfe, 0x1a, 0x7f, 0x9b, 0x8e, 0xe7, 0xeb, 0x4a, 0x7c, 0xf, 0x9e, 0x16, 0x2b, 0xce, 0x33, 0x57, 0x6b, 0x31, 0x5e, 0xce, 0xcb, 0xb6, 0x40, 0x68, 0x37, 0xbf, 0x51, 0xf5})
+ p.z.One()
+ return p
+}
+
+// Set sets p = q and returns p.
+func (p *P256Point) Set(q *P256Point) *P256Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P256Point) SetBytes(b []byte) (*P256Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP256Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p256ElementLength && b[0] == 4:
+ x, err := new(fiat.P256Element).SetBytes(b[1 : 1+p256ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P256Element).SetBytes(b[1+p256ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p256CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+p256ElementLength && (b[0] == 2 || b[0] == 3):
+ x, err := new(fiat.P256Element).SetBytes(b[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ // y² = x³ - 3x + b
+ y := p256Polynomial(new(fiat.P256Element), x)
+ if !p256Sqrt(y, y) {
+ return nil, errors.New("invalid P256 compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ otherRoot := new(fiat.P256Element)
+ otherRoot.Sub(otherRoot, y)
+ cond := y.Bytes()[p256ElementLength-1]&1 ^ b[0]&1
+ y.Select(otherRoot, y, int(cond))
+
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ default:
+ return nil, errors.New("invalid P256 point encoding")
+ }
+}
+
+var _p256B *fiat.P256Element
+var _p256BOnce sync.Once
+
+func p256B() *fiat.P256Element {
+ _p256BOnce.Do(func() {
+ _p256B, _ = new(fiat.P256Element).SetBytes([]byte{0x5a, 0xc6, 0x35, 0xd8, 0xaa, 0x3a, 0x93, 0xe7, 0xb3, 0xeb, 0xbd, 0x55, 0x76, 0x98, 0x86, 0xbc, 0x65, 0x1d, 0x6, 0xb0, 0xcc, 0x53, 0xb0, 0xf6, 0x3b, 0xce, 0x3c, 0x3e, 0x27, 0xd2, 0x60, 0x4b})
+ })
+ return _p256B
+}
+
+// p256Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func p256Polynomial(y2, x *fiat.P256Element) *fiat.P256Element {
+ y2.Square(x)
+ y2.Mul(y2, x)
+
+ threeX := new(fiat.P256Element).Add(x, x)
+ threeX.Add(threeX, x)
+ y2.Sub(y2, threeX)
+
+ return y2.Add(y2, p256B())
+}
+
+func p256CheckOnCurve(x, y *fiat.P256Element) error {
+ // y² = x³ - 3x + b
+ rhs := p256Polynomial(new(fiat.P256Element), x)
+ lhs := new(fiat.P256Element).Square(y)
+ if rhs.Equal(lhs) != 1 {
+ return errors.New("P256 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P256Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + 2*p256ElementLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P256Point) bytes(out *[1 + 2*p256ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P256Element).Invert(p.z)
+ x := new(fiat.P256Element).Mul(p.x, zinv)
+ y := new(fiat.P256Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, x.Bytes()...)
+ buf = append(buf, y.Bytes()...)
+ return buf
+}
+
+// BytesX returns the encoding of the x-coordinate of p, as specified in SEC 1,
+// Version 2.0, Section 2.3.5, or an error if p is the point at infinity.
+func (p *P256Point) BytesX() ([]byte, error) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p256ElementLength]byte
+ return p.bytesX(&out)
+}
+
+func (p *P256Point) bytesX(out *[p256ElementLength]byte) ([]byte, error) {
+ if p.z.IsZero() == 1 {
+ return nil, errors.New("P256 point is the point at infinity")
+ }
+
+ zinv := new(fiat.P256Element).Invert(p.z)
+ x := new(fiat.P256Element).Mul(p.x, zinv)
+
+ return append(out[:0], x.Bytes()...), nil
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *P256Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + p256ElementLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *P256Point) bytesCompressed(out *[1 + p256ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P256Element).Invert(p.z)
+ x := new(fiat.P256Element).Mul(p.x, zinv)
+ y := new(fiat.P256Element).Mul(p.y, zinv)
+
+ // Encode the sign of the y coordinate (indicated by the least significant
+ // bit) as the encoding type (2 or 3).
+ buf := append(out[:0], 2)
+ buf[0] |= y.Bytes()[p256ElementLength-1] & 1
+ buf = append(buf, x.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P256Point) Add(p1, p2 *P256Point) *P256Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P256Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P256Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P256Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P256Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P256Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P256Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P256Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P256Element).Mul(p256B(), t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p256B(), y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P256Point) Double(p *P256Point) *P256Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P256Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P256Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P256Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P256Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P256Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P256Element).Mul(p256B(), t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P256Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p256B(), z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P256Point) Select(p1, p2 *P256Point, cond int) *P256Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// A p256Table holds the first 15 multiples of a point at offset -1, so [1]P
+// is at table[0], [15]P is at table[14], and [0]P is implicitly the identity
+// point.
+type p256Table [15]*P256Point
+
+// Select selects the n-th multiple of the table base point into p. It works in
+// constant time by iterating over every entry of the table. n must be in [0, 15].
+func (table *p256Table) Select(p *P256Point, n uint8) {
+ if n >= 16 {
+ panic("nistec: internal error: p256Table called with out-of-bounds value")
+ }
+ p.Set(NewP256Point())
+ for i := uint8(1); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(i, n)
+ p.Select(table[i-1], p, cond)
+ }
+}
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P256Point) ScalarMult(q *P256Point, scalar []byte) (*P256Point, error) {
+ // Compute a p256Table for the base point q. The explicit NewP256Point
+ // calls get inlined, letting the allocations live on the stack.
+ var table = p256Table{NewP256Point(), NewP256Point(), NewP256Point(),
+ NewP256Point(), NewP256Point(), NewP256Point(), NewP256Point(),
+ NewP256Point(), NewP256Point(), NewP256Point(), NewP256Point(),
+ NewP256Point(), NewP256Point(), NewP256Point(), NewP256Point()}
+ table[0].Set(q)
+ for i := 1; i < 15; i += 2 {
+ table[i].Double(table[i/2])
+ table[i+1].Add(table[i], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
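+	// For example, the scalar byte 0xb3 is processed as the window values
+	// 0xb and 0x3, each added between runs of four doublings, since
+	// 0xb3 = 0xb<<4 | 0x3.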
+ t := NewP256Point()
+ p.Set(NewP256Point())
+ for i, byte := range scalar {
+ // No need to double on the first iteration, as p is the identity at
+ // this point, and [N]∞ = ∞.
+ if i != 0 {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ }
+
+ windowValue := byte >> 4
+ table.Select(t, windowValue)
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ windowValue = byte & 0b1111
+ table.Select(t, windowValue)
+ p.Add(p, t)
+ }
+
+ return p, nil
+}
+
+var p256GeneratorTable *[p256ElementLength * 2]p256Table
+var p256GeneratorTableOnce sync.Once
+
+// generatorTable returns a sequence of p256Tables. The first table contains
+// multiples of G. Each successive table is the previous table doubled four
+// times.
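+//
+// There are p256ElementLength × 2 = 64 tables, one per four-bit window of a
+// 256-bit scalar, so ScalarBaseMult needs no doublings at all.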
+func (p *P256Point) generatorTable() *[p256ElementLength * 2]p256Table {
+ p256GeneratorTableOnce.Do(func() {
+ p256GeneratorTable = new([p256ElementLength * 2]p256Table)
+ base := NewP256Point().SetGenerator()
+ for i := 0; i < p256ElementLength*2; i++ {
+ p256GeneratorTable[i][0] = NewP256Point().Set(base)
+ for j := 1; j < 15; j++ {
+ p256GeneratorTable[i][j] = NewP256Point().Add(p256GeneratorTable[i][j-1], base)
+ }
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ }
+ })
+ return p256GeneratorTable
+}
+
+// ScalarBaseMult sets p = scalar * B, where B is the canonical generator, and
+// returns p.
+func (p *P256Point) ScalarBaseMult(scalar []byte) (*P256Point, error) {
+ if len(scalar) != p256ElementLength {
+ return nil, errors.New("invalid scalar length")
+ }
+ tables := p.generatorTable()
+
+ // This is also a scalar multiplication with a four-bit window like in
+ // ScalarMult, but in this case the doublings are precomputed. The value
+ // [windowValue]G added at iteration k would normally get doubled
+ // (totIterations-k)×4 times, but with a larger precomputation we can
+ // instead add [2^((totIterations-k)×4)][windowValue]G and avoid the
+ // doublings between iterations.
+ t := NewP256Point()
+ p.Set(NewP256Point())
+ tableIndex := len(tables) - 1
+ for _, byte := range scalar {
+ windowValue := byte >> 4
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+
+ windowValue = byte & 0b1111
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+ }
+
+ return p, nil
+}
+
+// p256Sqrt sets e to a square root of x. If x is not a square, p256Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func p256Sqrt(e, x *fiat.P256Element) (isSquare bool) {
+ candidate := new(fiat.P256Element)
+ p256SqrtCandidate(candidate, x)
+ square := new(fiat.P256Element).Square(candidate)
+ if square.Equal(x) != 1 {
+ return false
+ }
+ e.Set(candidate)
+ return true
+}
+
+// p256SqrtCandidate sets z to a square root candidate for x. z and x must not overlap.
+func p256SqrtCandidate(z, x *fiat.P256Element) {
+ // Since p = 3 mod 4, exponentiation by (p + 1) / 4 yields a square root candidate.
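+	// This works because (x^((p+1)/4))² = x^((p+1)/2) = x · x^((p-1)/2), and
+	// by Euler's criterion x^((p-1)/2) = 1 exactly when x is a nonzero square.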
+ //
+ // The sequence of 7 multiplications and 253 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _1100 = _11 << 2
+ // _1111 = _11 + _1100
+ // _11110000 = _1111 << 4
+ // _11111111 = _1111 + _11110000
+ // x16 = _11111111 << 8 + _11111111
+ // x32 = x16 << 16 + x16
+ // return ((x32 << 32 + 1) << 96 + 1) << 94
+ //
+ var t0 = new(fiat.P256Element)
+
+ z.Square(x)
+ z.Mul(x, z)
+ t0.Square(z)
+ for s := 1; s < 2; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 4; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 8; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ t0.Square(z)
+ for s := 1; s < 16; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 32; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+ for s := 0; s < 96; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+ for s := 0; s < 94; s++ {
+ z.Square(z)
+ }
+}
diff --git a/src/crypto/internal/nistec/p256_asm.go b/src/crypto/internal/nistec/p256_asm.go
new file mode 100644
index 0000000..99a22b8
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_asm.go
@@ -0,0 +1,744 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the Go wrapper for the constant-time, 64-bit assembly
+// implementation of P256. The optimizations performed here are described in
+// detail in:
+// S. Gueron and V. Krasnov, "Fast prime field elliptic-curve cryptography with
+// 256-bit primes"
+// https://link.springer.com/article/10.1007%2Fs13389-014-0090-x
+// https://eprint.iacr.org/2013/816.pdf
+
+//go:build amd64 || arm64 || ppc64le || s390x
+
+package nistec
+
+import (
+ _ "embed"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+ "runtime"
+ "unsafe"
+)
+
+// p256Element is a P-256 base field element in [0, P-1] in the Montgomery
+// domain (with R = 2²⁵⁶) as four uint64 limbs in little-endian order.
+type p256Element [4]uint64
+
+// p256One is one in the Montgomery domain.
+var p256One = p256Element{0x0000000000000001, 0xffffffff00000000,
+ 0xffffffffffffffff, 0x00000000fffffffe}
+
+var p256Zero = p256Element{}
+
+// p256P is the field prime, 2²⁵⁶ - 2²²⁴ + 2¹⁹² + 2⁹⁶ - 1, as plain
+// little-endian limbs (not a Montgomery-domain value).
+var p256P = p256Element{0xffffffffffffffff, 0x00000000ffffffff,
+ 0x0000000000000000, 0xffffffff00000001}
+
+// P256Point is a P-256 point. The zero value should not be assumed to be valid
+// (although it is in this implementation).
+type P256Point struct {
+ // (X:Y:Z) are Jacobian coordinates where x = X/Z² and y = Y/Z³. The point
+ // at infinity can be represented by any set of coordinates with Z = 0.
+ x, y, z p256Element
+}
+
+// NewP256Point returns a new P256Point representing the point at infinity.
+func NewP256Point() *P256Point {
+ return &P256Point{
+ x: p256One, y: p256One, z: p256Zero,
+ }
+}
+
+// SetGenerator sets p to the canonical generator and returns p.
+func (p *P256Point) SetGenerator() *P256Point {
+ p.x = p256Element{0x79e730d418a9143c, 0x75ba95fc5fedb601,
+ 0x79fb732b77622510, 0x18905f76a53755c6}
+ p.y = p256Element{0xddf25357ce95560a, 0x8b4ab8e4ba19e45c,
+ 0xd2e88688dd21f325, 0x8571ff1825885d85}
+ p.z = p256One
+ return p
+}
+
+// Set sets p = q and returns p.
+func (p *P256Point) Set(q *P256Point) *P256Point {
+ p.x, p.y, p.z = q.x, q.y, q.z
+ return p
+}
+
+const p256ElementLength = 32
+const p256UncompressedLength = 1 + 2*p256ElementLength
+const p256CompressedLength = 1 + p256ElementLength
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
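+//
+// For example, a 65-byte encoding starting with 0x04 is parsed as an
+// uncompressed point, a 33-byte encoding starting with 0x02 or 0x03 as a
+// compressed point, and the single byte 0x00 as the point at infinity.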
+func (p *P256Point) SetBytes(b []byte) (*P256Point, error) {
+ // p256Mul operates in the Montgomery domain with R = 2²⁵⁶ mod p. Thus rr
+ // here is R in the Montgomery domain, or R×R mod p. See comment in
+ // P256OrdInverse about how this is used.
+ rr := p256Element{0x0000000000000003, 0xfffffffbffffffff,
+ 0xfffffffffffffffe, 0x00000004fffffffd}
+
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP256Point()), nil
+
+ // Uncompressed form.
+ case len(b) == p256UncompressedLength && b[0] == 4:
+ var r P256Point
+ p256BigToLittle(&r.x, (*[32]byte)(b[1:33]))
+ p256BigToLittle(&r.y, (*[32]byte)(b[33:65]))
+ if p256LessThanP(&r.x) == 0 || p256LessThanP(&r.y) == 0 {
+ return nil, errors.New("invalid P256 element encoding")
+ }
+ p256Mul(&r.x, &r.x, &rr)
+ p256Mul(&r.y, &r.y, &rr)
+ if err := p256CheckOnCurve(&r.x, &r.y); err != nil {
+ return nil, err
+ }
+ r.z = p256One
+ return p.Set(&r), nil
+
+ // Compressed form.
+ case len(b) == p256CompressedLength && (b[0] == 2 || b[0] == 3):
+ var r P256Point
+ p256BigToLittle(&r.x, (*[32]byte)(b[1:33]))
+ if p256LessThanP(&r.x) == 0 {
+ return nil, errors.New("invalid P256 element encoding")
+ }
+ p256Mul(&r.x, &r.x, &rr)
+
+ // y² = x³ - 3x + b
+ p256Polynomial(&r.y, &r.x)
+ if !p256Sqrt(&r.y, &r.y) {
+ return nil, errors.New("invalid P256 compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ yy := new(p256Element)
+ p256FromMont(yy, &r.y)
+ cond := int(yy[0]&1) ^ int(b[0]&1)
+ p256NegCond(&r.y, cond)
+
+ r.z = p256One
+ return p.Set(&r), nil
+
+ default:
+ return nil, errors.New("invalid P256 point encoding")
+ }
+}
+
+// p256Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func p256Polynomial(y2, x *p256Element) *p256Element {
+ x3 := new(p256Element)
+ p256Sqr(x3, x, 1)
+ p256Mul(x3, x3, x)
+
+ threeX := new(p256Element)
+ p256Add(threeX, x, x)
+ p256Add(threeX, threeX, x)
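+	// Negate 3x unconditionally so the subtraction becomes an addition below.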
+ p256NegCond(threeX, 1)
+
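+	// p256B is the curve parameter b in the Montgomery domain.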
+ p256B := &p256Element{0xd89cdf6229c4bddf, 0xacf005cd78843090,
+ 0xe5a220abf7212ed6, 0xdc30061d04874834}
+
+ p256Add(x3, x3, threeX)
+ p256Add(x3, x3, p256B)
+
+ *y2 = *x3
+ return y2
+}
+
+func p256CheckOnCurve(x, y *p256Element) error {
+ // y² = x³ - 3x + b
+ rhs := p256Polynomial(new(p256Element), x)
+ lhs := new(p256Element)
+ p256Sqr(lhs, y, 1)
+ if p256Equal(lhs, rhs) != 1 {
+ return errors.New("P256 point not on curve")
+ }
+ return nil
+}
+
+// p256LessThanP returns 1 if x < p, and 0 otherwise. Note that a p256Element is
+// not allowed to be equal to or greater than p, so if this function returns 0
+// then x is invalid.
+func p256LessThanP(x *p256Element) int {
+ var b uint64
+ _, b = bits.Sub64(x[0], p256P[0], b)
+ _, b = bits.Sub64(x[1], p256P[1], b)
+ _, b = bits.Sub64(x[2], p256P[2], b)
+ _, b = bits.Sub64(x[3], p256P[3], b)
+ return int(b)
+}
+
+// p256Add sets res = x + y.
+func p256Add(res, x, y *p256Element) {
+ var c, b uint64
+ t1 := make([]uint64, 4)
+ t1[0], c = bits.Add64(x[0], y[0], 0)
+ t1[1], c = bits.Add64(x[1], y[1], c)
+ t1[2], c = bits.Add64(x[2], y[2], c)
+ t1[3], c = bits.Add64(x[3], y[3], c)
+ t2 := make([]uint64, 4)
+ t2[0], b = bits.Sub64(t1[0], p256P[0], 0)
+ t2[1], b = bits.Sub64(t1[1], p256P[1], b)
+ t2[2], b = bits.Sub64(t1[2], p256P[2], b)
+ t2[3], b = bits.Sub64(t1[3], p256P[3], b)
+ // Three options:
+ // - a+b < p
+ // then c is 0, b is 1, and t1 is correct
+ // - p <= a+b < 2^256
+ // then c is 0, b is 0, and t2 is correct
+ // - 2^256 <= a+b
+ // then c is 1, b is 1, and t2 is correct
+ t2Mask := (c ^ b) - 1
+ res[0] = (t1[0] & ^t2Mask) | (t2[0] & t2Mask)
+ res[1] = (t1[1] & ^t2Mask) | (t2[1] & t2Mask)
+ res[2] = (t1[2] & ^t2Mask) | (t2[2] & t2Mask)
+ res[3] = (t1[3] & ^t2Mask) | (t2[3] & t2Mask)
+}
+
+// p256Sqrt sets e to a square root of x. If x is not a square, p256Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func p256Sqrt(e, x *p256Element) (isSquare bool) {
+ t0, t1 := new(p256Element), new(p256Element)
+
+ // Since p = 3 mod 4, exponentiation by (p + 1) / 4 yields a square root candidate.
+ //
+ // The sequence of 7 multiplications and 253 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _1100 = _11 << 2
+ // _1111 = _11 + _1100
+ // _11110000 = _1111 << 4
+ // _11111111 = _1111 + _11110000
+ // x16 = _11111111 << 8 + _11111111
+ // x32 = x16 << 16 + x16
+ // return ((x32 << 32 + 1) << 96 + 1) << 94
+ //
+ p256Sqr(t0, x, 1)
+ p256Mul(t0, x, t0)
+ p256Sqr(t1, t0, 2)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t1, t0, 4)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t1, t0, 8)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t1, t0, 16)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t0, t0, 32)
+ p256Mul(t0, x, t0)
+ p256Sqr(t0, t0, 96)
+ p256Mul(t0, x, t0)
+ p256Sqr(t0, t0, 94)
+
+ p256Sqr(t1, t0, 1)
+ if p256Equal(t1, x) != 1 {
+ return false
+ }
+ *e = *t0
+ return true
+}
+
+// The following assembly functions are implemented in p256_asm_*.s
+
+// Montgomery multiplication. Sets res = in1 * in2 * R⁻¹ mod p.
+//
+//go:noescape
+func p256Mul(res, in1, in2 *p256Element)
+
+// Montgomery square, repeated n times (n >= 1).
+//
+//go:noescape
+func p256Sqr(res, in *p256Element, n int)
+
+// Montgomery multiplication by one, which divides by R and so brings a value
+// out of the Montgomery domain. Sets res = in * R⁻¹ mod p.
+//
+//go:noescape
+func p256FromMont(res, in *p256Element)
+
+// If cond is not 0, sets val = -val mod p.
+//
+//go:noescape
+func p256NegCond(val *p256Element, cond int)
+
+// If cond is 0, sets res = b, otherwise sets res = a.
+//
+//go:noescape
+func p256MovCond(res, a, b *P256Point, cond int)
+
+//go:noescape
+func p256BigToLittle(res *p256Element, in *[32]byte)
+
+//go:noescape
+func p256LittleToBig(res *[32]byte, in *p256Element)
+
+//go:noescape
+func p256OrdBigToLittle(res *p256OrdElement, in *[32]byte)
+
+//go:noescape
+func p256OrdLittleToBig(res *[32]byte, in *p256OrdElement)
+
+// p256Table is a table of the first 16 multiples of a point. Points are stored
+// at an index offset of -1 so [8]P is at index 7, P is at 0, and [16]P is at 15.
+// [0]P is the point at infinity and it's not stored.
+type p256Table [16]P256Point
+
+// p256Select sets res to the point at index idx - 1 in the table, or to an
+// all-zero value if idx is 0. idx must be in [0, 16]. It executes in
+// constant time.
+//
+//go:noescape
+func p256Select(res *P256Point, table *p256Table, idx int)
+
+// p256AffinePoint is a point in affine coordinates (x, y). x and y are still
+// Montgomery domain elements. The point can't be the point at infinity.
+type p256AffinePoint struct {
+ x, y p256Element
+}
+
+// p256AffineTable is a table of the first 32 multiples of a point. Points are
+// stored at an index offset of -1 like in p256Table, and [0]P is not stored.
+type p256AffineTable [32]p256AffinePoint
+
+// p256Precomputed is a series of precomputed multiples of G, the canonical
+// generator. The first p256AffineTable contains multiples of G. The second one
+// multiples of [2⁶]G, the third one of [2¹²]G, and so on, where each successive
+// table is the previous table doubled six times. Six is the width of the
+// sliding window used in p256ScalarMult, and having each table already
+// pre-doubled lets us avoid the doublings between windows entirely. This table
+// MUST NOT be modified, as it aliases into p256PrecomputedEmbed below.
+var p256Precomputed *[43]p256AffineTable
+
+//go:embed p256_asm_table.bin
+var p256PrecomputedEmbed string
+
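+// init points p256Precomputed at the embedded table without copying it, by
+// reading the data pointer out of the string header. On big-endian s390x the
+// limbs are byte-swapped into a freshly allocated table first.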
+func init() {
+ p256PrecomputedPtr := (*unsafe.Pointer)(unsafe.Pointer(&p256PrecomputedEmbed))
+ if runtime.GOARCH == "s390x" {
+ var newTable [43 * 32 * 2 * 4]uint64
+ for i, x := range (*[43 * 32 * 2 * 4][8]byte)(*p256PrecomputedPtr) {
+ newTable[i] = binary.LittleEndian.Uint64(x[:])
+ }
+ newTablePtr := unsafe.Pointer(&newTable)
+ p256PrecomputedPtr = &newTablePtr
+ }
+ p256Precomputed = (*[43]p256AffineTable)(*p256PrecomputedPtr)
+}
+
+// p256SelectAffine sets res to the point at index idx - 1 in the table, or to
+// an all-zero value if idx is 0. idx must be in [0, 32]. It executes in
+// constant time.
+//
+//go:noescape
+func p256SelectAffine(res *p256AffinePoint, table *p256AffineTable, idx int)
+
+// Point addition with an affine point and constant time conditions.
+// If zero is 0, sets res = in2. If sel is 0, sets res = in1.
+// If sign is not 0, sets res = in1 + -in2. Otherwise, sets res = in1 + in2.
+//
+//go:noescape
+func p256PointAddAffineAsm(res, in1 *P256Point, in2 *p256AffinePoint, sign, sel, zero int)
+
+// Point addition. Sets res = in1 + in2. Returns one if the two input points
+// were equal and zero otherwise. If in1 or in2 are the point at infinity, res
+// and the return value are undefined.
+//
+//go:noescape
+func p256PointAddAsm(res, in1, in2 *P256Point) int
+
+// Point doubling. Sets res = in + in. in can be the point at infinity.
+//
+//go:noescape
+func p256PointDoubleAsm(res, in *P256Point)
+
+// p256OrdElement is a P-256 scalar field element in [0, ord(G)-1] in the
+// Montgomery domain (with R = 2²⁵⁶) as four uint64 limbs in little-endian order.
+type p256OrdElement [4]uint64
+
+// p256OrdReduce ensures s is in the range [0, ord(G)-1].
+func p256OrdReduce(s *p256OrdElement) {
+ // Since 2 * ord(G) > 2²⁵⁶, we can just conditionally subtract ord(G),
+ // keeping the result if it doesn't underflow.
+ t0, b := bits.Sub64(s[0], 0xf3b9cac2fc632551, 0)
+ t1, b := bits.Sub64(s[1], 0xbce6faada7179e84, b)
+ t2, b := bits.Sub64(s[2], 0xffffffffffffffff, b)
+ t3, b := bits.Sub64(s[3], 0xffffffff00000000, b)
+ tMask := b - 1 // zero if subtraction underflowed
+ s[0] ^= (t0 ^ s[0]) & tMask
+ s[1] ^= (t1 ^ s[1]) & tMask
+ s[2] ^= (t2 ^ s[2]) & tMask
+ s[3] ^= (t3 ^ s[3]) & tMask
+}
+
+// Add sets q = r1 + r2, and returns q. The points may overlap.
+func (q *P256Point) Add(r1, r2 *P256Point) *P256Point {
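+	// p256PointAddAsm is undefined for equal or infinite inputs, so compute
+	// both the sum and the double and select the correct result without
+	// branching.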
+ var sum, double P256Point
+ r1IsInfinity := r1.isInfinity()
+ r2IsInfinity := r2.isInfinity()
+ pointsEqual := p256PointAddAsm(&sum, r1, r2)
+ p256PointDoubleAsm(&double, r1)
+ p256MovCond(&sum, &double, &sum, pointsEqual)
+ p256MovCond(&sum, r1, &sum, r2IsInfinity)
+ p256MovCond(&sum, r2, &sum, r1IsInfinity)
+ return q.Set(&sum)
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P256Point) Double(p *P256Point) *P256Point {
+ var double P256Point
+ p256PointDoubleAsm(&double, p)
+ return q.Set(&double)
+}
+
+// ScalarBaseMult sets r = scalar * generator, where scalar is a 32-byte
+// big-endian value, and returns r. If scalar is not 32 bytes long,
+// ScalarBaseMult returns an error and the receiver is unchanged.
+func (r *P256Point) ScalarBaseMult(scalar []byte) (*P256Point, error) {
+ if len(scalar) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ scalarReversed := new(p256OrdElement)
+ p256OrdBigToLittle(scalarReversed, (*[32]byte)(scalar))
+ p256OrdReduce(scalarReversed)
+
+ r.p256BaseMult(scalarReversed)
+ return r, nil
+}
+
+// ScalarMult sets r = scalar * q, where scalar is a 32-byte big-endian value,
+// and returns r. If scalar is not 32 bytes long, ScalarMult returns an
+// error and the receiver is unchanged.
+func (r *P256Point) ScalarMult(q *P256Point, scalar []byte) (*P256Point, error) {
+ if len(scalar) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ scalarReversed := new(p256OrdElement)
+ p256OrdBigToLittle(scalarReversed, (*[32]byte)(scalar))
+ p256OrdReduce(scalarReversed)
+
+ r.Set(q).p256ScalarMult(scalarReversed)
+ return r, nil
+}
+
+// uint64IsZero returns 1 if x is zero and 0 otherwise.
+func uint64IsZero(x uint64) int {
+ x = ^x
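+	// x is now all ones iff the input was zero; fold all 64 bits into bit 0
+	// so the final AND extracts the answer in constant time.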
+ x &= x >> 32
+ x &= x >> 16
+ x &= x >> 8
+ x &= x >> 4
+ x &= x >> 2
+ x &= x >> 1
+ return int(x & 1)
+}
+
+// p256Equal returns 1 if a and b are equal and 0 otherwise.
+func p256Equal(a, b *p256Element) int {
+ var acc uint64
+ for i := range a {
+ acc |= a[i] ^ b[i]
+ }
+ return uint64IsZero(acc)
+}
+
+// isInfinity returns 1 if p is the point at infinity and 0 otherwise.
+func (p *P256Point) isInfinity() int {
+ return p256Equal(&p.z, &p256Zero)
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P256Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p256UncompressedLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P256Point) bytes(out *[p256UncompressedLength]byte) []byte {
+ // The proper representation of the point at infinity is a single zero byte.
+ if p.isInfinity() == 1 {
+ return append(out[:0], 0)
+ }
+
+ x, y := new(p256Element), new(p256Element)
+ p.affineFromMont(x, y)
+
+ out[0] = 4 // Uncompressed form.
+ p256LittleToBig((*[32]byte)(out[1:33]), x)
+ p256LittleToBig((*[32]byte)(out[33:65]), y)
+
+ return out[:]
+}
+
+// affineFromMont sets (x, y) to the affine coordinates of p, converted out of the
+// Montgomery domain.
+func (p *P256Point) affineFromMont(x, y *p256Element) {
+ p256Inverse(y, &p.z)
+ p256Sqr(x, y, 1)
+ p256Mul(y, y, x)
+
+ p256Mul(x, &p.x, x)
+ p256Mul(y, &p.y, y)
+
+ p256FromMont(x, x)
+ p256FromMont(y, y)
+}
+
+// BytesX returns the encoding of the x-coordinate of p, as specified in SEC 1,
+// Version 2.0, Section 2.3.5, or an error if p is the point at infinity.
+func (p *P256Point) BytesX() ([]byte, error) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p256ElementLength]byte
+ return p.bytesX(&out)
+}
+
+func (p *P256Point) bytesX(out *[p256ElementLength]byte) ([]byte, error) {
+ if p.isInfinity() == 1 {
+ return nil, errors.New("P256 point is the point at infinity")
+ }
+
+ x := new(p256Element)
+ p256Inverse(x, &p.z)
+ p256Sqr(x, x, 1)
+ p256Mul(x, &p.x, x)
+ p256FromMont(x, x)
+ p256LittleToBig((*[32]byte)(out[:]), x)
+
+ return out[:], nil
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *P256Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p256CompressedLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *P256Point) bytesCompressed(out *[p256CompressedLength]byte) []byte {
+ if p.isInfinity() == 1 {
+ return append(out[:0], 0)
+ }
+
+ x, y := new(p256Element), new(p256Element)
+ p.affineFromMont(x, y)
+
+ out[0] = 2 | byte(y[0]&1)
+ p256LittleToBig((*[32]byte)(out[1:33]), x)
+
+ return out[:]
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P256Point) Select(p1, p2 *P256Point, cond int) *P256Point {
+ p256MovCond(q, p1, p2, cond)
+ return q
+}
+
+// p256Inverse sets out to in⁻¹ mod p. If in is zero, out will be zero.
+func p256Inverse(out, in *p256Element) {
+ // Inversion is calculated through exponentiation by p - 2, per Fermat's
+ // little theorem.
+ //
+ // The sequence of 12 multiplications and 255 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain
+ // v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // x12 = _111111 << 6 + _111111
+ // x15 = x12 << 3 + _111
+ // x16 = 2*x15 + 1
+ // x32 = x16 << 16 + x16
+ // i53 = x32 << 15
+ // x47 = x15 + i53
+ // i263 = ((i53 << 17 + 1) << 143 + x47) << 47
+ // return (x47 + i263) << 2 + 1
+ //
+ var z = new(p256Element)
+ var t0 = new(p256Element)
+ var t1 = new(p256Element)
+
+ p256Sqr(z, in, 1)
+ p256Mul(z, in, z)
+ p256Sqr(z, z, 1)
+ p256Mul(z, in, z)
+ p256Sqr(t0, z, 3)
+ p256Mul(t0, z, t0)
+ p256Sqr(t1, t0, 6)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t0, t0, 3)
+ p256Mul(z, z, t0)
+ p256Sqr(t0, z, 1)
+ p256Mul(t0, in, t0)
+ p256Sqr(t1, t0, 16)
+ p256Mul(t0, t0, t1)
+ p256Sqr(t0, t0, 15)
+ p256Mul(z, z, t0)
+ p256Sqr(t0, t0, 17)
+ p256Mul(t0, in, t0)
+ p256Sqr(t0, t0, 143)
+ p256Mul(t0, z, t0)
+ p256Sqr(t0, t0, 47)
+ p256Mul(z, z, t0)
+ p256Sqr(z, z, 2)
+ p256Mul(out, in, z)
+}
+
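+// boothW5 and boothW6 decode a window of 5 (respectively 6) scalar bits,
+// overlapping the previous window by one bit, into a signed digit using a
+// modified Booth encoding. They return the digit's absolute value, at most
+// 16 (respectively 32), and a sign bit. Signed digits let the precomputed
+// tables hold only positive multiples; negative ones are obtained by
+// conditionally negating y with p256NegCond.
+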
+func boothW5(in uint) (int, int) {
+ var s uint = ^((in >> 5) - 1)
+ var d uint = (1 << 6) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
+
+func boothW6(in uint) (int, int) {
+ var s uint = ^((in >> 6) - 1)
+ var d uint = (1 << 7) - in - 1
+ d = (d & s) | (in & (^s))
+ d = (d >> 1) + (d & 1)
+ return int(d), int(s & 1)
+}
+
+func (p *P256Point) p256BaseMult(scalar *p256OrdElement) {
+ var t0 p256AffinePoint
+
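+	// The lowest window is shifted left by one so the Booth encoding sees an
+	// implicit zero as the bit below the window.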
+ wvalue := (scalar[0] << 1) & 0x7f
+ sel, sign := boothW6(uint(wvalue))
+ p256SelectAffine(&t0, &p256Precomputed[0], sel)
+ p.x, p.y, p.z = t0.x, t0.y, p256One
+ p256NegCond(&p.y, sign)
+
+ index := uint(5)
+ zero := sel
+
+ for i := 1; i < 43; i++ {
+ if index < 192 {
+ wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x7f
+ } else {
+ wvalue = (scalar[index/64] >> (index % 64)) & 0x7f
+ }
+ index += 6
+ sel, sign = boothW6(uint(wvalue))
+ p256SelectAffine(&t0, &p256Precomputed[i], sel)
+ p256PointAddAffineAsm(p, p, &t0, sign, sel, zero)
+ zero |= sel
+ }
+
+ // If the whole scalar was zero, set to the point at infinity.
+ p256MovCond(p, p, NewP256Point(), zero)
+}
+
+func (p *P256Point) p256ScalarMult(scalar *p256OrdElement) {
+	// precomp is a table of precomputed points that stores multiples of p,
+	// from [1]p to [16]p.
+ var precomp p256Table
+ var t0, t1, t2, t3 P256Point
+
+ // Prepare the table
+ precomp[0] = *p // 1
+
+ p256PointDoubleAsm(&t0, p)
+ p256PointDoubleAsm(&t1, &t0)
+ p256PointDoubleAsm(&t2, &t1)
+ p256PointDoubleAsm(&t3, &t2)
+ precomp[1] = t0 // 2
+ precomp[3] = t1 // 4
+ precomp[7] = t2 // 8
+ precomp[15] = t3 // 16
+
+ p256PointAddAsm(&t0, &t0, p)
+ p256PointAddAsm(&t1, &t1, p)
+ p256PointAddAsm(&t2, &t2, p)
+ precomp[2] = t0 // 3
+ precomp[4] = t1 // 5
+ precomp[8] = t2 // 9
+
+ p256PointDoubleAsm(&t0, &t0)
+ p256PointDoubleAsm(&t1, &t1)
+ precomp[5] = t0 // 6
+ precomp[9] = t1 // 10
+
+ p256PointAddAsm(&t2, &t0, p)
+ p256PointAddAsm(&t1, &t1, p)
+ precomp[6] = t2 // 7
+ precomp[10] = t1 // 11
+
+ p256PointDoubleAsm(&t0, &t0)
+ p256PointDoubleAsm(&t2, &t2)
+ precomp[11] = t0 // 12
+ precomp[13] = t2 // 14
+
+ p256PointAddAsm(&t0, &t0, p)
+ p256PointAddAsm(&t2, &t2, p)
+ precomp[12] = t0 // 13
+ precomp[14] = t2 // 15
+
+	// Start scanning the windows from the top bit.
+ index := uint(254)
+ var sel, sign int
+
+ wvalue := (scalar[index/64] >> (index % 64)) & 0x3f
+ sel, _ = boothW5(uint(wvalue))
+
+ p256Select(p, &precomp, sel)
+ zero := sel
+
+ for index > 4 {
+ index -= 5
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+
+ if index < 192 {
+ wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x3f
+ } else {
+ wvalue = (scalar[index/64] >> (index % 64)) & 0x3f
+ }
+
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(&t0, &precomp, sel)
+ p256NegCond(&t0.y, sign)
+ p256PointAddAsm(&t1, p, &t0)
+ p256MovCond(&t1, &t1, p, sel)
+ p256MovCond(p, &t1, &t0, zero)
+ zero |= sel
+ }
+
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+ p256PointDoubleAsm(p, p)
+
+ wvalue = (scalar[0] << 1) & 0x3f
+ sel, sign = boothW5(uint(wvalue))
+
+ p256Select(&t0, &precomp, sel)
+ p256NegCond(&t0.y, sign)
+ p256PointAddAsm(&t1, p, &t0)
+ p256MovCond(&t1, &t1, p, sel)
+ p256MovCond(p, &t1, &t0, zero)
+}
diff --git a/src/crypto/internal/nistec/p256_asm_amd64.s b/src/crypto/internal/nistec/p256_asm_amd64.s
new file mode 100644
index 0000000..84e4cee
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_asm_amd64.s
@@ -0,0 +1,2350 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains a constant-time, 64-bit assembly implementation of
+// P256. The optimizations performed here are described in detail in:
+// S. Gueron and V. Krasnov, "Fast prime field elliptic-curve cryptography with
+// 256-bit primes"
+// https://link.springer.com/article/10.1007%2Fs13389-014-0090-x
+// https://eprint.iacr.org/2013/816.pdf
+
+#include "textflag.h"
+
+#define res_ptr DI
+#define x_ptr SI
+#define y_ptr CX
+
+#define acc0 R8
+#define acc1 R9
+#define acc2 R10
+#define acc3 R11
+#define acc4 R12
+#define acc5 R13
+#define t0 R14
+#define t1 R15
+
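+// p256const0 and p256const1 are the second and fourth little-endian limbs of
+// the field prime p; the remaining limbs (all ones and zero) are synthesized
+// with immediates. p256ordK0 is -ord(G)⁻¹ mod 2⁶⁴, the Montgomery reduction
+// constant for the group order p256ord. p256one is 1 in the Montgomery domain.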
+DATA p256const0<>+0x00(SB)/8, $0x00000000ffffffff
+DATA p256const1<>+0x00(SB)/8, $0xffffffff00000001
+DATA p256ordK0<>+0x00(SB)/8, $0xccd1c8aaee00bc4f
+DATA p256ord<>+0x00(SB)/8, $0xf3b9cac2fc632551
+DATA p256ord<>+0x08(SB)/8, $0xbce6faada7179e84
+DATA p256ord<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256ord<>+0x18(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x00(SB)/8, $0x0000000000000001
+DATA p256one<>+0x08(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256one<>+0x18(SB)/8, $0x00000000fffffffe
+GLOBL p256const0<>(SB), 8, $8
+GLOBL p256const1<>(SB), 8, $8
+GLOBL p256ordK0<>(SB), 8, $8
+GLOBL p256ord<>(SB), 8, $32
+GLOBL p256one<>(SB), 8, $32
+
+/* ---------------------------------------*/
+// func p256OrdLittleToBig(res *[32]byte, in *p256OrdElement)
+TEXT ·p256OrdLittleToBig(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256OrdBigToLittle(res *p256OrdElement, in *[32]byte)
+TEXT ·p256OrdBigToLittle(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256LittleToBig(res *[32]byte, in *p256Element)
+TEXT ·p256LittleToBig(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256BigToLittle(res *p256Element, in *[32]byte)
+TEXT ·p256BigToLittle(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+8(FP), x_ptr
+
+ MOVQ (8*0)(x_ptr), acc0
+ MOVQ (8*1)(x_ptr), acc1
+ MOVQ (8*2)(x_ptr), acc2
+ MOVQ (8*3)(x_ptr), acc3
+
+ BSWAPQ acc0
+ BSWAPQ acc1
+ BSWAPQ acc2
+ BSWAPQ acc3
+
+ MOVQ acc3, (8*0)(res_ptr)
+ MOVQ acc2, (8*1)(res_ptr)
+ MOVQ acc1, (8*2)(res_ptr)
+ MOVQ acc0, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256MovCond(res, a, b *P256Point, cond int)
+TEXT ·p256MovCond(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ a+8(FP), x_ptr
+ MOVQ b+16(FP), y_ptr
+ MOVQ cond+24(FP), X12
+
+ PXOR X13, X13
+ PSHUFD $0, X12, X12
+ PCMPEQL X13, X12
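+	// X12 is now an all-ones mask when cond == 0 and all-zeros otherwise;
+	// a is kept where the mask is clear and b where it is set, so
+	// res = b when cond == 0 and res = a otherwise.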
+
+ MOVOU X12, X0
+ MOVOU (16*0)(x_ptr), X6
+ PANDN X6, X0
+ MOVOU X12, X1
+ MOVOU (16*1)(x_ptr), X7
+ PANDN X7, X1
+ MOVOU X12, X2
+ MOVOU (16*2)(x_ptr), X8
+ PANDN X8, X2
+ MOVOU X12, X3
+ MOVOU (16*3)(x_ptr), X9
+ PANDN X9, X3
+ MOVOU X12, X4
+ MOVOU (16*4)(x_ptr), X10
+ PANDN X10, X4
+ MOVOU X12, X5
+ MOVOU (16*5)(x_ptr), X11
+ PANDN X11, X5
+
+ MOVOU (16*0)(y_ptr), X6
+ MOVOU (16*1)(y_ptr), X7
+ MOVOU (16*2)(y_ptr), X8
+ MOVOU (16*3)(y_ptr), X9
+ MOVOU (16*4)(y_ptr), X10
+ MOVOU (16*5)(y_ptr), X11
+
+ PAND X12, X6
+ PAND X12, X7
+ PAND X12, X8
+ PAND X12, X9
+ PAND X12, X10
+ PAND X12, X11
+
+ PXOR X6, X0
+ PXOR X7, X1
+ PXOR X8, X2
+ PXOR X9, X3
+ PXOR X10, X4
+ PXOR X11, X5
+
+ MOVOU X0, (16*0)(res_ptr)
+ MOVOU X1, (16*1)(res_ptr)
+ MOVOU X2, (16*2)(res_ptr)
+ MOVOU X3, (16*3)(res_ptr)
+ MOVOU X4, (16*4)(res_ptr)
+ MOVOU X5, (16*5)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256NegCond(val *p256Element, cond int)
+TEXT ·p256NegCond(SB),NOSPLIT,$0
+ MOVQ val+0(FP), res_ptr
+ MOVQ cond+8(FP), t0
+ // acc = poly
+ MOVQ $-1, acc0
+ MOVQ p256const0<>(SB), acc1
+ MOVQ $0, acc2
+ MOVQ p256const1<>(SB), acc3
+ // Load the original value
+ MOVQ (8*0)(res_ptr), acc5
+ MOVQ (8*1)(res_ptr), x_ptr
+ MOVQ (8*2)(res_ptr), y_ptr
+ MOVQ (8*3)(res_ptr), t1
+ // Speculatively subtract
+ SUBQ acc5, acc0
+ SBBQ x_ptr, acc1
+ SBBQ y_ptr, acc2
+ SBBQ t1, acc3
+ // If condition is 0, keep original value
+ TESTQ t0, t0
+ CMOVQEQ acc5, acc0
+ CMOVQEQ x_ptr, acc1
+ CMOVQEQ y_ptr, acc2
+ CMOVQEQ t1, acc3
+ // Store result
+ MOVQ acc0, (8*0)(res_ptr)
+ MOVQ acc1, (8*1)(res_ptr)
+ MOVQ acc2, (8*2)(res_ptr)
+ MOVQ acc3, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256Sqr(res, in *p256Element, n int)
+TEXT ·p256Sqr(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+8(FP), x_ptr
+ MOVQ n+16(FP), BX
+
+sqrLoop:
+
+ // y[1:] * y[0]
+ MOVQ (8*0)(x_ptr), t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc1
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ // y[2:] * y[1]
+ MOVQ (8*1)(x_ptr), t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, acc5
+ // y[3] * y[2]
+ MOVQ (8*2)(x_ptr), t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, y_ptr
+ XORQ t1, t1
+ // *2
+ ADDQ acc1, acc1
+ ADCQ acc2, acc2
+ ADCQ acc3, acc3
+ ADCQ acc4, acc4
+ ADCQ acc5, acc5
+ ADCQ y_ptr, y_ptr
+ ADCQ $0, t1
+ // Missing products
+ MOVQ (8*0)(x_ptr), AX
+ MULQ AX
+ MOVQ AX, acc0
+ MOVQ DX, t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc1
+ ADCQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc3
+ ADCQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc5
+ ADCQ AX, y_ptr
+ ADCQ DX, t1
+ MOVQ t1, x_ptr
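+	// Montgomery reduction: the low limb of p is 2⁶⁴-1, so the reduction
+	// factor for each step is the low accumulator limb itself, and the
+	// multiple of p to add is formed with shifts plus a single MULQ by the
+	// top limb 0xffffffff00000001.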
+ // First reduction step
+ MOVQ acc0, AX
+ MOVQ acc0, t1
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc0, acc1
+ ADCQ t1, acc2
+ ADCQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MOVQ acc1, t1
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc1, acc2
+ ADCQ t1, acc3
+ ADCQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MOVQ acc2, t1
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc2, acc3
+ ADCQ t1, acc0
+ ADCQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, acc2
+ // Last reduction step
+ XORQ t0, t0
+ MOVQ acc3, AX
+ MOVQ acc3, t1
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc3, acc0
+ ADCQ t1, acc1
+ ADCQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+ // Add bits [511:256] of the sqr result
+ ADCQ acc4, acc0
+ ADCQ acc5, acc1
+ ADCQ y_ptr, acc2
+ ADCQ x_ptr, acc3
+ ADCQ $0, t0
+
+ MOVQ acc0, acc4
+ MOVQ acc1, acc5
+ MOVQ acc2, y_ptr
+ MOVQ acc3, t1
+ // Subtract p256
+ SUBQ $-1, acc0
+	SBBQ p256const0<>(SB), acc1
+ SBBQ $0, acc2
+ SBBQ p256const1<>(SB), acc3
+ SBBQ $0, t0
+
+ CMOVQCS acc4, acc0
+ CMOVQCS acc5, acc1
+ CMOVQCS y_ptr, acc2
+ CMOVQCS t1, acc3
+
+ MOVQ acc0, (8*0)(res_ptr)
+ MOVQ acc1, (8*1)(res_ptr)
+ MOVQ acc2, (8*2)(res_ptr)
+ MOVQ acc3, (8*3)(res_ptr)
+ MOVQ res_ptr, x_ptr
+ DECQ BX
+ JNE sqrLoop
+
+ RET
+/* ---------------------------------------*/
+// func p256Mul(res, in1, in2 *p256Element)
+TEXT ·p256Mul(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in1+8(FP), x_ptr
+ MOVQ in2+16(FP), y_ptr
+ // x * y[0]
+ MOVQ (8*0)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc0
+ MOVQ DX, acc1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ XORQ acc5, acc5
+ // First reduction step
+ MOVQ acc0, AX
+ MOVQ acc0, t1
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc0, acc1
+ ADCQ t1, acc2
+ ADCQ AX, acc3
+ ADCQ DX, acc4
+ ADCQ $0, acc5
+ XORQ acc0, acc0
+ // x * y[1]
+ MOVQ (8*1)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MOVQ acc1, t1
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc1, acc2
+ ADCQ t1, acc3
+ ADCQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ XORQ acc1, acc1
+ // x * y[2]
+ MOVQ (8*2)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MOVQ acc2, t1
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc2, acc3
+ ADCQ t1, acc4
+ ADCQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ XORQ acc2, acc2
+ // x * y[3]
+ MOVQ (8*3)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Last reduction step
+ MOVQ acc3, AX
+ MOVQ acc3, t1
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc3, acc4
+ ADCQ t1, acc5
+ ADCQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Copy result [255:0]
+ MOVQ acc4, x_ptr
+ MOVQ acc5, acc3
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ // Subtract p256
+ SUBQ $-1, acc4
+	SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc0
+ SBBQ p256const1<>(SB), acc1
+ SBBQ $0, acc2
+
+ CMOVQCS x_ptr, acc4
+ CMOVQCS acc3, acc5
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+
+ MOVQ acc4, (8*0)(res_ptr)
+ MOVQ acc5, (8*1)(res_ptr)
+ MOVQ acc0, (8*2)(res_ptr)
+ MOVQ acc1, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256FromMont(res, in *p256Element)
+TEXT ·p256FromMont(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+8(FP), x_ptr
+
+ MOVQ (8*0)(x_ptr), acc0
+ MOVQ (8*1)(x_ptr), acc1
+ MOVQ (8*2)(x_ptr), acc2
+ MOVQ (8*3)(x_ptr), acc3
+ XORQ acc4, acc4
+
+ // Only reduce, no multiplications are needed
+ // First stage
+ MOVQ acc0, AX
+ MOVQ acc0, t1
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc0, acc1
+ ADCQ t1, acc2
+ ADCQ AX, acc3
+ ADCQ DX, acc4
+ XORQ acc5, acc5
+ // Second stage
+ MOVQ acc1, AX
+ MOVQ acc1, t1
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc1, acc2
+ ADCQ t1, acc3
+ ADCQ AX, acc4
+ ADCQ DX, acc5
+ XORQ acc0, acc0
+ // Third stage
+ MOVQ acc2, AX
+ MOVQ acc2, t1
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc2, acc3
+ ADCQ t1, acc4
+ ADCQ AX, acc5
+ ADCQ DX, acc0
+ XORQ acc1, acc1
+ // Last stage
+ MOVQ acc3, AX
+ MOVQ acc3, t1
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, t1
+ ADDQ acc3, acc4
+ ADCQ t1, acc5
+ ADCQ AX, acc0
+ ADCQ DX, acc1
+
+ MOVQ acc4, x_ptr
+ MOVQ acc5, acc3
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+
+ SUBQ $-1, acc4
+ SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc0
+ SBBQ p256const1<>(SB), acc1
+
+ CMOVQCS x_ptr, acc4
+ CMOVQCS acc3, acc5
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+
+ MOVQ acc4, (8*0)(res_ptr)
+ MOVQ acc5, (8*1)(res_ptr)
+ MOVQ acc0, (8*2)(res_ptr)
+ MOVQ acc1, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256Select(res *P256Point, table *p256Table, idx int)
+TEXT ·p256Select(SB),NOSPLIT,$0
+	MOVQ idx+16(FP), AX
+	MOVQ table+8(FP), DI
+	MOVQ res+0(FP), DX
+
+ PXOR X15, X15 // X15 = 0
+ PCMPEQL X14, X14 // X14 = -1
+ PSUBL X14, X15 // X15 = 1
+ MOVL AX, X14
+ PSHUFD $0, X14, X14
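+	// Scan all 16 table entries in order; X13 counts 1, 2, ..., 16, and the
+	// entry whose counter matches idx (broadcast in X14) is XORed into the
+	// X0-X5 accumulators. If idx is 0 nothing matches and res is left zero.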
+
+ PXOR X0, X0
+ PXOR X1, X1
+ PXOR X2, X2
+ PXOR X3, X3
+ PXOR X4, X4
+ PXOR X5, X5
+ MOVQ $16, AX
+
+ MOVOU X15, X13
+
+loop_select:
+
+ MOVOU X13, X12
+ PADDL X15, X13
+ PCMPEQL X14, X12
+
+ MOVOU (16*0)(DI), X6
+ MOVOU (16*1)(DI), X7
+ MOVOU (16*2)(DI), X8
+ MOVOU (16*3)(DI), X9
+ MOVOU (16*4)(DI), X10
+ MOVOU (16*5)(DI), X11
+ ADDQ $(16*6), DI
+
+ PAND X12, X6
+ PAND X12, X7
+ PAND X12, X8
+ PAND X12, X9
+ PAND X12, X10
+ PAND X12, X11
+
+ PXOR X6, X0
+ PXOR X7, X1
+ PXOR X8, X2
+ PXOR X9, X3
+ PXOR X10, X4
+ PXOR X11, X5
+
+ DECQ AX
+ JNE loop_select
+
+ MOVOU X0, (16*0)(DX)
+ MOVOU X1, (16*1)(DX)
+ MOVOU X2, (16*2)(DX)
+ MOVOU X3, (16*3)(DX)
+ MOVOU X4, (16*4)(DX)
+ MOVOU X5, (16*5)(DX)
+
+ RET
+/* ---------------------------------------*/
+// func p256SelectAffine(res *p256AffinePoint, table *p256AffineTable, idx int)
+TEXT ·p256SelectAffine(SB),NOSPLIT,$0
+	MOVQ idx+16(FP), AX
+	MOVQ table+8(FP), DI
+	MOVQ res+0(FP), DX
+
+ PXOR X15, X15 // X15 = 0
+ PCMPEQL X14, X14 // X14 = -1
+ PSUBL X14, X15 // X15 = 1
+ MOVL AX, X14
+ PSHUFD $0, X14, X14
+
+ PXOR X0, X0
+ PXOR X1, X1
+ PXOR X2, X2
+ PXOR X3, X3
+ MOVQ $16, AX
+
+ MOVOU X15, X13
+
+loop_select_base:
+
+ MOVOU X13, X12
+ PADDL X15, X13
+ PCMPEQL X14, X12
+
+ MOVOU (16*0)(DI), X4
+ MOVOU (16*1)(DI), X5
+ MOVOU (16*2)(DI), X6
+ MOVOU (16*3)(DI), X7
+
+ MOVOU (16*4)(DI), X8
+ MOVOU (16*5)(DI), X9
+ MOVOU (16*6)(DI), X10
+ MOVOU (16*7)(DI), X11
+
+ ADDQ $(16*8), DI
+
+ PAND X12, X4
+ PAND X12, X5
+ PAND X12, X6
+ PAND X12, X7
+
+ MOVOU X13, X12
+ PADDL X15, X13
+ PCMPEQL X14, X12
+
+ PAND X12, X8
+ PAND X12, X9
+ PAND X12, X10
+ PAND X12, X11
+
+ PXOR X4, X0
+ PXOR X5, X1
+ PXOR X6, X2
+ PXOR X7, X3
+
+ PXOR X8, X0
+ PXOR X9, X1
+ PXOR X10, X2
+ PXOR X11, X3
+
+ DECQ AX
+ JNE loop_select_base
+
+ MOVOU X0, (16*0)(DX)
+ MOVOU X1, (16*1)(DX)
+ MOVOU X2, (16*2)(DX)
+ MOVOU X3, (16*3)(DX)
+
+ RET
+/* ---------------------------------------*/
+// func p256OrdMul(res, in1, in2 *p256OrdElement)
+TEXT ·p256OrdMul(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in1+8(FP), x_ptr
+ MOVQ in2+16(FP), y_ptr
+ // x * y[0]
+ MOVQ (8*0)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc0
+ MOVQ DX, acc1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ XORQ acc5, acc5
+ // First reduction step
+ MOVQ acc0, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc1
+ ADCQ $0, DX
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ DX, acc4
+ ADCQ $0, acc5
+ // x * y[1]
+ MOVQ (8*1)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ DX, acc5
+ ADCQ $0, acc0
+ // x * y[2]
+ MOVQ (8*2)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ DX, acc0
+ ADCQ $0, acc1
+ // x * y[3]
+ MOVQ (8*3)(y_ptr), t0
+
+ MOVQ (8*0)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Last reduction step
+ MOVQ acc3, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x10(SB), AX
+ MULQ t0
+ ADDQ t1, acc5
+ ADCQ $0, DX
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x18(SB), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ DX, acc1
+ ADCQ $0, acc2
+ // Copy result [255:0]
+ MOVQ acc4, x_ptr
+ MOVQ acc5, acc3
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ // Subtract p256
+ SUBQ p256ord<>+0x00(SB), acc4
+	SBBQ p256ord<>+0x08(SB), acc5
+ SBBQ p256ord<>+0x10(SB), acc0
+ SBBQ p256ord<>+0x18(SB), acc1
+ SBBQ $0, acc2
+
+ CMOVQCS x_ptr, acc4
+ CMOVQCS acc3, acc5
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+
+ MOVQ acc4, (8*0)(res_ptr)
+ MOVQ acc5, (8*1)(res_ptr)
+ MOVQ acc0, (8*2)(res_ptr)
+ MOVQ acc1, (8*3)(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256OrdSqr(res, in *p256OrdElement, n int)
+TEXT ·p256OrdSqr(SB),NOSPLIT,$0
+ MOVQ res+0(FP), res_ptr
+ MOVQ in+8(FP), x_ptr
+ MOVQ n+16(FP), BX
+
+ordSqrLoop:
+
+ // y[1:] * y[0]
+ MOVQ (8*0)(x_ptr), t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ t0
+ MOVQ AX, acc1
+ MOVQ DX, acc2
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc3
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, acc4
+ // y[2:] * y[1]
+ MOVQ (8*1)(x_ptr), t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ t1, acc4
+ ADCQ $0, DX
+ ADDQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, acc5
+ // y[3] * y[2]
+ MOVQ (8*2)(x_ptr), t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ t0
+ ADDQ AX, acc5
+ ADCQ $0, DX
+ MOVQ DX, y_ptr
+ XORQ t1, t1
+ // *2
+ ADDQ acc1, acc1
+ ADCQ acc2, acc2
+ ADCQ acc3, acc3
+ ADCQ acc4, acc4
+ ADCQ acc5, acc5
+ ADCQ y_ptr, y_ptr
+ ADCQ $0, t1
+ // Missing products
+ MOVQ (8*0)(x_ptr), AX
+ MULQ AX
+ MOVQ AX, acc0
+ MOVQ DX, t0
+
+ MOVQ (8*1)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc1
+ ADCQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*2)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc3
+ ADCQ AX, acc4
+ ADCQ $0, DX
+ MOVQ DX, t0
+
+ MOVQ (8*3)(x_ptr), AX
+ MULQ AX
+ ADDQ t0, acc5
+ ADCQ AX, y_ptr
+ ADCQ DX, t1
+ MOVQ t1, x_ptr
+ // First reduction step
+ MOVQ acc0, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc1
+ ADCQ $0, DX
+ ADDQ AX, acc1
+
+ MOVQ t0, t1
+ ADCQ DX, acc2
+ ADCQ $0, t1
+ SUBQ t0, acc2
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc0
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc3
+ ADCQ $0, acc0
+ SUBQ AX, acc3
+ SBBQ DX, acc0
+ // Second reduction step
+ MOVQ acc1, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc1
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc2
+ ADCQ $0, DX
+ ADDQ AX, acc2
+
+ MOVQ t0, t1
+ ADCQ DX, acc3
+ ADCQ $0, t1
+ SUBQ t0, acc3
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc1
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc0
+ ADCQ $0, acc1
+ SUBQ AX, acc0
+ SBBQ DX, acc1
+ // Third reduction step
+ MOVQ acc2, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc2
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc3
+ ADCQ $0, DX
+ ADDQ AX, acc3
+
+ MOVQ t0, t1
+ ADCQ DX, acc0
+ ADCQ $0, t1
+ SUBQ t0, acc0
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc2
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc1
+ ADCQ $0, acc2
+ SUBQ AX, acc1
+ SBBQ DX, acc2
+ // Last reduction step
+ MOVQ acc3, AX
+ MULQ p256ordK0<>(SB)
+ MOVQ AX, t0
+
+ MOVQ p256ord<>+0x00(SB), AX
+ MULQ t0
+ ADDQ AX, acc3
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ p256ord<>+0x08(SB), AX
+ MULQ t0
+ ADDQ t1, acc0
+ ADCQ $0, DX
+ ADDQ AX, acc0
+ ADCQ $0, DX
+ MOVQ DX, t1
+
+ MOVQ t0, t1
+ ADCQ DX, acc1
+ ADCQ $0, t1
+ SUBQ t0, acc1
+ SBBQ $0, t1
+
+ MOVQ t0, AX
+ MOVQ t0, DX
+ MOVQ t0, acc3
+ SHLQ $32, AX
+ SHRQ $32, DX
+
+ ADDQ t1, acc2
+ ADCQ $0, acc3
+ SUBQ AX, acc2
+ SBBQ DX, acc3
+ XORQ t0, t0
+ // Add bits [511:256] of the sqr result
+ ADCQ acc4, acc0
+ ADCQ acc5, acc1
+ ADCQ y_ptr, acc2
+ ADCQ x_ptr, acc3
+ ADCQ $0, t0
+
+ MOVQ acc0, acc4
+ MOVQ acc1, acc5
+ MOVQ acc2, y_ptr
+ MOVQ acc3, t1
+ // Subtract p256
+ SUBQ p256ord<>+0x00(SB), acc0
+	SBBQ p256ord<>+0x08(SB), acc1
+ SBBQ p256ord<>+0x10(SB), acc2
+ SBBQ p256ord<>+0x18(SB), acc3
+ SBBQ $0, t0
+
+ CMOVQCS acc4, acc0
+ CMOVQCS acc5, acc1
+ CMOVQCS y_ptr, acc2
+ CMOVQCS t1, acc3
+
+ MOVQ acc0, (8*0)(res_ptr)
+ MOVQ acc1, (8*1)(res_ptr)
+ MOVQ acc2, (8*2)(res_ptr)
+ MOVQ acc3, (8*3)(res_ptr)
+ MOVQ res_ptr, x_ptr
+ DECQ BX
+ JNE ordSqrLoop
+
+ RET
+/* ---------------------------------------*/
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+
+#undef acc0
+#undef acc1
+#undef acc2
+#undef acc3
+#undef acc4
+#undef acc5
+#undef t0
+#undef t1
+/* ---------------------------------------*/
+#define mul0 AX
+#define mul1 DX
+#define acc0 BX
+#define acc1 CX
+#define acc2 R8
+#define acc3 R9
+#define acc4 R10
+#define acc5 R11
+#define acc6 R12
+#define acc7 R13
+#define t0 R14
+#define t1 R15
+#define t2 DI
+#define t3 SI
+#define hlp BP
+/* ---------------------------------------*/
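+// p256SubInternal computes (acc4, acc5, acc6, acc7) = (acc4..acc7) -
+// (t0..t3) mod p, adding p back when the subtraction borrows.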
+TEXT p256SubInternal(SB),NOSPLIT,$0
+ XORQ mul0, mul0
+ SUBQ t0, acc4
+ SBBQ t1, acc5
+ SBBQ t2, acc6
+ SBBQ t3, acc7
+ SBBQ $0, mul0
+
+ MOVQ acc4, acc0
+ MOVQ acc5, acc1
+ MOVQ acc6, acc2
+ MOVQ acc7, acc3
+
+ ADDQ $-1, acc4
+ ADCQ p256const0<>(SB), acc5
+ ADCQ $0, acc6
+ ADCQ p256const1<>(SB), acc7
+ ANDQ $1, mul0
+
+ CMOVQEQ acc0, acc4
+ CMOVQEQ acc1, acc5
+ CMOVQEQ acc2, acc6
+ CMOVQEQ acc3, acc7
+
+ RET
+/* ---------------------------------------*/
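+// p256MulInternal computes a Montgomery multiplication:
+// (acc4, acc5, acc6, acc7) = (acc4..acc7) × (t0..t3) × R⁻¹ mod p.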
+TEXT p256MulInternal(SB),NOSPLIT,$8
+ MOVQ acc4, mul0
+ MULQ t0
+ MOVQ mul0, acc0
+ MOVQ mul1, acc1
+
+ MOVQ acc4, mul0
+ MULQ t1
+ ADDQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, acc2
+
+ MOVQ acc4, mul0
+ MULQ t2
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+
+ MOVQ acc4, mul0
+ MULQ t3
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, acc4
+
+ MOVQ acc5, mul0
+ MULQ t0
+ ADDQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ t1
+ ADDQ hlp, acc2
+ ADCQ $0, mul1
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ t2
+ ADDQ hlp, acc3
+ ADCQ $0, mul1
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ t3
+ ADDQ hlp, acc4
+ ADCQ $0, mul1
+ ADDQ mul0, acc4
+ ADCQ $0, mul1
+ MOVQ mul1, acc5
+
+ MOVQ acc6, mul0
+ MULQ t0
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc6, mul0
+ MULQ t1
+ ADDQ hlp, acc3
+ ADCQ $0, mul1
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc6, mul0
+ MULQ t2
+ ADDQ hlp, acc4
+ ADCQ $0, mul1
+ ADDQ mul0, acc4
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc6, mul0
+ MULQ t3
+ ADDQ hlp, acc5
+ ADCQ $0, mul1
+ ADDQ mul0, acc5
+ ADCQ $0, mul1
+ MOVQ mul1, acc6
+
+ MOVQ acc7, mul0
+ MULQ t0
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc7, mul0
+ MULQ t1
+ ADDQ hlp, acc4
+ ADCQ $0, mul1
+ ADDQ mul0, acc4
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc7, mul0
+ MULQ t2
+ ADDQ hlp, acc5
+ ADCQ $0, mul1
+ ADDQ mul0, acc5
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc7, mul0
+ MULQ t3
+ ADDQ hlp, acc6
+ ADCQ $0, mul1
+ ADDQ mul0, acc6
+ ADCQ $0, mul1
+ MOVQ mul1, acc7
+ // First reduction step
+ MOVQ acc0, mul0
+ MOVQ acc0, hlp
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc0, acc1
+ ADCQ hlp, acc2
+ ADCQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, acc0
+ // Second reduction step
+ MOVQ acc1, mul0
+ MOVQ acc1, hlp
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc1, acc2
+ ADCQ hlp, acc3
+ ADCQ mul0, acc0
+ ADCQ $0, mul1
+ MOVQ mul1, acc1
+ // Third reduction step
+ MOVQ acc2, mul0
+ MOVQ acc2, hlp
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc2, acc3
+ ADCQ hlp, acc0
+ ADCQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, acc2
+ // Last reduction step
+ MOVQ acc3, mul0
+ MOVQ acc3, hlp
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc3, acc0
+ ADCQ hlp, acc1
+ ADCQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
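+	// Clearing BP (hlp) with MOVQ rather than XORQ keeps the carry flag
+	// from the reduction intact, so the ADCQ chain below picks it up.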
+ MOVQ $0, BP
+ // Add bits [511:256] of the result
+ ADCQ acc0, acc4
+ ADCQ acc1, acc5
+ ADCQ acc2, acc6
+ ADCQ acc3, acc7
+ ADCQ $0, hlp
+ // Copy result
+ MOVQ acc4, acc0
+ MOVQ acc5, acc1
+ MOVQ acc6, acc2
+ MOVQ acc7, acc3
+ // Subtract p256
+ SUBQ $-1, acc4
+	SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc6
+ SBBQ p256const1<>(SB), acc7
+ SBBQ $0, hlp
+ // If the result of the subtraction is negative, restore the previous result
+ CMOVQCS acc0, acc4
+ CMOVQCS acc1, acc5
+ CMOVQCS acc2, acc6
+ CMOVQCS acc3, acc7
+
+ RET
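+//
+// A note on the four reduction steps above: p ≡ -1 (mod 2^64), so the
+// Montgomery multiplier for each 64-bit step is the low limb m itself,
+// and dropping that limb subtracts m exactly. Since
+// m*p = m*2^256 - m*2^224 + m*2^192 + m*2^96 - m, the SHLQ/SHRQ pair
+// contributes the m*2^96 term split across limbs 1 and 2, and the MULQ by
+// p256const1 contributes m*(2^256 - 2^224 + 2^192) at limbs 3 and 4; no
+// other multiplications are needed.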
+/* ---------------------------------------*/
+TEXT p256SqrInternal(SB),NOSPLIT,$8
+
+ MOVQ acc4, mul0
+ MULQ acc5
+ MOVQ mul0, acc1
+ MOVQ mul1, acc2
+
+ MOVQ acc4, mul0
+ MULQ acc6
+ ADDQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+
+ MOVQ acc4, mul0
+ MULQ acc7
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, t0
+
+ MOVQ acc5, mul0
+ MULQ acc6
+ ADDQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, hlp
+
+ MOVQ acc5, mul0
+ MULQ acc7
+ ADDQ hlp, t0
+ ADCQ $0, mul1
+ ADDQ mul0, t0
+ ADCQ $0, mul1
+ MOVQ mul1, t1
+
+ MOVQ acc6, mul0
+ MULQ acc7
+ ADDQ mul0, t1
+ ADCQ $0, mul1
+ MOVQ mul1, t2
+ XORQ t3, t3
+ // *2
+ ADDQ acc1, acc1
+ ADCQ acc2, acc2
+ ADCQ acc3, acc3
+ ADCQ t0, t0
+ ADCQ t1, t1
+ ADCQ t2, t2
+ ADCQ $0, t3
+ // Missing products
+ MOVQ acc4, mul0
+ MULQ mul0
+ MOVQ mul0, acc0
+ MOVQ DX, acc4
+
+ MOVQ acc5, mul0
+ MULQ mul0
+ ADDQ acc4, acc1
+ ADCQ mul0, acc2
+ ADCQ $0, DX
+ MOVQ DX, acc4
+
+ MOVQ acc6, mul0
+ MULQ mul0
+ ADDQ acc4, acc3
+ ADCQ mul0, t0
+ ADCQ $0, DX
+ MOVQ DX, acc4
+
+ MOVQ acc7, mul0
+ MULQ mul0
+ ADDQ acc4, t1
+ ADCQ mul0, t2
+ ADCQ DX, t3
+ // First reduction step
+ MOVQ acc0, mul0
+ MOVQ acc0, hlp
+ SHLQ $32, acc0
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc0, acc1
+ ADCQ hlp, acc2
+ ADCQ mul0, acc3
+ ADCQ $0, mul1
+ MOVQ mul1, acc0
+ // Second reduction step
+ MOVQ acc1, mul0
+ MOVQ acc1, hlp
+ SHLQ $32, acc1
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc1, acc2
+ ADCQ hlp, acc3
+ ADCQ mul0, acc0
+ ADCQ $0, mul1
+ MOVQ mul1, acc1
+ // Third reduction step
+ MOVQ acc2, mul0
+ MOVQ acc2, hlp
+ SHLQ $32, acc2
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc2, acc3
+ ADCQ hlp, acc0
+ ADCQ mul0, acc1
+ ADCQ $0, mul1
+ MOVQ mul1, acc2
+ // Last reduction step
+ MOVQ acc3, mul0
+ MOVQ acc3, hlp
+ SHLQ $32, acc3
+ MULQ p256const1<>(SB)
+ SHRQ $32, hlp
+ ADDQ acc3, acc0
+ ADCQ hlp, acc1
+ ADCQ mul0, acc2
+ ADCQ $0, mul1
+ MOVQ mul1, acc3
+ MOVQ $0, BP
+ // Add bits [511:256] of the result
+ ADCQ acc0, t0
+ ADCQ acc1, t1
+ ADCQ acc2, t2
+ ADCQ acc3, t3
+ ADCQ $0, hlp
+ // Copy result
+ MOVQ t0, acc4
+ MOVQ t1, acc5
+ MOVQ t2, acc6
+ MOVQ t3, acc7
+ // Subtract p256
+ SUBQ $-1, acc4
+	SBBQ p256const0<>(SB), acc5
+ SBBQ $0, acc6
+ SBBQ p256const1<>(SB), acc7
+ SBBQ $0, hlp
+ // If the result of the subtraction is negative, restore the previous result
+ CMOVQCS t0, acc4
+ CMOVQCS t1, acc5
+ CMOVQCS t2, acc6
+ CMOVQCS t3, acc7
+
+ RET
+/* ---------------------------------------*/
+#define p256MulBy2Inline\
+ XORQ mul0, mul0;\
+ ADDQ acc4, acc4;\
+ ADCQ acc5, acc5;\
+ ADCQ acc6, acc6;\
+ ADCQ acc7, acc7;\
+ ADCQ $0, mul0;\
+ MOVQ acc4, t0;\
+ MOVQ acc5, t1;\
+ MOVQ acc6, t2;\
+ MOVQ acc7, t3;\
+ SUBQ $-1, t0;\
+ SBBQ p256const0<>(SB), t1;\
+ SBBQ $0, t2;\
+ SBBQ p256const1<>(SB), t3;\
+ SBBQ $0, mul0;\
+ CMOVQCS acc4, t0;\
+ CMOVQCS acc5, t1;\
+ CMOVQCS acc6, t2;\
+ CMOVQCS acc7, t3;
+/* ---------------------------------------*/
+#define p256AddInline \
+ XORQ mul0, mul0;\
+ ADDQ t0, acc4;\
+ ADCQ t1, acc5;\
+ ADCQ t2, acc6;\
+ ADCQ t3, acc7;\
+ ADCQ $0, mul0;\
+ MOVQ acc4, t0;\
+ MOVQ acc5, t1;\
+ MOVQ acc6, t2;\
+ MOVQ acc7, t3;\
+ SUBQ $-1, t0;\
+ SBBQ p256const0<>(SB), t1;\
+ SBBQ $0, t2;\
+ SBBQ p256const1<>(SB), t3;\
+ SBBQ $0, mul0;\
+ CMOVQCS acc4, t0;\
+ CMOVQCS acc5, t1;\
+ CMOVQCS acc6, t2;\
+ CMOVQCS acc7, t3;
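+//
+// Both macros follow the same pattern: do the raw add (or double), then
+// speculatively subtract p and keep the subtracted value only if no borrow
+// occurred. A hedged Go sketch of the shared idea (illustrative names;
+// bits is math/bits; the assembly uses CMOV where this sketch branches):
+//
+//	func addModP(x, y [4]uint64) [4]uint64 {
+//		p := [4]uint64{^uint64(0), 0x00000000ffffffff, 0, 0xffffffff00000001}
+//		var s, t [4]uint64
+//		var c, b uint64
+//		for i := range s {
+//			s[i], c = bits.Add64(x[i], y[i], c)
+//		}
+//		for i := range t {
+//			t[i], b = bits.Sub64(s[i], p[i], b)
+//		}
+//		if _, b = bits.Sub64(c, 0, b); b != 0 {
+//			return s // the sum was already < p
+//		}
+//		return t
+//	}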
+/* ---------------------------------------*/
+#define LDacc(src) MOVQ src(8*0), acc4; MOVQ src(8*1), acc5; MOVQ src(8*2), acc6; MOVQ src(8*3), acc7
+#define LDt(src) MOVQ src(8*0), t0; MOVQ src(8*1), t1; MOVQ src(8*2), t2; MOVQ src(8*3), t3
+#define ST(dst) MOVQ acc4, dst(8*0); MOVQ acc5, dst(8*1); MOVQ acc6, dst(8*2); MOVQ acc7, dst(8*3)
+#define STt(dst) MOVQ t0, dst(8*0); MOVQ t1, dst(8*1); MOVQ t2, dst(8*2); MOVQ t3, dst(8*3)
+#define acc2t MOVQ acc4, t0; MOVQ acc5, t1; MOVQ acc6, t2; MOVQ acc7, t3
+#define t2acc MOVQ t0, acc4; MOVQ t1, acc5; MOVQ t2, acc6; MOVQ t3, acc7
+/* ---------------------------------------*/
+#define x1in(off) (32*0 + off)(SP)
+#define y1in(off) (32*1 + off)(SP)
+#define z1in(off) (32*2 + off)(SP)
+#define x2in(off) (32*3 + off)(SP)
+#define y2in(off) (32*4 + off)(SP)
+#define xout(off) (32*5 + off)(SP)
+#define yout(off) (32*6 + off)(SP)
+#define zout(off) (32*7 + off)(SP)
+#define s2(off) (32*8 + off)(SP)
+#define z1sqr(off) (32*9 + off)(SP)
+#define h(off) (32*10 + off)(SP)
+#define r(off) (32*11 + off)(SP)
+#define hsqr(off) (32*12 + off)(SP)
+#define rsqr(off) (32*13 + off)(SP)
+#define hcub(off) (32*14 + off)(SP)
+#define rptr (32*15)(SP)
+#define sel_save (32*15 + 8)(SP)
+#define zero_save (32*15 + 8 + 4)(SP)
+
+// func p256PointAddAffineAsm(res, in1 *P256Point, in2 *p256AffinePoint, sign, sel, zero int)
+TEXT ·p256PointAddAffineAsm(SB),0,$512-48
+ // Move input to stack in order to free registers
+ MOVQ res+0(FP), AX
+ MOVQ in1+8(FP), BX
+ MOVQ in2+16(FP), CX
+ MOVQ sign+24(FP), DX
+ MOVQ sel+32(FP), t1
+ MOVQ zero+40(FP), t2
+
+ MOVOU (16*0)(BX), X0
+ MOVOU (16*1)(BX), X1
+ MOVOU (16*2)(BX), X2
+ MOVOU (16*3)(BX), X3
+ MOVOU (16*4)(BX), X4
+ MOVOU (16*5)(BX), X5
+
+ MOVOU X0, x1in(16*0)
+ MOVOU X1, x1in(16*1)
+ MOVOU X2, y1in(16*0)
+ MOVOU X3, y1in(16*1)
+ MOVOU X4, z1in(16*0)
+ MOVOU X5, z1in(16*1)
+
+ MOVOU (16*0)(CX), X0
+ MOVOU (16*1)(CX), X1
+
+ MOVOU X0, x2in(16*0)
+ MOVOU X1, x2in(16*1)
+ // Store pointer to result
+ MOVQ mul0, rptr
+ MOVL t1, sel_save
+ MOVL t2, zero_save
+ // Negate y2in based on sign
+ MOVQ (16*2 + 8*0)(CX), acc4
+ MOVQ (16*2 + 8*1)(CX), acc5
+ MOVQ (16*2 + 8*2)(CX), acc6
+ MOVQ (16*2 + 8*3)(CX), acc7
+ MOVQ $-1, acc0
+ MOVQ p256const0<>(SB), acc1
+ MOVQ $0, acc2
+ MOVQ p256const1<>(SB), acc3
+ XORQ mul0, mul0
+ // Speculatively subtract
+ SUBQ acc4, acc0
+ SBBQ acc5, acc1
+ SBBQ acc6, acc2
+ SBBQ acc7, acc3
+ SBBQ $0, mul0
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ MOVQ acc2, t2
+ MOVQ acc3, t3
+ // Add in case the operand was > p256
+ ADDQ $-1, acc0
+ ADCQ p256const0<>(SB), acc1
+ ADCQ $0, acc2
+ ADCQ p256const1<>(SB), acc3
+ ADCQ $0, mul0
+ CMOVQNE t0, acc0
+ CMOVQNE t1, acc1
+ CMOVQNE t2, acc2
+ CMOVQNE t3, acc3
+ // If condition is 0, keep original value
+ TESTQ DX, DX
+ CMOVQEQ acc4, acc0
+ CMOVQEQ acc5, acc1
+ CMOVQEQ acc6, acc2
+ CMOVQEQ acc7, acc3
+ // Store result
+ MOVQ acc0, y2in(8*0)
+ MOVQ acc1, y2in(8*1)
+ MOVQ acc2, y2in(8*2)
+ MOVQ acc3, y2in(8*3)
+ // Begin point add
+ LDacc (z1in)
+ CALL p256SqrInternal(SB) // z1ˆ2
+ ST (z1sqr)
+
+ LDt (x2in)
+ CALL p256MulInternal(SB) // x2 * z1ˆ2
+
+ LDt (x1in)
+ CALL p256SubInternal(SB) // h = u2 - u1
+ ST (h)
+
+ LDt (z1in)
+ CALL p256MulInternal(SB) // z3 = h * z1
+ ST (zout)
+
+ LDacc (z1sqr)
+ CALL p256MulInternal(SB) // z1ˆ3
+
+ LDt (y2in)
+ CALL p256MulInternal(SB) // s2 = y2 * z1ˆ3
+ ST (s2)
+
+ LDt (y1in)
+ CALL p256SubInternal(SB) // r = s2 - s1
+ ST (r)
+
+ CALL p256SqrInternal(SB) // rsqr = rˆ2
+ ST (rsqr)
+
+ LDacc (h)
+ CALL p256SqrInternal(SB) // hsqr = hˆ2
+ ST (hsqr)
+
+ LDt (h)
+ CALL p256MulInternal(SB) // hcub = hˆ3
+ ST (hcub)
+
+ LDt (y1in)
+ CALL p256MulInternal(SB) // y1 * hˆ3
+ ST (s2)
+
+ LDacc (x1in)
+ LDt (hsqr)
+ CALL p256MulInternal(SB) // u1 * hˆ2
+ ST (h)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+ LDacc (rsqr)
+ CALL p256SubInternal(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ LDt (hcub)
+ CALL p256SubInternal(SB)
+ ST (xout)
+
+ MOVQ acc4, t0
+ MOVQ acc5, t1
+ MOVQ acc6, t2
+ MOVQ acc7, t3
+ LDacc (h)
+ CALL p256SubInternal(SB)
+
+ LDt (r)
+ CALL p256MulInternal(SB)
+
+ LDt (s2)
+ CALL p256SubInternal(SB)
+ ST (yout)
+ // Load stored values from stack
+ MOVQ rptr, AX
+ MOVL sel_save, BX
+ MOVL zero_save, CX
+ // The result is not valid if (sel == 0), conditional choose
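+	// The select below is branchless: PCMPEQL against zero turns sel and
+	// zero into all-ones masks (X6, X7) exactly when they are 0, X15 is
+	// the complement of X6, and out = (new AND NOT mask) XOR (old AND
+	// mask) picks one operand without a data-dependent branch.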
+ MOVOU xout(16*0), X0
+ MOVOU xout(16*1), X1
+ MOVOU yout(16*0), X2
+ MOVOU yout(16*1), X3
+ MOVOU zout(16*0), X4
+ MOVOU zout(16*1), X5
+
+ MOVL BX, X6
+ MOVL CX, X7
+
+ PXOR X8, X8
+ PCMPEQL X9, X9
+
+ PSHUFD $0, X6, X6
+ PSHUFD $0, X7, X7
+
+ PCMPEQL X8, X6
+ PCMPEQL X8, X7
+
+ MOVOU X6, X15
+ PANDN X9, X15
+
+ MOVOU x1in(16*0), X9
+ MOVOU x1in(16*1), X10
+ MOVOU y1in(16*0), X11
+ MOVOU y1in(16*1), X12
+ MOVOU z1in(16*0), X13
+ MOVOU z1in(16*1), X14
+
+ PAND X15, X0
+ PAND X15, X1
+ PAND X15, X2
+ PAND X15, X3
+ PAND X15, X4
+ PAND X15, X5
+
+ PAND X6, X9
+ PAND X6, X10
+ PAND X6, X11
+ PAND X6, X12
+ PAND X6, X13
+ PAND X6, X14
+
+ PXOR X9, X0
+ PXOR X10, X1
+ PXOR X11, X2
+ PXOR X12, X3
+ PXOR X13, X4
+ PXOR X14, X5
+ // Similarly if zero == 0
+ PCMPEQL X9, X9
+ MOVOU X7, X15
+ PANDN X9, X15
+
+ MOVOU x2in(16*0), X9
+ MOVOU x2in(16*1), X10
+ MOVOU y2in(16*0), X11
+ MOVOU y2in(16*1), X12
+ MOVOU p256one<>+0x00(SB), X13
+ MOVOU p256one<>+0x10(SB), X14
+
+ PAND X15, X0
+ PAND X15, X1
+ PAND X15, X2
+ PAND X15, X3
+ PAND X15, X4
+ PAND X15, X5
+
+ PAND X7, X9
+ PAND X7, X10
+ PAND X7, X11
+ PAND X7, X12
+ PAND X7, X13
+ PAND X7, X14
+
+ PXOR X9, X0
+ PXOR X10, X1
+ PXOR X11, X2
+ PXOR X12, X3
+ PXOR X13, X4
+ PXOR X14, X5
+ // Finally output the result
+ MOVOU X0, (16*0)(AX)
+ MOVOU X1, (16*1)(AX)
+ MOVOU X2, (16*2)(AX)
+ MOVOU X3, (16*3)(AX)
+ MOVOU X4, (16*4)(AX)
+ MOVOU X5, (16*5)(AX)
+ MOVQ $0, rptr
+
+ RET
+#undef x1in
+#undef y1in
+#undef z1in
+#undef x2in
+#undef y2in
+#undef xout
+#undef yout
+#undef zout
+#undef s2
+#undef z1sqr
+#undef h
+#undef r
+#undef hsqr
+#undef rsqr
+#undef hcub
+#undef rptr
+#undef sel_save
+#undef zero_save
+
+// p256IsZero returns 1 in AX if [acc4..acc7] represents zero and zero
+// otherwise. It writes to [acc4..acc7], t0 and t1.
+TEXT p256IsZero(SB),NOSPLIT,$0
+ // AX contains a flag that is set if the input is zero.
+ XORQ AX, AX
+ MOVQ $1, t1
+
+ // Check whether [acc4..acc7] are all zero.
+ MOVQ acc4, t0
+ ORQ acc5, t0
+ ORQ acc6, t0
+ ORQ acc7, t0
+
+ // Set the zero flag if so. (CMOV of a constant to a register doesn't
+ // appear to be supported in Go. Thus t1 = 1.)
+ CMOVQEQ t1, AX
+
+ // XOR [acc4..acc7] with P and compare with zero again.
+ XORQ $-1, acc4
+ XORQ p256const0<>(SB), acc5
+ XORQ p256const1<>(SB), acc7
+ ORQ acc5, acc4
+ ORQ acc6, acc4
+ ORQ acc7, acc4
+
+ // Set the zero flag if so.
+ CMOVQEQ t1, AX
+ RET
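+// Note that both the bit patterns 0 and p are treated as zero, since both
+// represent zero modulo p.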
+
+/* ---------------------------------------*/
+#define x1in(off) (32*0 + off)(SP)
+#define y1in(off) (32*1 + off)(SP)
+#define z1in(off) (32*2 + off)(SP)
+#define x2in(off) (32*3 + off)(SP)
+#define y2in(off) (32*4 + off)(SP)
+#define z2in(off) (32*5 + off)(SP)
+
+#define xout(off) (32*6 + off)(SP)
+#define yout(off) (32*7 + off)(SP)
+#define zout(off) (32*8 + off)(SP)
+
+#define u1(off) (32*9 + off)(SP)
+#define u2(off) (32*10 + off)(SP)
+#define s1(off) (32*11 + off)(SP)
+#define s2(off) (32*12 + off)(SP)
+#define z1sqr(off) (32*13 + off)(SP)
+#define z2sqr(off) (32*14 + off)(SP)
+#define h(off) (32*15 + off)(SP)
+#define r(off) (32*16 + off)(SP)
+#define hsqr(off) (32*17 + off)(SP)
+#define rsqr(off) (32*18 + off)(SP)
+#define hcub(off) (32*19 + off)(SP)
+#define rptr (32*20)(SP)
+#define points_eq (32*20+8)(SP)
+
+// func p256PointAddAsm(res, in1, in2 *P256Point) int
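+// The return value is 1 when in1 and in2 represent the same point: the
+// add-2007-bl formulas below are not valid for doubling, so the caller is
+// expected to handle that case separately.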
+TEXT ·p256PointAddAsm(SB),0,$680-32
+ // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
+ // Move input to stack in order to free registers
+ MOVQ res+0(FP), AX
+ MOVQ in1+8(FP), BX
+ MOVQ in2+16(FP), CX
+
+ MOVOU (16*0)(BX), X0
+ MOVOU (16*1)(BX), X1
+ MOVOU (16*2)(BX), X2
+ MOVOU (16*3)(BX), X3
+ MOVOU (16*4)(BX), X4
+ MOVOU (16*5)(BX), X5
+
+ MOVOU X0, x1in(16*0)
+ MOVOU X1, x1in(16*1)
+ MOVOU X2, y1in(16*0)
+ MOVOU X3, y1in(16*1)
+ MOVOU X4, z1in(16*0)
+ MOVOU X5, z1in(16*1)
+
+ MOVOU (16*0)(CX), X0
+ MOVOU (16*1)(CX), X1
+ MOVOU (16*2)(CX), X2
+ MOVOU (16*3)(CX), X3
+ MOVOU (16*4)(CX), X4
+ MOVOU (16*5)(CX), X5
+
+ MOVOU X0, x2in(16*0)
+ MOVOU X1, x2in(16*1)
+ MOVOU X2, y2in(16*0)
+ MOVOU X3, y2in(16*1)
+ MOVOU X4, z2in(16*0)
+ MOVOU X5, z2in(16*1)
+ // Store pointer to result
+ MOVQ AX, rptr
+ // Begin point add
+ LDacc (z2in)
+ CALL p256SqrInternal(SB) // z2ˆ2
+ ST (z2sqr)
+ LDt (z2in)
+ CALL p256MulInternal(SB) // z2ˆ3
+ LDt (y1in)
+ CALL p256MulInternal(SB) // s1 = z2ˆ3*y1
+ ST (s1)
+
+ LDacc (z1in)
+ CALL p256SqrInternal(SB) // z1ˆ2
+ ST (z1sqr)
+ LDt (z1in)
+ CALL p256MulInternal(SB) // z1ˆ3
+ LDt (y2in)
+ CALL p256MulInternal(SB) // s2 = z1ˆ3*y2
+ ST (s2)
+
+ LDt (s1)
+ CALL p256SubInternal(SB) // r = s2 - s1
+ ST (r)
+ CALL p256IsZero(SB)
+ MOVQ AX, points_eq
+
+ LDacc (z2sqr)
+ LDt (x1in)
+ CALL p256MulInternal(SB) // u1 = x1 * z2ˆ2
+ ST (u1)
+ LDacc (z1sqr)
+ LDt (x2in)
+ CALL p256MulInternal(SB) // u2 = x2 * z1ˆ2
+ ST (u2)
+
+ LDt (u1)
+ CALL p256SubInternal(SB) // h = u2 - u1
+ ST (h)
+ CALL p256IsZero(SB)
+ ANDQ points_eq, AX
+ MOVQ AX, points_eq
+
+ LDacc (r)
+ CALL p256SqrInternal(SB) // rsqr = rˆ2
+ ST (rsqr)
+
+ LDacc (h)
+ CALL p256SqrInternal(SB) // hsqr = hˆ2
+ ST (hsqr)
+
+ LDt (h)
+ CALL p256MulInternal(SB) // hcub = hˆ3
+ ST (hcub)
+
+ LDt (s1)
+ CALL p256MulInternal(SB)
+ ST (s2)
+
+ LDacc (z1in)
+ LDt (z2in)
+ CALL p256MulInternal(SB) // z1 * z2
+ LDt (h)
+ CALL p256MulInternal(SB) // z1 * z2 * h
+ ST (zout)
+
+ LDacc (hsqr)
+ LDt (u1)
+ CALL p256MulInternal(SB) // hˆ2 * u1
+ ST (u2)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+ LDacc (rsqr)
+ CALL p256SubInternal(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ LDt (hcub)
+ CALL p256SubInternal(SB)
+ ST (xout)
+
+ MOVQ acc4, t0
+ MOVQ acc5, t1
+ MOVQ acc6, t2
+ MOVQ acc7, t3
+ LDacc (u2)
+ CALL p256SubInternal(SB)
+
+ LDt (r)
+ CALL p256MulInternal(SB)
+
+ LDt (s2)
+ CALL p256SubInternal(SB)
+ ST (yout)
+
+ MOVOU xout(16*0), X0
+ MOVOU xout(16*1), X1
+ MOVOU yout(16*0), X2
+ MOVOU yout(16*1), X3
+ MOVOU zout(16*0), X4
+ MOVOU zout(16*1), X5
+ // Finally output the result
+ MOVQ rptr, AX
+ MOVQ $0, rptr
+ MOVOU X0, (16*0)(AX)
+ MOVOU X1, (16*1)(AX)
+ MOVOU X2, (16*2)(AX)
+ MOVOU X3, (16*3)(AX)
+ MOVOU X4, (16*4)(AX)
+ MOVOU X5, (16*5)(AX)
+
+ MOVQ points_eq, AX
+ MOVQ AX, ret+24(FP)
+
+ RET
+#undef x1in
+#undef y1in
+#undef z1in
+#undef x2in
+#undef y2in
+#undef z2in
+#undef xout
+#undef yout
+#undef zout
+#undef s1
+#undef s2
+#undef u1
+#undef u2
+#undef z1sqr
+#undef z2sqr
+#undef h
+#undef r
+#undef hsqr
+#undef rsqr
+#undef hcub
+#undef rptr
+/* ---------------------------------------*/
+#define x(off) (32*0 + off)(SP)
+#define y(off) (32*1 + off)(SP)
+#define z(off) (32*2 + off)(SP)
+
+#define s(off) (32*3 + off)(SP)
+#define m(off) (32*4 + off)(SP)
+#define zsqr(off) (32*5 + off)(SP)
+#define tmp(off) (32*6 + off)(SP)
+#define rptr (32*7)(SP)
+
+// func p256PointDoubleAsm(res, in *P256Point)
+TEXT ·p256PointDoubleAsm(SB),NOSPLIT,$256-16
+ // Move input to stack in order to free registers
+ MOVQ res+0(FP), AX
+ MOVQ in+8(FP), BX
+
+ MOVOU (16*0)(BX), X0
+ MOVOU (16*1)(BX), X1
+ MOVOU (16*2)(BX), X2
+ MOVOU (16*3)(BX), X3
+ MOVOU (16*4)(BX), X4
+ MOVOU (16*5)(BX), X5
+
+ MOVOU X0, x(16*0)
+ MOVOU X1, x(16*1)
+ MOVOU X2, y(16*0)
+ MOVOU X3, y(16*1)
+ MOVOU X4, z(16*0)
+ MOVOU X5, z(16*1)
+ // Store pointer to result
+ MOVQ AX, rptr
+ // Begin point double
+ LDacc (z)
+ CALL p256SqrInternal(SB)
+ ST (zsqr)
+
+ LDt (x)
+ p256AddInline
+ STt (m)
+
+ LDacc (z)
+ LDt (y)
+ CALL p256MulInternal(SB)
+ p256MulBy2Inline
+ MOVQ rptr, AX
+ // Store z
+ MOVQ t0, (16*4 + 8*0)(AX)
+ MOVQ t1, (16*4 + 8*1)(AX)
+ MOVQ t2, (16*4 + 8*2)(AX)
+ MOVQ t3, (16*4 + 8*3)(AX)
+
+ LDacc (x)
+ LDt (zsqr)
+ CALL p256SubInternal(SB)
+ LDt (m)
+ CALL p256MulInternal(SB)
+ ST (m)
+ // Multiply by 3
+ p256MulBy2Inline
+ LDacc (m)
+ p256AddInline
+ STt (m)
+ ////////////////////////
+ LDacc (y)
+ p256MulBy2Inline
+ t2acc
+ CALL p256SqrInternal(SB)
+ ST (s)
+ CALL p256SqrInternal(SB)
+ // Divide by 2
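+	// Halving mod p: speculatively add p, keep the original value if it
+	// was already even, then shift the five-limb result right by one bit.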
+ XORQ mul0, mul0
+ MOVQ acc4, t0
+ MOVQ acc5, t1
+ MOVQ acc6, t2
+ MOVQ acc7, t3
+
+ ADDQ $-1, acc4
+ ADCQ p256const0<>(SB), acc5
+ ADCQ $0, acc6
+ ADCQ p256const1<>(SB), acc7
+ ADCQ $0, mul0
+ TESTQ $1, t0
+
+ CMOVQEQ t0, acc4
+ CMOVQEQ t1, acc5
+ CMOVQEQ t2, acc6
+ CMOVQEQ t3, acc7
+ ANDQ t0, mul0
+
+ SHRQ $1, acc5, acc4
+ SHRQ $1, acc6, acc5
+ SHRQ $1, acc7, acc6
+ SHRQ $1, mul0, acc7
+ ST (y)
+ /////////////////////////
+ LDacc (x)
+ LDt (s)
+ CALL p256MulInternal(SB)
+ ST (s)
+ p256MulBy2Inline
+ STt (tmp)
+
+ LDacc (m)
+ CALL p256SqrInternal(SB)
+ LDt (tmp)
+ CALL p256SubInternal(SB)
+
+ MOVQ rptr, AX
+ // Store x
+ MOVQ acc4, (16*0 + 8*0)(AX)
+ MOVQ acc5, (16*0 + 8*1)(AX)
+ MOVQ acc6, (16*0 + 8*2)(AX)
+ MOVQ acc7, (16*0 + 8*3)(AX)
+
+ acc2t
+ LDacc (s)
+ CALL p256SubInternal(SB)
+
+ LDt (m)
+ CALL p256MulInternal(SB)
+
+ LDt (y)
+ CALL p256SubInternal(SB)
+ MOVQ rptr, AX
+ // Store y
+ MOVQ acc4, (16*2 + 8*0)(AX)
+ MOVQ acc5, (16*2 + 8*1)(AX)
+ MOVQ acc6, (16*2 + 8*2)(AX)
+ MOVQ acc7, (16*2 + 8*3)(AX)
+ ///////////////////////
+ MOVQ $0, rptr
+
+ RET
+/* ---------------------------------------*/
diff --git a/src/crypto/internal/nistec/p256_asm_arm64.s b/src/crypto/internal/nistec/p256_asm_arm64.s
new file mode 100644
index 0000000..1ba5df3
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_asm_arm64.s
@@ -0,0 +1,1533 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains a constant-time, 64-bit assembly implementation of
+// P-256. The optimizations performed here are described in detail in:
+// S. Gueron and V. Krasnov, "Fast prime field elliptic-curve cryptography with
+// 256-bit primes"
+// http://link.springer.com/article/10.1007%2Fs13389-014-0090-x
+// https://eprint.iacr.org/2013/816.pdf
+
+#include "textflag.h"
+
+#define res_ptr R0
+#define a_ptr R1
+#define b_ptr R2
+
+#define acc0 R3
+#define acc1 R4
+#define acc2 R5
+#define acc3 R6
+
+#define acc4 R7
+#define acc5 R8
+#define acc6 R9
+#define acc7 R10
+#define t0 R11
+#define t1 R12
+#define t2 R13
+#define t3 R14
+#define const0 R15
+#define const1 R16
+
+#define hlp0 R17
+#define hlp1 res_ptr
+
+#define x0 R19
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define y0 R23
+#define y1 R24
+#define y2 R25
+#define y3 R26
+
+#define const2 t2
+#define const3 t3
+
+DATA p256const0<>+0x00(SB)/8, $0x00000000ffffffff
+DATA p256const1<>+0x00(SB)/8, $0xffffffff00000001
+DATA p256ordK0<>+0x00(SB)/8, $0xccd1c8aaee00bc4f
+DATA p256ord<>+0x00(SB)/8, $0xf3b9cac2fc632551
+DATA p256ord<>+0x08(SB)/8, $0xbce6faada7179e84
+DATA p256ord<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256ord<>+0x18(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x00(SB)/8, $0x0000000000000001
+DATA p256one<>+0x08(SB)/8, $0xffffffff00000000
+DATA p256one<>+0x10(SB)/8, $0xffffffffffffffff
+DATA p256one<>+0x18(SB)/8, $0x00000000fffffffe
+GLOBL p256const0<>(SB), 8, $8
+GLOBL p256const1<>(SB), 8, $8
+GLOBL p256ordK0<>(SB), 8, $8
+GLOBL p256ord<>(SB), 8, $32
+GLOBL p256one<>(SB), 8, $32
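+// p256ordK0 is the Montgomery constant -ord^-1 mod 2^64 for the group
+// order, and p256one is 1 in the Montgomery domain, i.e. R mod p with
+// R = 2^256.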
+
+/* ---------------------------------------*/
+// func p256OrdLittleToBig(res *[32]byte, in *p256OrdElement)
+TEXT ·p256OrdLittleToBig(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256OrdBigToLittle(res *p256OrdElement, in *[32]byte)
+TEXT ·p256OrdBigToLittle(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256LittleToBig(res *[32]byte, in *p256Element)
+TEXT ·p256LittleToBig(SB),NOSPLIT,$0
+ JMP ·p256BigToLittle(SB)
+/* ---------------------------------------*/
+// func p256BigToLittle(res *p256Element, in *[32]byte)
+TEXT ·p256BigToLittle(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD in+8(FP), a_ptr
+
+ LDP 0*16(a_ptr), (acc0, acc1)
+ LDP 1*16(a_ptr), (acc2, acc3)
+
+ REV acc0, acc0
+ REV acc1, acc1
+ REV acc2, acc2
+ REV acc3, acc3
+
+ STP (acc3, acc2), 0*16(res_ptr)
+ STP (acc1, acc0), 1*16(res_ptr)
+ RET
+/* ---------------------------------------*/
+// func p256MovCond(res, a, b *P256Point, cond int)
+// If cond == 0 res=b, else res=a
+TEXT ·p256MovCond(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD a+8(FP), a_ptr
+ MOVD b+16(FP), b_ptr
+ MOVD cond+24(FP), R3
+
+ CMP $0, R3
+	// Two remarks:
+	// 1) We will want to revisit NEON when support is better.
+	// 2) CSEL might not be constant time on all ARM processors.
+ LDP 0*16(a_ptr), (R4, R5)
+ LDP 1*16(a_ptr), (R6, R7)
+ LDP 2*16(a_ptr), (R8, R9)
+ LDP 0*16(b_ptr), (R16, R17)
+ LDP 1*16(b_ptr), (R19, R20)
+ LDP 2*16(b_ptr), (R21, R22)
+ CSEL EQ, R16, R4, R4
+ CSEL EQ, R17, R5, R5
+ CSEL EQ, R19, R6, R6
+ CSEL EQ, R20, R7, R7
+ CSEL EQ, R21, R8, R8
+ CSEL EQ, R22, R9, R9
+ STP (R4, R5), 0*16(res_ptr)
+ STP (R6, R7), 1*16(res_ptr)
+ STP (R8, R9), 2*16(res_ptr)
+
+ LDP 3*16(a_ptr), (R4, R5)
+ LDP 4*16(a_ptr), (R6, R7)
+ LDP 5*16(a_ptr), (R8, R9)
+ LDP 3*16(b_ptr), (R16, R17)
+ LDP 4*16(b_ptr), (R19, R20)
+ LDP 5*16(b_ptr), (R21, R22)
+ CSEL EQ, R16, R4, R4
+ CSEL EQ, R17, R5, R5
+ CSEL EQ, R19, R6, R6
+ CSEL EQ, R20, R7, R7
+ CSEL EQ, R21, R8, R8
+ CSEL EQ, R22, R9, R9
+ STP (R4, R5), 3*16(res_ptr)
+ STP (R6, R7), 4*16(res_ptr)
+ STP (R8, R9), 5*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256NegCond(val *p256Element, cond int)
+TEXT ·p256NegCond(SB),NOSPLIT,$0
+ MOVD val+0(FP), a_ptr
+ MOVD cond+8(FP), hlp0
+ MOVD a_ptr, res_ptr
+ // acc = poly
+ MOVD $-1, acc0
+ MOVD p256const0<>(SB), acc1
+ MOVD $0, acc2
+ MOVD p256const1<>(SB), acc3
+ // Load the original value
+ LDP 0*16(a_ptr), (t0, t1)
+ LDP 1*16(a_ptr), (t2, t3)
+ // Speculatively subtract
+ SUBS t0, acc0
+ SBCS t1, acc1
+ SBCS t2, acc2
+ SBC t3, acc3
+ // If condition is 0, keep original value
+ CMP $0, hlp0
+ CSEL EQ, t0, acc0, acc0
+ CSEL EQ, t1, acc1, acc1
+ CSEL EQ, t2, acc2, acc2
+ CSEL EQ, t3, acc3, acc3
+ // Store result
+ STP (acc0, acc1), 0*16(res_ptr)
+ STP (acc2, acc3), 1*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256Sqr(res, in *p256Element, n int)
+TEXT ·p256Sqr(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD in+8(FP), a_ptr
+ MOVD n+16(FP), b_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+
+sqrLoop:
+ SUB $1, b_ptr
+ CALL p256SqrInternal<>(SB)
+ MOVD y0, x0
+ MOVD y1, x1
+ MOVD y2, x2
+ MOVD y3, x3
+ CBNZ b_ptr, sqrLoop
+
+ STP (y0, y1), 0*16(res_ptr)
+ STP (y2, y3), 1*16(res_ptr)
+ RET
+/* ---------------------------------------*/
+// func p256Mul(res, in1, in2 *p256Element)
+TEXT ·p256Mul(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD in1+8(FP), a_ptr
+ MOVD in2+16(FP), b_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+
+ LDP 0*16(b_ptr), (y0, y1)
+ LDP 1*16(b_ptr), (y2, y3)
+
+ CALL p256MulInternal<>(SB)
+
+ STP (y0, y1), 0*16(res_ptr)
+ STP (y2, y3), 1*16(res_ptr)
+ RET
+/* ---------------------------------------*/
+// func p256FromMont(res, in *p256Element)
+TEXT ·p256FromMont(SB),NOSPLIT,$0
+ MOVD res+0(FP), res_ptr
+ MOVD in+8(FP), a_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ LDP 0*16(a_ptr), (acc0, acc1)
+ LDP 1*16(a_ptr), (acc2, acc3)
+ // Only reduce, no multiplications are needed
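+	// (Conversion out of Montgomery form is a Montgomery multiplication
+	// by 1: the result is x * R^-1 mod p with R = 2^256, so only the four
+	// folding steps below are required.)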
+ // First reduction step
+ ADDS acc0<<32, acc1, acc1
+ LSR $32, acc0, t0
+ MUL acc0, const1, t1
+ UMULH acc0, const1, acc0
+ ADCS t0, acc2
+ ADCS t1, acc3
+ ADC $0, acc0
+ // Second reduction step
+ ADDS acc1<<32, acc2, acc2
+ LSR $32, acc1, t0
+ MUL acc1, const1, t1
+ UMULH acc1, const1, acc1
+ ADCS t0, acc3
+ ADCS t1, acc0
+ ADC $0, acc1
+ // Third reduction step
+ ADDS acc2<<32, acc3, acc3
+ LSR $32, acc2, t0
+ MUL acc2, const1, t1
+ UMULH acc2, const1, acc2
+ ADCS t0, acc0
+ ADCS t1, acc1
+ ADC $0, acc2
+ // Last reduction step
+ ADDS acc3<<32, acc0, acc0
+ LSR $32, acc3, t0
+ MUL acc3, const1, t1
+ UMULH acc3, const1, acc3
+ ADCS t0, acc1
+ ADCS t1, acc2
+ ADC $0, acc3
+
+ SUBS $-1, acc0, t0
+ SBCS const0, acc1, t1
+ SBCS $0, acc2, t2
+ SBCS const1, acc3, t3
+
+ CSEL CS, t0, acc0, acc0
+ CSEL CS, t1, acc1, acc1
+ CSEL CS, t2, acc2, acc2
+ CSEL CS, t3, acc3, acc3
+
+ STP (acc0, acc1), 0*16(res_ptr)
+ STP (acc2, acc3), 1*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256Select(res *P256Point, table *p256Table, idx int)
+TEXT ·p256Select(SB),NOSPLIT,$0
+ MOVD idx+16(FP), const0
+ MOVD table+8(FP), b_ptr
+ MOVD res+0(FP), res_ptr
+
+ EOR x0, x0, x0
+ EOR x1, x1, x1
+ EOR x2, x2, x2
+ EOR x3, x3, x3
+ EOR y0, y0, y0
+ EOR y1, y1, y1
+ EOR y2, y2, y2
+ EOR y3, y3, y3
+ EOR t0, t0, t0
+ EOR t1, t1, t1
+ EOR t2, t2, t2
+ EOR t3, t3, t3
+
+ MOVD $0, const1
+
+loop_select:
+ ADD $1, const1
+ CMP const0, const1
+ LDP.P 16(b_ptr), (acc0, acc1)
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ LDP.P 16(b_ptr), (acc2, acc3)
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ LDP.P 16(b_ptr), (acc4, acc5)
+ CSEL EQ, acc4, y0, y0
+ CSEL EQ, acc5, y1, y1
+ LDP.P 16(b_ptr), (acc6, acc7)
+ CSEL EQ, acc6, y2, y2
+ CSEL EQ, acc7, y3, y3
+ LDP.P 16(b_ptr), (acc0, acc1)
+ CSEL EQ, acc0, t0, t0
+ CSEL EQ, acc1, t1, t1
+ LDP.P 16(b_ptr), (acc2, acc3)
+ CSEL EQ, acc2, t2, t2
+ CSEL EQ, acc3, t3, t3
+
+ CMP $16, const1
+ BNE loop_select
+
+ STP (x0, x1), 0*16(res_ptr)
+ STP (x2, x3), 1*16(res_ptr)
+ STP (y0, y1), 2*16(res_ptr)
+ STP (y2, y3), 3*16(res_ptr)
+ STP (t0, t1), 4*16(res_ptr)
+ STP (t2, t3), 5*16(res_ptr)
+ RET
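+//
+// The loop above reads every table entry and conditionally keeps one with
+// CSEL, so the memory access pattern is independent of idx. The same
+// pattern sketched in Go (illustrative; subtle is crypto/subtle and
+// pointSelect is a hypothetical constant-time conditional copy):
+//
+//	func tableSelect(dst *P256Point, table *[16]P256Point, idx int) {
+//		for i := range table {
+//			// mask is 1 only for the entry where i+1 == idx.
+//			mask := subtle.ConstantTimeEq(int32(i+1), int32(idx))
+//			pointSelect(dst, &table[i], mask) // hypothetical helper
+//		}
+//	}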
+/* ---------------------------------------*/
+// func p256SelectAffine(res *p256AffinePoint, table *p256AffineTable, idx int)
+TEXT ·p256SelectAffine(SB),NOSPLIT,$0
+ MOVD idx+16(FP), t0
+ MOVD table+8(FP), t1
+ MOVD res+0(FP), res_ptr
+
+ EOR x0, x0, x0
+ EOR x1, x1, x1
+ EOR x2, x2, x2
+ EOR x3, x3, x3
+ EOR y0, y0, y0
+ EOR y1, y1, y1
+ EOR y2, y2, y2
+ EOR y3, y3, y3
+
+ MOVD $0, t2
+
+loop_select:
+ ADD $1, t2
+ CMP t0, t2
+ LDP.P 16(t1), (acc0, acc1)
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ LDP.P 16(t1), (acc2, acc3)
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ LDP.P 16(t1), (acc4, acc5)
+ CSEL EQ, acc4, y0, y0
+ CSEL EQ, acc5, y1, y1
+ LDP.P 16(t1), (acc6, acc7)
+ CSEL EQ, acc6, y2, y2
+ CSEL EQ, acc7, y3, y3
+
+ CMP $32, t2
+ BNE loop_select
+
+ STP (x0, x1), 0*16(res_ptr)
+ STP (x2, x3), 1*16(res_ptr)
+ STP (y0, y1), 2*16(res_ptr)
+ STP (y2, y3), 3*16(res_ptr)
+ RET
+/* ---------------------------------------*/
+// func p256OrdSqr(res, in *p256OrdElement, n int)
+TEXT ·p256OrdSqr(SB),NOSPLIT,$0
+ MOVD in+8(FP), a_ptr
+ MOVD n+16(FP), b_ptr
+
+ MOVD p256ordK0<>(SB), hlp1
+ LDP p256ord<>+0x00(SB), (const0, const1)
+ LDP p256ord<>+0x10(SB), (const2, const3)
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+
+ordSqrLoop:
+ SUB $1, b_ptr
+
+ // x[1:] * x[0]
+ MUL x0, x1, acc1
+ UMULH x0, x1, acc2
+
+ MUL x0, x2, t0
+ ADDS t0, acc2, acc2
+ UMULH x0, x2, acc3
+
+ MUL x0, x3, t0
+ ADCS t0, acc3, acc3
+ UMULH x0, x3, acc4
+ ADC $0, acc4, acc4
+ // x[2:] * x[1]
+ MUL x1, x2, t0
+ ADDS t0, acc3
+ UMULH x1, x2, t1
+ ADCS t1, acc4
+ ADC $0, ZR, acc5
+
+ MUL x1, x3, t0
+ ADDS t0, acc4
+ UMULH x1, x3, t1
+ ADC t1, acc5
+ // x[3] * x[2]
+ MUL x2, x3, t0
+ ADDS t0, acc5
+ UMULH x2, x3, acc6
+ ADC $0, acc6
+
+ MOVD $0, acc7
+ // *2
+ ADDS acc1, acc1
+ ADCS acc2, acc2
+ ADCS acc3, acc3
+ ADCS acc4, acc4
+ ADCS acc5, acc5
+ ADCS acc6, acc6
+ ADC $0, acc7
+ // Missing products
+ MUL x0, x0, acc0
+ UMULH x0, x0, t0
+ ADDS t0, acc1, acc1
+
+ MUL x1, x1, t0
+ ADCS t0, acc2, acc2
+ UMULH x1, x1, t1
+ ADCS t1, acc3, acc3
+
+ MUL x2, x2, t0
+ ADCS t0, acc4, acc4
+ UMULH x2, x2, t1
+ ADCS t1, acc5, acc5
+
+ MUL x3, x3, t0
+ ADCS t0, acc6, acc6
+ UMULH x3, x3, t1
+ ADC t1, acc7, acc7
+ // First reduction step
+ MUL acc0, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc0, acc0
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc1, acc1
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc2, acc2
+ UMULH const2, hlp0, acc0
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc3, acc3
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, hlp0
+
+ ADDS t1, acc1, acc1
+ ADCS y0, acc2, acc2
+ ADCS acc0, acc3, acc3
+ ADC $0, hlp0, acc0
+ // Second reduction step
+ MUL acc1, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc1, acc1
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc2, acc2
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc3, acc3
+ UMULH const2, hlp0, acc1
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc0, acc0
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, hlp0
+
+ ADDS t1, acc2, acc2
+ ADCS y0, acc3, acc3
+ ADCS acc1, acc0, acc0
+ ADC $0, hlp0, acc1
+ // Third reduction step
+ MUL acc2, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc2, acc2
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc3, acc3
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc0, acc0
+ UMULH const2, hlp0, acc2
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc1, acc1
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, hlp0
+
+ ADDS t1, acc3, acc3
+ ADCS y0, acc0, acc0
+ ADCS acc2, acc1, acc1
+ ADC $0, hlp0, acc2
+
+ // Last reduction step
+ MUL acc3, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc3, acc3
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc0, acc0
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc1, acc1
+ UMULH const2, hlp0, acc3
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc2, acc2
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc7
+
+ ADDS t1, acc0, acc0
+ ADCS y0, acc1, acc1
+ ADCS acc3, acc2, acc2
+ ADC $0, hlp0, acc3
+
+ ADDS acc4, acc0, acc0
+ ADCS acc5, acc1, acc1
+ ADCS acc6, acc2, acc2
+ ADCS acc7, acc3, acc3
+ ADC $0, ZR, acc4
+
+ SUBS const0, acc0, y0
+ SBCS const1, acc1, y1
+ SBCS const2, acc2, y2
+ SBCS const3, acc3, y3
+ SBCS $0, acc4, acc4
+
+ CSEL CS, y0, acc0, x0
+ CSEL CS, y1, acc1, x1
+ CSEL CS, y2, acc2, x2
+ CSEL CS, y3, acc3, x3
+
+ CBNZ b_ptr, ordSqrLoop
+
+ MOVD res+0(FP), res_ptr
+ STP (x0, x1), 0*16(res_ptr)
+ STP (x2, x3), 1*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+// func p256OrdMul(res, in1, in2 *p256OrdElement)
+TEXT ·p256OrdMul(SB),NOSPLIT,$0
+ MOVD in1+8(FP), a_ptr
+ MOVD in2+16(FP), b_ptr
+
+ MOVD p256ordK0<>(SB), hlp1
+ LDP p256ord<>+0x00(SB), (const0, const1)
+ LDP p256ord<>+0x10(SB), (const2, const3)
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+ LDP 0*16(b_ptr), (y0, y1)
+ LDP 1*16(b_ptr), (y2, y3)
+
+ // y[0] * x
+ MUL y0, x0, acc0
+ UMULH y0, x0, acc1
+
+ MUL y0, x1, t0
+ ADDS t0, acc1
+ UMULH y0, x1, acc2
+
+ MUL y0, x2, t0
+ ADCS t0, acc2
+ UMULH y0, x2, acc3
+
+ MUL y0, x3, t0
+ ADCS t0, acc3
+ UMULH y0, x3, acc4
+ ADC $0, acc4
+ // First reduction step
+ MUL acc0, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc0, acc0
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc1, acc1
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc2, acc2
+ UMULH const2, hlp0, acc0
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc3, acc3
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc4
+
+ ADDS t1, acc1, acc1
+ ADCS y0, acc2, acc2
+ ADCS acc0, acc3, acc3
+ ADC $0, hlp0, acc0
+ // y[1] * x
+ MUL y1, x0, t0
+ ADDS t0, acc1
+ UMULH y1, x0, t1
+
+ MUL y1, x1, t0
+ ADCS t0, acc2
+ UMULH y1, x1, hlp0
+
+ MUL y1, x2, t0
+ ADCS t0, acc3
+ UMULH y1, x2, y0
+
+ MUL y1, x3, t0
+ ADCS t0, acc4
+ UMULH y1, x3, y1
+ ADC $0, ZR, acc5
+
+ ADDS t1, acc2
+ ADCS hlp0, acc3
+ ADCS y0, acc4
+ ADC y1, acc5
+ // Second reduction step
+ MUL acc1, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc1, acc1
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc2, acc2
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc3, acc3
+ UMULH const2, hlp0, acc1
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc0, acc0
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc5
+
+ ADDS t1, acc2, acc2
+ ADCS y0, acc3, acc3
+ ADCS acc1, acc0, acc0
+ ADC $0, hlp0, acc1
+ // y[2] * x
+ MUL y2, x0, t0
+ ADDS t0, acc2
+ UMULH y2, x0, t1
+
+ MUL y2, x1, t0
+ ADCS t0, acc3
+ UMULH y2, x1, hlp0
+
+ MUL y2, x2, t0
+ ADCS t0, acc4
+ UMULH y2, x2, y0
+
+ MUL y2, x3, t0
+ ADCS t0, acc5
+ UMULH y2, x3, y1
+ ADC $0, ZR, acc6
+
+ ADDS t1, acc3
+ ADCS hlp0, acc4
+ ADCS y0, acc5
+ ADC y1, acc6
+ // Third reduction step
+ MUL acc2, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc2, acc2
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc3, acc3
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc0, acc0
+ UMULH const2, hlp0, acc2
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc1, acc1
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc6
+
+ ADDS t1, acc3, acc3
+ ADCS y0, acc0, acc0
+ ADCS acc2, acc1, acc1
+ ADC $0, hlp0, acc2
+ // y[3] * x
+ MUL y3, x0, t0
+ ADDS t0, acc3
+ UMULH y3, x0, t1
+
+ MUL y3, x1, t0
+ ADCS t0, acc4
+ UMULH y3, x1, hlp0
+
+ MUL y3, x2, t0
+ ADCS t0, acc5
+ UMULH y3, x2, y0
+
+ MUL y3, x3, t0
+ ADCS t0, acc6
+ UMULH y3, x3, y1
+ ADC $0, ZR, acc7
+
+ ADDS t1, acc4
+ ADCS hlp0, acc5
+ ADCS y0, acc6
+ ADC y1, acc7
+ // Last reduction step
+ MUL acc3, hlp1, hlp0
+
+ MUL const0, hlp1, t0
+ ADDS t0, acc3, acc3
+ UMULH const0, hlp0, t1
+
+ MUL const1, hlp0, t0
+ ADCS t0, acc0, acc0
+ UMULH const1, hlp0, y0
+
+ MUL const2, hlp0, t0
+ ADCS t0, acc1, acc1
+ UMULH const2, hlp0, acc3
+
+ MUL const3, hlp0, t0
+ ADCS t0, acc2, acc2
+
+ UMULH const3, hlp0, hlp0
+ ADC $0, acc7
+
+ ADDS t1, acc0, acc0
+ ADCS y0, acc1, acc1
+ ADCS acc3, acc2, acc2
+ ADC $0, hlp0, acc3
+
+ ADDS acc4, acc0, acc0
+ ADCS acc5, acc1, acc1
+ ADCS acc6, acc2, acc2
+ ADCS acc7, acc3, acc3
+ ADC $0, ZR, acc4
+
+ SUBS const0, acc0, t0
+ SBCS const1, acc1, t1
+ SBCS const2, acc2, t2
+ SBCS const3, acc3, t3
+ SBCS $0, acc4, acc4
+
+ CSEL CS, t0, acc0, acc0
+ CSEL CS, t1, acc1, acc1
+ CSEL CS, t2, acc2, acc2
+ CSEL CS, t3, acc3, acc3
+
+ MOVD res+0(FP), res_ptr
+ STP (acc0, acc1), 0*16(res_ptr)
+ STP (acc2, acc3), 1*16(res_ptr)
+
+ RET
+/* ---------------------------------------*/
+TEXT p256SubInternal<>(SB),NOSPLIT,$0
+ SUBS x0, y0, acc0
+ SBCS x1, y1, acc1
+ SBCS x2, y2, acc2
+ SBCS x3, y3, acc3
+ SBC $0, ZR, t0
+
+ ADDS $-1, acc0, acc4
+ ADCS const0, acc1, acc5
+ ADCS $0, acc2, acc6
+ ADC const1, acc3, acc7
+
+ ANDS $1, t0
+ CSEL EQ, acc0, acc4, x0
+ CSEL EQ, acc1, acc5, x1
+ CSEL EQ, acc2, acc6, x2
+ CSEL EQ, acc3, acc7, x3
+
+ RET
+/* ---------------------------------------*/
+TEXT p256SqrInternal<>(SB),NOSPLIT,$0
+ // x[1:] * x[0]
+ MUL x0, x1, acc1
+ UMULH x0, x1, acc2
+
+ MUL x0, x2, t0
+ ADDS t0, acc2, acc2
+ UMULH x0, x2, acc3
+
+ MUL x0, x3, t0
+ ADCS t0, acc3, acc3
+ UMULH x0, x3, acc4
+ ADC $0, acc4, acc4
+ // x[2:] * x[1]
+ MUL x1, x2, t0
+ ADDS t0, acc3
+ UMULH x1, x2, t1
+ ADCS t1, acc4
+ ADC $0, ZR, acc5
+
+ MUL x1, x3, t0
+ ADDS t0, acc4
+ UMULH x1, x3, t1
+ ADC t1, acc5
+ // x[3] * x[2]
+ MUL x2, x3, t0
+ ADDS t0, acc5
+ UMULH x2, x3, acc6
+ ADC $0, acc6
+
+ MOVD $0, acc7
+ // *2
+ ADDS acc1, acc1
+ ADCS acc2, acc2
+ ADCS acc3, acc3
+ ADCS acc4, acc4
+ ADCS acc5, acc5
+ ADCS acc6, acc6
+ ADC $0, acc7
+ // Missing products
+ MUL x0, x0, acc0
+ UMULH x0, x0, t0
+ ADDS t0, acc1, acc1
+
+ MUL x1, x1, t0
+ ADCS t0, acc2, acc2
+ UMULH x1, x1, t1
+ ADCS t1, acc3, acc3
+
+ MUL x2, x2, t0
+ ADCS t0, acc4, acc4
+ UMULH x2, x2, t1
+ ADCS t1, acc5, acc5
+
+ MUL x3, x3, t0
+ ADCS t0, acc6, acc6
+ UMULH x3, x3, t1
+ ADCS t1, acc7, acc7
+ // First reduction step
+ ADDS acc0<<32, acc1, acc1
+ LSR $32, acc0, t0
+ MUL acc0, const1, t1
+ UMULH acc0, const1, acc0
+ ADCS t0, acc2, acc2
+ ADCS t1, acc3, acc3
+ ADC $0, acc0, acc0
+ // Second reduction step
+ ADDS acc1<<32, acc2, acc2
+ LSR $32, acc1, t0
+ MUL acc1, const1, t1
+ UMULH acc1, const1, acc1
+ ADCS t0, acc3, acc3
+ ADCS t1, acc0, acc0
+ ADC $0, acc1, acc1
+ // Third reduction step
+ ADDS acc2<<32, acc3, acc3
+ LSR $32, acc2, t0
+ MUL acc2, const1, t1
+ UMULH acc2, const1, acc2
+ ADCS t0, acc0, acc0
+ ADCS t1, acc1, acc1
+ ADC $0, acc2, acc2
+ // Last reduction step
+ ADDS acc3<<32, acc0, acc0
+ LSR $32, acc3, t0
+ MUL acc3, const1, t1
+ UMULH acc3, const1, acc3
+ ADCS t0, acc1, acc1
+ ADCS t1, acc2, acc2
+ ADC $0, acc3, acc3
+ // Add bits [511:256] of the sqr result
+ ADDS acc4, acc0, acc0
+ ADCS acc5, acc1, acc1
+ ADCS acc6, acc2, acc2
+ ADCS acc7, acc3, acc3
+ ADC $0, ZR, acc4
+
+ SUBS $-1, acc0, t0
+ SBCS const0, acc1, t1
+ SBCS $0, acc2, t2
+ SBCS const1, acc3, t3
+ SBCS $0, acc4, acc4
+
+ CSEL CS, t0, acc0, y0
+ CSEL CS, t1, acc1, y1
+ CSEL CS, t2, acc2, y2
+ CSEL CS, t3, acc3, y3
+ RET
+/* ---------------------------------------*/
+TEXT p256MulInternal<>(SB),NOSPLIT,$0
+ // y[0] * x
+ MUL y0, x0, acc0
+ UMULH y0, x0, acc1
+
+ MUL y0, x1, t0
+ ADDS t0, acc1
+ UMULH y0, x1, acc2
+
+ MUL y0, x2, t0
+ ADCS t0, acc2
+ UMULH y0, x2, acc3
+
+ MUL y0, x3, t0
+ ADCS t0, acc3
+ UMULH y0, x3, acc4
+ ADC $0, acc4
+ // First reduction step
+ ADDS acc0<<32, acc1, acc1
+ LSR $32, acc0, t0
+ MUL acc0, const1, t1
+ UMULH acc0, const1, acc0
+ ADCS t0, acc2
+ ADCS t1, acc3
+ ADC $0, acc0
+ // y[1] * x
+ MUL y1, x0, t0
+ ADDS t0, acc1
+ UMULH y1, x0, t1
+
+ MUL y1, x1, t0
+ ADCS t0, acc2
+ UMULH y1, x1, t2
+
+ MUL y1, x2, t0
+ ADCS t0, acc3
+ UMULH y1, x2, t3
+
+ MUL y1, x3, t0
+ ADCS t0, acc4
+ UMULH y1, x3, hlp0
+ ADC $0, ZR, acc5
+
+ ADDS t1, acc2
+ ADCS t2, acc3
+ ADCS t3, acc4
+ ADC hlp0, acc5
+ // Second reduction step
+ ADDS acc1<<32, acc2, acc2
+ LSR $32, acc1, t0
+ MUL acc1, const1, t1
+ UMULH acc1, const1, acc1
+ ADCS t0, acc3
+ ADCS t1, acc0
+ ADC $0, acc1
+ // y[2] * x
+ MUL y2, x0, t0
+ ADDS t0, acc2
+ UMULH y2, x0, t1
+
+ MUL y2, x1, t0
+ ADCS t0, acc3
+ UMULH y2, x1, t2
+
+ MUL y2, x2, t0
+ ADCS t0, acc4
+ UMULH y2, x2, t3
+
+ MUL y2, x3, t0
+ ADCS t0, acc5
+ UMULH y2, x3, hlp0
+ ADC $0, ZR, acc6
+
+ ADDS t1, acc3
+ ADCS t2, acc4
+ ADCS t3, acc5
+ ADC hlp0, acc6
+ // Third reduction step
+ ADDS acc2<<32, acc3, acc3
+ LSR $32, acc2, t0
+ MUL acc2, const1, t1
+ UMULH acc2, const1, acc2
+ ADCS t0, acc0
+ ADCS t1, acc1
+ ADC $0, acc2
+ // y[3] * x
+ MUL y3, x0, t0
+ ADDS t0, acc3
+ UMULH y3, x0, t1
+
+ MUL y3, x1, t0
+ ADCS t0, acc4
+ UMULH y3, x1, t2
+
+ MUL y3, x2, t0
+ ADCS t0, acc5
+ UMULH y3, x2, t3
+
+ MUL y3, x3, t0
+ ADCS t0, acc6
+ UMULH y3, x3, hlp0
+ ADC $0, ZR, acc7
+
+ ADDS t1, acc4
+ ADCS t2, acc5
+ ADCS t3, acc6
+ ADC hlp0, acc7
+ // Last reduction step
+ ADDS acc3<<32, acc0, acc0
+ LSR $32, acc3, t0
+ MUL acc3, const1, t1
+ UMULH acc3, const1, acc3
+ ADCS t0, acc1
+ ADCS t1, acc2
+ ADC $0, acc3
+ // Add bits [511:256] of the mul result
+ ADDS acc4, acc0, acc0
+ ADCS acc5, acc1, acc1
+ ADCS acc6, acc2, acc2
+ ADCS acc7, acc3, acc3
+ ADC $0, ZR, acc4
+
+ SUBS $-1, acc0, t0
+ SBCS const0, acc1, t1
+ SBCS $0, acc2, t2
+ SBCS const1, acc3, t3
+ SBCS $0, acc4, acc4
+
+ CSEL CS, t0, acc0, y0
+ CSEL CS, t1, acc1, y1
+ CSEL CS, t2, acc2, y2
+ CSEL CS, t3, acc3, y3
+ RET
+/* ---------------------------------------*/
+#define p256MulBy2Inline \
+ ADDS y0, y0, x0; \
+ ADCS y1, y1, x1; \
+ ADCS y2, y2, x2; \
+ ADCS y3, y3, x3; \
+ ADC $0, ZR, hlp0; \
+ SUBS $-1, x0, t0; \
+ SBCS const0, x1, t1;\
+ SBCS $0, x2, t2; \
+ SBCS const1, x3, t3;\
+ SBCS $0, hlp0, hlp0;\
+ CSEL CC, x0, t0, x0;\
+ CSEL CC, x1, t1, x1;\
+ CSEL CC, x2, t2, x2;\
+ CSEL CC, x3, t3, x3;
+/* ---------------------------------------*/
+#define x1in(off) (off)(a_ptr)
+#define y1in(off) (off + 32)(a_ptr)
+#define z1in(off) (off + 64)(a_ptr)
+#define x2in(off) (off)(b_ptr)
+#define z2in(off) (off + 64)(b_ptr)
+#define x3out(off) (off)(res_ptr)
+#define y3out(off) (off + 32)(res_ptr)
+#define z3out(off) (off + 64)(res_ptr)
+#define LDx(src) LDP src(0), (x0, x1); LDP src(16), (x2, x3)
+#define LDy(src) LDP src(0), (y0, y1); LDP src(16), (y2, y3)
+#define STx(src) STP (x0, x1), src(0); STP (x2, x3), src(16)
+#define STy(src) STP (y0, y1), src(0); STP (y2, y3), src(16)
+/* ---------------------------------------*/
+#define y2in(off) (32*0 + 8 + off)(RSP)
+#define s2(off) (32*1 + 8 + off)(RSP)
+#define z1sqr(off) (32*2 + 8 + off)(RSP)
+#define h(off) (32*3 + 8 + off)(RSP)
+#define r(off) (32*4 + 8 + off)(RSP)
+#define hsqr(off) (32*5 + 8 + off)(RSP)
+#define rsqr(off) (32*6 + 8 + off)(RSP)
+#define hcub(off) (32*7 + 8 + off)(RSP)
+
+#define z2sqr(off) (32*8 + 8 + off)(RSP)
+#define s1(off) (32*9 + 8 + off)(RSP)
+#define u1(off) (32*10 + 8 + off)(RSP)
+#define u2(off) (32*11 + 8 + off)(RSP)
+
+// func p256PointAddAffineAsm(res, in1 *P256Point, in2 *p256AffinePoint, sign, sel, zero int)
+TEXT ·p256PointAddAffineAsm(SB),0,$264-48
+ MOVD in1+8(FP), a_ptr
+ MOVD in2+16(FP), b_ptr
+ MOVD sign+24(FP), hlp0
+ MOVD sel+32(FP), hlp1
+ MOVD zero+40(FP), t2
+
+ MOVD $1, t0
+ CMP $0, t2
+ CSEL EQ, ZR, t0, t2
+ CMP $0, hlp1
+ CSEL EQ, ZR, t0, hlp1
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+ EOR t2<<1, hlp1
+
+ // Negate y2in based on sign
+ LDP 2*16(b_ptr), (y0, y1)
+ LDP 3*16(b_ptr), (y2, y3)
+ MOVD $-1, acc0
+
+ SUBS y0, acc0, acc0
+ SBCS y1, const0, acc1
+ SBCS y2, ZR, acc2
+ SBCS y3, const1, acc3
+ SBC $0, ZR, t0
+
+ ADDS $-1, acc0, acc4
+ ADCS const0, acc1, acc5
+ ADCS $0, acc2, acc6
+ ADCS const1, acc3, acc7
+ ADC $0, t0, t0
+
+ CMP $0, t0
+ CSEL EQ, acc4, acc0, acc0
+ CSEL EQ, acc5, acc1, acc1
+ CSEL EQ, acc6, acc2, acc2
+ CSEL EQ, acc7, acc3, acc3
+ // If condition is 0, keep original value
+ CMP $0, hlp0
+ CSEL EQ, y0, acc0, y0
+ CSEL EQ, y1, acc1, y1
+ CSEL EQ, y2, acc2, y2
+ CSEL EQ, y3, acc3, y3
+ // Store result
+ STy(y2in)
+ // Begin point add
+ LDx(z1in)
+ CALL p256SqrInternal<>(SB) // z1ˆ2
+ STy(z1sqr)
+
+ LDx(x2in)
+ CALL p256MulInternal<>(SB) // x2 * z1ˆ2
+
+ LDx(x1in)
+ CALL p256SubInternal<>(SB) // h = u2 - u1
+ STx(h)
+
+ LDy(z1in)
+ CALL p256MulInternal<>(SB) // z3 = h * z1
+
+	LDP 4*16(a_ptr), (acc0, acc1) // iff select[0] == 0, z3 = z1
+ LDP 5*16(a_ptr), (acc2, acc3)
+ ANDS $1, hlp1, ZR
+ CSEL EQ, acc0, y0, y0
+ CSEL EQ, acc1, y1, y1
+ CSEL EQ, acc2, y2, y2
+ CSEL EQ, acc3, y3, y3
+ LDP p256one<>+0x00(SB), (acc0, acc1)
+ LDP p256one<>+0x10(SB), (acc2, acc3)
+ ANDS $2, hlp1, ZR // iff select[1] == 0, z3 = 1
+ CSEL EQ, acc0, y0, y0
+ CSEL EQ, acc1, y1, y1
+ CSEL EQ, acc2, y2, y2
+ CSEL EQ, acc3, y3, y3
+ LDx(z1in)
+ MOVD res+0(FP), t0
+ STP (y0, y1), 4*16(t0)
+ STP (y2, y3), 5*16(t0)
+
+ LDy(z1sqr)
+ CALL p256MulInternal<>(SB) // z1 ^ 3
+
+ LDx(y2in)
+ CALL p256MulInternal<>(SB) // s2 = y2 * z1ˆ3
+ STy(s2)
+
+ LDx(y1in)
+ CALL p256SubInternal<>(SB) // r = s2 - s1
+ STx(r)
+
+ CALL p256SqrInternal<>(SB) // rsqr = rˆ2
+	STy(rsqr)
+
+ LDx(h)
+ CALL p256SqrInternal<>(SB) // hsqr = hˆ2
+ STy(hsqr)
+
+ CALL p256MulInternal<>(SB) // hcub = hˆ3
+ STy(hcub)
+
+ LDx(y1in)
+ CALL p256MulInternal<>(SB) // y1 * hˆ3
+ STy(s2)
+
+ LDP hsqr(0*8), (x0, x1)
+ LDP hsqr(2*8), (x2, x3)
+ LDP 0*16(a_ptr), (y0, y1)
+ LDP 1*16(a_ptr), (y2, y3)
+ CALL p256MulInternal<>(SB) // u1 * hˆ2
+ STP (y0, y1), h(0*8)
+ STP (y2, y3), h(2*8)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+
+ LDy(rsqr)
+ CALL p256SubInternal<>(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ MOVD x0, y0
+ MOVD x1, y1
+ MOVD x2, y2
+ MOVD x3, y3
+ LDx(hcub)
+ CALL p256SubInternal<>(SB)
+
+ LDP 0*16(a_ptr), (acc0, acc1)
+ LDP 1*16(a_ptr), (acc2, acc3)
+ ANDS $1, hlp1, ZR // iff select[0] == 0, x3 = x1
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ LDP 0*16(b_ptr), (acc0, acc1)
+ LDP 1*16(b_ptr), (acc2, acc3)
+ ANDS $2, hlp1, ZR // iff select[1] == 0, x3 = x2
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ MOVD res+0(FP), t0
+ STP (x0, x1), 0*16(t0)
+ STP (x2, x3), 1*16(t0)
+
+ LDP h(0*8), (y0, y1)
+ LDP h(2*8), (y2, y3)
+ CALL p256SubInternal<>(SB)
+
+ LDP r(0*8), (y0, y1)
+ LDP r(2*8), (y2, y3)
+ CALL p256MulInternal<>(SB)
+
+ LDP s2(0*8), (x0, x1)
+ LDP s2(2*8), (x2, x3)
+ CALL p256SubInternal<>(SB)
+ LDP 2*16(a_ptr), (acc0, acc1)
+ LDP 3*16(a_ptr), (acc2, acc3)
+ ANDS $1, hlp1, ZR // iff select[0] == 0, y3 = y1
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ LDP y2in(0*8), (acc0, acc1)
+ LDP y2in(2*8), (acc2, acc3)
+ ANDS $2, hlp1, ZR // iff select[1] == 0, y3 = y2
+ CSEL EQ, acc0, x0, x0
+ CSEL EQ, acc1, x1, x1
+ CSEL EQ, acc2, x2, x2
+ CSEL EQ, acc3, x3, x3
+ MOVD res+0(FP), t0
+ STP (x0, x1), 2*16(t0)
+ STP (x2, x3), 3*16(t0)
+
+ RET
+
+#define p256AddInline \
+ ADDS y0, x0, x0; \
+ ADCS y1, x1, x1; \
+ ADCS y2, x2, x2; \
+ ADCS y3, x3, x3; \
+ ADC $0, ZR, hlp0; \
+ SUBS $-1, x0, t0; \
+ SBCS const0, x1, t1;\
+ SBCS $0, x2, t2; \
+ SBCS const1, x3, t3;\
+ SBCS $0, hlp0, hlp0;\
+ CSEL CC, x0, t0, x0;\
+ CSEL CC, x1, t1, x1;\
+ CSEL CC, x2, t2, x2;\
+ CSEL CC, x3, t3, x3;
+
+#define s(off) (32*0 + 8 + off)(RSP)
+#define m(off) (32*1 + 8 + off)(RSP)
+#define zsqr(off) (32*2 + 8 + off)(RSP)
+#define tmp(off) (32*3 + 8 + off)(RSP)
+
+// func p256PointDoubleAsm(res, in *P256Point)
+TEXT ·p256PointDoubleAsm(SB),NOSPLIT,$136-16
+ MOVD res+0(FP), res_ptr
+ MOVD in+8(FP), a_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ // Begin point double
+ LDP 4*16(a_ptr), (x0, x1)
+ LDP 5*16(a_ptr), (x2, x3)
+ CALL p256SqrInternal<>(SB)
+ STP (y0, y1), zsqr(0*8)
+ STP (y2, y3), zsqr(2*8)
+
+ LDP 0*16(a_ptr), (x0, x1)
+ LDP 1*16(a_ptr), (x2, x3)
+ p256AddInline
+ STx(m)
+
+ LDx(z1in)
+ LDy(y1in)
+ CALL p256MulInternal<>(SB)
+ p256MulBy2Inline
+ STx(z3out)
+
+ LDy(x1in)
+ LDx(zsqr)
+ CALL p256SubInternal<>(SB)
+ LDy(m)
+ CALL p256MulInternal<>(SB)
+
+ // Multiply by 3
+ p256MulBy2Inline
+ p256AddInline
+ STx(m)
+
+ LDy(y1in)
+ p256MulBy2Inline
+ CALL p256SqrInternal<>(SB)
+ STy(s)
+ MOVD y0, x0
+ MOVD y1, x1
+ MOVD y2, x2
+ MOVD y3, x3
+ CALL p256SqrInternal<>(SB)
+
+ // Divide by 2
+ ADDS $-1, y0, t0
+ ADCS const0, y1, t1
+ ADCS $0, y2, t2
+ ADCS const1, y3, t3
+ ADC $0, ZR, hlp0
+
+ ANDS $1, y0, ZR
+ CSEL EQ, y0, t0, t0
+ CSEL EQ, y1, t1, t1
+ CSEL EQ, y2, t2, t2
+ CSEL EQ, y3, t3, t3
+ AND y0, hlp0, hlp0
+
+ EXTR $1, t0, t1, y0
+ EXTR $1, t1, t2, y1
+ EXTR $1, t2, t3, y2
+ EXTR $1, t3, hlp0, y3
+ STy(y3out)
+
+ LDx(x1in)
+ LDy(s)
+ CALL p256MulInternal<>(SB)
+ STy(s)
+ p256MulBy2Inline
+ STx(tmp)
+
+ LDx(m)
+ CALL p256SqrInternal<>(SB)
+ LDx(tmp)
+ CALL p256SubInternal<>(SB)
+
+ STx(x3out)
+
+ LDy(s)
+ CALL p256SubInternal<>(SB)
+
+ LDy(m)
+ CALL p256MulInternal<>(SB)
+
+ LDx(y3out)
+ CALL p256SubInternal<>(SB)
+ STx(y3out)
+ RET
+/* ---------------------------------------*/
+#undef y2in
+#undef x3out
+#undef y3out
+#undef z3out
+#define y2in(off) (off + 32)(b_ptr)
+#define x3out(off) (off)(b_ptr)
+#define y3out(off) (off + 32)(b_ptr)
+#define z3out(off) (off + 64)(b_ptr)
+// func p256PointAddAsm(res, in1, in2 *P256Point) int
+TEXT ·p256PointAddAsm(SB),0,$392-32
+ // See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
+ // Move input to stack in order to free registers
+ MOVD in1+8(FP), a_ptr
+ MOVD in2+16(FP), b_ptr
+
+ MOVD p256const0<>(SB), const0
+ MOVD p256const1<>(SB), const1
+
+ // Begin point add
+ LDx(z2in)
+ CALL p256SqrInternal<>(SB) // z2^2
+ STy(z2sqr)
+
+ CALL p256MulInternal<>(SB) // z2^3
+
+ LDx(y1in)
+ CALL p256MulInternal<>(SB) // s1 = z2ˆ3*y1
+ STy(s1)
+
+ LDx(z1in)
+ CALL p256SqrInternal<>(SB) // z1^2
+ STy(z1sqr)
+
+ CALL p256MulInternal<>(SB) // z1^3
+
+ LDx(y2in)
+ CALL p256MulInternal<>(SB) // s2 = z1ˆ3*y2
+
+ LDx(s1)
+ CALL p256SubInternal<>(SB) // r = s2 - s1
+ STx(r)
+
+ MOVD $1, t2
+ ORR x0, x1, t0 // Check if zero mod p256
+ ORR x2, x3, t1
+ ORR t1, t0, t0
+ CMP $0, t0
+ CSEL EQ, t2, ZR, hlp1
+
+ EOR $-1, x0, t0
+ EOR const0, x1, t1
+ EOR const1, x3, t3
+
+ ORR t0, t1, t0
+ ORR x2, t3, t1
+ ORR t1, t0, t0
+ CMP $0, t0
+ CSEL EQ, t2, hlp1, hlp1
+
+ LDx(z2sqr)
+ LDy(x1in)
+ CALL p256MulInternal<>(SB) // u1 = x1 * z2ˆ2
+ STy(u1)
+
+ LDx(z1sqr)
+ LDy(x2in)
+ CALL p256MulInternal<>(SB) // u2 = x2 * z1ˆ2
+ STy(u2)
+
+ LDx(u1)
+ CALL p256SubInternal<>(SB) // h = u2 - u1
+ STx(h)
+
+ MOVD $1, t2
+ ORR x0, x1, t0 // Check if zero mod p256
+ ORR x2, x3, t1
+ ORR t1, t0, t0
+ CMP $0, t0
+ CSEL EQ, t2, ZR, hlp0
+
+ EOR $-1, x0, t0
+ EOR const0, x1, t1
+ EOR const1, x3, t3
+
+ ORR t0, t1, t0
+ ORR x2, t3, t1
+ ORR t1, t0, t0
+ CMP $0, t0
+ CSEL EQ, t2, hlp0, hlp0
+
+ AND hlp0, hlp1, hlp1
+
+ LDx(r)
+ CALL p256SqrInternal<>(SB) // rsqr = rˆ2
+ STy(rsqr)
+
+ LDx(h)
+ CALL p256SqrInternal<>(SB) // hsqr = hˆ2
+ STy(hsqr)
+
+ LDx(h)
+ CALL p256MulInternal<>(SB) // hcub = hˆ3
+ STy(hcub)
+
+ LDx(s1)
+ CALL p256MulInternal<>(SB)
+ STy(s2)
+
+ LDx(z1in)
+ LDy(z2in)
+ CALL p256MulInternal<>(SB) // z1 * z2
+ LDx(h)
+ CALL p256MulInternal<>(SB) // z1 * z2 * h
+ MOVD res+0(FP), b_ptr
+ STy(z3out)
+
+ LDx(hsqr)
+ LDy(u1)
+ CALL p256MulInternal<>(SB) // hˆ2 * u1
+ STy(u2)
+
+ p256MulBy2Inline // u1 * hˆ2 * 2, inline
+ LDy(rsqr)
+ CALL p256SubInternal<>(SB) // rˆ2 - u1 * hˆ2 * 2
+
+ MOVD x0, y0
+ MOVD x1, y1
+ MOVD x2, y2
+ MOVD x3, y3
+ LDx(hcub)
+ CALL p256SubInternal<>(SB)
+ STx(x3out)
+
+ LDy(u2)
+ CALL p256SubInternal<>(SB)
+
+ LDy(r)
+ CALL p256MulInternal<>(SB)
+
+ LDx(s2)
+ CALL p256SubInternal<>(SB)
+ STx(y3out)
+
+ MOVD hlp1, R0
+ MOVD R0, ret+24(FP)
+
+ RET
diff --git a/src/crypto/internal/nistec/p256_asm_ppc64le.s b/src/crypto/internal/nistec/p256_asm_ppc64le.s
new file mode 100644
index 0000000..0593ef3
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_asm_ppc64le.s
@@ -0,0 +1,2208 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// This is a port of the s390x asm implementation
+// to ppc64le.
+
+// Some changes were needed due to differences in
+// the Go opcodes and/or available instructions
+// between s390x and ppc64le.
+
+// 1. There were operand order differences in the
+// VSUBUQM, VSUBCUQ, and VSEL instructions.
+
+// 2. ppc64 does not have a multiply high and low
+// like s390x, so those were implemented using
+// macros to compute the equivalent values.
+
+// 3. The LVX, STVX instructions on ppc64 require
+// 16 byte alignment of the data. To avoid that
+// requirement, data is loaded using LXVD2X and
+// STXVD2X with VPERM to reorder bytes correctly.
+
+// I have identified some areas where I believe
+// changes would be needed to make this work for big
+// endian; however, additional changes beyond what I
+// have noted are most likely needed to make it work.
+// - The string used with VPERM to swap the byte order
+// for loads and stores.
+// - The constants that are loaded from CPOOL.
+//
+
+// The following constants are defined in an order
+// that is correct for use with LXVD2X/STXVD2X
+// on little endian.
+DATA p256<>+0x00(SB)/8, $0xffffffff00000001 // P256
+DATA p256<>+0x08(SB)/8, $0x0000000000000000 // P256
+DATA p256<>+0x10(SB)/8, $0x00000000ffffffff // P256
+DATA p256<>+0x18(SB)/8, $0xffffffffffffffff // P256
+DATA p256<>+0x20(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x28(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x30(SB)/8, $0x0000000010111213 // SEL 0 d1 d0 0
+DATA p256<>+0x38(SB)/8, $0x1415161700000000 // SEL 0 d1 d0 0
+DATA p256<>+0x40(SB)/8, $0x18191a1b1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x48(SB)/8, $0x18191a1b1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x00(SB)/8, $0x00000000ffffffff // P256 original
+DATA p256mul<>+0x08(SB)/8, $0xffffffffffffffff // P256
+DATA p256mul<>+0x10(SB)/8, $0xffffffff00000001 // P256 original
+DATA p256mul<>+0x18(SB)/8, $0x0000000000000000 // P256
+DATA p256mul<>+0x20(SB)/8, $0x1c1d1e1f00000000 // SEL d0 0 0 d0
+DATA p256mul<>+0x28(SB)/8, $0x000000001c1d1e1f // SEL d0 0 0 d0
+DATA p256mul<>+0x30(SB)/8, $0x0001020304050607 // SEL d0 0 d1 d0
+DATA p256mul<>+0x38(SB)/8, $0x1c1d1e1f0c0d0e0f // SEL d0 0 d1 d0
+DATA p256mul<>+0x40(SB)/8, $0x040506071c1d1e1f // SEL 0 d1 d0 d1
+DATA p256mul<>+0x48(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL 0 d1 d0 d1
+DATA p256mul<>+0x50(SB)/8, $0x0405060704050607 // SEL 0 0 d1 d0
+DATA p256mul<>+0x58(SB)/8, $0x1c1d1e1f0c0d0e0f // SEL 0 0 d1 d0
+DATA p256mul<>+0x60(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x68(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x70(SB)/8, $0x141516170c0d0e0f // SEL 0 d1 d0 0
+DATA p256mul<>+0x78(SB)/8, $0x1c1d1e1f14151617 // SEL 0 d1 d0 0
+DATA p256mul<>+0x80(SB)/8, $0xffffffff00000000 // (1*2^256)%P256
+DATA p256mul<>+0x88(SB)/8, $0x0000000000000001 // (1*2^256)%P256
+DATA p256mul<>+0x90(SB)/8, $0x00000000fffffffe // (1*2^256)%P256
+DATA p256mul<>+0x98(SB)/8, $0xffffffffffffffff // (1*2^256)%P256
+
+// External declarations for constants
+GLOBL p256ord<>(SB), 8, $32
+GLOBL p256<>(SB), 8, $80
+GLOBL p256mul<>(SB), 8, $160
+
+// The following macros are used to implement the ppc64le
+// equivalent function from the corresponding s390x
+// instruction for vector multiply high, low, and add,
+// since there aren't exact equivalent instructions.
+// The corresponding s390x instructions appear in the
+// comments.
+// An implementation for big endian would have to be
+// investigated; I expect it would be different.
+//
+//
+// Vector multiply word
+//
+// VMLF x0, x1, out_low
+// VMLHF x0, x1, out_hi
+#define VMULT(x1, x2, out_low, out_hi) \
+ VMULEUW x1, x2, TMP1; \
+ VMULOUW x1, x2, TMP2; \
+ VMRGEW TMP1, TMP2, out_hi; \
+ VMRGOW TMP1, TMP2, out_low
+
+//
+// Vector multiply add word
+//
+// VMALF x0, x1, y, out_low
+// VMALHF x0, x1, y, out_hi
+#define VMULT_ADD(x1, x2, y, one, out_low, out_hi) \
+ VMULEUW y, one, TMP2; \
+ VMULOUW y, one, TMP1; \
+ VMULEUW x1, x2, out_low; \
+ VMULOUW x1, x2, out_hi; \
+ VADDUDM TMP2, out_low, TMP2; \
+ VADDUDM TMP1, out_hi, TMP1; \
+ VMRGOW TMP2, TMP1, out_low; \
+ VMRGEW TMP2, TMP1, out_hi
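+
+// In other words, VMULEUW/VMULOUW form the 64-bit products of the even and
+// odd 32-bit word pairs, and VMRGEW/VMRGOW merge the resulting doublewords
+// so that out_hi and out_low hold the high and low words of each 32x32
+// product, mirroring the s390x VMLHF/VMLF results.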
+
+#define res_ptr R3
+#define a_ptr R4
+
+#undef res_ptr
+#undef a_ptr
+
+#define P1ptr R3
+#define CPOOL R7
+
+#define Y1L V0
+#define Y1H V1
+#define T1L V2
+#define T1H V3
+
+#define PL V30
+#define PH V31
+
+#define CAR1 V6
+// func p256NegCond(val *p256Point, cond int)
+TEXT ·p256NegCond(SB), NOSPLIT, $0-16
+ MOVD val+0(FP), P1ptr
+ MOVD $16, R16
+
+ MOVD cond+8(FP), R6
+ CMP $0, R6
+ BC 12, 2, LR // just return if cond == 0
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ LXVD2X (P1ptr)(R0), Y1L
+ LXVD2X (P1ptr)(R16), Y1H
+
+ XXPERMDI Y1H, Y1H, $2, Y1H
+ XXPERMDI Y1L, Y1L, $2, Y1L
+
+ LXVD2X (CPOOL)(R0), PL
+ LXVD2X (CPOOL)(R16), PH
+
+ VSUBCUQ PL, Y1L, CAR1 // subtract part2 giving carry
+ VSUBUQM PL, Y1L, T1L // subtract part2 giving result
+ VSUBEUQM PH, Y1H, CAR1, T1H // subtract part1 using carry from part2
+
+ XXPERMDI T1H, T1H, $2, T1H
+ XXPERMDI T1L, T1L, $2, T1L
+
+ STXVD2X T1L, (R0+P1ptr)
+ STXVD2X T1H, (R16+P1ptr)
+ RET
+
+#undef P1ptr
+#undef CPOOL
+#undef Y1L
+#undef Y1H
+#undef T1L
+#undef T1H
+#undef PL
+#undef PH
+#undef CAR1
+
+#define P3ptr R3
+#define P1ptr R4
+#define P2ptr R5
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+#define SEL V12
+#define ZER V13
+
+// This function uses LXVD2X and STXVD2X to avoid the
+// data alignment requirement for LVX, STVX. Since
+// this code is just moving bytes and not doing arithmetic,
+// order of the bytes doesn't matter.
+//
+// func p256MovCond(res, a, b *p256Point, cond int)
+TEXT ·p256MovCond(SB), NOSPLIT, $0-32
+ MOVD res+0(FP), P3ptr
+ MOVD a+8(FP), P1ptr
+ MOVD b+16(FP), P2ptr
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $56, R21
+ MOVD $64, R19
+ MOVD $80, R20
+ // cond is R1 + 24 (cond offset) + 32
+ LXVDSX (R1)(R21), SEL
+ VSPLTISB $0, ZER
+ // SEL controls whether to store a or b
+ VCMPEQUD SEL, ZER, SEL
+
+ LXVD2X (P1ptr+R0), X1H
+ LXVD2X (P1ptr+R16), X1L
+ LXVD2X (P1ptr+R17), Y1H
+ LXVD2X (P1ptr+R18), Y1L
+ LXVD2X (P1ptr+R19), Z1H
+ LXVD2X (P1ptr+R20), Z1L
+
+ LXVD2X (P2ptr+R0), X2H
+ LXVD2X (P2ptr+R16), X2L
+ LXVD2X (P2ptr+R17), Y2H
+ LXVD2X (P2ptr+R18), Y2L
+ LXVD2X (P2ptr+R19), Z2H
+ LXVD2X (P2ptr+R20), Z2L
+
+ VSEL X1H, X2H, SEL, X1H
+ VSEL X1L, X2L, SEL, X1L
+ VSEL Y1H, Y2H, SEL, Y1H
+ VSEL Y1L, Y2L, SEL, Y1L
+ VSEL Z1H, Z2H, SEL, Z1H
+ VSEL Z1L, Z2L, SEL, Z1L
+
+ STXVD2X X1H, (P3ptr+R0)
+ STXVD2X X1L, (P3ptr+R16)
+ STXVD2X Y1H, (P3ptr+R17)
+ STXVD2X Y1L, (P3ptr+R18)
+ STXVD2X Z1H, (P3ptr+R19)
+ STXVD2X Z1L, (P3ptr+R20)
+
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef P2ptr
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef SEL
+#undef ZER
+
+#define P3ptr R3
+#define P1ptr R4
+#define COUNT R5
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+
+#define ONE V18
+#define IDX V19
+#define SEL1 V20
+#define SEL2 V21
+// func p256Select(res *P256Point, table *p256Table, idx int)
+TEXT ·p256Select(SB), NOSPLIT, $0-24
+ MOVD res+0(FP), P3ptr
+ MOVD table+8(FP), P1ptr
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+
+ LXVDSX (R1)(R18), SEL1 // VLREPG idx+32(FP), SEL1
+ VSPLTB $7, SEL1, IDX // splat byte
+ VSPLTISB $1, ONE // VREPIB $1, ONE
+ VSPLTISB $1, SEL2 // VREPIB $1, SEL2
+ MOVD $17, COUNT
+ MOVD COUNT, CTR // set up ctr
+
+ VSPLTISB $0, X1H // VZERO X1H
+ VSPLTISB $0, X1L // VZERO X1L
+ VSPLTISB $0, Y1H // VZERO Y1H
+ VSPLTISB $0, Y1L // VZERO Y1L
+ VSPLTISB $0, Z1H // VZERO Z1H
+ VSPLTISB $0, Z1L // VZERO Z1L
+
+loop_select:
+
+	// LXVD2X is used here since data alignment doesn't
+	// matter.
+
+ LXVD2X (P1ptr+R0), X2H
+ LXVD2X (P1ptr+R16), X2L
+ LXVD2X (P1ptr+R17), Y2H
+ LXVD2X (P1ptr+R18), Y2L
+ LXVD2X (P1ptr+R19), Z2H
+ LXVD2X (P1ptr+R20), Z2L
+
+ VCMPEQUD SEL2, IDX, SEL1 // VCEQG SEL2, IDX, SEL1 OK
+
+ // This will result in SEL1 being all 0s or 1s, meaning
+ // the result is either X1L or X2L, no individual byte
+ // selection.
+
+ VSEL X1L, X2L, SEL1, X1L
+ VSEL X1H, X2H, SEL1, X1H
+ VSEL Y1L, Y2L, SEL1, Y1L
+ VSEL Y1H, Y2H, SEL1, Y1H
+ VSEL Z1L, Z2L, SEL1, Z1L
+ VSEL Z1H, Z2H, SEL1, Z1H
+
+ // Add 1 to all bytes in SEL2
+ VADDUBM SEL2, ONE, SEL2 // VAB SEL2, ONE, SEL2 OK
+ ADD $96, P1ptr
+ BDNZ loop_select
+
+ // STXVD2X is used here so that alignment doesn't
+ // need to be verified. Since values were loaded
+ // using LXVD2X this is OK.
+ STXVD2X X1H, (P3ptr+R0)
+ STXVD2X X1L, (P3ptr+R16)
+ STXVD2X Y1H, (P3ptr+R17)
+ STXVD2X Y1L, (P3ptr+R18)
+ STXVD2X Z1H, (P3ptr+R19)
+ STXVD2X Z1L, (P3ptr+R20)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef COUNT
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef ONE
+#undef IDX
+#undef SEL1
+#undef SEL2
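+
+// Hedged Go sketch of the constant-time lookup above, using crypto/subtle
+// and assuming a 16-entry table. res stays zero when idx is 0, which is how
+// the implicit point at infinity is handled:
+//
+//	func selectPoint(res *[12]uint64, table *[16][12]uint64, idx int) {
+//		for i := range res {
+//			res[i] = 0
+//		}
+//		for j := range table {
+//			mask := uint64(0) - uint64(subtle.ConstantTimeEq(int32(j+1), int32(idx)))
+//			for i := range res {
+//				res[i] |= table[j][i] & mask
+//			}
+//		}
+//	}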
+
+// The following functions all reverse the byte order.
+
+//func p256BigToLittle(res *p256Element, in *[32]byte)
+TEXT ·p256BigToLittle(SB), NOSPLIT, $0-16
+ MOVD res+0(FP), R3
+ MOVD in+8(FP), R4
+ BR p256InternalEndianSwap<>(SB)
+
+//func p256LittleToBig(res *[32]byte, in *p256Element)
+TEXT ·p256LittleToBig(SB), NOSPLIT, $0-16
+ MOVD res+0(FP), R3
+ MOVD in+8(FP), R4
+ BR p256InternalEndianSwap<>(SB)
+
+//func p256OrdBigToLittle(res *p256OrdElement, in *[32]byte)
+TEXT ·p256OrdBigToLittle(SB), NOSPLIT, $0-16
+ MOVD res+0(FP), R3
+ MOVD in+8(FP), R4
+ BR p256InternalEndianSwap<>(SB)
+
+//func p256OrdLittleToBig(res *[32]byte, in *p256OrdElement)
+TEXT ·p256OrdLittleToBig(SB), NOSPLIT, $0-16
+ MOVD res+0(FP), R3
+ MOVD in+8(FP), R4
+ BR p256InternalEndianSwap<>(SB)
+
+TEXT p256InternalEndianSwap<>(SB), NOSPLIT, $0-0
+	// Index registers for the byte-reversed (MOVDBR) loads below
+ MOVD $8, R9
+ MOVD $16, R10
+ MOVD $24, R14
+
+ MOVDBR (R0)(R4), R5
+ MOVDBR (R9)(R4), R6
+ MOVDBR (R10)(R4), R7
+ MOVDBR (R14)(R4), R8
+
+ MOVD R8, 0(R3)
+ MOVD R7, 8(R3)
+ MOVD R6, 16(R3)
+ MOVD R5, 24(R3)
+
+ RET
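+
+// The swap is a full 32-byte reversal (four byte-reversed doubleword loads
+// stored in opposite order); a hedged Go equivalent:
+//
+//	func endianSwap(res, in *[32]byte) {
+//		for i := range res {
+//			res[i] = in[31-i]
+//		}
+//	}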
+
+#define P3ptr R3
+#define P1ptr R4
+#define COUNT R5
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+
+#define ONE V18
+#define IDX V19
+#define SEL1 V20
+#define SEL2 V21
+
+// func p256SelectAffine(res *p256AffinePoint, table *p256AffineTable, idx int)
+TEXT ·p256SelectAffine(SB), NOSPLIT, $0-24
+ MOVD res+0(FP), P3ptr
+ MOVD table+8(FP), P1ptr
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+
+ LXVDSX (R1)(R18), SEL1
+ VSPLTB $7, SEL1, IDX // splat byte
+
+ VSPLTISB $1, ONE // Vector with byte 1s
+ VSPLTISB $1, SEL2 // Vector with byte 1s
+ MOVD $64, COUNT
+ MOVD COUNT, CTR // loop count
+
+ VSPLTISB $0, X1H // VZERO X1H
+ VSPLTISB $0, X1L // VZERO X1L
+ VSPLTISB $0, Y1H // VZERO Y1H
+ VSPLTISB $0, Y1L // VZERO Y1L
+
+loop_select:
+ LXVD2X (P1ptr+R0), X2H
+ LXVD2X (P1ptr+R16), X2L
+ LXVD2X (P1ptr+R17), Y2H
+ LXVD2X (P1ptr+R18), Y2L
+
+ VCMPEQUD SEL2, IDX, SEL1 // Compare against idx
+
+ VSEL X1L, X2L, SEL1, X1L // Select if idx matched
+ VSEL X1H, X2H, SEL1, X1H
+ VSEL Y1L, Y2L, SEL1, Y1L
+ VSEL Y1H, Y2H, SEL1, Y1H
+
+ VADDUBM SEL2, ONE, SEL2 // Increment SEL2 bytes by 1
+ ADD $64, P1ptr // Next chunk
+ BDNZ loop_select
+
+ STXVD2X X1H, (P3ptr+R0)
+ STXVD2X X1L, (P3ptr+R16)
+ STXVD2X Y1H, (P3ptr+R17)
+ STXVD2X Y1L, (P3ptr+R18)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef COUNT
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef ONE
+#undef IDX
+#undef SEL1
+#undef SEL2
+
+#define res_ptr R3
+#define x_ptr R4
+#define CPOOL R7
+
+#define T0 V0
+#define T1 V1
+#define T2 V2
+#define TT0 V3
+#define TT1 V4
+
+#define ZER V6
+#define SEL1 V7
+#define SEL2 V8
+#define CAR1 V9
+#define CAR2 V10
+#define RED1 V11
+#define RED2 V12
+#define PL V13
+#define PH V14
+
+// func p256FromMont(res, in *p256Element)
+TEXT ·p256FromMont(SB), NOSPLIT, $0-16
+ MOVD res+0(FP), res_ptr
+ MOVD in+8(FP), x_ptr
+
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $p256<>+0x00(SB), CPOOL
+
+ VSPLTISB $0, T2 // VZERO T2
+ VSPLTISB $0, ZER // VZERO ZER
+
+ // Constants are defined so that the LXVD2X is correct
+ LXVD2X (CPOOL+R0), PH
+ LXVD2X (CPOOL+R16), PL
+
+ // VPERM byte selections
+ LXVD2X (CPOOL+R18), SEL2
+ LXVD2X (CPOOL+R19), SEL1
+
+ LXVD2X (R16)(x_ptr), T1
+ LXVD2X (R0)(x_ptr), T0
+
+ // Put in true little endian order
+ XXPERMDI T0, T0, $2, T0
+ XXPERMDI T1, T1, $2, T1
+
+ // First round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSUBUQM RED2, RED1, RED2 // VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDOI $8, T1, T0, T0 // VSLDB $8, T1, T0, T0
+ VSLDOI $8, T2, T1, T1 // VSLDB $8, T2, T1, T1
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ T0, RED1, CAR1
+ VADDUQM T0, RED1, T0 // VAQ T0, RED1, T0
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ T1, RED2, CAR1, CAR2
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ T1, RED2, CAR1, T1
+ VADDUQM T2, CAR2, T2 // VAQ T2, CAR2, T2
+
+ // Second round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSUBUQM RED2, RED1, RED2 // VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDOI $8, T1, T0, T0 // VSLDB $8, T1, T0, T0
+ VSLDOI $8, T2, T1, T1 // VSLDB $8, T2, T1, T1
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ T0, RED1, CAR1
+ VADDUQM T0, RED1, T0 // VAQ T0, RED1, T0
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ T1, RED2, CAR1, CAR2
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ T1, RED2, CAR1, T1
+ VADDUQM T2, CAR2, T2 // VAQ T2, CAR2, T2
+
+ // Third round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSUBUQM RED2, RED1, RED2 // VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDOI $8, T1, T0, T0 // VSLDB $8, T1, T0, T0
+ VSLDOI $8, T2, T1, T1 // VSLDB $8, T2, T1, T1
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ T0, RED1, CAR1
+ VADDUQM T0, RED1, T0 // VAQ T0, RED1, T0
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ T1, RED2, CAR1, CAR2
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ T1, RED2, CAR1, T1
+ VADDUQM T2, CAR2, T2 // VAQ T2, CAR2, T2
+
+ // Last round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSUBUQM RED2, RED1, RED2 // VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDOI $8, T1, T0, T0 // VSLDB $8, T1, T0, T0
+ VSLDOI $8, T2, T1, T1 // VSLDB $8, T2, T1, T1
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ T0, RED1, CAR1
+ VADDUQM T0, RED1, T0 // VAQ T0, RED1, T0
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ T1, RED2, CAR1, CAR2
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ T1, RED2, CAR1, T1
+ VADDUQM T2, CAR2, T2 // VAQ T2, CAR2, T2
+
+ // ---------------------------------------------------
+
+ VSUBCUQ T0, PL, CAR1 // VSCBIQ PL, T0, CAR1
+ VSUBUQM T0, PL, TT0 // VSQ PL, T0, TT0
+ VSUBECUQ T1, PH, CAR1, CAR2 // VSBCBIQ T1, PH, CAR1, CAR2
+ VSUBEUQM T1, PH, CAR1, TT1 // VSBIQ T1, PH, CAR1, TT1
+ VSUBEUQM T2, ZER, CAR2, T2 // VSBIQ T2, ZER, CAR2, T2
+
+ VSEL TT0, T0, T2, T0
+ VSEL TT1, T1, T2, T1
+
+ // Reorder the bytes so STXVD2X can be used.
+ // TT0, TT1 used for VPERM result in case
+ // the caller expects T0, T1 to be good.
+ XXPERMDI T0, T0, $2, TT0
+ XXPERMDI T1, T1, $2, TT1
+
+ STXVD2X TT0, (R0)(res_ptr)
+ STXVD2X TT1, (R16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef CPOOL
+#undef T0
+#undef T1
+#undef T2
+#undef TT0
+#undef TT1
+#undef ZER
+#undef SEL1
+#undef SEL2
+#undef CAR1
+#undef CAR2
+#undef RED1
+#undef RED2
+#undef PL
+#undef PH
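+
+// Arithmetic behind the four rounds above (hedged summary): because
+// p = 2^256 - 2^224 + 2^192 + 2^96 - 1 satisfies -1/p = 1 (mod 2^64),
+// each round maps t to (t + (t mod 2^64)*p) / 2^64, and the VPERM
+// selectors assemble (t mod 2^64)*p from copies of the low limb without a
+// multiply. Four rounds give t*2^-256 mod p, and the final subtract/select
+// reduces the result below p.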
+
+// ---------------------------------------
+// p256MulInternal
+// V0-V3 V30,V31 - Not Modified
+// V4-V15 V27-V29 - Volatile
+
+#define CPOOL R7
+
+// Parameters
+#define X0 V0 // Not modified
+#define X1 V1 // Not modified
+#define Y0 V2 // Not modified
+#define Y1 V3 // Not modified
+#define T0 V4 // Result
+#define T1 V5 // Result
+#define P0 V30 // Not modified
+#define P1 V31 // Not modified
+
+// Temporaries: lots of reused vector regs
+#define YDIG V6 // Overloaded with CAR2
+#define ADD1H V7 // Overloaded with ADD3H
+#define ADD2H V8 // Overloaded with ADD4H
+#define ADD3 V9 // Overloaded with SEL2,SEL5
+#define ADD4 V10 // Overloaded with SEL3,SEL6
+#define RED1 V11 // Overloaded with CAR2
+#define RED2 V12
+#define RED3 V13 // Overloaded with SEL1
+#define T2 V14
+// Overloaded temporaries
+#define ADD1 V4 // Overloaded with T0
+#define ADD2 V5 // Overloaded with T1
+#define ADD3H V7 // Overloaded with ADD1H
+#define ADD4H V8 // Overloaded with ADD2H
+#define ZER V28 // Overloaded with TMP1
+#define CAR1 V6 // Overloaded with YDIG
+#define CAR2 V11 // Overloaded with RED1
+// Constant Selects
+#define SEL1 V13 // Overloaded with RED3
+#define SEL2 V9 // Overloaded with ADD3,SEL5
+#define SEL3 V10 // Overloaded with ADD4,SEL6
+#define SEL4 V6 // Overloaded with YDIG,CAR1
+#define SEL5 V9 // Overloaded with ADD3,SEL2
+#define SEL6 V10 // Overloaded with ADD4,SEL3
+
+// TMP1, TMP2 used in
+// VMULT macros
+#define TMP1 V13 // Overloaded with RED3
+#define TMP2 V27
+#define ONE V29 // 1s splatted by word
+
+/* *
+ * To follow the flow of bits, for your own sanity a stiff drink, need you shall.
+ * Of a single round, a 'helpful' picture, here is. Meaning, column position has.
+ * With you, SIMD be...
+ *
+ * +--------+--------+
+ * +--------| RED2 | RED1 |
+ * | +--------+--------+
+ * | ---+--------+--------+
+ * | +---- T2| T1 | T0 |--+
+ * | | ---+--------+--------+ |
+ * | | |
+ * | | ======================= |
+ * | | |
+ * | | +--------+--------+<-+
+ * | +-------| ADD2 | ADD1 |--|-----+
+ * | | +--------+--------+ | |
+ * | | +--------+--------+<---+ |
+ * | | | ADD2H | ADD1H |--+ |
+ * | | +--------+--------+ | |
+ * | | +--------+--------+<-+ |
+ * | | | ADD4 | ADD3 |--|-+ |
+ * | | +--------+--------+ | | |
+ * | | +--------+--------+<---+ | |
+ * | | | ADD4H | ADD3H |------|-+ |(+vzero)
+ * | | +--------+--------+ | | V
+ * | | ------------------------ | | +--------+
+ * | | | | | RED3 | [d0 0 0 d0]
+ * | | | | +--------+
+ * | +---->+--------+--------+ | | |
+ * (T2[1w]||ADD2[4w]||ADD1[3w]) +--------| T1 | T0 | | | |
+ * | +--------+--------+ | | |
+ * +---->---+--------+--------+ | | |
+ * T2| T1 | T0 |----+ | |
+ * ---+--------+--------+ | | |
+ * ---+--------+--------+<---+ | |
+ * +--- T2| T1 | T0 |----------+
+ * | ---+--------+--------+ | |
+ * | +--------+--------+<-------------+
+ * | | RED2 | RED1 |-----+ | | [0 d1 d0 d1] [d0 0 d1 d0]
+ * | +--------+--------+ | | |
+ * | +--------+<----------------------+
+ * | | RED3 |--------------+ | [0 0 d1 d0]
+ * | +--------+ | |
+ * +--->+--------+--------+ | |
+ * | T1 | T0 |--------+
+ * +--------+--------+ | |
+ * --------------------------- | |
+ * | |
+ * +--------+--------+<----+ |
+ * | RED2 | RED1 | |
+ * +--------+--------+ |
+ * ---+--------+--------+<-------+
+ * T2| T1 | T0 | (H1P-H1P-H00RRAY!)
+ * ---+--------+--------+
+ *
+ * *My 21st-century work of art @vpaprots
+ *
+ *
+ * First group is special, doesn't get the two inputs:
+ * +--------+--------+<-+
+ * +-------| ADD2 | ADD1 |--|-----+
+ * | +--------+--------+ | |
+ * | +--------+--------+<---+ |
+ * | | ADD2H | ADD1H |--+ |
+ * | +--------+--------+ | |
+ * | +--------+--------+<-+ |
+ * | | ADD4 | ADD3 |--|-+ |
+ * | +--------+--------+ | | |
+ * | +--------+--------+<---+ | |
+ * | | ADD4H | ADD3H |------|-+ |(+vzero)
+ * | +--------+--------+ | | V
+ * | ------------------------ | | +--------+
+ * | | | | RED3 | [d0 0 0 d0]
+ * | | | +--------+
+ * +---->+--------+--------+ | | |
+ * (T2[1w]||ADD2[4w]||ADD1[3w]) | T1 | T0 |----+ | |
+ * +--------+--------+ | | |
+ * ---+--------+--------+<---+ | |
+ * +--- T2| T1 | T0 |----------+
+ * | ---+--------+--------+ | |
+ * | +--------+--------+<-------------+
+ * | | RED2 | RED1 |-----+ | | [0 d1 d0 d1] [d0 0 d1 d0]
+ * | +--------+--------+ | | |
+ * | +--------+<----------------------+
+ * | | RED3 |--------------+ | [0 0 d1 d0]
+ * | +--------+ | |
+ * +--->+--------+--------+ | |
+ * | T1 | T0 |--------+
+ * +--------+--------+ | |
+ * --------------------------- | |
+ * | |
+ * +--------+--------+<----+ |
+ * | RED2 | RED1 | |
+ * +--------+--------+ |
+ * ---+--------+--------+<-------+
+ * T2| T1 | T0 | (H1P-H1P-H00RRAY!)
+ * ---+--------+--------+
+ *
+ * Last 'group' needs to RED2||RED1 shifted less
+ */
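+//
+// Net effect (hedged summary): with X and Y in the Montgomery domain,
+// p256MulInternal returns T = X*Y*2^-256 mod p. A non-constant-time Go
+// cross-check, suitable only for tests (names hypothetical):
+//
+//	func mulMontRef(a, b, p *big.Int) *big.Int {
+//		rInv := new(big.Int).ModInverse(new(big.Int).Lsh(big.NewInt(1), 256), p)
+//		t := new(big.Int).Mul(a, b)
+//		t.Mul(t, rInv)
+//		return t.Mod(t, p)
+//	}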
+TEXT p256MulInternal<>(SB), NOSPLIT, $0-16
+ // CPOOL loaded from caller
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+ MOVD $96, R21
+ MOVD $112, R22
+
+ // ---------------------------------------------------
+
+ VSPLTW $3, Y0, YDIG // VREPF Y0 is input
+
+ // VMLHF X0, YDIG, ADD1H
+ // VMLHF X1, YDIG, ADD2H
+ // VMLF X0, YDIG, ADD1
+ // VMLF X1, YDIG, ADD2
+ //
+ VMULT(X0, YDIG, ADD1, ADD1H)
+ VMULT(X1, YDIG, ADD2, ADD2H)
+
+ VSPLTISW $1, ONE
+ VSPLTW $2, Y0, YDIG // VREPF
+
+ // VMALF X0, YDIG, ADD1H, ADD3
+ // VMALF X1, YDIG, ADD2H, ADD4
+ // VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free
+ // VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free
+ VMULT_ADD(X0, YDIG, ADD1H, ONE, ADD3, ADD3H)
+ VMULT_ADD(X1, YDIG, ADD2H, ONE, ADD4, ADD4H)
+
+ LXVD2X (R17)(CPOOL), SEL1
+ VSPLTISB $0, ZER // VZERO ZER
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDOI $12, ADD2, ADD1, T0 // ADD1 Free // VSLDB
+ VSLDOI $12, ZER, ADD2, T1 // ADD2 Free // VSLDB
+
+ VADDCUQ T0, ADD3, CAR1 // VACCQ
+ VADDUQM T0, ADD3, T0 // ADD3 Free // VAQ
+ VADDECUQ T1, ADD4, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4, CAR1, T1 // ADD4 Free // VACQ
+
+ LXVD2X (R18)(CPOOL), SEL2
+ LXVD2X (R19)(CPOOL), SEL3
+ LXVD2X (R20)(CPOOL), SEL4
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSUBUQM RED2, RED3, RED2 // Guaranteed not to underflow -->? // VSQ
+
+ VSLDOI $12, T1, T0, T0 // VSLDB
+ VSLDOI $12, T2, T1, T1 // VSLDB
+
+ VADDCUQ T0, ADD3H, CAR1 // VACCQ
+ VADDUQM T0, ADD3H, T0 // VAQ
+ VADDECUQ T1, ADD4H, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4H, CAR1, T1 // VACQ
+
+ // ---------------------------------------------------
+
+ VSPLTW $1, Y0, YDIG // VREPF
+
+ // VMALHF X0, YDIG, T0, ADD1H
+ // VMALHF X1, YDIG, T1, ADD2H
+ // VMALF X0, YDIG, T0, ADD1 // T0 Free->ADD1
+ // VMALF X1, YDIG, T1, ADD2 // T1 Free->ADD2
+ VMULT_ADD(X0, YDIG, T0, ONE, ADD1, ADD1H)
+ VMULT_ADD(X1, YDIG, T1, ONE, ADD2, ADD2H)
+
+ VSPLTW $0, Y0, YDIG // VREPF
+
+ // VMALF X0, YDIG, ADD1H, ADD3
+ // VMALF X1, YDIG, ADD2H, ADD4
+ // VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free->ADD3H
+ // VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free->ADD4H , YDIG Free->ZER
+ VMULT_ADD(X0, YDIG, ADD1H, ONE, ADD3, ADD3H)
+ VMULT_ADD(X1, YDIG, ADD2H, ONE, ADD4, ADD4H)
+
+ VSPLTISB $0, ZER // VZERO ZER
+ LXVD2X (R17)(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDOI $12, ADD2, ADD1, T0 // ADD1 Free->T0 // VSLDB
+ VSLDOI $12, T2, ADD2, T1 // ADD2 Free->T1, T2 Free // VSLDB
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ
+ VADDUQM T0, RED1, T0 // VAQ
+ VADDECUQ T1, RED2, CAR1, T2 // VACCCQ
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ
+
+ VADDCUQ T0, ADD3, CAR1 // VACCQ
+ VADDUQM T0, ADD3, T0 // VAQ
+ VADDECUQ T1, ADD4, CAR1, CAR2 // VACCCQ
+ VADDEUQM T1, ADD4, CAR1, T1 // VACQ
+ VADDUQM T2, CAR2, T2 // VAQ
+
+ LXVD2X (R18)(CPOOL), SEL2
+ LXVD2X (R19)(CPOOL), SEL3
+ LXVD2X (R20)(CPOOL), SEL4
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSUBUQM RED2, RED3, RED2 // Guaranteed not to underflow // VSQ
+
+ VSLDOI $12, T1, T0, T0 // VSLDB
+ VSLDOI $12, T2, T1, T1 // VSLDB
+
+ VADDCUQ T0, ADD3H, CAR1 // VACCQ
+ VADDUQM T0, ADD3H, T0 // VAQ
+ VADDECUQ T1, ADD4H, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4H, CAR1, T1 // VACQ
+
+ // ---------------------------------------------------
+
+ VSPLTW $3, Y1, YDIG // VREPF
+
+ // VMALHF X0, YDIG, T0, ADD1H
+ // VMALHF X1, YDIG, T1, ADD2H
+ // VMALF X0, YDIG, T0, ADD1
+ // VMALF X1, YDIG, T1, ADD2
+ VMULT_ADD(X0, YDIG, T0, ONE, ADD1, ADD1H)
+ VMULT_ADD(X1, YDIG, T1, ONE, ADD2, ADD2H)
+
+ VSPLTW $2, Y1, YDIG // VREPF
+
+ // VMALF X0, YDIG, ADD1H, ADD3
+ // VMALF X1, YDIG, ADD2H, ADD4
+ // VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free
+ // VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free
+ VMULT_ADD(X0, YDIG, ADD1H, ONE, ADD3, ADD3H)
+ VMULT_ADD(X1, YDIG, ADD2H, ONE, ADD4, ADD4H)
+
+	VSPLTISB $0, ZER // VZERO ZER
+	LXVD2X (R17)(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDOI $12, ADD2, ADD1, T0 // ADD1 Free // VSLDB
+ VSLDOI $12, T2, ADD2, T1 // ADD2 Free // VSLDB
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ
+ VADDUQM T0, RED1, T0 // VAQ
+ VADDECUQ T1, RED2, CAR1, T2 // VACCCQ
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ
+
+ VADDCUQ T0, ADD3, CAR1 // VACCQ
+ VADDUQM T0, ADD3, T0 // VAQ
+ VADDECUQ T1, ADD4, CAR1, CAR2 // VACCCQ
+ VADDEUQM T1, ADD4, CAR1, T1 // VACQ
+ VADDUQM T2, CAR2, T2 // VAQ
+
+ LXVD2X (R18)(CPOOL), SEL2
+ LXVD2X (R19)(CPOOL), SEL3
+ LXVD2X (R20)(CPOOL), SEL4
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSUBUQM RED2, RED3, RED2 // Guaranteed not to underflow // VSQ
+
+ VSLDOI $12, T1, T0, T0 // VSLDB
+ VSLDOI $12, T2, T1, T1 // VSLDB
+
+ VADDCUQ T0, ADD3H, CAR1 // VACCQ
+ VADDUQM T0, ADD3H, T0 // VAQ
+ VADDECUQ T1, ADD4H, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4H, CAR1, T1 // VACQ
+
+ // ---------------------------------------------------
+
+ VSPLTW $1, Y1, YDIG // VREPF
+
+ // VMALHF X0, YDIG, T0, ADD1H
+ // VMALHF X1, YDIG, T1, ADD2H
+ // VMALF X0, YDIG, T0, ADD1
+ // VMALF X1, YDIG, T1, ADD2
+ VMULT_ADD(X0, YDIG, T0, ONE, ADD1, ADD1H)
+ VMULT_ADD(X1, YDIG, T1, ONE, ADD2, ADD2H)
+
+ VSPLTW $0, Y1, YDIG // VREPF
+
+ // VMALF X0, YDIG, ADD1H, ADD3
+ // VMALF X1, YDIG, ADD2H, ADD4
+ // VMALHF X0, YDIG, ADD1H, ADD3H
+ // VMALHF X1, YDIG, ADD2H, ADD4H
+ VMULT_ADD(X0, YDIG, ADD1H, ONE, ADD3, ADD3H)
+ VMULT_ADD(X1, YDIG, ADD2H, ONE, ADD4, ADD4H)
+
+ VSPLTISB $0, ZER // VZERO ZER
+ LXVD2X (R17)(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDOI $12, ADD2, ADD1, T0 // VSLDB
+ VSLDOI $12, T2, ADD2, T1 // VSLDB
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ
+ VADDUQM T0, RED1, T0 // VAQ
+ VADDECUQ T1, RED2, CAR1, T2 // VACCCQ
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ
+
+ VADDCUQ T0, ADD3, CAR1 // VACCQ
+ VADDUQM T0, ADD3, T0 // VAQ
+ VADDECUQ T1, ADD4, CAR1, CAR2 // VACCCQ
+ VADDEUQM T1, ADD4, CAR1, T1 // VACQ
+ VADDUQM T2, CAR2, T2 // VAQ
+
+ LXVD2X (R21)(CPOOL), SEL5
+ LXVD2X (R22)(CPOOL), SEL6
+ VPERM T0, RED3, SEL5, RED2 // [d1 d0 d1 d0]
+ VPERM T0, RED3, SEL6, RED1 // [ 0 d1 d0 0]
+ VSUBUQM RED2, RED1, RED2 // Guaranteed not to underflow // VSQ
+
+ VSLDOI $12, T1, T0, T0 // VSLDB
+ VSLDOI $12, T2, T1, T1 // VSLDB
+
+ VADDCUQ T0, ADD3H, CAR1 // VACCQ
+ VADDUQM T0, ADD3H, T0 // VAQ
+ VADDECUQ T1, ADD4H, CAR1, T2 // VACCCQ
+ VADDEUQM T1, ADD4H, CAR1, T1 // VACQ
+
+ VADDCUQ T0, RED1, CAR1 // VACCQ
+ VADDUQM T0, RED1, T0 // VAQ
+ VADDECUQ T1, RED2, CAR1, CAR2 // VACCCQ
+ VADDEUQM T1, RED2, CAR1, T1 // VACQ
+ VADDUQM T2, CAR2, T2 // VAQ
+
+ // ---------------------------------------------------
+
+ VSPLTISB $0, RED3 // VZERO RED3
+ VSUBCUQ T0, P0, CAR1 // VSCBIQ
+ VSUBUQM T0, P0, ADD1H // VSQ
+ VSUBECUQ T1, P1, CAR1, CAR2 // VSBCBIQ
+ VSUBEUQM T1, P1, CAR1, ADD2H // VSBIQ
+ VSUBEUQM T2, RED3, CAR2, T2 // VSBIQ
+
+ // what output to use, ADD2H||ADD1H or T1||T0?
+ VSEL ADD1H, T0, T2, T0
+ VSEL ADD2H, T1, T2, T1
+ RET
+
+#undef CPOOL
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+
+#undef SEL1
+#undef SEL2
+#undef SEL3
+#undef SEL4
+#undef SEL5
+#undef SEL6
+
+#undef YDIG
+#undef ADD1H
+#undef ADD2H
+#undef ADD3
+#undef ADD4
+#undef RED1
+#undef RED2
+#undef RED3
+#undef T2
+#undef ADD1
+#undef ADD2
+#undef ADD3H
+#undef ADD4H
+#undef ZER
+#undef CAR1
+#undef CAR2
+
+#undef TMP1
+#undef TMP2
+
+#define p256SubInternal(T1, T0, X1, X0, Y1, Y0) \
+ VSPLTISB $0, ZER \ // VZERO
+ VSUBCUQ X0, Y0, CAR1 \
+ VSUBUQM X0, Y0, T0 \
+ VSUBECUQ X1, Y1, CAR1, SEL1 \
+ VSUBEUQM X1, Y1, CAR1, T1 \
+ VSUBUQM ZER, SEL1, SEL1 \ // VSQ
+ \
+ VADDCUQ T0, PL, CAR1 \ // VACCQ
+ VADDUQM T0, PL, TT0 \ // VAQ
+ VADDEUQM T1, PH, CAR1, TT1 \ // VACQ
+ \
+ VSEL TT0, T0, SEL1, T0 \
+ VSEL TT1, T1, SEL1, T1 \
+
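+// Hedged Go sketch of p256SubInternal (bits is math/bits; P is assumed to
+// hold the four little-endian limbs of the prime): subtract, then add p
+// back when the subtraction borrowed.
+//
+//	func subMod(x, y *[4]uint64) (t [4]uint64) {
+//		var b, c uint64
+//		for i := range t {
+//			t[i], b = bits.Sub64(x[i], y[i], b)
+//		}
+//		var u [4]uint64
+//		for i := range u {
+//			u[i], c = bits.Add64(t[i], P[i], c)
+//		}
+//		if b == 1 { // the asm selects branchlessly with VSEL
+//			t = u
+//		}
+//		return
+//	}
+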
+#define p256AddInternal(T1, T0, X1, X0, Y1, Y0) \
+ VADDCUQ X0, Y0, CAR1 \
+ VADDUQM X0, Y0, T0 \
+ VADDECUQ X1, Y1, CAR1, T2 \ // VACCCQ
+ VADDEUQM X1, Y1, CAR1, T1 \
+ \
+ VSPLTISB $0, ZER \
+ VSUBCUQ T0, PL, CAR1 \ // VSCBIQ
+ VSUBUQM T0, PL, TT0 \
+ VSUBECUQ T1, PH, CAR1, CAR2 \ // VSBCBIQ
+ VSUBEUQM T1, PH, CAR1, TT1 \ // VSBIQ
+ VSUBEUQM T2, ZER, CAR2, SEL1 \
+ \
+ VSEL TT0, T0, SEL1, T0 \
+ VSEL TT1, T1, SEL1, T1
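+
+// Matching hedged sketch for p256AddInternal: add, then subtract p when the
+// sum overflowed 2^256 or the subtraction of p does not borrow (t >= p):
+//
+//	func addMod(x, y *[4]uint64) (t [4]uint64) {
+//		var c, b uint64
+//		for i := range t {
+//			t[i], c = bits.Add64(x[i], y[i], c)
+//		}
+//		var u [4]uint64
+//		for i := range u {
+//			u[i], b = bits.Sub64(t[i], P[i], b)
+//		}
+//		if c == 1 || b == 0 {
+//			t = u
+//		}
+//		return
+//	}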
+
+#define p256HalfInternal(T1, T0, X1, X0) \
+ VSPLTISB $0, ZER \
+ VSUBEUQM ZER, ZER, X0, SEL1 \
+ \
+ VADDCUQ X0, PL, CAR1 \
+ VADDUQM X0, PL, T0 \
+ VADDECUQ X1, PH, CAR1, T2 \
+ VADDEUQM X1, PH, CAR1, T1 \
+ \
+ VSEL T0, X0, SEL1, T0 \
+ VSEL T1, X1, SEL1, T1 \
+ VSEL T2, ZER, SEL1, T2 \
+ \
+ VSLDOI $15, T2, ZER, TT1 \
+ VSLDOI $15, T1, ZER, TT0 \
+ VSPLTISB $1, SEL1 \
+ VSR T0, SEL1, T0 \ // VSRL
+ VSR T1, SEL1, T1 \
+ VSPLTISB $7, SEL1 \ // VREPIB
+ VSL TT0, SEL1, TT0 \
+ VSL TT1, SEL1, TT1 \
+ VOR T0, TT0, T0 \
+ VOR T1, TT1, T1
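+
+// p256HalfInternal computes T = X/2 mod p: when X is odd it first adds p
+// (the sum is even, with its 257th bit captured by the carry), then shifts
+// the 257-bit value right one bit. Hedged sketch, same P assumption:
+//
+//	func halfMod(x *[4]uint64) {
+//		var c uint64
+//		if x[0]&1 == 1 { // the asm derives this mask without branching
+//			for i := range x {
+//				x[i], c = bits.Add64(x[i], P[i], c)
+//			}
+//		}
+//		for i := 0; i < 3; i++ {
+//			x[i] = x[i]>>1 | x[i+1]<<63
+//		}
+//		x[3] = x[3]>>1 | c<<63
+//	}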
+
+#define res_ptr R3
+#define x_ptr R4
+#define y_ptr R5
+#define CPOOL R7
+#define TEMP R8
+#define N R9
+
+// Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+// Constants
+#define P0 V30
+#define P1 V31
+// func p256Mul(res, in1, in2 *p256Element)
+TEXT ·p256Mul(SB), NOSPLIT, $0-24
+ MOVD res+0(FP), res_ptr
+ MOVD in1+8(FP), x_ptr
+ MOVD in2+16(FP), y_ptr
+ MOVD $16, R16
+ MOVD $32, R17
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ LXVD2X (R0)(x_ptr), X0
+ LXVD2X (R16)(x_ptr), X1
+
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+
+ LXVD2X (R0)(y_ptr), Y0
+ LXVD2X (R16)(y_ptr), Y1
+
+ XXPERMDI Y0, Y0, $2, Y0
+ XXPERMDI Y1, Y1, $2, Y1
+
+ LXVD2X (R16)(CPOOL), P1
+ LXVD2X (R0)(CPOOL), P0
+
+ CALL p256MulInternal<>(SB)
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ XXPERMDI T0, T0, $2, T0
+ XXPERMDI T1, T1, $2, T1
+ STXVD2X T0, (R0)(res_ptr)
+ STXVD2X T1, (R16)(res_ptr)
+ RET
+
+// func p256Sqr(res, in *p256Element, n int)
+TEXT ·p256Sqr(SB), NOSPLIT, $0-24
+ MOVD res+0(FP), res_ptr
+ MOVD in+8(FP), x_ptr
+ MOVD $16, R16
+ MOVD $32, R17
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ LXVD2X (R0)(x_ptr), X0
+ LXVD2X (R16)(x_ptr), X1
+
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+
+sqrLoop:
+ // Sqr uses same value for both
+
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+
+ LXVD2X (R16)(CPOOL), P1
+ LXVD2X (R0)(CPOOL), P0
+
+ CALL p256MulInternal<>(SB)
+
+ MOVD n+16(FP), N
+ ADD $-1, N
+ CMP $0, N
+ BEQ done
+ MOVD N, n+16(FP) // Save counter to avoid clobber
+ VOR T0, T0, X0
+ VOR T1, T1, X1
+ BR sqrLoop
+
+done:
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ XXPERMDI T0, T0, $2, T0
+ XXPERMDI T1, T1, $2, T1
+ STXVD2X T0, (R0)(res_ptr)
+ STXVD2X T1, (R16)(res_ptr)
+ RET
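+
+// If in is the Montgomery form of a, the loop above leaves the Montgomery
+// form of a^(2^n) in res; as a hedged sketch:
+//
+//	for ; n > 0; n-- {
+//		x = mulMont(x, x)
+//	}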
+
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+#undef CPOOL
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+
+#define P3ptr R3
+#define P1ptr R4
+#define P2ptr R5
+#define CPOOL R7
+
+// Temporaries in REGs
+#define Y2L V15
+#define Y2H V16
+#define T1L V17
+#define T1H V18
+#define T2L V19
+#define T2H V20
+#define T3L V21
+#define T3H V22
+#define T4L V23
+#define T4H V24
+
+// Temps for Sub and Add
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+
+// Names for zero/sel selects
+#define X1L V0
+#define X1H V1
+#define Y1L V2 // p256MulAsmParmY
+#define Y1H V3 // p256MulAsmParmY
+#define Z1L V4
+#define Z1H V5
+#define X2L V0
+#define X2H V1
+#define Z2L V4
+#define Z2H V5
+#define X3L V17 // T1L
+#define X3H V18 // T1H
+#define Y3L V21 // T3L
+#define Y3H V22 // T3H
+#define Z3L V25
+#define Z3H V26
+
+#define ZER V6
+#define SEL1 V7
+#define CAR1 V8
+#define CAR2 V9
+/* *
+ * Three operand formula:
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ * T1 = Z1²
+ * T2 = T1*Z1
+ * T1 = T1*X2
+ * T2 = T2*Y2
+ * T1 = T1-X1
+ * T2 = T2-Y1
+ * Z3 = Z1*T1
+ * T3 = T1²
+ * T4 = T3*T1
+ * T3 = T3*X1
+ * T1 = 2*T3
+ * X3 = T2²
+ * X3 = X3-T1
+ * X3 = X3-T4
+ * T3 = T3-X3
+ * T3 = T3*T2
+ * T4 = T4*Y1
+ * Y3 = T3-T4
+
+ * Three operand formulas, but with MulInternal X,Y used to store temps
+X=Z1; Y=Z1; MUL;T- // T1 = Z1² T1
+X=T ; Y- ; MUL;T2=T // T2 = T1*Z1 T1 T2
+X- ; Y=X2; MUL;T1=T // T1 = T1*X2 T1 T2
+X=T2; Y=Y2; MUL;T- // T2 = T2*Y2 T1 T2
+SUB(T2<T-Y1) // T2 = T2-Y1 T1 T2
+SUB(Y<T1-X1) // T1 = T1-X1 T1 T2
+X=Z1; Y- ; MUL;Z3:=T// Z3 = Z1*T1 T2
+X=Y; Y- ; MUL;X=T // T3 = T1*T1 T2
+X- ; Y- ; MUL;T4=T // T4 = T3*T1 T2 T4
+X- ; Y=X1; MUL;T3=T // T3 = T3*X1 T2 T3 T4
+ADD(T1<T+T) // T1 = T3+T3 T1 T2 T3 T4
+X=T2; Y=T2; MUL;T- // X3 = T2*T2 T1 T2 T3 T4
+SUB(T<T-T1) // X3 = X3-T1 T1 T2 T3 T4
+SUB(T<T-T4) X3:=T // X3 = X3-T4 T2 T3 T4
+SUB(X<T3-T) // T3 = T3-X3 T2 T3 T4
+X- ; Y- ; MUL;T3=T // T3 = T3*T2 T2 T3 T4
+X=T4; Y=Y1; MUL;T- // T4 = T4*Y1 T3 T4
+SUB(T<T3-T) Y3:=T // Y3 = T3-T4 T3 T4
+
+ */
+//
+// V27 is clobbered by p256MulInternal so must be
+// saved in a temp.
+//
+// func p256PointAddAffineAsm(res, in1 *P256Point, in2 *p256AffinePoint, sign, sel, zero int)
+TEXT ·p256PointAddAffineAsm(SB), NOSPLIT, $16-48
+ MOVD res+0(FP), P3ptr
+ MOVD in1+8(FP), P1ptr
+ MOVD in2+16(FP), P2ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+ MOVD $96, R21
+ MOVD $112, R22
+ MOVD $128, R23
+ MOVD $144, R24
+ MOVD $160, R25
+ MOVD $104, R26 // offset of sign+24(FP)
+
+ LXVD2X (R16)(CPOOL), PH
+ LXVD2X (R0)(CPOOL), PL
+
+ LXVD2X (R17)(P2ptr), Y2L
+ LXVD2X (R18)(P2ptr), Y2H
+ XXPERMDI Y2H, Y2H, $2, Y2H
+ XXPERMDI Y2L, Y2L, $2, Y2L
+
+ // Equivalent of VLREPG sign+24(FP), SEL1
+ LXVDSX (R1)(R26), SEL1
+ VSPLTISB $0, ZER
+ VCMPEQUD SEL1, ZER, SEL1
+
+ VSUBCUQ PL, Y2L, CAR1
+ VSUBUQM PL, Y2L, T1L
+ VSUBEUQM PH, Y2H, CAR1, T1H
+
+ VSEL T1L, Y2L, SEL1, Y2L
+ VSEL T1H, Y2H, SEL1, Y2H
+
+/* *
+ * Three operand formula:
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ */
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1² T1
+ LXVD2X (R19)(P1ptr), X0 // Z1H
+ LXVD2X (R20)(P1ptr), X1 // Z1L
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X=T ; Y- ; MUL; T2=T // T2 = T1*Z1 T1 T2
+ VOR T0, T0, X0
+ VOR T1, T1, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T2L
+ VOR T1, T1, T2H
+
+ // X- ; Y=X2; MUL; T1=T // T1 = T1*X2 T1 T2
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R0)(P2ptr), Y0 // X2H
+ LXVD2X (R16)(P2ptr), Y1 // X2L
+ XXPERMDI Y0, Y0, $2, Y0
+ XXPERMDI Y1, Y1, $2, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T1L
+ VOR T1, T1, T1H
+
+ // X=T2; Y=Y2; MUL; T- // T2 = T2*Y2 T1 T2
+ VOR T2L, T2L, X0
+ VOR T2H, T2H, X1
+ VOR Y2L, Y2L, Y0
+ VOR Y2H, Y2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T2<T-Y1) // T2 = T2-Y1 T1 T2
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R17)(P1ptr), Y1L
+ LXVD2X (R18)(P1ptr), Y1H
+ XXPERMDI Y1H, Y1H, $2, Y1H
+ XXPERMDI Y1L, Y1L, $2, Y1L
+ p256SubInternal(T2H,T2L,T1,T0,Y1H,Y1L)
+
+ // SUB(Y<T1-X1) // T1 = T1-X1 T1 T2
+ LXVD2X (R0)(P1ptr), X1L
+ LXVD2X (R16)(P1ptr), X1H
+ XXPERMDI X1H, X1H, $2, X1H
+ XXPERMDI X1L, X1L, $2, X1L
+ p256SubInternal(Y1,Y0,T1H,T1L,X1H,X1L)
+
+ // X=Z1; Y- ; MUL; Z3:=T// Z3 = Z1*T1 T2
+ LXVD2X (R19)(P1ptr), X0 // Z1H
+ LXVD2X (R20)(P1ptr), X1 // Z1L
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+ CALL p256MulInternal<>(SB)
+
+ VOR T0, T0, Z3L
+ VOR T1, T1, Z3H
+
+ // X=Y; Y- ; MUL; X=T // T3 = T1*T1 T2
+ VOR Y0, Y0, X0
+ VOR Y1, Y1, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, X0
+ VOR T1, T1, X1
+
+ // X- ; Y- ; MUL; T4=T // T4 = T3*T1 T2 T4
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T4L
+ VOR T1, T1, T4H
+
+ // X- ; Y=X1; MUL; T3=T // T3 = T3*X1 T2 T3 T4
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R0)(P1ptr), Y0 // X1H
+ LXVD2X (R16)(P1ptr), Y1 // X1L
+ XXPERMDI Y1, Y1, $2, Y1
+ XXPERMDI Y0, Y0, $2, Y0
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T3L
+ VOR T1, T1, T3H
+
+ // ADD(T1<T+T) // T1 = T3+T3 T1 T2 T3 T4
+ p256AddInternal(T1H,T1L, T1,T0,T1,T0)
+
+ // X=T2; Y=T2; MUL; T- // X3 = T2*T2 T1 T2 T3 T4
+ VOR T2L, T2L, X0
+ VOR T2H, T2H, X1
+ VOR T2L, T2L, Y0
+ VOR T2H, T2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<T-T1) // X3 = X3-T1 T1 T2 T3 T4 (T1 = X3)
+ p256SubInternal(T1,T0,T1,T0,T1H,T1L)
+
+ // SUB(T<T-T4) X3:=T // X3 = X3-T4 T2 T3 T4
+ p256SubInternal(T1,T0,T1,T0,T4H,T4L)
+ VOR T0, T0, X3L
+ VOR T1, T1, X3H
+
+ // SUB(X<T3-T) // T3 = T3-X3 T2 T3 T4
+ p256SubInternal(X1,X0,T3H,T3L,T1,T0)
+
+ // X- ; Y- ; MUL; T3=T // T3 = T3*T2 T2 T3 T4
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T3L
+ VOR T1, T1, T3H
+
+ // X=T4; Y=Y1; MUL; T- // T4 = T4*Y1 T3 T4
+ VOR T4L, T4L, X0
+ VOR T4H, T4H, X1
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R17)(P1ptr), Y0 // Y1H
+ LXVD2X (R18)(P1ptr), Y1 // Y1L
+ XXPERMDI Y0, Y0, $2, Y0
+ XXPERMDI Y1, Y1, $2, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<T3-T) Y3:=T // Y3 = T3-T4 T3 T4 (T3 = Y3)
+ p256SubInternal(Y3H,Y3L,T3H,T3L,T1,T0)
+
+ // if (sel == 0) {
+ // copy(P3.x[:], X1)
+ // copy(P3.y[:], Y1)
+ // copy(P3.z[:], Z1)
+ // }
+
+ LXVD2X (R0)(P1ptr), X1L
+ LXVD2X (R16)(P1ptr), X1H
+ XXPERMDI X1H, X1H, $2, X1H
+ XXPERMDI X1L, X1L, $2, X1L
+
+ // Y1 already loaded, left over from addition
+ LXVD2X (R19)(P1ptr), Z1L
+ LXVD2X (R20)(P1ptr), Z1H
+ XXPERMDI Z1H, Z1H, $2, Z1H
+ XXPERMDI Z1L, Z1L, $2, Z1L
+
+ MOVD $112, R26 // Get offset to sel+32
+ LXVDSX (R1)(R26), SEL1
+ VSPLTISB $0, ZER
+ VCMPEQUD SEL1, ZER, SEL1
+
+ VSEL X3L, X1L, SEL1, X3L
+ VSEL X3H, X1H, SEL1, X3H
+ VSEL Y3L, Y1L, SEL1, Y3L
+ VSEL Y3H, Y1H, SEL1, Y3H
+ VSEL Z3L, Z1L, SEL1, Z3L
+ VSEL Z3H, Z1H, SEL1, Z3H
+
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R0)(P2ptr), X2L
+ LXVD2X (R16)(P2ptr), X2H
+ XXPERMDI X2H, X2H, $2, X2H
+ XXPERMDI X2L, X2L, $2, X2L
+
+ // Y2 already loaded
+ LXVD2X (R23)(CPOOL), Z2L
+ LXVD2X (R24)(CPOOL), Z2H
+
+	MOVD $120, R26 // Get offset to zero+40(FP)
+ LXVDSX (R1)(R26), SEL1
+ VSPLTISB $0, ZER
+ VCMPEQUD SEL1, ZER, SEL1
+
+ VSEL X3L, X2L, SEL1, X3L
+ VSEL X3H, X2H, SEL1, X3H
+ VSEL Y3L, Y2L, SEL1, Y3L
+ VSEL Y3H, Y2H, SEL1, Y3H
+ VSEL Z3L, Z2L, SEL1, Z3L
+ VSEL Z3H, Z2H, SEL1, Z3H
+
+ // Reorder the bytes so they can be stored using STXVD2X.
+ MOVD res+0(FP), P3ptr
+ XXPERMDI X3H, X3H, $2, X3H
+ XXPERMDI X3L, X3L, $2, X3L
+ XXPERMDI Y3H, Y3H, $2, Y3H
+ XXPERMDI Y3L, Y3L, $2, Y3L
+ XXPERMDI Z3H, Z3H, $2, Z3H
+ XXPERMDI Z3L, Z3L, $2, Z3L
+ STXVD2X X3L, (R0)(P3ptr)
+ STXVD2X X3H, (R16)(P3ptr)
+ STXVD2X Y3L, (R17)(P3ptr)
+ STXVD2X Y3H, (R18)(P3ptr)
+ STXVD2X Z3L, (R19)(P3ptr)
+ STXVD2X Z3H, (R20)(P3ptr)
+
+ RET
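+
+// Hedged summary of the two tail selections above, per limb:
+// if sel == 0, the computed sum is discarded and the input point P1 is kept;
+// if zero == 0, P1 was the point at infinity, so the answer is the affine
+// point P2 with Z set to 1 in Montgomery form (loaded from the pool);
+// otherwise the computed X3/Y3/Z3 stands.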
+
+#undef P3ptr
+#undef P1ptr
+#undef P2ptr
+#undef CPOOL
+
+#undef Y2L
+#undef Y2H
+#undef T1L
+#undef T1H
+#undef T2L
+#undef T2H
+#undef T3L
+#undef T3H
+#undef T4L
+#undef T4H
+
+#undef TT0
+#undef TT1
+#undef T2
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+
+#undef PL
+#undef PH
+
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Z2L
+#undef Z2H
+#undef X3L
+#undef X3H
+#undef Y3L
+#undef Y3H
+#undef Z3L
+#undef Z3H
+
+#undef ZER
+#undef SEL1
+#undef CAR1
+#undef CAR2
+
+// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl
+// http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
+// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-projective-3.html
+#define P3ptr R3
+#define P1ptr R4
+#define CPOOL R7
+
+// Temporaries in REGs
+#define X3L V15
+#define X3H V16
+#define Y3L V17
+#define Y3H V18
+#define T1L V19
+#define T1H V20
+#define T2L V21
+#define T2H V22
+#define T3L V23
+#define T3H V24
+
+#define X1L V6
+#define X1H V7
+#define Y1L V8
+#define Y1H V9
+#define Z1L V10
+#define Z1H V11
+
+// Temps for Sub and Add
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+
+#define Z3L V23
+#define Z3H V24
+
+#define ZER V26
+#define SEL1 V27
+#define CAR1 V28
+#define CAR2 V29
+/*
+ * http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2004-hmv
+ * Cost: 4M + 4S + 1*half + 5add + 2*2 + 1*3.
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ * A = 3(X₁-Z₁²)×(X₁+Z₁²)
+ * B = 2Y₁
+ * Z₃ = B×Z₁
+ * C = B²
+ * D = C×X₁
+ * X₃ = A²-2D
+ * Y₃ = (D-X₃)×A-C²/2
+ *
+ * Three-operand formula:
+ * T1 = Z1²
+ * T2 = X1-T1
+ * T1 = X1+T1
+ * T2 = T2*T1
+ * T2 = 3*T2
+ * Y3 = 2*Y1
+ * Z3 = Y3*Z1
+ * Y3 = Y3²
+ * T3 = Y3*X1
+ * Y3 = Y3²
+ * Y3 = half*Y3
+ * X3 = T2²
+ * T1 = 2*T3
+ * X3 = X3-T1
+ * T1 = T3-X3
+ * T1 = T1*T2
+ * Y3 = T1-Y3
+ */
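+// Mapping the three-operand steps to A/B/C/D above (hedged): the first five
+// steps leave T2 = A = 3(X1-Z1²)(X1+Z1²); Y3 holds B = 2*Y1, then C = B²,
+// and T3 = D = C*X1; finally X3 = A² - 2D and Y3 = (D-X3)*A - C²/2, the
+// halving done by p256HalfInternal.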
+// func p256PointDoubleAsm(res, in *P256Point)
+TEXT ·p256PointDoubleAsm(SB), NOSPLIT, $0-16
+ MOVD res+0(FP), P3ptr
+ MOVD in+8(FP), P1ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+
+ LXVD2X (R16)(CPOOL), PH
+ LXVD2X (R0)(CPOOL), PL
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1²
+ LXVD2X (R19)(P1ptr), X0 // Z1H
+ LXVD2X (R20)(P1ptr), X1 // Z1L
+
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(X<X1-T) // T2 = X1-T1
+ LXVD2X (R0)(P1ptr), X1L
+ LXVD2X (R16)(P1ptr), X1H
+ XXPERMDI X1L, X1L, $2, X1L
+ XXPERMDI X1H, X1H, $2, X1H
+
+ p256SubInternal(X1,X0,X1H,X1L,T1,T0)
+
+ // ADD(Y<X1+T) // T1 = X1+T1
+ p256AddInternal(Y1,Y0,X1H,X1L,T1,T0)
+
+ // X- ; Y- ; MUL; T- // T2 = T2*T1
+ CALL p256MulInternal<>(SB)
+
+ // ADD(T2<T+T); ADD(T2<T2+T) // T2 = 3*T2
+ p256AddInternal(T2H,T2L,T1,T0,T1,T0)
+ p256AddInternal(T2H,T2L,T2H,T2L,T1,T0)
+
+ // ADD(X<Y1+Y1) // Y3 = 2*Y1
+ LXVD2X (R17)(P1ptr), Y1L
+ LXVD2X (R18)(P1ptr), Y1H
+ XXPERMDI Y1L, Y1L, $2, Y1L
+ XXPERMDI Y1H, Y1H, $2, Y1H
+
+ p256AddInternal(X1,X0,Y1H,Y1L,Y1H,Y1L)
+
+ // X- ; Y=Z1; MUL; Z3:=T // Z3 = Y3*Z1
+ LXVD2X (R19)(P1ptr), Y0
+ LXVD2X (R20)(P1ptr), Y1
+ XXPERMDI Y0, Y0, $2, Y0
+ XXPERMDI Y1, Y1, $2, Y1
+
+ CALL p256MulInternal<>(SB)
+
+ // Leave T0, T1 as is.
+ XXPERMDI T0, T0, $2, TT0
+ XXPERMDI T1, T1, $2, TT1
+ STXVD2X TT0, (R19)(P3ptr)
+ STXVD2X TT1, (R20)(P3ptr)
+
+ // X- ; Y=X ; MUL; T- // Y3 = Y3²
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X=T ; Y=X1; MUL; T3=T // T3 = Y3*X1
+ VOR T0, T0, X0
+ VOR T1, T1, X1
+ LXVD2X (R0)(P1ptr), Y0
+ LXVD2X (R16)(P1ptr), Y1
+ XXPERMDI Y0, Y0, $2, Y0
+ XXPERMDI Y1, Y1, $2, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T3L
+ VOR T1, T1, T3H
+
+ // X- ; Y=X ; MUL; T- // Y3 = Y3²
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // HAL(Y3<T) // Y3 = half*Y3
+ p256HalfInternal(Y3H,Y3L, T1,T0)
+
+ // X=T2; Y=T2; MUL; T- // X3 = T2²
+ VOR T2L, T2L, X0
+ VOR T2H, T2H, X1
+ VOR T2L, T2L, Y0
+ VOR T2H, T2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // ADD(T1<T3+T3) // T1 = 2*T3
+ p256AddInternal(T1H,T1L,T3H,T3L,T3H,T3L)
+
+ // SUB(X3<T-T1) X3:=X3 // X3 = X3-T1
+ p256SubInternal(X3H,X3L,T1,T0,T1H,T1L)
+
+ XXPERMDI X3L, X3L, $2, TT0
+ XXPERMDI X3H, X3H, $2, TT1
+ STXVD2X TT0, (R0)(P3ptr)
+ STXVD2X TT1, (R16)(P3ptr)
+
+ // SUB(X<T3-X3) // T1 = T3-X3
+ p256SubInternal(X1,X0,T3H,T3L,X3H,X3L)
+
+ // X- ; Y- ; MUL; T- // T1 = T1*T2
+ CALL p256MulInternal<>(SB)
+
+ // SUB(Y3<T-Y3) // Y3 = T1-Y3
+ p256SubInternal(Y3H,Y3L,T1,T0,Y3H,Y3L)
+
+ XXPERMDI Y3L, Y3L, $2, Y3L
+ XXPERMDI Y3H, Y3H, $2, Y3H
+ STXVD2X Y3L, (R17)(P3ptr)
+ STXVD2X Y3H, (R18)(P3ptr)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef CPOOL
+#undef X3L
+#undef X3H
+#undef Y3L
+#undef Y3H
+#undef T1L
+#undef T1H
+#undef T2L
+#undef T2H
+#undef T3L
+#undef T3H
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef TT0
+#undef TT1
+#undef T2
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef PL
+#undef PH
+#undef Z3L
+#undef Z3H
+#undef ZER
+#undef SEL1
+#undef CAR1
+#undef CAR2
+
+#define P3ptr R3
+#define P1ptr R4
+#define P2ptr R5
+#define CPOOL R7
+#define TRUE R14
+#define RES1 R9
+#define RES2 R10
+
+// Temporaries in REGs
+#define T1L V16
+#define T1H V17
+#define T2L V18
+#define T2H V19
+#define U1L V20
+#define U1H V21
+#define S1L V22
+#define S1H V23
+#define HL V24
+#define HH V25
+#define RL V26
+#define RH V27
+
+// Temps for Sub and Add
+#define ZER V6
+#define SEL1 V7
+#define CAR1 V8
+#define CAR2 V9
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+/*
+ * https://choucroutage.com/Papers/SideChannelAttacks/ctrsa-2011-brown.pdf "Software Implementation of the NIST Elliptic Curves Over Prime Fields"
+ *
+ * A = X₁×Z₂²
+ * B = Y₁×Z₂³
+ * C = X₂×Z₁²-A
+ * D = Y₂×Z₁³-B
+ * X₃ = D² - 2A×C² - C³
+ * Y₃ = D×(A×C² - X₃) - B×C³
+ * Z₃ = Z₁×Z₂×C
+ *
+ * Three-operand formula (adopted): http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-1998-cmo-2
+ * Temp storage: T1,T2,U1,H,Z3=X3=Y3,S1,R
+ *
+ * T1 = Z1*Z1
+ * T2 = Z2*Z2
+ * U1 = X1*T2
+ * H = X2*T1
+ * H = H-U1
+ * Z3 = Z1*Z2
+ * Z3 = Z3*H << store-out Z3 result reg.. could override Z1, if slices have same backing array
+ *
+ * S1 = Z2*T2
+ * S1 = Y1*S1
+ * R = Z1*T1
+ * R = Y2*R
+ * R = R-S1
+ *
+ * T1 = H*H
+ * T2 = H*T1
+ * U1 = U1*T1
+ *
+ * X3 = R*R
+ * X3 = X3-T2
+ * T1 = 2*U1
+ * X3 = X3-T1 << store-out X3 result reg
+ *
+ * T2 = S1*T2
+ * Y3 = U1-X3
+ * Y3 = R*Y3
+ * Y3 = Y3-T2 << store-out Y3 result reg
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1*Z1
+ // X- ; Y=T ; MUL; R=T // R = Z1*T1
+ // X=X2; Y- ; MUL; H=T // H = X2*T1
+ // X=Z2; Y=Z2; MUL; T- // T2 = Z2*Z2
+ // X- ; Y=T ; MUL; S1=T // S1 = Z2*T2
+ // X=X1; Y- ; MUL; U1=T // U1 = X1*T2
+ // SUB(H<H-T) // H = H-U1
+ // X=Z1; Y=Z2; MUL; T- // Z3 = Z1*Z2
+ // X=T ; Y=H ; MUL; Z3:=T// Z3 = Z3*H << store-out Z3 result reg.. could override Z1, if slices have same backing array
+ // X=Y1; Y=S1; MUL; S1=T // S1 = Y1*S1
+ // X=Y2; Y=R ; MUL; T- // R = Y2*R
+ // SUB(R<T-S1) // R = R-S1
+ // X=H ; Y=H ; MUL; T- // T1 = H*H
+ // X- ; Y=T ; MUL; T2=T // T2 = H*T1
+ // X=U1; Y- ; MUL; U1=T // U1 = U1*T1
+ // X=R ; Y=R ; MUL; T- // X3 = R*R
+ // SUB(T<T-T2) // X3 = X3-T2
+ // ADD(X<U1+U1) // T1 = 2*U1
+ // SUB(T<T-X) X3:=T // X3 = X3-T1 << store-out X3 result reg
+ // SUB(Y<U1-T) // Y3 = U1-X3
+ // X=R ; Y- ; MUL; U1=T // Y3 = R*Y3
+ // X=S1; Y=T2; MUL; T- // T2 = S1*T2
+ // SUB(T<U1-T); Y3:=T // Y3 = Y3-T2 << store-out Y3 result reg
+ */
+// func p256PointAddAsm(res, in1, in2 *P256Point) int
+TEXT ·p256PointAddAsm(SB), NOSPLIT, $16-32
+ MOVD res+0(FP), P3ptr
+ MOVD in1+8(FP), P1ptr
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ MOVD $16, R16
+ MOVD $32, R17
+ MOVD $48, R18
+ MOVD $64, R19
+ MOVD $80, R20
+
+ LXVD2X (R16)(CPOOL), PH
+ LXVD2X (R0)(CPOOL), PL
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1*Z1
+ LXVD2X (R19)(P1ptr), X0 // Z1L
+ LXVD2X (R20)(P1ptr), X1 // Z1H
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X- ; Y=T ; MUL; R=T // R = Z1*T1
+ VOR T0, T0, Y0
+ VOR T1, T1, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, RL // SAVE: RL
+ VOR T1, T1, RH // SAVE: RH
+
+ STXVD2X RH, (R1)(R17) // V27 has to be saved
+
+ // X=X2; Y- ; MUL; H=T // H = X2*T1
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R0)(P2ptr), X0 // X2L
+ LXVD2X (R16)(P2ptr), X1 // X2H
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, HL // SAVE: HL
+ VOR T1, T1, HH // SAVE: HH
+
+ // X=Z2; Y=Z2; MUL; T- // T2 = Z2*Z2
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R19)(P2ptr), X0 // Z2L
+ LXVD2X (R20)(P2ptr), X1 // Z2H
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+ VOR X0, X0, Y0
+ VOR X1, X1, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X- ; Y=T ; MUL; S1=T // S1 = Z2*T2
+ VOR T0, T0, Y0
+ VOR T1, T1, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, S1L // SAVE: S1L
+ VOR T1, T1, S1H // SAVE: S1H
+
+ // X=X1; Y- ; MUL; U1=T // U1 = X1*T2
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R0)(P1ptr), X0 // X1L
+ LXVD2X (R16)(P1ptr), X1 // X1H
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, U1L // SAVE: U1L
+ VOR T1, T1, U1H // SAVE: U1H
+
+ // SUB(H<H-T) // H = H-U1
+ p256SubInternal(HH,HL,HH,HL,T1,T0)
+
+ // if H == 0 or H^P == 0 then ret=1 else ret=0
+ // clobbers T1H and T1L
+ MOVD $1, TRUE
+ VSPLTISB $0, ZER
+ VOR HL, HH, T1H
+ VCMPEQUDCC ZER, T1H, T1H
+
+ // 26 = CR6 NE
+ ISEL $26, R0, TRUE, RES1
+ VXOR HL, PL, T1L // SAVE: T1L
+ VXOR HH, PH, T1H // SAVE: T1H
+ VOR T1L, T1H, T1H
+ VCMPEQUDCC ZER, T1H, T1H
+
+ // 26 = CR6 NE
+ ISEL $26, R0, TRUE, RES2
+ OR RES2, RES1, RES1
+ MOVD RES1, ret+24(FP)
+
+ // X=Z1; Y=Z2; MUL; T- // Z3 = Z1*Z2
+ MOVD in1+8(FP), P1ptr
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R19)(P1ptr), X0 // Z1L
+ LXVD2X (R20)(P1ptr), X1 // Z1H
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+ LXVD2X (R19)(P2ptr), Y0 // Z2L
+ LXVD2X (R20)(P2ptr), Y1 // Z2H
+ XXPERMDI Y0, Y0, $2, Y0
+ XXPERMDI Y1, Y1, $2, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X=T ; Y=H ; MUL; Z3:=T// Z3 = Z3*H
+ VOR T0, T0, X0
+ VOR T1, T1, X1
+ VOR HL, HL, Y0
+ VOR HH, HH, Y1
+ CALL p256MulInternal<>(SB)
+ MOVD res+0(FP), P3ptr
+ XXPERMDI T1, T1, $2, TT1
+ XXPERMDI T0, T0, $2, TT0
+ STXVD2X TT0, (R19)(P3ptr)
+ STXVD2X TT1, (R20)(P3ptr)
+
+ // X=Y1; Y=S1; MUL; S1=T // S1 = Y1*S1
+ MOVD in1+8(FP), P1ptr
+ LXVD2X (R17)(P1ptr), X0
+ LXVD2X (R18)(P1ptr), X1
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+ VOR S1L, S1L, Y0
+ VOR S1H, S1H, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, S1L
+ VOR T1, T1, S1H
+
+ // X=Y2; Y=R ; MUL; T- // R = Y2*R
+ MOVD in2+16(FP), P2ptr
+ LXVD2X (R17)(P2ptr), X0
+ LXVD2X (R18)(P2ptr), X1
+ XXPERMDI X0, X0, $2, X0
+ XXPERMDI X1, X1, $2, X1
+ VOR RL, RL, Y0
+
+	// VOR RH, RH, Y1 - RH was saved above with STXVD2X, so reload it
+ LXVD2X (R1)(R17), Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(R<T-S1) // R = T-S1
+ p256SubInternal(RH,RL,T1,T0,S1H,S1L)
+
+ STXVD2X RH, (R1)(R17) // Save RH
+
+ // if R == 0 or R^P == 0 then ret=ret else ret=0
+ // clobbers T1H and T1L
+ // Redo this using ISEL??
+ MOVD $1, TRUE
+ VSPLTISB $0, ZER
+ VOR RL, RH, T1H
+ VCMPEQUDCC ZER, T1H, T1H
+
+	// 26 = CR6 NE
+ ISEL $26, R0, TRUE, RES1
+ VXOR RL, PL, T1L
+ VXOR RH, PH, T1H // SAVE: T1L
+ VOR T1L, T1H, T1H
+ VCMPEQUDCC ZER, T1H, T1H
+
+ // 26 = CR6 NE
+ ISEL $26, R0, TRUE, RES2
+ OR RES2, RES1, RES1
+ MOVD ret+24(FP), RES2
+ AND RES2, RES1, RES1
+ MOVD RES1, ret+24(FP)
+
+ // X=H ; Y=H ; MUL; T- // T1 = H*H
+ VOR HL, HL, X0
+ VOR HH, HH, X1
+ VOR HL, HL, Y0
+ VOR HH, HH, Y1
+ CALL p256MulInternal<>(SB)
+
+ // X- ; Y=T ; MUL; T2=T // T2 = H*T1
+ VOR T0, T0, Y0
+ VOR T1, T1, Y1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, T2L
+ VOR T1, T1, T2H
+
+ // X=U1; Y- ; MUL; U1=T // U1 = U1*T1
+ VOR U1L, U1L, X0
+ VOR U1H, U1H, X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, U1L
+ VOR T1, T1, U1H
+
+ // X=R ; Y=R ; MUL; T- // X3 = R*R
+ VOR RL, RL, X0
+
+ // VOR RH, RH, X1
+ VOR RL, RL, Y0
+
+ // RH was saved above using STXVD2X
+ LXVD2X (R1)(R17), X1
+ VOR X1, X1, Y1
+
+ // VOR RH, RH, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<T-T2) // X3 = X3-T2
+ p256SubInternal(T1,T0,T1,T0,T2H,T2L)
+
+ // ADD(X<U1+U1) // T1 = 2*U1
+ p256AddInternal(X1,X0,U1H,U1L,U1H,U1L)
+
+ // SUB(T<T-X) X3:=T // X3 = X3-T1 << store-out X3 result reg
+ p256SubInternal(T1,T0,T1,T0,X1,X0)
+ MOVD res+0(FP), P3ptr
+ XXPERMDI T1, T1, $2, TT1
+ XXPERMDI T0, T0, $2, TT0
+ STXVD2X TT0, (R0)(P3ptr)
+ STXVD2X TT1, (R16)(P3ptr)
+
+ // SUB(Y<U1-T) // Y3 = U1-X3
+ p256SubInternal(Y1,Y0,U1H,U1L,T1,T0)
+
+ // X=R ; Y- ; MUL; U1=T // Y3 = R*Y3
+ VOR RL, RL, X0
+
+ // VOR RH, RH, X1
+ LXVD2X (R1)(R17), X1
+ CALL p256MulInternal<>(SB)
+ VOR T0, T0, U1L
+ VOR T1, T1, U1H
+
+ // X=S1; Y=T2; MUL; T- // T2 = S1*T2
+ VOR S1L, S1L, X0
+ VOR S1H, S1H, X1
+ VOR T2L, T2L, Y0
+ VOR T2H, T2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<U1-T); Y3:=T // Y3 = Y3-T2 << store-out Y3 result reg
+ p256SubInternal(T1,T0,U1H,U1L,T1,T0)
+ MOVD res+0(FP), P3ptr
+ XXPERMDI T1, T1, $2, TT1
+ XXPERMDI T0, T0, $2, TT0
+ STXVD2X TT0, (R17)(P3ptr)
+ STXVD2X TT1, (R18)(P3ptr)
+
+ RET
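+
+// Hedged Go sketch of the equality detection interleaved above (ret is 1
+// exactly when both H and R vanish mod p, i.e. the inputs were the same
+// point, so the caller falls back to the doubling path):
+//
+//	sameX := H == 0 || H == p // U1 == U2 (mod p)
+//	sameY := R == 0 || R == p // S1 == S2 (mod p)
+//	ret := 0
+//	if sameX && sameY {
+//		ret = 1
+//	}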
diff --git a/src/crypto/internal/nistec/p256_asm_s390x.s b/src/crypto/internal/nistec/p256_asm_s390x.s
new file mode 100644
index 0000000..8da4f3f
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_asm_s390x.s
@@ -0,0 +1,2418 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "go_asm.h"
+
+DATA p256ordK0<>+0x00(SB)/4, $0xee00bc4f
+DATA p256ord<>+0x00(SB)/8, $0xffffffff00000000
+DATA p256ord<>+0x08(SB)/8, $0xffffffffffffffff
+DATA p256ord<>+0x10(SB)/8, $0xbce6faada7179e84
+DATA p256ord<>+0x18(SB)/8, $0xf3b9cac2fc632551
+DATA p256<>+0x00(SB)/8, $0xffffffff00000001 // P256
+DATA p256<>+0x08(SB)/8, $0x0000000000000000 // P256
+DATA p256<>+0x10(SB)/8, $0x00000000ffffffff // P256
+DATA p256<>+0x18(SB)/8, $0xffffffffffffffff // P256
+DATA p256<>+0x20(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x28(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x30(SB)/8, $0x0000000010111213 // SEL 0 d1 d0 0
+DATA p256<>+0x38(SB)/8, $0x1415161700000000 // SEL 0 d1 d0 0
+DATA p256<>+0x40(SB)/8, $0x18191a1b1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x48(SB)/8, $0x18191a1b1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256<>+0x50(SB)/8, $0x0706050403020100 // LE2BE permute mask
+DATA p256<>+0x58(SB)/8, $0x0f0e0d0c0b0a0908 // LE2BE permute mask
+DATA p256mul<>+0x00(SB)/8, $0xffffffff00000001 // P256
+DATA p256mul<>+0x08(SB)/8, $0x0000000000000000 // P256
+DATA p256mul<>+0x10(SB)/8, $0x00000000ffffffff // P256
+DATA p256mul<>+0x18(SB)/8, $0xffffffffffffffff // P256
+DATA p256mul<>+0x20(SB)/8, $0x1c1d1e1f00000000 // SEL d0 0 0 d0
+DATA p256mul<>+0x28(SB)/8, $0x000000001c1d1e1f // SEL d0 0 0 d0
+DATA p256mul<>+0x30(SB)/8, $0x0001020304050607 // SEL d0 0 d1 d0
+DATA p256mul<>+0x38(SB)/8, $0x1c1d1e1f0c0d0e0f // SEL d0 0 d1 d0
+DATA p256mul<>+0x40(SB)/8, $0x040506071c1d1e1f // SEL 0 d1 d0 d1
+DATA p256mul<>+0x48(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL 0 d1 d0 d1
+DATA p256mul<>+0x50(SB)/8, $0x0405060704050607 // SEL 0 0 d1 d0
+DATA p256mul<>+0x58(SB)/8, $0x1c1d1e1f0c0d0e0f // SEL 0 0 d1 d0
+DATA p256mul<>+0x60(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x68(SB)/8, $0x0c0d0e0f1c1d1e1f // SEL d1 d0 d1 d0
+DATA p256mul<>+0x70(SB)/8, $0x141516170c0d0e0f // SEL 0 d1 d0 0
+DATA p256mul<>+0x78(SB)/8, $0x1c1d1e1f14151617 // SEL 0 d1 d0 0
+DATA p256mul<>+0x80(SB)/8, $0x00000000fffffffe // (1*2^256)%P256
+DATA p256mul<>+0x88(SB)/8, $0xffffffffffffffff // (1*2^256)%P256
+DATA p256mul<>+0x90(SB)/8, $0xffffffff00000000 // (1*2^256)%P256
+DATA p256mul<>+0x98(SB)/8, $0x0000000000000001 // (1*2^256)%P256
+GLOBL p256ordK0<>(SB), 8, $4
+GLOBL p256ord<>(SB), 8, $32
+GLOBL p256<>(SB), 8, $96
+GLOBL p256mul<>(SB), 8, $160
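+
+// Pool layout (hedged notes): the 0x20.. entries of both pools are VPERM
+// byte-selector masks for the reduction rounds (p256<>+0x50 is the LE2BE
+// permute), and p256mul<>+0x80..0x98 is 2^256 mod p, i.e. 1 in the
+// Montgomery domain.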
+
+// func p256OrdLittleToBig(res *[32]byte, in *p256OrdElement)
+TEXT ·p256OrdLittleToBig(SB), NOSPLIT, $0
+ JMP ·p256BigToLittle(SB)
+
+// func p256OrdBigToLittle(res *p256OrdElement, in *[32]byte)
+TEXT ·p256OrdBigToLittle(SB), NOSPLIT, $0
+ JMP ·p256BigToLittle(SB)
+
+// ---------------------------------------
+// func p256LittleToBig(res *[32]byte, in *p256Element)
+TEXT ·p256LittleToBig(SB), NOSPLIT, $0
+ JMP ·p256BigToLittle(SB)
+
+// func p256BigToLittle(res *p256Element, in *[32]byte)
+#define res_ptr R1
+#define in_ptr R2
+#define T1L V2
+#define T1H V3
+
+TEXT ·p256BigToLittle(SB), NOSPLIT, $0
+ MOVD res+0(FP), res_ptr
+ MOVD in+8(FP), in_ptr
+
+ VL 0(in_ptr), T1H
+ VL 16(in_ptr), T1L
+
+ VPDI $0x4, T1L, T1L, T1L
+ VPDI $0x4, T1H, T1H, T1H
+
+ VST T1L, 0(res_ptr)
+ VST T1H, 16(res_ptr)
+ RET
+
+#undef res_ptr
+#undef in_ptr
+#undef T1L
+#undef T1H
+
+// ---------------------------------------
+// iff cond == 1 val <- -val
+// func p256NegCond(val *p256Element, cond int)
+#define P1ptr R1
+#define CPOOL R4
+
+#define Y1L V0
+#define Y1H V1
+#define T1L V2
+#define T1H V3
+
+#define PL V30
+#define PH V31
+
+#define ZER V4
+#define SEL1 V5
+#define CAR1 V6
+TEXT ·p256NegCond(SB), NOSPLIT, $0
+ MOVD val+0(FP), P1ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+
+ VL 16(P1ptr), Y1H
+ VPDI $0x4, Y1H, Y1H, Y1H
+ VL 0(P1ptr), Y1L
+ VPDI $0x4, Y1L, Y1L, Y1L
+
+ VLREPG cond+8(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VSCBIQ Y1L, PL, CAR1
+ VSQ Y1L, PL, T1L
+ VSBIQ PH, Y1H, CAR1, T1H
+
+ VSEL Y1L, T1L, SEL1, Y1L
+ VSEL Y1H, T1H, SEL1, Y1H
+
+ VPDI $0x4, Y1H, Y1H, Y1H
+ VST Y1H, 16(P1ptr)
+ VPDI $0x4, Y1L, Y1L, Y1L
+ VST Y1L, 0(P1ptr)
+ RET
+
+#undef P1ptr
+#undef CPOOL
+#undef Y1L
+#undef Y1H
+#undef T1L
+#undef T1H
+#undef PL
+#undef PH
+#undef ZER
+#undef SEL1
+#undef CAR1
+
+// ---------------------------------------
+// if cond == 0 res <- b; else res <- a
+// func p256MovCond(res, a, b *P256Point, cond int)
+#define P3ptr R1
+#define P1ptr R2
+#define P2ptr R3
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+
+#define ZER V18
+#define SEL1 V19
+TEXT ·p256MovCond(SB), NOSPLIT, $0
+ MOVD res+0(FP), P3ptr
+ MOVD a+8(FP), P1ptr
+ MOVD b+16(FP), P2ptr
+ VLREPG cond+24(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VL 0(P1ptr), X1H
+ VL 16(P1ptr), X1L
+ VL 32(P1ptr), Y1H
+ VL 48(P1ptr), Y1L
+ VL 64(P1ptr), Z1H
+ VL 80(P1ptr), Z1L
+
+ VL 0(P2ptr), X2H
+ VL 16(P2ptr), X2L
+ VL 32(P2ptr), Y2H
+ VL 48(P2ptr), Y2L
+ VL 64(P2ptr), Z2H
+ VL 80(P2ptr), Z2L
+
+ VSEL X2L, X1L, SEL1, X1L
+ VSEL X2H, X1H, SEL1, X1H
+ VSEL Y2L, Y1L, SEL1, Y1L
+ VSEL Y2H, Y1H, SEL1, Y1H
+ VSEL Z2L, Z1L, SEL1, Z1L
+ VSEL Z2H, Z1H, SEL1, Z1H
+
+ VST X1H, 0(P3ptr)
+ VST X1L, 16(P3ptr)
+ VST Y1H, 32(P3ptr)
+ VST Y1L, 48(P3ptr)
+ VST Z1H, 64(P3ptr)
+ VST Z1L, 80(P3ptr)
+
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef P2ptr
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef ZER
+#undef SEL1
+
+// ---------------------------------------
+// Constant time table access
+// Indexed from 1 to 15, with -1 offset
+// (index 0 is implicitly point at infinity)
+// func p256Select(res *P256Point, table *p256Table, idx int)
+#define P3ptr R1
+#define P1ptr R2
+#define COUNT R4
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+
+#define ONE V18
+#define IDX V19
+#define SEL1 V20
+#define SEL2 V21
+TEXT ·p256Select(SB), NOSPLIT, $0
+ MOVD res+0(FP), P3ptr
+ MOVD table+8(FP), P1ptr
+ VLREPB idx+(16+7)(FP), IDX
+ VREPIB $1, ONE
+ VREPIB $1, SEL2
+ MOVD $1, COUNT
+
+ VZERO X1H
+ VZERO X1L
+ VZERO Y1H
+ VZERO Y1L
+ VZERO Z1H
+ VZERO Z1L
+
+loop_select:
+ VL 0(P1ptr), X2H
+ VL 16(P1ptr), X2L
+ VL 32(P1ptr), Y2H
+ VL 48(P1ptr), Y2L
+ VL 64(P1ptr), Z2H
+ VL 80(P1ptr), Z2L
+
+ VCEQG SEL2, IDX, SEL1
+
+ VSEL X2L, X1L, SEL1, X1L
+ VSEL X2H, X1H, SEL1, X1H
+ VSEL Y2L, Y1L, SEL1, Y1L
+ VSEL Y2H, Y1H, SEL1, Y1H
+ VSEL Z2L, Z1L, SEL1, Z1L
+ VSEL Z2H, Z1H, SEL1, Z1H
+
+ VAB SEL2, ONE, SEL2
+ ADDW $1, COUNT
+ ADD $96, P1ptr
+ CMPW COUNT, $17
+ BLT loop_select
+
+ VST X1H, 0(P3ptr)
+ VST X1L, 16(P3ptr)
+ VST Y1H, 32(P3ptr)
+ VST Y1L, 48(P3ptr)
+ VST Z1H, 64(P3ptr)
+ VST Z1L, 80(P3ptr)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef COUNT
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef ONE
+#undef IDX
+#undef SEL1
+#undef SEL2
+
+// ---------------------------------------
+
+// func p256FromMont(res, in *p256Element)
+#define res_ptr R1
+#define x_ptr R2
+#define CPOOL R4
+
+#define T0 V0
+#define T1 V1
+#define T2 V2
+#define TT0 V3
+#define TT1 V4
+
+#define ZER V6
+#define SEL1 V7
+#define SEL2 V8
+#define CAR1 V9
+#define CAR2 V10
+#define RED1 V11
+#define RED2 V12
+#define PL V13
+#define PH V14
+
+TEXT ·p256FromMont(SB), NOSPLIT, $0
+ MOVD res+0(FP), res_ptr
+ MOVD in+8(FP), x_ptr
+
+ VZERO T2
+ VZERO ZER
+ MOVD $p256<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL1
+
+ VL (0*16)(x_ptr), T0
+ VPDI $0x4, T0, T0, T0
+ VL (1*16)(x_ptr), T1
+ VPDI $0x4, T1, T1, T1
+
+ // First round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $8, T1, T0, T0
+ VSLDB $8, T2, T1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // Second round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $8, T1, T0, T0
+ VSLDB $8, T2, T1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // Third round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $8, T1, T0, T0
+ VSLDB $8, T2, T1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // Last round
+ VPERM T1, T0, SEL1, RED2 // d1 d0 d1 d0
+ VPERM ZER, RED2, SEL2, RED1 // 0 d1 d0 0
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $8, T1, T0, T0
+ VSLDB $8, T2, T1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // ---------------------------------------------------
+
+ VSCBIQ PL, T0, CAR1
+ VSQ PL, T0, TT0
+ VSBCBIQ T1, PH, CAR1, CAR2
+ VSBIQ T1, PH, CAR1, TT1
+ VSBIQ T2, ZER, CAR2, T2
+
+ // what output to use, TT1||TT0 or T1||T0?
+ VSEL T0, TT0, T2, T0
+ VSEL T1, TT1, T2, T1
+
+ VPDI $0x4, T0, T0, TT0
+ VST TT0, (0*16)(res_ptr)
+ VPDI $0x4, T1, T1, TT1
+ VST TT1, (1*16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef CPOOL
+#undef T0
+#undef T1
+#undef T2
+#undef TT0
+#undef TT1
+#undef ZER
+#undef SEL1
+#undef SEL2
+#undef CAR1
+#undef CAR2
+#undef RED1
+#undef RED2
+#undef PL
+#undef PH
+
+// Constant time table access
+// Indexed from 1 to 15, with -1 offset
+// (index 0 is implicitly point at infinity)
+// Previously: func p256SelectBase(point *p256Point, table []p256Point, idx int)
+// Now:        func p256SelectAffine(res *p256AffinePoint, table *p256AffineTable, idx int)
+
+#define P3ptr R1
+#define P1ptr R2
+#define COUNT R4
+#define CPOOL R5
+
+#define X1L V0
+#define X1H V1
+#define Y1L V2
+#define Y1H V3
+#define Z1L V4
+#define Z1H V5
+#define X2L V6
+#define X2H V7
+#define Y2L V8
+#define Y2H V9
+#define Z2L V10
+#define Z2H V11
+#define LE2BE V12
+
+#define ONE V18
+#define IDX V19
+#define SEL1 V20
+#define SEL2 V21
+
+TEXT ·p256SelectAffine(SB), NOSPLIT, $0
+ MOVD res+0(FP), P3ptr
+ MOVD table+8(FP), P1ptr
+ MOVD $p256<>+0x00(SB), CPOOL
+ VLREPB idx+(16+7)(FP), IDX
+ VREPIB $1, ONE
+ VREPIB $1, SEL2
+ MOVD $1, COUNT
+ VL 80(CPOOL), LE2BE
+
+ VZERO X1H
+ VZERO X1L
+ VZERO Y1H
+ VZERO Y1L
+
+loop_select:
+ VL 0(P1ptr), X2H
+ VL 16(P1ptr), X2L
+ VL 32(P1ptr), Y2H
+ VL 48(P1ptr), Y2L
+
+ VCEQG SEL2, IDX, SEL1
+
+ VSEL X2L, X1L, SEL1, X1L
+ VSEL X2H, X1H, SEL1, X1H
+ VSEL Y2L, Y1L, SEL1, Y1L
+ VSEL Y2H, Y1H, SEL1, Y1H
+
+ VAB SEL2, ONE, SEL2
+ ADDW $1, COUNT
+ ADD $64, P1ptr
+ CMPW COUNT, $65
+ BLT loop_select
+ VST X1H, 0(P3ptr)
+ VST X1L, 16(P3ptr)
+ VST Y1H, 32(P3ptr)
+ VST Y1L, 48(P3ptr)
+
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef COUNT
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Y2L
+#undef Y2H
+#undef Z2L
+#undef Z2H
+#undef ONE
+#undef IDX
+#undef SEL1
+#undef SEL2
+#undef CPOOL
+
+// ---------------------------------------
+
+// func p256OrdMul(res, in1, in2 *p256OrdElement)
+#define res_ptr R1
+#define x_ptr R2
+#define y_ptr R3
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define M0 V4
+#define M1 V5
+#define T0 V6
+#define T1 V7
+#define T2 V8
+#define YDIG V9
+
+#define ADD1 V16
+#define ADD1H V17
+#define ADD2 V18
+#define ADD2H V19
+#define RED1 V20
+#define RED1H V21
+#define RED2 V22
+#define RED2H V23
+#define CAR1 V24
+#define CAR1M V25
+
+#define MK0 V30
+#define K0 V31
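+
+// Montgomery multiplication modulo ord(G). One 32-bit digit of in2 is
+// consumed per round, eight rounds in all. A rough CIOS-style Go sketch
+// of a single round (illustrative only; K0 = -ord(G)⁻¹ mod 2³²):
+//
+//	acc += in1 * ydigit           // ADD1/ADD2 and their high halves
+//	m := (acc * K0) & (1<<32 - 1) // MK0
+//	acc = (acc + m*ord(G)) >> 32  // RED1/RED2, then the VSLDB shifts
+//
+// A final conditional subtraction of ord(G) yields the reduced result.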
+TEXT ·p256OrdMul<>(SB), NOSPLIT, $0
+ MOVD res+0(FP), res_ptr
+ MOVD in1+8(FP), x_ptr
+ MOVD in2+16(FP), y_ptr
+
+ VZERO T2
+ MOVD $p256ordK0<>+0x00(SB), R4
+
+ // VLEF $3, 0(R4), K0
+ WORD $0xE7F40000
+ BYTE $0x38
+ BYTE $0x03
+ MOVD $p256ord<>+0x00(SB), R4
+ VL 16(R4), M0
+ VL 0(R4), M1
+
+ VL (0*16)(x_ptr), X0
+ VPDI $0x4, X0, X0, X0
+ VL (1*16)(x_ptr), X1
+ VPDI $0x4, X1, X1, X1
+ VL (0*16)(y_ptr), Y0
+ VPDI $0x4, Y0, Y0, Y0
+ VL (1*16)(y_ptr), Y1
+ VPDI $0x4, Y1, Y1, Y1
+
+ // ---------------------------------------------------------------------------/
+ VREPF $3, Y0, YDIG
+ VMLF X0, YDIG, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMLF X1, YDIG, ADD2
+ VMLHF X0, YDIG, ADD1H
+ VMLHF X1, YDIG, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+/* *
+ * ---+--------+--------+
+ * T2| T1 | T0 |
+ * ---+--------+--------+
+ * *(add)*
+ * +--------+--------+
+ * | X1 | X0 |
+ * +--------+--------+
+ * *(mul)*
+ * +--------+--------+
+ * | YDIG | YDIG |
+ * +--------+--------+
+ * *(add)*
+ * +--------+--------+
+ * | M1 | M0 |
+ * +--------+--------+
+ * *(mul)*
+ * +--------+--------+
+ * | MK0 | MK0 |
+ * +--------+--------+
+ *
+ * ---------------------
+ *
+ * +--------+--------+
+ * | ADD2 | ADD1 |
+ * +--------+--------+
+ * +--------+--------+
+ * | ADD2H | ADD1H |
+ * +--------+--------+
+ * +--------+--------+
+ * | RED2 | RED1 |
+ * +--------+--------+
+ * +--------+--------+
+ * | RED2H | RED1H |
+ * +--------+--------+
+ */
+ VREPF $2, Y0, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $1, Y0, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $0, Y0, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $3, Y1, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $2, Y1, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $1, Y1, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+ VREPF $0, Y1, YDIG
+ VMALF X0, YDIG, T0, ADD1
+ VMLF ADD1, K0, MK0
+ VREPF $3, MK0, MK0
+
+ VMALF X1, YDIG, T1, ADD2
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+
+ VMALF M0, MK0, ADD1, RED1
+ VMALHF M0, MK0, ADD1, RED1H
+ VMALF M1, MK0, ADD2, RED2
+ VMALHF M1, MK0, ADD2, RED2H
+
+ VSLDB $12, RED2, RED1, RED1
+ VSLDB $12, T2, RED2, RED2
+
+ VACCQ RED1, ADD1H, CAR1
+ VAQ RED1, ADD1H, T0
+ VACCQ RED1H, T0, CAR1M
+ VAQ RED1H, T0, T0
+
+ // << ready for next MK0
+
+ VACQ RED2, ADD2H, CAR1, T1
+ VACCCQ RED2, ADD2H, CAR1, CAR1
+ VACCCQ RED2H, T1, CAR1M, T2
+ VACQ RED2H, T1, CAR1M, T1
+ VAQ CAR1, T2, T2
+
+ // ---------------------------------------------------
+
+ VZERO RED1
+ VSCBIQ M0, T0, CAR1
+ VSQ M0, T0, ADD1
+ VSBCBIQ T1, M1, CAR1, CAR1M
+ VSBIQ T1, M1, CAR1, ADD2
+ VSBIQ T2, RED1, CAR1M, T2
+
+ // what output to use, ADD2||ADD1 or T1||T0?
+ VSEL T0, ADD1, T2, T0
+ VSEL T1, ADD2, T2, T1
+
+ VPDI $0x4, T0, T0, T0
+ VST T0, (0*16)(res_ptr)
+ VPDI $0x4, T1, T1, T1
+ VST T1, (1*16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef M0
+#undef M1
+#undef T0
+#undef T1
+#undef T2
+#undef YDIG
+
+#undef ADD1
+#undef ADD1H
+#undef ADD2
+#undef ADD2H
+#undef RED1
+#undef RED1H
+#undef RED2
+#undef RED2H
+#undef CAR1
+#undef CAR1M
+
+#undef MK0
+#undef K0
+
+// ---------------------------------------
+// p256MulInternal
+// V0-V3,V30,V31 - Not Modified
+// V4-V15 - Volatile
+
+#define CPOOL R4
+
+// Parameters
+#define X0 V0 // Not modified
+#define X1 V1 // Not modified
+#define Y0 V2 // Not modified
+#define Y1 V3 // Not modified
+#define T0 V4
+#define T1 V5
+#define P0 V30 // Not modified
+#define P1 V31 // Not modified
+
+// Temporaries
+#define YDIG V6 // Overloaded with CAR2, ZER
+#define ADD1H V7 // Overloaded with ADD3H
+#define ADD2H V8 // Overloaded with ADD4H
+#define ADD3 V9 // Overloaded with SEL2,SEL5
+#define ADD4 V10 // Overloaded with SEL3,SEL6
+#define RED1 V11 // Overloaded with CAR2
+#define RED2 V12
+#define RED3 V13 // Overloaded with SEL1
+#define T2 V14
+// Overloaded temporaries
+#define ADD1 V4 // Overloaded with T0
+#define ADD2 V5 // Overloaded with T1
+#define ADD3H V7 // Overloaded with ADD1H
+#define ADD4H V8 // Overloaded with ADD2H
+#define ZER V6 // Overloaded with YDIG, CAR2
+#define CAR1 V6 // Overloaded with YDIG, ZER
+#define CAR2 V11 // Overloaded with RED1
+// Constant Selects
+#define SEL1 V13 // Overloaded with RED3
+#define SEL2 V9 // Overloaded with ADD3,SEL5
+#define SEL3 V10 // Overloaded with ADD4,SEL6
+#define SEL4 V6 // Overloaded with YDIG,CAR2,ZER
+#define SEL5 V9 // Overloaded with ADD3,SEL2
+#define SEL6 V10 // Overloaded with ADD4,SEL3
+
+/* *
+ * To follow the flow of bits, for your own sanity a stiff drink, need you shall.
+ * Of a single round, a 'helpful' picture, here is. Meaning, column position has.
+ * With you, SIMD be...
+ *
+ * +--------+--------+
+ * +--------| RED2 | RED1 |
+ * | +--------+--------+
+ * | ---+--------+--------+
+ * | +---- T2| T1 | T0 |--+
+ * | | ---+--------+--------+ |
+ * | | |
+ * | | ======================= |
+ * | | |
+ * | | +--------+--------+<-+
+ * | +-------| ADD2 | ADD1 |--|-----+
+ * | | +--------+--------+ | |
+ * | | +--------+--------+<---+ |
+ * | | | ADD2H | ADD1H |--+ |
+ * | | +--------+--------+ | |
+ * | | +--------+--------+<-+ |
+ * | | | ADD4 | ADD3 |--|-+ |
+ * | | +--------+--------+ | | |
+ * | | +--------+--------+<---+ | |
+ * | | | ADD4H | ADD3H |------|-+ |(+vzero)
+ * | | +--------+--------+ | | V
+ * | | ------------------------ | | +--------+
+ * | | | | | RED3 | [d0 0 0 d0]
+ * | | | | +--------+
+ * | +---->+--------+--------+ | | |
+ * (T2[1w]||ADD2[4w]||ADD1[3w]) +--------| T1 | T0 | | | |
+ * | +--------+--------+ | | |
+ * +---->---+--------+--------+ | | |
+ * T2| T1 | T0 |----+ | |
+ * ---+--------+--------+ | | |
+ * ---+--------+--------+<---+ | |
+ * +--- T2| T1 | T0 |----------+
+ * | ---+--------+--------+ | |
+ * | +--------+--------+<-------------+
+ * | | RED2 | RED1 |-----+ | | [0 d1 d0 d1] [d0 0 d1 d0]
+ * | +--------+--------+ | | |
+ * | +--------+<----------------------+
+ * | | RED3 |--------------+ | [0 0 d1 d0]
+ * | +--------+ | |
+ * +--->+--------+--------+ | |
+ * | T1 | T0 |--------+
+ * +--------+--------+ | |
+ * --------------------------- | |
+ * | |
+ * +--------+--------+<----+ |
+ * | RED2 | RED1 | |
+ * +--------+--------+ |
+ * ---+--------+--------+<-------+
+ * T2| T1 | T0 | (H1P-H1P-H00RRAY!)
+ * ---+--------+--------+
+ *
+ * *My work of art of the 21st century @vpaprots
+ *
+ *
+ * First group is special, doesn't get the two inputs:
+ * +--------+--------+<-+
+ * +-------| ADD2 | ADD1 |--|-----+
+ * | +--------+--------+ | |
+ * | +--------+--------+<---+ |
+ * | | ADD2H | ADD1H |--+ |
+ * | +--------+--------+ | |
+ * | +--------+--------+<-+ |
+ * | | ADD4 | ADD3 |--|-+ |
+ * | +--------+--------+ | | |
+ * | +--------+--------+<---+ | |
+ * | | ADD4H | ADD3H |------|-+ |(+vzero)
+ * | +--------+--------+ | | V
+ * | ------------------------ | | +--------+
+ * | | | | RED3 | [d0 0 0 d0]
+ * | | | +--------+
+ * +---->+--------+--------+ | | |
+ * (T2[1w]||ADD2[4w]||ADD1[3w]) | T1 | T0 |----+ | |
+ * +--------+--------+ | | |
+ * ---+--------+--------+<---+ | |
+ * +--- T2| T1 | T0 |----------+
+ * | ---+--------+--------+ | |
+ * | +--------+--------+<-------------+
+ * | | RED2 | RED1 |-----+ | | [0 d1 d0 d1] [d0 0 d1 d0]
+ * | +--------+--------+ | | |
+ * | +--------+<----------------------+
+ * | | RED3 |--------------+ | [0 0 d1 d0]
+ * | +--------+ | |
+ * +--->+--------+--------+ | |
+ * | T1 | T0 |--------+
+ * +--------+--------+ | |
+ * --------------------------- | |
+ * | |
+ * +--------+--------+<----+ |
+ * | RED2 | RED1 | |
+ * +--------+--------+ |
+ * ---+--------+--------+<-------+
+ * T2| T1 | T0 | (H1P-H1P-H00RRAY!)
+ * ---+--------+--------+
+ *
+ * Last 'group' needs to RED2||RED1 shifted less
+ */
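+// Net effect (a hedged summary, not the literal dataflow): on return,
+// T1||T0 = X * Y * 2⁻²⁵⁶ mod p, a Montgomery product. With R = 2²⁵⁶, a
+// value a is represented in the Montgomery domain as a*R mod p, and
+//
+//	mul(a*R, b*R) = a*R * b*R * R⁻¹ = (a*b)*R mod p
+//
+// so products stay in the domain. Each of the four groups below consumes
+// two 32-bit digits of Y.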
+TEXT p256MulInternal<>(SB), NOSPLIT, $0-0
+ VL 32(CPOOL), SEL1
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL3
+ VL 80(CPOOL), SEL4
+
+ // ---------------------------------------------------
+
+ VREPF $3, Y0, YDIG
+ VMLHF X0, YDIG, ADD1H
+ VMLHF X1, YDIG, ADD2H
+ VMLF X0, YDIG, ADD1
+ VMLF X1, YDIG, ADD2
+
+ VREPF $2, Y0, YDIG
+ VMALF X0, YDIG, ADD1H, ADD3
+ VMALF X1, YDIG, ADD2H, ADD4
+ VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free
+ VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free
+
+ VZERO ZER
+ VL 32(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDB $12, ADD2, ADD1, T0 // ADD1 Free
+ VSLDB $12, ZER, ADD2, T1 // ADD2 Free
+
+ VACCQ T0, ADD3, CAR1
+ VAQ T0, ADD3, T0 // ADD3 Free
+ VACCCQ T1, ADD4, CAR1, T2
+ VACQ T1, ADD4, CAR1, T1 // ADD4 Free
+
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL3
+ VL 80(CPOOL), SEL4
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSQ RED3, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $12, T1, T0, T0
+ VSLDB $12, T2, T1, T1
+
+ VACCQ T0, ADD3H, CAR1
+ VAQ T0, ADD3H, T0
+ VACCCQ T1, ADD4H, CAR1, T2
+ VACQ T1, ADD4H, CAR1, T1
+
+ // ---------------------------------------------------
+
+ VREPF $1, Y0, YDIG
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+ VMALF X0, YDIG, T0, ADD1 // T0 Free->ADD1
+ VMALF X1, YDIG, T1, ADD2 // T1 Free->ADD2
+
+ VREPF $0, Y0, YDIG
+ VMALF X0, YDIG, ADD1H, ADD3
+ VMALF X1, YDIG, ADD2H, ADD4
+ VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free->ADD3H
+ VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free->ADD4H , YDIG Free->ZER
+
+ VZERO ZER
+ VL 32(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDB $12, ADD2, ADD1, T0 // ADD1 Free->T0
+ VSLDB $12, T2, ADD2, T1 // ADD2 Free->T1, T2 Free
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, T2
+ VACQ T1, RED2, CAR1, T1
+
+ VACCQ T0, ADD3, CAR1
+ VAQ T0, ADD3, T0
+ VACCCQ T1, ADD4, CAR1, CAR2
+ VACQ T1, ADD4, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL3
+ VL 80(CPOOL), SEL4
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSQ RED3, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $12, T1, T0, T0
+ VSLDB $12, T2, T1, T1
+
+ VACCQ T0, ADD3H, CAR1
+ VAQ T0, ADD3H, T0
+ VACCCQ T1, ADD4H, CAR1, T2
+ VACQ T1, ADD4H, CAR1, T1
+
+ // ---------------------------------------------------
+
+ VREPF $3, Y1, YDIG
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+ VMALF X0, YDIG, T0, ADD1
+ VMALF X1, YDIG, T1, ADD2
+
+ VREPF $2, Y1, YDIG
+ VMALF X0, YDIG, ADD1H, ADD3
+ VMALF X1, YDIG, ADD2H, ADD4
+ VMALHF X0, YDIG, ADD1H, ADD3H // ADD1H Free
+ VMALHF X1, YDIG, ADD2H, ADD4H // ADD2H Free
+
+ VZERO ZER
+ VL 32(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDB $12, ADD2, ADD1, T0 // ADD1 Free
+ VSLDB $12, T2, ADD2, T1 // ADD2 Free
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, T2
+ VACQ T1, RED2, CAR1, T1
+
+ VACCQ T0, ADD3, CAR1
+ VAQ T0, ADD3, T0
+ VACCCQ T1, ADD4, CAR1, CAR2
+ VACQ T1, ADD4, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ VL 48(CPOOL), SEL2
+ VL 64(CPOOL), SEL3
+ VL 80(CPOOL), SEL4
+ VPERM RED3, T0, SEL2, RED1 // [d0 0 d1 d0]
+ VPERM RED3, T0, SEL3, RED2 // [ 0 d1 d0 d1]
+ VPERM RED3, T0, SEL4, RED3 // [ 0 0 d1 d0]
+ VSQ RED3, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $12, T1, T0, T0
+ VSLDB $12, T2, T1, T1
+
+ VACCQ T0, ADD3H, CAR1
+ VAQ T0, ADD3H, T0
+ VACCCQ T1, ADD4H, CAR1, T2
+ VACQ T1, ADD4H, CAR1, T1
+
+ // ---------------------------------------------------
+
+ VREPF $1, Y1, YDIG
+ VMALHF X0, YDIG, T0, ADD1H
+ VMALHF X1, YDIG, T1, ADD2H
+ VMALF X0, YDIG, T0, ADD1
+ VMALF X1, YDIG, T1, ADD2
+
+ VREPF $0, Y1, YDIG
+ VMALF X0, YDIG, ADD1H, ADD3
+ VMALF X1, YDIG, ADD2H, ADD4
+ VMALHF X0, YDIG, ADD1H, ADD3H
+ VMALHF X1, YDIG, ADD2H, ADD4H
+
+ VZERO ZER
+ VL 32(CPOOL), SEL1
+ VPERM ZER, ADD1, SEL1, RED3 // [d0 0 0 d0]
+
+ VSLDB $12, ADD2, ADD1, T0
+ VSLDB $12, T2, ADD2, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, T2
+ VACQ T1, RED2, CAR1, T1
+
+ VACCQ T0, ADD3, CAR1
+ VAQ T0, ADD3, T0
+ VACCCQ T1, ADD4, CAR1, CAR2
+ VACQ T1, ADD4, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ VL 96(CPOOL), SEL5
+ VL 112(CPOOL), SEL6
+ VPERM T0, RED3, SEL5, RED2 // [d1 d0 d1 d0]
+ VPERM T0, RED3, SEL6, RED1 // [ 0 d1 d0 0]
+ VSQ RED1, RED2, RED2 // Guaranteed not to underflow
+
+ VSLDB $12, T1, T0, T0
+ VSLDB $12, T2, T1, T1
+
+ VACCQ T0, ADD3H, CAR1
+ VAQ T0, ADD3H, T0
+ VACCCQ T1, ADD4H, CAR1, T2
+ VACQ T1, ADD4H, CAR1, T1
+
+ VACCQ T0, RED1, CAR1
+ VAQ T0, RED1, T0
+ VACCCQ T1, RED2, CAR1, CAR2
+ VACQ T1, RED2, CAR1, T1
+ VAQ T2, CAR2, T2
+
+ // ---------------------------------------------------
+
+ VZERO RED3
+ VSCBIQ P0, T0, CAR1
+ VSQ P0, T0, ADD1H
+ VSBCBIQ T1, P1, CAR1, CAR2
+ VSBIQ T1, P1, CAR1, ADD2H
+ VSBIQ T2, RED3, CAR2, T2
+
+ // what output to use, ADD2H||ADD1H or T1||T0?
+ VSEL T0, ADD1H, T2, T0
+ VSEL T1, ADD2H, T2, T1
+ RET
+
+#undef CPOOL
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+
+#undef SEL1
+#undef SEL2
+#undef SEL3
+#undef SEL4
+#undef SEL5
+#undef SEL6
+
+#undef YDIG
+#undef ADD1H
+#undef ADD2H
+#undef ADD3
+#undef ADD4
+#undef RED1
+#undef RED2
+#undef RED3
+#undef T2
+#undef ADD1
+#undef ADD2
+#undef ADD3H
+#undef ADD4H
+#undef ZER
+#undef CAR1
+#undef CAR2
+
+// ---------------------------------------
+
+// Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+
+TEXT p256SqrInternal<>(SB), NOFRAME|NOSPLIT, $0
+ VLR X0, Y0
+ VLR X1, Y1
+ BR p256MulInternal<>(SB)
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+
+#define p256SubInternal(T1, T0, X1, X0, Y1, Y0) \
+ VZERO ZER \
+ VSCBIQ Y0, X0, CAR1 \
+ VSQ Y0, X0, T0 \
+ VSBCBIQ X1, Y1, CAR1, SEL1 \
+ VSBIQ X1, Y1, CAR1, T1 \
+ VSQ SEL1, ZER, SEL1 \
+ \
+ VACCQ T0, PL, CAR1 \
+ VAQ T0, PL, TT0 \
+ VACQ T1, PH, CAR1, TT1 \
+ \
+ VSEL T0, TT0, SEL1, T0 \
+ VSEL T1, TT1, SEL1, T1 \
+
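+// p256SubInternal computes T = X - Y mod p in constant time: subtract,
+// then add p back, keeping the corrected value only when the subtraction
+// borrowed. A sketch (illustrative):
+//
+//	t, borrow := x - y
+//	T = select(borrow, t+p, t) // branchless, via the SEL1 mask
+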
+#define p256AddInternal(T1, T0, X1, X0, Y1, Y0) \
+ VACCQ X0, Y0, CAR1 \
+ VAQ X0, Y0, T0 \
+ VACCCQ X1, Y1, CAR1, T2 \
+ VACQ X1, Y1, CAR1, T1 \
+ \
+ VZERO ZER \
+ VSCBIQ PL, T0, CAR1 \
+ VSQ PL, T0, TT0 \
+ VSBCBIQ T1, PH, CAR1, CAR2 \
+ VSBIQ T1, PH, CAR1, TT1 \
+ VSBIQ T2, ZER, CAR2, SEL1 \
+ \
+ VSEL T0, TT0, SEL1, T0 \
+ VSEL T1, TT1, SEL1, T1
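+
+// p256AddInternal computes T = X + Y mod p: add with carry, then subtract
+// p, keeping the reduced value unless that subtraction borrows beyond the
+// carry. A sketch (illustrative):
+//
+//	t, carry := x + y
+//	T = select(carry == 1 || t >= p, t-p, t) // branchless, via SEL1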
+
+#define p256HalfInternal(T1, T0, X1, X0) \
+ VZERO ZER \
+ VSBIQ ZER, ZER, X0, SEL1 \
+ \
+ VACCQ X0, PL, CAR1 \
+ VAQ X0, PL, T0 \
+ VACCCQ X1, PH, CAR1, T2 \
+ VACQ X1, PH, CAR1, T1 \
+ \
+ VSEL X0, T0, SEL1, T0 \
+ VSEL X1, T1, SEL1, T1 \
+ VSEL ZER, T2, SEL1, T2 \
+ \
+ VSLDB $15, T2, ZER, TT1 \
+ VSLDB $15, T1, ZER, TT0 \
+ VREPIB $1, SEL1 \
+ VSRL SEL1, T0, T0 \
+ VSRL SEL1, T1, T1 \
+ VREPIB $7, SEL1 \
+ VSL SEL1, TT0, TT0 \
+ VSL SEL1, TT1, TT1 \
+ VO T0, TT0, T0 \
+ VO T1, TT1, T1
+
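+// p256HalfInternal computes T = X/2 mod p. Dividing by two in the field
+// is multiplying by 2⁻¹ = (p+1)/2; the macro gets there by adding p when
+// X is odd (p is odd, so the sum turns even) and shifting the 257-bit
+// result right by one, carrying the top bit through T2. A sketch
+// (illustrative):
+//
+//	if x&1 == 1 { x += p } // branchless, mask built from the low bit
+//	T = x >> 1
+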
+// ---------------------------------------
+// func p256Mul(res, in1, in2 *p256Element)
+#define res_ptr R1
+#define x_ptr R2
+#define y_ptr R3
+#define CPOOL R4
+
+// Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+// Constants
+#define P0 V30
+#define P1 V31
+TEXT ·p256Mul(SB), NOSPLIT, $0
+ MOVD res+0(FP), res_ptr
+ MOVD in1+8(FP), x_ptr
+ MOVD in2+16(FP), y_ptr
+
+ VL (0*16)(x_ptr), X0
+ VPDI $0x4, X0, X0, X0
+ VL (1*16)(x_ptr), X1
+ VPDI $0x4, X1, X1, X1
+ VL (0*16)(y_ptr), Y0
+ VPDI $0x4, Y0, Y0, Y0
+ VL (1*16)(y_ptr), Y1
+ VPDI $0x4, Y1, Y1, Y1
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), P0
+ VL 0(CPOOL), P1
+
+ CALL p256MulInternal<>(SB)
+
+ VPDI $0x4, T0, T0, T0
+ VST T0, (0*16)(res_ptr)
+ VPDI $0x4, T1, T1, T1
+ VST T1, (1*16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+#undef CPOOL
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+
+// ---------------------------------------
+// func p256Sqr(res, in *p256Element, n int)
+#define res_ptr R1
+#define x_ptr R2
+#define y_ptr R3
+#define CPOOL R4
+#define COUNT R5
+#define N R6
+
+// Parameters
+#define X0 V0
+#define X1 V1
+#define T0 V4
+#define T1 V5
+
+// Constants
+#define P0 V30
+#define P1 V31
+TEXT ·p256Sqr(SB), NOSPLIT, $0
+ MOVD res+0(FP), res_ptr
+ MOVD in+8(FP), x_ptr
+
+ VL (0*16)(x_ptr), X0
+ VPDI $0x4, X0, X0, X0
+ VL (1*16)(x_ptr), X1
+ VPDI $0x4, X1, X1, X1
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ MOVD $0, COUNT
+ MOVD n+16(FP), N
+ VL 16(CPOOL), P0
+ VL 0(CPOOL), P1
+
+loop:
+ CALL p256SqrInternal<>(SB)
+ VLR T0, X0
+ VLR T1, X1
+ ADDW $1, COUNT
+ CMPW COUNT, N
+ BLT loop
+
+ VPDI $0x4, T0, T0, T0
+ VST T0, (0*16)(res_ptr)
+ VPDI $0x4, T1, T1, T1
+ VST T1, (1*16)(res_ptr)
+ RET
+
+#undef res_ptr
+#undef x_ptr
+#undef y_ptr
+#undef CPOOL
+#undef COUNT
+#undef N
+
+#undef X0
+#undef X1
+#undef T0
+#undef T1
+#undef P0
+#undef P1
+
+// Point add with P2 being affine point
+// If sign == 1 -> P2 = -P2
+// If sel == 0 -> P3 = P1
+// If zero == 0 -> P3 = P2
+// func p256PointAddAffineAsm(res, in1 *P256Point, in2 *p256AffinePoint, sign, sel, zero int)
+#define P3ptr R1
+#define P1ptr R2
+#define P2ptr R3
+#define CPOOL R4
+
+// Temporaries in REGs
+#define Y2L V15
+#define Y2H V16
+#define T1L V17
+#define T1H V18
+#define T2L V19
+#define T2H V20
+#define T3L V21
+#define T3H V22
+#define T4L V23
+#define T4H V24
+
+// Temps for Sub and Add
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+
+// Names for zero/sel selects
+#define X1L V0
+#define X1H V1
+#define Y1L V2 // p256MulAsmParmY
+#define Y1H V3 // p256MulAsmParmY
+#define Z1L V4
+#define Z1H V5
+#define X2L V0
+#define X2H V1
+#define Z2L V4
+#define Z2H V5
+#define X3L V17 // T1L
+#define X3H V18 // T1H
+#define Y3L V21 // T3L
+#define Y3H V22 // T3H
+#define Z3L V28
+#define Z3H V29
+
+#define ZER V6
+#define SEL1 V7
+#define CAR1 V8
+#define CAR2 V9
+/* *
+ * Three operand formula:
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ * T1 = Z1²
+ * T2 = T1*Z1
+ * T1 = T1*X2
+ * T2 = T2*Y2
+ * T1 = T1-X1
+ * T2 = T2-Y1
+ * Z3 = Z1*T1
+ * T3 = T1²
+ * T4 = T3*T1
+ * T3 = T3*X1
+ * T1 = 2*T3
+ * X3 = T2²
+ * X3 = X3-T1
+ * X3 = X3-T4
+ * T3 = T3-X3
+ * T3 = T3*T2
+ * T4 = T4*Y1
+ * Y3 = T3-T4
+
+ * Three operand formulas, but with MulInternal X,Y used to store temps
+X=Z1; Y=Z1; MUL;T- // T1 = Z1² T1
+X=T ; Y- ; MUL;T2=T // T2 = T1*Z1 T1 T2
+X- ; Y=X2; MUL;T1=T // T1 = T1*X2 T1 T2
+X=T2; Y=Y2; MUL;T- // T2 = T2*Y2 T1 T2
+SUB(T2<T-Y1) // T2 = T2-Y1 T1 T2
+SUB(Y<T1-X1) // T1 = T1-X1 T1 T2
+X=Z1; Y- ; MUL;Z3:=T// Z3 = Z1*T1 T2
+X=Y; Y- ; MUL;X=T // T3 = T1*T1 T2
+X- ; Y- ; MUL;T4=T // T4 = T3*T1 T2 T4
+X- ; Y=X1; MUL;T3=T // T3 = T3*X1 T2 T3 T4
+ADD(T1<T+T) // T1 = T3+T3 T1 T2 T3 T4
+X=T2; Y=T2; MUL;T- // X3 = T2*T2 T1 T2 T3 T4
+SUB(T<T-T1) // X3 = X3-T1 T1 T2 T3 T4
+SUB(T<T-T4) X3:=T // X3 = X3-T4 T2 T3 T4
+SUB(X<T3-T) // T3 = T3-X3 T2 T3 T4
+X- ; Y- ; MUL;T3=T // T3 = T3*T2 T2 T3 T4
+X=T4; Y=Y1; MUL;T- // T4 = T4*Y1 T3 T4
+SUB(T<T3-T) Y3:=T // Y3 = T3-T4 T3 T4
+
+ */
+TEXT ·p256PointAddAffineAsm(SB), NOSPLIT, $0
+ MOVD res+0(FP), P3ptr
+ MOVD in1+8(FP), P1ptr
+ MOVD in2+16(FP), P2ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+
+ // if (sign == 1) {
+ // Y2 = fromBig(new(big.Int).Mod(new(big.Int).Sub(p256.P, new(big.Int).SetBytes(Y2)), p256.P)) // Y2 = P-Y2
+ // }
+
+ VL 48(P2ptr), Y2H
+ VPDI $0x4, Y2H, Y2H, Y2H
+ VL 32(P2ptr), Y2L
+ VPDI $0x4, Y2L, Y2L, Y2L
+
+ VLREPG sign+24(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VSCBIQ Y2L, PL, CAR1
+ VSQ Y2L, PL, T1L
+ VSBIQ PH, Y2H, CAR1, T1H
+
+ VSEL Y2L, T1L, SEL1, Y2L
+ VSEL Y2H, T1H, SEL1, Y2H
+
+/* *
+ * Three operand formula:
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ */
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1² T1
+ VL 80(P1ptr), X1 // Z1H
+ VPDI $0x4, X1, X1, X1
+ VL 64(P1ptr), X0 // Z1L
+ VPDI $0x4, X0, X0, X0
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X=T ; Y- ; MUL; T2=T // T2 = T1*Z1 T1 T2
+ VLR T0, X0
+ VLR T1, X1
+ CALL p256MulInternal<>(SB)
+ VLR T0, T2L
+ VLR T1, T2H
+
+ // X- ; Y=X2; MUL; T1=T // T1 = T1*X2 T1 T2
+ VL 16(P2ptr), Y1 // X2H
+ VPDI $0x4, Y1, Y1, Y1
+ VL 0(P2ptr), Y0 // X2L
+ VPDI $0x4, Y0, Y0, Y0
+ CALL p256MulInternal<>(SB)
+ VLR T0, T1L
+ VLR T1, T1H
+
+ // X=T2; Y=Y2; MUL; T- // T2 = T2*Y2 T1 T2
+ VLR T2L, X0
+ VLR T2H, X1
+ VLR Y2L, Y0
+ VLR Y2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T2<T-Y1) // T2 = T2-Y1 T1 T2
+ VL 48(P1ptr), Y1H
+ VPDI $0x4, Y1H, Y1H, Y1H
+ VL 32(P1ptr), Y1L
+ VPDI $0x4, Y1L, Y1L, Y1L
+ p256SubInternal(T2H,T2L,T1,T0,Y1H,Y1L)
+
+ // SUB(Y<T1-X1) // T1 = T1-X1 T1 T2
+ VL 16(P1ptr), X1H
+ VPDI $0x4, X1H, X1H, X1H
+ VL 0(P1ptr), X1L
+ VPDI $0x4, X1L, X1L, X1L
+ p256SubInternal(Y1,Y0,T1H,T1L,X1H,X1L)
+
+ // X=Z1; Y- ; MUL; Z3:=T// Z3 = Z1*T1 T2
+ VL 80(P1ptr), X1 // Z1H
+ VPDI $0x4, X1, X1, X1
+ VL 64(P1ptr), X0 // Z1L
+ VPDI $0x4, X0, X0, X0
+ CALL p256MulInternal<>(SB)
+
+ // VST T1, 64(P3ptr)
+ // VST T0, 80(P3ptr)
+ VLR T0, Z3L
+ VLR T1, Z3H
+
+ // X=Y; Y- ; MUL; X=T // T3 = T1*T1 T2
+ VLR Y0, X0
+ VLR Y1, X1
+ CALL p256SqrInternal<>(SB)
+ VLR T0, X0
+ VLR T1, X1
+
+ // X- ; Y- ; MUL; T4=T // T4 = T3*T1 T2 T4
+ CALL p256MulInternal<>(SB)
+ VLR T0, T4L
+ VLR T1, T4H
+
+ // X- ; Y=X1; MUL; T3=T // T3 = T3*X1 T2 T3 T4
+ VL 16(P1ptr), Y1 // X1H
+ VPDI $0x4, Y1, Y1, Y1
+ VL 0(P1ptr), Y0 // X1L
+ VPDI $0x4, Y0, Y0, Y0
+ CALL p256MulInternal<>(SB)
+ VLR T0, T3L
+ VLR T1, T3H
+
+ // ADD(T1<T+T) // T1 = T3+T3 T1 T2 T3 T4
+ p256AddInternal(T1H,T1L, T1,T0,T1,T0)
+
+ // X=T2; Y=T2; MUL; T- // X3 = T2*T2 T1 T2 T3 T4
+ VLR T2L, X0
+ VLR T2H, X1
+ VLR T2L, Y0
+ VLR T2H, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // SUB(T<T-T1) // X3 = X3-T1 T1 T2 T3 T4 (T1 = X3)
+ p256SubInternal(T1,T0,T1,T0,T1H,T1L)
+
+ // SUB(T<T-T4) X3:=T // X3 = X3-T4 T2 T3 T4
+ p256SubInternal(T1,T0,T1,T0,T4H,T4L)
+ VLR T0, X3L
+ VLR T1, X3H
+
+ // SUB(X<T3-T) // T3 = T3-X3 T2 T3 T4
+ p256SubInternal(X1,X0,T3H,T3L,T1,T0)
+
+ // X- ; Y- ; MUL; T3=T // T3 = T3*T2 T2 T3 T4
+ CALL p256MulInternal<>(SB)
+ VLR T0, T3L
+ VLR T1, T3H
+
+ // X=T4; Y=Y1; MUL; T- // T4 = T4*Y1 T3 T4
+ VLR T4L, X0
+ VLR T4H, X1
+ VL 48(P1ptr), Y1 // Y1H
+ VPDI $0x4, Y1, Y1, Y1
+ VL 32(P1ptr), Y0 // Y1L
+ VPDI $0x4, Y0, Y0, Y0
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<T3-T) Y3:=T // Y3 = T3-T4 T3 T4 (T3 = Y3)
+ p256SubInternal(Y3H,Y3L,T3H,T3L,T1,T0)
+
+ // if (sel == 0) {
+ // copy(P3.x[:], X1)
+ // copy(P3.y[:], Y1)
+ // copy(P3.z[:], Z1)
+ // }
+
+ VL 16(P1ptr), X1H
+ VPDI $0x4, X1H, X1H, X1H
+ VL 0(P1ptr), X1L
+ VPDI $0x4, X1L, X1L, X1L
+
+ // Y1 already loaded, left over from addition
+ VL 80(P1ptr), Z1H
+ VPDI $0x4, Z1H, Z1H, Z1H
+ VL 64(P1ptr), Z1L
+ VPDI $0x4, Z1L, Z1L, Z1L
+
+ VLREPG sel+32(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VSEL X1L, X3L, SEL1, X3L
+ VSEL X1H, X3H, SEL1, X3H
+ VSEL Y1L, Y3L, SEL1, Y3L
+ VSEL Y1H, Y3H, SEL1, Y3H
+ VSEL Z1L, Z3L, SEL1, Z3L
+ VSEL Z1H, Z3H, SEL1, Z3H
+
+ // if (zero == 0) {
+ // copy(P3.x[:], X2)
+ // copy(P3.y[:], Y2)
+ // copy(P3.z[:], []byte{0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ // 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) //(p256.z*2^256)%p
+ // }
+ VL 16(P2ptr), X2H
+ VPDI $0x4, X2H, X2H, X2H
+ VL 0(P2ptr), X2L
+ VPDI $0x4, X2L, X2L, X2L
+
+ // Y2 already loaded
+ VL 128(CPOOL), Z2H
+ VL 144(CPOOL), Z2L
+
+ VLREPG zero+40(FP), SEL1
+ VZERO ZER
+ VCEQG SEL1, ZER, SEL1
+
+ VSEL X2L, X3L, SEL1, X3L
+ VSEL X2H, X3H, SEL1, X3H
+ VSEL Y2L, Y3L, SEL1, Y3L
+ VSEL Y2H, Y3H, SEL1, Y3H
+ VSEL Z2L, Z3L, SEL1, Z3L
+ VSEL Z2H, Z3H, SEL1, Z3H
+
+ // All done, store out the result!!!
+ VPDI $0x4, X3H, X3H, X3H
+ VST X3H, 16(P3ptr)
+ VPDI $0x4, X3L, X3L, X3L
+ VST X3L, 0(P3ptr)
+ VPDI $0x4, Y3H, Y3H, Y3H
+ VST Y3H, 48(P3ptr)
+ VPDI $0x4, Y3L, Y3L, Y3L
+ VST Y3L, 32(P3ptr)
+ VPDI $0x4, Z3H, Z3H, Z3H
+ VST Z3H, 80(P3ptr)
+ VPDI $0x4, Z3L, Z3L, Z3L
+ VST Z3L, 64(P3ptr)
+
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef P2ptr
+#undef CPOOL
+
+#undef Y2L
+#undef Y2H
+#undef T1L
+#undef T1H
+#undef T2L
+#undef T2H
+#undef T3L
+#undef T3H
+#undef T4L
+#undef T4H
+
+#undef TT0
+#undef TT1
+#undef T2
+
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+
+#undef PL
+#undef PH
+
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef X2L
+#undef X2H
+#undef Z2L
+#undef Z2H
+#undef X3L
+#undef X3H
+#undef Y3L
+#undef Y3H
+#undef Z3L
+#undef Z3H
+
+#undef ZER
+#undef SEL1
+#undef CAR1
+#undef CAR2
+
+// func p256PointDoubleAsm(res, in *P256Point)
+// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl
+// https://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
+// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-projective-3.html
+#define P3ptr R1
+#define P1ptr R2
+#define CPOOL R4
+
+// Temporaries in REGs
+#define X3L V15
+#define X3H V16
+#define Y3L V17
+#define Y3H V18
+#define T1L V19
+#define T1H V20
+#define T2L V21
+#define T2H V22
+#define T3L V23
+#define T3H V24
+
+#define X1L V6
+#define X1H V7
+#define Y1L V8
+#define Y1H V9
+#define Z1L V10
+#define Z1H V11
+
+// Temps for Sub and Add
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+
+#define Z3L V23
+#define Z3H V24
+
+#define ZER V26
+#define SEL1 V27
+#define CAR1 V28
+#define CAR2 V29
+/*
+ * https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2004-hmv
+ * Cost: 4M + 4S + 1*half + 5add + 2*2 + 1*3.
+ * Source: 2004 Hankerson–Menezes–Vanstone, page 91.
+ * A = 3(X₁-Z₁²)×(X₁+Z₁²)
+ * B = 2Y₁
+ * Z₃ = B×Z₁
+ * C = B²
+ * D = C×X₁
+ * X₃ = A²-2D
+ * Y₃ = (D-X₃)×A-C²/2
+ *
+ * Three-operand formula:
+ * T1 = Z1²
+ * T2 = X1-T1
+ * T1 = X1+T1
+ * T2 = T2*T1
+ * T2 = 3*T2
+ * Y3 = 2*Y1
+ * Z3 = Y3*Z1
+ * Y3 = Y3²
+ * T3 = Y3*X1
+ * Y3 = Y3²
+ * Y3 = half*Y3
+ * X3 = T2²
+ * T1 = 2*T3
+ * X3 = X3-T1
+ * T1 = T3-X3
+ * T1 = T1*T2
+ * Y3 = T1-Y3
+ */
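+// Cost notation above (a hedged reading): M is a field multiplication and
+// S a squaring; "1*half" is one halving, "5add" five additions, "2*2" two
+// multiplications by 2, and "1*3" one multiplication by 3, matching the
+// MUL/SQR/HAL/ADD steps transcribed in the three-operand formula.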
+
+TEXT ·p256PointDoubleAsm(SB), NOSPLIT, $0
+ MOVD res+0(FP), P3ptr
+ MOVD in+8(FP), P1ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1²
+ VL 80(P1ptr), X1 // Z1H
+ VPDI $0x4, X1, X1, X1
+ VL 64(P1ptr), X0 // Z1L
+ VPDI $0x4, X0, X0, X0
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // SUB(X<X1-T) // T2 = X1-T1
+ VL 16(P1ptr), X1H
+ VPDI $0x4, X1H, X1H, X1H
+ VL 0(P1ptr), X1L
+ VPDI $0x4, X1L, X1L, X1L
+ p256SubInternal(X1,X0,X1H,X1L,T1,T0)
+
+ // ADD(Y<X1+T) // T1 = X1+T1
+ p256AddInternal(Y1,Y0,X1H,X1L,T1,T0)
+
+ // X- ; Y- ; MUL; T- // T2 = T2*T1
+ CALL p256MulInternal<>(SB)
+
+ // ADD(T2<T+T); ADD(T2<T2+T) // T2 = 3*T2
+ p256AddInternal(T2H,T2L,T1,T0,T1,T0)
+ p256AddInternal(T2H,T2L,T2H,T2L,T1,T0)
+
+ // ADD(X<Y1+Y1) // Y3 = 2*Y1
+ VL 48(P1ptr), Y1H
+ VPDI $0x4, Y1H, Y1H, Y1H
+ VL 32(P1ptr), Y1L
+ VPDI $0x4, Y1L, Y1L, Y1L
+ p256AddInternal(X1,X0,Y1H,Y1L,Y1H,Y1L)
+
+ // X- ; Y=Z1; MUL; Z3:=T // Z3 = Y3*Z1
+ VL 80(P1ptr), Y1 // Z1H
+ VPDI $0x4, Y1, Y1, Y1
+ VL 64(P1ptr), Y0 // Z1L
+ VPDI $0x4, Y0, Y0, Y0
+ CALL p256MulInternal<>(SB)
+ VPDI $0x4, T1, T1, TT1
+ VST TT1, 80(P3ptr)
+ VPDI $0x4, T0, T0, TT0
+ VST TT0, 64(P3ptr)
+
+ // X- ; Y=X ; MUL; T- // Y3 = Y3²
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X=T ; Y=X1; MUL; T3=T // T3 = Y3*X1
+ VLR T0, X0
+ VLR T1, X1
+ VL 16(P1ptr), Y1
+ VPDI $0x4, Y1, Y1, Y1
+ VL 0(P1ptr), Y0
+ VPDI $0x4, Y0, Y0, Y0
+ CALL p256MulInternal<>(SB)
+ VLR T0, T3L
+ VLR T1, T3H
+
+ // X- ; Y=X ; MUL; T- // Y3 = Y3²
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // HAL(Y3<T) // Y3 = half*Y3
+ p256HalfInternal(Y3H,Y3L, T1,T0)
+
+ // X=T2; Y=T2; MUL; T- // X3 = T2²
+ VLR T2L, X0
+ VLR T2H, X1
+ VLR T2L, Y0
+ VLR T2H, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // ADD(T1<T3+T3) // T1 = 2*T3
+ p256AddInternal(T1H,T1L,T3H,T3L,T3H,T3L)
+
+ // SUB(X3<T-T1) X3:=X3 // X3 = X3-T1
+ p256SubInternal(X3H,X3L,T1,T0,T1H,T1L)
+ VPDI $0x4, X3H, X3H, TT1
+ VST TT1, 16(P3ptr)
+ VPDI $0x4, X3L, X3L, TT0
+ VST TT0, 0(P3ptr)
+
+ // SUB(X<T3-X3) // T1 = T3-X3
+ p256SubInternal(X1,X0,T3H,T3L,X3H,X3L)
+
+ // X- ; Y- ; MUL; T- // T1 = T1*T2
+ CALL p256MulInternal<>(SB)
+
+ // SUB(Y3<T-Y3) // Y3 = T1-Y3
+ p256SubInternal(Y3H,Y3L,T1,T0,Y3H,Y3L)
+
+ VPDI $0x4, Y3H, Y3H, Y3H
+ VST Y3H, 48(P3ptr)
+ VPDI $0x4, Y3L, Y3L, Y3L
+ VST Y3L, 32(P3ptr)
+ RET
+
+#undef P3ptr
+#undef P1ptr
+#undef CPOOL
+#undef X3L
+#undef X3H
+#undef Y3L
+#undef Y3H
+#undef T1L
+#undef T1H
+#undef T2L
+#undef T2H
+#undef T3L
+#undef T3H
+#undef X1L
+#undef X1H
+#undef Y1L
+#undef Y1H
+#undef Z1L
+#undef Z1H
+#undef TT0
+#undef TT1
+#undef T2
+#undef X0
+#undef X1
+#undef Y0
+#undef Y1
+#undef T0
+#undef T1
+#undef PL
+#undef PH
+#undef Z3L
+#undef Z3H
+#undef ZER
+#undef SEL1
+#undef CAR1
+#undef CAR2
+
+// func p256PointAddAsm(res, in1, in2 *P256Point) int
+#define P3ptr R1
+#define P1ptr R2
+#define P2ptr R3
+#define CPOOL R4
+#define ISZERO R5
+#define TRUE R6
+
+// Temporaries in REGs
+#define T1L V16
+#define T1H V17
+#define T2L V18
+#define T2H V19
+#define U1L V20
+#define U1H V21
+#define S1L V22
+#define S1H V23
+#define HL V24
+#define HH V25
+#define RL V26
+#define RH V27
+
+// Temps for Sub and Add
+#define ZER V6
+#define SEL1 V7
+#define CAR1 V8
+#define CAR2 V9
+#define TT0 V11
+#define TT1 V12
+#define T2 V13
+
+// p256MulAsm Parameters
+#define X0 V0
+#define X1 V1
+#define Y0 V2
+#define Y1 V3
+#define T0 V4
+#define T1 V5
+
+#define PL V30
+#define PH V31
+/*
+ * https://delta.cs.cinvestav.mx/~francisco/arith/julio.pdf "Software Implementation of the NIST Elliptic Curves Over Prime Fields"
+ *
+ * A = X₁×Z₂²
+ * B = Y₁×Z₂³
+ * C = X₂×Z₁²-A
+ * D = Y₂×Z₁³-B
+ * X₃ = D² - 2A×C² - C³
+ * Y₃ = D×(A×C² - X₃) - B×C³
+ * Z₃ = Z₁×Z₂×C
+ *
+ * Three-operand formula (adopted): https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-1998-cmo-2
+ * Temp storage: T1,T2,U1,H,Z3=X3=Y3,S1,R
+ *
+ * T1 = Z1*Z1
+ * T2 = Z2*Z2
+ * U1 = X1*T2
+ * H = X2*T1
+ * H = H-U1
+ * Z3 = Z1*Z2
+ * Z3 = Z3*H << store-out Z3 result reg; could overwrite Z1 if the slices share a backing array
+ *
+ * S1 = Z2*T2
+ * S1 = Y1*S1
+ * R = Z1*T1
+ * R = Y2*R
+ * R = R-S1
+ *
+ * T1 = H*H
+ * T2 = H*T1
+ * U1 = U1*T1
+ *
+ * X3 = R*R
+ * X3 = X3-T2
+ * T1 = 2*U1
+ * X3 = X3-T1 << store-out X3 result reg
+ *
+ * T2 = S1*T2
+ * Y3 = U1-X3
+ * Y3 = R*Y3
+ * Y3 = Y3-T2 << store-out Y3 result reg
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1*Z1
+ // X- ; Y=T ; MUL; R=T // R = Z1*T1
+ // X=X2; Y- ; MUL; H=T // H = X2*T1
+ // X=Z2; Y=Z2; MUL; T- // T2 = Z2*Z2
+ // X- ; Y=T ; MUL; S1=T // S1 = Z2*T2
+ // X=X1; Y- ; MUL; U1=T // U1 = X1*T2
+ // SUB(H<H-T) // H = H-U1
+ // X=Z1; Y=Z2; MUL; T- // Z3 = Z1*Z2
+ // X=T ; Y=H ; MUL; Z3:=T// Z3 = Z3*H << store-out Z3 result reg; could overwrite Z1 if the slices share a backing array
+ // X=Y1; Y=S1; MUL; S1=T // S1 = Y1*S1
+ // X=Y2; Y=R ; MUL; T- // R = Y2*R
+ // SUB(R<T-S1) // R = R-S1
+ // X=H ; Y=H ; MUL; T- // T1 = H*H
+ // X- ; Y=T ; MUL; T2=T // T2 = H*T1
+ // X=U1; Y- ; MUL; U1=T // U1 = U1*T1
+ // X=R ; Y=R ; MUL; T- // X3 = R*R
+ // SUB(T<T-T2) // X3 = X3-T2
+ // ADD(X<U1+U1) // T1 = 2*U1
+ // SUB(T<T-X) X3:=T // X3 = X3-T1 << store-out X3 result reg
+ // SUB(Y<U1-T) // Y3 = U1-X3
+ // X=R ; Y- ; MUL; U1=T // Y3 = R*Y3
+ // X=S1; Y=T2; MUL; T- // T2 = S1*T2
+ // SUB(T<U1-T); Y3:=T // Y3 = Y3-T2 << store-out Y3 result reg
+ */
+TEXT ·p256PointAddAsm(SB), NOSPLIT, $0
+ MOVD res+0(FP), P3ptr
+ MOVD in1+8(FP), P1ptr
+ MOVD in2+16(FP), P2ptr
+
+ MOVD $p256mul<>+0x00(SB), CPOOL
+ VL 16(CPOOL), PL
+ VL 0(CPOOL), PH
+
+ // X=Z1; Y=Z1; MUL; T- // T1 = Z1*Z1
+ VL 80(P1ptr), X1 // Z1H
+ VPDI $0x4, X1, X1, X1
+ VL 64(P1ptr), X0 // Z1L
+ VPDI $0x4, X0, X0, X0
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X- ; Y=T ; MUL; R=T // R = Z1*T1
+ VLR T0, Y0
+ VLR T1, Y1
+ CALL p256MulInternal<>(SB)
+ VLR T0, RL
+ VLR T1, RH
+
+ // X=X2; Y- ; MUL; H=T // H = X2*T1
+ VL 16(P2ptr), X1 // X2H
+ VPDI $0x4, X1, X1, X1
+ VL 0(P2ptr), X0 // X2L
+ VPDI $0x4, X0, X0, X0
+ CALL p256MulInternal<>(SB)
+ VLR T0, HL
+ VLR T1, HH
+
+ // X=Z2; Y=Z2; MUL; T- // T2 = Z2*Z2
+ VL 80(P2ptr), X1 // Z2H
+ VPDI $0x4, X1, X1, X1
+ VL 64(P2ptr), X0 // Z2L
+ VPDI $0x4, X0, X0, X0
+ VLR X0, Y0
+ VLR X1, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X- ; Y=T ; MUL; S1=T // S1 = Z2*T2
+ VLR T0, Y0
+ VLR T1, Y1
+ CALL p256MulInternal<>(SB)
+ VLR T0, S1L
+ VLR T1, S1H
+
+ // X=X1; Y- ; MUL; U1=T // U1 = X1*T2
+ VL 16(P1ptr), X1 // X1H
+ VPDI $0x4, X1, X1, X1
+ VL 0(P1ptr), X0 // X1L
+ VPDI $0x4, X0, X0, X0
+ CALL p256MulInternal<>(SB)
+ VLR T0, U1L
+ VLR T1, U1H
+
+ // SUB(H<H-T) // H = H-U1
+ p256SubInternal(HH,HL,HH,HL,T1,T0)
+
+ // if H == 0 or H^P == 0 then ret=1 else ret=0
+ // clobbers T1H and T1L
+ MOVD $0, ISZERO
+ MOVD $1, TRUE
+ VZERO ZER
+ VO HL, HH, T1H
+ VCEQGS ZER, T1H, T1H
+ MOVDEQ TRUE, ISZERO
+ VX HL, PL, T1L
+ VX HH, PH, T1H
+ VO T1L, T1H, T1H
+ VCEQGS ZER, T1H, T1H
+ MOVDEQ TRUE, ISZERO
+ MOVD ISZERO, ret+24(FP)
+
+ // X=Z1; Y=Z2; MUL; T- // Z3 = Z1*Z2
+ VL 80(P1ptr), X1 // Z1H
+ VPDI $0x4, X1, X1, X1
+ VL 64(P1ptr), X0 // Z1L
+ VPDI $0x4, X0, X0, X0
+ VL 80(P2ptr), Y1 // Z2H
+ VPDI $0x4, Y1, Y1, Y1
+ VL 64(P2ptr), Y0 // Z2L
+ VPDI $0x4, Y0, Y0, Y0
+ CALL p256MulInternal<>(SB)
+
+ // X=T ; Y=H ; MUL; Z3:=T// Z3 = Z3*H
+ VLR T0, X0
+ VLR T1, X1
+ VLR HL, Y0
+ VLR HH, Y1
+ CALL p256MulInternal<>(SB)
+ VPDI $0x4, T1, T1, TT1
+ VST TT1, 80(P3ptr)
+ VPDI $0x4, T0, T0, TT0
+ VST TT0, 64(P3ptr)
+
+ // X=Y1; Y=S1; MUL; S1=T // S1 = Y1*S1
+ VL 48(P1ptr), X1
+ VPDI $0x4, X1, X1, X1
+ VL 32(P1ptr), X0
+ VPDI $0x4, X0, X0, X0
+ VLR S1L, Y0
+ VLR S1H, Y1
+ CALL p256MulInternal<>(SB)
+ VLR T0, S1L
+ VLR T1, S1H
+
+ // X=Y2; Y=R ; MUL; T- // R = Y2*R
+ VL 48(P2ptr), X1
+ VPDI $0x4, X1, X1, X1
+ VL 32(P2ptr), X0
+ VPDI $0x4, X0, X0, X0
+ VLR RL, Y0
+ VLR RH, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(R<T-S1) // R = T-S1
+ p256SubInternal(RH,RL,T1,T0,S1H,S1L)
+
+ // if R == 0 or R^P == 0 then ret=ret else ret=0
+ // clobbers T1H and T1L
+ MOVD $0, ISZERO
+ MOVD $1, TRUE
+ VZERO ZER
+ VO RL, RH, T1H
+ VCEQGS ZER, T1H, T1H
+ MOVDEQ TRUE, ISZERO
+ VX RL, PL, T1L
+ VX RH, PH, T1H
+ VO T1L, T1H, T1H
+ VCEQGS ZER, T1H, T1H
+ MOVDEQ TRUE, ISZERO
+ AND ret+24(FP), ISZERO
+ MOVD ISZERO, ret+24(FP)
+
+ // X=H ; Y=H ; MUL; T- // T1 = H*H
+ VLR HL, X0
+ VLR HH, X1
+ VLR HL, Y0
+ VLR HH, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // X- ; Y=T ; MUL; T2=T // T2 = H*T1
+ VLR T0, Y0
+ VLR T1, Y1
+ CALL p256MulInternal<>(SB)
+ VLR T0, T2L
+ VLR T1, T2H
+
+ // X=U1; Y- ; MUL; U1=T // U1 = U1*T1
+ VLR U1L, X0
+ VLR U1H, X1
+ CALL p256MulInternal<>(SB)
+ VLR T0, U1L
+ VLR T1, U1H
+
+ // X=R ; Y=R ; MUL; T- // X3 = R*R
+ VLR RL, X0
+ VLR RH, X1
+ VLR RL, Y0
+ VLR RH, Y1
+ CALL p256SqrInternal<>(SB)
+
+ // SUB(T<T-T2) // X3 = X3-T2
+ p256SubInternal(T1,T0,T1,T0,T2H,T2L)
+
+ // ADD(X<U1+U1) // T1 = 2*U1
+ p256AddInternal(X1,X0,U1H,U1L,U1H,U1L)
+
+ // SUB(T<T-X) X3:=T // X3 = X3-T1 << store-out X3 result reg
+ p256SubInternal(T1,T0,T1,T0,X1,X0)
+ VPDI $0x4, T1, T1, TT1
+ VST TT1, 16(P3ptr)
+ VPDI $0x4, T0, T0, TT0
+ VST TT0, 0(P3ptr)
+
+ // SUB(Y<U1-T) // Y3 = U1-X3
+ p256SubInternal(Y1,Y0,U1H,U1L,T1,T0)
+
+ // X=R ; Y- ; MUL; U1=T // Y3 = R*Y3
+ VLR RL, X0
+ VLR RH, X1
+ CALL p256MulInternal<>(SB)
+ VLR T0, U1L
+ VLR T1, U1H
+
+ // X=S1; Y=T2; MUL; T- // T2 = S1*T2
+ VLR S1L, X0
+ VLR S1H, X1
+ VLR T2L, Y0
+ VLR T2H, Y1
+ CALL p256MulInternal<>(SB)
+
+ // SUB(T<U1-T); Y3:=T // Y3 = Y3-T2 << store-out Y3 result reg
+ p256SubInternal(T1,T0,U1H,U1L,T1,T0)
+ VPDI $0x4, T1, T1, T1
+ VST T1, 48(P3ptr)
+ VPDI $0x4, T0, T0, T0
+ VST T0, 32(P3ptr)
+
+ RET
diff --git a/src/crypto/internal/nistec/p256_asm_table.bin b/src/crypto/internal/nistec/p256_asm_table.bin
new file mode 100644
index 0000000..20c527e
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_asm_table.bin
Binary files differ
diff --git a/src/crypto/internal/nistec/p256_asm_table_test.go b/src/crypto/internal/nistec/p256_asm_table_test.go
new file mode 100644
index 0000000..5b7246b
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_asm_table_test.go
@@ -0,0 +1,49 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64 || ppc64le || s390x
+
+package nistec
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestP256PrecomputedTable(t *testing.T) {
+ base := NewP256Point().SetGenerator()
+
+ for i := 0; i < 43; i++ {
+ t.Run(fmt.Sprintf("table[%d]", i), func(t *testing.T) {
+ testP256AffineTable(t, base, &p256Precomputed[i])
+ })
+
+ for k := 0; k < 6; k++ {
+ base.Double(base)
+ }
+ }
+}
+
+func testP256AffineTable(t *testing.T, base *P256Point, table *p256AffineTable) {
+ p := NewP256Point()
+ zInv := new(p256Element)
+ zInvSq := new(p256Element)
+
+ for j := 0; j < 32; j++ {
+ p.Add(p, base)
+
+ // Convert p to affine coordinates.
+ p256Inverse(zInv, &p.z)
+ p256Sqr(zInvSq, zInv, 1)
+ p256Mul(zInv, zInv, zInvSq)
+
+ p256Mul(&p.x, &p.x, zInvSq)
+ p256Mul(&p.y, &p.y, zInv)
+ p.z = p256One
+
+ if p256Equal(&table[j].x, &p.x) != 1 || p256Equal(&table[j].y, &p.y) != 1 {
+ t.Fatalf("incorrect table entry at index %d", j)
+ }
+ }
+}
diff --git a/src/crypto/internal/nistec/p256_ordinv.go b/src/crypto/internal/nistec/p256_ordinv.go
new file mode 100644
index 0000000..1274fb7
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_ordinv.go
@@ -0,0 +1,102 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package nistec
+
+import "errors"
+
+// Montgomery multiplication modulo ord(G). Sets res = in1 * in2 * R⁻¹.
+//
+//go:noescape
+func p256OrdMul(res, in1, in2 *p256OrdElement)
+
+// Montgomery square modulo ord(G), repeated n times (n >= 1).
+//
+//go:noescape
+func p256OrdSqr(res, in *p256OrdElement, n int)
+
+func P256OrdInverse(k []byte) ([]byte, error) {
+ if len(k) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+
+ x := new(p256OrdElement)
+ p256OrdBigToLittle(x, (*[32]byte)(k))
+ p256OrdReduce(x)
+
+ // Inversion is implemented as exponentiation by n - 2, per Fermat's little theorem.
+ //
+ // The sequence of 38 multiplications and 254 squarings is derived from
+ // https://briansmith.org/ecc-inversion-addition-chains-01#p256_scalar_inversion
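+	//
+	// That is, k⁻¹ ≡ k^(n-2) (mod n), since k^(n-1) ≡ 1 (mod n) for k ≢ 0 (mod n).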
+ _1 := new(p256OrdElement)
+ _11 := new(p256OrdElement)
+ _101 := new(p256OrdElement)
+ _111 := new(p256OrdElement)
+ _1111 := new(p256OrdElement)
+ _10101 := new(p256OrdElement)
+ _101111 := new(p256OrdElement)
+ t := new(p256OrdElement)
+
+ // This code operates in the Montgomery domain where R = 2²⁵⁶ mod n and n is
+ // the order of the scalar field. Elements in the Montgomery domain take the
+ // form a×R and p256OrdMul calculates (a × b × R⁻¹) mod n. RR is R in the
+ // domain, or R×R mod n, thus p256OrdMul(x, RR) gives x×R, i.e. converts x
+ // into the Montgomery domain.
+ RR := &p256OrdElement{0x83244c95be79eea2, 0x4699799c49bd6fa6,
+ 0x2845b2392b6bec59, 0x66e12d94f3d95620}
+
+ p256OrdMul(_1, x, RR) // _1
+ p256OrdSqr(x, _1, 1) // _10
+ p256OrdMul(_11, x, _1) // _11
+ p256OrdMul(_101, x, _11) // _101
+ p256OrdMul(_111, x, _101) // _111
+ p256OrdSqr(x, _101, 1) // _1010
+ p256OrdMul(_1111, _101, x) // _1111
+
+ p256OrdSqr(t, x, 1) // _10100
+ p256OrdMul(_10101, t, _1) // _10101
+ p256OrdSqr(x, _10101, 1) // _101010
+ p256OrdMul(_101111, _101, x) // _101111
+ p256OrdMul(x, _10101, x) // _111111 = x6
+ p256OrdSqr(t, x, 2) // _11111100
+ p256OrdMul(t, t, _11) // _11111111 = x8
+ p256OrdSqr(x, t, 8) // _ff00
+ p256OrdMul(x, x, t) // _ffff = x16
+ p256OrdSqr(t, x, 16) // _ffff0000
+ p256OrdMul(t, t, x) // _ffffffff = x32
+
+ p256OrdSqr(x, t, 64)
+ p256OrdMul(x, x, t)
+ p256OrdSqr(x, x, 32)
+ p256OrdMul(x, x, t)
+
+ sqrs := []int{
+ 6, 5, 4, 5, 5,
+ 4, 3, 3, 5, 9,
+ 6, 2, 5, 6, 5,
+ 4, 5, 5, 3, 10,
+ 2, 5, 5, 3, 7, 6}
+ muls := []*p256OrdElement{
+ _101111, _111, _11, _1111, _10101,
+ _101, _101, _101, _111, _101111,
+ _1111, _1, _1, _1111, _111,
+ _111, _111, _101, _11, _101111,
+ _11, _11, _11, _1, _10101, _1111}
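+
+	// Each pair (sqrs[i], muls[i]) extends the exponent built up in x: the
+	// s squarings shift its bits left by s, and the multiply appends the
+	// bits of the small precomputed window, e.g. x ← x^(2⁶) · _101111 for
+	// the first pair (an illustrative reading of the addition chain).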
+
+ for i, s := range sqrs {
+ p256OrdSqr(x, x, s)
+ p256OrdMul(x, x, muls[i])
+ }
+
+ // Montgomery multiplication by R⁻¹, or 1 outside the domain as R⁻¹×R = 1,
+ // converts a Montgomery value out of the domain.
+ one := &p256OrdElement{1}
+ p256OrdMul(x, x, one)
+
+ var xOut [32]byte
+ p256OrdLittleToBig(&xOut, x)
+ return xOut[:], nil
+}
diff --git a/src/crypto/internal/nistec/p256_ordinv_noasm.go b/src/crypto/internal/nistec/p256_ordinv_noasm.go
new file mode 100644
index 0000000..213875c
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_ordinv_noasm.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 && !arm64
+
+package nistec
+
+import "errors"
+
+func P256OrdInverse(k []byte) ([]byte, error) {
+ return nil, errors.New("unimplemented")
+}
diff --git a/src/crypto/internal/nistec/p256_ordinv_test.go b/src/crypto/internal/nistec/p256_ordinv_test.go
new file mode 100644
index 0000000..72de6bd
--- /dev/null
+++ b/src/crypto/internal/nistec/p256_ordinv_test.go
@@ -0,0 +1,94 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package nistec_test
+
+import (
+ "bytes"
+ "crypto/elliptic"
+ "crypto/internal/nistec"
+ "math/big"
+ "testing"
+)
+
+func TestP256OrdInverse(t *testing.T) {
+ N := elliptic.P256().Params().N
+
+ // inv(0) is expected to be 0.
+ zero := make([]byte, 32)
+ out, err := nistec.P256OrdInverse(zero)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(out, zero) {
+ t.Error("unexpected output for inv(0)")
+ }
+
+ // inv(N) is also 0 mod N.
+ input := make([]byte, 32)
+ N.FillBytes(input)
+ out, err = nistec.P256OrdInverse(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(out, zero) {
+ t.Error("unexpected output for inv(N)")
+ }
+ if !bytes.Equal(input, N.Bytes()) {
+ t.Error("input was modified")
+ }
+
+ // Check inv(1) and inv(N+1) against math/big
+ exp := new(big.Int).ModInverse(big.NewInt(1), N).FillBytes(make([]byte, 32))
+ big.NewInt(1).FillBytes(input)
+ out, err = nistec.P256OrdInverse(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(out, exp) {
+ t.Error("unexpected output for inv(1)")
+ }
+ new(big.Int).Add(N, big.NewInt(1)).FillBytes(input)
+ out, err = nistec.P256OrdInverse(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(out, exp) {
+ t.Error("unexpected output for inv(N+1)")
+ }
+
+ // Check inv(20) and inv(N+20) against math/big
+ exp = new(big.Int).ModInverse(big.NewInt(20), N).FillBytes(make([]byte, 32))
+ big.NewInt(20).FillBytes(input)
+ out, err = nistec.P256OrdInverse(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(out, exp) {
+ t.Error("unexpected output for inv(20)")
+ }
+ new(big.Int).Add(N, big.NewInt(20)).FillBytes(input)
+ out, err = nistec.P256OrdInverse(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(out, exp) {
+ t.Error("unexpected output for inv(N+20)")
+ }
+
+ // Check inv(2^256-1) against math/big
+ bigInput := new(big.Int).Lsh(big.NewInt(1), 256)
+ bigInput.Sub(bigInput, big.NewInt(1))
+ exp = new(big.Int).ModInverse(bigInput, N).FillBytes(make([]byte, 32))
+ bigInput.FillBytes(input)
+ out, err = nistec.P256OrdInverse(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(out, exp) {
+ t.Error("unexpected output for inv(2^256-1)")
+ }
+}
diff --git a/src/crypto/internal/nistec/p384.go b/src/crypto/internal/nistec/p384.go
new file mode 100644
index 0000000..b452ec9
--- /dev/null
+++ b/src/crypto/internal/nistec/p384.go
@@ -0,0 +1,540 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "crypto/subtle"
+ "errors"
+ "sync"
+)
+
+// p384ElementLength is the length of an element of the base or scalar field,
+// which have the same byte length for all NIST P curves.
+const p384ElementLength = 48
+
+// P384Point is a P384 point. The zero value is NOT valid.
+type P384Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P384Element
+}
+
+// NewP384Point returns a new P384Point representing the point at infinity.
+func NewP384Point() *P384Point {
+ return &P384Point{
+ x: new(fiat.P384Element),
+ y: new(fiat.P384Element).One(),
+ z: new(fiat.P384Element),
+ }
+}
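+
+// An illustrative usage sketch (not part of the API; it only uses
+// functions defined in this file):
+//
+//	p, err := NewP384Point().SetBytes(encoded) // decode a SEC 1 encoding
+//	if err != nil {
+//		// encoded was not a valid point
+//	}
+//	sum := NewP384Point().Add(p, NewP384Point().SetGenerator())
+//	out := sum.Bytes() // uncompressed encoding of the sum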
+
+// SetGenerator sets p to the canonical generator and returns p.
+func (p *P384Point) SetGenerator() *P384Point {
+ p.x.SetBytes([]byte{0xaa, 0x87, 0xca, 0x22, 0xbe, 0x8b, 0x5, 0x37, 0x8e, 0xb1, 0xc7, 0x1e, 0xf3, 0x20, 0xad, 0x74, 0x6e, 0x1d, 0x3b, 0x62, 0x8b, 0xa7, 0x9b, 0x98, 0x59, 0xf7, 0x41, 0xe0, 0x82, 0x54, 0x2a, 0x38, 0x55, 0x2, 0xf2, 0x5d, 0xbf, 0x55, 0x29, 0x6c, 0x3a, 0x54, 0x5e, 0x38, 0x72, 0x76, 0xa, 0xb7})
+ p.y.SetBytes([]byte{0x36, 0x17, 0xde, 0x4a, 0x96, 0x26, 0x2c, 0x6f, 0x5d, 0x9e, 0x98, 0xbf, 0x92, 0x92, 0xdc, 0x29, 0xf8, 0xf4, 0x1d, 0xbd, 0x28, 0x9a, 0x14, 0x7c, 0xe9, 0xda, 0x31, 0x13, 0xb5, 0xf0, 0xb8, 0xc0, 0xa, 0x60, 0xb1, 0xce, 0x1d, 0x7e, 0x81, 0x9d, 0x7a, 0x43, 0x1d, 0x7c, 0x90, 0xea, 0xe, 0x5f})
+ p.z.One()
+ return p
+}
+
+// Set sets p = q and returns p.
+func (p *P384Point) Set(q *P384Point) *P384Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P384Point) SetBytes(b []byte) (*P384Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP384Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p384ElementLength && b[0] == 4:
+ x, err := new(fiat.P384Element).SetBytes(b[1 : 1+p384ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P384Element).SetBytes(b[1+p384ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p384CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+p384ElementLength && (b[0] == 2 || b[0] == 3):
+ x, err := new(fiat.P384Element).SetBytes(b[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ // y² = x³ - 3x + b
+ y := p384Polynomial(new(fiat.P384Element), x)
+ if !p384Sqrt(y, y) {
+ return nil, errors.New("invalid P384 compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ otherRoot := new(fiat.P384Element)
+ otherRoot.Sub(otherRoot, y)
+ cond := y.Bytes()[p384ElementLength-1]&1 ^ b[0]&1
+ y.Select(otherRoot, y, int(cond))
+
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ default:
+ return nil, errors.New("invalid P384 point encoding")
+ }
+}
+
+var _p384B *fiat.P384Element
+var _p384BOnce sync.Once
+
+func p384B() *fiat.P384Element {
+ _p384BOnce.Do(func() {
+ _p384B, _ = new(fiat.P384Element).SetBytes([]byte{0xb3, 0x31, 0x2f, 0xa7, 0xe2, 0x3e, 0xe7, 0xe4, 0x98, 0x8e, 0x5, 0x6b, 0xe3, 0xf8, 0x2d, 0x19, 0x18, 0x1d, 0x9c, 0x6e, 0xfe, 0x81, 0x41, 0x12, 0x3, 0x14, 0x8, 0x8f, 0x50, 0x13, 0x87, 0x5a, 0xc6, 0x56, 0x39, 0x8d, 0x8a, 0x2e, 0xd1, 0x9d, 0x2a, 0x85, 0xc8, 0xed, 0xd3, 0xec, 0x2a, 0xef})
+ })
+ return _p384B
+}
+
+// p384Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func p384Polynomial(y2, x *fiat.P384Element) *fiat.P384Element {
+ y2.Square(x)
+ y2.Mul(y2, x)
+
+ threeX := new(fiat.P384Element).Add(x, x)
+ threeX.Add(threeX, x)
+ y2.Sub(y2, threeX)
+
+ return y2.Add(y2, p384B())
+}
+
+func p384CheckOnCurve(x, y *fiat.P384Element) error {
+ // y² = x³ - 3x + b
+ rhs := p384Polynomial(new(fiat.P384Element), x)
+ lhs := new(fiat.P384Element).Square(y)
+ if rhs.Equal(lhs) != 1 {
+ return errors.New("P384 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P384Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + 2*p384ElementLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P384Point) bytes(out *[1 + 2*p384ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P384Element).Invert(p.z)
+ x := new(fiat.P384Element).Mul(p.x, zinv)
+ y := new(fiat.P384Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, x.Bytes()...)
+ buf = append(buf, y.Bytes()...)
+ return buf
+}
+
+// BytesX returns the encoding of the x-coordinate of p, as specified in SEC 1,
+// Version 2.0, Section 2.3.5, or an error if p is the point at infinity.
+func (p *P384Point) BytesX() ([]byte, error) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p384ElementLength]byte
+ return p.bytesX(&out)
+}
+
+func (p *P384Point) bytesX(out *[p384ElementLength]byte) ([]byte, error) {
+ if p.z.IsZero() == 1 {
+ return nil, errors.New("P384 point is the point at infinity")
+ }
+
+ zinv := new(fiat.P384Element).Invert(p.z)
+ x := new(fiat.P384Element).Mul(p.x, zinv)
+
+ return append(out[:0], x.Bytes()...), nil
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *P384Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + p384ElementLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *P384Point) bytesCompressed(out *[1 + p384ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P384Element).Invert(p.z)
+ x := new(fiat.P384Element).Mul(p.x, zinv)
+ y := new(fiat.P384Element).Mul(p.y, zinv)
+
+ // Encode the sign of the y coordinate (indicated by the least significant
+ // bit) as the encoding type (2 or 3).
+ buf := append(out[:0], 2)
+ buf[0] |= y.Bytes()[p384ElementLength-1] & 1
+ buf = append(buf, x.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P384Point) Add(p1, p2 *P384Point) *P384Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P384Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P384Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P384Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P384Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P384Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P384Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P384Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P384Element).Mul(p384B(), t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p384B(), y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P384Point) Double(p *P384Point) *P384Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P384Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P384Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P384Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P384Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P384Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P384Element).Mul(p384B(), t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P384Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p384B(), z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P384Point) Select(p1, p2 *P384Point, cond int) *P384Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// A p384Table holds the first 15 multiples of a point at offset -1, so [1]P
+// is at table[0], [15]P is at table[14], and [0]P is implicitly the identity
+// point.
+type p384Table [15]*P384Point
+
+// Select selects the n-th multiple of the table base point into p. It works in
+// constant time by iterating over every entry of the table. n must be in [0, 15].
+func (table *p384Table) Select(p *P384Point, n uint8) {
+ if n >= 16 {
+ panic("nistec: internal error: p384Table called with out-of-bounds value")
+ }
+ p.Set(NewP384Point())
+ for i := uint8(1); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(i, n)
+ p.Select(table[i-1], p, cond)
+ }
+}
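+
+// Because p is reset to the identity first and only a matching entry
+// overwrites it, Select(p, 0) leaves p at infinity, which is the implicit
+// [0]P entry. An illustrative call pattern, assuming a table populated as in
+// ScalarMult below:
+//
+//	table.Select(t, 0)  // t becomes the point at infinity
+//	table.Select(t, 15) // t becomes table[14] = [15]P, read in constant time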
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P384Point) ScalarMult(q *P384Point, scalar []byte) (*P384Point, error) {
+ // Compute a p384Table for the base point q. The explicit NewP384Point
+ // calls get inlined, letting the allocations live on the stack.
+ var table = p384Table{NewP384Point(), NewP384Point(), NewP384Point(),
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point(),
+ NewP384Point(), NewP384Point(), NewP384Point(), NewP384Point()}
+ table[0].Set(q)
+ for i := 1; i < 15; i += 2 {
+ table[i].Double(table[i/2])
+ table[i+1].Add(table[i], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := NewP384Point()
+ p.Set(NewP384Point())
+ for i, byte := range scalar {
+ // No need to double on the first iteration, as p is the identity at
+ // this point, and [N]∞ = ∞.
+ if i != 0 {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ }
+
+ windowValue := byte >> 4
+ table.Select(t, windowValue)
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ windowValue = byte & 0b1111
+ table.Select(t, windowValue)
+ p.Add(p, t)
+ }
+
+ return p, nil
+}
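+
+// As a worked example of the window arithmetic above (not part of the
+// upstream code): for a scalar byte 0xb3, the accumulator is first doubled
+// four times (skipped only for the leading byte, where p is still the
+// identity), [0xb]P is added, it is doubled four more times, and [0x3]P is
+// added, so the byte contributes 0xb*16 + 0x3 = 0xb3 multiples of q, as
+// expected.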
+
+var p384GeneratorTable *[p384ElementLength * 2]p384Table
+var p384GeneratorTableOnce sync.Once
+
+// generatorTable returns a sequence of p384Tables. The first table contains
+// multiples of G. Each successive table is the previous table doubled four
+// times.
+func (p *P384Point) generatorTable() *[p384ElementLength * 2]p384Table {
+ p384GeneratorTableOnce.Do(func() {
+ p384GeneratorTable = new([p384ElementLength * 2]p384Table)
+ base := NewP384Point().SetGenerator()
+ for i := 0; i < p384ElementLength*2; i++ {
+ p384GeneratorTable[i][0] = NewP384Point().Set(base)
+ for j := 1; j < 15; j++ {
+ p384GeneratorTable[i][j] = NewP384Point().Add(p384GeneratorTable[i][j-1], base)
+ }
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ }
+ })
+ return p384GeneratorTable
+}
+
+// ScalarBaseMult sets p = scalar * B, where B is the canonical generator, and
+// returns p.
+func (p *P384Point) ScalarBaseMult(scalar []byte) (*P384Point, error) {
+ if len(scalar) != p384ElementLength {
+ return nil, errors.New("invalid scalar length")
+ }
+ tables := p.generatorTable()
+
+ // This is also a scalar multiplication with a four-bit window like in
+ // ScalarMult, but in this case the doublings are precomputed. The value
+ // [windowValue]G added at iteration k would normally get doubled
+ // (totIterations-k)×4 times, but with a larger precomputation we can
+ // instead add [2^((totIterations-k)×4)][windowValue]G and avoid the
+ // doublings between iterations.
+ t := NewP384Point()
+ p.Set(NewP384Point())
+ tableIndex := len(tables) - 1
+ for _, byte := range scalar {
+ windowValue := byte >> 4
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+
+ windowValue = byte & 0b1111
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+ }
+
+ return p, nil
+}
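+
+// The index arithmetic lines up because a 48-byte scalar has 96 four-bit
+// windows and len(tables) is p384ElementLength*2 = 96: the most significant
+// nibble uses tables[95], whose entries have been pre-doubled 4*95 = 380
+// times, matching that nibble's weight of 2^380, and the least significant
+// nibble ends at tables[0].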
+
+// p384Sqrt sets e to a square root of x. If x is not a square, p384Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func p384Sqrt(e, x *fiat.P384Element) (isSquare bool) {
+ candidate := new(fiat.P384Element)
+ p384SqrtCandidate(candidate, x)
+ square := new(fiat.P384Element).Square(candidate)
+ if square.Equal(x) != 1 {
+ return false
+ }
+ e.Set(candidate)
+ return true
+}
+
+// p384SqrtCandidate sets z to a square root candidate for x. z and x must not overlap.
+func p384SqrtCandidate(z, x *fiat.P384Element) {
+ // Since p = 3 mod 4, exponentiation by (p + 1) / 4 yields a square root candidate.
+ //
+ // The sequence of 14 multiplications and 381 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // _10 = 2*1
+ // _11 = 1 + _10
+ // _110 = 2*_11
+ // _111 = 1 + _110
+ // _111000 = _111 << 3
+ // _111111 = _111 + _111000
+ // _1111110 = 2*_111111
+ // _1111111 = 1 + _1111110
+ // x12 = _1111110 << 5 + _111111
+ // x24 = x12 << 12 + x12
+ // x31 = x24 << 7 + _1111111
+ // x32 = 2*x31 + 1
+ // x63 = x32 << 31 + x31
+ // x126 = x63 << 63 + x63
+ // x252 = x126 << 126 + x126
+ // x255 = x252 << 3 + _111
+ // return ((x255 << 33 + x32) << 64 + 1) << 30
+ //
+ var t0 = new(fiat.P384Element)
+ var t1 = new(fiat.P384Element)
+ var t2 = new(fiat.P384Element)
+
+ z.Square(x)
+ z.Mul(x, z)
+ z.Square(z)
+ t0.Mul(x, z)
+ z.Square(t0)
+ for s := 1; s < 3; s++ {
+ z.Square(z)
+ }
+ t1.Mul(t0, z)
+ t2.Square(t1)
+ z.Mul(x, t2)
+ for s := 0; s < 5; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ t2.Square(t1)
+ for s := 1; s < 12; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ for s := 0; s < 7; s++ {
+ t1.Square(t1)
+ }
+ t1.Mul(z, t1)
+ z.Square(t1)
+ z.Mul(x, z)
+ t2.Square(z)
+ for s := 1; s < 31; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ t2.Square(t1)
+ for s := 1; s < 63; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ t2.Square(t1)
+ for s := 1; s < 126; s++ {
+ t2.Square(t2)
+ }
+ t1.Mul(t1, t2)
+ for s := 0; s < 3; s++ {
+ t1.Square(t1)
+ }
+ t0.Mul(t0, t1)
+ for s := 0; s < 33; s++ {
+ t0.Square(t0)
+ }
+ z.Mul(z, t0)
+ for s := 0; s < 64; s++ {
+ z.Square(z)
+ }
+ z.Mul(x, z)
+ for s := 0; s < 30; s++ {
+ z.Square(z)
+ }
+}
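+
+// A hedged sanity check of the exponent (a throwaway sketch using math/big,
+// not part of this file): the P-384 prime is
+// p = 2^384 - 2^128 - 2^96 + 2^32 - 1, and raising a known square to
+// (p+1)/4 should recover its root.
+//
+//	p := new(big.Int).Lsh(big.NewInt(1), 384)
+//	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 128))
+//	p.Sub(p, new(big.Int).Lsh(big.NewInt(1), 96))
+//	p.Add(p, new(big.Int).Lsh(big.NewInt(1), 32))
+//	p.Sub(p, big.NewInt(1))
+//	e := new(big.Int).Rsh(new(big.Int).Add(p, big.NewInt(1)), 2) // (p+1)/4
+//	r := new(big.Int).Exp(big.NewInt(4), e, p)
+//	// r is exactly 2 here, the square root of 4, since p ≡ 7 (mod 8).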
diff --git a/src/crypto/internal/nistec/p521.go b/src/crypto/internal/nistec/p521.go
new file mode 100644
index 0000000..a57ad24
--- /dev/null
+++ b/src/crypto/internal/nistec/p521.go
@@ -0,0 +1,469 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package nistec
+
+import (
+ "crypto/internal/nistec/fiat"
+ "crypto/subtle"
+ "errors"
+ "sync"
+)
+
+// p521ElementLength is the length of an element of the base or scalar field,
+// which have the same byte length for all NIST P curves.
+const p521ElementLength = 66
+
+// P521Point is a P521 point. The zero value is NOT valid.
+type P521Point struct {
+ // The point is represented in projective coordinates (X:Y:Z),
+ // where x = X/Z and y = Y/Z.
+ x, y, z *fiat.P521Element
+}
+
+// NewP521Point returns a new P521Point representing the point at infinity.
+func NewP521Point() *P521Point {
+ return &P521Point{
+ x: new(fiat.P521Element),
+ y: new(fiat.P521Element).One(),
+ z: new(fiat.P521Element),
+ }
+}
+
+// SetGenerator sets p to the canonical generator and returns p.
+func (p *P521Point) SetGenerator() *P521Point {
+ p.x.SetBytes([]byte{0x0, 0xc6, 0x85, 0x8e, 0x6, 0xb7, 0x4, 0x4, 0xe9, 0xcd, 0x9e, 0x3e, 0xcb, 0x66, 0x23, 0x95, 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x5, 0x3f, 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, 0x4d, 0x3d, 0xba, 0xa1, 0x4b, 0x5e, 0x77, 0xef, 0xe7, 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, 0xff, 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, 0x6a, 0x42, 0x9b, 0xf9, 0x7e, 0x7e, 0x31, 0xc2, 0xe5, 0xbd, 0x66})
+ p.y.SetBytes([]byte{0x1, 0x18, 0x39, 0x29, 0x6a, 0x78, 0x9a, 0x3b, 0xc0, 0x4, 0x5c, 0x8a, 0x5f, 0xb4, 0x2c, 0x7d, 0x1b, 0xd9, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b, 0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17, 0x27, 0x3e, 0x66, 0x2c, 0x97, 0xee, 0x72, 0x99, 0x5e, 0xf4, 0x26, 0x40, 0xc5, 0x50, 0xb9, 0x1, 0x3f, 0xad, 0x7, 0x61, 0x35, 0x3c, 0x70, 0x86, 0xa2, 0x72, 0xc2, 0x40, 0x88, 0xbe, 0x94, 0x76, 0x9f, 0xd1, 0x66, 0x50})
+ p.z.One()
+ return p
+}
+
+// Set sets p = q and returns p.
+func (p *P521Point) Set(q *P521Point) *P521Point {
+ p.x.Set(q.x)
+ p.y.Set(q.y)
+ p.z.Set(q.z)
+ return p
+}
+
+// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
+// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
+// the curve, it returns nil and an error, and the receiver is unchanged.
+// Otherwise, it returns p.
+func (p *P521Point) SetBytes(b []byte) (*P521Point, error) {
+ switch {
+ // Point at infinity.
+ case len(b) == 1 && b[0] == 0:
+ return p.Set(NewP521Point()), nil
+
+ // Uncompressed form.
+ case len(b) == 1+2*p521ElementLength && b[0] == 4:
+ x, err := new(fiat.P521Element).SetBytes(b[1 : 1+p521ElementLength])
+ if err != nil {
+ return nil, err
+ }
+ y, err := new(fiat.P521Element).SetBytes(b[1+p521ElementLength:])
+ if err != nil {
+ return nil, err
+ }
+ if err := p521CheckOnCurve(x, y); err != nil {
+ return nil, err
+ }
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ // Compressed form.
+ case len(b) == 1+p521ElementLength && (b[0] == 2 || b[0] == 3):
+ x, err := new(fiat.P521Element).SetBytes(b[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ // y² = x³ - 3x + b
+ y := p521Polynomial(new(fiat.P521Element), x)
+ if !p521Sqrt(y, y) {
+ return nil, errors.New("invalid P521 compressed point encoding")
+ }
+
+ // Select the positive or negative root, as indicated by the least
+ // significant bit, based on the encoding type byte.
+ otherRoot := new(fiat.P521Element)
+ otherRoot.Sub(otherRoot, y)
+ cond := y.Bytes()[p521ElementLength-1]&1 ^ b[0]&1
+ y.Select(otherRoot, y, int(cond))
+
+ p.x.Set(x)
+ p.y.Set(y)
+ p.z.One()
+ return p, nil
+
+ default:
+ return nil, errors.New("invalid P521 point encoding")
+ }
+}
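+
+// In the compressed branch above, cond is 1 exactly when the parity of the
+// computed root disagrees with the encoding type byte, in which case the
+// negated root -y (which has the opposite parity, because p is odd) is
+// selected. A standalone sketch of the parity test (illustrative names):
+//
+//	rootLSB := y.Bytes()[p521ElementLength-1] & 1 // parity of candidate root
+//	wantLSB := b[0] & 1                           // 0 for type 2, 1 for type 3
+//	flip := int(rootLSB ^ wantLSB)                // 1 means take the other root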
+
+var _p521B *fiat.P521Element
+var _p521BOnce sync.Once
+
+func p521B() *fiat.P521Element {
+ _p521BOnce.Do(func() {
+ _p521B, _ = new(fiat.P521Element).SetBytes([]byte{0x0, 0x51, 0x95, 0x3e, 0xb9, 0x61, 0x8e, 0x1c, 0x9a, 0x1f, 0x92, 0x9a, 0x21, 0xa0, 0xb6, 0x85, 0x40, 0xee, 0xa2, 0xda, 0x72, 0x5b, 0x99, 0xb3, 0x15, 0xf3, 0xb8, 0xb4, 0x89, 0x91, 0x8e, 0xf1, 0x9, 0xe1, 0x56, 0x19, 0x39, 0x51, 0xec, 0x7e, 0x93, 0x7b, 0x16, 0x52, 0xc0, 0xbd, 0x3b, 0xb1, 0xbf, 0x7, 0x35, 0x73, 0xdf, 0x88, 0x3d, 0x2c, 0x34, 0xf1, 0xef, 0x45, 0x1f, 0xd4, 0x6b, 0x50, 0x3f, 0x0})
+ })
+ return _p521B
+}
+
+// p521Polynomial sets y2 to x³ - 3x + b, and returns y2.
+func p521Polynomial(y2, x *fiat.P521Element) *fiat.P521Element {
+ y2.Square(x)
+ y2.Mul(y2, x)
+
+ threeX := new(fiat.P521Element).Add(x, x)
+ threeX.Add(threeX, x)
+ y2.Sub(y2, threeX)
+
+ return y2.Add(y2, p521B())
+}
+
+func p521CheckOnCurve(x, y *fiat.P521Element) error {
+ // y² = x³ - 3x + b
+ rhs := p521Polynomial(new(fiat.P521Element), x)
+ lhs := new(fiat.P521Element).Square(y)
+ if rhs.Equal(lhs) != 1 {
+ return errors.New("P521 point not on curve")
+ }
+ return nil
+}
+
+// Bytes returns the uncompressed or infinity encoding of p, as specified in
+// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
+// infinity is shorter than all other encodings.
+func (p *P521Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + 2*p521ElementLength]byte
+ return p.bytes(&out)
+}
+
+func (p *P521Point) bytes(out *[1 + 2*p521ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P521Element).Invert(p.z)
+ x := new(fiat.P521Element).Mul(p.x, zinv)
+ y := new(fiat.P521Element).Mul(p.y, zinv)
+
+ buf := append(out[:0], 4)
+ buf = append(buf, x.Bytes()...)
+ buf = append(buf, y.Bytes()...)
+ return buf
+}
+
+// BytesX returns the encoding of the x-coordinate of p, as specified in SEC 1,
+// Version 2.0, Section 2.3.5, or an error if p is the point at infinity.
+func (p *P521Point) BytesX() ([]byte, error) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [p521ElementLength]byte
+ return p.bytesX(&out)
+}
+
+func (p *P521Point) bytesX(out *[p521ElementLength]byte) ([]byte, error) {
+ if p.z.IsZero() == 1 {
+ return nil, errors.New("P521 point is the point at infinity")
+ }
+
+ zinv := new(fiat.P521Element).Invert(p.z)
+ x := new(fiat.P521Element).Mul(p.x, zinv)
+
+ return append(out[:0], x.Bytes()...), nil
+}
+
+// BytesCompressed returns the compressed or infinity encoding of p, as
+// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
+// point at infinity is shorter than all other encodings.
+func (p *P521Point) BytesCompressed() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [1 + p521ElementLength]byte
+ return p.bytesCompressed(&out)
+}
+
+func (p *P521Point) bytesCompressed(out *[1 + p521ElementLength]byte) []byte {
+ if p.z.IsZero() == 1 {
+ return append(out[:0], 0)
+ }
+
+ zinv := new(fiat.P521Element).Invert(p.z)
+ x := new(fiat.P521Element).Mul(p.x, zinv)
+ y := new(fiat.P521Element).Mul(p.y, zinv)
+
+ // Encode the sign of the y coordinate (indicated by the least significant
+ // bit) as the encoding type (2 or 3).
+ buf := append(out[:0], 2)
+ buf[0] |= y.Bytes()[p521ElementLength-1] & 1
+ buf = append(buf, x.Bytes()...)
+ return buf
+}
+
+// Add sets q = p1 + p2, and returns q. The points may overlap.
+func (q *P521Point) Add(p1, p2 *P521Point) *P521Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P521Element).Mul(p1.x, p2.x) // t0 := X1 * X2
+ t1 := new(fiat.P521Element).Mul(p1.y, p2.y) // t1 := Y1 * Y2
+ t2 := new(fiat.P521Element).Mul(p1.z, p2.z) // t2 := Z1 * Z2
+ t3 := new(fiat.P521Element).Add(p1.x, p1.y) // t3 := X1 + Y1
+ t4 := new(fiat.P521Element).Add(p2.x, p2.y) // t4 := X2 + Y2
+ t3.Mul(t3, t4) // t3 := t3 * t4
+ t4.Add(t0, t1) // t4 := t0 + t1
+ t3.Sub(t3, t4) // t3 := t3 - t4
+ t4.Add(p1.y, p1.z) // t4 := Y1 + Z1
+ x3 := new(fiat.P521Element).Add(p2.y, p2.z) // X3 := Y2 + Z2
+ t4.Mul(t4, x3) // t4 := t4 * X3
+ x3.Add(t1, t2) // X3 := t1 + t2
+ t4.Sub(t4, x3) // t4 := t4 - X3
+ x3.Add(p1.x, p1.z) // X3 := X1 + Z1
+ y3 := new(fiat.P521Element).Add(p2.x, p2.z) // Y3 := X2 + Z2
+ x3.Mul(x3, y3) // X3 := X3 * Y3
+ y3.Add(t0, t2) // Y3 := t0 + t2
+ y3.Sub(x3, y3) // Y3 := X3 - Y3
+ z3 := new(fiat.P521Element).Mul(p521B(), t2) // Z3 := b * t2
+ x3.Sub(y3, z3) // X3 := Y3 - Z3
+ z3.Add(x3, x3) // Z3 := X3 + X3
+ x3.Add(x3, z3) // X3 := X3 + Z3
+ z3.Sub(t1, x3) // Z3 := t1 - X3
+ x3.Add(t1, x3) // X3 := t1 + X3
+ y3.Mul(p521B(), y3) // Y3 := b * Y3
+ t1.Add(t2, t2) // t1 := t2 + t2
+ t2.Add(t1, t2) // t2 := t1 + t2
+ y3.Sub(y3, t2) // Y3 := Y3 - t2
+ y3.Sub(y3, t0) // Y3 := Y3 - t0
+ t1.Add(y3, y3) // t1 := Y3 + Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ t1.Add(t0, t0) // t1 := t0 + t0
+ t0.Add(t1, t0) // t0 := t1 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t1.Mul(t4, y3) // t1 := t4 * Y3
+ t2.Mul(t0, y3) // t2 := t0 * Y3
+ y3.Mul(x3, z3) // Y3 := X3 * Z3
+ y3.Add(y3, t2) // Y3 := Y3 + t2
+ x3.Mul(t3, x3) // X3 := t3 * X3
+ x3.Sub(x3, t1) // X3 := X3 - t1
+ z3.Mul(t4, z3) // Z3 := t4 * Z3
+ t1.Mul(t3, t0) // t1 := t3 * t0
+ z3.Add(z3, t1) // Z3 := Z3 + t1
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Double sets q = p + p, and returns q. The points may overlap.
+func (q *P521Point) Double(p *P521Point) *P521Point {
+ // Complete addition formula for a = -3 from "Complete addition formulas for
+ // prime order elliptic curves" (https://eprint.iacr.org/2015/1060), §A.2.
+
+ t0 := new(fiat.P521Element).Square(p.x) // t0 := X ^ 2
+ t1 := new(fiat.P521Element).Square(p.y) // t1 := Y ^ 2
+ t2 := new(fiat.P521Element).Square(p.z) // t2 := Z ^ 2
+ t3 := new(fiat.P521Element).Mul(p.x, p.y) // t3 := X * Y
+ t3.Add(t3, t3) // t3 := t3 + t3
+ z3 := new(fiat.P521Element).Mul(p.x, p.z) // Z3 := X * Z
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ y3 := new(fiat.P521Element).Mul(p521B(), t2) // Y3 := b * t2
+ y3.Sub(y3, z3) // Y3 := Y3 - Z3
+ x3 := new(fiat.P521Element).Add(y3, y3) // X3 := Y3 + Y3
+ y3.Add(x3, y3) // Y3 := X3 + Y3
+ x3.Sub(t1, y3) // X3 := t1 - Y3
+ y3.Add(t1, y3) // Y3 := t1 + Y3
+ y3.Mul(x3, y3) // Y3 := X3 * Y3
+ x3.Mul(x3, t3) // X3 := X3 * t3
+ t3.Add(t2, t2) // t3 := t2 + t2
+ t2.Add(t2, t3) // t2 := t2 + t3
+ z3.Mul(p521B(), z3) // Z3 := b * Z3
+ z3.Sub(z3, t2) // Z3 := Z3 - t2
+ z3.Sub(z3, t0) // Z3 := Z3 - t0
+ t3.Add(z3, z3) // t3 := Z3 + Z3
+ z3.Add(z3, t3) // Z3 := Z3 + t3
+ t3.Add(t0, t0) // t3 := t0 + t0
+ t0.Add(t3, t0) // t0 := t3 + t0
+ t0.Sub(t0, t2) // t0 := t0 - t2
+ t0.Mul(t0, z3) // t0 := t0 * Z3
+ y3.Add(y3, t0) // Y3 := Y3 + t0
+ t0.Mul(p.y, p.z) // t0 := Y * Z
+ t0.Add(t0, t0) // t0 := t0 + t0
+ z3.Mul(t0, z3) // Z3 := t0 * Z3
+ x3.Sub(x3, z3) // X3 := X3 - Z3
+ z3.Mul(t0, t1) // Z3 := t0 * t1
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+ z3.Add(z3, z3) // Z3 := Z3 + Z3
+
+ q.x.Set(x3)
+ q.y.Set(y3)
+ q.z.Set(z3)
+ return q
+}
+
+// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
+func (q *P521Point) Select(p1, p2 *P521Point, cond int) *P521Point {
+ q.x.Select(p1.x, p2.x, cond)
+ q.y.Select(p1.y, p2.y, cond)
+ q.z.Select(p1.z, p2.z, cond)
+ return q
+}
+
+// A p521Table holds the first 15 multiples of a point at offset -1, so [1]P
+// is at table[0], [15]P is at table[14], and [0]P is implicitly the identity
+// point.
+type p521Table [15]*P521Point
+
+// Select selects the n-th multiple of the table base point into p. It works in
+// constant time by iterating over every entry of the table. n must be in [0, 15].
+func (table *p521Table) Select(p *P521Point, n uint8) {
+ if n >= 16 {
+ panic("nistec: internal error: p521Table called with out-of-bounds value")
+ }
+ p.Set(NewP521Point())
+ for i := uint8(1); i < 16; i++ {
+ cond := subtle.ConstantTimeByteEq(i, n)
+ p.Select(table[i-1], p, cond)
+ }
+}
+
+// ScalarMult sets p = scalar * q, and returns p.
+func (p *P521Point) ScalarMult(q *P521Point, scalar []byte) (*P521Point, error) {
+ // Compute a p521Table for the base point q. The explicit NewP521Point
+ // calls get inlined, letting the allocations live on the stack.
+ var table = p521Table{NewP521Point(), NewP521Point(), NewP521Point(),
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point(),
+ NewP521Point(), NewP521Point(), NewP521Point(), NewP521Point()}
+ table[0].Set(q)
+ for i := 1; i < 15; i += 2 {
+ table[i].Double(table[i/2])
+ table[i+1].Add(table[i], q)
+ }
+
+ // Instead of doing the classic double-and-add chain, we do it with a
+ // four-bit window: we double four times, and then add [0-15]P.
+ t := NewP521Point()
+ p.Set(NewP521Point())
+ for i, byte := range scalar {
+ // No need to double on the first iteration, as p is the identity at
+ // this point, and [N]∞ = ∞.
+ if i != 0 {
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ }
+
+ windowValue := byte >> 4
+ table.Select(t, windowValue)
+ p.Add(p, t)
+
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+ p.Double(p)
+
+ windowValue = byte & 0b1111
+ table.Select(t, windowValue)
+ p.Add(p, t)
+ }
+
+ return p, nil
+}
+
+var p521GeneratorTable *[p521ElementLength * 2]p521Table
+var p521GeneratorTableOnce sync.Once
+
+// generatorTable returns a sequence of p521Tables. The first table contains
+// multiples of G. Each successive table is the previous table doubled four
+// times.
+func (p *P521Point) generatorTable() *[p521ElementLength * 2]p521Table {
+ p521GeneratorTableOnce.Do(func() {
+ p521GeneratorTable = new([p521ElementLength * 2]p521Table)
+ base := NewP521Point().SetGenerator()
+ for i := 0; i < p521ElementLength*2; i++ {
+ p521GeneratorTable[i][0] = NewP521Point().Set(base)
+ for j := 1; j < 15; j++ {
+ p521GeneratorTable[i][j] = NewP521Point().Add(p521GeneratorTable[i][j-1], base)
+ }
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ base.Double(base)
+ }
+ })
+ return p521GeneratorTable
+}
+
+// ScalarBaseMult sets p = scalar * B, where B is the canonical generator, and
+// returns p.
+func (p *P521Point) ScalarBaseMult(scalar []byte) (*P521Point, error) {
+ if len(scalar) != p521ElementLength {
+ return nil, errors.New("invalid scalar length")
+ }
+ tables := p.generatorTable()
+
+ // This is also a scalar multiplication with a four-bit window like in
+ // ScalarMult, but in this case the doublings are precomputed. The value
+ // [windowValue]G added at iteration k would normally get doubled
+ // (totIterations-k)×4 times, but with a larger precomputation we can
+ // instead add [2^((totIterations-k)×4)][windowValue]G and avoid the
+ // doublings between iterations.
+ t := NewP521Point()
+ p.Set(NewP521Point())
+ tableIndex := len(tables) - 1
+ for _, byte := range scalar {
+ windowValue := byte >> 4
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+
+ windowValue = byte & 0b1111
+ tables[tableIndex].Select(t, windowValue)
+ p.Add(p, t)
+ tableIndex--
+ }
+
+ return p, nil
+}
+
+// p521Sqrt sets e to a square root of x. If x is not a square, p521Sqrt returns
+// false and e is unchanged. e and x can overlap.
+func p521Sqrt(e, x *fiat.P521Element) (isSquare bool) {
+ candidate := new(fiat.P521Element)
+ p521SqrtCandidate(candidate, x)
+ square := new(fiat.P521Element).Square(candidate)
+ if square.Equal(x) != 1 {
+ return false
+ }
+ e.Set(candidate)
+ return true
+}
+
+// p521SqrtCandidate sets z to a square root candidate for x. z and x must not overlap.
+func p521SqrtCandidate(z, x *fiat.P521Element) {
+ // Since p = 3 mod 4, exponentiation by (p + 1) / 4 yields a square root candidate.
+ //
+ // The sequence of 0 multiplications and 519 squarings is derived from the
+ // following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
+ //
+ // return 1 << 519
+ //
+
+ z.Square(x)
+ for s := 1; s < 519; s++ {
+ z.Square(z)
+ }
+}
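+
+// The chain degenerates to pure squarings because the P-521 prime is the
+// Mersenne prime p = 2^521 - 1, so (p + 1) / 4 = 2^519: the code performs
+// 519 squarings in total (one in z.Square(x), then 518 more in the loop),
+// computing x^(2^519) with no multiplications at all.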
diff --git a/src/crypto/internal/randutil/randutil.go b/src/crypto/internal/randutil/randutil.go
new file mode 100644
index 0000000..84b1295
--- /dev/null
+++ b/src/crypto/internal/randutil/randutil.go
@@ -0,0 +1,38 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package randutil contains internal randomness utilities for various
+// crypto packages.
+package randutil
+
+import (
+ "io"
+ "sync"
+)
+
+var (
+ closedChanOnce sync.Once
+ closedChan chan struct{}
+)
+
+// MaybeReadByte reads a single byte from r with ~50% probability. This is used
+// to ensure that callers do not depend on non-guaranteed behaviour, e.g.
+// assuming that rsa.GenerateKey is deterministic w.r.t. a given random stream.
+//
+// This does not affect tests that pass a stream of fixed bytes as the random
+// source (e.g. a zeroReader).
+func MaybeReadByte(r io.Reader) {
+ closedChanOnce.Do(func() {
+ closedChan = make(chan struct{})
+ close(closedChan)
+ })
+
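+	// closedChan is always ready to receive because it is closed, so both
+	// cases below are runnable and the select statement picks one of them
+	// pseudo-randomly, yielding the ~50% read probability documented above.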
+ select {
+ case <-closedChan:
+ return
+ case <-closedChan:
+ var buf [1]byte
+ r.Read(buf[:])
+ }
+}
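+
+// An illustrative caller-side sketch (hypothetical names, not part of this
+// file): a key generator guards against callers depending on its exact
+// consumption of the random stream by conditionally burning a byte first.
+//
+//	func GenerateKey(rand io.Reader) (*Key, error) {
+//		randutil.MaybeReadByte(rand)
+//		// ... derive the key from rand as usual ...
+//	}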