Diffstat (limited to 'src/crypto/aes')
-rw-r--r--  src/crypto/aes/aes_gcm.go          186
-rw-r--r--  src/crypto/aes/aes_test.go         383
-rw-r--r--  src/crypto/aes/asm_amd64.s         274
-rw-r--r--  src/crypto/aes/asm_arm64.s         281
-rw-r--r--  src/crypto/aes/asm_ppc64x.s        675
-rw-r--r--  src/crypto/aes/asm_s390x.s         191
-rw-r--r--  src/crypto/aes/block.go            182
-rw-r--r--  src/crypto/aes/cbc_ppc64x.go        74
-rw-r--r--  src/crypto/aes/cbc_s390x.go         66
-rw-r--r--  src/crypto/aes/cipher.go            82
-rw-r--r--  src/crypto/aes/cipher_asm.go       113
-rw-r--r--  src/crypto/aes/cipher_generic.go    26
-rw-r--r--  src/crypto/aes/cipher_s390x.go      96
-rw-r--r--  src/crypto/aes/const.go            365
-rw-r--r--  src/crypto/aes/ctr_s390x.go         84
-rw-r--r--  src/crypto/aes/gcm_amd64.s        1286
-rw-r--r--  src/crypto/aes/gcm_arm64.s        1021
-rw-r--r--  src/crypto/aes/gcm_ppc64x.go       265
-rw-r--r--  src/crypto/aes/gcm_ppc64x.s        590
-rw-r--r--  src/crypto/aes/gcm_s390x.go        371
-rw-r--r--  src/crypto/aes/modes.go             37
-rw-r--r--  src/crypto/aes/modes_test.go       112
22 files changed, 6760 insertions, 0 deletions
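
For orientation: the files below sit behind Go's public AES entry points, so the assembly fast paths are reached transparently through crypto/aes and crypto/cipher. A minimal usage sketch (standard-library calls only; this example is not part of the patch):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 16) // 16, 24, or 32 bytes select AES-128/192/256
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	// On amd64/arm64, cipher.NewGCM picks up the assembly-backed GCM in
	// aes_gcm.go via the gcmAble interface; elsewhere it falls back to
	// the generic implementation.
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}
	ct := aead.Seal(nil, nonce, []byte("hello, world"), nil)
	pt, err := aead.Open(nil, nonce, ct, nil)
	fmt.Println(string(pt), err) // hello, world <nil>
}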
diff --git a/src/crypto/aes/aes_gcm.go b/src/crypto/aes/aes_gcm.go
new file mode 100644
index 0000000..f77d279
--- /dev/null
+++ b/src/crypto/aes/aes_gcm.go
@@ -0,0 +1,186 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/alias"
+ "crypto/subtle"
+ "errors"
+)
+
+// The following functions are defined in gcm_*.s.
+
+//go:noescape
+func gcmAesInit(productTable *[256]byte, ks []uint32)
+
+//go:noescape
+func gcmAesData(productTable *[256]byte, data []byte, T *[16]byte)
+
+//go:noescape
+func gcmAesEnc(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
+
+//go:noescape
+func gcmAesDec(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
+
+//go:noescape
+func gcmAesFinish(productTable *[256]byte, tagMask, T *[16]byte, pLen, dLen uint64)
+
+const (
+ gcmBlockSize = 16
+ gcmTagSize = 16
+ gcmMinimumTagSize = 12 // NIST SP 800-38D recommends tags with 12 or more bytes.
+ gcmStandardNonceSize = 12
+)
+
+var errOpen = errors.New("cipher: message authentication failed")
+
+// Assert that aesCipherGCM implements the gcmAble interface.
+var _ gcmAble = (*aesCipherGCM)(nil)
+
+// NewGCM returns the AES cipher wrapped in Galois Counter Mode. This is only
+// called by crypto/cipher.NewGCM via the gcmAble interface.
+func (c *aesCipherGCM) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
+ g := &gcmAsm{ks: c.enc, nonceSize: nonceSize, tagSize: tagSize}
+ gcmAesInit(&g.productTable, g.ks)
+ return g, nil
+}
+
+type gcmAsm struct {
+ // ks is the key schedule, the length of which depends on the size of
+ // the AES key.
+ ks []uint32
+ // productTable contains pre-computed multiples of the binary-field
+ // element used in GHASH.
+ productTable [256]byte
+ // nonceSize contains the expected size of the nonce, in bytes.
+ nonceSize int
+ // tagSize contains the size of the tag, in bytes.
+ tagSize int
+}
+
+func (g *gcmAsm) NonceSize() int {
+ return g.nonceSize
+}
+
+func (g *gcmAsm) Overhead() int {
+ return g.tagSize
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
+
+// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
+// details.
+func (g *gcmAsm) Seal(dst, nonce, plaintext, data []byte) []byte {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ if uint64(len(plaintext)) > ((1<<32)-2)*BlockSize {
+ panic("crypto/cipher: message too large for GCM")
+ }
+
+ var counter, tagMask [gcmBlockSize]byte
+
+ if len(nonce) == gcmStandardNonceSize {
+ // Init counter to nonce||1
+ copy(counter[:], nonce)
+ counter[gcmBlockSize-1] = 1
+ } else {
+ // Otherwise counter = GHASH(nonce)
+ gcmAesData(&g.productTable, nonce, &counter)
+ gcmAesFinish(&g.productTable, &tagMask, &counter, uint64(len(nonce)), uint64(0))
+ }
+
+ encryptBlockAsm(len(g.ks)/4-1, &g.ks[0], &tagMask[0], &counter[0])
+
+ var tagOut [gcmTagSize]byte
+ gcmAesData(&g.productTable, data, &tagOut)
+
+ ret, out := sliceForAppend(dst, len(plaintext)+g.tagSize)
+ if alias.InexactOverlap(out[:len(plaintext)], plaintext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(plaintext) > 0 {
+ gcmAesEnc(&g.productTable, out, plaintext, &counter, &tagOut, g.ks)
+ }
+ gcmAesFinish(&g.productTable, &tagMask, &tagOut, uint64(len(plaintext)), uint64(len(data)))
+ copy(out[len(plaintext):], tagOut[:])
+
+ return ret
+}
+
+// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
+// for details.
+func (g *gcmAsm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ // Sanity check to prevent the authentication from always succeeding if an implementation
+ // leaves tagSize uninitialized, for example.
+ if g.tagSize < gcmMinimumTagSize {
+ panic("crypto/cipher: incorrect GCM tag size")
+ }
+
+ if len(ciphertext) < g.tagSize {
+ return nil, errOpen
+ }
+ if uint64(len(ciphertext)) > ((1<<32)-2)*uint64(BlockSize)+uint64(g.tagSize) {
+ return nil, errOpen
+ }
+
+ tag := ciphertext[len(ciphertext)-g.tagSize:]
+ ciphertext = ciphertext[:len(ciphertext)-g.tagSize]
+
+ // See GCM spec, section 7.1.
+ var counter, tagMask [gcmBlockSize]byte
+
+ if len(nonce) == gcmStandardNonceSize {
+ // Init counter to nonce||1
+ copy(counter[:], nonce)
+ counter[gcmBlockSize-1] = 1
+ } else {
+ // Otherwise counter = GHASH(nonce)
+ gcmAesData(&g.productTable, nonce, &counter)
+ gcmAesFinish(&g.productTable, &tagMask, &counter, uint64(len(nonce)), uint64(0))
+ }
+
+ encryptBlockAsm(len(g.ks)/4-1, &g.ks[0], &tagMask[0], &counter[0])
+
+ var expectedTag [gcmTagSize]byte
+ gcmAesData(&g.productTable, data, &expectedTag)
+
+ ret, out := sliceForAppend(dst, len(ciphertext))
+ if alias.InexactOverlap(out, ciphertext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(ciphertext) > 0 {
+ gcmAesDec(&g.productTable, out, ciphertext, &counter, &expectedTag, g.ks)
+ }
+ gcmAesFinish(&g.productTable, &tagMask, &expectedTag, uint64(len(ciphertext)), uint64(len(data)))
+
+ if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
+ for i := range out {
+ out[i] = 0
+ }
+ return nil, errOpen
+ }
+
+ return ret, nil
+}
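
The sliceForAppend helper above is the allocation-avoiding pattern crypto/cipher uses to write AEAD output in place: reuse dst's spare capacity when it is large enough, allocate otherwise. A self-contained sketch of its behavior (the helper body is copied from the file above; the demo in main is hypothetical):

package main

import "fmt"

func sliceForAppend(in []byte, n int) (head, tail []byte) {
	if total := len(in) + n; cap(in) >= total {
		head = in[:total]
	} else {
		head = make([]byte, total)
		copy(head, in)
	}
	tail = head[len(in):]
	return
}

func main() {
	buf := make([]byte, 3, 32) // spare capacity, so no allocation below
	head, tail := sliceForAppend(buf, 16)
	fmt.Println(len(head), len(tail)) // 19 16
	fmt.Println(&head[0] == &buf[0])  // true: head reuses buf's storage
}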
diff --git a/src/crypto/aes/aes_test.go b/src/crypto/aes/aes_test.go
new file mode 100644
index 0000000..1e8bac4
--- /dev/null
+++ b/src/crypto/aes/aes_test.go
@@ -0,0 +1,383 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "testing"
+)
+
+// See const.go for an overview of the math here.
+
+// Test that powx is initialized correctly.
+// (Can adapt this code to generate it too.)
+func TestPowx(t *testing.T) {
+ p := 1
+ for i := 0; i < len(powx); i++ {
+ if powx[i] != byte(p) {
+ t.Errorf("powx[%d] = %#x, want %#x", i, powx[i], p)
+ }
+ p <<= 1
+ if p&0x100 != 0 {
+ p ^= poly
+ }
+ }
+}
+
+// Multiply b and c as GF(2) polynomials modulo poly
+func mul(b, c uint32) uint32 {
+ i := b
+ j := c
+ s := uint32(0)
+ for k := uint32(1); k < 0x100 && j != 0; k <<= 1 {
+ // Invariant: k == 1<<n, i == b * xⁿ
+
+ if j&k != 0 {
+ // s += i in GF(2); xor in binary
+ s ^= i
+ j ^= k // turn off bit to end loop early
+ }
+
+ // i *= x in GF(2) modulo the polynomial
+ i <<= 1
+ if i&0x100 != 0 {
+ i ^= poly
+ }
+ }
+ return s
+}
+
+// Test all mul inputs against bit-by-bit n² algorithm.
+func TestMul(t *testing.T) {
+ for i := uint32(0); i < 256; i++ {
+ for j := uint32(0); j < 256; j++ {
+ // Multiply i, j bit by bit.
+ s := uint8(0)
+ for k := uint(0); k < 8; k++ {
+ for l := uint(0); l < 8; l++ {
+ if i&(1<<k) != 0 && j&(1<<l) != 0 {
+ s ^= powx[k+l]
+ }
+ }
+ }
+ if x := mul(i, j); x != uint32(s) {
+ t.Fatalf("mul(%#x, %#x) = %#x, want %#x", i, j, x, s)
+ }
+ }
+ }
+}
+
+// Check that S-boxes are inverses of each other.
+// They have more structure that we could test,
+// but if this sanity check passes, we'll assume
+// the cut and paste from the FIPS PDF worked.
+func TestSboxes(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ if j := sbox0[sbox1[i]]; j != byte(i) {
+ t.Errorf("sbox0[sbox1[%#x]] = %#x", i, j)
+ }
+ if j := sbox1[sbox0[i]]; j != byte(i) {
+ t.Errorf("sbox1[sbox0[%#x]] = %#x", i, j)
+ }
+ }
+}
+
+// Test that encryption tables are correct.
+// (Can adapt this code to generate them too.)
+func TestTe(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ s := uint32(sbox0[i])
+ s2 := mul(s, 2)
+ s3 := mul(s, 3)
+ w := s2<<24 | s<<16 | s<<8 | s3
+ te := [][256]uint32{te0, te1, te2, te3}
+ for j := 0; j < 4; j++ {
+ if x := te[j][i]; x != w {
+ t.Fatalf("te[%d][%d] = %#x, want %#x", j, i, x, w)
+ }
+ w = w<<24 | w>>8
+ }
+ }
+}
+
+// Test that decryption tables are correct.
+// (Can adapt this code to generate them too.)
+func TestTd(t *testing.T) {
+ for i := 0; i < 256; i++ {
+ s := uint32(sbox1[i])
+ s9 := mul(s, 0x9)
+ sb := mul(s, 0xb)
+ sd := mul(s, 0xd)
+ se := mul(s, 0xe)
+ w := se<<24 | s9<<16 | sd<<8 | sb
+ td := [][256]uint32{td0, td1, td2, td3}
+ for j := 0; j < 4; j++ {
+ if x := td[j][i]; x != w {
+ t.Fatalf("td[%d][%d] = %#x, want %#x", j, i, x, w)
+ }
+ w = w<<24 | w>>8
+ }
+ }
+}
+
+// Test vectors are from FIPS 197:
+// https://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
+
+// Appendix A of FIPS 197: Key expansion examples
+type KeyTest struct {
+ key []byte
+ enc []uint32
+ dec []uint32 // decryption expansion; not in FIPS 197, computed from C implementation.
+}
+
+var keyTests = []KeyTest{
+ {
+ // A.1. Expansion of a 128-bit Cipher Key
+ []byte{0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c},
+ []uint32{
+ 0x2b7e1516, 0x28aed2a6, 0xabf71588, 0x09cf4f3c,
+ 0xa0fafe17, 0x88542cb1, 0x23a33939, 0x2a6c7605,
+ 0xf2c295f2, 0x7a96b943, 0x5935807a, 0x7359f67f,
+ 0x3d80477d, 0x4716fe3e, 0x1e237e44, 0x6d7a883b,
+ 0xef44a541, 0xa8525b7f, 0xb671253b, 0xdb0bad00,
+ 0xd4d1c6f8, 0x7c839d87, 0xcaf2b8bc, 0x11f915bc,
+ 0x6d88a37a, 0x110b3efd, 0xdbf98641, 0xca0093fd,
+ 0x4e54f70e, 0x5f5fc9f3, 0x84a64fb2, 0x4ea6dc4f,
+ 0xead27321, 0xb58dbad2, 0x312bf560, 0x7f8d292f,
+ 0xac7766f3, 0x19fadc21, 0x28d12941, 0x575c006e,
+ 0xd014f9a8, 0xc9ee2589, 0xe13f0cc8, 0xb6630ca6,
+ },
+ []uint32{
+ 0xd014f9a8, 0xc9ee2589, 0xe13f0cc8, 0xb6630ca6,
+ 0xc7b5a63, 0x1319eafe, 0xb0398890, 0x664cfbb4,
+ 0xdf7d925a, 0x1f62b09d, 0xa320626e, 0xd6757324,
+ 0x12c07647, 0xc01f22c7, 0xbc42d2f3, 0x7555114a,
+ 0x6efcd876, 0xd2df5480, 0x7c5df034, 0xc917c3b9,
+ 0x6ea30afc, 0xbc238cf6, 0xae82a4b4, 0xb54a338d,
+ 0x90884413, 0xd280860a, 0x12a12842, 0x1bc89739,
+ 0x7c1f13f7, 0x4208c219, 0xc021ae48, 0x969bf7b,
+ 0xcc7505eb, 0x3e17d1ee, 0x82296c51, 0xc9481133,
+ 0x2b3708a7, 0xf262d405, 0xbc3ebdbf, 0x4b617d62,
+ 0x2b7e1516, 0x28aed2a6, 0xabf71588, 0x9cf4f3c,
+ },
+ },
+ {
+ // A.2. Expansion of a 192-bit Cipher Key
+ []byte{
+ 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52, 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+ 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b,
+ },
+ []uint32{
+ 0x8e73b0f7, 0xda0e6452, 0xc810f32b, 0x809079e5,
+ 0x62f8ead2, 0x522c6b7b, 0xfe0c91f7, 0x2402f5a5,
+ 0xec12068e, 0x6c827f6b, 0x0e7a95b9, 0x5c56fec2,
+ 0x4db7b4bd, 0x69b54118, 0x85a74796, 0xe92538fd,
+ 0xe75fad44, 0xbb095386, 0x485af057, 0x21efb14f,
+ 0xa448f6d9, 0x4d6dce24, 0xaa326360, 0x113b30e6,
+ 0xa25e7ed5, 0x83b1cf9a, 0x27f93943, 0x6a94f767,
+ 0xc0a69407, 0xd19da4e1, 0xec1786eb, 0x6fa64971,
+ 0x485f7032, 0x22cb8755, 0xe26d1352, 0x33f0b7b3,
+ 0x40beeb28, 0x2f18a259, 0x6747d26b, 0x458c553e,
+ 0xa7e1466c, 0x9411f1df, 0x821f750a, 0xad07d753,
+ 0xca400538, 0x8fcc5006, 0x282d166a, 0xbc3ce7b5,
+ 0xe98ba06f, 0x448c773c, 0x8ecc7204, 0x01002202,
+ },
+ nil,
+ },
+ {
+ // A.3. Expansion of a 256-bit Cipher Key
+ []byte{
+ 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+ 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4,
+ },
+ []uint32{
+ 0x603deb10, 0x15ca71be, 0x2b73aef0, 0x857d7781,
+ 0x1f352c07, 0x3b6108d7, 0x2d9810a3, 0x0914dff4,
+ 0x9ba35411, 0x8e6925af, 0xa51a8b5f, 0x2067fcde,
+ 0xa8b09c1a, 0x93d194cd, 0xbe49846e, 0xb75d5b9a,
+ 0xd59aecb8, 0x5bf3c917, 0xfee94248, 0xde8ebe96,
+ 0xb5a9328a, 0x2678a647, 0x98312229, 0x2f6c79b3,
+ 0x812c81ad, 0xdadf48ba, 0x24360af2, 0xfab8b464,
+ 0x98c5bfc9, 0xbebd198e, 0x268c3ba7, 0x09e04214,
+ 0x68007bac, 0xb2df3316, 0x96e939e4, 0x6c518d80,
+ 0xc814e204, 0x76a9fb8a, 0x5025c02d, 0x59c58239,
+ 0xde136967, 0x6ccc5a71, 0xfa256395, 0x9674ee15,
+ 0x5886ca5d, 0x2e2f31d7, 0x7e0af1fa, 0x27cf73c3,
+ 0x749c47ab, 0x18501dda, 0xe2757e4f, 0x7401905a,
+ 0xcafaaae3, 0xe4d59b34, 0x9adf6ace, 0xbd10190d,
+ 0xfe4890d1, 0xe6188d0b, 0x046df344, 0x706c631e,
+ },
+ nil,
+ },
+}
+
+// Test key expansion against FIPS 197 examples.
+func TestExpandKey(t *testing.T) {
+L:
+ for i, tt := range keyTests {
+ enc := make([]uint32, len(tt.enc))
+ var dec []uint32
+ if tt.dec != nil {
+ dec = make([]uint32, len(tt.dec))
+ }
+ // This test can only test the Go version of expandKey, because the
+ // assembly version may use a different memory layout for the expanded
+ // keys. That is OK because we don't expose expanded keys to the outside.
+ expandKeyGo(tt.key, enc, dec)
+ for j, v := range enc {
+ if v != tt.enc[j] {
+ t.Errorf("key %d: enc[%d] = %#x, want %#x", i, j, v, tt.enc[j])
+ continue L
+ }
+ }
+ for j, v := range dec {
+ if v != tt.dec[j] {
+ t.Errorf("key %d: dec[%d] = %#x, want %#x", i, j, v, tt.dec[j])
+ continue L
+ }
+ }
+ }
+}
+
+// Appendix B, C of FIPS 197: Cipher examples, Example vectors.
+type CryptTest struct {
+ key []byte
+ in []byte
+ out []byte
+}
+
+var encryptTests = []CryptTest{
+ {
+ // Appendix B.
+ []byte{0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c},
+ []byte{0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d, 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34},
+ []byte{0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb, 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32},
+ },
+ {
+ // Appendix C.1. AES-128
+ []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+ []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff},
+ []byte{0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30, 0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a},
+ },
+ {
+ // Appendix C.2. AES-192
+ []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ },
+ []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff},
+ []byte{0xdd, 0xa9, 0x7c, 0xa4, 0x86, 0x4c, 0xdf, 0xe0, 0x6e, 0xaf, 0x70, 0xa0, 0xec, 0x0d, 0x71, 0x91},
+ },
+ {
+ // Appendix C.3. AES-256
+ []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ },
+ []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff},
+ []byte{0x8e, 0xa2, 0xb7, 0xca, 0x51, 0x67, 0x45, 0xbf, 0xea, 0xfc, 0x49, 0x90, 0x4b, 0x49, 0x60, 0x89},
+ },
+}
+
+// Test Cipher Encrypt method against FIPS 197 examples.
+func TestCipherEncrypt(t *testing.T) {
+ for i, tt := range encryptTests {
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
+ continue
+ }
+ out := make([]byte, len(tt.in))
+ c.Encrypt(out, tt.in)
+ for j, v := range out {
+ if v != tt.out[j] {
+ t.Errorf("Cipher.Encrypt %d: out[%d] = %#x, want %#x", i, j, v, tt.out[j])
+ break
+ }
+ }
+ }
+}
+
+// Test Cipher Decrypt against FIPS 197 examples.
+func TestCipherDecrypt(t *testing.T) {
+ for i, tt := range encryptTests {
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
+ continue
+ }
+ plain := make([]byte, len(tt.in))
+ c.Decrypt(plain, tt.out)
+ for j, v := range plain {
+ if v != tt.in[j] {
+ t.Errorf("decryptBlock %d: plain[%d] = %#x, want %#x", i, j, v, tt.in[j])
+ break
+ }
+ }
+ }
+}
+
+// Test short input/output.
+// Assembly used to not notice.
+// See issue 7928.
+func TestShortBlocks(t *testing.T) {
+ bytes := func(n int) []byte { return make([]byte, n) }
+
+ c, _ := NewCipher(bytes(16))
+
+ mustPanic(t, "crypto/aes: input not full block", func() { c.Encrypt(bytes(1), bytes(1)) })
+ mustPanic(t, "crypto/aes: input not full block", func() { c.Decrypt(bytes(1), bytes(1)) })
+ mustPanic(t, "crypto/aes: input not full block", func() { c.Encrypt(bytes(100), bytes(1)) })
+ mustPanic(t, "crypto/aes: input not full block", func() { c.Decrypt(bytes(100), bytes(1)) })
+ mustPanic(t, "crypto/aes: output not full block", func() { c.Encrypt(bytes(1), bytes(100)) })
+ mustPanic(t, "crypto/aes: output not full block", func() { c.Decrypt(bytes(1), bytes(100)) })
+}
+
+func mustPanic(t *testing.T, msg string, f func()) {
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Errorf("function did not panic, wanted %q", msg)
+ } else if err != msg {
+ t.Errorf("got panic %v, wanted %q", err, msg)
+ }
+ }()
+ f()
+}
+
+func BenchmarkEncrypt(b *testing.B) {
+ tt := encryptTests[0]
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ b.Fatal("NewCipher:", err)
+ }
+ out := make([]byte, len(tt.in))
+ b.SetBytes(int64(len(out)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ c.Encrypt(out, tt.in)
+ }
+}
+
+func BenchmarkDecrypt(b *testing.B) {
+ tt := encryptTests[0]
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ b.Fatal("NewCipher:", err)
+ }
+ out := make([]byte, len(tt.out))
+ b.SetBytes(int64(len(out)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ c.Decrypt(out, tt.out)
+ }
+}
+
+func BenchmarkExpand(b *testing.B) {
+ tt := encryptTests[0]
+ n := len(tt.key) + 28
+ c := &aesCipher{make([]uint32, n), make([]uint32, n)}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ expandKey(tt.key, c.enc, c.dec)
+ }
+}
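
The mul helper in the test above is the GF(2⁸) multiplication that the table checks rest on. A self-contained sketch, with the reduction polynomial written out explicitly (0x11b is the standard AES polynomial; const.go's poly constant is assumed to match) and the result checked against the worked example in FIPS 197 §4.2, {57} · {83} = {c1}:

package main

import "fmt"

// poly is x⁸ + x⁴ + x³ + x + 1, the AES field polynomial (0x11b).
const poly = 1<<8 | 1<<4 | 1<<3 | 1<<1 | 1<<0

// mul multiplies b and c as GF(2) polynomials modulo poly,
// the same shift-and-xor loop used by the test.
func mul(b, c uint32) uint32 {
	s := uint32(0)
	for i := b; c != 0; c >>= 1 {
		if c&1 != 0 {
			s ^= i // add (xor) the current multiple of b
		}
		i <<= 1 // multiply by x...
		if i&0x100 != 0 {
			i ^= poly // ...and reduce modulo the field polynomial
		}
	}
	return s
}

func main() {
	fmt.Printf("%#x\n", mul(0x57, 0x83)) // 0xc1
}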
diff --git a/src/crypto/aes/asm_amd64.s b/src/crypto/aes/asm_amd64.s
new file mode 100644
index 0000000..ed831bf
--- /dev/null
+++ b/src/crypto/aes/asm_amd64.s
@@ -0,0 +1,274 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+TEXT ·encryptBlockAsm(SB),NOSPLIT,$0
+ MOVQ nr+0(FP), CX
+ MOVQ xk+8(FP), AX
+ MOVQ dst+16(FP), DX
+ MOVQ src+24(FP), BX
+ MOVUPS 0(AX), X1
+ MOVUPS 0(BX), X0
+ ADDQ $16, AX
+ PXOR X1, X0
+ SUBQ $12, CX
+ JE Lenc192
+ JB Lenc128
+Lenc256:
+ MOVUPS 0(AX), X1
+ AESENC X1, X0
+ MOVUPS 16(AX), X1
+ AESENC X1, X0
+ ADDQ $32, AX
+Lenc192:
+ MOVUPS 0(AX), X1
+ AESENC X1, X0
+ MOVUPS 16(AX), X1
+ AESENC X1, X0
+ ADDQ $32, AX
+Lenc128:
+ MOVUPS 0(AX), X1
+ AESENC X1, X0
+ MOVUPS 16(AX), X1
+ AESENC X1, X0
+ MOVUPS 32(AX), X1
+ AESENC X1, X0
+ MOVUPS 48(AX), X1
+ AESENC X1, X0
+ MOVUPS 64(AX), X1
+ AESENC X1, X0
+ MOVUPS 80(AX), X1
+ AESENC X1, X0
+ MOVUPS 96(AX), X1
+ AESENC X1, X0
+ MOVUPS 112(AX), X1
+ AESENC X1, X0
+ MOVUPS 128(AX), X1
+ AESENC X1, X0
+ MOVUPS 144(AX), X1
+ AESENCLAST X1, X0
+ MOVUPS X0, 0(DX)
+ RET
+
+// func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+TEXT ·decryptBlockAsm(SB),NOSPLIT,$0
+ MOVQ nr+0(FP), CX
+ MOVQ xk+8(FP), AX
+ MOVQ dst+16(FP), DX
+ MOVQ src+24(FP), BX
+ MOVUPS 0(AX), X1
+ MOVUPS 0(BX), X0
+ ADDQ $16, AX
+ PXOR X1, X0
+ SUBQ $12, CX
+ JE Ldec192
+ JB Ldec128
+Ldec256:
+ MOVUPS 0(AX), X1
+ AESDEC X1, X0
+ MOVUPS 16(AX), X1
+ AESDEC X1, X0
+ ADDQ $32, AX
+Ldec192:
+ MOVUPS 0(AX), X1
+ AESDEC X1, X0
+ MOVUPS 16(AX), X1
+ AESDEC X1, X0
+ ADDQ $32, AX
+Ldec128:
+ MOVUPS 0(AX), X1
+ AESDEC X1, X0
+ MOVUPS 16(AX), X1
+ AESDEC X1, X0
+ MOVUPS 32(AX), X1
+ AESDEC X1, X0
+ MOVUPS 48(AX), X1
+ AESDEC X1, X0
+ MOVUPS 64(AX), X1
+ AESDEC X1, X0
+ MOVUPS 80(AX), X1
+ AESDEC X1, X0
+ MOVUPS 96(AX), X1
+ AESDEC X1, X0
+ MOVUPS 112(AX), X1
+ AESDEC X1, X0
+ MOVUPS 128(AX), X1
+ AESDEC X1, X0
+ MOVUPS 144(AX), X1
+ AESDECLAST X1, X0
+ MOVUPS X0, 0(DX)
+ RET
+
+// func expandKeyAsm(nr int, key *byte, enc, dec *uint32)
+// Note that round keys are stored in uint128 format, not uint32
+TEXT ·expandKeyAsm(SB),NOSPLIT,$0
+ MOVQ nr+0(FP), CX
+ MOVQ key+8(FP), AX
+ MOVQ enc+16(FP), BX
+ MOVQ dec+24(FP), DX
+ MOVUPS (AX), X0
+ // enc
+ MOVUPS X0, (BX)
+ ADDQ $16, BX
+ PXOR X4, X4 // _expand_key_* expect X4 to be zero
+ CMPL CX, $12
+ JE Lexp_enc192
+ JB Lexp_enc128
+Lexp_enc256:
+ MOVUPS 16(AX), X2
+ MOVUPS X2, (BX)
+ ADDQ $16, BX
+ AESKEYGENASSIST $0x01, X2, X1
+ CALL _expand_key_256a<>(SB)
+ AESKEYGENASSIST $0x01, X0, X1
+ CALL _expand_key_256b<>(SB)
+ AESKEYGENASSIST $0x02, X2, X1
+ CALL _expand_key_256a<>(SB)
+ AESKEYGENASSIST $0x02, X0, X1
+ CALL _expand_key_256b<>(SB)
+ AESKEYGENASSIST $0x04, X2, X1
+ CALL _expand_key_256a<>(SB)
+ AESKEYGENASSIST $0x04, X0, X1
+ CALL _expand_key_256b<>(SB)
+ AESKEYGENASSIST $0x08, X2, X1
+ CALL _expand_key_256a<>(SB)
+ AESKEYGENASSIST $0x08, X0, X1
+ CALL _expand_key_256b<>(SB)
+ AESKEYGENASSIST $0x10, X2, X1
+ CALL _expand_key_256a<>(SB)
+ AESKEYGENASSIST $0x10, X0, X1
+ CALL _expand_key_256b<>(SB)
+ AESKEYGENASSIST $0x20, X2, X1
+ CALL _expand_key_256a<>(SB)
+ AESKEYGENASSIST $0x20, X0, X1
+ CALL _expand_key_256b<>(SB)
+ AESKEYGENASSIST $0x40, X2, X1
+ CALL _expand_key_256a<>(SB)
+ JMP Lexp_dec
+Lexp_enc192:
+ MOVQ 16(AX), X2
+ AESKEYGENASSIST $0x01, X2, X1
+ CALL _expand_key_192a<>(SB)
+ AESKEYGENASSIST $0x02, X2, X1
+ CALL _expand_key_192b<>(SB)
+ AESKEYGENASSIST $0x04, X2, X1
+ CALL _expand_key_192a<>(SB)
+ AESKEYGENASSIST $0x08, X2, X1
+ CALL _expand_key_192b<>(SB)
+ AESKEYGENASSIST $0x10, X2, X1
+ CALL _expand_key_192a<>(SB)
+ AESKEYGENASSIST $0x20, X2, X1
+ CALL _expand_key_192b<>(SB)
+ AESKEYGENASSIST $0x40, X2, X1
+ CALL _expand_key_192a<>(SB)
+ AESKEYGENASSIST $0x80, X2, X1
+ CALL _expand_key_192b<>(SB)
+ JMP Lexp_dec
+Lexp_enc128:
+ AESKEYGENASSIST $0x01, X0, X1
+ CALL _expand_key_128<>(SB)
+ AESKEYGENASSIST $0x02, X0, X1
+ CALL _expand_key_128<>(SB)
+ AESKEYGENASSIST $0x04, X0, X1
+ CALL _expand_key_128<>(SB)
+ AESKEYGENASSIST $0x08, X0, X1
+ CALL _expand_key_128<>(SB)
+ AESKEYGENASSIST $0x10, X0, X1
+ CALL _expand_key_128<>(SB)
+ AESKEYGENASSIST $0x20, X0, X1
+ CALL _expand_key_128<>(SB)
+ AESKEYGENASSIST $0x40, X0, X1
+ CALL _expand_key_128<>(SB)
+ AESKEYGENASSIST $0x80, X0, X1
+ CALL _expand_key_128<>(SB)
+ AESKEYGENASSIST $0x1b, X0, X1
+ CALL _expand_key_128<>(SB)
+ AESKEYGENASSIST $0x36, X0, X1
+ CALL _expand_key_128<>(SB)
+Lexp_dec:
+ // dec
+ SUBQ $16, BX
+ MOVUPS (BX), X1
+ MOVUPS X1, (DX)
+ DECQ CX
+Lexp_dec_loop:
+ MOVUPS -16(BX), X1
+ AESIMC X1, X0
+ MOVUPS X0, 16(DX)
+ SUBQ $16, BX
+ ADDQ $16, DX
+ DECQ CX
+ JNZ Lexp_dec_loop
+ MOVUPS -16(BX), X0
+ MOVUPS X0, 16(DX)
+ RET
+
+TEXT _expand_key_128<>(SB),NOSPLIT,$0
+ PSHUFD $0xff, X1, X1
+ SHUFPS $0x10, X0, X4
+ PXOR X4, X0
+ SHUFPS $0x8c, X0, X4
+ PXOR X4, X0
+ PXOR X1, X0
+ MOVUPS X0, (BX)
+ ADDQ $16, BX
+ RET
+
+TEXT _expand_key_192a<>(SB),NOSPLIT,$0
+ PSHUFD $0x55, X1, X1
+ SHUFPS $0x10, X0, X4
+ PXOR X4, X0
+ SHUFPS $0x8c, X0, X4
+ PXOR X4, X0
+ PXOR X1, X0
+
+ MOVAPS X2, X5
+ MOVAPS X2, X6
+ PSLLDQ $0x4, X5
+ PSHUFD $0xff, X0, X3
+ PXOR X3, X2
+ PXOR X5, X2
+
+ MOVAPS X0, X1
+ SHUFPS $0x44, X0, X6
+ MOVUPS X6, (BX)
+ SHUFPS $0x4e, X2, X1
+ MOVUPS X1, 16(BX)
+ ADDQ $32, BX
+ RET
+
+TEXT _expand_key_192b<>(SB),NOSPLIT,$0
+ PSHUFD $0x55, X1, X1
+ SHUFPS $0x10, X0, X4
+ PXOR X4, X0
+ SHUFPS $0x8c, X0, X4
+ PXOR X4, X0
+ PXOR X1, X0
+
+ MOVAPS X2, X5
+ PSLLDQ $0x4, X5
+ PSHUFD $0xff, X0, X3
+ PXOR X3, X2
+ PXOR X5, X2
+
+ MOVUPS X0, (BX)
+ ADDQ $16, BX
+ RET
+
+TEXT _expand_key_256a<>(SB),NOSPLIT,$0
+ JMP _expand_key_128<>(SB)
+
+TEXT _expand_key_256b<>(SB),NOSPLIT,$0
+ PSHUFD $0xaa, X1, X1
+ SHUFPS $0x10, X2, X4
+ PXOR X4, X2
+ SHUFPS $0x8c, X2, X4
+ PXOR X4, X2
+ PXOR X1, X2
+
+ MOVUPS X2, (BX)
+ ADDQ $16, BX
+ RET
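
The JE/JB dispatch on nr-12 above selects the 10-, 12-, or 14-round ladder. The round count the Go side passes in follows directly from the key length, which is also why aes_gcm.go computes it as len(g.ks)/4 - 1 from the expanded key. A one-line sketch of the relation (hypothetical helper name):

package main

import "fmt"

// rounds reports the AES round count for a key of the given byte length:
// 10 for AES-128, 12 for AES-192, 14 for AES-256.
func rounds(keyBytes int) int { return keyBytes/4 + 6 }

func main() {
	for _, n := range []int{16, 24, 32} {
		fmt.Printf("%d-byte key -> %d rounds\n", n, rounds(n))
	}
}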
diff --git a/src/crypto/aes/asm_arm64.s b/src/crypto/aes/asm_arm64.s
new file mode 100644
index 0000000..4a02e94
--- /dev/null
+++ b/src/crypto/aes/asm_arm64.s
@@ -0,0 +1,281 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+DATA rotInvSRows<>+0x00(SB)/8, $0x080f0205040b0e01
+DATA rotInvSRows<>+0x08(SB)/8, $0x00070a0d0c030609
+GLOBL rotInvSRows<>(SB), (NOPTR+RODATA), $16
+DATA invSRows<>+0x00(SB)/8, $0x0b0e0104070a0d00
+DATA invSRows<>+0x08(SB)/8, $0x0306090c0f020508
+GLOBL invSRows<>(SB), (NOPTR+RODATA), $16
+// func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+TEXT ·encryptBlockAsm(SB),NOSPLIT,$0
+ MOVD nr+0(FP), R9
+ MOVD xk+8(FP), R10
+ MOVD dst+16(FP), R11
+ MOVD src+24(FP), R12
+
+ VLD1 (R12), [V0.B16]
+
+ CMP $12, R9
+ BLT enc128
+ BEQ enc192
+enc256:
+ VLD1.P 32(R10), [V1.B16, V2.B16]
+ AESE V1.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V2.B16, V0.B16
+ AESMC V0.B16, V0.B16
+enc192:
+ VLD1.P 32(R10), [V3.B16, V4.B16]
+ AESE V3.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V4.B16, V0.B16
+ AESMC V0.B16, V0.B16
+enc128:
+ VLD1.P 64(R10), [V5.B16, V6.B16, V7.B16, V8.B16]
+ VLD1.P 64(R10), [V9.B16, V10.B16, V11.B16, V12.B16]
+ VLD1.P 48(R10), [V13.B16, V14.B16, V15.B16]
+ AESE V5.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V6.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V7.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V8.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V9.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V10.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V11.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V12.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V13.B16, V0.B16
+ AESMC V0.B16, V0.B16
+ AESE V14.B16, V0.B16
+ VEOR V0.B16, V15.B16, V0.B16
+ VST1 [V0.B16], (R11)
+ RET
+
+// func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+TEXT ·decryptBlockAsm(SB),NOSPLIT,$0
+ MOVD nr+0(FP), R9
+ MOVD xk+8(FP), R10
+ MOVD dst+16(FP), R11
+ MOVD src+24(FP), R12
+
+ VLD1 (R12), [V0.B16]
+
+ CMP $12, R9
+ BLT dec128
+ BEQ dec192
+dec256:
+ VLD1.P 32(R10), [V1.B16, V2.B16]
+ AESD V1.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V2.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+dec192:
+ VLD1.P 32(R10), [V3.B16, V4.B16]
+ AESD V3.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V4.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+dec128:
+ VLD1.P 64(R10), [V5.B16, V6.B16, V7.B16, V8.B16]
+ VLD1.P 64(R10), [V9.B16, V10.B16, V11.B16, V12.B16]
+ VLD1.P 48(R10), [V13.B16, V14.B16, V15.B16]
+ AESD V5.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V6.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V7.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V8.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V9.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V10.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V11.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V12.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V13.B16, V0.B16
+ AESIMC V0.B16, V0.B16
+ AESD V14.B16, V0.B16
+ VEOR V0.B16, V15.B16, V0.B16
+ VST1 [V0.B16], (R11)
+ RET
+
+// func expandKeyAsm(nr int, key *byte, enc, dec *uint32)
+// Note that round keys are stored in uint128 format, not uint32
+TEXT ·expandKeyAsm(SB),NOSPLIT,$0
+ MOVD nr+0(FP), R8
+ MOVD key+8(FP), R9
+ MOVD enc+16(FP), R10
+ MOVD dec+24(FP), R11
+ LDP rotInvSRows<>(SB), (R0, R1)
+ VMOV R0, V3.D[0]
+ VMOV R1, V3.D[1]
+ VEOR V0.B16, V0.B16, V0.B16 // All zeroes
+ MOVW $1, R13
+ TBZ $1, R8, ks192
+ TBNZ $2, R8, ks256
+ LDPW (R9), (R4, R5)
+ LDPW 8(R9), (R6, R7)
+ STPW.P (R4, R5), 8(R10)
+ STPW.P (R6, R7), 8(R10)
+ MOVW $0x1b, R14
+ks128Loop:
+ VMOV R7, V2.S[0]
+ WORD $0x4E030042 // TBL V3.B16, [V2.B16], V2.B16
+ AESE V0.B16, V2.B16 // Use AES to compute the SBOX
+ EORW R13, R4
+ LSLW $1, R13 // Compute next Rcon
+ ANDSW $0x100, R13, ZR
+ CSELW NE, R14, R13, R13 // Fake modulo
+ SUBS $1, R8
+ VMOV V2.S[0], R0
+ EORW R0, R4
+ EORW R4, R5
+ EORW R5, R6
+ EORW R6, R7
+ STPW.P (R4, R5), 8(R10)
+ STPW.P (R6, R7), 8(R10)
+ BNE ks128Loop
+ CBZ R11, ksDone // If dec is nil we are done
+ SUB $176, R10
+ // Decryption keys are encryption keys with InverseMixColumns applied
+ VLD1.P 64(R10), [V0.B16, V1.B16, V2.B16, V3.B16]
+ VMOV V0.B16, V7.B16
+ AESIMC V1.B16, V6.B16
+ AESIMC V2.B16, V5.B16
+ AESIMC V3.B16, V4.B16
+ VLD1.P 64(R10), [V0.B16, V1.B16, V2.B16, V3.B16]
+ AESIMC V0.B16, V11.B16
+ AESIMC V1.B16, V10.B16
+ AESIMC V2.B16, V9.B16
+ AESIMC V3.B16, V8.B16
+ VLD1 (R10), [V0.B16, V1.B16, V2.B16]
+ AESIMC V0.B16, V14.B16
+ AESIMC V1.B16, V13.B16
+ VMOV V2.B16, V12.B16
+ VST1.P [V12.B16, V13.B16, V14.B16], 48(R11)
+ VST1.P [V8.B16, V9.B16, V10.B16, V11.B16], 64(R11)
+ VST1 [V4.B16, V5.B16, V6.B16, V7.B16], (R11)
+ B ksDone
+ks192:
+ LDPW (R9), (R2, R3)
+ LDPW 8(R9), (R4, R5)
+ LDPW 16(R9), (R6, R7)
+ STPW.P (R2, R3), 8(R10)
+ STPW.P (R4, R5), 8(R10)
+ SUB $4, R8
+ks192Loop:
+ STPW.P (R6, R7), 8(R10)
+ VMOV R7, V2.S[0]
+ WORD $0x4E030042 //TBL V3.B16, [V2.B16], V2.B16
+ AESE V0.B16, V2.B16
+ EORW R13, R2
+ LSLW $1, R13
+ SUBS $1, R8
+ VMOV V2.S[0], R0
+ EORW R0, R2
+ EORW R2, R3
+ EORW R3, R4
+ EORW R4, R5
+ EORW R5, R6
+ EORW R6, R7
+ STPW.P (R2, R3), 8(R10)
+ STPW.P (R4, R5), 8(R10)
+ BNE ks192Loop
+ CBZ R11, ksDone
+ SUB $208, R10
+ VLD1.P 64(R10), [V0.B16, V1.B16, V2.B16, V3.B16]
+ VMOV V0.B16, V7.B16
+ AESIMC V1.B16, V6.B16
+ AESIMC V2.B16, V5.B16
+ AESIMC V3.B16, V4.B16
+ VLD1.P 64(R10), [V0.B16, V1.B16, V2.B16, V3.B16]
+ AESIMC V0.B16, V11.B16
+ AESIMC V1.B16, V10.B16
+ AESIMC V2.B16, V9.B16
+ AESIMC V3.B16, V8.B16
+ VLD1.P 64(R10), [V0.B16, V1.B16, V2.B16, V3.B16]
+ AESIMC V0.B16, V15.B16
+ AESIMC V1.B16, V14.B16
+ AESIMC V2.B16, V13.B16
+ AESIMC V3.B16, V12.B16
+ VLD1 (R10), [V0.B16]
+ VST1.P [V0.B16], 16(R11)
+ VST1.P [V12.B16, V13.B16, V14.B16, V15.B16], 64(R11)
+ VST1.P [V8.B16, V9.B16, V10.B16, V11.B16], 64(R11)
+ VST1 [V4.B16, V5.B16, V6.B16, V7.B16], (R11)
+ B ksDone
+ks256:
+ LDP invSRows<>(SB), (R0, R1)
+ VMOV R0, V4.D[0]
+ VMOV R1, V4.D[1]
+ LDPW (R9), (R0, R1)
+ LDPW 8(R9), (R2, R3)
+ LDPW 16(R9), (R4, R5)
+ LDPW 24(R9), (R6, R7)
+ STPW.P (R0, R1), 8(R10)
+ STPW.P (R2, R3), 8(R10)
+ SUB $7, R8
+ks256Loop:
+ STPW.P (R4, R5), 8(R10)
+ STPW.P (R6, R7), 8(R10)
+ VMOV R7, V2.S[0]
+ WORD $0x4E030042 //TBL V3.B16, [V2.B16], V2.B16
+ AESE V0.B16, V2.B16
+ EORW R13, R0
+ LSLW $1, R13
+ SUBS $1, R8
+ VMOV V2.S[0], R9
+ EORW R9, R0
+ EORW R0, R1
+ EORW R1, R2
+ EORW R2, R3
+ VMOV R3, V2.S[0]
+ WORD $0x4E040042 // TBL V4.B16, [V2.B16], V2.B16
+ AESE V0.B16, V2.B16
+ VMOV V2.S[0], R9
+ EORW R9, R4
+ EORW R4, R5
+ EORW R5, R6
+ EORW R6, R7
+ STPW.P (R0, R1), 8(R10)
+ STPW.P (R2, R3), 8(R10)
+ BNE ks256Loop
+ CBZ R11, ksDone
+ SUB $240, R10
+ VLD1.P 64(R10), [V0.B16, V1.B16, V2.B16, V3.B16]
+ VMOV V0.B16, V7.B16
+ AESIMC V1.B16, V6.B16
+ AESIMC V2.B16, V5.B16
+ AESIMC V3.B16, V4.B16
+ VLD1.P 64(R10), [V0.B16, V1.B16, V2.B16, V3.B16]
+ AESIMC V0.B16, V11.B16
+ AESIMC V1.B16, V10.B16
+ AESIMC V2.B16, V9.B16
+ AESIMC V3.B16, V8.B16
+ VLD1.P 64(R10), [V0.B16, V1.B16, V2.B16, V3.B16]
+ AESIMC V0.B16, V15.B16
+ AESIMC V1.B16, V14.B16
+ AESIMC V2.B16, V13.B16
+ AESIMC V3.B16, V12.B16
+ VLD1 (R10), [V0.B16, V1.B16, V2.B16]
+ AESIMC V0.B16, V18.B16
+ AESIMC V1.B16, V17.B16
+ VMOV V2.B16, V16.B16
+ VST1.P [V16.B16, V17.B16, V18.B16], 48(R11)
+ VST1.P [V12.B16, V13.B16, V14.B16, V15.B16], 64(R11)
+ VST1.P [V8.B16, V9.B16, V10.B16, V11.B16], 64(R11)
+ VST1 [V4.B16, V5.B16, V6.B16, V7.B16], (R11)
+ksDone:
+ RET
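
The LSLW/CSELW pair in the key-schedule loops above advances the AES round constant with a "fake modulo": double the value, and substitute 0x1b when the result overflows a byte. A Go rendering of the same recurrence (hypothetical helper name), reproducing the constant sequence fed to AESKEYGENASSIST in asm_amd64.s:

package main

import "fmt"

// nextRcon doubles the round constant and reduces by the AES polynomial on
// byte overflow. For byte-valued inputs, xor with 0x11b is equivalent to the
// CSELW-based substitution of 0x1b, since 0x100 ^ 0x11b == 0x1b.
func nextRcon(rc uint32) uint32 {
	rc <<= 1
	if rc&0x100 != 0 {
		rc ^= 0x11b
	}
	return rc
}

func main() {
	rc := uint32(1)
	for i := 0; i < 10; i++ {
		fmt.Printf("%#02x ", rc)
		rc = nextRcon(rc)
	}
	fmt.Println() // 0x01 0x02 0x04 0x08 0x10 0x20 0x40 0x80 0x1b 0x36
}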
diff --git a/src/crypto/aes/asm_ppc64x.s b/src/crypto/aes/asm_ppc64x.s
new file mode 100644
index 0000000..288f725
--- /dev/null
+++ b/src/crypto/aes/asm_ppc64x.s
@@ -0,0 +1,675 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+// Based on CRYPTOGAMS code with the following comment:
+// # ====================================================================
+// # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// # project. The module is, however, dual licensed under OpenSSL and
+// # CRYPTOGAMS licenses depending on where you obtain it. For further
+// # details see http://www.openssl.org/~appro/cryptogams/.
+// # ====================================================================
+
+// Original code can be found at the link below:
+// https://github.com/dot-asm/cryptogams/blob/master/ppc/aesp8-ppc.pl
+
+// Some function names were changed to be consistent with Go function
+// names. For instance, the function aes_p8_set_{en,de}crypt_key becomes
+// set{En,De}cryptKeyAsm. I also split setEncryptKeyAsm in two parts,
+// and a new function was created (doEncryptKeyAsm). This was necessary to
+// avoid overwriting arguments when setDecryptKeyAsm calls setEncryptKeyAsm.
+// There were other modifications as well, but the functionality is the same.
+
+#include "textflag.h"
+
+// For expandKeyAsm
+#define INP R3
+#define BITS R4
+#define OUTENC R5 // Pointer to next expanded encrypt key
+#define PTR R6
+#define CNT R7
+#define ROUNDS R8
+#define OUTDEC R9 // Pointer to next expanded decrypt key
+#define TEMP R19
+#define ZERO V0
+#define IN0 V1
+#define IN1 V2
+#define KEY V3
+#define RCON V4
+#define MASK V5
+#define TMP V6
+#define STAGE V7
+#define OUTPERM V8
+#define OUTMASK V9
+#define OUTHEAD V10
+#define OUTTAIL V11
+
+// For P9 instruction emulation
+#define ESPERM V21 // Endian swapping permute into BE
+#define TMP2 V22 // Temporary for P8_STXVB16X
+
+// For {en,de}cryptBlockAsm
+#define BLK_INP R3
+#define BLK_OUT R4
+#define BLK_KEY R5
+#define BLK_ROUNDS R6
+#define BLK_IDX R7
+
+DATA ·rcon+0x00(SB)/8, $0x0f0e0d0c0b0a0908 // Permute for vector doubleword endian swap
+DATA ·rcon+0x08(SB)/8, $0x0706050403020100
+DATA ·rcon+0x10(SB)/8, $0x0100000001000000 // RCON
+DATA ·rcon+0x18(SB)/8, $0x0100000001000000 // RCON
+DATA ·rcon+0x20(SB)/8, $0x1b0000001b000000
+DATA ·rcon+0x28(SB)/8, $0x1b0000001b000000
+DATA ·rcon+0x30(SB)/8, $0x0d0e0f0c0d0e0f0c // MASK
+DATA ·rcon+0x38(SB)/8, $0x0d0e0f0c0d0e0f0c // MASK
+DATA ·rcon+0x40(SB)/8, $0x0000000000000000
+DATA ·rcon+0x48(SB)/8, $0x0000000000000000
+GLOBL ·rcon(SB), RODATA, $80
+
+#ifdef GOARCH_ppc64le
+# ifdef GOPPC64_power9
+#define P8_LXVB16X(RA,RB,VT) LXVB16X (RA+RB), VT
+#define P8_STXVB16X(VS,RA,RB) STXVB16X VS, (RA+RB)
+#define XXBRD_ON_LE(VA,VT) XXBRD VA, VT
+# else
+// On POWER8/ppc64le, emulate the POWER9 instructions by loading unaligned
+// doublewords and byte-swapping each doubleword to emulate BE load/stores.
+#define NEEDS_ESPERM
+#define P8_LXVB16X(RA,RB,VT) \
+ LXVD2X (RA+RB), VT \
+ VPERM VT, VT, ESPERM, VT
+
+#define P8_STXVB16X(VS,RA,RB) \
+ VPERM VS, VS, ESPERM, TMP2 \
+ STXVD2X TMP2, (RA+RB)
+
+#define XXBRD_ON_LE(VA,VT) \
+ VPERM VA, VA, ESPERM, VT
+
+# endif // defined(GOPPC64_power9)
+#else
+#define P8_LXVB16X(RA,RB,VT) LXVD2X (RA+RB), VT
+#define P8_STXVB16X(VS,RA,RB) STXVD2X VS, (RA+RB)
+#define XXBRD_ON_LE(VA, VT)
+#endif // defined(GOARCH_ppc64le)
+
+// func expandKeyAsm(nr int, key *byte, enc *uint32, dec *uint32)
+TEXT ·expandKeyAsm(SB), NOSPLIT|NOFRAME, $0
+ // Load the arguments inside the registers
+ MOVD nr+0(FP), ROUNDS
+ MOVD key+8(FP), INP
+ MOVD enc+16(FP), OUTENC
+ MOVD dec+24(FP), OUTDEC
+
+#ifdef NEEDS_ESPERM
+ MOVD $·rcon(SB), PTR // PTR points to rcon addr
+ LVX (PTR), ESPERM
+ ADD $0x10, PTR
+#else
+ MOVD $·rcon+0x10(SB), PTR // PTR points to rcon addr (skipping permute vector)
+#endif
+
+ // Get key from memory and write aligned into VR
+ P8_LXVB16X(INP, R0, IN0)
+ ADD $0x10, INP, INP
+ MOVD $0x20, TEMP
+
+ CMPW ROUNDS, $12
+ LVX (PTR)(R0), RCON // lvx 4,0,6 Load first 16 bytes into RCON
+ LVX (PTR)(TEMP), MASK
+ ADD $0x10, PTR, PTR // addi 6,6,0x10 PTR to next 16 bytes of RCON
+ MOVD $8, CNT // li 7,8 CNT = 8
+ VXOR ZERO, ZERO, ZERO // vxor 0,0,0 Zero to be zero :)
+ MOVD CNT, CTR // mtctr 7 Set the counter to 8 (rounds)
+
+ // The expanded decrypt key is the expanded encrypt key stored in reverse order.
+ // Move OUTDEC to the last key location, and store in descending order.
+ ADD $160, OUTDEC, OUTDEC
+ BLT loop128
+ ADD $32, OUTDEC, OUTDEC
+ BEQ l192
+ ADD $32, OUTDEC, OUTDEC
+ JMP l256
+
+loop128:
+ // Key schedule (Round 1 to 8)
+ VPERM IN0, IN0, MASK, KEY // vperm 3,1,1,5 Rotate-n-splat
+ VSLDOI $12, ZERO, IN0, TMP // vsldoi 6,0,1,12
+ STXVD2X IN0, (R0+OUTENC)
+ STXVD2X IN0, (R0+OUTDEC)
+ VCIPHERLAST KEY, RCON, KEY // vcipherlast 3,3,4
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VADDUWM RCON, RCON, RCON // vadduwm 4,4,4
+ VXOR IN0, KEY, IN0 // vxor 1,1,3
+ BC 0x10, 0, loop128 // bdnz .Loop128
+
+ LVX (PTR)(R0), RCON // lvx 4,0,6 Last two round keys
+
+ // Key schedule (Round 9)
+ VPERM IN0, IN0, MASK, KEY // vperm 3,1,1,5 Rotate-n-splat
+ VSLDOI $12, ZERO, IN0, TMP // vsldoi 6,0,1,12
+ STXVD2X IN0, (R0+OUTENC)
+ STXVD2X IN0, (R0+OUTDEC)
+ VCIPHERLAST KEY, RCON, KEY // vcipherlast 3,3,4
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+
+ // Key schedule (Round 10)
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VADDUWM RCON, RCON, RCON // vadduwm 4,4,4
+ VXOR IN0, KEY, IN0 // vxor 1,1,3
+
+ VPERM IN0, IN0, MASK, KEY // vperm 3,1,1,5 Rotate-n-splat
+ VSLDOI $12, ZERO, IN0, TMP // vsldoi 6,0,1,12
+ STXVD2X IN0, (R0+OUTENC)
+ STXVD2X IN0, (R0+OUTDEC)
+ VCIPHERLAST KEY, RCON, KEY // vcipherlast 3,3,4
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+
+ // Key schedule (Round 11)
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VXOR IN0, KEY, IN0 // vxor 1,1,3
+ STXVD2X IN0, (R0+OUTENC)
+ STXVD2X IN0, (R0+OUTDEC)
+
+ RET
+
+l192:
+ LXSDX (INP+R0), IN1 // Load next 8 bytes into upper half of VSR.
+ XXBRD_ON_LE(IN1, IN1) // and convert to BE ordering on LE hosts.
+ MOVD $4, CNT // li 7,4
+ STXVD2X IN0, (R0+OUTENC)
+ STXVD2X IN0, (R0+OUTDEC)
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+ VSPLTISB $8, KEY // vspltisb 3,8
+ MOVD CNT, CTR // mtctr 7
+ VSUBUBM MASK, KEY, MASK // vsububm 5,5,3
+
+loop192:
+ VPERM IN1, IN1, MASK, KEY // vperm 3,2,2,5
+ VSLDOI $12, ZERO, IN0, TMP // vsldoi 6,0,1,12
+ VCIPHERLAST KEY, RCON, KEY // vcipherlast 3,3,4
+
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+
+ VSLDOI $8, ZERO, IN1, STAGE // vsldoi 7,0,2,8
+ VSPLTW $3, IN0, TMP // vspltw 6,1,3
+ VXOR TMP, IN1, TMP // vxor 6,6,2
+ VSLDOI $12, ZERO, IN1, IN1 // vsldoi 2,0,2,12
+ VADDUWM RCON, RCON, RCON // vadduwm 4,4,4
+ VXOR IN1, TMP, IN1 // vxor 2,2,6
+ VXOR IN0, KEY, IN0 // vxor 1,1,3
+ VXOR IN1, KEY, IN1 // vxor 2,2,3
+ VSLDOI $8, STAGE, IN0, STAGE // vsldoi 7,7,1,8
+
+ VPERM IN1, IN1, MASK, KEY // vperm 3,2,2,5
+ VSLDOI $12, ZERO, IN0, TMP // vsldoi 6,0,1,12
+ STXVD2X STAGE, (R0+OUTENC)
+ STXVD2X STAGE, (R0+OUTDEC)
+ VCIPHERLAST KEY, RCON, KEY // vcipherlast 3,3,4
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+
+ VSLDOI $8, IN0, IN1, STAGE // vsldoi 7,1,2,8
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ STXVD2X STAGE, (R0+OUTENC)
+ STXVD2X STAGE, (R0+OUTDEC)
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+
+ VSPLTW $3, IN0, TMP // vspltw 6,1,3
+ VXOR TMP, IN1, TMP // vxor 6,6,2
+ VSLDOI $12, ZERO, IN1, IN1 // vsldoi 2,0,2,12
+ VADDUWM RCON, RCON, RCON // vadduwm 4,4,4
+ VXOR IN1, TMP, IN1 // vxor 2,2,6
+ VXOR IN0, KEY, IN0 // vxor 1,1,3
+ VXOR IN1, KEY, IN1 // vxor 2,2,3
+ STXVD2X IN0, (R0+OUTENC)
+ STXVD2X IN0, (R0+OUTDEC)
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+ BC 0x10, 0, loop192 // bdnz .Loop192
+
+ RET
+
+l256:
+ P8_LXVB16X(INP, R0, IN1)
+ MOVD $7, CNT // li 7,7
+ STXVD2X IN0, (R0+OUTENC)
+ STXVD2X IN0, (R0+OUTDEC)
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+ MOVD CNT, CTR // mtctr 7
+
+loop256:
+ VPERM IN1, IN1, MASK, KEY // vperm 3,2,2,5
+ VSLDOI $12, ZERO, IN0, TMP // vsldoi 6,0,1,12
+ STXVD2X IN1, (R0+OUTENC)
+ STXVD2X IN1, (R0+OUTDEC)
+ VCIPHERLAST KEY, RCON, KEY // vcipherlast 3,3,4
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN0, TMP, IN0 // vxor 1,1,6
+ VADDUWM RCON, RCON, RCON // vadduwm 4,4,4
+ VXOR IN0, KEY, IN0 // vxor 1,1,3
+ STXVD2X IN0, (R0+OUTENC)
+ STXVD2X IN0, (R0+OUTDEC)
+ ADD $16, OUTENC, OUTENC
+ ADD $-16, OUTDEC, OUTDEC
+ BC 0x12, 0, done // bdz .Ldone
+
+ VSPLTW $3, IN0, KEY // vspltw 3,1,3
+ VSLDOI $12, ZERO, IN1, TMP // vsldoi 6,0,2,12
+ VSBOX KEY, KEY // vsbox 3,3
+
+ VXOR IN1, TMP, IN1 // vxor 2,2,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN1, TMP, IN1 // vxor 2,2,6
+ VSLDOI $12, ZERO, TMP, TMP // vsldoi 6,0,6,12
+ VXOR IN1, TMP, IN1 // vxor 2,2,6
+
+ VXOR IN1, KEY, IN1 // vxor 2,2,3
+ JMP loop256 // b .Loop256
+
+done:
+ RET
+
+// func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+TEXT ·encryptBlockAsm(SB), NOSPLIT|NOFRAME, $0
+ MOVD nr+0(FP), R6 // Round count/Key size
+ MOVD xk+8(FP), R5 // Key pointer
+ MOVD dst+16(FP), R3 // Dest pointer
+ MOVD src+24(FP), R4 // Src pointer
+#ifdef NEEDS_ESPERM
+ MOVD $·rcon(SB), R7
+ LVX (R7), ESPERM // Permute value for P8_ macros.
+#endif
+
+ // Set CR{1,2,3}EQ to hold the key size information.
+ CMPU R6, $10, CR1
+ CMPU R6, $12, CR2
+ CMPU R6, $14, CR3
+
+ MOVD $16, R6
+ MOVD $32, R7
+ MOVD $48, R8
+ MOVD $64, R9
+ MOVD $80, R10
+ MOVD $96, R11
+ MOVD $112, R12
+
+ // Load text in BE order
+ P8_LXVB16X(R4, R0, V0)
+
+ // V1, V2 will hold keys, V0 is a temp.
+ // At completion, V2 will hold the ciphertext.
+ // Load xk[0:3] and xor with text
+ LXVD2X (R0+R5), V1
+ VXOR V0, V1, V0
+
+ // Load xk[4:11] and cipher
+ LXVD2X (R6+R5), V1
+ LXVD2X (R7+R5), V2
+ VCIPHER V0, V1, V0
+ VCIPHER V0, V2, V0
+
+ // Load xk[12:19] and cipher
+ LXVD2X (R8+R5), V1
+ LXVD2X (R9+R5), V2
+ VCIPHER V0, V1, V0
+ VCIPHER V0, V2, V0
+
+ // Load xk[20:27] and cipher
+ LXVD2X (R10+R5), V1
+ LXVD2X (R11+R5), V2
+ VCIPHER V0, V1, V0
+ VCIPHER V0, V2, V0
+
+ // Increment xk pointer to reuse constant offsets in R6-R12.
+ ADD $112, R5
+
+ // Load xk[28:35] and cipher
+ LXVD2X (R0+R5), V1
+ LXVD2X (R6+R5), V2
+ VCIPHER V0, V1, V0
+ VCIPHER V0, V2, V0
+
+ // Load xk[36:43] and cipher
+ LXVD2X (R7+R5), V1
+ LXVD2X (R8+R5), V2
+ BEQ CR1, Lenc_tail // Key size 10?
+ VCIPHER V0, V1, V0
+ VCIPHER V0, V2, V0
+
+ // Load xk[44:51] and cipher
+ LXVD2X (R9+R5), V1
+ LXVD2X (R10+R5), V2
+ BEQ CR2, Lenc_tail // Key size 12?
+ VCIPHER V0, V1, V0
+ VCIPHER V0, V2, V0
+
+ // Load xk[52:59] and cipher
+ LXVD2X (R11+R5), V1
+ LXVD2X (R12+R5), V2
+ BNE CR3, Linvalid_key_len // Not key size 14?
+ // Fallthrough to final cipher
+
+Lenc_tail:
+ // Cipher last two keys such that key information is
+ // cleared from V1 and V2.
+ VCIPHER V0, V1, V1
+ VCIPHERLAST V1, V2, V2
+
+ // Store the result in BE order.
+ P8_STXVB16X(V2, R3, R0)
+ RET
+
+Linvalid_key_len:
+ // Segfault, this should never happen. Only 3 key sizes are created/used.
+ MOVD R0, 0(R0)
+ RET
+
+// func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+TEXT ·decryptBlockAsm(SB), NOSPLIT|NOFRAME, $0
+ MOVD nr+0(FP), R6 // Round count/Key size
+ MOVD xk+8(FP), R5 // Key pointer
+ MOVD dst+16(FP), R3 // Dest pointer
+ MOVD src+24(FP), R4 // Src pointer
+#ifdef NEEDS_ESPERM
+ MOVD $·rcon(SB), R7
+ LVX (R7), ESPERM // Permute value for P8_ macros.
+#endif
+
+ // Set CR{1,2,3}EQ to hold the key size information.
+ CMPU R6, $10, CR1
+ CMPU R6, $12, CR2
+ CMPU R6, $14, CR3
+
+ MOVD $16, R6
+ MOVD $32, R7
+ MOVD $48, R8
+ MOVD $64, R9
+ MOVD $80, R10
+ MOVD $96, R11
+ MOVD $112, R12
+
+ // Load text in BE order
+ P8_LXVB16X(R4, R0, V0)
+
+ // V1, V2 will hold keys, V0 is a temp.
+ // At completion, V2 will hold the text.
+ // Load xk[0:3] and xor with ciphertext
+ LXVD2X (R0+R5), V1
+ VXOR V0, V1, V0
+
+ // Load xk[4:11] and cipher
+ LXVD2X (R6+R5), V1
+ LXVD2X (R7+R5), V2
+ VNCIPHER V0, V1, V0
+ VNCIPHER V0, V2, V0
+
+ // Load xk[12:19] and cipher
+ LXVD2X (R8+R5), V1
+ LXVD2X (R9+R5), V2
+ VNCIPHER V0, V1, V0
+ VNCIPHER V0, V2, V0
+
+ // Load xk[20:27] and cipher
+ LXVD2X (R10+R5), V1
+ LXVD2X (R11+R5), V2
+ VNCIPHER V0, V1, V0
+ VNCIPHER V0, V2, V0
+
+ // Increment xk pointer to reuse constant offsets in R6-R12.
+ ADD $112, R5
+
+ // Load xk[28:35] and cipher
+ LXVD2X (R0+R5), V1
+ LXVD2X (R6+R5), V2
+ VNCIPHER V0, V1, V0
+ VNCIPHER V0, V2, V0
+
+ // Load xk[36:43] and cipher
+ LXVD2X (R7+R5), V1
+ LXVD2X (R8+R5), V2
+ BEQ CR1, Ldec_tail // Key size 10?
+ VNCIPHER V0, V1, V0
+ VNCIPHER V0, V2, V0
+
+ // Load xk[44:51] and cipher
+ LXVD2X (R9+R5), V1
+ LXVD2X (R10+R5), V2
+ BEQ CR2, Ldec_tail // Key size 12?
+ VNCIPHER V0, V1, V0
+ VNCIPHER V0, V2, V0
+
+ // Load xk[52:59] and cipher
+ LXVD2X (R11+R5), V1
+ LXVD2X (R12+R5), V2
+ BNE CR3, Linvalid_key_len // Not key size 14?
+ // Fallthrough to final cipher
+
+Ldec_tail:
+ // Cipher last two keys such that key information is
+ // cleared from V1 and V2.
+ VNCIPHER V0, V1, V1
+ VNCIPHERLAST V1, V2, V2
+
+ // Store the result in BE order.
+ P8_STXVB16X(V2, R3, R0)
+ RET
+
+Linvalid_key_len:
+ // Segfault, this should never happen. Only 3 key sizes are created/used.
+ MOVD R0, 0(R0)
+ RET
+
+// Remove defines from above so they can be defined here
+#undef INP
+#undef OUTENC
+#undef ROUNDS
+#undef KEY
+#undef TMP
+
+#define INP R3
+#define OUTP R4
+#define LEN R5
+#define KEYP R6
+#define ROUNDS R7
+#define IVP R8
+#define ENC R9
+
+#define INOUT V2
+#define TMP V3
+#define IVEC V4
+
+// Load the crypt key into VSRs.
+//
+// The expanded key is stored and loaded using
+// STXVD2X/LXVD2X. The in-memory byte ordering
+// depends on the endianness of the machine. The
+// expanded keys are generated by expandKeyAsm above.
+//
+// Rkeyp holds the key pointer. It is clobbered. Once
+// the expanded keys are loaded, it is not needed.
+//
+// R12,R14-R21 are scratch registers.
+// For a round count of 10, V6, V11-V20 hold the expanded key.
+// For a round count of 12, V6, V9-V20 hold the expanded key.
+// For a round count of 14, V6, V7-V20 hold the expanded key.
+#define LOAD_KEY(Rkeyp) \
+ MOVD $16, R12 \
+ MOVD $32, R14 \
+ MOVD $48, R15 \
+ MOVD $64, R16 \
+ MOVD $80, R17 \
+ MOVD $96, R18 \
+ MOVD $112, R19 \
+ MOVD $128, R20 \
+ MOVD $144, R21 \
+ LXVD2X (R0+Rkeyp), V6 \
+ ADD $16, Rkeyp \
+ BEQ CR1, L_start10 \
+ BEQ CR2, L_start12 \
+ LXVD2X (R0+Rkeyp), V7 \
+ LXVD2X (R12+Rkeyp), V8 \
+ ADD $32, Rkeyp \
+ L_start12: \
+ LXVD2X (R0+Rkeyp), V9 \
+ LXVD2X (R12+Rkeyp), V10 \
+ ADD $32, Rkeyp \
+ L_start10: \
+ LXVD2X (R0+Rkeyp), V11 \
+ LXVD2X (R12+Rkeyp), V12 \
+ LXVD2X (R14+Rkeyp), V13 \
+ LXVD2X (R15+Rkeyp), V14 \
+ LXVD2X (R16+Rkeyp), V15 \
+ LXVD2X (R17+Rkeyp), V16 \
+ LXVD2X (R18+Rkeyp), V17 \
+ LXVD2X (R19+Rkeyp), V18 \
+ LXVD2X (R20+Rkeyp), V19 \
+ LXVD2X (R21+Rkeyp), V20
+
+// Perform aes cipher operation for keysize 10/12/14 using the keys
+// loaded by LOAD_KEY, and key size information held in CR1EQ/CR2EQ.
+//
+// Vxor is ideally V6 (Key[0-3]), but for slightly improved encrypting
+// performance V6 and IVEC can be swapped (xor is both associative and
+// commutative) during encryption:
+//
+// VXOR INOUT, IVEC, INOUT
+// VXOR INOUT, V6, INOUT
+//
+// into
+//
+// VXOR INOUT, V6, INOUT
+// VXOR INOUT, IVEC, INOUT
+//
+#define CIPHER_BLOCK(Vin, Vxor, Vout, vcipher, vciphel, label10, label12) \
+ VXOR Vin, Vxor, Vout \
+ BEQ CR1, label10 \
+ BEQ CR2, label12 \
+ vcipher Vout, V7, Vout \
+ vcipher Vout, V8, Vout \
+ label12: \
+ vcipher Vout, V9, Vout \
+ vcipher Vout, V10, Vout \
+ label10: \
+ vcipher Vout, V11, Vout \
+ vcipher Vout, V12, Vout \
+ vcipher Vout, V13, Vout \
+ vcipher Vout, V14, Vout \
+ vcipher Vout, V15, Vout \
+ vcipher Vout, V16, Vout \
+ vcipher Vout, V17, Vout \
+ vcipher Vout, V18, Vout \
+ vcipher Vout, V19, Vout \
+ vciphel Vout, V20, Vout \
+
+#define CLEAR_KEYS() \
+ VXOR V6, V6, V6 \
+ VXOR V7, V7, V7 \
+ VXOR V8, V8, V8 \
+ VXOR V9, V9, V9 \
+ VXOR V10, V10, V10 \
+ VXOR V11, V11, V11 \
+ VXOR V12, V12, V12 \
+ VXOR V13, V13, V13 \
+ VXOR V14, V14, V14 \
+ VXOR V15, V15, V15 \
+ VXOR V16, V16, V16 \
+ VXOR V17, V17, V17 \
+ VXOR V18, V18, V18 \
+ VXOR V19, V19, V19 \
+ VXOR V20, V20, V20
+
+// func cryptBlocksChain(src, dst *byte, length int, key *uint32, iv *byte, enc int, nr int)
+TEXT ·cryptBlocksChain(SB), NOSPLIT|NOFRAME, $0
+ MOVD src+0(FP), INP
+ MOVD dst+8(FP), OUTP
+ MOVD length+16(FP), LEN
+ MOVD key+24(FP), KEYP
+ MOVD iv+32(FP), IVP
+ MOVD enc+40(FP), ENC
+ MOVD nr+48(FP), ROUNDS
+
+#ifdef NEEDS_ESPERM
+ MOVD $·rcon(SB), R11
+ LVX (R11), ESPERM // Permute value for P8_ macros.
+#endif
+
+ // Assume len > 0 && len % blockSize == 0.
+ CMPW ENC, $0
+ P8_LXVB16X(IVP, R0, IVEC)
+ CMPU ROUNDS, $10, CR1
+ CMPU ROUNDS, $12, CR2 // Only sizes 10/12/14 are supported.
+
+ // Setup key in VSRs, and set loop count in CTR.
+ LOAD_KEY(KEYP)
+ SRD $4, LEN
+ MOVD LEN, CTR
+
+ BEQ Lcbc_dec
+
+ PCALIGN $16
+Lcbc_enc:
+ P8_LXVB16X(INP, R0, INOUT)
+ ADD $16, INP
+ VXOR INOUT, V6, INOUT
+ CIPHER_BLOCK(INOUT, IVEC, INOUT, VCIPHER, VCIPHERLAST, Lcbc_enc10, Lcbc_enc12)
+ VOR INOUT, INOUT, IVEC // ciphertext (INOUT) is IVEC for next block.
+ P8_STXVB16X(INOUT, OUTP, R0)
+ ADD $16, OUTP
+ BDNZ Lcbc_enc
+
+ P8_STXVB16X(INOUT, IVP, R0)
+ CLEAR_KEYS()
+ RET
+
+ PCALIGN $16
+Lcbc_dec:
+ P8_LXVB16X(INP, R0, TMP)
+ ADD $16, INP
+ CIPHER_BLOCK(TMP, V6, INOUT, VNCIPHER, VNCIPHERLAST, Lcbc_dec10, Lcbc_dec12)
+ VXOR INOUT, IVEC, INOUT
+ VOR TMP, TMP, IVEC // TMP is IVEC for next block.
+ P8_STXVB16X(INOUT, OUTP, R0)
+ ADD $16, OUTP
+ BDNZ Lcbc_dec
+
+ P8_STXVB16X(IVEC, IVP, R0)
+ CLEAR_KEYS()
+ RET
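
The Lcbc_enc/Lcbc_dec loops above implement the CBC recurrence: each ciphertext block becomes the IV for the next block. From Go, cryptBlocksChain is reached through the standard CBC interface; a usage sketch with made-up key/IV values (standard-library calls only):

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	key := bytes.Repeat([]byte{0x11}, 16)
	iv := bytes.Repeat([]byte{0x22}, 16)
	pt := bytes.Repeat([]byte{0x33}, 32) // CBC needs a whole number of blocks

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	ct := make([]byte, len(pt))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ct, pt)

	out := make([]byte, len(ct))
	cipher.NewCBCDecrypter(block, iv).CryptBlocks(out, ct)
	fmt.Println(bytes.Equal(out, pt)) // true
}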
diff --git a/src/crypto/aes/asm_s390x.s b/src/crypto/aes/asm_s390x.s
new file mode 100644
index 0000000..0c60ac2
--- /dev/null
+++ b/src/crypto/aes/asm_s390x.s
@@ -0,0 +1,191 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func cryptBlocks(c code, key, dst, src *byte, length int)
+TEXT ·cryptBlocks(SB),NOSPLIT,$0-40
+ MOVD key+8(FP), R1
+ MOVD dst+16(FP), R2
+ MOVD src+24(FP), R4
+ MOVD length+32(FP), R5
+ MOVD c+0(FP), R0
+loop:
+ WORD $0xB92E0024 // cipher message (KM)
+ BVS loop // branch back if interrupted
+ XOR R0, R0
+ RET
+
+// func cryptBlocksChain(c code, iv, key, dst, src *byte, length int)
+TEXT ·cryptBlocksChain(SB),NOSPLIT,$48-48
+ LA params-48(SP), R1
+ MOVD iv+8(FP), R8
+ MOVD key+16(FP), R9
+ MVC $16, 0(R8), 0(R1) // move iv into params
+ MVC $32, 0(R9), 16(R1) // move key into params
+ MOVD dst+24(FP), R2
+ MOVD src+32(FP), R4
+ MOVD length+40(FP), R5
+ MOVD c+0(FP), R0
+loop:
+ WORD $0xB92F0024 // cipher message with chaining (KMC)
+ BVS loop // branch back if interrupted
+ XOR R0, R0
+ MVC $16, 0(R1), 0(R8) // update iv
+ RET
+
+// func xorBytes(dst, a, b []byte) int
+TEXT ·xorBytes(SB),NOSPLIT,$0-80
+ MOVD dst_base+0(FP), R1
+ MOVD a_base+24(FP), R2
+ MOVD b_base+48(FP), R3
+ MOVD a_len+32(FP), R4
+ MOVD b_len+56(FP), R5
+ CMPBLE R4, R5, skip
+ MOVD R5, R4
+skip:
+ MOVD R4, ret+72(FP)
+ MOVD $0, R5
+ CMPBLT R4, $8, tail
+loop:
+ MOVD 0(R2)(R5*1), R7
+ MOVD 0(R3)(R5*1), R8
+ XOR R7, R8
+ MOVD R8, 0(R1)(R5*1)
+ LAY 8(R5), R5
+ SUB $8, R4
+ CMPBGE R4, $8, loop
+tail:
+ CMPBEQ R4, $0, done
+ MOVB 0(R2)(R5*1), R7
+ MOVB 0(R3)(R5*1), R8
+ XOR R7, R8
+ MOVB R8, 0(R1)(R5*1)
+ LAY 1(R5), R5
+ SUB $1, R4
+ BR tail
+done:
+ RET
+
+// func cryptBlocksGCM(fn code, key, dst, src, buf []byte, cnt *[16]byte)
+TEXT ·cryptBlocksGCM(SB),NOSPLIT,$0-112
+ MOVD src_len+64(FP), R0
+ MOVD buf_base+80(FP), R1
+ MOVD cnt+104(FP), R12
+ LMG (R12), R2, R3
+
+ // Check that the src size is less than or equal to the buffer size.
+ MOVD buf_len+88(FP), R4
+ CMP R0, R4
+ BGT crash
+
+ // Check that the src size is a multiple of 16 bytes.
+ MOVD R0, R4
+ AND $0xf, R4
+ BLT crash // non-zero
+
+ // Check that the src size is less than or equal to the dst size.
+ MOVD dst_len+40(FP), R4
+ CMP R0, R4
+ BGT crash
+
+ MOVD R2, R4
+ MOVD R2, R6
+ MOVD R2, R8
+ MOVD R3, R5
+ MOVD R3, R7
+ MOVD R3, R9
+ ADDW $1, R5
+ ADDW $2, R7
+ ADDW $3, R9
+incr:
+ CMP R0, $64
+ BLT tail
+ STMG R2, R9, (R1)
+ ADDW $4, R3
+ ADDW $4, R5
+ ADDW $4, R7
+ ADDW $4, R9
+ MOVD $64(R1), R1
+ SUB $64, R0
+ BR incr
+tail:
+ CMP R0, $0
+ BEQ crypt
+ STMG R2, R3, (R1)
+ ADDW $1, R3
+ MOVD $16(R1), R1
+ SUB $16, R0
+ BR tail
+crypt:
+ STMG R2, R3, (R12) // update next counter value
+ MOVD fn+0(FP), R0 // function code (encryption)
+ MOVD key_base+8(FP), R1 // key
+ MOVD buf_base+80(FP), R2 // counter values
+ MOVD dst_base+32(FP), R4 // dst
+ MOVD src_base+56(FP), R6 // src
+ MOVD src_len+64(FP), R7 // len
+loop:
+ WORD $0xB92D2046 // cipher message with counter (KMCTR)
+ BVS loop // branch back if interrupted
+ RET
+crash:
+ MOVD $0, (R0)
+ RET
+
+// func ghash(key *gcmHashKey, hash *[16]byte, data []byte)
+TEXT ·ghash(SB),NOSPLIT,$32-40
+ MOVD $65, R0 // GHASH function code
+ MOVD key+0(FP), R2
+ LMG (R2), R6, R7
+ MOVD hash+8(FP), R8
+ LMG (R8), R4, R5
+ MOVD $params-32(SP), R1
+ STMG R4, R7, (R1)
+ LMG data+16(FP), R2, R3 // R2=base, R3=len
+loop:
+ WORD $0xB93E0002 // compute intermediate message digest (KIMD)
+ BVS loop // branch back if interrupted
+ MVC $16, (R1), (R8)
+ MOVD $0, R0
+ RET
+
+// func kmaGCM(fn code, key, dst, src, aad []byte, tag *[16]byte, cnt *gcmCount)
+TEXT ·kmaGCM(SB),NOSPLIT,$112-120
+ MOVD fn+0(FP), R0
+ MOVD $params-112(SP), R1
+
+ // load ptr/len pairs
+ LMG dst+32(FP), R2, R3 // R2=base R3=len
+ LMG src+56(FP), R4, R5 // R4=base R5=len
+ LMG aad+80(FP), R6, R7 // R6=base R7=len
+
+ // setup parameters
+ MOVD cnt+112(FP), R8
+ XC $12, (R1), (R1) // reserved
+ MVC $4, 12(R8), 12(R1) // set chain value
+ MVC $16, (R8), 64(R1) // set initial counter value
+ XC $32, 16(R1), 16(R1) // set hash subkey and tag
+ SLD $3, R7, R12
+ MOVD R12, 48(R1) // set total AAD length
+ SLD $3, R5, R12
+ MOVD R12, 56(R1) // set total plaintext/ciphertext length
+
+ LMG key+8(FP), R8, R9 // R8=base R9=len
+ MVC $16, (R8), 80(R1) // set key
+ CMPBEQ R9, $16, kma
+ MVC $8, 16(R8), 96(R1)
+ CMPBEQ R9, $24, kma
+ MVC $8, 24(R8), 104(R1)
+
+kma:
+ WORD $0xb9296024 // kma %r6,%r2,%r4
+ BVS kma
+
+ MOVD tag+104(FP), R2
+ MVC $16, 16(R1), 0(R2) // copy tag to output
+ MOVD cnt+112(FP), R8
+ MVC $4, 12(R1), 12(R8) // update counter value
+
+ RET
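
The counter-filling loop in cryptBlocksGCM above increments only the rightmost 32 bits of each counter block (the ADDW instructions), which is GCM's inc32 operation. A sketch of the same step in Go (hypothetical helper name):

package main

import (
	"encoding/binary"
	"fmt"
)

// gcmInc32 increments the last 32 bits of a GCM counter block,
// wrapping modulo 2^32 and leaving the first 12 bytes untouched.
func gcmInc32(ctr *[16]byte) {
	n := binary.BigEndian.Uint32(ctr[12:])
	binary.BigEndian.PutUint32(ctr[12:], n+1)
}

func main() {
	var ctr [16]byte
	ctr[15] = 0xff
	gcmInc32(&ctr)
	fmt.Printf("%x\n", ctr[12:]) // 00000100
}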
diff --git a/src/crypto/aes/block.go b/src/crypto/aes/block.go
new file mode 100644
index 0000000..53308ae
--- /dev/null
+++ b/src/crypto/aes/block.go
@@ -0,0 +1,182 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This Go implementation is derived in part from the reference
+// ANSI C implementation, which carries the following notice:
+//
+// rijndael-alg-fst.c
+//
+// @version 3.0 (December 2000)
+//
+// Optimised ANSI C code for the Rijndael cipher (now AES)
+//
+// @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
+// @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
+// @author Paulo Barreto <paulo.barreto@terra.com.br>
+//
+// This code is hereby placed in the public domain.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// See FIPS 197 for specification, and see Daemen and Rijmen's Rijndael submission
+// for implementation details.
+// https://csrc.nist.gov/csrc/media/publications/fips/197/final/documents/fips-197.pdf
+// https://csrc.nist.gov/archive/aes/rijndael/Rijndael-ammended.pdf
+
+package aes
+
+import (
+ "encoding/binary"
+)
+
+// Encrypt one block from src into dst, using the expanded key xk.
+func encryptBlockGo(xk []uint32, dst, src []byte) {
+ _ = src[15] // early bounds check
+ s0 := binary.BigEndian.Uint32(src[0:4])
+ s1 := binary.BigEndian.Uint32(src[4:8])
+ s2 := binary.BigEndian.Uint32(src[8:12])
+ s3 := binary.BigEndian.Uint32(src[12:16])
+
+ // First round just XORs input with key.
+ s0 ^= xk[0]
+ s1 ^= xk[1]
+ s2 ^= xk[2]
+ s3 ^= xk[3]
+
+ // Middle rounds shuffle using tables.
+ // Number of rounds is set by length of expanded key.
+ nr := len(xk)/4 - 2 // -2: initial XOR above, final round below
+ k := 4
+ var t0, t1, t2, t3 uint32
+ for r := 0; r < nr; r++ {
+ t0 = xk[k+0] ^ te0[uint8(s0>>24)] ^ te1[uint8(s1>>16)] ^ te2[uint8(s2>>8)] ^ te3[uint8(s3)]
+ t1 = xk[k+1] ^ te0[uint8(s1>>24)] ^ te1[uint8(s2>>16)] ^ te2[uint8(s3>>8)] ^ te3[uint8(s0)]
+ t2 = xk[k+2] ^ te0[uint8(s2>>24)] ^ te1[uint8(s3>>16)] ^ te2[uint8(s0>>8)] ^ te3[uint8(s1)]
+ t3 = xk[k+3] ^ te0[uint8(s3>>24)] ^ te1[uint8(s0>>16)] ^ te2[uint8(s1>>8)] ^ te3[uint8(s2)]
+ k += 4
+ s0, s1, s2, s3 = t0, t1, t2, t3
+ }
+
+ // Last round uses s-box directly and XORs to produce output.
+ s0 = uint32(sbox0[t0>>24])<<24 | uint32(sbox0[t1>>16&0xff])<<16 | uint32(sbox0[t2>>8&0xff])<<8 | uint32(sbox0[t3&0xff])
+ s1 = uint32(sbox0[t1>>24])<<24 | uint32(sbox0[t2>>16&0xff])<<16 | uint32(sbox0[t3>>8&0xff])<<8 | uint32(sbox0[t0&0xff])
+ s2 = uint32(sbox0[t2>>24])<<24 | uint32(sbox0[t3>>16&0xff])<<16 | uint32(sbox0[t0>>8&0xff])<<8 | uint32(sbox0[t1&0xff])
+ s3 = uint32(sbox0[t3>>24])<<24 | uint32(sbox0[t0>>16&0xff])<<16 | uint32(sbox0[t1>>8&0xff])<<8 | uint32(sbox0[t2&0xff])
+
+ s0 ^= xk[k+0]
+ s1 ^= xk[k+1]
+ s2 ^= xk[k+2]
+ s3 ^= xk[k+3]
+
+ _ = dst[15] // early bounds check
+ binary.BigEndian.PutUint32(dst[0:4], s0)
+ binary.BigEndian.PutUint32(dst[4:8], s1)
+ binary.BigEndian.PutUint32(dst[8:12], s2)
+ binary.BigEndian.PutUint32(dst[12:16], s3)
+}
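+
+// Note that each table lookup above combines SubBytes and MixColumns, while
+// ShiftRows falls out of which state word supplies each byte position.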
+
+// Decrypt one block from src into dst, using the expanded key xk.
+func decryptBlockGo(xk []uint32, dst, src []byte) {
+ _ = src[15] // early bounds check
+ s0 := binary.BigEndian.Uint32(src[0:4])
+ s1 := binary.BigEndian.Uint32(src[4:8])
+ s2 := binary.BigEndian.Uint32(src[8:12])
+ s3 := binary.BigEndian.Uint32(src[12:16])
+
+ // First round just XORs input with key.
+ s0 ^= xk[0]
+ s1 ^= xk[1]
+ s2 ^= xk[2]
+ s3 ^= xk[3]
+
+ // Middle rounds shuffle using tables.
+ // Number of rounds is set by length of expanded key.
+ nr := len(xk)/4 - 2 // -2: initial XOR above, final round below
+ k := 4
+ var t0, t1, t2, t3 uint32
+ for r := 0; r < nr; r++ {
+ t0 = xk[k+0] ^ td0[uint8(s0>>24)] ^ td1[uint8(s3>>16)] ^ td2[uint8(s2>>8)] ^ td3[uint8(s1)]
+ t1 = xk[k+1] ^ td0[uint8(s1>>24)] ^ td1[uint8(s0>>16)] ^ td2[uint8(s3>>8)] ^ td3[uint8(s2)]
+ t2 = xk[k+2] ^ td0[uint8(s2>>24)] ^ td1[uint8(s1>>16)] ^ td2[uint8(s0>>8)] ^ td3[uint8(s3)]
+ t3 = xk[k+3] ^ td0[uint8(s3>>24)] ^ td1[uint8(s2>>16)] ^ td2[uint8(s1>>8)] ^ td3[uint8(s0)]
+ k += 4
+ s0, s1, s2, s3 = t0, t1, t2, t3
+ }
+
+ // Last round uses s-box directly and XORs to produce output.
+ s0 = uint32(sbox1[t0>>24])<<24 | uint32(sbox1[t3>>16&0xff])<<16 | uint32(sbox1[t2>>8&0xff])<<8 | uint32(sbox1[t1&0xff])
+ s1 = uint32(sbox1[t1>>24])<<24 | uint32(sbox1[t0>>16&0xff])<<16 | uint32(sbox1[t3>>8&0xff])<<8 | uint32(sbox1[t2&0xff])
+ s2 = uint32(sbox1[t2>>24])<<24 | uint32(sbox1[t1>>16&0xff])<<16 | uint32(sbox1[t0>>8&0xff])<<8 | uint32(sbox1[t3&0xff])
+ s3 = uint32(sbox1[t3>>24])<<24 | uint32(sbox1[t2>>16&0xff])<<16 | uint32(sbox1[t1>>8&0xff])<<8 | uint32(sbox1[t0&0xff])
+
+ s0 ^= xk[k+0]
+ s1 ^= xk[k+1]
+ s2 ^= xk[k+2]
+ s3 ^= xk[k+3]
+
+ _ = dst[15] // early bounds check
+ binary.BigEndian.PutUint32(dst[0:4], s0)
+ binary.BigEndian.PutUint32(dst[4:8], s1)
+ binary.BigEndian.PutUint32(dst[8:12], s2)
+ binary.BigEndian.PutUint32(dst[12:16], s3)
+}
+
+// Apply sbox0 to each byte in w.
+func subw(w uint32) uint32 {
+ return uint32(sbox0[w>>24])<<24 |
+ uint32(sbox0[w>>16&0xff])<<16 |
+ uint32(sbox0[w>>8&0xff])<<8 |
+ uint32(sbox0[w&0xff])
+}
+
+// Rotate the word left by one byte.
+func rotw(w uint32) uint32 { return w<<8 | w>>24 }
+
+// Key expansion algorithm. See FIPS-197, Figure 11.
+// Their rcon[i] is our powx[i-1] << 24.
+func expandKeyGo(key []byte, enc, dec []uint32) {
+ // Encryption key setup.
+ var i int
+ nk := len(key) / 4
+ for i = 0; i < nk; i++ {
+ enc[i] = binary.BigEndian.Uint32(key[4*i:])
+ }
+ for ; i < len(enc); i++ {
+ t := enc[i-1]
+ if i%nk == 0 {
+ t = subw(rotw(t)) ^ (uint32(powx[i/nk-1]) << 24)
+ } else if nk > 6 && i%nk == 4 {
+ t = subw(t)
+ }
+ enc[i] = enc[i-nk] ^ t
+ }
+
+ // Derive decryption key from encryption key.
+ // Reverse the 4-word round key sets from enc to produce dec.
+ // All sets but the first and last get the MixColumn transform applied.
+ if dec == nil {
+ return
+ }
+ n := len(enc)
+ for i := 0; i < n; i += 4 {
+ ei := n - i - 4
+ for j := 0; j < 4; j++ {
+ x := enc[ei+j]
+ if i > 0 && i+4 < n {
+ x = td0[sbox0[x>>24]] ^ td1[sbox0[x>>16&0xff]] ^ td2[sbox0[x>>8&0xff]] ^ td3[sbox0[x&0xff]]
+ }
+ dec[i+j] = x
+ }
+ }
+}
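+
+// As a size sanity check: a 16-byte key expands to 4*(10+1) = 44 words, a
+// 24-byte key to 52 and a 32-byte key to 60, which is why callers allocate
+// len(key)+28 uint32s. A sketch (roundKeyWords is a hypothetical helper,
+// not part of this package):
+//
+//	func roundKeyWords(keyLen int) int {
+//		nk := keyLen / 4    // key size in 32-bit words
+//		nr := nk + 6        // rounds: 10, 12 or 14
+//		return 4 * (nr + 1) // one 4-word round key per round, plus the initial XOR
+//	}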
diff --git a/src/crypto/aes/cbc_ppc64x.go b/src/crypto/aes/cbc_ppc64x.go
new file mode 100644
index 0000000..c23c371
--- /dev/null
+++ b/src/crypto/aes/cbc_ppc64x.go
@@ -0,0 +1,74 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/alias"
+)
+
+// Assert that aesCipherAsm implements the cbcEncAble and cbcDecAble interfaces.
+var _ cbcEncAble = (*aesCipherAsm)(nil)
+var _ cbcDecAble = (*aesCipherAsm)(nil)
+
+const cbcEncrypt = 1
+const cbcDecrypt = 0
+
+type cbc struct {
+ b *aesCipherAsm
+ enc int
+ iv [BlockSize]byte
+}
+
+func (b *aesCipherAsm) NewCBCEncrypter(iv []byte) cipher.BlockMode {
+ var c cbc
+ c.b = b
+ c.enc = cbcEncrypt
+ copy(c.iv[:], iv)
+ return &c
+}
+
+func (b *aesCipherAsm) NewCBCDecrypter(iv []byte) cipher.BlockMode {
+ var c cbc
+ c.b = b
+ c.enc = cbcDecrypt
+ copy(c.iv[:], iv)
+ return &c
+}
+
+func (x *cbc) BlockSize() int { return BlockSize }
+
+// cryptBlocksChain encrypts or decrypts the given blocks in CBC mode; enc
+// selects the direction (cbcEncrypt or cbcDecrypt) and nr is the round count.
+//
+//go:noescape
+func cryptBlocksChain(src, dst *byte, length int, key *uint32, iv *byte, enc int, nr int)
+
+func (x *cbc) CryptBlocks(dst, src []byte) {
+ if len(src)%BlockSize != 0 {
+ panic("crypto/cipher: input not full blocks")
+ }
+ if len(dst) < len(src) {
+ panic("crypto/cipher: output smaller than input")
+ }
+ if alias.InexactOverlap(dst[:len(src)], src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(src) > 0 {
+ if x.enc == cbcEncrypt {
+ cryptBlocksChain(&src[0], &dst[0], len(src), &x.b.enc[0], &x.iv[0], x.enc, len(x.b.enc)/4-1)
+ } else {
+ cryptBlocksChain(&src[0], &dst[0], len(src), &x.b.dec[0], &x.iv[0], x.enc, len(x.b.dec)/4-1)
+ }
+ }
+}
+
+func (x *cbc) SetIV(iv []byte) {
+ if len(iv) != BlockSize {
+ panic("cipher: incorrect length IV")
+ }
+ copy(x.iv[:], iv)
+}
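+
+// Callers reach this implementation through crypto/cipher, which detects the
+// cbcEncAble/cbcDecAble interfaces. A minimal sketch (error handling elided;
+// key, iv, dst and src are caller-supplied):
+//
+//	block, _ := aes.NewCipher(key)            // 16, 24 or 32 bytes
+//	mode := cipher.NewCBCEncrypter(block, iv) // iv must be 16 bytes
+//	mode.CryptBlocks(dst, src)                // len(src) a multiple of 16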
diff --git a/src/crypto/aes/cbc_s390x.go b/src/crypto/aes/cbc_s390x.go
new file mode 100644
index 0000000..eaa21f8
--- /dev/null
+++ b/src/crypto/aes/cbc_s390x.go
@@ -0,0 +1,66 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/alias"
+)
+
+// Assert that aesCipherAsm implements the cbcEncAble and cbcDecAble interfaces.
+var _ cbcEncAble = (*aesCipherAsm)(nil)
+var _ cbcDecAble = (*aesCipherAsm)(nil)
+
+type cbc struct {
+ b *aesCipherAsm
+ c code
+ iv [BlockSize]byte
+}
+
+func (b *aesCipherAsm) NewCBCEncrypter(iv []byte) cipher.BlockMode {
+ var c cbc
+ c.b = b
+ c.c = b.function
+ copy(c.iv[:], iv)
+ return &c
+}
+
+func (b *aesCipherAsm) NewCBCDecrypter(iv []byte) cipher.BlockMode {
+ var c cbc
+ c.b = b
+ c.c = b.function + 128 // decrypt function code is encrypt + 128
+ copy(c.iv[:], iv)
+ return &c
+}
+
+func (x *cbc) BlockSize() int { return BlockSize }
+
+// cryptBlocksChain invokes the cipher message with chaining (KMC) instruction
+// with the given function code. The length must be a multiple of BlockSize (16).
+//
+//go:noescape
+func cryptBlocksChain(c code, iv, key, dst, src *byte, length int)
+
+func (x *cbc) CryptBlocks(dst, src []byte) {
+ if len(src)%BlockSize != 0 {
+ panic("crypto/cipher: input not full blocks")
+ }
+ if len(dst) < len(src) {
+ panic("crypto/cipher: output smaller than input")
+ }
+ if alias.InexactOverlap(dst[:len(src)], src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ if len(src) > 0 {
+ cryptBlocksChain(x.c, &x.iv[0], &x.b.key[0], &dst[0], &src[0], len(src))
+ }
+}
+
+func (x *cbc) SetIV(iv []byte) {
+ if len(iv) != BlockSize {
+ panic("cipher: incorrect length IV")
+ }
+ copy(x.iv[:], iv)
+}
diff --git a/src/crypto/aes/cipher.go b/src/crypto/aes/cipher.go
new file mode 100644
index 0000000..183c169
--- /dev/null
+++ b/src/crypto/aes/cipher.go
@@ -0,0 +1,82 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/alias"
+ "crypto/internal/boring"
+ "strconv"
+)
+
+// The AES block size in bytes.
+const BlockSize = 16
+
+// A cipher is an instance of AES encryption using a particular key.
+type aesCipher struct {
+ enc []uint32
+ dec []uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+ return "crypto/aes: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a new cipher.Block.
+// The key argument should be the AES key,
+// either 16, 24, or 32 bytes to select
+// AES-128, AES-192, or AES-256.
+func NewCipher(key []byte) (cipher.Block, error) {
+ k := len(key)
+ switch k {
+ default:
+ return nil, KeySizeError(k)
+ case 16, 24, 32:
+ break
+ }
+ if boring.Enabled {
+ return boring.NewAESCipher(key)
+ }
+ return newCipher(key)
+}
+
+// newCipherGeneric creates and returns a new cipher.Block
+// implemented in pure Go.
+func newCipherGeneric(key []byte) (cipher.Block, error) {
+ n := len(key) + 28
+ c := aesCipher{make([]uint32, n), make([]uint32, n)}
+ expandKeyGo(key, c.enc, c.dec)
+ return &c, nil
+}
+
+func (c *aesCipher) BlockSize() int { return BlockSize }
+
+func (c *aesCipher) Encrypt(dst, src []byte) {
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ encryptBlockGo(c.enc, dst, src)
+}
+
+func (c *aesCipher) Decrypt(dst, src []byte) {
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ decryptBlockGo(c.dec, dst, src)
+}
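+
+// For illustration, direct single-block use looks roughly like this (a
+// sketch; most callers should use a cipher.BlockMode or cipher.AEAD rather
+// than raw block operations):
+//
+//	block, err := aes.NewCipher(key) // 16, 24 or 32 byte key
+//	if err != nil {
+//		// KeySizeError for any other key length
+//	}
+//	var ct [aes.BlockSize]byte
+//	block.Encrypt(ct[:], pt[:]) // pt holds one 16-byte block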
diff --git a/src/crypto/aes/cipher_asm.go b/src/crypto/aes/cipher_asm.go
new file mode 100644
index 0000000..90031c5
--- /dev/null
+++ b/src/crypto/aes/cipher_asm.go
@@ -0,0 +1,113 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64 || ppc64 || ppc64le
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/alias"
+ "crypto/internal/boring"
+ "internal/cpu"
+ "internal/goarch"
+)
+
+// defined in asm_*.s
+
+//go:noescape
+func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+
+//go:noescape
+func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+
+//go:noescape
+func expandKeyAsm(nr int, key *byte, enc *uint32, dec *uint32)
+
+type aesCipherAsm struct {
+ aesCipher
+}
+
+// aesCipherGCM implements crypto/cipher.gcmAble so that crypto/cipher.NewGCM
+// will use the optimised implementation in aes_gcm.go when possible.
+// Instances of this type only exist when supportsAES and supportsGFMUL are
+// both true. Likewise,
+// the gcmAble implementation is in aes_gcm.go.
+type aesCipherGCM struct {
+ aesCipherAsm
+}
+
+var supportsAES = cpu.X86.HasAES || cpu.ARM64.HasAES || goarch.IsPpc64 == 1 || goarch.IsPpc64le == 1
+var supportsGFMUL = cpu.X86.HasPCLMULQDQ || cpu.ARM64.HasPMULL
+
+func newCipher(key []byte) (cipher.Block, error) {
+ if !supportsAES {
+ return newCipherGeneric(key)
+ }
+ n := len(key) + 28
+ c := aesCipherAsm{aesCipher{make([]uint32, n), make([]uint32, n)}}
+ var rounds int
+ switch len(key) {
+ case 128 / 8:
+ rounds = 10
+ case 192 / 8:
+ rounds = 12
+ case 256 / 8:
+ rounds = 14
+ default:
+ return nil, KeySizeError(len(key))
+ }
+
+ expandKeyAsm(rounds, &key[0], &c.enc[0], &c.dec[0])
+ if supportsAES && supportsGFMUL {
+ return &aesCipherGCM{c}, nil
+ }
+ return &c, nil
+}
+
+func (c *aesCipherAsm) BlockSize() int { return BlockSize }
+
+func (c *aesCipherAsm) Encrypt(dst, src []byte) {
+ boring.Unreachable()
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ encryptBlockAsm(len(c.enc)/4-1, &c.enc[0], &dst[0], &src[0])
+}
+
+func (c *aesCipherAsm) Decrypt(dst, src []byte) {
+ boring.Unreachable()
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ decryptBlockAsm(len(c.dec)/4-1, &c.dec[0], &dst[0], &src[0])
+}
+
+// expandKey is used by BenchmarkExpand to ensure that the asm implementation
+// of key expansion is used for the benchmark when it is available.
+func expandKey(key []byte, enc, dec []uint32) {
+ if supportsAES {
+ rounds := 10 // rounds needed for AES128
+ switch len(key) {
+ case 192 / 8:
+ rounds = 12
+ case 256 / 8:
+ rounds = 14
+ }
+ expandKeyAsm(rounds, &key[0], &enc[0], &dec[0])
+ } else {
+ expandKeyGo(key, enc, dec)
+ }
+}
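+
+// When supportsAES and supportsGFMUL are both true, newCipher returns an
+// aesCipherGCM and cipher.NewGCM transparently selects the assembly AES-GCM;
+// nothing changes for the caller (a sketch, error handling elided):
+//
+//	block, _ := aes.NewCipher(key)
+//	aead, _ := cipher.NewGCM(block)
+//	ct := aead.Seal(nil, nonce, plaintext, additionalData) // 12-byte nonce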
diff --git a/src/crypto/aes/cipher_generic.go b/src/crypto/aes/cipher_generic.go
new file mode 100644
index 0000000..8a8a3ff
--- /dev/null
+++ b/src/crypto/aes/cipher_generic.go
@@ -0,0 +1,26 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 && !s390x && !ppc64 && !ppc64le && !arm64
+
+package aes
+
+import (
+ "crypto/cipher"
+)
+
+// newCipher calls the newCipherGeneric function
+// directly. Platforms with hardware accelerated
+// implementations of AES should implement their
+// own version of newCipher (which may then call
+// newCipherGeneric if needed).
+func newCipher(key []byte) (cipher.Block, error) {
+ return newCipherGeneric(key)
+}
+
+// expandKey is used by BenchmarkExpand and should
+// call an assembly implementation if one is available.
+func expandKey(key []byte, enc, dec []uint32) {
+ expandKeyGo(key, enc, dec)
+}
diff --git a/src/crypto/aes/cipher_s390x.go b/src/crypto/aes/cipher_s390x.go
new file mode 100644
index 0000000..8dd3d8f
--- /dev/null
+++ b/src/crypto/aes/cipher_s390x.go
@@ -0,0 +1,96 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/alias"
+ "internal/cpu"
+)
+
+type code int
+
+// Function codes for the cipher message family of instructions.
+const (
+ aes128 code = 18
+ aes192 = 19
+ aes256 = 20
+)
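+
+// Setting the modifier bit of the function code, i.e. adding 128, selects
+// the decipher operation, so AES-128 decryption uses code 146.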
+
+type aesCipherAsm struct {
+ function code // code for cipher message instruction
+ key []byte // key (128, 192 or 256 bits)
+ storage [32]byte // array backing key slice
+}
+
+// cryptBlocks invokes the cipher message (KM) instruction with
+// the given function code. This is equivalent to AES in ECB
+// mode. The length must be a multiple of BlockSize (16).
+//
+//go:noescape
+func cryptBlocks(c code, key, dst, src *byte, length int)
+
+func newCipher(key []byte) (cipher.Block, error) {
+ // The aesCipherAsm type implements the cbcEncAble, cbcDecAble,
+ // ctrAble and gcmAble interfaces. We therefore need to check
+ // for all the features required to implement these modes.
+ // Keep in sync with crypto/tls/common.go.
+ if !(cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)) {
+ return newCipherGeneric(key)
+ }
+
+ var function code
+ switch len(key) {
+ case 128 / 8:
+ function = aes128
+ case 192 / 8:
+ function = aes192
+ case 256 / 8:
+ function = aes256
+ default:
+ return nil, KeySizeError(len(key))
+ }
+
+ var c aesCipherAsm
+ c.function = function
+ c.key = c.storage[:len(key)]
+ copy(c.key, key)
+ return &c, nil
+}
+
+func (c *aesCipherAsm) BlockSize() int { return BlockSize }
+
+func (c *aesCipherAsm) Encrypt(dst, src []byte) {
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ cryptBlocks(c.function, &c.key[0], &dst[0], &src[0], BlockSize)
+}
+
+func (c *aesCipherAsm) Decrypt(dst, src []byte) {
+ if len(src) < BlockSize {
+ panic("crypto/aes: input not full block")
+ }
+ if len(dst) < BlockSize {
+ panic("crypto/aes: output not full block")
+ }
+ if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
+ panic("crypto/aes: invalid buffer overlap")
+ }
+ // The decrypt function code is equal to the function code + 128.
+ cryptBlocks(c.function+128, &c.key[0], &dst[0], &src[0], BlockSize)
+}
+
+// expandKey is used by BenchmarkExpand. cipher message (KM) does not need key
+// expansion so there is no assembly equivalent.
+func expandKey(key []byte, enc, dec []uint32) {
+ expandKeyGo(key, enc, dec)
+}
diff --git a/src/crypto/aes/const.go b/src/crypto/aes/const.go
new file mode 100644
index 0000000..4eca4b9
--- /dev/null
+++ b/src/crypto/aes/const.go
@@ -0,0 +1,365 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package aes implements AES encryption (formerly Rijndael), as defined in
+// U.S. Federal Information Processing Standards Publication 197.
+//
+// The AES operations in this package are not implemented using constant-time algorithms.
+// An exception is when running on systems with enabled hardware support for AES
+// that makes these operations constant-time. Examples include amd64 systems using AES-NI
+// extensions and s390x systems using Message-Security-Assist extensions.
+// On such systems, when the result of NewCipher is passed to cipher.NewGCM,
+// the GHASH operation used by GCM is also constant-time.
+package aes
+
+// This file contains AES constants - 8720 bytes of initialized data.
+
+// https://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
+
+// AES is based on the mathematical behavior of binary polynomials
+// (polynomials over GF(2)) modulo the irreducible polynomial x⁸ + x⁴ + x³ + x + 1.
+// Addition of these binary polynomials corresponds to binary xor.
+// Reducing mod poly corresponds to binary xor with poly every
+// time a 0x100 bit appears.
+const poly = 1<<8 | 1<<4 | 1<<3 | 1<<1 | 1<<0 // x⁸ + x⁴ + x³ + x + 1
+
+// Powers of x mod poly in GF(2).
+var powx = [16]byte{
+ 0x01,
+ 0x02,
+ 0x04,
+ 0x08,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x1b,
+ 0x36,
+ 0x6c,
+ 0xd8,
+ 0xab,
+ 0x4d,
+ 0x9a,
+ 0x2f,
+}
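+
+// powx can be regenerated by repeated multiplication by x, reducing mod poly
+// whenever the 0x100 bit appears (a sketch):
+//
+//	p := 1
+//	for i := range powx {
+//		powx[i] = byte(p)
+//		p <<= 1
+//		if p&0x100 != 0 {
+//			p ^= poly
+//		}
+//	}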
+
+// FIPS-197 Figure 7. S-box substitution values in hexadecimal format.
+var sbox0 = [256]byte{
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
+}
+
+// FIPS-197 Figure 14. Inverse S-box substitution values in hexadecimal format.
+var sbox1 = [256]byte{
+ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
+ 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
+ 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+ 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
+ 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
+ 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+ 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
+ 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
+ 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+ 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
+ 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
+ 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+ 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
+ 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
+ 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+ 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
+}
+
+// Lookup tables for encryption.
+// These can be recomputed by adapting the tests in aes_test.go.
+
+var te0 = [256]uint32{
+ 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d, 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
+ 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d, 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
+ 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87, 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
+ 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea, 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
+ 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a, 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
+ 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108, 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
+ 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e, 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
+ 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d, 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
+ 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e, 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
+ 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce, 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
+ 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c, 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
+ 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b, 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
+ 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16, 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
+ 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81, 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
+ 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a, 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
+ 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163, 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
+ 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f, 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
+ 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47, 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
+ 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f, 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
+ 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c, 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
+ 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e, 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
+ 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6, 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
+ 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7, 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
+ 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25, 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
+ 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72, 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
+ 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21, 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
+ 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa, 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
+ 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0, 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
+ 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133, 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
+ 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920, 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
+ 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17, 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
+ 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11, 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a,
+}
+var te1 = [256]uint32{
+ 0xa5c66363, 0x84f87c7c, 0x99ee7777, 0x8df67b7b, 0x0dfff2f2, 0xbdd66b6b, 0xb1de6f6f, 0x5491c5c5,
+ 0x50603030, 0x03020101, 0xa9ce6767, 0x7d562b2b, 0x19e7fefe, 0x62b5d7d7, 0xe64dabab, 0x9aec7676,
+ 0x458fcaca, 0x9d1f8282, 0x4089c9c9, 0x87fa7d7d, 0x15effafa, 0xebb25959, 0xc98e4747, 0x0bfbf0f0,
+ 0xec41adad, 0x67b3d4d4, 0xfd5fa2a2, 0xea45afaf, 0xbf239c9c, 0xf753a4a4, 0x96e47272, 0x5b9bc0c0,
+ 0xc275b7b7, 0x1ce1fdfd, 0xae3d9393, 0x6a4c2626, 0x5a6c3636, 0x417e3f3f, 0x02f5f7f7, 0x4f83cccc,
+ 0x5c683434, 0xf451a5a5, 0x34d1e5e5, 0x08f9f1f1, 0x93e27171, 0x73abd8d8, 0x53623131, 0x3f2a1515,
+ 0x0c080404, 0x5295c7c7, 0x65462323, 0x5e9dc3c3, 0x28301818, 0xa1379696, 0x0f0a0505, 0xb52f9a9a,
+ 0x090e0707, 0x36241212, 0x9b1b8080, 0x3ddfe2e2, 0x26cdebeb, 0x694e2727, 0xcd7fb2b2, 0x9fea7575,
+ 0x1b120909, 0x9e1d8383, 0x74582c2c, 0x2e341a1a, 0x2d361b1b, 0xb2dc6e6e, 0xeeb45a5a, 0xfb5ba0a0,
+ 0xf6a45252, 0x4d763b3b, 0x61b7d6d6, 0xce7db3b3, 0x7b522929, 0x3edde3e3, 0x715e2f2f, 0x97138484,
+ 0xf5a65353, 0x68b9d1d1, 0x00000000, 0x2cc1eded, 0x60402020, 0x1fe3fcfc, 0xc879b1b1, 0xedb65b5b,
+ 0xbed46a6a, 0x468dcbcb, 0xd967bebe, 0x4b723939, 0xde944a4a, 0xd4984c4c, 0xe8b05858, 0x4a85cfcf,
+ 0x6bbbd0d0, 0x2ac5efef, 0xe54faaaa, 0x16edfbfb, 0xc5864343, 0xd79a4d4d, 0x55663333, 0x94118585,
+ 0xcf8a4545, 0x10e9f9f9, 0x06040202, 0x81fe7f7f, 0xf0a05050, 0x44783c3c, 0xba259f9f, 0xe34ba8a8,
+ 0xf3a25151, 0xfe5da3a3, 0xc0804040, 0x8a058f8f, 0xad3f9292, 0xbc219d9d, 0x48703838, 0x04f1f5f5,
+ 0xdf63bcbc, 0xc177b6b6, 0x75afdada, 0x63422121, 0x30201010, 0x1ae5ffff, 0x0efdf3f3, 0x6dbfd2d2,
+ 0x4c81cdcd, 0x14180c0c, 0x35261313, 0x2fc3ecec, 0xe1be5f5f, 0xa2359797, 0xcc884444, 0x392e1717,
+ 0x5793c4c4, 0xf255a7a7, 0x82fc7e7e, 0x477a3d3d, 0xacc86464, 0xe7ba5d5d, 0x2b321919, 0x95e67373,
+ 0xa0c06060, 0x98198181, 0xd19e4f4f, 0x7fa3dcdc, 0x66442222, 0x7e542a2a, 0xab3b9090, 0x830b8888,
+ 0xca8c4646, 0x29c7eeee, 0xd36bb8b8, 0x3c281414, 0x79a7dede, 0xe2bc5e5e, 0x1d160b0b, 0x76addbdb,
+ 0x3bdbe0e0, 0x56643232, 0x4e743a3a, 0x1e140a0a, 0xdb924949, 0x0a0c0606, 0x6c482424, 0xe4b85c5c,
+ 0x5d9fc2c2, 0x6ebdd3d3, 0xef43acac, 0xa6c46262, 0xa8399191, 0xa4319595, 0x37d3e4e4, 0x8bf27979,
+ 0x32d5e7e7, 0x438bc8c8, 0x596e3737, 0xb7da6d6d, 0x8c018d8d, 0x64b1d5d5, 0xd29c4e4e, 0xe049a9a9,
+ 0xb4d86c6c, 0xfaac5656, 0x07f3f4f4, 0x25cfeaea, 0xafca6565, 0x8ef47a7a, 0xe947aeae, 0x18100808,
+ 0xd56fbaba, 0x88f07878, 0x6f4a2525, 0x725c2e2e, 0x24381c1c, 0xf157a6a6, 0xc773b4b4, 0x5197c6c6,
+ 0x23cbe8e8, 0x7ca1dddd, 0x9ce87474, 0x213e1f1f, 0xdd964b4b, 0xdc61bdbd, 0x860d8b8b, 0x850f8a8a,
+ 0x90e07070, 0x427c3e3e, 0xc471b5b5, 0xaacc6666, 0xd8904848, 0x05060303, 0x01f7f6f6, 0x121c0e0e,
+ 0xa3c26161, 0x5f6a3535, 0xf9ae5757, 0xd069b9b9, 0x91178686, 0x5899c1c1, 0x273a1d1d, 0xb9279e9e,
+ 0x38d9e1e1, 0x13ebf8f8, 0xb32b9898, 0x33221111, 0xbbd26969, 0x70a9d9d9, 0x89078e8e, 0xa7339494,
+ 0xb62d9b9b, 0x223c1e1e, 0x92158787, 0x20c9e9e9, 0x4987cece, 0xffaa5555, 0x78502828, 0x7aa5dfdf,
+ 0x8f038c8c, 0xf859a1a1, 0x80098989, 0x171a0d0d, 0xda65bfbf, 0x31d7e6e6, 0xc6844242, 0xb8d06868,
+ 0xc3824141, 0xb0299999, 0x775a2d2d, 0x111e0f0f, 0xcb7bb0b0, 0xfca85454, 0xd66dbbbb, 0x3a2c1616,
+}
+var te2 = [256]uint32{
+ 0x63a5c663, 0x7c84f87c, 0x7799ee77, 0x7b8df67b, 0xf20dfff2, 0x6bbdd66b, 0x6fb1de6f, 0xc55491c5,
+ 0x30506030, 0x01030201, 0x67a9ce67, 0x2b7d562b, 0xfe19e7fe, 0xd762b5d7, 0xabe64dab, 0x769aec76,
+ 0xca458fca, 0x829d1f82, 0xc94089c9, 0x7d87fa7d, 0xfa15effa, 0x59ebb259, 0x47c98e47, 0xf00bfbf0,
+ 0xadec41ad, 0xd467b3d4, 0xa2fd5fa2, 0xafea45af, 0x9cbf239c, 0xa4f753a4, 0x7296e472, 0xc05b9bc0,
+ 0xb7c275b7, 0xfd1ce1fd, 0x93ae3d93, 0x266a4c26, 0x365a6c36, 0x3f417e3f, 0xf702f5f7, 0xcc4f83cc,
+ 0x345c6834, 0xa5f451a5, 0xe534d1e5, 0xf108f9f1, 0x7193e271, 0xd873abd8, 0x31536231, 0x153f2a15,
+ 0x040c0804, 0xc75295c7, 0x23654623, 0xc35e9dc3, 0x18283018, 0x96a13796, 0x050f0a05, 0x9ab52f9a,
+ 0x07090e07, 0x12362412, 0x809b1b80, 0xe23ddfe2, 0xeb26cdeb, 0x27694e27, 0xb2cd7fb2, 0x759fea75,
+ 0x091b1209, 0x839e1d83, 0x2c74582c, 0x1a2e341a, 0x1b2d361b, 0x6eb2dc6e, 0x5aeeb45a, 0xa0fb5ba0,
+ 0x52f6a452, 0x3b4d763b, 0xd661b7d6, 0xb3ce7db3, 0x297b5229, 0xe33edde3, 0x2f715e2f, 0x84971384,
+ 0x53f5a653, 0xd168b9d1, 0x00000000, 0xed2cc1ed, 0x20604020, 0xfc1fe3fc, 0xb1c879b1, 0x5bedb65b,
+ 0x6abed46a, 0xcb468dcb, 0xbed967be, 0x394b7239, 0x4ade944a, 0x4cd4984c, 0x58e8b058, 0xcf4a85cf,
+ 0xd06bbbd0, 0xef2ac5ef, 0xaae54faa, 0xfb16edfb, 0x43c58643, 0x4dd79a4d, 0x33556633, 0x85941185,
+ 0x45cf8a45, 0xf910e9f9, 0x02060402, 0x7f81fe7f, 0x50f0a050, 0x3c44783c, 0x9fba259f, 0xa8e34ba8,
+ 0x51f3a251, 0xa3fe5da3, 0x40c08040, 0x8f8a058f, 0x92ad3f92, 0x9dbc219d, 0x38487038, 0xf504f1f5,
+ 0xbcdf63bc, 0xb6c177b6, 0xda75afda, 0x21634221, 0x10302010, 0xff1ae5ff, 0xf30efdf3, 0xd26dbfd2,
+ 0xcd4c81cd, 0x0c14180c, 0x13352613, 0xec2fc3ec, 0x5fe1be5f, 0x97a23597, 0x44cc8844, 0x17392e17,
+ 0xc45793c4, 0xa7f255a7, 0x7e82fc7e, 0x3d477a3d, 0x64acc864, 0x5de7ba5d, 0x192b3219, 0x7395e673,
+ 0x60a0c060, 0x81981981, 0x4fd19e4f, 0xdc7fa3dc, 0x22664422, 0x2a7e542a, 0x90ab3b90, 0x88830b88,
+ 0x46ca8c46, 0xee29c7ee, 0xb8d36bb8, 0x143c2814, 0xde79a7de, 0x5ee2bc5e, 0x0b1d160b, 0xdb76addb,
+ 0xe03bdbe0, 0x32566432, 0x3a4e743a, 0x0a1e140a, 0x49db9249, 0x060a0c06, 0x246c4824, 0x5ce4b85c,
+ 0xc25d9fc2, 0xd36ebdd3, 0xacef43ac, 0x62a6c462, 0x91a83991, 0x95a43195, 0xe437d3e4, 0x798bf279,
+ 0xe732d5e7, 0xc8438bc8, 0x37596e37, 0x6db7da6d, 0x8d8c018d, 0xd564b1d5, 0x4ed29c4e, 0xa9e049a9,
+ 0x6cb4d86c, 0x56faac56, 0xf407f3f4, 0xea25cfea, 0x65afca65, 0x7a8ef47a, 0xaee947ae, 0x08181008,
+ 0xbad56fba, 0x7888f078, 0x256f4a25, 0x2e725c2e, 0x1c24381c, 0xa6f157a6, 0xb4c773b4, 0xc65197c6,
+ 0xe823cbe8, 0xdd7ca1dd, 0x749ce874, 0x1f213e1f, 0x4bdd964b, 0xbddc61bd, 0x8b860d8b, 0x8a850f8a,
+ 0x7090e070, 0x3e427c3e, 0xb5c471b5, 0x66aacc66, 0x48d89048, 0x03050603, 0xf601f7f6, 0x0e121c0e,
+ 0x61a3c261, 0x355f6a35, 0x57f9ae57, 0xb9d069b9, 0x86911786, 0xc15899c1, 0x1d273a1d, 0x9eb9279e,
+ 0xe138d9e1, 0xf813ebf8, 0x98b32b98, 0x11332211, 0x69bbd269, 0xd970a9d9, 0x8e89078e, 0x94a73394,
+ 0x9bb62d9b, 0x1e223c1e, 0x87921587, 0xe920c9e9, 0xce4987ce, 0x55ffaa55, 0x28785028, 0xdf7aa5df,
+ 0x8c8f038c, 0xa1f859a1, 0x89800989, 0x0d171a0d, 0xbfda65bf, 0xe631d7e6, 0x42c68442, 0x68b8d068,
+ 0x41c38241, 0x99b02999, 0x2d775a2d, 0x0f111e0f, 0xb0cb7bb0, 0x54fca854, 0xbbd66dbb, 0x163a2c16,
+}
+var te3 = [256]uint32{
+ 0x6363a5c6, 0x7c7c84f8, 0x777799ee, 0x7b7b8df6, 0xf2f20dff, 0x6b6bbdd6, 0x6f6fb1de, 0xc5c55491,
+ 0x30305060, 0x01010302, 0x6767a9ce, 0x2b2b7d56, 0xfefe19e7, 0xd7d762b5, 0xababe64d, 0x76769aec,
+ 0xcaca458f, 0x82829d1f, 0xc9c94089, 0x7d7d87fa, 0xfafa15ef, 0x5959ebb2, 0x4747c98e, 0xf0f00bfb,
+ 0xadadec41, 0xd4d467b3, 0xa2a2fd5f, 0xafafea45, 0x9c9cbf23, 0xa4a4f753, 0x727296e4, 0xc0c05b9b,
+ 0xb7b7c275, 0xfdfd1ce1, 0x9393ae3d, 0x26266a4c, 0x36365a6c, 0x3f3f417e, 0xf7f702f5, 0xcccc4f83,
+ 0x34345c68, 0xa5a5f451, 0xe5e534d1, 0xf1f108f9, 0x717193e2, 0xd8d873ab, 0x31315362, 0x15153f2a,
+ 0x04040c08, 0xc7c75295, 0x23236546, 0xc3c35e9d, 0x18182830, 0x9696a137, 0x05050f0a, 0x9a9ab52f,
+ 0x0707090e, 0x12123624, 0x80809b1b, 0xe2e23ddf, 0xebeb26cd, 0x2727694e, 0xb2b2cd7f, 0x75759fea,
+ 0x09091b12, 0x83839e1d, 0x2c2c7458, 0x1a1a2e34, 0x1b1b2d36, 0x6e6eb2dc, 0x5a5aeeb4, 0xa0a0fb5b,
+ 0x5252f6a4, 0x3b3b4d76, 0xd6d661b7, 0xb3b3ce7d, 0x29297b52, 0xe3e33edd, 0x2f2f715e, 0x84849713,
+ 0x5353f5a6, 0xd1d168b9, 0x00000000, 0xeded2cc1, 0x20206040, 0xfcfc1fe3, 0xb1b1c879, 0x5b5bedb6,
+ 0x6a6abed4, 0xcbcb468d, 0xbebed967, 0x39394b72, 0x4a4ade94, 0x4c4cd498, 0x5858e8b0, 0xcfcf4a85,
+ 0xd0d06bbb, 0xefef2ac5, 0xaaaae54f, 0xfbfb16ed, 0x4343c586, 0x4d4dd79a, 0x33335566, 0x85859411,
+ 0x4545cf8a, 0xf9f910e9, 0x02020604, 0x7f7f81fe, 0x5050f0a0, 0x3c3c4478, 0x9f9fba25, 0xa8a8e34b,
+ 0x5151f3a2, 0xa3a3fe5d, 0x4040c080, 0x8f8f8a05, 0x9292ad3f, 0x9d9dbc21, 0x38384870, 0xf5f504f1,
+ 0xbcbcdf63, 0xb6b6c177, 0xdada75af, 0x21216342, 0x10103020, 0xffff1ae5, 0xf3f30efd, 0xd2d26dbf,
+ 0xcdcd4c81, 0x0c0c1418, 0x13133526, 0xecec2fc3, 0x5f5fe1be, 0x9797a235, 0x4444cc88, 0x1717392e,
+ 0xc4c45793, 0xa7a7f255, 0x7e7e82fc, 0x3d3d477a, 0x6464acc8, 0x5d5de7ba, 0x19192b32, 0x737395e6,
+ 0x6060a0c0, 0x81819819, 0x4f4fd19e, 0xdcdc7fa3, 0x22226644, 0x2a2a7e54, 0x9090ab3b, 0x8888830b,
+ 0x4646ca8c, 0xeeee29c7, 0xb8b8d36b, 0x14143c28, 0xdede79a7, 0x5e5ee2bc, 0x0b0b1d16, 0xdbdb76ad,
+ 0xe0e03bdb, 0x32325664, 0x3a3a4e74, 0x0a0a1e14, 0x4949db92, 0x06060a0c, 0x24246c48, 0x5c5ce4b8,
+ 0xc2c25d9f, 0xd3d36ebd, 0xacacef43, 0x6262a6c4, 0x9191a839, 0x9595a431, 0xe4e437d3, 0x79798bf2,
+ 0xe7e732d5, 0xc8c8438b, 0x3737596e, 0x6d6db7da, 0x8d8d8c01, 0xd5d564b1, 0x4e4ed29c, 0xa9a9e049,
+ 0x6c6cb4d8, 0x5656faac, 0xf4f407f3, 0xeaea25cf, 0x6565afca, 0x7a7a8ef4, 0xaeaee947, 0x08081810,
+ 0xbabad56f, 0x787888f0, 0x25256f4a, 0x2e2e725c, 0x1c1c2438, 0xa6a6f157, 0xb4b4c773, 0xc6c65197,
+ 0xe8e823cb, 0xdddd7ca1, 0x74749ce8, 0x1f1f213e, 0x4b4bdd96, 0xbdbddc61, 0x8b8b860d, 0x8a8a850f,
+ 0x707090e0, 0x3e3e427c, 0xb5b5c471, 0x6666aacc, 0x4848d890, 0x03030506, 0xf6f601f7, 0x0e0e121c,
+ 0x6161a3c2, 0x35355f6a, 0x5757f9ae, 0xb9b9d069, 0x86869117, 0xc1c15899, 0x1d1d273a, 0x9e9eb927,
+ 0xe1e138d9, 0xf8f813eb, 0x9898b32b, 0x11113322, 0x6969bbd2, 0xd9d970a9, 0x8e8e8907, 0x9494a733,
+ 0x9b9bb62d, 0x1e1e223c, 0x87879215, 0xe9e920c9, 0xcece4987, 0x5555ffaa, 0x28287850, 0xdfdf7aa5,
+ 0x8c8c8f03, 0xa1a1f859, 0x89898009, 0x0d0d171a, 0xbfbfda65, 0xe6e631d7, 0x4242c684, 0x6868b8d0,
+ 0x4141c382, 0x9999b029, 0x2d2d775a, 0x0f0f111e, 0xb0b0cb7b, 0x5454fca8, 0xbbbbd66d, 0x16163a2c,
+}
+
+// Lookup tables for decryption.
+// These can be recomputed by adapting the tests in aes_test.go.
+
+var td0 = [256]uint32{
+ 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96, 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
+ 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25, 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
+ 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1, 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
+ 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da, 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
+ 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd, 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
+ 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45, 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
+ 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7, 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
+ 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5, 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
+ 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1, 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
+ 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75, 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
+ 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46, 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
+ 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77, 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
+ 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000, 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
+ 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927, 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
+ 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e, 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
+ 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d, 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
+ 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd, 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
+ 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163, 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
+ 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d, 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
+ 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422, 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
+ 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36, 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
+ 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662, 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
+ 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3, 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
+ 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8, 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
+ 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6, 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
+ 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815, 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
+ 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df, 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
+ 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e, 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
+ 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89, 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
+ 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf, 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
+ 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f, 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
+ 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190, 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742,
+}
+var td1 = [256]uint32{
+ 0x5051f4a7, 0x537e4165, 0xc31a17a4, 0x963a275e, 0xcb3bab6b, 0xf11f9d45, 0xabacfa58, 0x934be303,
+ 0x552030fa, 0xf6ad766d, 0x9188cc76, 0x25f5024c, 0xfc4fe5d7, 0xd7c52acb, 0x80263544, 0x8fb562a3,
+ 0x49deb15a, 0x6725ba1b, 0x9845ea0e, 0xe15dfec0, 0x02c32f75, 0x12814cf0, 0xa38d4697, 0xc66bd3f9,
+ 0xe7038f5f, 0x9515929c, 0xebbf6d7a, 0xda955259, 0x2dd4be83, 0xd3587421, 0x2949e069, 0x448ec9c8,
+ 0x6a75c289, 0x78f48e79, 0x6b99583e, 0xdd27b971, 0xb6bee14f, 0x17f088ad, 0x66c920ac, 0xb47dce3a,
+ 0x1863df4a, 0x82e51a31, 0x60975133, 0x4562537f, 0xe0b16477, 0x84bb6bae, 0x1cfe81a0, 0x94f9082b,
+ 0x58704868, 0x198f45fd, 0x8794de6c, 0xb7527bf8, 0x23ab73d3, 0xe2724b02, 0x57e31f8f, 0x2a6655ab,
+ 0x07b2eb28, 0x032fb5c2, 0x9a86c57b, 0xa5d33708, 0xf2302887, 0xb223bfa5, 0xba02036a, 0x5ced1682,
+ 0x2b8acf1c, 0x92a779b4, 0xf0f307f2, 0xa14e69e2, 0xcd65daf4, 0xd50605be, 0x1fd13462, 0x8ac4a6fe,
+ 0x9d342e53, 0xa0a2f355, 0x32058ae1, 0x75a4f6eb, 0x390b83ec, 0xaa4060ef, 0x065e719f, 0x51bd6e10,
+ 0xf93e218a, 0x3d96dd06, 0xaedd3e05, 0x464de6bd, 0xb591548d, 0x0571c45d, 0x6f0406d4, 0xff605015,
+ 0x241998fb, 0x97d6bde9, 0xcc894043, 0x7767d99e, 0xbdb0e842, 0x8807898b, 0x38e7195b, 0xdb79c8ee,
+ 0x47a17c0a, 0xe97c420f, 0xc9f8841e, 0x00000000, 0x83098086, 0x48322bed, 0xac1e1170, 0x4e6c5a72,
+ 0xfbfd0eff, 0x560f8538, 0x1e3daed5, 0x27362d39, 0x640a0fd9, 0x21685ca6, 0xd19b5b54, 0x3a24362e,
+ 0xb10c0a67, 0x0f9357e7, 0xd2b4ee96, 0x9e1b9b91, 0x4f80c0c5, 0xa261dc20, 0x695a774b, 0x161c121a,
+ 0x0ae293ba, 0xe5c0a02a, 0x433c22e0, 0x1d121b17, 0x0b0e090d, 0xadf28bc7, 0xb92db6a8, 0xc8141ea9,
+ 0x8557f119, 0x4caf7507, 0xbbee99dd, 0xfda37f60, 0x9ff70126, 0xbc5c72f5, 0xc544663b, 0x345bfb7e,
+ 0x768b4329, 0xdccb23c6, 0x68b6edfc, 0x63b8e4f1, 0xcad731dc, 0x10426385, 0x40139722, 0x2084c611,
+ 0x7d854a24, 0xf8d2bb3d, 0x11aef932, 0x6dc729a1, 0x4b1d9e2f, 0xf3dcb230, 0xec0d8652, 0xd077c1e3,
+ 0x6c2bb316, 0x99a970b9, 0xfa119448, 0x2247e964, 0xc4a8fc8c, 0x1aa0f03f, 0xd8567d2c, 0xef223390,
+ 0xc787494e, 0xc1d938d1, 0xfe8ccaa2, 0x3698d40b, 0xcfa6f581, 0x28a57ade, 0x26dab78e, 0xa43fadbf,
+ 0xe42c3a9d, 0x0d507892, 0x9b6a5fcc, 0x62547e46, 0xc2f68d13, 0xe890d8b8, 0x5e2e39f7, 0xf582c3af,
+ 0xbe9f5d80, 0x7c69d093, 0xa96fd52d, 0xb3cf2512, 0x3bc8ac99, 0xa710187d, 0x6ee89c63, 0x7bdb3bbb,
+ 0x09cd2678, 0xf46e5918, 0x01ec9ab7, 0xa8834f9a, 0x65e6956e, 0x7eaaffe6, 0x0821bccf, 0xe6ef15e8,
+ 0xd9bae79b, 0xce4a6f36, 0xd4ea9f09, 0xd629b07c, 0xaf31a4b2, 0x312a3f23, 0x30c6a594, 0xc035a266,
+ 0x37744ebc, 0xa6fc82ca, 0xb0e090d0, 0x1533a7d8, 0x4af10498, 0xf741ecda, 0x0e7fcd50, 0x2f1791f6,
+ 0x8d764dd6, 0x4d43efb0, 0x54ccaa4d, 0xdfe49604, 0xe39ed1b5, 0x1b4c6a88, 0xb8c12c1f, 0x7f466551,
+ 0x049d5eea, 0x5d018c35, 0x73fa8774, 0x2efb0b41, 0x5ab3671d, 0x5292dbd2, 0x33e91056, 0x136dd647,
+ 0x8c9ad761, 0x7a37a10c, 0x8e59f814, 0x89eb133c, 0xeecea927, 0x35b761c9, 0xede11ce5, 0x3c7a47b1,
+ 0x599cd2df, 0x3f55f273, 0x791814ce, 0xbf73c737, 0xea53f7cd, 0x5b5ffdaa, 0x14df3d6f, 0x867844db,
+ 0x81caaff3, 0x3eb968c4, 0x2c382434, 0x5fc2a340, 0x72161dc3, 0x0cbce225, 0x8b283c49, 0x41ff0d95,
+ 0x7139a801, 0xde080cb3, 0x9cd8b4e4, 0x906456c1, 0x617bcb84, 0x70d532b6, 0x74486c5c, 0x42d0b857,
+}
+var td2 = [256]uint32{
+ 0xa75051f4, 0x65537e41, 0xa4c31a17, 0x5e963a27, 0x6bcb3bab, 0x45f11f9d, 0x58abacfa, 0x03934be3,
+ 0xfa552030, 0x6df6ad76, 0x769188cc, 0x4c25f502, 0xd7fc4fe5, 0xcbd7c52a, 0x44802635, 0xa38fb562,
+ 0x5a49deb1, 0x1b6725ba, 0x0e9845ea, 0xc0e15dfe, 0x7502c32f, 0xf012814c, 0x97a38d46, 0xf9c66bd3,
+ 0x5fe7038f, 0x9c951592, 0x7aebbf6d, 0x59da9552, 0x832dd4be, 0x21d35874, 0x692949e0, 0xc8448ec9,
+ 0x896a75c2, 0x7978f48e, 0x3e6b9958, 0x71dd27b9, 0x4fb6bee1, 0xad17f088, 0xac66c920, 0x3ab47dce,
+ 0x4a1863df, 0x3182e51a, 0x33609751, 0x7f456253, 0x77e0b164, 0xae84bb6b, 0xa01cfe81, 0x2b94f908,
+ 0x68587048, 0xfd198f45, 0x6c8794de, 0xf8b7527b, 0xd323ab73, 0x02e2724b, 0x8f57e31f, 0xab2a6655,
+ 0x2807b2eb, 0xc2032fb5, 0x7b9a86c5, 0x08a5d337, 0x87f23028, 0xa5b223bf, 0x6aba0203, 0x825ced16,
+ 0x1c2b8acf, 0xb492a779, 0xf2f0f307, 0xe2a14e69, 0xf4cd65da, 0xbed50605, 0x621fd134, 0xfe8ac4a6,
+ 0x539d342e, 0x55a0a2f3, 0xe132058a, 0xeb75a4f6, 0xec390b83, 0xefaa4060, 0x9f065e71, 0x1051bd6e,
+ 0x8af93e21, 0x063d96dd, 0x05aedd3e, 0xbd464de6, 0x8db59154, 0x5d0571c4, 0xd46f0406, 0x15ff6050,
+ 0xfb241998, 0xe997d6bd, 0x43cc8940, 0x9e7767d9, 0x42bdb0e8, 0x8b880789, 0x5b38e719, 0xeedb79c8,
+ 0x0a47a17c, 0x0fe97c42, 0x1ec9f884, 0x00000000, 0x86830980, 0xed48322b, 0x70ac1e11, 0x724e6c5a,
+ 0xfffbfd0e, 0x38560f85, 0xd51e3dae, 0x3927362d, 0xd9640a0f, 0xa621685c, 0x54d19b5b, 0x2e3a2436,
+ 0x67b10c0a, 0xe70f9357, 0x96d2b4ee, 0x919e1b9b, 0xc54f80c0, 0x20a261dc, 0x4b695a77, 0x1a161c12,
+ 0xba0ae293, 0x2ae5c0a0, 0xe0433c22, 0x171d121b, 0x0d0b0e09, 0xc7adf28b, 0xa8b92db6, 0xa9c8141e,
+ 0x198557f1, 0x074caf75, 0xddbbee99, 0x60fda37f, 0x269ff701, 0xf5bc5c72, 0x3bc54466, 0x7e345bfb,
+ 0x29768b43, 0xc6dccb23, 0xfc68b6ed, 0xf163b8e4, 0xdccad731, 0x85104263, 0x22401397, 0x112084c6,
+ 0x247d854a, 0x3df8d2bb, 0x3211aef9, 0xa16dc729, 0x2f4b1d9e, 0x30f3dcb2, 0x52ec0d86, 0xe3d077c1,
+ 0x166c2bb3, 0xb999a970, 0x48fa1194, 0x642247e9, 0x8cc4a8fc, 0x3f1aa0f0, 0x2cd8567d, 0x90ef2233,
+ 0x4ec78749, 0xd1c1d938, 0xa2fe8cca, 0x0b3698d4, 0x81cfa6f5, 0xde28a57a, 0x8e26dab7, 0xbfa43fad,
+ 0x9de42c3a, 0x920d5078, 0xcc9b6a5f, 0x4662547e, 0x13c2f68d, 0xb8e890d8, 0xf75e2e39, 0xaff582c3,
+ 0x80be9f5d, 0x937c69d0, 0x2da96fd5, 0x12b3cf25, 0x993bc8ac, 0x7da71018, 0x636ee89c, 0xbb7bdb3b,
+ 0x7809cd26, 0x18f46e59, 0xb701ec9a, 0x9aa8834f, 0x6e65e695, 0xe67eaaff, 0xcf0821bc, 0xe8e6ef15,
+ 0x9bd9bae7, 0x36ce4a6f, 0x09d4ea9f, 0x7cd629b0, 0xb2af31a4, 0x23312a3f, 0x9430c6a5, 0x66c035a2,
+ 0xbc37744e, 0xcaa6fc82, 0xd0b0e090, 0xd81533a7, 0x984af104, 0xdaf741ec, 0x500e7fcd, 0xf62f1791,
+ 0xd68d764d, 0xb04d43ef, 0x4d54ccaa, 0x04dfe496, 0xb5e39ed1, 0x881b4c6a, 0x1fb8c12c, 0x517f4665,
+ 0xea049d5e, 0x355d018c, 0x7473fa87, 0x412efb0b, 0x1d5ab367, 0xd25292db, 0x5633e910, 0x47136dd6,
+ 0x618c9ad7, 0x0c7a37a1, 0x148e59f8, 0x3c89eb13, 0x27eecea9, 0xc935b761, 0xe5ede11c, 0xb13c7a47,
+ 0xdf599cd2, 0x733f55f2, 0xce791814, 0x37bf73c7, 0xcdea53f7, 0xaa5b5ffd, 0x6f14df3d, 0xdb867844,
+ 0xf381caaf, 0xc43eb968, 0x342c3824, 0x405fc2a3, 0xc372161d, 0x250cbce2, 0x498b283c, 0x9541ff0d,
+ 0x017139a8, 0xb3de080c, 0xe49cd8b4, 0xc1906456, 0x84617bcb, 0xb670d532, 0x5c74486c, 0x5742d0b8,
+}
+var td3 = [256]uint32{
+ 0xf4a75051, 0x4165537e, 0x17a4c31a, 0x275e963a, 0xab6bcb3b, 0x9d45f11f, 0xfa58abac, 0xe303934b,
+ 0x30fa5520, 0x766df6ad, 0xcc769188, 0x024c25f5, 0xe5d7fc4f, 0x2acbd7c5, 0x35448026, 0x62a38fb5,
+ 0xb15a49de, 0xba1b6725, 0xea0e9845, 0xfec0e15d, 0x2f7502c3, 0x4cf01281, 0x4697a38d, 0xd3f9c66b,
+ 0x8f5fe703, 0x929c9515, 0x6d7aebbf, 0x5259da95, 0xbe832dd4, 0x7421d358, 0xe0692949, 0xc9c8448e,
+ 0xc2896a75, 0x8e7978f4, 0x583e6b99, 0xb971dd27, 0xe14fb6be, 0x88ad17f0, 0x20ac66c9, 0xce3ab47d,
+ 0xdf4a1863, 0x1a3182e5, 0x51336097, 0x537f4562, 0x6477e0b1, 0x6bae84bb, 0x81a01cfe, 0x082b94f9,
+ 0x48685870, 0x45fd198f, 0xde6c8794, 0x7bf8b752, 0x73d323ab, 0x4b02e272, 0x1f8f57e3, 0x55ab2a66,
+ 0xeb2807b2, 0xb5c2032f, 0xc57b9a86, 0x3708a5d3, 0x2887f230, 0xbfa5b223, 0x036aba02, 0x16825ced,
+ 0xcf1c2b8a, 0x79b492a7, 0x07f2f0f3, 0x69e2a14e, 0xdaf4cd65, 0x05bed506, 0x34621fd1, 0xa6fe8ac4,
+ 0x2e539d34, 0xf355a0a2, 0x8ae13205, 0xf6eb75a4, 0x83ec390b, 0x60efaa40, 0x719f065e, 0x6e1051bd,
+ 0x218af93e, 0xdd063d96, 0x3e05aedd, 0xe6bd464d, 0x548db591, 0xc45d0571, 0x06d46f04, 0x5015ff60,
+ 0x98fb2419, 0xbde997d6, 0x4043cc89, 0xd99e7767, 0xe842bdb0, 0x898b8807, 0x195b38e7, 0xc8eedb79,
+ 0x7c0a47a1, 0x420fe97c, 0x841ec9f8, 0x00000000, 0x80868309, 0x2bed4832, 0x1170ac1e, 0x5a724e6c,
+ 0x0efffbfd, 0x8538560f, 0xaed51e3d, 0x2d392736, 0x0fd9640a, 0x5ca62168, 0x5b54d19b, 0x362e3a24,
+ 0x0a67b10c, 0x57e70f93, 0xee96d2b4, 0x9b919e1b, 0xc0c54f80, 0xdc20a261, 0x774b695a, 0x121a161c,
+ 0x93ba0ae2, 0xa02ae5c0, 0x22e0433c, 0x1b171d12, 0x090d0b0e, 0x8bc7adf2, 0xb6a8b92d, 0x1ea9c814,
+ 0xf1198557, 0x75074caf, 0x99ddbbee, 0x7f60fda3, 0x01269ff7, 0x72f5bc5c, 0x663bc544, 0xfb7e345b,
+ 0x4329768b, 0x23c6dccb, 0xedfc68b6, 0xe4f163b8, 0x31dccad7, 0x63851042, 0x97224013, 0xc6112084,
+ 0x4a247d85, 0xbb3df8d2, 0xf93211ae, 0x29a16dc7, 0x9e2f4b1d, 0xb230f3dc, 0x8652ec0d, 0xc1e3d077,
+ 0xb3166c2b, 0x70b999a9, 0x9448fa11, 0xe9642247, 0xfc8cc4a8, 0xf03f1aa0, 0x7d2cd856, 0x3390ef22,
+ 0x494ec787, 0x38d1c1d9, 0xcaa2fe8c, 0xd40b3698, 0xf581cfa6, 0x7ade28a5, 0xb78e26da, 0xadbfa43f,
+ 0x3a9de42c, 0x78920d50, 0x5fcc9b6a, 0x7e466254, 0x8d13c2f6, 0xd8b8e890, 0x39f75e2e, 0xc3aff582,
+ 0x5d80be9f, 0xd0937c69, 0xd52da96f, 0x2512b3cf, 0xac993bc8, 0x187da710, 0x9c636ee8, 0x3bbb7bdb,
+ 0x267809cd, 0x5918f46e, 0x9ab701ec, 0x4f9aa883, 0x956e65e6, 0xffe67eaa, 0xbccf0821, 0x15e8e6ef,
+ 0xe79bd9ba, 0x6f36ce4a, 0x9f09d4ea, 0xb07cd629, 0xa4b2af31, 0x3f23312a, 0xa59430c6, 0xa266c035,
+ 0x4ebc3774, 0x82caa6fc, 0x90d0b0e0, 0xa7d81533, 0x04984af1, 0xecdaf741, 0xcd500e7f, 0x91f62f17,
+ 0x4dd68d76, 0xefb04d43, 0xaa4d54cc, 0x9604dfe4, 0xd1b5e39e, 0x6a881b4c, 0x2c1fb8c1, 0x65517f46,
+ 0x5eea049d, 0x8c355d01, 0x877473fa, 0x0b412efb, 0x671d5ab3, 0xdbd25292, 0x105633e9, 0xd647136d,
+ 0xd7618c9a, 0xa10c7a37, 0xf8148e59, 0x133c89eb, 0xa927eece, 0x61c935b7, 0x1ce5ede1, 0x47b13c7a,
+ 0xd2df599c, 0xf2733f55, 0x14ce7918, 0xc737bf73, 0xf7cdea53, 0xfdaa5b5f, 0x3d6f14df, 0x44db8678,
+ 0xaff381ca, 0x68c43eb9, 0x24342c38, 0xa3405fc2, 0x1dc37216, 0xe2250cbc, 0x3c498b28, 0x0d9541ff,
+ 0xa8017139, 0x0cb3de08, 0xb4e49cd8, 0x56c19064, 0xcb84617b, 0x32b670d5, 0x6c5c7448, 0xb85742d0,
+}
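+
+// The encryption tables fold SubBytes into MixColumns: with s = sbox0[x] and
+// multiplication in GF(2^8) mod poly, te0[x] packs {2·s, s, s, 3·s}
+// big-endian, and te1..te3 are successive byte rotations of te0. A sketch
+// that recomputes te0 (mul2, i.e. xtime, is a hypothetical helper):
+//
+//	func mul2(b byte) byte {
+//		if b&0x80 != 0 {
+//			return b<<1 ^ 0x1b // reduce mod poly
+//		}
+//		return b << 1
+//	}
+//
+//	for i, s := range sbox0 {
+//		s2 := mul2(s)
+//		want := uint32(s2)<<24 | uint32(s)<<16 | uint32(s)<<8 | uint32(s2^s)
+//		if te0[i] != want {
+//			panic("te0 mismatch")
+//		}
+//	}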
diff --git a/src/crypto/aes/ctr_s390x.go b/src/crypto/aes/ctr_s390x.go
new file mode 100644
index 0000000..0d3a58e
--- /dev/null
+++ b/src/crypto/aes/ctr_s390x.go
@@ -0,0 +1,84 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/alias"
+ "encoding/binary"
+)
+
+// Assert that aesCipherAsm implements the ctrAble interface.
+var _ ctrAble = (*aesCipherAsm)(nil)
+
+// xorBytes xors the contents of a and b and places the resulting values into
+// dst. If a and b are not the same length then the number of bytes processed
+// will be equal to the length of the shorter of the two. Returns the number
+// of bytes processed.
+//
+//go:noescape
+func xorBytes(dst, a, b []byte) int
+
+// streamBufferSize is the number of bytes of encrypted counter values to cache.
+const streamBufferSize = 32 * BlockSize
+
+type aesctr struct {
+ block *aesCipherAsm // block cipher
+ ctr [2]uint64 // next value of the counter (big endian)
+ buffer []byte // buffer for the encrypted counter values
+ storage [streamBufferSize]byte // array backing buffer slice
+}
+
+// NewCTR returns a Stream which encrypts/decrypts using the AES block
+// cipher in counter mode. The length of iv must be the same as BlockSize.
+func (c *aesCipherAsm) NewCTR(iv []byte) cipher.Stream {
+ if len(iv) != BlockSize {
+ panic("cipher.NewCTR: IV length must equal block size")
+ }
+ var ac aesctr
+ ac.block = c
+ ac.ctr[0] = binary.BigEndian.Uint64(iv[0:]) // high bits
+ ac.ctr[1] = binary.BigEndian.Uint64(iv[8:]) // low bits
+ ac.buffer = ac.storage[:0]
+ return &ac
+}
+
+func (c *aesctr) refill() {
+ // Fill up the buffer with an incrementing count.
+ c.buffer = c.storage[:streamBufferSize]
+ c0, c1 := c.ctr[0], c.ctr[1]
+ for i := 0; i < streamBufferSize; i += 16 {
+ binary.BigEndian.PutUint64(c.buffer[i+0:], c0)
+ binary.BigEndian.PutUint64(c.buffer[i+8:], c1)
+
+ // Increment in big endian: c0 is high, c1 is low.
+ c1++
+ if c1 == 0 {
+ // add carry
+ c0++
+ }
+ }
+ c.ctr[0], c.ctr[1] = c0, c1
+ // Encrypt the buffer using AES in ECB mode.
+ cryptBlocks(c.block.function, &c.block.key[0], &c.buffer[0], &c.buffer[0], streamBufferSize)
+}
+
+func (c *aesctr) XORKeyStream(dst, src []byte) {
+ if len(dst) < len(src) {
+ panic("crypto/cipher: output smaller than input")
+ }
+ if alias.InexactOverlap(dst[:len(src)], src) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+ for len(src) > 0 {
+ if len(c.buffer) == 0 {
+ c.refill()
+ }
+ n := xorBytes(dst, src, c.buffer)
+ c.buffer = c.buffer[n:]
+ src = src[n:]
+ dst = dst[n:]
+ }
+}
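+
+// The two uint64s in ctr form a single 128-bit big-endian counter; the
+// increment inside refill is equivalent to this sketch:
+//
+//	hi, lo := c.ctr[0], c.ctr[1]
+//	lo++
+//	if lo == 0 {
+//		hi++ // carry from the low into the high 64 bits
+//	}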
diff --git a/src/crypto/aes/gcm_amd64.s b/src/crypto/aes/gcm_amd64.s
new file mode 100644
index 0000000..e6eedf3
--- /dev/null
+++ b/src/crypto/aes/gcm_amd64.s
@@ -0,0 +1,1286 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is an optimized implementation of AES-GCM using AES-NI and CLMUL-NI.
+// The implementation uses the optimizations described in:
+// [1] Gueron, S., Kounavis, M.E.: Intel® Carry-Less Multiplication
+// Instruction and its Usage for Computing the GCM Mode rev. 2.02
+// [2] Gueron, S., Krasnov, V.: Speeding up Counter Mode in Software and
+// Hardware
+
+#include "textflag.h"
+
+#define B0 X0
+#define B1 X1
+#define B2 X2
+#define B3 X3
+#define B4 X4
+#define B5 X5
+#define B6 X6
+#define B7 X7
+
+#define ACC0 X8
+#define ACC1 X9
+#define ACCM X10
+
+#define T0 X11
+#define T1 X12
+#define T2 X13
+#define POLY X14
+#define BSWAP X15
+
+DATA bswapMask<>+0x00(SB)/8, $0x08090a0b0c0d0e0f
+DATA bswapMask<>+0x08(SB)/8, $0x0001020304050607
+
+DATA gcmPoly<>+0x00(SB)/8, $0x0000000000000001
+DATA gcmPoly<>+0x08(SB)/8, $0xc200000000000000
+
+DATA andMask<>+0x00(SB)/8, $0x00000000000000ff
+DATA andMask<>+0x08(SB)/8, $0x0000000000000000
+DATA andMask<>+0x10(SB)/8, $0x000000000000ffff
+DATA andMask<>+0x18(SB)/8, $0x0000000000000000
+DATA andMask<>+0x20(SB)/8, $0x0000000000ffffff
+DATA andMask<>+0x28(SB)/8, $0x0000000000000000
+DATA andMask<>+0x30(SB)/8, $0x00000000ffffffff
+DATA andMask<>+0x38(SB)/8, $0x0000000000000000
+DATA andMask<>+0x40(SB)/8, $0x000000ffffffffff
+DATA andMask<>+0x48(SB)/8, $0x0000000000000000
+DATA andMask<>+0x50(SB)/8, $0x0000ffffffffffff
+DATA andMask<>+0x58(SB)/8, $0x0000000000000000
+DATA andMask<>+0x60(SB)/8, $0x00ffffffffffffff
+DATA andMask<>+0x68(SB)/8, $0x0000000000000000
+DATA andMask<>+0x70(SB)/8, $0xffffffffffffffff
+DATA andMask<>+0x78(SB)/8, $0x0000000000000000
+DATA andMask<>+0x80(SB)/8, $0xffffffffffffffff
+DATA andMask<>+0x88(SB)/8, $0x00000000000000ff
+DATA andMask<>+0x90(SB)/8, $0xffffffffffffffff
+DATA andMask<>+0x98(SB)/8, $0x000000000000ffff
+DATA andMask<>+0xa0(SB)/8, $0xffffffffffffffff
+DATA andMask<>+0xa8(SB)/8, $0x0000000000ffffff
+DATA andMask<>+0xb0(SB)/8, $0xffffffffffffffff
+DATA andMask<>+0xb8(SB)/8, $0x00000000ffffffff
+DATA andMask<>+0xc0(SB)/8, $0xffffffffffffffff
+DATA andMask<>+0xc8(SB)/8, $0x000000ffffffffff
+DATA andMask<>+0xd0(SB)/8, $0xffffffffffffffff
+DATA andMask<>+0xd8(SB)/8, $0x0000ffffffffffff
+DATA andMask<>+0xe0(SB)/8, $0xffffffffffffffff
+DATA andMask<>+0xe8(SB)/8, $0x00ffffffffffffff
+
+GLOBL bswapMask<>(SB), (NOPTR+RODATA), $16
+GLOBL gcmPoly<>(SB), (NOPTR+RODATA), $16
+GLOBL andMask<>(SB), (NOPTR+RODATA), $240
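+
+// andMask holds 15 16-byte masks; entry i preserves the low i+1 bytes of a
+// block and is used to zero the unused tail of a partial final block.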
+
+// func gcmAesFinish(productTable *[256]byte, tagMask, T *[16]byte, pLen, dLen uint64)
+TEXT ·gcmAesFinish(SB),NOSPLIT,$0
+#define pTbl DI
+#define tMsk SI
+#define tPtr DX
+#define plen AX
+#define dlen CX
+
+ MOVQ productTable+0(FP), pTbl
+ MOVQ tagMask+8(FP), tMsk
+ MOVQ T+16(FP), tPtr
+ MOVQ pLen+24(FP), plen
+ MOVQ dLen+32(FP), dlen
+
+ MOVOU (tPtr), ACC0
+ MOVOU (tMsk), T2
+
+ MOVOU bswapMask<>(SB), BSWAP
+ MOVOU gcmPoly<>(SB), POLY
+
+ SHLQ $3, plen
+ SHLQ $3, dlen
+
+ MOVQ plen, B0
+ PINSRQ $1, dlen, B0
+
+ PXOR ACC0, B0
+
+ MOVOU (16*14)(pTbl), ACC0
+ MOVOU (16*15)(pTbl), ACCM
+ MOVOU ACC0, ACC1
+
+ PCLMULQDQ $0x00, B0, ACC0
+ PCLMULQDQ $0x11, B0, ACC1
+ PSHUFD $78, B0, T0
+ PXOR B0, T0
+ PCLMULQDQ $0x00, T0, ACCM
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+
+ MOVOU POLY, T0
+ PCLMULQDQ $0x01, ACC0, T0
+ PSHUFD $78, ACC0, ACC0
+ PXOR T0, ACC0
+
+ MOVOU POLY, T0
+ PCLMULQDQ $0x01, ACC0, T0
+ PSHUFD $78, ACC0, ACC0
+ PXOR T0, ACC0
+
+ PXOR ACC1, ACC0
+
+ PSHUFB BSWAP, ACC0
+ PXOR T2, ACC0
+ MOVOU ACC0, (tPtr)
+
+ RET
+#undef pTbl
+#undef tMsk
+#undef tPtr
+#undef plen
+#undef dlen
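+
+// gcmAesFinish implements the final GHASH step of NIST SP 800-38D: the bit
+// lengths of the additional data and the plaintext are hashed in as a last
+// 128-bit block, and the result is XORed with the tag mask (the encrypted
+// pre-counter block) to form the tag.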
+
+// func gcmAesInit(productTable *[256]byte, ks []uint32)
+TEXT ·gcmAesInit(SB),NOSPLIT,$0
+#define dst DI
+#define KS SI
+#define NR DX
+
+ MOVQ productTable+0(FP), dst
+ MOVQ ks_base+8(FP), KS
+ MOVQ ks_len+16(FP), NR
+
+ SHRQ $2, NR
+ DECQ NR
+
+ MOVOU bswapMask<>(SB), BSWAP
+ MOVOU gcmPoly<>(SB), POLY
+
+ // Encrypt the all-zero block with the AES key to generate the hash key H
+ MOVOU (16*0)(KS), B0
+ MOVOU (16*1)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*2)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*3)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*4)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*5)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*6)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*7)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*8)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*9)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*10)(KS), T0
+ CMPQ NR, $12
+ JB initEncLast
+ AESENC T0, B0
+ MOVOU (16*11)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*12)(KS), T0
+ JE initEncLast
+ AESENC T0, B0
+ MOVOU (16*13)(KS), T0
+ AESENC T0, B0
+ MOVOU (16*14)(KS), T0
+initEncLast:
+ AESENCLAST T0, B0
+
+ PSHUFB BSWAP, B0
+	// Multiply H by 2 modulo the GCM polynomial
+ PSHUFD $0xff, B0, T0
+ MOVOU B0, T1
+ PSRAL $31, T0
+ PAND POLY, T0
+ PSRLL $31, T1
+ PSLLDQ $4, T1
+ PSLLL $1, B0
+ PXOR T0, B0
+ PXOR T1, B0
+ // Karatsuba pre-computations
+ MOVOU B0, (16*14)(dst)
+ PSHUFD $78, B0, B1
+ PXOR B0, B1
+ MOVOU B1, (16*15)(dst)
+
+ MOVOU B0, B2
+ MOVOU B1, B3
+ // Now prepare powers of H and pre-computations for them
+ MOVQ $7, AX
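+	// Each iteration multiplies the running power by H and stores it, with
+	// its Karatsuba precomputation, at decreasing offsets: H^8 ends up at
+	// offset 0 and H itself at offset 16*14.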
+
+initLoop:
+ MOVOU B2, T0
+ MOVOU B2, T1
+ MOVOU B3, T2
+ PCLMULQDQ $0x00, B0, T0
+ PCLMULQDQ $0x11, B0, T1
+ PCLMULQDQ $0x00, B1, T2
+
+ PXOR T0, T2
+ PXOR T1, T2
+ MOVOU T2, B4
+ PSLLDQ $8, B4
+ PSRLDQ $8, T2
+ PXOR B4, T0
+ PXOR T2, T1
+
+ MOVOU POLY, B2
+ PCLMULQDQ $0x01, T0, B2
+ PSHUFD $78, T0, T0
+ PXOR B2, T0
+ MOVOU POLY, B2
+ PCLMULQDQ $0x01, T0, B2
+ PSHUFD $78, T0, T0
+ PXOR T0, B2
+ PXOR T1, B2
+
+ MOVOU B2, (16*12)(dst)
+ PSHUFD $78, B2, B3
+ PXOR B2, B3
+ MOVOU B3, (16*13)(dst)
+
+ DECQ AX
+ LEAQ (-16*2)(dst), dst
+ JNE initLoop
+
+ RET
+#undef NR
+#undef KS
+#undef dst
+
+// func gcmAesData(productTable *[256]byte, data []byte, T *[16]byte)
+TEXT ·gcmAesData(SB),NOSPLIT,$0
+#define pTbl DI
+#define aut SI
+#define tPtr CX
+#define autLen DX
+
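+// reduceRound performs one of the two folding steps that reduce a 256-bit
+// GHASH product modulo the GCM polynomial. mulRoundAAD multiplies block X
+// by the power of H stored at table entry i (the table holds H^8 down to
+// H^1) and accumulates the Karatsuba partial products in ACC0/ACC1/ACCM.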
+#define reduceRound(a) MOVOU POLY, T0; PCLMULQDQ $0x01, a, T0; PSHUFD $78, a, a; PXOR T0, a
+#define mulRoundAAD(X, i) \
+ MOVOU (16*(i*2))(pTbl), T1;\
+ MOVOU T1, T2;\
+ PCLMULQDQ $0x00, X, T1;\
+ PXOR T1, ACC0;\
+ PCLMULQDQ $0x11, X, T2;\
+ PXOR T2, ACC1;\
+ PSHUFD $78, X, T1;\
+ PXOR T1, X;\
+ MOVOU (16*(i*2+1))(pTbl), T1;\
+ PCLMULQDQ $0x00, X, T1;\
+ PXOR T1, ACCM
+
+ MOVQ productTable+0(FP), pTbl
+ MOVQ data_base+8(FP), aut
+ MOVQ data_len+16(FP), autLen
+ MOVQ T+32(FP), tPtr
+
+ PXOR ACC0, ACC0
+ MOVOU bswapMask<>(SB), BSWAP
+ MOVOU gcmPoly<>(SB), POLY
+
+ TESTQ autLen, autLen
+ JEQ dataBail
+
+	CMPQ autLen, $13	// optimize for the common TLS case: 13 bytes of AAD
+ JE dataTLS
+ CMPQ autLen, $128
+ JB startSinglesLoop
+ JMP dataOctaLoop
+
+dataTLS:
+ MOVOU (16*14)(pTbl), T1
+ MOVOU (16*15)(pTbl), T2
+ PXOR B0, B0
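+	// Load exactly 13 bytes of AAD: 8 + 4 + 1.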
+ MOVQ (aut), B0
+ PINSRD $2, 8(aut), B0
+ PINSRB $12, 12(aut), B0
+ XORQ autLen, autLen
+ JMP dataMul
+
+dataOctaLoop:
+ CMPQ autLen, $128
+ JB startSinglesLoop
+ SUBQ $128, autLen
+
+ MOVOU (16*0)(aut), X0
+ MOVOU (16*1)(aut), X1
+ MOVOU (16*2)(aut), X2
+ MOVOU (16*3)(aut), X3
+ MOVOU (16*4)(aut), X4
+ MOVOU (16*5)(aut), X5
+ MOVOU (16*6)(aut), X6
+ MOVOU (16*7)(aut), X7
+ LEAQ (16*8)(aut), aut
+ PSHUFB BSWAP, X0
+ PSHUFB BSWAP, X1
+ PSHUFB BSWAP, X2
+ PSHUFB BSWAP, X3
+ PSHUFB BSWAP, X4
+ PSHUFB BSWAP, X5
+ PSHUFB BSWAP, X6
+ PSHUFB BSWAP, X7
+ PXOR ACC0, X0
+
+ MOVOU (16*0)(pTbl), ACC0
+ MOVOU (16*1)(pTbl), ACCM
+ MOVOU ACC0, ACC1
+ PSHUFD $78, X0, T1
+ PXOR X0, T1
+ PCLMULQDQ $0x00, X0, ACC0
+ PCLMULQDQ $0x11, X0, ACC1
+ PCLMULQDQ $0x00, T1, ACCM
+
+ mulRoundAAD(X1, 1)
+ mulRoundAAD(X2, 2)
+ mulRoundAAD(X3, 3)
+ mulRoundAAD(X4, 4)
+ mulRoundAAD(X5, 5)
+ mulRoundAAD(X6, 6)
+ mulRoundAAD(X7, 7)
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+ reduceRound(ACC0)
+ reduceRound(ACC0)
+ PXOR ACC1, ACC0
+ JMP dataOctaLoop
+
+startSinglesLoop:
+ MOVOU (16*14)(pTbl), T1
+ MOVOU (16*15)(pTbl), T2
+
+dataSinglesLoop:
+
+ CMPQ autLen, $16
+ JB dataEnd
+ SUBQ $16, autLen
+
+ MOVOU (aut), B0
+dataMul:
+ PSHUFB BSWAP, B0
+ PXOR ACC0, B0
+
+ MOVOU T1, ACC0
+ MOVOU T2, ACCM
+ MOVOU T1, ACC1
+
+ PSHUFD $78, B0, T0
+ PXOR B0, T0
+ PCLMULQDQ $0x00, B0, ACC0
+ PCLMULQDQ $0x11, B0, ACC1
+ PCLMULQDQ $0x00, T0, ACCM
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+
+ MOVOU POLY, T0
+ PCLMULQDQ $0x01, ACC0, T0
+ PSHUFD $78, ACC0, ACC0
+ PXOR T0, ACC0
+
+ MOVOU POLY, T0
+ PCLMULQDQ $0x01, ACC0, T0
+ PSHUFD $78, ACC0, ACC0
+ PXOR T0, ACC0
+ PXOR ACC1, ACC0
+
+ LEAQ 16(aut), aut
+
+ JMP dataSinglesLoop
+
+dataEnd:
+
+ TESTQ autLen, autLen
+ JEQ dataBail
+
+ PXOR B0, B0
+ LEAQ -1(aut)(autLen*1), aut
+
+dataLoadLoop:
+
+ PSLLDQ $1, B0
+ PINSRB $0, (aut), B0
+
+ LEAQ -1(aut), aut
+ DECQ autLen
+ JNE dataLoadLoop
+
+ JMP dataMul
+
+dataBail:
+ MOVOU ACC0, (tPtr)
+ RET
+#undef pTbl
+#undef aut
+#undef tPtr
+#undef autLen
+
+// func gcmAesEnc(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
+TEXT ·gcmAesEnc(SB),0,$256-96
+#define pTbl DI
+#define ctx DX
+#define ctrPtr CX
+#define ptx SI
+#define ks AX
+#define tPtr R8
+#define ptxLen R9
+#define aluCTR R10
+#define aluTMP R11
+#define aluK R12
+#define NR R13
+
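+// The counter blocks kept on the stack are pre-XORed with the first round
+// key. increment bumps the 32-bit big-endian counter in a GPR, re-applies
+// the key XOR (BSWAP distributes over XOR), and writes back only the final
+// 4 bytes (the counter word) of block i.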
+#define increment(i) ADDL $1, aluCTR; MOVL aluCTR, aluTMP; XORL aluK, aluTMP; BSWAPL aluTMP; MOVL aluTMP, (3*4 + 8*16 + i*16)(SP)
+#define aesRnd(k) AESENC k, B0; AESENC k, B1; AESENC k, B2; AESENC k, B3; AESENC k, B4; AESENC k, B5; AESENC k, B6; AESENC k, B7
+#define aesRound(i) MOVOU (16*i)(ks), T0;AESENC T0, B0; AESENC T0, B1; AESENC T0, B2; AESENC T0, B3; AESENC T0, B4; AESENC T0, B5; AESENC T0, B6; AESENC T0, B7
+#define aesRndLast(k) AESENCLAST k, B0; AESENCLAST k, B1; AESENCLAST k, B2; AESENCLAST k, B3; AESENCLAST k, B4; AESENCLAST k, B5; AESENCLAST k, B6; AESENCLAST k, B7
+#define combinedRound(i) \
+ MOVOU (16*i)(ks), T0;\
+ AESENC T0, B0;\
+ AESENC T0, B1;\
+ AESENC T0, B2;\
+ AESENC T0, B3;\
+ MOVOU (16*(i*2))(pTbl), T1;\
+ MOVOU T1, T2;\
+ AESENC T0, B4;\
+ AESENC T0, B5;\
+ AESENC T0, B6;\
+ AESENC T0, B7;\
+ MOVOU (16*i)(SP), T0;\
+ PCLMULQDQ $0x00, T0, T1;\
+ PXOR T1, ACC0;\
+ PSHUFD $78, T0, T1;\
+ PCLMULQDQ $0x11, T0, T2;\
+ PXOR T1, T0;\
+ PXOR T2, ACC1;\
+ MOVOU (16*(i*2+1))(pTbl), T2;\
+ PCLMULQDQ $0x00, T2, T0;\
+ PXOR T0, ACCM
+#define mulRound(i) \
+ MOVOU (16*i)(SP), T0;\
+ MOVOU (16*(i*2))(pTbl), T1;\
+ MOVOU T1, T2;\
+ PCLMULQDQ $0x00, T0, T1;\
+ PXOR T1, ACC0;\
+ PCLMULQDQ $0x11, T0, T2;\
+ PXOR T2, ACC1;\
+ PSHUFD $78, T0, T1;\
+ PXOR T1, T0;\
+ MOVOU (16*(i*2+1))(pTbl), T1;\
+ PCLMULQDQ $0x00, T0, T1;\
+ PXOR T1, ACCM
+
+ MOVQ productTable+0(FP), pTbl
+ MOVQ dst+8(FP), ctx
+ MOVQ src_base+32(FP), ptx
+ MOVQ src_len+40(FP), ptxLen
+ MOVQ ctr+56(FP), ctrPtr
+ MOVQ T+64(FP), tPtr
+ MOVQ ks_base+72(FP), ks
+ MOVQ ks_len+80(FP), NR
+
+ SHRQ $2, NR
+ DECQ NR
+
+ MOVOU bswapMask<>(SB), BSWAP
+ MOVOU gcmPoly<>(SB), POLY
+
+ MOVOU (tPtr), ACC0
+ PXOR ACC1, ACC1
+ PXOR ACCM, ACCM
+ MOVOU (ctrPtr), B0
+ MOVL (3*4)(ctrPtr), aluCTR
+ MOVOU (ks), T0
+ MOVL (3*4)(ks), aluK
+ BSWAPL aluCTR
+ BSWAPL aluK
+
+ PXOR B0, T0
+ MOVOU T0, (8*16 + 0*16)(SP)
+ increment(0)
+
+ CMPQ ptxLen, $128
+ JB gcmAesEncSingles
+ SUBQ $128, ptxLen
+
+	// We have at least 8 blocks to encrypt; prepare the remaining counters
+ MOVOU T0, (8*16 + 1*16)(SP)
+ increment(1)
+ MOVOU T0, (8*16 + 2*16)(SP)
+ increment(2)
+ MOVOU T0, (8*16 + 3*16)(SP)
+ increment(3)
+ MOVOU T0, (8*16 + 4*16)(SP)
+ increment(4)
+ MOVOU T0, (8*16 + 5*16)(SP)
+ increment(5)
+ MOVOU T0, (8*16 + 6*16)(SP)
+ increment(6)
+ MOVOU T0, (8*16 + 7*16)(SP)
+ increment(7)
+
+ MOVOU (8*16 + 0*16)(SP), B0
+ MOVOU (8*16 + 1*16)(SP), B1
+ MOVOU (8*16 + 2*16)(SP), B2
+ MOVOU (8*16 + 3*16)(SP), B3
+ MOVOU (8*16 + 4*16)(SP), B4
+ MOVOU (8*16 + 5*16)(SP), B5
+ MOVOU (8*16 + 6*16)(SP), B6
+ MOVOU (8*16 + 7*16)(SP), B7
+
+ aesRound(1)
+ increment(0)
+ aesRound(2)
+ increment(1)
+ aesRound(3)
+ increment(2)
+ aesRound(4)
+ increment(3)
+ aesRound(5)
+ increment(4)
+ aesRound(6)
+ increment(5)
+ aesRound(7)
+ increment(6)
+ aesRound(8)
+ increment(7)
+ aesRound(9)
+ MOVOU (16*10)(ks), T0
+ CMPQ NR, $12
+ JB encLast1
+ aesRnd(T0)
+ aesRound(11)
+ MOVOU (16*12)(ks), T0
+ JE encLast1
+ aesRnd(T0)
+ aesRound(13)
+ MOVOU (16*14)(ks), T0
+encLast1:
+ aesRndLast(T0)
+
+ MOVOU (16*0)(ptx), T0
+ PXOR T0, B0
+ MOVOU (16*1)(ptx), T0
+ PXOR T0, B1
+ MOVOU (16*2)(ptx), T0
+ PXOR T0, B2
+ MOVOU (16*3)(ptx), T0
+ PXOR T0, B3
+ MOVOU (16*4)(ptx), T0
+ PXOR T0, B4
+ MOVOU (16*5)(ptx), T0
+ PXOR T0, B5
+ MOVOU (16*6)(ptx), T0
+ PXOR T0, B6
+ MOVOU (16*7)(ptx), T0
+ PXOR T0, B7
+
+ MOVOU B0, (16*0)(ctx)
+ PSHUFB BSWAP, B0
+ PXOR ACC0, B0
+ MOVOU B1, (16*1)(ctx)
+ PSHUFB BSWAP, B1
+ MOVOU B2, (16*2)(ctx)
+ PSHUFB BSWAP, B2
+ MOVOU B3, (16*3)(ctx)
+ PSHUFB BSWAP, B3
+ MOVOU B4, (16*4)(ctx)
+ PSHUFB BSWAP, B4
+ MOVOU B5, (16*5)(ctx)
+ PSHUFB BSWAP, B5
+ MOVOU B6, (16*6)(ctx)
+ PSHUFB BSWAP, B6
+ MOVOU B7, (16*7)(ctx)
+ PSHUFB BSWAP, B7
+
+ MOVOU B0, (16*0)(SP)
+ MOVOU B1, (16*1)(SP)
+ MOVOU B2, (16*2)(SP)
+ MOVOU B3, (16*3)(SP)
+ MOVOU B4, (16*4)(SP)
+ MOVOU B5, (16*5)(SP)
+ MOVOU B6, (16*6)(SP)
+ MOVOU B7, (16*7)(SP)
+
+ LEAQ 128(ptx), ptx
+ LEAQ 128(ctx), ctx
+
+gcmAesEncOctetsLoop:
+
+ CMPQ ptxLen, $128
+ JB gcmAesEncOctetsEnd
+ SUBQ $128, ptxLen
+
+ MOVOU (8*16 + 0*16)(SP), B0
+ MOVOU (8*16 + 1*16)(SP), B1
+ MOVOU (8*16 + 2*16)(SP), B2
+ MOVOU (8*16 + 3*16)(SP), B3
+ MOVOU (8*16 + 4*16)(SP), B4
+ MOVOU (8*16 + 5*16)(SP), B5
+ MOVOU (8*16 + 6*16)(SP), B6
+ MOVOU (8*16 + 7*16)(SP), B7
+
+ MOVOU (16*0)(SP), T0
+ PSHUFD $78, T0, T1
+ PXOR T0, T1
+
+ MOVOU (16*0)(pTbl), ACC0
+ MOVOU (16*1)(pTbl), ACCM
+ MOVOU ACC0, ACC1
+
+ PCLMULQDQ $0x00, T1, ACCM
+ PCLMULQDQ $0x00, T0, ACC0
+ PCLMULQDQ $0x11, T0, ACC1
+
+ combinedRound(1)
+ increment(0)
+ combinedRound(2)
+ increment(1)
+ combinedRound(3)
+ increment(2)
+ combinedRound(4)
+ increment(3)
+ combinedRound(5)
+ increment(4)
+ combinedRound(6)
+ increment(5)
+ combinedRound(7)
+ increment(6)
+
+ aesRound(8)
+ increment(7)
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+
+ reduceRound(ACC0)
+ aesRound(9)
+
+ reduceRound(ACC0)
+ PXOR ACC1, ACC0
+
+ MOVOU (16*10)(ks), T0
+ CMPQ NR, $12
+ JB encLast2
+ aesRnd(T0)
+ aesRound(11)
+ MOVOU (16*12)(ks), T0
+ JE encLast2
+ aesRnd(T0)
+ aesRound(13)
+ MOVOU (16*14)(ks), T0
+encLast2:
+ aesRndLast(T0)
+
+ MOVOU (16*0)(ptx), T0
+ PXOR T0, B0
+ MOVOU (16*1)(ptx), T0
+ PXOR T0, B1
+ MOVOU (16*2)(ptx), T0
+ PXOR T0, B2
+ MOVOU (16*3)(ptx), T0
+ PXOR T0, B3
+ MOVOU (16*4)(ptx), T0
+ PXOR T0, B4
+ MOVOU (16*5)(ptx), T0
+ PXOR T0, B5
+ MOVOU (16*6)(ptx), T0
+ PXOR T0, B6
+ MOVOU (16*7)(ptx), T0
+ PXOR T0, B7
+
+ MOVOU B0, (16*0)(ctx)
+ PSHUFB BSWAP, B0
+ PXOR ACC0, B0
+ MOVOU B1, (16*1)(ctx)
+ PSHUFB BSWAP, B1
+ MOVOU B2, (16*2)(ctx)
+ PSHUFB BSWAP, B2
+ MOVOU B3, (16*3)(ctx)
+ PSHUFB BSWAP, B3
+ MOVOU B4, (16*4)(ctx)
+ PSHUFB BSWAP, B4
+ MOVOU B5, (16*5)(ctx)
+ PSHUFB BSWAP, B5
+ MOVOU B6, (16*6)(ctx)
+ PSHUFB BSWAP, B6
+ MOVOU B7, (16*7)(ctx)
+ PSHUFB BSWAP, B7
+
+ MOVOU B0, (16*0)(SP)
+ MOVOU B1, (16*1)(SP)
+ MOVOU B2, (16*2)(SP)
+ MOVOU B3, (16*3)(SP)
+ MOVOU B4, (16*4)(SP)
+ MOVOU B5, (16*5)(SP)
+ MOVOU B6, (16*6)(SP)
+ MOVOU B7, (16*7)(SP)
+
+ LEAQ 128(ptx), ptx
+ LEAQ 128(ctx), ctx
+
+ JMP gcmAesEncOctetsLoop
+
+gcmAesEncOctetsEnd:
+
+ MOVOU (16*0)(SP), T0
+ MOVOU (16*0)(pTbl), ACC0
+ MOVOU (16*1)(pTbl), ACCM
+ MOVOU ACC0, ACC1
+ PSHUFD $78, T0, T1
+ PXOR T0, T1
+ PCLMULQDQ $0x00, T0, ACC0
+ PCLMULQDQ $0x11, T0, ACC1
+ PCLMULQDQ $0x00, T1, ACCM
+
+ mulRound(1)
+ mulRound(2)
+ mulRound(3)
+ mulRound(4)
+ mulRound(5)
+ mulRound(6)
+ mulRound(7)
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+
+ reduceRound(ACC0)
+ reduceRound(ACC0)
+ PXOR ACC1, ACC0
+
+ TESTQ ptxLen, ptxLen
+ JE gcmAesEncDone
+
+ SUBQ $7, aluCTR
+
+gcmAesEncSingles:
+
+ MOVOU (16*1)(ks), B1
+ MOVOU (16*2)(ks), B2
+ MOVOU (16*3)(ks), B3
+ MOVOU (16*4)(ks), B4
+ MOVOU (16*5)(ks), B5
+ MOVOU (16*6)(ks), B6
+ MOVOU (16*7)(ks), B7
+
+ MOVOU (16*14)(pTbl), T2
+
+gcmAesEncSinglesLoop:
+
+ CMPQ ptxLen, $16
+ JB gcmAesEncTail
+ SUBQ $16, ptxLen
+
+ MOVOU (8*16 + 0*16)(SP), B0
+ increment(0)
+
+ AESENC B1, B0
+ AESENC B2, B0
+ AESENC B3, B0
+ AESENC B4, B0
+ AESENC B5, B0
+ AESENC B6, B0
+ AESENC B7, B0
+ MOVOU (16*8)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*9)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*10)(ks), T0
+ CMPQ NR, $12
+ JB encLast3
+ AESENC T0, B0
+ MOVOU (16*11)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*12)(ks), T0
+ JE encLast3
+ AESENC T0, B0
+ MOVOU (16*13)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*14)(ks), T0
+encLast3:
+ AESENCLAST T0, B0
+
+ MOVOU (ptx), T0
+ PXOR T0, B0
+ MOVOU B0, (ctx)
+
+ PSHUFB BSWAP, B0
+ PXOR ACC0, B0
+
+ MOVOU T2, ACC0
+ MOVOU T2, ACC1
+ MOVOU (16*15)(pTbl), ACCM
+
+ PSHUFD $78, B0, T0
+ PXOR B0, T0
+ PCLMULQDQ $0x00, B0, ACC0
+ PCLMULQDQ $0x11, B0, ACC1
+ PCLMULQDQ $0x00, T0, ACCM
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+
+ reduceRound(ACC0)
+ reduceRound(ACC0)
+ PXOR ACC1, ACC0
+
+ LEAQ (16*1)(ptx), ptx
+ LEAQ (16*1)(ctx), ctx
+
+ JMP gcmAesEncSinglesLoop
+
+gcmAesEncTail:
+ TESTQ ptxLen, ptxLen
+ JE gcmAesEncDone
+
+ MOVOU (8*16 + 0*16)(SP), B0
+ AESENC B1, B0
+ AESENC B2, B0
+ AESENC B3, B0
+ AESENC B4, B0
+ AESENC B5, B0
+ AESENC B6, B0
+ AESENC B7, B0
+ MOVOU (16*8)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*9)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*10)(ks), T0
+ CMPQ NR, $12
+ JB encLast4
+ AESENC T0, B0
+ MOVOU (16*11)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*12)(ks), T0
+ JE encLast4
+ AESENC T0, B0
+ MOVOU (16*13)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*14)(ks), T0
+encLast4:
+ AESENCLAST T0, B0
+ MOVOU B0, T0
+
+ LEAQ -1(ptx)(ptxLen*1), ptx
+
+ MOVQ ptxLen, aluTMP
+ SHLQ $4, aluTMP
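+	// Select the andMask<> entry that keeps the low ptxLen bytes.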
+
+ LEAQ andMask<>(SB), aluCTR
+ MOVOU -16(aluCTR)(aluTMP*1), T1
+
+ PXOR B0, B0
+ptxLoadLoop:
+ PSLLDQ $1, B0
+ PINSRB $0, (ptx), B0
+ LEAQ -1(ptx), ptx
+ DECQ ptxLen
+ JNE ptxLoadLoop
+
+ PXOR T0, B0
+ PAND T1, B0
+	MOVOU B0, (ctx) // writing a full block is safe: the tag that follows the ciphertext guarantees the space
+
+ PSHUFB BSWAP, B0
+ PXOR ACC0, B0
+
+ MOVOU T2, ACC0
+ MOVOU T2, ACC1
+ MOVOU (16*15)(pTbl), ACCM
+
+ PSHUFD $78, B0, T0
+ PXOR B0, T0
+ PCLMULQDQ $0x00, B0, ACC0
+ PCLMULQDQ $0x11, B0, ACC1
+ PCLMULQDQ $0x00, T0, ACCM
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+
+ reduceRound(ACC0)
+ reduceRound(ACC0)
+ PXOR ACC1, ACC0
+
+gcmAesEncDone:
+ MOVOU ACC0, (tPtr)
+ RET
+#undef increment
+
+// func gcmAesDec(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
+TEXT ·gcmAesDec(SB),0,$128-96
+#define increment(i) ADDL $1, aluCTR; MOVL aluCTR, aluTMP; XORL aluK, aluTMP; BSWAPL aluTMP; MOVL aluTMP, (3*4 + i*16)(SP)
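+// Decryption keeps only the 8 counter blocks on the stack (GHASH reads the
+// ciphertext directly from src), hence the smaller $128 frame.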
+#define combinedDecRound(i) \
+ MOVOU (16*i)(ks), T0;\
+ AESENC T0, B0;\
+ AESENC T0, B1;\
+ AESENC T0, B2;\
+ AESENC T0, B3;\
+ MOVOU (16*(i*2))(pTbl), T1;\
+ MOVOU T1, T2;\
+ AESENC T0, B4;\
+ AESENC T0, B5;\
+ AESENC T0, B6;\
+ AESENC T0, B7;\
+ MOVOU (16*i)(ctx), T0;\
+ PSHUFB BSWAP, T0;\
+ PCLMULQDQ $0x00, T0, T1;\
+ PXOR T1, ACC0;\
+ PSHUFD $78, T0, T1;\
+ PCLMULQDQ $0x11, T0, T2;\
+ PXOR T1, T0;\
+ PXOR T2, ACC1;\
+ MOVOU (16*(i*2+1))(pTbl), T2;\
+ PCLMULQDQ $0x00, T2, T0;\
+ PXOR T0, ACCM
+
+ MOVQ productTable+0(FP), pTbl
+ MOVQ dst+8(FP), ptx
+ MOVQ src_base+32(FP), ctx
+ MOVQ src_len+40(FP), ptxLen
+ MOVQ ctr+56(FP), ctrPtr
+ MOVQ T+64(FP), tPtr
+ MOVQ ks_base+72(FP), ks
+ MOVQ ks_len+80(FP), NR
+
+ SHRQ $2, NR
+ DECQ NR
+
+ MOVOU bswapMask<>(SB), BSWAP
+ MOVOU gcmPoly<>(SB), POLY
+
+ MOVOU (tPtr), ACC0
+ PXOR ACC1, ACC1
+ PXOR ACCM, ACCM
+ MOVOU (ctrPtr), B0
+ MOVL (3*4)(ctrPtr), aluCTR
+ MOVOU (ks), T0
+ MOVL (3*4)(ks), aluK
+ BSWAPL aluCTR
+ BSWAPL aluK
+
+ PXOR B0, T0
+ MOVOU T0, (0*16)(SP)
+ increment(0)
+
+ CMPQ ptxLen, $128
+ JB gcmAesDecSingles
+
+ MOVOU T0, (1*16)(SP)
+ increment(1)
+ MOVOU T0, (2*16)(SP)
+ increment(2)
+ MOVOU T0, (3*16)(SP)
+ increment(3)
+ MOVOU T0, (4*16)(SP)
+ increment(4)
+ MOVOU T0, (5*16)(SP)
+ increment(5)
+ MOVOU T0, (6*16)(SP)
+ increment(6)
+ MOVOU T0, (7*16)(SP)
+ increment(7)
+
+gcmAesDecOctetsLoop:
+
+ CMPQ ptxLen, $128
+ JB gcmAesDecEndOctets
+ SUBQ $128, ptxLen
+
+ MOVOU (0*16)(SP), B0
+ MOVOU (1*16)(SP), B1
+ MOVOU (2*16)(SP), B2
+ MOVOU (3*16)(SP), B3
+ MOVOU (4*16)(SP), B4
+ MOVOU (5*16)(SP), B5
+ MOVOU (6*16)(SP), B6
+ MOVOU (7*16)(SP), B7
+
+ MOVOU (16*0)(ctx), T0
+ PSHUFB BSWAP, T0
+ PXOR ACC0, T0
+ PSHUFD $78, T0, T1
+ PXOR T0, T1
+
+ MOVOU (16*0)(pTbl), ACC0
+ MOVOU (16*1)(pTbl), ACCM
+ MOVOU ACC0, ACC1
+
+ PCLMULQDQ $0x00, T1, ACCM
+ PCLMULQDQ $0x00, T0, ACC0
+ PCLMULQDQ $0x11, T0, ACC1
+
+ combinedDecRound(1)
+ increment(0)
+ combinedDecRound(2)
+ increment(1)
+ combinedDecRound(3)
+ increment(2)
+ combinedDecRound(4)
+ increment(3)
+ combinedDecRound(5)
+ increment(4)
+ combinedDecRound(6)
+ increment(5)
+ combinedDecRound(7)
+ increment(6)
+
+ aesRound(8)
+ increment(7)
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+
+ reduceRound(ACC0)
+ aesRound(9)
+
+ reduceRound(ACC0)
+ PXOR ACC1, ACC0
+
+ MOVOU (16*10)(ks), T0
+ CMPQ NR, $12
+ JB decLast1
+ aesRnd(T0)
+ aesRound(11)
+ MOVOU (16*12)(ks), T0
+ JE decLast1
+ aesRnd(T0)
+ aesRound(13)
+ MOVOU (16*14)(ks), T0
+decLast1:
+ aesRndLast(T0)
+
+ MOVOU (16*0)(ctx), T0
+ PXOR T0, B0
+ MOVOU (16*1)(ctx), T0
+ PXOR T0, B1
+ MOVOU (16*2)(ctx), T0
+ PXOR T0, B2
+ MOVOU (16*3)(ctx), T0
+ PXOR T0, B3
+ MOVOU (16*4)(ctx), T0
+ PXOR T0, B4
+ MOVOU (16*5)(ctx), T0
+ PXOR T0, B5
+ MOVOU (16*6)(ctx), T0
+ PXOR T0, B6
+ MOVOU (16*7)(ctx), T0
+ PXOR T0, B7
+
+ MOVOU B0, (16*0)(ptx)
+ MOVOU B1, (16*1)(ptx)
+ MOVOU B2, (16*2)(ptx)
+ MOVOU B3, (16*3)(ptx)
+ MOVOU B4, (16*4)(ptx)
+ MOVOU B5, (16*5)(ptx)
+ MOVOU B6, (16*6)(ptx)
+ MOVOU B7, (16*7)(ptx)
+
+ LEAQ 128(ptx), ptx
+ LEAQ 128(ctx), ctx
+
+ JMP gcmAesDecOctetsLoop
+
+gcmAesDecEndOctets:
+
+ SUBQ $7, aluCTR
+
+gcmAesDecSingles:
+
+ MOVOU (16*1)(ks), B1
+ MOVOU (16*2)(ks), B2
+ MOVOU (16*3)(ks), B3
+ MOVOU (16*4)(ks), B4
+ MOVOU (16*5)(ks), B5
+ MOVOU (16*6)(ks), B6
+ MOVOU (16*7)(ks), B7
+
+ MOVOU (16*14)(pTbl), T2
+
+gcmAesDecSinglesLoop:
+
+ CMPQ ptxLen, $16
+ JB gcmAesDecTail
+ SUBQ $16, ptxLen
+
+ MOVOU (ctx), B0
+ MOVOU B0, T1
+ PSHUFB BSWAP, B0
+ PXOR ACC0, B0
+
+ MOVOU T2, ACC0
+ MOVOU T2, ACC1
+ MOVOU (16*15)(pTbl), ACCM
+
+ PCLMULQDQ $0x00, B0, ACC0
+ PCLMULQDQ $0x11, B0, ACC1
+ PSHUFD $78, B0, T0
+ PXOR B0, T0
+ PCLMULQDQ $0x00, T0, ACCM
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+
+ reduceRound(ACC0)
+ reduceRound(ACC0)
+ PXOR ACC1, ACC0
+
+ MOVOU (0*16)(SP), B0
+ increment(0)
+ AESENC B1, B0
+ AESENC B2, B0
+ AESENC B3, B0
+ AESENC B4, B0
+ AESENC B5, B0
+ AESENC B6, B0
+ AESENC B7, B0
+ MOVOU (16*8)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*9)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*10)(ks), T0
+ CMPQ NR, $12
+ JB decLast2
+ AESENC T0, B0
+ MOVOU (16*11)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*12)(ks), T0
+ JE decLast2
+ AESENC T0, B0
+ MOVOU (16*13)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*14)(ks), T0
+decLast2:
+ AESENCLAST T0, B0
+
+ PXOR T1, B0
+ MOVOU B0, (ptx)
+
+ LEAQ (16*1)(ptx), ptx
+ LEAQ (16*1)(ctx), ctx
+
+ JMP gcmAesDecSinglesLoop
+
+gcmAesDecTail:
+
+ TESTQ ptxLen, ptxLen
+ JE gcmAesDecDone
+
+ MOVQ ptxLen, aluTMP
+ SHLQ $4, aluTMP
+ LEAQ andMask<>(SB), aluCTR
+ MOVOU -16(aluCTR)(aluTMP*1), T1
+
+	MOVOU (ctx), B0 // reading a full block is safe: the appended tag means no read overflow
+ PAND T1, B0
+
+ MOVOU B0, T1
+ PSHUFB BSWAP, B0
+ PXOR ACC0, B0
+
+ MOVOU (16*14)(pTbl), ACC0
+ MOVOU (16*15)(pTbl), ACCM
+ MOVOU ACC0, ACC1
+
+ PCLMULQDQ $0x00, B0, ACC0
+ PCLMULQDQ $0x11, B0, ACC1
+ PSHUFD $78, B0, T0
+ PXOR B0, T0
+ PCLMULQDQ $0x00, T0, ACCM
+
+ PXOR ACC0, ACCM
+ PXOR ACC1, ACCM
+ MOVOU ACCM, T0
+ PSRLDQ $8, ACCM
+ PSLLDQ $8, T0
+ PXOR ACCM, ACC1
+ PXOR T0, ACC0
+
+ reduceRound(ACC0)
+ reduceRound(ACC0)
+ PXOR ACC1, ACC0
+
+ MOVOU (0*16)(SP), B0
+ increment(0)
+ AESENC B1, B0
+ AESENC B2, B0
+ AESENC B3, B0
+ AESENC B4, B0
+ AESENC B5, B0
+ AESENC B6, B0
+ AESENC B7, B0
+ MOVOU (16*8)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*9)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*10)(ks), T0
+ CMPQ NR, $12
+ JB decLast3
+ AESENC T0, B0
+ MOVOU (16*11)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*12)(ks), T0
+ JE decLast3
+ AESENC T0, B0
+ MOVOU (16*13)(ks), T0
+ AESENC T0, B0
+ MOVOU (16*14)(ks), T0
+decLast3:
+ AESENCLAST T0, B0
+ PXOR T1, B0
+
+ptxStoreLoop:
+ PEXTRB $0, B0, (ptx)
+ PSRLDQ $1, B0
+ LEAQ 1(ptx), ptx
+ DECQ ptxLen
+
+ JNE ptxStoreLoop
+
+gcmAesDecDone:
+
+ MOVOU ACC0, (tPtr)
+ RET
diff --git a/src/crypto/aes/gcm_arm64.s b/src/crypto/aes/gcm_arm64.s
new file mode 100644
index 0000000..c350102
--- /dev/null
+++ b/src/crypto/aes/gcm_arm64.s
@@ -0,0 +1,1021 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+#define B0 V0
+#define B1 V1
+#define B2 V2
+#define B3 V3
+#define B4 V4
+#define B5 V5
+#define B6 V6
+#define B7 V7
+
+#define ACC0 V8
+#define ACC1 V9
+#define ACCM V10
+
+#define T0 V11
+#define T1 V12
+#define T2 V13
+#define T3 V14
+
+#define POLY V15
+#define ZERO V16
+#define INC V17
+#define CTR V18
+
+#define K0 V19
+#define K1 V20
+#define K2 V21
+#define K3 V22
+#define K4 V23
+#define K5 V24
+#define K6 V25
+#define K7 V26
+#define K8 V27
+#define K9 V28
+#define K10 V29
+#define K11 V30
+#define KLAST V31
+
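+// reduce() recombines the Karatsuba halves held in ACC0/ACC1/ACCM into a
+// 256-bit product and folds it twice against POLY, leaving the result,
+// reduced modulo the GCM polynomial, in ACC0.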
+#define reduce() \
+ VEOR ACC0.B16, ACCM.B16, ACCM.B16 \
+ VEOR ACC1.B16, ACCM.B16, ACCM.B16 \
+ VEXT $8, ZERO.B16, ACCM.B16, T0.B16 \
+ VEXT $8, ACCM.B16, ZERO.B16, ACCM.B16 \
+ VEOR ACCM.B16, ACC0.B16, ACC0.B16 \
+ VEOR T0.B16, ACC1.B16, ACC1.B16 \
+ VPMULL POLY.D1, ACC0.D1, T0.Q1 \
+ VEXT $8, ACC0.B16, ACC0.B16, ACC0.B16 \
+ VEOR T0.B16, ACC0.B16, ACC0.B16 \
+ VPMULL POLY.D1, ACC0.D1, T0.Q1 \
+ VEOR T0.B16, ACC1.B16, ACC1.B16 \
+ VEXT $8, ACC1.B16, ACC1.B16, ACC1.B16 \
+ VEOR ACC1.B16, ACC0.B16, ACC0.B16 \
+
+// func gcmAesFinish(productTable *[256]byte, tagMask, T *[16]byte, pLen, dLen uint64)
+TEXT ·gcmAesFinish(SB),NOSPLIT,$0
+#define pTbl R0
+#define tMsk R1
+#define tPtr R2
+#define plen R3
+#define dlen R4
+
+ MOVD $0xC2, R1
+ LSL $56, R1
+ MOVD $1, R0
+ VMOV R1, POLY.D[0]
+ VMOV R0, POLY.D[1]
+ VEOR ZERO.B16, ZERO.B16, ZERO.B16
+
+ MOVD productTable+0(FP), pTbl
+ MOVD tagMask+8(FP), tMsk
+ MOVD T+16(FP), tPtr
+ MOVD pLen+24(FP), plen
+ MOVD dLen+32(FP), dlen
+
+ VLD1 (tPtr), [ACC0.B16]
+ VLD1 (tMsk), [B1.B16]
+
+ LSL $3, plen
+ LSL $3, dlen
+
+ VMOV dlen, B0.D[0]
+ VMOV plen, B0.D[1]
+
+ ADD $14*16, pTbl
+ VLD1.P (pTbl), [T1.B16, T2.B16]
+
+ VEOR ACC0.B16, B0.B16, B0.B16
+
+ VEXT $8, B0.B16, B0.B16, T0.B16
+ VEOR B0.B16, T0.B16, T0.B16
+ VPMULL B0.D1, T1.D1, ACC1.Q1
+ VPMULL2 B0.D2, T1.D2, ACC0.Q1
+ VPMULL T0.D1, T2.D1, ACCM.Q1
+
+ reduce()
+
+ VREV64 ACC0.B16, ACC0.B16
+ VEOR B1.B16, ACC0.B16, ACC0.B16
+
+ VST1 [ACC0.B16], (tPtr)
+ RET
+#undef pTbl
+#undef tMsk
+#undef tPtr
+#undef plen
+#undef dlen
+
+// func gcmAesInit(productTable *[256]byte, ks []uint32)
+TEXT ·gcmAesInit(SB),NOSPLIT,$0
+#define pTbl R0
+#define KS R1
+#define NR R2
+#define I R3
+ MOVD productTable+0(FP), pTbl
+ MOVD ks_base+8(FP), KS
+ MOVD ks_len+16(FP), NR
+
+ MOVD $0xC2, I
+ LSL $56, I
+ VMOV I, POLY.D[0]
+ MOVD $1, I
+ VMOV I, POLY.D[1]
+ VEOR ZERO.B16, ZERO.B16, ZERO.B16
+
+ // Encrypt block 0 with the AES key to generate the hash key H
+ VLD1.P 64(KS), [T0.B16, T1.B16, T2.B16, T3.B16]
+ VEOR B0.B16, B0.B16, B0.B16
+ AESE T0.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE T1.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE T2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE T3.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ VLD1.P 64(KS), [T0.B16, T1.B16, T2.B16, T3.B16]
+ AESE T0.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE T1.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE T2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE T3.B16, B0.B16
+ AESMC B0.B16, B0.B16
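+	// len(ks) is 44, 52, or 60 for AES-128/192/256: bit 4 is clear only for
+	// AES-128, and bit 3 then distinguishes AES-192 from AES-256.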
+ TBZ $4, NR, initEncFinish
+ VLD1.P 32(KS), [T0.B16, T1.B16]
+ AESE T0.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE T1.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ TBZ $3, NR, initEncFinish
+ VLD1.P 32(KS), [T0.B16, T1.B16]
+ AESE T0.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE T1.B16, B0.B16
+ AESMC B0.B16, B0.B16
+initEncFinish:
+ VLD1 (KS), [T0.B16, T1.B16, T2.B16]
+ AESE T0.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE T1.B16, B0.B16
+ VEOR T2.B16, B0.B16, B0.B16
+
+ VREV64 B0.B16, B0.B16
+
+ // Multiply by 2 modulo P
+ VMOV B0.D[0], I
+ ASR $63, I
+ VMOV I, T1.D[0]
+ VMOV I, T1.D[1]
+ VAND POLY.B16, T1.B16, T1.B16
+ VUSHR $63, B0.D2, T2.D2
+ VEXT $8, ZERO.B16, T2.B16, T2.B16
+ VSHL $1, B0.D2, B0.D2
+ VEOR T1.B16, B0.B16, B0.B16
+ VEOR T2.B16, B0.B16, B0.B16 // Can avoid this when VSLI is available
+
+ // Karatsuba pre-computation
+ VEXT $8, B0.B16, B0.B16, B1.B16
+ VEOR B0.B16, B1.B16, B1.B16
+
+ ADD $14*16, pTbl
+ VST1 [B0.B16, B1.B16], (pTbl)
+ SUB $2*16, pTbl
+
+ VMOV B0.B16, B2.B16
+ VMOV B1.B16, B3.B16
+
+ MOVD $7, I
+
+initLoop:
+ // Compute powers of H
+ SUBS $1, I
+
+ VPMULL B0.D1, B2.D1, T1.Q1
+ VPMULL2 B0.D2, B2.D2, T0.Q1
+ VPMULL B1.D1, B3.D1, T2.Q1
+ VEOR T0.B16, T2.B16, T2.B16
+ VEOR T1.B16, T2.B16, T2.B16
+ VEXT $8, ZERO.B16, T2.B16, T3.B16
+ VEXT $8, T2.B16, ZERO.B16, T2.B16
+ VEOR T2.B16, T0.B16, T0.B16
+ VEOR T3.B16, T1.B16, T1.B16
+ VPMULL POLY.D1, T0.D1, T2.Q1
+ VEXT $8, T0.B16, T0.B16, T0.B16
+ VEOR T2.B16, T0.B16, T0.B16
+ VPMULL POLY.D1, T0.D1, T2.Q1
+ VEXT $8, T0.B16, T0.B16, T0.B16
+ VEOR T2.B16, T0.B16, T0.B16
+ VEOR T1.B16, T0.B16, B2.B16
+ VMOV B2.B16, B3.B16
+ VEXT $8, B2.B16, B2.B16, B2.B16
+ VEOR B2.B16, B3.B16, B3.B16
+
+ VST1 [B2.B16, B3.B16], (pTbl)
+ SUB $2*16, pTbl
+
+ BNE initLoop
+ RET
+#undef I
+#undef NR
+#undef KS
+#undef pTbl
+
+// func gcmAesData(productTable *[256]byte, data []byte, T *[16]byte)
+TEXT ·gcmAesData(SB),NOSPLIT,$0
+#define pTbl R0
+#define aut R1
+#define tPtr R2
+#define autLen R3
+#define H0 R4
+#define pTblSave R5
+
+#define mulRound(X) \
+ VLD1.P 32(pTbl), [T1.B16, T2.B16] \
+ VREV64 X.B16, X.B16 \
+ VEXT $8, X.B16, X.B16, T0.B16 \
+ VEOR X.B16, T0.B16, T0.B16 \
+ VPMULL X.D1, T1.D1, T3.Q1 \
+ VEOR T3.B16, ACC1.B16, ACC1.B16 \
+ VPMULL2 X.D2, T1.D2, T3.Q1 \
+ VEOR T3.B16, ACC0.B16, ACC0.B16 \
+ VPMULL T0.D1, T2.D1, T3.Q1 \
+ VEOR T3.B16, ACCM.B16, ACCM.B16
+
+ MOVD productTable+0(FP), pTbl
+ MOVD data_base+8(FP), aut
+ MOVD data_len+16(FP), autLen
+ MOVD T+32(FP), tPtr
+
+ VEOR ACC0.B16, ACC0.B16, ACC0.B16
+ CBZ autLen, dataBail
+
+ MOVD $0xC2, H0
+ LSL $56, H0
+ VMOV H0, POLY.D[0]
+ MOVD $1, H0
+ VMOV H0, POLY.D[1]
+ VEOR ZERO.B16, ZERO.B16, ZERO.B16
+ MOVD pTbl, pTblSave
+
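+	// As on amd64, special-case the common TLS AAD length of 13 bytes.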
+ CMP $13, autLen
+ BEQ dataTLS
+ CMP $128, autLen
+ BLT startSinglesLoop
+ B octetsLoop
+
+dataTLS:
+ ADD $14*16, pTbl
+ VLD1.P (pTbl), [T1.B16, T2.B16]
+ VEOR B0.B16, B0.B16, B0.B16
+
+ MOVD (aut), H0
+ VMOV H0, B0.D[0]
+ MOVW 8(aut), H0
+ VMOV H0, B0.S[2]
+ MOVB 12(aut), H0
+ VMOV H0, B0.B[12]
+
+ MOVD $0, autLen
+ B dataMul
+
+octetsLoop:
+ CMP $128, autLen
+ BLT startSinglesLoop
+ SUB $128, autLen
+
+ VLD1.P 32(aut), [B0.B16, B1.B16]
+
+ VLD1.P 32(pTbl), [T1.B16, T2.B16]
+ VREV64 B0.B16, B0.B16
+ VEOR ACC0.B16, B0.B16, B0.B16
+ VEXT $8, B0.B16, B0.B16, T0.B16
+ VEOR B0.B16, T0.B16, T0.B16
+ VPMULL B0.D1, T1.D1, ACC1.Q1
+ VPMULL2 B0.D2, T1.D2, ACC0.Q1
+ VPMULL T0.D1, T2.D1, ACCM.Q1
+
+ mulRound(B1)
+ VLD1.P 32(aut), [B2.B16, B3.B16]
+ mulRound(B2)
+ mulRound(B3)
+ VLD1.P 32(aut), [B4.B16, B5.B16]
+ mulRound(B4)
+ mulRound(B5)
+ VLD1.P 32(aut), [B6.B16, B7.B16]
+ mulRound(B6)
+ mulRound(B7)
+
+ MOVD pTblSave, pTbl
+ reduce()
+ B octetsLoop
+
+startSinglesLoop:
+
+ ADD $14*16, pTbl
+ VLD1.P (pTbl), [T1.B16, T2.B16]
+
+singlesLoop:
+
+ CMP $16, autLen
+ BLT dataEnd
+ SUB $16, autLen
+
+ VLD1.P 16(aut), [B0.B16]
+dataMul:
+ VREV64 B0.B16, B0.B16
+ VEOR ACC0.B16, B0.B16, B0.B16
+
+ VEXT $8, B0.B16, B0.B16, T0.B16
+ VEOR B0.B16, T0.B16, T0.B16
+ VPMULL B0.D1, T1.D1, ACC1.Q1
+ VPMULL2 B0.D2, T1.D2, ACC0.Q1
+ VPMULL T0.D1, T2.D1, ACCM.Q1
+
+ reduce()
+
+ B singlesLoop
+
+dataEnd:
+
+ CBZ autLen, dataBail
+ VEOR B0.B16, B0.B16, B0.B16
+ ADD autLen, aut
+
+dataLoadLoop:
+ MOVB.W -1(aut), H0
+ VEXT $15, B0.B16, ZERO.B16, B0.B16
+ VMOV H0, B0.B[0]
+ SUBS $1, autLen
+ BNE dataLoadLoop
+ B dataMul
+
+dataBail:
+ VST1 [ACC0.B16], (tPtr)
+ RET
+
+#undef pTbl
+#undef aut
+#undef tPtr
+#undef autLen
+#undef H0
+#undef pTblSave
+
+// func gcmAesEnc(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
+TEXT ·gcmAesEnc(SB),NOSPLIT,$0
+#define pTbl R0
+#define dstPtr R1
+#define ctrPtr R2
+#define srcPtr R3
+#define ks R4
+#define tPtr R5
+#define srcPtrLen R6
+#define aluCTR R7
+#define aluTMP R8
+#define aluK R9
+#define NR R10
+#define H0 R11
+#define H1 R12
+#define curK R13
+#define pTblSave R14
+
+#define aesrndx8(K) \
+ AESE K.B16, B0.B16 \
+ AESMC B0.B16, B0.B16 \
+ AESE K.B16, B1.B16 \
+ AESMC B1.B16, B1.B16 \
+ AESE K.B16, B2.B16 \
+ AESMC B2.B16, B2.B16 \
+ AESE K.B16, B3.B16 \
+ AESMC B3.B16, B3.B16 \
+ AESE K.B16, B4.B16 \
+ AESMC B4.B16, B4.B16 \
+ AESE K.B16, B5.B16 \
+ AESMC B5.B16, B5.B16 \
+ AESE K.B16, B6.B16 \
+ AESMC B6.B16, B6.B16 \
+ AESE K.B16, B7.B16 \
+ AESMC B7.B16, B7.B16
+
+#define aesrndlastx8(K) \
+ AESE K.B16, B0.B16 \
+ AESE K.B16, B1.B16 \
+ AESE K.B16, B2.B16 \
+ AESE K.B16, B3.B16 \
+ AESE K.B16, B4.B16 \
+ AESE K.B16, B5.B16 \
+ AESE K.B16, B6.B16 \
+ AESE K.B16, B7.B16
+
+ MOVD productTable+0(FP), pTbl
+ MOVD dst+8(FP), dstPtr
+ MOVD src_base+32(FP), srcPtr
+ MOVD src_len+40(FP), srcPtrLen
+ MOVD ctr+56(FP), ctrPtr
+ MOVD T+64(FP), tPtr
+ MOVD ks_base+72(FP), ks
+ MOVD ks_len+80(FP), NR
+
+ MOVD $0xC2, H1
+ LSL $56, H1
+ MOVD $1, H0
+ VMOV H1, POLY.D[0]
+ VMOV H0, POLY.D[1]
+ VEOR ZERO.B16, ZERO.B16, ZERO.B16
+	// NR already holds len(ks); the key size is selected below with TBZ bit tests
+ MOVD pTbl, pTblSave
+ // Current tag, after AAD
+ VLD1 (tPtr), [ACC0.B16]
+ VEOR ACC1.B16, ACC1.B16, ACC1.B16
+ VEOR ACCM.B16, ACCM.B16, ACCM.B16
+ // Prepare initial counter, and the increment vector
+ VLD1 (ctrPtr), [CTR.B16]
+ VEOR INC.B16, INC.B16, INC.B16
+ MOVD $1, H0
+ VMOV H0, INC.S[3]
+ VREV32 CTR.B16, CTR.B16
+ VADD CTR.S4, INC.S4, CTR.S4
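+	// CTR is kept byte-reversed (VREV32) so the big-endian counter word can
+	// be incremented with a plain vector add; it is reversed back before use.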
+	// Set flags for the <8 blocks case; the branch (BLT startSingles) happens after the keys are loaded
+ CMP $128, srcPtrLen
+
+ MOVD ks, H0
+	// For AES-128, round keys are stored in: K0 .. K10, KLAST
+ VLD1.P 64(H0), [K0.B16, K1.B16, K2.B16, K3.B16]
+ VLD1.P 64(H0), [K4.B16, K5.B16, K6.B16, K7.B16]
+ VLD1.P 48(H0), [K8.B16, K9.B16, K10.B16]
+ VMOV K10.B16, KLAST.B16
+
+ BLT startSingles
+ // There are at least 8 blocks to encrypt
+ TBZ $4, NR, octetsLoop
+
+	// For AES-192, round keys occupy: K0 .. K7, K10, K11, K8, K9, KLAST
+ VMOV K8.B16, K10.B16
+ VMOV K9.B16, K11.B16
+ VMOV KLAST.B16, K8.B16
+ VLD1.P 16(H0), [K9.B16]
+ VLD1.P 16(H0), [KLAST.B16]
+ TBZ $3, NR, octetsLoop
+	// For AES-256, round keys occupy: K0 .. K7, K10, K11, mem, mem, K8, K9, KLAST
+ VMOV KLAST.B16, K8.B16
+ VLD1.P 16(H0), [K9.B16]
+ VLD1.P 16(H0), [KLAST.B16]
+ ADD $10*16, ks, H0
+ MOVD H0, curK
+
+octetsLoop:
+ SUB $128, srcPtrLen
+
+ VMOV CTR.B16, B0.B16
+ VADD B0.S4, INC.S4, B1.S4
+ VREV32 B0.B16, B0.B16
+ VADD B1.S4, INC.S4, B2.S4
+ VREV32 B1.B16, B1.B16
+ VADD B2.S4, INC.S4, B3.S4
+ VREV32 B2.B16, B2.B16
+ VADD B3.S4, INC.S4, B4.S4
+ VREV32 B3.B16, B3.B16
+ VADD B4.S4, INC.S4, B5.S4
+ VREV32 B4.B16, B4.B16
+ VADD B5.S4, INC.S4, B6.S4
+ VREV32 B5.B16, B5.B16
+ VADD B6.S4, INC.S4, B7.S4
+ VREV32 B6.B16, B6.B16
+ VADD B7.S4, INC.S4, CTR.S4
+ VREV32 B7.B16, B7.B16
+
+ aesrndx8(K0)
+ aesrndx8(K1)
+ aesrndx8(K2)
+ aesrndx8(K3)
+ aesrndx8(K4)
+ aesrndx8(K5)
+ aesrndx8(K6)
+ aesrndx8(K7)
+ TBZ $4, NR, octetsFinish
+ aesrndx8(K10)
+ aesrndx8(K11)
+ TBZ $3, NR, octetsFinish
+ VLD1.P 32(curK), [T1.B16, T2.B16]
+ aesrndx8(T1)
+ aesrndx8(T2)
+ MOVD H0, curK
+octetsFinish:
+ aesrndx8(K8)
+ aesrndlastx8(K9)
+
+ VEOR KLAST.B16, B0.B16, B0.B16
+ VEOR KLAST.B16, B1.B16, B1.B16
+ VEOR KLAST.B16, B2.B16, B2.B16
+ VEOR KLAST.B16, B3.B16, B3.B16
+ VEOR KLAST.B16, B4.B16, B4.B16
+ VEOR KLAST.B16, B5.B16, B5.B16
+ VEOR KLAST.B16, B6.B16, B6.B16
+ VEOR KLAST.B16, B7.B16, B7.B16
+
+ VLD1.P 32(srcPtr), [T1.B16, T2.B16]
+ VEOR B0.B16, T1.B16, B0.B16
+ VEOR B1.B16, T2.B16, B1.B16
+ VST1.P [B0.B16, B1.B16], 32(dstPtr)
+ VLD1.P 32(srcPtr), [T1.B16, T2.B16]
+ VEOR B2.B16, T1.B16, B2.B16
+ VEOR B3.B16, T2.B16, B3.B16
+ VST1.P [B2.B16, B3.B16], 32(dstPtr)
+ VLD1.P 32(srcPtr), [T1.B16, T2.B16]
+ VEOR B4.B16, T1.B16, B4.B16
+ VEOR B5.B16, T2.B16, B5.B16
+ VST1.P [B4.B16, B5.B16], 32(dstPtr)
+ VLD1.P 32(srcPtr), [T1.B16, T2.B16]
+ VEOR B6.B16, T1.B16, B6.B16
+ VEOR B7.B16, T2.B16, B7.B16
+ VST1.P [B6.B16, B7.B16], 32(dstPtr)
+
+ VLD1.P 32(pTbl), [T1.B16, T2.B16]
+ VREV64 B0.B16, B0.B16
+ VEOR ACC0.B16, B0.B16, B0.B16
+ VEXT $8, B0.B16, B0.B16, T0.B16
+ VEOR B0.B16, T0.B16, T0.B16
+ VPMULL B0.D1, T1.D1, ACC1.Q1
+ VPMULL2 B0.D2, T1.D2, ACC0.Q1
+ VPMULL T0.D1, T2.D1, ACCM.Q1
+
+ mulRound(B1)
+ mulRound(B2)
+ mulRound(B3)
+ mulRound(B4)
+ mulRound(B5)
+ mulRound(B6)
+ mulRound(B7)
+ MOVD pTblSave, pTbl
+ reduce()
+
+ CMP $128, srcPtrLen
+ BGE octetsLoop
+
+startSingles:
+ CBZ srcPtrLen, done
+ ADD $14*16, pTbl
+ // Preload H and its Karatsuba precomp
+ VLD1.P (pTbl), [T1.B16, T2.B16]
+ // Preload AES round keys
+ ADD $128, ks
+ VLD1.P 48(ks), [K8.B16, K9.B16, K10.B16]
+ VMOV K10.B16, KLAST.B16
+ TBZ $4, NR, singlesLoop
+ VLD1.P 32(ks), [B1.B16, B2.B16]
+ VMOV B2.B16, KLAST.B16
+ TBZ $3, NR, singlesLoop
+ VLD1.P 32(ks), [B3.B16, B4.B16]
+ VMOV B4.B16, KLAST.B16
+
+singlesLoop:
+ CMP $16, srcPtrLen
+ BLT tail
+ SUB $16, srcPtrLen
+
+ VLD1.P 16(srcPtr), [T0.B16]
+ VEOR KLAST.B16, T0.B16, T0.B16
+
+ VREV32 CTR.B16, B0.B16
+ VADD CTR.S4, INC.S4, CTR.S4
+
+ AESE K0.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K1.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K3.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K4.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K5.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K6.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K7.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K8.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K9.B16, B0.B16
+ TBZ $4, NR, singlesLast
+ AESMC B0.B16, B0.B16
+ AESE K10.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE B1.B16, B0.B16
+ TBZ $3, NR, singlesLast
+ AESMC B0.B16, B0.B16
+ AESE B2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE B3.B16, B0.B16
+singlesLast:
+ VEOR T0.B16, B0.B16, B0.B16
+encReduce:
+ VST1.P [B0.B16], 16(dstPtr)
+
+ VREV64 B0.B16, B0.B16
+ VEOR ACC0.B16, B0.B16, B0.B16
+
+ VEXT $8, B0.B16, B0.B16, T0.B16
+ VEOR B0.B16, T0.B16, T0.B16
+ VPMULL B0.D1, T1.D1, ACC1.Q1
+ VPMULL2 B0.D2, T1.D2, ACC0.Q1
+ VPMULL T0.D1, T2.D1, ACCM.Q1
+
+ reduce()
+
+ B singlesLoop
+tail:
+ CBZ srcPtrLen, done
+
+ VEOR T0.B16, T0.B16, T0.B16
+ VEOR T3.B16, T3.B16, T3.B16
+ MOVD $0, H1
+ SUB $1, H1
+ ADD srcPtrLen, srcPtr
+
+ TBZ $3, srcPtrLen, ld4
+ MOVD.W -8(srcPtr), H0
+ VMOV H0, T0.D[0]
+ VMOV H1, T3.D[0]
+ld4:
+ TBZ $2, srcPtrLen, ld2
+ MOVW.W -4(srcPtr), H0
+ VEXT $12, T0.B16, ZERO.B16, T0.B16
+ VEXT $12, T3.B16, ZERO.B16, T3.B16
+ VMOV H0, T0.S[0]
+ VMOV H1, T3.S[0]
+ld2:
+ TBZ $1, srcPtrLen, ld1
+ MOVH.W -2(srcPtr), H0
+ VEXT $14, T0.B16, ZERO.B16, T0.B16
+ VEXT $14, T3.B16, ZERO.B16, T3.B16
+ VMOV H0, T0.H[0]
+ VMOV H1, T3.H[0]
+ld1:
+ TBZ $0, srcPtrLen, ld0
+ MOVB.W -1(srcPtr), H0
+ VEXT $15, T0.B16, ZERO.B16, T0.B16
+ VEXT $15, T3.B16, ZERO.B16, T3.B16
+ VMOV H0, T0.B[0]
+ VMOV H1, T3.B[0]
+ld0:
+
+ MOVD ZR, srcPtrLen
+ VEOR KLAST.B16, T0.B16, T0.B16
+ VREV32 CTR.B16, B0.B16
+
+ AESE K0.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K1.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K3.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K4.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K5.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K6.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K7.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K8.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K9.B16, B0.B16
+ TBZ $4, NR, tailLast
+ AESMC B0.B16, B0.B16
+ AESE K10.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE B1.B16, B0.B16
+ TBZ $3, NR, tailLast
+ AESMC B0.B16, B0.B16
+ AESE B2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE B3.B16, B0.B16
+
+tailLast:
+ VEOR T0.B16, B0.B16, B0.B16
+ VAND T3.B16, B0.B16, B0.B16
+ B encReduce
+
+done:
+ VST1 [ACC0.B16], (tPtr)
+ RET
+
+// func gcmAesDec(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
+TEXT ·gcmAesDec(SB),NOSPLIT,$0
+ MOVD productTable+0(FP), pTbl
+ MOVD dst+8(FP), dstPtr
+ MOVD src_base+32(FP), srcPtr
+ MOVD src_len+40(FP), srcPtrLen
+ MOVD ctr+56(FP), ctrPtr
+ MOVD T+64(FP), tPtr
+ MOVD ks_base+72(FP), ks
+ MOVD ks_len+80(FP), NR
+
+ MOVD $0xC2, H1
+ LSL $56, H1
+ MOVD $1, H0
+ VMOV H1, POLY.D[0]
+ VMOV H0, POLY.D[1]
+ VEOR ZERO.B16, ZERO.B16, ZERO.B16
+	// NR already holds len(ks); the key size is selected below with TBZ bit tests
+ MOVD pTbl, pTblSave
+ // Current tag, after AAD
+ VLD1 (tPtr), [ACC0.B16]
+ VEOR ACC1.B16, ACC1.B16, ACC1.B16
+ VEOR ACCM.B16, ACCM.B16, ACCM.B16
+ // Prepare initial counter, and the increment vector
+ VLD1 (ctrPtr), [CTR.B16]
+ VEOR INC.B16, INC.B16, INC.B16
+ MOVD $1, H0
+ VMOV H0, INC.S[3]
+ VREV32 CTR.B16, CTR.B16
+ VADD CTR.S4, INC.S4, CTR.S4
+
+ MOVD ks, H0
+	// For AES-128, round keys are stored in: K0 .. K10, KLAST
+ VLD1.P 64(H0), [K0.B16, K1.B16, K2.B16, K3.B16]
+ VLD1.P 64(H0), [K4.B16, K5.B16, K6.B16, K7.B16]
+ VLD1.P 48(H0), [K8.B16, K9.B16, K10.B16]
+ VMOV K10.B16, KLAST.B16
+
+ // Skip to <8 blocks loop
+ CMP $128, srcPtrLen
+ BLT startSingles
+	// There are at least 8 blocks to decrypt
+ TBZ $4, NR, octetsLoop
+
+	// For AES-192, round keys occupy: K0 .. K7, K10, K11, K8, K9, KLAST
+ VMOV K8.B16, K10.B16
+ VMOV K9.B16, K11.B16
+ VMOV KLAST.B16, K8.B16
+ VLD1.P 16(H0), [K9.B16]
+ VLD1.P 16(H0), [KLAST.B16]
+ TBZ $3, NR, octetsLoop
+	// For AES-256, round keys occupy: K0 .. K7, K10, K11, mem, mem, K8, K9, KLAST
+ VMOV KLAST.B16, K8.B16
+ VLD1.P 16(H0), [K9.B16]
+ VLD1.P 16(H0), [KLAST.B16]
+ ADD $10*16, ks, H0
+ MOVD H0, curK
+
+octetsLoop:
+ SUB $128, srcPtrLen
+
+ VMOV CTR.B16, B0.B16
+ VADD B0.S4, INC.S4, B1.S4
+ VREV32 B0.B16, B0.B16
+ VADD B1.S4, INC.S4, B2.S4
+ VREV32 B1.B16, B1.B16
+ VADD B2.S4, INC.S4, B3.S4
+ VREV32 B2.B16, B2.B16
+ VADD B3.S4, INC.S4, B4.S4
+ VREV32 B3.B16, B3.B16
+ VADD B4.S4, INC.S4, B5.S4
+ VREV32 B4.B16, B4.B16
+ VADD B5.S4, INC.S4, B6.S4
+ VREV32 B5.B16, B5.B16
+ VADD B6.S4, INC.S4, B7.S4
+ VREV32 B6.B16, B6.B16
+ VADD B7.S4, INC.S4, CTR.S4
+ VREV32 B7.B16, B7.B16
+
+ aesrndx8(K0)
+ aesrndx8(K1)
+ aesrndx8(K2)
+ aesrndx8(K3)
+ aesrndx8(K4)
+ aesrndx8(K5)
+ aesrndx8(K6)
+ aesrndx8(K7)
+ TBZ $4, NR, octetsFinish
+ aesrndx8(K10)
+ aesrndx8(K11)
+ TBZ $3, NR, octetsFinish
+ VLD1.P 32(curK), [T1.B16, T2.B16]
+ aesrndx8(T1)
+ aesrndx8(T2)
+ MOVD H0, curK
+octetsFinish:
+ aesrndx8(K8)
+ aesrndlastx8(K9)
+
+ VEOR KLAST.B16, B0.B16, T1.B16
+ VEOR KLAST.B16, B1.B16, T2.B16
+ VEOR KLAST.B16, B2.B16, B2.B16
+ VEOR KLAST.B16, B3.B16, B3.B16
+ VEOR KLAST.B16, B4.B16, B4.B16
+ VEOR KLAST.B16, B5.B16, B5.B16
+ VEOR KLAST.B16, B6.B16, B6.B16
+ VEOR KLAST.B16, B7.B16, B7.B16
+
+ VLD1.P 32(srcPtr), [B0.B16, B1.B16]
+ VEOR B0.B16, T1.B16, T1.B16
+ VEOR B1.B16, T2.B16, T2.B16
+ VST1.P [T1.B16, T2.B16], 32(dstPtr)
+
+ VLD1.P 32(pTbl), [T1.B16, T2.B16]
+ VREV64 B0.B16, B0.B16
+ VEOR ACC0.B16, B0.B16, B0.B16
+ VEXT $8, B0.B16, B0.B16, T0.B16
+ VEOR B0.B16, T0.B16, T0.B16
+ VPMULL B0.D1, T1.D1, ACC1.Q1
+ VPMULL2 B0.D2, T1.D2, ACC0.Q1
+ VPMULL T0.D1, T2.D1, ACCM.Q1
+ mulRound(B1)
+
+ VLD1.P 32(srcPtr), [B0.B16, B1.B16]
+ VEOR B2.B16, B0.B16, T1.B16
+ VEOR B3.B16, B1.B16, T2.B16
+ VST1.P [T1.B16, T2.B16], 32(dstPtr)
+ mulRound(B0)
+ mulRound(B1)
+
+ VLD1.P 32(srcPtr), [B0.B16, B1.B16]
+ VEOR B4.B16, B0.B16, T1.B16
+ VEOR B5.B16, B1.B16, T2.B16
+ VST1.P [T1.B16, T2.B16], 32(dstPtr)
+ mulRound(B0)
+ mulRound(B1)
+
+ VLD1.P 32(srcPtr), [B0.B16, B1.B16]
+ VEOR B6.B16, B0.B16, T1.B16
+ VEOR B7.B16, B1.B16, T2.B16
+ VST1.P [T1.B16, T2.B16], 32(dstPtr)
+ mulRound(B0)
+ mulRound(B1)
+
+ MOVD pTblSave, pTbl
+ reduce()
+
+ CMP $128, srcPtrLen
+ BGE octetsLoop
+
+startSingles:
+ CBZ srcPtrLen, done
+ ADD $14*16, pTbl
+ // Preload H and its Karatsuba precomp
+ VLD1.P (pTbl), [T1.B16, T2.B16]
+ // Preload AES round keys
+ ADD $128, ks
+ VLD1.P 48(ks), [K8.B16, K9.B16, K10.B16]
+ VMOV K10.B16, KLAST.B16
+ TBZ $4, NR, singlesLoop
+ VLD1.P 32(ks), [B1.B16, B2.B16]
+ VMOV B2.B16, KLAST.B16
+ TBZ $3, NR, singlesLoop
+ VLD1.P 32(ks), [B3.B16, B4.B16]
+ VMOV B4.B16, KLAST.B16
+
+singlesLoop:
+ CMP $16, srcPtrLen
+ BLT tail
+ SUB $16, srcPtrLen
+
+ VLD1.P 16(srcPtr), [T0.B16]
+ VREV64 T0.B16, B5.B16
+ VEOR KLAST.B16, T0.B16, T0.B16
+
+ VREV32 CTR.B16, B0.B16
+ VADD CTR.S4, INC.S4, CTR.S4
+
+ AESE K0.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K1.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K3.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K4.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K5.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K6.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K7.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K8.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K9.B16, B0.B16
+ TBZ $4, NR, singlesLast
+ AESMC B0.B16, B0.B16
+ AESE K10.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE B1.B16, B0.B16
+ TBZ $3, NR, singlesLast
+ AESMC B0.B16, B0.B16
+ AESE B2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE B3.B16, B0.B16
+singlesLast:
+ VEOR T0.B16, B0.B16, B0.B16
+
+ VST1.P [B0.B16], 16(dstPtr)
+
+ VEOR ACC0.B16, B5.B16, B5.B16
+ VEXT $8, B5.B16, B5.B16, T0.B16
+ VEOR B5.B16, T0.B16, T0.B16
+ VPMULL B5.D1, T1.D1, ACC1.Q1
+ VPMULL2 B5.D2, T1.D2, ACC0.Q1
+ VPMULL T0.D1, T2.D1, ACCM.Q1
+ reduce()
+
+ B singlesLoop
+tail:
+ CBZ srcPtrLen, done
+
+ VREV32 CTR.B16, B0.B16
+ VADD CTR.S4, INC.S4, CTR.S4
+
+ AESE K0.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K1.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K3.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K4.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K5.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K6.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K7.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K8.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE K9.B16, B0.B16
+ TBZ $4, NR, tailLast
+ AESMC B0.B16, B0.B16
+ AESE K10.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE B1.B16, B0.B16
+ TBZ $3, NR, tailLast
+ AESMC B0.B16, B0.B16
+ AESE B2.B16, B0.B16
+ AESMC B0.B16, B0.B16
+ AESE B3.B16, B0.B16
+tailLast:
+ VEOR KLAST.B16, B0.B16, B0.B16
+
+	// It is safe to load a full block from srcPtr: the appended tag guarantees the bytes are readable
+ VLD1 (srcPtr), [B5.B16]
+
+ VEOR B5.B16, B0.B16, B0.B16
+
+ VEOR T3.B16, T3.B16, T3.B16
+ MOVD $0, H1
+ SUB $1, H1
+
+ TBZ $3, srcPtrLen, ld4
+ VMOV B0.D[0], H0
+ MOVD.P H0, 8(dstPtr)
+ VMOV H1, T3.D[0]
+ VEXT $8, ZERO.B16, B0.B16, B0.B16
+ld4:
+ TBZ $2, srcPtrLen, ld2
+ VMOV B0.S[0], H0
+ MOVW.P H0, 4(dstPtr)
+ VEXT $12, T3.B16, ZERO.B16, T3.B16
+ VMOV H1, T3.S[0]
+ VEXT $4, ZERO.B16, B0.B16, B0.B16
+ld2:
+ TBZ $1, srcPtrLen, ld1
+ VMOV B0.H[0], H0
+ MOVH.P H0, 2(dstPtr)
+ VEXT $14, T3.B16, ZERO.B16, T3.B16
+ VMOV H1, T3.H[0]
+ VEXT $2, ZERO.B16, B0.B16, B0.B16
+ld1:
+ TBZ $0, srcPtrLen, ld0
+ VMOV B0.B[0], H0
+ MOVB.P H0, 1(dstPtr)
+ VEXT $15, T3.B16, ZERO.B16, T3.B16
+ VMOV H1, T3.B[0]
+ld0:
+
+ VAND T3.B16, B5.B16, B5.B16
+ VREV64 B5.B16, B5.B16
+
+ VEOR ACC0.B16, B5.B16, B5.B16
+ VEXT $8, B5.B16, B5.B16, T0.B16
+ VEOR B5.B16, T0.B16, T0.B16
+ VPMULL B5.D1, T1.D1, ACC1.Q1
+ VPMULL2 B5.D2, T1.D2, ACC0.Q1
+ VPMULL T0.D1, T2.D1, ACCM.Q1
+ reduce()
+done:
+ VST1 [ACC0.B16], (tPtr)
+
+ RET
diff --git a/src/crypto/aes/gcm_ppc64x.go b/src/crypto/aes/gcm_ppc64x.go
new file mode 100644
index 0000000..44b2705
--- /dev/null
+++ b/src/crypto/aes/gcm_ppc64x.go
@@ -0,0 +1,265 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64le || ppc64
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "runtime"
+)
+
+// This file implements GCM using an optimized GHASH function.
+
+//go:noescape
+func gcmInit(productTable *[256]byte, h []byte)
+
+//go:noescape
+func gcmHash(output []byte, productTable *[256]byte, inp []byte, len int)
+
+//go:noescape
+func gcmMul(output []byte, productTable *[256]byte)
+
+const (
+ gcmCounterSize = 16
+ gcmBlockSize = 16
+ gcmTagSize = 16
+ gcmStandardNonceSize = 12
+)
+
+var errOpen = errors.New("cipher: message authentication failed")
+
+// Assert that aesCipherAsm implements the gcmAble interface.
+var _ gcmAble = (*aesCipherAsm)(nil)
+
+type gcmAsm struct {
+ cipher *aesCipherAsm
+ // ks is the key schedule, the length of which depends on the size of
+ // the AES key.
+ ks []uint32
+ // productTable contains pre-computed multiples of the binary-field
+ // element used in GHASH.
+ productTable [256]byte
+ // nonceSize contains the expected size of the nonce, in bytes.
+ nonceSize int
+ // tagSize contains the size of the tag, in bytes.
+ tagSize int
+}
+
+// NewGCM returns the AES cipher wrapped in Galois Counter Mode. This is only
+// called by crypto/cipher.NewGCM via the gcmAble interface.
+func (c *aesCipherAsm) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
+ var h1, h2 uint64
+ g := &gcmAsm{cipher: c, ks: c.enc, nonceSize: nonceSize, tagSize: tagSize}
+
+ hle := make([]byte, gcmBlockSize)
+
+ c.Encrypt(hle, hle)
+
+	// On little-endian targets, reverse the bytes in each 8-byte chunk
+	// (load little endian, store big endian); on big-endian targets this
+	// is a plain copy.
+ if runtime.GOARCH == "ppc64le" {
+ h1 = binary.LittleEndian.Uint64(hle[:8])
+ h2 = binary.LittleEndian.Uint64(hle[8:])
+ } else {
+ h1 = binary.BigEndian.Uint64(hle[:8])
+ h2 = binary.BigEndian.Uint64(hle[8:])
+ }
+ binary.BigEndian.PutUint64(hle[:8], h1)
+ binary.BigEndian.PutUint64(hle[8:], h2)
+ gcmInit(&g.productTable, hle)
+
+ return g, nil
+}
+
+func (g *gcmAsm) NonceSize() int {
+ return g.nonceSize
+}
+
+func (g *gcmAsm) Overhead() int {
+ return g.tagSize
+}
+
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
+
+// deriveCounter computes the initial GCM counter state from the given nonce.
+func (g *gcmAsm) deriveCounter(counter *[gcmBlockSize]byte, nonce []byte) {
+ if len(nonce) == gcmStandardNonceSize {
+ copy(counter[:], nonce)
+ counter[gcmBlockSize-1] = 1
+ } else {
+ var hash [16]byte
+ g.paddedGHASH(&hash, nonce)
+ lens := gcmLengths(0, uint64(len(nonce))*8)
+ g.paddedGHASH(&hash, lens[:])
+ copy(counter[:], hash[:])
+ }
+}
+
+// counterCrypt encrypts in using AES in counter mode and places the result
+// into out. counter is the initial count value and will be updated with the next
+// count value. The length of out must be greater than or equal to the length
+// of in.
+func (g *gcmAsm) counterCrypt(out, in []byte, counter *[gcmBlockSize]byte) {
+ var mask [gcmBlockSize]byte
+
+ for len(in) >= gcmBlockSize {
+ // Hint to avoid bounds check
+ _, _ = in[15], out[15]
+ g.cipher.Encrypt(mask[:], counter[:])
+ gcmInc32(counter)
+
+		// XOR 16 bytes per loop iteration, in 8-byte chunks
+ in0 := binary.LittleEndian.Uint64(in[0:])
+ in1 := binary.LittleEndian.Uint64(in[8:])
+ m0 := binary.LittleEndian.Uint64(mask[:8])
+ m1 := binary.LittleEndian.Uint64(mask[8:])
+ binary.LittleEndian.PutUint64(out[:8], in0^m0)
+ binary.LittleEndian.PutUint64(out[8:], in1^m1)
+ out = out[16:]
+ in = in[16:]
+ }
+
+ if len(in) > 0 {
+ g.cipher.Encrypt(mask[:], counter[:])
+ gcmInc32(counter)
+ // XOR leftover bytes
+ for i, inb := range in {
+ out[i] = inb ^ mask[i]
+ }
+ }
+}
+
+// gcmInc32 increments the rightmost 32 bits of the counter block by 1.
+func gcmInc32(counterBlock *[16]byte) {
+ c := counterBlock[len(counterBlock)-4:]
+ x := binary.BigEndian.Uint32(c) + 1
+ binary.BigEndian.PutUint32(c, x)
+}
+
+// paddedGHASH pads data with zeroes until its length is a multiple of
+// 16 bytes. It then updates hash using the GHASH algorithm.
+func (g *gcmAsm) paddedGHASH(hash *[16]byte, data []byte) {
+ if siz := len(data) - (len(data) % gcmBlockSize); siz > 0 {
+ gcmHash(hash[:], &g.productTable, data[:], siz)
+ data = data[siz:]
+ }
+ if len(data) > 0 {
+ var s [16]byte
+ copy(s[:], data)
+ gcmHash(hash[:], &g.productTable, s[:], len(s))
+ }
+}
+
+// auth calculates GHASH(ciphertext, additionalData), masks the result with
+// tagMask and writes the result to out.
+func (g *gcmAsm) auth(out, ciphertext, aad []byte, tagMask *[gcmTagSize]byte) {
+ var hash [16]byte
+ g.paddedGHASH(&hash, aad)
+ g.paddedGHASH(&hash, ciphertext)
+ lens := gcmLengths(uint64(len(aad))*8, uint64(len(ciphertext))*8)
+ g.paddedGHASH(&hash, lens[:])
+
+ copy(out, hash[:])
+ for i := range out {
+ out[i] ^= tagMask[i]
+ }
+}
+
+// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
+// details.
+func (g *gcmAsm) Seal(dst, nonce, plaintext, data []byte) []byte {
+ if len(nonce) != g.nonceSize {
+ panic("cipher: incorrect nonce length given to GCM")
+ }
+ if uint64(len(plaintext)) > ((1<<32)-2)*BlockSize {
+ panic("cipher: message too large for GCM")
+ }
+
+ ret, out := sliceForAppend(dst, len(plaintext)+g.tagSize)
+
+ var counter, tagMask [gcmBlockSize]byte
+ g.deriveCounter(&counter, nonce)
+
+ g.cipher.Encrypt(tagMask[:], counter[:])
+ gcmInc32(&counter)
+
+ g.counterCrypt(out, plaintext, &counter)
+ g.auth(out[len(plaintext):], out[:len(plaintext)], data, &tagMask)
+
+ return ret
+}
+
+// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
+// for details.
+func (g *gcmAsm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(nonce) != g.nonceSize {
+ panic("cipher: incorrect nonce length given to GCM")
+ }
+ if len(ciphertext) < g.tagSize {
+ return nil, errOpen
+ }
+ if uint64(len(ciphertext)) > ((1<<32)-2)*uint64(BlockSize)+uint64(g.tagSize) {
+ return nil, errOpen
+ }
+
+ tag := ciphertext[len(ciphertext)-g.tagSize:]
+ ciphertext = ciphertext[:len(ciphertext)-g.tagSize]
+
+ var counter, tagMask [gcmBlockSize]byte
+ g.deriveCounter(&counter, nonce)
+
+ g.cipher.Encrypt(tagMask[:], counter[:])
+ gcmInc32(&counter)
+
+ var expectedTag [gcmTagSize]byte
+ g.auth(expectedTag[:], ciphertext, data, &tagMask)
+
+ ret, out := sliceForAppend(dst, len(ciphertext))
+
+ if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
+ for i := range out {
+ out[i] = 0
+ }
+ return nil, errOpen
+ }
+
+ g.counterCrypt(out, ciphertext, &counter)
+ return ret, nil
+}
+
+func gcmLengths(len0, len1 uint64) [16]byte {
+ return [16]byte{
+ byte(len0 >> 56),
+ byte(len0 >> 48),
+ byte(len0 >> 40),
+ byte(len0 >> 32),
+ byte(len0 >> 24),
+ byte(len0 >> 16),
+ byte(len0 >> 8),
+ byte(len0),
+ byte(len1 >> 56),
+ byte(len1 >> 48),
+ byte(len1 >> 40),
+ byte(len1 >> 32),
+ byte(len1 >> 24),
+ byte(len1 >> 16),
+ byte(len1 >> 8),
+ byte(len1),
+ }
+}
diff --git a/src/crypto/aes/gcm_ppc64x.s b/src/crypto/aes/gcm_ppc64x.s
new file mode 100644
index 0000000..72f0b8e
--- /dev/null
+++ b/src/crypto/aes/gcm_ppc64x.s
@@ -0,0 +1,590 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ppc64 || ppc64le
+
+// Based on CRYPTOGAMS code with the following comment:
+// # ====================================================================
+// # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// # project. The module is, however, dual licensed under OpenSSL and
+// # CRYPTOGAMS licenses depending on where you obtain it. For further
+// # details see http://www.openssl.org/~appro/cryptogams/.
+// # ====================================================================
+
+// This implementation is based on the ppc64 asm generated by the
+// script https://github.com/dot-asm/cryptogams/blob/master/ppc/ghashp8-ppc.pl
+// from commit d47afb3c.
+
+// Changes were made to account for differences in the ABI and in register
+// usage, and some arguments were changed to match how the Go code passes them.
+
+#include "textflag.h"
+
+#define XIP R3
+#define HTBL R4
+#define INP R5
+#define LEN R6
+
+#define XL V0
+#define XM V1
+#define XH V2
+#define IN V3
+#define ZERO V4
+#define T0 V5
+#define T1 V6
+#define T2 V7
+#define XC2 V8
+#define H V9
+#define HH V10
+#define HL V11
+#define LEMASK V12
+#define XL1 V13
+#define XM1 V14
+#define XH1 V15
+#define IN1 V16
+#define H2 V17
+#define H2H V18
+#define H2L V19
+#define XL3 V20
+#define XM2 V21
+#define IN2 V22
+#define H3L V23
+#define H3 V24
+#define H3H V25
+#define XH3 V26
+#define XM3 V27
+#define IN3 V28
+#define H4L V29
+#define H4 V30
+#define H4H V31
+
+#define IN0 IN
+#define H21L HL
+#define H21H HH
+#define LOPERM H2L
+#define HIPERM H2H
+
+#define VXL VS32
+#define VIN VS35
+#define VXC2 VS40
+#define VH VS41
+#define VHH VS42
+#define VHL VS43
+#define VIN1 VS48
+#define VH2 VS49
+#define VH2H VS50
+#define VH2L VS51
+
+#define VIN2 VS54
+#define VH3L VS55
+#define VH3 VS56
+#define VH3H VS57
+#define VIN3 VS60
+#define VH4L VS61
+#define VH4 VS62
+#define VH4H VS63
+
+#define VIN0 VIN
+
+// func gcmInit(productTable *[256]byte, h []byte)
+TEXT ·gcmInit(SB), NOSPLIT, $0-32
+ MOVD productTable+0(FP), XIP
+ MOVD h+8(FP), HTBL
+
+ MOVD $0x10, R8
+ MOVD $0x20, R9
+ MOVD $0x30, R10
+ LXVD2X (HTBL)(R0), VH // Load H
+
+ VSPLTISB $-16, XC2 // 0xf0
+ VSPLTISB $1, T0 // one
+ VADDUBM XC2, XC2, XC2 // 0xe0
+ VXOR ZERO, ZERO, ZERO
+ VOR XC2, T0, XC2 // 0xe1
+ VSLDOI $15, XC2, ZERO, XC2 // 0xe1...
+ VSLDOI $1, ZERO, T0, T1 // ...1
+ VADDUBM XC2, XC2, XC2 // 0xc2...
+ VSPLTISB $7, T2
+ VOR XC2, T1, XC2 // 0xc2....01
+ VSPLTB $0, H, T1 // most significant byte
+ VSL H, T0, H // H<<=1
+ VSRAB T1, T2, T1 // broadcast carry bit
+ VAND T1, XC2, T1
+ VXOR H, T1, IN // twisted H
+
+ VSLDOI $8, IN, IN, H // twist even more ...
+ VSLDOI $8, ZERO, XC2, XC2 // 0xc2.0
+ VSLDOI $8, ZERO, H, HL // ... and split
+ VSLDOI $8, H, ZERO, HH
+
+ STXVD2X VXC2, (XIP+R0) // save pre-computed table
+ STXVD2X VHL, (XIP+R8)
+ MOVD $0x40, R8
+ STXVD2X VH, (XIP+R9)
+ MOVD $0x50, R9
+ STXVD2X VHH, (XIP+R10)
+ MOVD $0x60, R10
+
+ VPMSUMD IN, HL, XL // H.lo·H.lo
+ VPMSUMD IN, H, XM // H.hi·H.lo+H.lo·H.hi
+ VPMSUMD IN, HH, XH // H.hi·H.hi
+
+ VPMSUMD XL, XC2, T2 // 1st reduction phase
+
+ VSLDOI $8, XM, ZERO, T0
+ VSLDOI $8, ZERO, XM, T1
+ VXOR XL, T0, XL
+ VXOR XH, T1, XH
+
+ VSLDOI $8, XL, XL, XL
+ VXOR XL, T2, XL
+
+ VSLDOI $8, XL, XL, T1 // 2nd reduction phase
+ VPMSUMD XL, XC2, XL
+ VXOR T1, XH, T1
+ VXOR XL, T1, IN1
+
+ VSLDOI $8, IN1, IN1, H2
+ VSLDOI $8, ZERO, H2, H2L
+ VSLDOI $8, H2, ZERO, H2H
+
+ STXVD2X VH2L, (XIP+R8) // save H^2
+ MOVD $0x70, R8
+ STXVD2X VH2, (XIP+R9)
+ MOVD $0x80, R9
+ STXVD2X VH2H, (XIP+R10)
+ MOVD $0x90, R10
+
+ VPMSUMD IN, H2L, XL // H.lo·H^2.lo
+ VPMSUMD IN1, H2L, XL1 // H^2.lo·H^2.lo
+ VPMSUMD IN, H2, XM // H.hi·H^2.lo+H.lo·H^2.hi
+ VPMSUMD IN1, H2, XM1 // H^2.hi·H^2.lo+H^2.lo·H^2.hi
+ VPMSUMD IN, H2H, XH // H.hi·H^2.hi
+ VPMSUMD IN1, H2H, XH1 // H^2.hi·H^2.hi
+
+ VPMSUMD XL, XC2, T2 // 1st reduction phase
+ VPMSUMD XL1, XC2, HH // 1st reduction phase
+
+ VSLDOI $8, XM, ZERO, T0
+ VSLDOI $8, ZERO, XM, T1
+ VSLDOI $8, XM1, ZERO, HL
+ VSLDOI $8, ZERO, XM1, H
+ VXOR XL, T0, XL
+ VXOR XH, T1, XH
+ VXOR XL1, HL, XL1
+ VXOR XH1, H, XH1
+
+ VSLDOI $8, XL, XL, XL
+ VSLDOI $8, XL1, XL1, XL1
+ VXOR XL, T2, XL
+ VXOR XL1, HH, XL1
+
+ VSLDOI $8, XL, XL, T1 // 2nd reduction phase
+ VSLDOI $8, XL1, XL1, H // 2nd reduction phase
+ VPMSUMD XL, XC2, XL
+ VPMSUMD XL1, XC2, XL1
+ VXOR T1, XH, T1
+ VXOR H, XH1, H
+ VXOR XL, T1, XL
+ VXOR XL1, H, XL1
+
+ VSLDOI $8, XL, XL, H
+ VSLDOI $8, XL1, XL1, H2
+ VSLDOI $8, ZERO, H, HL
+ VSLDOI $8, H, ZERO, HH
+ VSLDOI $8, ZERO, H2, H2L
+ VSLDOI $8, H2, ZERO, H2H
+
+ STXVD2X VHL, (XIP+R8) // save H^3
+ MOVD $0xa0, R8
+ STXVD2X VH, (XIP+R9)
+ MOVD $0xb0, R9
+ STXVD2X VHH, (XIP+R10)
+ MOVD $0xc0, R10
+ STXVD2X VH2L, (XIP+R8) // save H^4
+ STXVD2X VH2, (XIP+R9)
+ STXVD2X VH2H, (XIP+R10)
+
+ RET
+
+// func gcmHash(output []byte, productTable *[256]byte, inp []byte, len int)
+TEXT ·gcmHash(SB), NOSPLIT, $0-64
+ MOVD output+0(FP), XIP
+ MOVD productTable+24(FP), HTBL
+ MOVD inp+32(FP), INP
+ MOVD len+56(FP), LEN
+
+ MOVD $0x10, R8
+ MOVD $0x20, R9
+ MOVD $0x30, R10
+ LXVD2X (XIP)(R0), VXL // load Xi
+
+ LXVD2X (HTBL)(R8), VHL // load pre-computed table
+ MOVD $0x40, R8
+ LXVD2X (HTBL)(R9), VH
+ MOVD $0x50, R9
+ LXVD2X (HTBL)(R10), VHH
+ MOVD $0x60, R10
+ LXVD2X (HTBL)(R0), VXC2
+#ifdef GOARCH_ppc64le
+ LVSL (R0)(R0), LEMASK
+ VSPLTISB $0x07, T0
+ VXOR LEMASK, T0, LEMASK
+ VPERM XL, XL, LEMASK, XL
+#endif
+ VXOR ZERO, ZERO, ZERO
+
+ CMPU LEN, $64
+ BGE gcm_ghash_p8_4x
+
+ LXVD2X (INP)(R0), VIN
+ ADD $16, INP, INP
+ SUBCCC $16, LEN, LEN
+#ifdef GOARCH_ppc64le
+ VPERM IN, IN, LEMASK, IN
+#endif
+ VXOR IN, XL, IN
+ BEQ short
+
+ LXVD2X (HTBL)(R8), VH2L // load H^2
+ MOVD $16, R8
+ LXVD2X (HTBL)(R9), VH2
+ ADD LEN, INP, R9 // end of input
+ LXVD2X (HTBL)(R10), VH2H
+
+loop_2x:
+ LXVD2X (INP)(R0), VIN1
+#ifdef GOARCH_ppc64le
+ VPERM IN1, IN1, LEMASK, IN1
+#endif
+
+ SUBC $32, LEN, LEN
+ VPMSUMD IN, H2L, XL // H^2.lo·Xi.lo
+ VPMSUMD IN1, HL, XL1 // H.lo·Xi+1.lo
+	SUBE R11, R11, R11 // borrow ? -1 : 0
+ VPMSUMD IN, H2, XM // H^2.hi·Xi.lo+H^2.lo·Xi.hi
+ VPMSUMD IN1, H, XM1 // H.hi·Xi+1.lo+H.lo·Xi+1.hi
+ AND LEN, R11, R11
+ VPMSUMD IN, H2H, XH // H^2.hi·Xi.hi
+ VPMSUMD IN1, HH, XH1 // H.hi·Xi+1.hi
+ ADD R11, INP, INP
+
+ VXOR XL, XL1, XL
+ VXOR XM, XM1, XM
+
+ VPMSUMD XL, XC2, T2 // 1st reduction phase
+
+ VSLDOI $8, XM, ZERO, T0
+ VSLDOI $8, ZERO, XM, T1
+ VXOR XH, XH1, XH
+ VXOR XL, T0, XL
+ VXOR XH, T1, XH
+
+ VSLDOI $8, XL, XL, XL
+ VXOR XL, T2, XL
+ LXVD2X (INP)(R8), VIN
+ ADD $32, INP, INP
+
+ VSLDOI $8, XL, XL, T1 // 2nd reduction phase
+ VPMSUMD XL, XC2, XL
+#ifdef GOARCH_ppc64le
+ VPERM IN, IN, LEMASK, IN
+#endif
+ VXOR T1, XH, T1
+ VXOR IN, T1, IN
+ VXOR IN, XL, IN
+ CMP R9, INP
+ BGT loop_2x // done yet?
+
+ CMPWU LEN, $0
+ BNE even
+
+short:
+ VPMSUMD IN, HL, XL // H.lo·Xi.lo
+ VPMSUMD IN, H, XM // H.hi·Xi.lo+H.lo·Xi.hi
+ VPMSUMD IN, HH, XH // H.hi·Xi.hi
+
+ VPMSUMD XL, XC2, T2 // 1st reduction phase
+
+ VSLDOI $8, XM, ZERO, T0
+ VSLDOI $8, ZERO, XM, T1
+ VXOR XL, T0, XL
+ VXOR XH, T1, XH
+
+ VSLDOI $8, XL, XL, XL
+ VXOR XL, T2, XL
+
+ VSLDOI $8, XL, XL, T1 // 2nd reduction phase
+ VPMSUMD XL, XC2, XL
+ VXOR T1, XH, T1
+
+even:
+ VXOR XL, T1, XL
+#ifdef GOARCH_ppc64le
+ VPERM XL, XL, LEMASK, XL
+#endif
+ STXVD2X VXL, (XIP+R0) // write out Xi
+
+ OR R12, R12, R12 // nop
+ RET
+
+gcm_ghash_p8_4x:
+ LVSL (R8)(R0), T0 // 0x0001..0e0f
+ MOVD $0x70, R8
+ LXVD2X (HTBL)(R9), VH2
+ MOVD $0x80, R9
+ VSPLTISB $8, T1 // 0x0808..0808
+ MOVD $0x90, R10
+ LXVD2X (HTBL)(R8), VH3L // load H^3
+ MOVD $0xa0, R8
+ LXVD2X (HTBL)(R9), VH3
+ MOVD $0xb0, R9
+ LXVD2X (HTBL)(R10), VH3H
+ MOVD $0xc0, R10
+ LXVD2X (HTBL)(R8), VH4L // load H^4
+ MOVD $0x10, R8
+ LXVD2X (HTBL)(R9), VH4
+ MOVD $0x20, R9
+ LXVD2X (HTBL)(R10), VH4H
+ MOVD $0x30, R10
+
+ VSLDOI $8, ZERO, T1, T2 // 0x0000..0808
+ VADDUBM T0, T2, HIPERM // 0x0001..1617
+ VADDUBM T1, HIPERM, LOPERM // 0x0809..1e1f
+
+ SRD $4, LEN, LEN // convert LEN to a block count so the sign bit can be used as a carry
+
+ LXVD2X (INP)(R0), VIN0 // load input
+ LXVD2X (INP)(R8), VIN1
+ SUBCCC $8, LEN, LEN
+ LXVD2X (INP)(R9), VIN2
+ LXVD2X (INP)(R10), VIN3
+ ADD $0x40, INP, INP
+#ifdef GOARCH_ppc64le
+ VPERM IN0, IN0, LEMASK, IN0
+ VPERM IN1, IN1, LEMASK, IN1
+ VPERM IN2, IN2, LEMASK, IN2
+ VPERM IN3, IN3, LEMASK, IN3
+#endif
+
+ VXOR IN0, XL, XH
+
+ VPMSUMD IN1, H3L, XL1
+ VPMSUMD IN1, H3, XM1
+ VPMSUMD IN1, H3H, XH1
+
+ VPERM H2, H, HIPERM, H21L
+ VPERM IN2, IN3, LOPERM, T0
+ VPERM H2, H, LOPERM, H21H
+ VPERM IN2, IN3, HIPERM, T1
+ VPMSUMD IN2, H2, XM2 // H^2.lo·Xi+2.hi+H^2.hi·Xi+2.lo
+ VPMSUMD T0, H21L, XL3 // H^2.lo·Xi+2.lo+H.lo·Xi+3.lo
+ VPMSUMD IN3, H, XM3 // H.hi·Xi+3.lo +H.lo·Xi+3.hi
+ VPMSUMD T1, H21H, XH3 // H^2.hi·Xi+2.hi+H.hi·Xi+3.hi
+
+ VXOR XM2, XM1, XM2
+ VXOR XL3, XL1, XL3
+ VXOR XM3, XM2, XM3
+ VXOR XH3, XH1, XH3
+
+ BLT tail_4x
+
+loop_4x:
+ LXVD2X (INP)(R0), VIN0
+ LXVD2X (INP)(R8), VIN1
+ SUBCCC $4, LEN, LEN
+ LXVD2X (INP)(R9), VIN2
+ LXVD2X (INP)(R10), VIN3
+ ADD $0x40, INP, INP
+#ifdef GOARCH_ppc64le
+ VPERM IN1, IN1, LEMASK, IN1
+ VPERM IN2, IN2, LEMASK, IN2
+ VPERM IN3, IN3, LEMASK, IN3
+ VPERM IN0, IN0, LEMASK, IN0
+#endif
+
+ VPMSUMD XH, H4L, XL // H^4.lo·Xi.lo
+ VPMSUMD XH, H4, XM // H^4.hi·Xi.lo+H^4.lo·Xi.hi
+ VPMSUMD XH, H4H, XH // H^4.hi·Xi.hi
+ VPMSUMD IN1, H3L, XL1
+ VPMSUMD IN1, H3, XM1
+ VPMSUMD IN1, H3H, XH1
+
+ VXOR XL, XL3, XL
+ VXOR XM, XM3, XM
+ VXOR XH, XH3, XH
+ VPERM IN2, IN3, LOPERM, T0
+ VPERM IN2, IN3, HIPERM, T1
+
+ VPMSUMD XL, XC2, T2 // 1st reduction phase
+ VPMSUMD T0, H21L, XL3 // H.lo·Xi+3.lo +H^2.lo·Xi+2.lo
+ VPMSUMD T1, H21H, XH3 // H.hi·Xi+3.hi +H^2.hi·Xi+2.hi
+
+ VSLDOI $8, XM, ZERO, T0
+ VSLDOI $8, ZERO, XM, T1
+ VXOR XL, T0, XL
+ VXOR XH, T1, XH
+
+ VSLDOI $8, XL, XL, XL
+ VXOR XL, T2, XL
+
+ VSLDOI $8, XL, XL, T1 // 2nd reduction phase
+ VPMSUMD IN2, H2, XM2 // H^2.hi·Xi+2.lo+H^2.lo·Xi+2.hi
+ VPMSUMD IN3, H, XM3 // H.hi·Xi+3.lo +H.lo·Xi+3.hi
+ VPMSUMD XL, XC2, XL
+
+ VXOR XL3, XL1, XL3
+ VXOR XH3, XH1, XH3
+ VXOR XH, IN0, XH
+ VXOR XM2, XM1, XM2
+ VXOR XH, T1, XH
+ VXOR XM3, XM2, XM3
+ VXOR XH, XL, XH
+ BGE loop_4x
+
+tail_4x:
+ VPMSUMD XH, H4L, XL // H^4.lo·Xi.lo
+ VPMSUMD XH, H4, XM // H^4.hi·Xi.lo+H^4.lo·Xi.hi
+ VPMSUMD XH, H4H, XH // H^4.hi·Xi.hi
+
+ VXOR XL, XL3, XL
+ VXOR XM, XM3, XM
+
+ VPMSUMD XL, XC2, T2 // 1st reduction phase
+
+ VSLDOI $8, XM, ZERO, T0
+ VSLDOI $8, ZERO, XM, T1
+ VXOR XH, XH3, XH
+ VXOR XL, T0, XL
+ VXOR XH, T1, XH
+
+ VSLDOI $8, XL, XL, XL
+ VXOR XL, T2, XL
+
+ VSLDOI $8, XL, XL, T1 // 2nd reduction phase
+ VPMSUMD XL, XC2, XL
+ VXOR T1, XH, T1
+ VXOR XL, T1, XL
+
+ ADDCCC $4, LEN, LEN
+ BEQ done_4x
+
+ LXVD2X (INP)(R0), VIN0
+ CMPU LEN, $2
+ MOVD $-4, LEN
+ BLT one
+ LXVD2X (INP)(R8), VIN1
+ BEQ two
+
+three:
+ LXVD2X (INP)(R9), VIN2
+#ifdef GOARCH_ppc64le
+ VPERM IN0, IN0, LEMASK, IN0
+ VPERM IN1, IN1, LEMASK, IN1
+ VPERM IN2, IN2, LEMASK, IN2
+#endif
+
+ VXOR IN0, XL, XH
+ VOR H3L, H3L, H4L
+ VOR H3, H3, H4
+ VOR H3H, H3H, H4H
+
+ VPERM IN1, IN2, LOPERM, T0
+ VPERM IN1, IN2, HIPERM, T1
+ VPMSUMD IN1, H2, XM2 // H^2.lo·Xi+1.hi+H^2.hi·Xi+1.lo
+ VPMSUMD IN2, H, XM3 // H.hi·Xi+2.lo +H.lo·Xi+2.hi
+ VPMSUMD T0, H21L, XL3 // H^2.lo·Xi+1.lo+H.lo·Xi+2.lo
+ VPMSUMD T1, H21H, XH3 // H^2.hi·Xi+1.hi+H.hi·Xi+2.hi
+
+ VXOR XM3, XM2, XM3
+ JMP tail_4x
+
+two:
+#ifdef GOARCH_ppc64le
+ VPERM IN0, IN0, LEMASK, IN0
+ VPERM IN1, IN1, LEMASK, IN1
+#endif
+
+ VXOR IN0, XL, XH
+ VPERM ZERO, IN1, LOPERM, T0
+ VPERM ZERO, IN1, HIPERM, T1
+
+ VSLDOI $8, ZERO, H2, H4L
+ VOR H2, H2, H4
+ VSLDOI $8, H2, ZERO, H4H
+
+ VPMSUMD T0, H21L, XL3 // H.lo·Xi+1.lo
+ VPMSUMD IN1, H, XM3 // H.hi·Xi+1.lo+H.lo·Xi+1.hi
+ VPMSUMD T1, H21H, XH3 // H.hi·Xi+1.hi
+
+ JMP tail_4x
+
+one:
+#ifdef GOARCH_ppc64le
+ VPERM IN0, IN0, LEMASK, IN0
+#endif
+
+ VSLDOI $8, ZERO, H, H4L
+ VOR H, H, H4
+ VSLDOI $8, H, ZERO, H4H
+
+ VXOR IN0, XL, XH
+ VXOR XL3, XL3, XL3
+ VXOR XM3, XM3, XM3
+ VXOR XH3, XH3, XH3
+
+ JMP tail_4x
+
+done_4x:
+#ifdef GOARCH_ppc64le
+ VPERM XL, XL, LEMASK, XL
+#endif
+ STXVD2X VXL, (XIP+R0) // write out Xi
+ RET
+
+// func gcmMul(output []byte, productTable *[256]byte)
+TEXT ·gcmMul(SB), NOSPLIT, $0-32
+ MOVD output+0(FP), XIP
+ MOVD productTable+24(FP), HTBL
+
+ MOVD $0x10, R8
+ MOVD $0x20, R9
+ MOVD $0x30, R10
+ LXVD2X (XIP)(R0), VIN // load Xi
+
+ LXVD2X (HTBL)(R8), VHL // load pre-computed table
+ LXVD2X (HTBL)(R9), VH
+ LXVD2X (HTBL)(R10), VHH
+ LXVD2X (HTBL)(R0), VXC2
+#ifdef GOARCH_ppc64le
+ LVSL (R0)(R0), LEMASK
+ VSPLTISB $0x07, T0
+ VXOR LEMASK, T0, LEMASK
+ VPERM IN, IN, LEMASK, IN
+#endif
+ VXOR ZERO, ZERO, ZERO
+
+ VPMSUMD IN, HL, XL // H.lo·Xi.lo
+ VPMSUMD IN, H, XM // H.hi·Xi.lo+H.lo·Xi.hi
+ VPMSUMD IN, HH, XH // H.hi·Xi.hi
+
+ VPMSUMD XL, XC2, T2 // 1st reduction phase
+
+ VSLDOI $8, XM, ZERO, T0
+ VSLDOI $8, ZERO, XM, T1
+ VXOR XL, T0, XL
+ VXOR XH, T1, XH
+
+ VSLDOI $8, XL, XL, XL
+ VXOR XL, T2, XL
+
+ VSLDOI $8, XL, XL, T1 // 2nd reduction phase
+ VPMSUMD XL, XC2, XL
+ VXOR T1, XH, T1
+ VXOR XL, T1, XL
+
+#ifdef GOARCH_ppc64le
+ VPERM XL, XL, LEMASK, XL
+#endif
+ STXVD2X VXL, (XIP+R0) // write out Xi
+ RET
diff --git a/src/crypto/aes/gcm_s390x.go b/src/crypto/aes/gcm_s390x.go
new file mode 100644
index 0000000..d95f169
--- /dev/null
+++ b/src/crypto/aes/gcm_s390x.go
@@ -0,0 +1,371 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "crypto/cipher"
+ "crypto/internal/alias"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "internal/cpu"
+)
+
+// This file contains two implementations of AES-GCM. The first implementation
+// (gcmAsm) uses the KMCTR instruction to encrypt using AES in counter mode and
+// the KIMD instruction for GHASH. The second implementation (gcmKMA) uses the
+// newer KMA instruction which performs both operations.
+
+// gcmCount represents a 16-byte big-endian count value.
+type gcmCount [16]byte
+
+// inc increments the rightmost 32 bits of the count value by 1, wrapping
+// around to zero on overflow.
+func (x *gcmCount) inc() {
+ binary.BigEndian.PutUint32(x[len(x)-4:], binary.BigEndian.Uint32(x[len(x)-4:])+1)
+}
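+
+// A minimal sketch (illustrative only, not part of the original code) of the
+// wrap-around behavior of inc: only the rightmost 32 bits change, so the
+// leading twelve bytes of the counter block are never disturbed.
+func exampleCountWrap() bool {
+ var c gcmCount
+ binary.BigEndian.PutUint32(c[12:], 0xffffffff) // saturate the counter bits
+ c.inc()
+ // The 32-bit counter wrapped to zero; bytes 0-11 remain zero.
+ return binary.BigEndian.Uint32(c[12:]) == 0
+}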
+
+// gcmLengths writes len0 || len1 as big-endian values to a 16-byte array.
+func gcmLengths(len0, len1 uint64) [16]byte {
+ v := [16]byte{}
+ binary.BigEndian.PutUint64(v[0:], len0)
+ binary.BigEndian.PutUint64(v[8:], len1)
+ return v
+}
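+
+// For example, authenticating 32 bytes of additional data and 48 bytes of
+// ciphertext ends the GHASH computation with the block gcmLengths(32*8, 48*8),
+// i.e. both bit lengths encoded big-endian in a single 16-byte block.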
+
+// gcmHashKey represents the 16-byte hash key required by the GHASH algorithm.
+type gcmHashKey [16]byte
+
+type gcmAsm struct {
+ block *aesCipherAsm
+ hashKey gcmHashKey
+ nonceSize int
+ tagSize int
+}
+
+const (
+ gcmBlockSize = 16
+ gcmTagSize = 16
+ gcmMinimumTagSize = 12 // NIST SP 800-38D recommends tags with 12 or more bytes.
+ gcmStandardNonceSize = 12
+)
+
+var errOpen = errors.New("cipher: message authentication failed")
+
+// Assert that aesCipherAsm implements the gcmAble interface.
+var _ gcmAble = (*aesCipherAsm)(nil)
+
+// NewGCM returns the AES cipher wrapped in Galois Counter Mode. This is only
+// called by crypto/cipher.NewGCM via the gcmAble interface.
+func (c *aesCipherAsm) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
+ var hk gcmHashKey
+ c.Encrypt(hk[:], hk[:])
+ g := gcmAsm{
+ block: c,
+ hashKey: hk,
+ nonceSize: nonceSize,
+ tagSize: tagSize,
+ }
+ if cpu.S390X.HasAESGCM {
+ g := gcmKMA{g}
+ return &g, nil
+ }
+ return &g, nil
+}
+
+func (g *gcmAsm) NonceSize() int {
+ return g.nonceSize
+}
+
+func (g *gcmAsm) Overhead() int {
+ return g.tagSize
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
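+
+// Usage sketch (illustrative only, not part of the original code): growing a
+// buffer in place when it has spare capacity, as Seal does for the tag.
+func exampleSliceForAppend() {
+ buf := make([]byte, 4, 32) // len 4, cap 32: no allocation will occur
+ head, tail := sliceForAppend(buf, 16)
+ _ = head // head is buf extended to length 20
+ for i := range tail {
+ tail[i] = 0xaa // tail aliases only the 16 appended bytes
+ }
+}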
+
+// ghash uses the GHASH algorithm to hash data with the given key. The initial
+// hash value is given by hash, which is updated in place with the new hash
+// value. The length of data must be a multiple of 16 bytes.
+//
+//go:noescape
+func ghash(key *gcmHashKey, hash *[16]byte, data []byte)
+
+// paddedGHASH pads data with zeroes until its length is a multiple of
+// 16 bytes. It then calculates a new value for hash using the GHASH algorithm.
+func (g *gcmAsm) paddedGHASH(hash *[16]byte, data []byte) {
+ siz := len(data) &^ 0xf // round size down to a multiple of 16 bytes
+ if siz > 0 {
+ ghash(&g.hashKey, hash, data[:siz])
+ data = data[siz:]
+ }
+ if len(data) > 0 {
+ var s [16]byte
+ copy(s[:], data)
+ ghash(&g.hashKey, hash, s[:])
+ }
+}
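+
+// For example, hashing 20 bytes of data issues one ghash call over the first
+// 16-byte block and a second call over the remaining 4 bytes copied into a
+// zeroed 16-byte buffer.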
+
+// cryptBlocksGCM encrypts src using AES in counter mode with the given
+// function code and key. The rightmost 32 bits of the counter are incremented
+// between blocks as required by the GCM spec. The initial counter value is
+// given by cnt, which is updated with the next counter value to use.
+//
+// The lengths of both dst and buf must be greater than or equal to the length
+// of src. buf may be partially or completely overwritten during the execution
+// of the function.
+//
+//go:noescape
+func cryptBlocksGCM(fn code, key, dst, src, buf []byte, cnt *gcmCount)
+
+// counterCrypt encrypts src using AES in counter mode and places the result
+// into dst. cnt is the initial count value and will be updated with the next
+// count value. The length of dst must be greater than or equal to the length
+// of src.
+func (g *gcmAsm) counterCrypt(dst, src []byte, cnt *gcmCount) {
+ // Copying src into a buffer improves performance on some models when
+ // src and dst point to the same underlying array. We also need a
+ // buffer for counter values.
+ var ctrbuf, srcbuf [2048]byte
+ for len(src) >= 16 {
+ siz := len(src)
+ if len(src) > len(ctrbuf) {
+ siz = len(ctrbuf)
+ }
+ siz &^= 0xf // round siz down to a multiple of 16 bytes
+ copy(srcbuf[:], src[:siz])
+ cryptBlocksGCM(g.block.function, g.block.key, dst[:siz], srcbuf[:siz], ctrbuf[:], cnt)
+ src = src[siz:]
+ dst = dst[siz:]
+ }
+ if len(src) > 0 {
+ var x [16]byte
+ g.block.Encrypt(x[:], cnt[:])
+ for i := range src {
+ dst[i] = src[i] ^ x[i]
+ }
+ cnt.inc()
+ }
+}
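+
+// For example, a 5000-byte message is processed as two full 2048-byte chunks,
+// an 896-byte chunk (the remaining 904 bytes rounded down to a multiple of
+// 16), and a final 8-byte tail XORed against a single keystream block.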
+
+// deriveCounter computes the initial GCM counter state from the given nonce.
+// See NIST SP 800-38D, section 7.1.
+func (g *gcmAsm) deriveCounter(nonce []byte) gcmCount {
+ // GCM has two modes of operation with respect to the initial counter
+ // state: a "fast path" for 96-bit (12-byte) nonces, and a "slow path"
+ // for nonces of other lengths. For a 96-bit nonce, the nonce, along
+ // with a four-byte big-endian counter starting at one, is used
+ // directly as the starting counter. For other nonce sizes, the counter
+ // is computed by passing the nonce through the GHASH function.
+ var counter gcmCount
+ if len(nonce) == gcmStandardNonceSize {
+ copy(counter[:], nonce)
+ counter[gcmBlockSize-1] = 1
+ } else {
+ var hash [16]byte
+ g.paddedGHASH(&hash, nonce)
+ lens := gcmLengths(0, uint64(len(nonce))*8)
+ g.paddedGHASH(&hash, lens[:])
+ copy(counter[:], hash[:])
+ }
+ return counter
+}
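+
+// A concrete sketch of the fast path (illustrative only, not part of the
+// original code): with the standard 12-byte nonce, the starting counter is
+// simply nonce || 0x00000001.
+func exampleFastPathCounter(nonce *[gcmStandardNonceSize]byte) gcmCount {
+ var counter gcmCount
+ copy(counter[:], nonce[:])
+ counter[gcmBlockSize-1] = 1 // four-byte big-endian counter starting at one
+ return counter
+}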
+
+// auth calculates GHASH(ciphertext, additionalData), masks the result with
+// tagMask and writes the result to out.
+func (g *gcmAsm) auth(out, ciphertext, additionalData []byte, tagMask *[gcmTagSize]byte) {
+ var hash [16]byte
+ g.paddedGHASH(&hash, additionalData)
+ g.paddedGHASH(&hash, ciphertext)
+ lens := gcmLengths(uint64(len(additionalData))*8, uint64(len(ciphertext))*8)
+ g.paddedGHASH(&hash, lens[:])
+
+ copy(out, hash[:])
+ for i := range out {
+ out[i] ^= tagMask[i]
+ }
+}
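+
+// In the notation of NIST SP 800-38D, the tag assembled by callers of auth is
+// GHASH(A, C, len(A)||len(C)) XOR E_K(J0): tagMask holds E_K(J0), the
+// encryption of the initial counter block, and out receives the masked hash.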
+
+// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
+// details.
+func (g *gcmAsm) Seal(dst, nonce, plaintext, data []byte) []byte {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ if uint64(len(plaintext)) > ((1<<32)-2)*BlockSize {
+ panic("crypto/cipher: message too large for GCM")
+ }
+
+ ret, out := sliceForAppend(dst, len(plaintext)+g.tagSize)
+ if alias.InexactOverlap(out[:len(plaintext)], plaintext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+
+ counter := g.deriveCounter(nonce)
+
+ var tagMask [gcmBlockSize]byte
+ g.block.Encrypt(tagMask[:], counter[:])
+ counter.inc()
+
+ var tagOut [gcmTagSize]byte
+ g.counterCrypt(out, plaintext, &counter)
+ g.auth(tagOut[:], out[:len(plaintext)], data, &tagMask)
+ copy(out[len(plaintext):], tagOut[:])
+
+ return ret
+}
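+
+// End-to-end usage sketch through the public API (illustrative only; the
+// zero-valued key and nonce are placeholders):
+func exampleSealRoundTrip() ([]byte, error) {
+ block, err := NewCipher(make([]byte, 16)) // AES-128
+ if err != nil {
+ return nil, err
+ }
+ aead, err := cipher.NewGCM(block) // selects gcmAsm or gcmKMA on s390x
+ if err != nil {
+ return nil, err
+ }
+ nonce := make([]byte, aead.NonceSize())
+ ct := aead.Seal(nil, nonce, []byte("example plaintext"), nil)
+ return aead.Open(nil, nonce, ct, nil)
+}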
+
+// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
+// for details.
+func (g *gcmAsm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ // Sanity check to prevent the authentication from always succeeding if an implementation
+ // leaves tagSize uninitialized, for example.
+ if g.tagSize < gcmMinimumTagSize {
+ panic("crypto/cipher: incorrect GCM tag size")
+ }
+ if len(ciphertext) < g.tagSize {
+ return nil, errOpen
+ }
+ if uint64(len(ciphertext)) > ((1<<32)-2)*uint64(BlockSize)+uint64(g.tagSize) {
+ return nil, errOpen
+ }
+
+ tag := ciphertext[len(ciphertext)-g.tagSize:]
+ ciphertext = ciphertext[:len(ciphertext)-g.tagSize]
+
+ counter := g.deriveCounter(nonce)
+
+ var tagMask [gcmBlockSize]byte
+ g.block.Encrypt(tagMask[:], counter[:])
+ counter.inc()
+
+ var expectedTag [gcmTagSize]byte
+ g.auth(expectedTag[:], ciphertext, data, &tagMask)
+
+ ret, out := sliceForAppend(dst, len(ciphertext))
+ if alias.InexactOverlap(out, ciphertext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+
+ if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
+ // The AESNI code decrypts and authenticates concurrently, and
+ // so overwrites dst in the event of a tag mismatch. That
+ // behavior is mimicked here in order to be consistent across
+ // platforms.
+ for i := range out {
+ out[i] = 0
+ }
+ return nil, errOpen
+ }
+
+ g.counterCrypt(out, ciphertext, &counter)
+ return ret, nil
+}
+
+// gcmKMA implements the cipher.AEAD interface using the KMA instruction. It
+// should only be used when cpu.S390X.HasAESGCM is true.
+type gcmKMA struct {
+ gcmAsm
+}
+
+// flags for the KMA instruction
+const (
+ kmaHS = 1 << 10 // hash subkey supplied
+ kmaLAAD = 1 << 9 // last series of additional authenticated data
+ kmaLPC = 1 << 8 // last series of plaintext or ciphertext blocks
+ kmaDecrypt = 1 << 7 // decrypt
+)
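+
+// For example, a single-shot decryption composes the AES function code with
+// all of the "last data" flags, as Open does below:
+//
+//	fc := g.block.function | kmaLAAD | kmaLPC | kmaDecrypt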
+
+// kmaGCM executes the encryption or decryption operation given by fn. The tag
+// will be calculated and written to tag. cnt should contain the current
+// counter state and will be overwritten with the updated counter state.
+// TODO(mundaym): could pass in hash subkey
+//
+//go:noescape
+func kmaGCM(fn code, key, dst, src, aad []byte, tag *[16]byte, cnt *gcmCount)
+
+// Seal encrypts and authenticates plaintext. See the cipher.AEAD interface for
+// details.
+func (g *gcmKMA) Seal(dst, nonce, plaintext, data []byte) []byte {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ if uint64(len(plaintext)) > ((1<<32)-2)*BlockSize {
+ panic("crypto/cipher: message too large for GCM")
+ }
+
+ ret, out := sliceForAppend(dst, len(plaintext)+g.tagSize)
+ if alias.InexactOverlap(out[:len(plaintext)], plaintext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+
+ counter := g.deriveCounter(nonce)
+ fc := g.block.function | kmaLAAD | kmaLPC
+
+ var tag [gcmTagSize]byte
+ kmaGCM(fc, g.block.key, out[:len(plaintext)], plaintext, data, &tag, &counter)
+ copy(out[len(plaintext):], tag[:])
+
+ return ret
+}
+
+// Open authenticates and decrypts ciphertext. See the cipher.AEAD interface
+// for details.
+func (g *gcmKMA) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
+ if len(nonce) != g.nonceSize {
+ panic("crypto/cipher: incorrect nonce length given to GCM")
+ }
+ if len(ciphertext) < g.tagSize {
+ return nil, errOpen
+ }
+ if uint64(len(ciphertext)) > ((1<<32)-2)*uint64(BlockSize)+uint64(g.tagSize) {
+ return nil, errOpen
+ }
+
+ tag := ciphertext[len(ciphertext)-g.tagSize:]
+ ciphertext = ciphertext[:len(ciphertext)-g.tagSize]
+ ret, out := sliceForAppend(dst, len(ciphertext))
+ if alias.InexactOverlap(out, ciphertext) {
+ panic("crypto/cipher: invalid buffer overlap")
+ }
+
+ if g.tagSize < gcmMinimumTagSize {
+ panic("crypto/cipher: incorrect GCM tag size")
+ }
+
+ counter := g.deriveCounter(nonce)
+ fc := g.block.function | kmaLAAD | kmaLPC | kmaDecrypt
+
+ var expectedTag [gcmTagSize]byte
+ kmaGCM(fc, g.block.key, out[:len(ciphertext)], ciphertext, data, &expectedTag, &counter)
+
+ if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
+ // The AESNI code decrypts and authenticates concurrently, and
+ // so overwrites dst in the event of a tag mismatch. That
+ // behavior is mimicked here in order to be consistent across
+ // platforms.
+ for i := range out {
+ out[i] = 0
+ }
+ return nil, errOpen
+ }
+
+ return ret, nil
+}
diff --git a/src/crypto/aes/modes.go b/src/crypto/aes/modes.go
new file mode 100644
index 0000000..5c0b08e
--- /dev/null
+++ b/src/crypto/aes/modes.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "crypto/cipher"
+)
+
+// gcmAble is implemented by cipher.Blocks that can provide an optimized
+// implementation of GCM through the AEAD interface.
+// See crypto/cipher/gcm.go.
+type gcmAble interface {
+ NewGCM(nonceSize, tagSize int) (cipher.AEAD, error)
+}
+
+// cbcEncAble is implemented by cipher.Blocks that can provide an optimized
+// implementation of CBC encryption through the cipher.BlockMode interface.
+// See crypto/cipher/cbc.go.
+type cbcEncAble interface {
+ NewCBCEncrypter(iv []byte) cipher.BlockMode
+}
+
+// cbcDecAble is implemented by cipher.Blocks that can provide an optimized
+// implementation of CBC decryption through the cipher.BlockMode interface.
+// See crypto/cipher/cbc.go.
+type cbcDecAble interface {
+ NewCBCDecrypter(iv []byte) cipher.BlockMode
+}
+
+// ctrAble is implemented by cipher.Blocks that can provide an optimized
+// implementation of CTR through the cipher.Stream interface.
+// See crypto/cipher/ctr.go.
+type ctrAble interface {
+ NewCTR(iv []byte) cipher.Stream
+}
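+
+// A sketch of how crypto/cipher consumes these interfaces (the real logic
+// lives in crypto/cipher; this mirrors its shape rather than quoting it):
+//
+//	if g, ok := block.(gcmAble); ok {
+//		return g.NewGCM(nonceSize, tagSize)
+//	}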
diff --git a/src/crypto/aes/modes_test.go b/src/crypto/aes/modes_test.go
new file mode 100644
index 0000000..a3364c9
--- /dev/null
+++ b/src/crypto/aes/modes_test.go
@@ -0,0 +1,112 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aes
+
+import (
+ "crypto/cipher"
+ "testing"
+)
+
+// Check that the optimized implementations of cipher modes will
+// be picked up correctly.
+
+// testInterface can be asserted to check that a type originates
+// from this test group.
+type testInterface interface {
+ InAESPackage() bool
+}
+
+// testBlock implements the cipher.Block interface and any *Able
+// interfaces that need to be tested.
+type testBlock struct{}
+
+func (*testBlock) BlockSize() int { return 0 }
+func (*testBlock) Encrypt(a, b []byte) {}
+func (*testBlock) Decrypt(a, b []byte) {}
+func (*testBlock) NewGCM(int, int) (cipher.AEAD, error) {
+ return &testAEAD{}, nil
+}
+func (*testBlock) NewCBCEncrypter([]byte) cipher.BlockMode {
+ return &testBlockMode{}
+}
+func (*testBlock) NewCBCDecrypter([]byte) cipher.BlockMode {
+ return &testBlockMode{}
+}
+func (*testBlock) NewCTR([]byte) cipher.Stream {
+ return &testStream{}
+}
+
+// testAEAD implements the cipher.AEAD interface.
+type testAEAD struct{}
+
+func (*testAEAD) NonceSize() int { return 0 }
+func (*testAEAD) Overhead() int { return 0 }
+func (*testAEAD) Seal(a, b, c, d []byte) []byte { return []byte{} }
+func (*testAEAD) Open(a, b, c, d []byte) ([]byte, error) { return []byte{}, nil }
+func (*testAEAD) InAESPackage() bool { return true }
+
+// Test that the gcmAble interface is detected correctly by the cipher package.
+func TestGCMAble(t *testing.T) {
+ b := cipher.Block(&testBlock{})
+ if _, ok := b.(gcmAble); !ok {
+ t.Fatalf("testBlock does not implement the gcmAble interface")
+ }
+ aead, err := cipher.NewGCM(b)
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ if _, ok := aead.(testInterface); !ok {
+ t.Fatalf("cipher.NewGCM did not use gcmAble interface")
+ }
+}
+
+// testBlockMode implements the cipher.BlockMode interface.
+type testBlockMode struct{}
+
+func (*testBlockMode) BlockSize() int { return 0 }
+func (*testBlockMode) CryptBlocks(a, b []byte) {}
+func (*testBlockMode) InAESPackage() bool { return true }
+
+// Test that the cbcEncAble interface is detected correctly by the cipher package.
+func TestCBCEncAble(t *testing.T) {
+ b := cipher.Block(&testBlock{})
+ if _, ok := b.(cbcEncAble); !ok {
+ t.Fatalf("testBlock does not implement the cbcEncAble interface")
+ }
+ bm := cipher.NewCBCEncrypter(b, []byte{})
+ if _, ok := bm.(testInterface); !ok {
+ t.Fatalf("cipher.NewCBCEncrypter did not use cbcEncAble interface")
+ }
+}
+
+// Test that the cbcDecAble interface is detected correctly by the cipher package.
+func TestCBCDecAble(t *testing.T) {
+ b := cipher.Block(&testBlock{})
+ if _, ok := b.(cbcDecAble); !ok {
+ t.Fatalf("testBlock does not implement the cbcDecAble interface")
+ }
+ bm := cipher.NewCBCDecrypter(b, []byte{})
+ if _, ok := bm.(testInterface); !ok {
+ t.Fatalf("cipher.NewCBCDecrypter did not use cbcDecAble interface")
+ }
+}
+
+// testStream implements the cipher.Stream interface.
+type testStream struct{}
+
+func (*testStream) XORKeyStream(a, b []byte) {}
+func (*testStream) InAESPackage() bool { return true }
+
+// Test that the ctrAble interface is detected correctly by the cipher package.
+func TestCTRAble(t *testing.T) {
+ b := cipher.Block(&testBlock{})
+ if _, ok := b.(ctrAble); !ok {
+ t.Fatalf("testBlock does not implement the ctrAble interface")
+ }
+ s := cipher.NewCTR(b, []byte{})
+ if _, ok := s.(testInterface); !ok {
+ t.Fatalf("cipher.NewCTR did not use ctrAble interface")
+ }
+}