author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:25:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:25:22 +0000
commit     f6ad4dcef54c5ce997a4bad5a6d86de229015700 (patch)
tree       7cfa4e31ace5c2bd95c72b154d15af494b2bcbef /src/vendor/golang.org/x/crypto
parent     Initial commit. (diff)
download   golang-1.22-f6ad4dcef54c5ce997a4bad5a6d86de229015700.tar.xz
           golang-1.22-f6ad4dcef54c5ce997a4bad5a6d86de229015700.zip

Adding upstream version 1.22.1.  (upstream/1.22.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/vendor/golang.org/x/crypto')
-rw-r--r--  src/vendor/golang.org/x/crypto/LICENSE  27
-rw-r--r--  src/vendor/golang.org/x/crypto/PATENTS  22
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go  16
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s  307
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go  398
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go  13
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go  16
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s  449
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go  27
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s  224
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20/xor.go  42
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go  98
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go  86
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s  2715
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go  81
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go  15
-rw-r--r--  src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go  86
-rw-r--r--  src/vendor/golang.org/x/crypto/cryptobyte/asn1.go  825
-rw-r--r--  src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go  46
-rw-r--r--  src/vendor/golang.org/x/crypto/cryptobyte/builder.go  350
-rw-r--r--  src/vendor/golang.org/x/crypto/cryptobyte/string.go  183
-rw-r--r--  src/vendor/golang.org/x/crypto/hkdf/hkdf.go  95
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/alias/alias.go  31
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go  34
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go  39
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go  21
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go  9
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go  99
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go  47
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s  108
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go  309
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go  47
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s  181
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go  76
-rw-r--r--  src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s  503
35 files changed, 7625 insertions, 0 deletions
diff --git a/src/vendor/golang.org/x/crypto/LICENSE b/src/vendor/golang.org/x/crypto/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/vendor/golang.org/x/crypto/PATENTS b/src/vendor/golang.org/x/crypto/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
new file mode 100644
index 0000000..661ea13
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+package chacha20
+
+const bufSize = 256
+
+//go:noescape
+func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
+
+func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) {
+ xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter)
+}
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
new file mode 100644
index 0000000..7dd2638
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
@@ -0,0 +1,307 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+#include "textflag.h"
+
+#define NUM_ROUNDS 10
+
+// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
+TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
+ MOVD dst+0(FP), R1
+ MOVD src+24(FP), R2
+ MOVD src_len+32(FP), R3
+ MOVD key+48(FP), R4
+ MOVD nonce+56(FP), R6
+ MOVD counter+64(FP), R7
+
+ MOVD $·constants(SB), R10
+ MOVD $·incRotMatrix(SB), R11
+
+ MOVW (R7), R20
+
+ AND $~255, R3, R13
+ ADD R2, R13, R12 // R12 for block end
+ AND $255, R3, R13
+loop:
+ MOVD $NUM_ROUNDS, R21
+ VLD1 (R11), [V30.S4, V31.S4]
+
+	// load constants
+ // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
+ WORD $0x4D60E940
+
+ // load keys
+ // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4]
+ WORD $0x4DFFE884
+ // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4]
+ WORD $0x4DFFE888
+ SUB $32, R4
+
+ // load counter + nonce
+ // VLD1R (R7), [V12.S4]
+ WORD $0x4D40C8EC
+
+ // VLD3R (R6), [V13.S4, V14.S4, V15.S4]
+ WORD $0x4D40E8CD
+
+ // update counter
+ VADD V30.S4, V12.S4, V12.S4
+
+chacha:
+ // V0..V3 += V4..V7
+ // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16)
+ VADD V0.S4, V4.S4, V0.S4
+ VADD V1.S4, V5.S4, V1.S4
+ VADD V2.S4, V6.S4, V2.S4
+ VADD V3.S4, V7.S4, V3.S4
+ VEOR V12.B16, V0.B16, V12.B16
+ VEOR V13.B16, V1.B16, V13.B16
+ VEOR V14.B16, V2.B16, V14.B16
+ VEOR V15.B16, V3.B16, V15.B16
+ VREV32 V12.H8, V12.H8
+ VREV32 V13.H8, V13.H8
+ VREV32 V14.H8, V14.H8
+ VREV32 V15.H8, V15.H8
+ // V8..V11 += V12..V15
+ // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12)
+ VADD V8.S4, V12.S4, V8.S4
+ VADD V9.S4, V13.S4, V9.S4
+ VADD V10.S4, V14.S4, V10.S4
+ VADD V11.S4, V15.S4, V11.S4
+ VEOR V8.B16, V4.B16, V16.B16
+ VEOR V9.B16, V5.B16, V17.B16
+ VEOR V10.B16, V6.B16, V18.B16
+ VEOR V11.B16, V7.B16, V19.B16
+ VSHL $12, V16.S4, V4.S4
+ VSHL $12, V17.S4, V5.S4
+ VSHL $12, V18.S4, V6.S4
+ VSHL $12, V19.S4, V7.S4
+ VSRI $20, V16.S4, V4.S4
+ VSRI $20, V17.S4, V5.S4
+ VSRI $20, V18.S4, V6.S4
+ VSRI $20, V19.S4, V7.S4
+
+ // V0..V3 += V4..V7
+ // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8)
+ VADD V0.S4, V4.S4, V0.S4
+ VADD V1.S4, V5.S4, V1.S4
+ VADD V2.S4, V6.S4, V2.S4
+ VADD V3.S4, V7.S4, V3.S4
+ VEOR V12.B16, V0.B16, V12.B16
+ VEOR V13.B16, V1.B16, V13.B16
+ VEOR V14.B16, V2.B16, V14.B16
+ VEOR V15.B16, V3.B16, V15.B16
+ VTBL V31.B16, [V12.B16], V12.B16
+ VTBL V31.B16, [V13.B16], V13.B16
+ VTBL V31.B16, [V14.B16], V14.B16
+ VTBL V31.B16, [V15.B16], V15.B16
+
+ // V8..V11 += V12..V15
+ // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7)
+ VADD V12.S4, V8.S4, V8.S4
+ VADD V13.S4, V9.S4, V9.S4
+ VADD V14.S4, V10.S4, V10.S4
+ VADD V15.S4, V11.S4, V11.S4
+ VEOR V8.B16, V4.B16, V16.B16
+ VEOR V9.B16, V5.B16, V17.B16
+ VEOR V10.B16, V6.B16, V18.B16
+ VEOR V11.B16, V7.B16, V19.B16
+ VSHL $7, V16.S4, V4.S4
+ VSHL $7, V17.S4, V5.S4
+ VSHL $7, V18.S4, V6.S4
+ VSHL $7, V19.S4, V7.S4
+ VSRI $25, V16.S4, V4.S4
+ VSRI $25, V17.S4, V5.S4
+ VSRI $25, V18.S4, V6.S4
+ VSRI $25, V19.S4, V7.S4
+
+ // V0..V3 += V5..V7, V4
+ // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16)
+ VADD V0.S4, V5.S4, V0.S4
+ VADD V1.S4, V6.S4, V1.S4
+ VADD V2.S4, V7.S4, V2.S4
+ VADD V3.S4, V4.S4, V3.S4
+ VEOR V15.B16, V0.B16, V15.B16
+ VEOR V12.B16, V1.B16, V12.B16
+ VEOR V13.B16, V2.B16, V13.B16
+ VEOR V14.B16, V3.B16, V14.B16
+ VREV32 V12.H8, V12.H8
+ VREV32 V13.H8, V13.H8
+ VREV32 V14.H8, V14.H8
+ VREV32 V15.H8, V15.H8
+
+ // V10 += V15; V5 <<<= ((V10 XOR V5), 12)
+ // ...
+ VADD V15.S4, V10.S4, V10.S4
+ VADD V12.S4, V11.S4, V11.S4
+ VADD V13.S4, V8.S4, V8.S4
+ VADD V14.S4, V9.S4, V9.S4
+ VEOR V10.B16, V5.B16, V16.B16
+ VEOR V11.B16, V6.B16, V17.B16
+ VEOR V8.B16, V7.B16, V18.B16
+ VEOR V9.B16, V4.B16, V19.B16
+ VSHL $12, V16.S4, V5.S4
+ VSHL $12, V17.S4, V6.S4
+ VSHL $12, V18.S4, V7.S4
+ VSHL $12, V19.S4, V4.S4
+ VSRI $20, V16.S4, V5.S4
+ VSRI $20, V17.S4, V6.S4
+ VSRI $20, V18.S4, V7.S4
+ VSRI $20, V19.S4, V4.S4
+
+ // V0 += V5; V15 <<<= ((V0 XOR V15), 8)
+ // ...
+ VADD V5.S4, V0.S4, V0.S4
+ VADD V6.S4, V1.S4, V1.S4
+ VADD V7.S4, V2.S4, V2.S4
+ VADD V4.S4, V3.S4, V3.S4
+ VEOR V0.B16, V15.B16, V15.B16
+ VEOR V1.B16, V12.B16, V12.B16
+ VEOR V2.B16, V13.B16, V13.B16
+ VEOR V3.B16, V14.B16, V14.B16
+ VTBL V31.B16, [V12.B16], V12.B16
+ VTBL V31.B16, [V13.B16], V13.B16
+ VTBL V31.B16, [V14.B16], V14.B16
+ VTBL V31.B16, [V15.B16], V15.B16
+
+ // V10 += V15; V5 <<<= ((V10 XOR V5), 7)
+ // ...
+ VADD V15.S4, V10.S4, V10.S4
+ VADD V12.S4, V11.S4, V11.S4
+ VADD V13.S4, V8.S4, V8.S4
+ VADD V14.S4, V9.S4, V9.S4
+ VEOR V10.B16, V5.B16, V16.B16
+ VEOR V11.B16, V6.B16, V17.B16
+ VEOR V8.B16, V7.B16, V18.B16
+ VEOR V9.B16, V4.B16, V19.B16
+ VSHL $7, V16.S4, V5.S4
+ VSHL $7, V17.S4, V6.S4
+ VSHL $7, V18.S4, V7.S4
+ VSHL $7, V19.S4, V4.S4
+ VSRI $25, V16.S4, V5.S4
+ VSRI $25, V17.S4, V6.S4
+ VSRI $25, V18.S4, V7.S4
+ VSRI $25, V19.S4, V4.S4
+
+ SUB $1, R21
+ CBNZ R21, chacha
+
+ // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4]
+ WORD $0x4D60E950
+
+ // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4]
+ WORD $0x4DFFE894
+ VADD V30.S4, V12.S4, V12.S4
+ VADD V16.S4, V0.S4, V0.S4
+ VADD V17.S4, V1.S4, V1.S4
+ VADD V18.S4, V2.S4, V2.S4
+ VADD V19.S4, V3.S4, V3.S4
+ // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4]
+ WORD $0x4DFFE898
+ // restore R4
+ SUB $32, R4
+
+ // load counter + nonce
+ // VLD1R (R7), [V28.S4]
+ WORD $0x4D40C8FC
+ // VLD3R (R6), [V29.S4, V30.S4, V31.S4]
+ WORD $0x4D40E8DD
+
+ VADD V20.S4, V4.S4, V4.S4
+ VADD V21.S4, V5.S4, V5.S4
+ VADD V22.S4, V6.S4, V6.S4
+ VADD V23.S4, V7.S4, V7.S4
+ VADD V24.S4, V8.S4, V8.S4
+ VADD V25.S4, V9.S4, V9.S4
+ VADD V26.S4, V10.S4, V10.S4
+ VADD V27.S4, V11.S4, V11.S4
+ VADD V28.S4, V12.S4, V12.S4
+ VADD V29.S4, V13.S4, V13.S4
+ VADD V30.S4, V14.S4, V14.S4
+ VADD V31.S4, V15.S4, V15.S4
+
+ VZIP1 V1.S4, V0.S4, V16.S4
+ VZIP2 V1.S4, V0.S4, V17.S4
+ VZIP1 V3.S4, V2.S4, V18.S4
+ VZIP2 V3.S4, V2.S4, V19.S4
+ VZIP1 V5.S4, V4.S4, V20.S4
+ VZIP2 V5.S4, V4.S4, V21.S4
+ VZIP1 V7.S4, V6.S4, V22.S4
+ VZIP2 V7.S4, V6.S4, V23.S4
+ VZIP1 V9.S4, V8.S4, V24.S4
+ VZIP2 V9.S4, V8.S4, V25.S4
+ VZIP1 V11.S4, V10.S4, V26.S4
+ VZIP2 V11.S4, V10.S4, V27.S4
+ VZIP1 V13.S4, V12.S4, V28.S4
+ VZIP2 V13.S4, V12.S4, V29.S4
+ VZIP1 V15.S4, V14.S4, V30.S4
+ VZIP2 V15.S4, V14.S4, V31.S4
+ VZIP1 V18.D2, V16.D2, V0.D2
+ VZIP2 V18.D2, V16.D2, V4.D2
+ VZIP1 V19.D2, V17.D2, V8.D2
+ VZIP2 V19.D2, V17.D2, V12.D2
+ VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16]
+
+ VZIP1 V22.D2, V20.D2, V1.D2
+ VZIP2 V22.D2, V20.D2, V5.D2
+ VZIP1 V23.D2, V21.D2, V9.D2
+ VZIP2 V23.D2, V21.D2, V13.D2
+ VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16]
+ VZIP1 V26.D2, V24.D2, V2.D2
+ VZIP2 V26.D2, V24.D2, V6.D2
+ VZIP1 V27.D2, V25.D2, V10.D2
+ VZIP2 V27.D2, V25.D2, V14.D2
+ VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16]
+ VZIP1 V30.D2, V28.D2, V3.D2
+ VZIP2 V30.D2, V28.D2, V7.D2
+ VZIP1 V31.D2, V29.D2, V11.D2
+ VZIP2 V31.D2, V29.D2, V15.D2
+ VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16]
+ VEOR V0.B16, V16.B16, V16.B16
+ VEOR V1.B16, V17.B16, V17.B16
+ VEOR V2.B16, V18.B16, V18.B16
+ VEOR V3.B16, V19.B16, V19.B16
+ VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1)
+ VEOR V4.B16, V20.B16, V20.B16
+ VEOR V5.B16, V21.B16, V21.B16
+ VEOR V6.B16, V22.B16, V22.B16
+ VEOR V7.B16, V23.B16, V23.B16
+ VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1)
+ VEOR V8.B16, V24.B16, V24.B16
+ VEOR V9.B16, V25.B16, V25.B16
+ VEOR V10.B16, V26.B16, V26.B16
+ VEOR V11.B16, V27.B16, V27.B16
+ VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1)
+ VEOR V12.B16, V28.B16, V28.B16
+ VEOR V13.B16, V29.B16, V29.B16
+ VEOR V14.B16, V30.B16, V30.B16
+ VEOR V15.B16, V31.B16, V31.B16
+ VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1)
+
+ ADD $4, R20
+ MOVW R20, (R7) // update counter
+
+ CMP R2, R12
+ BGT loop
+
+ RET
+
+
+DATA ·constants+0x00(SB)/4, $0x61707865
+DATA ·constants+0x04(SB)/4, $0x3320646e
+DATA ·constants+0x08(SB)/4, $0x79622d32
+DATA ·constants+0x0c(SB)/4, $0x6b206574
+GLOBL ·constants(SB), NOPTR|RODATA, $32
+
+DATA ·incRotMatrix+0x00(SB)/4, $0x00000000
+DATA ·incRotMatrix+0x04(SB)/4, $0x00000001
+DATA ·incRotMatrix+0x08(SB)/4, $0x00000002
+DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003
+DATA ·incRotMatrix+0x10(SB)/4, $0x02010003
+DATA ·incRotMatrix+0x14(SB)/4, $0x06050407
+DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B
+DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F
+GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
new file mode 100644
index 0000000..93eb5ae
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
@@ -0,0 +1,398 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms
+// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01.
+package chacha20
+
+import (
+ "crypto/cipher"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+
+ "golang.org/x/crypto/internal/alias"
+)
+
+const (
+ // KeySize is the size of the key used by this cipher, in bytes.
+ KeySize = 32
+
+ // NonceSize is the size of the nonce used with the standard variant of this
+ // cipher, in bytes.
+ //
+ // Note that this is too short to be safely generated at random if the same
+ // key is reused more than 2³² times.
+ NonceSize = 12
+
+ // NonceSizeX is the size of the nonce used with the XChaCha20 variant of
+ // this cipher, in bytes.
+ NonceSizeX = 24
+)
+
+// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key
+// and nonce. A *Cipher implements the cipher.Stream interface.
+type Cipher struct {
+ // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter
+ // (incremented after each block), and 3 of nonce.
+ key [8]uint32
+ counter uint32
+ nonce [3]uint32
+
+ // The last len bytes of buf are leftover key stream bytes from the previous
+ // XORKeyStream invocation. The size of buf depends on how many blocks are
+ // computed at a time by xorKeyStreamBlocks.
+ buf [bufSize]byte
+ len int
+
+ // overflow is set when the counter overflowed, no more blocks can be
+ // generated, and the next XORKeyStream call should panic.
+ overflow bool
+
+ // The counter-independent results of the first round are cached after they
+ // are computed the first time.
+ precompDone bool
+ p1, p5, p9, p13 uint32
+ p2, p6, p10, p14 uint32
+ p3, p7, p11, p15 uint32
+}
+
+var _ cipher.Stream = (*Cipher)(nil)
+
+// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given
+// 32-byte key and a 12- or 24-byte nonce. If a 24-byte nonce is provided,
+// the XChaCha20 construction will be used. It returns an error if key or nonce
+// have any other length.
+//
+// Note that ChaCha20, like all stream ciphers, is not authenticated and allows
+// attackers to silently tamper with the plaintext. For this reason, it is more
+// appropriate as a building block than as a standalone encryption mechanism.
+// Instead, consider using package golang.org/x/crypto/chacha20poly1305.
+func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) {
+ // This function is split into a wrapper so that the Cipher allocation will
+ // be inlined, and depending on how the caller uses the return value, won't
+ // escape to the heap.
+ c := &Cipher{}
+ return newUnauthenticatedCipher(c, key, nonce)
+}
+
+func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) {
+ if len(key) != KeySize {
+ return nil, errors.New("chacha20: wrong key size")
+ }
+ if len(nonce) == NonceSizeX {
+ // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a
+ // derived key, allowing it to operate on a nonce of 24 bytes. See
+ // draft-irtf-cfrg-xchacha-01, Section 2.3.
+ key, _ = HChaCha20(key, nonce[0:16])
+ cNonce := make([]byte, NonceSize)
+ copy(cNonce[4:12], nonce[16:24])
+ nonce = cNonce
+ } else if len(nonce) != NonceSize {
+ return nil, errors.New("chacha20: wrong nonce size")
+ }
+
+ key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint
+ c.key = [8]uint32{
+ binary.LittleEndian.Uint32(key[0:4]),
+ binary.LittleEndian.Uint32(key[4:8]),
+ binary.LittleEndian.Uint32(key[8:12]),
+ binary.LittleEndian.Uint32(key[12:16]),
+ binary.LittleEndian.Uint32(key[16:20]),
+ binary.LittleEndian.Uint32(key[20:24]),
+ binary.LittleEndian.Uint32(key[24:28]),
+ binary.LittleEndian.Uint32(key[28:32]),
+ }
+ c.nonce = [3]uint32{
+ binary.LittleEndian.Uint32(nonce[0:4]),
+ binary.LittleEndian.Uint32(nonce[4:8]),
+ binary.LittleEndian.Uint32(nonce[8:12]),
+ }
+ return c, nil
+}
+
+// The constant first 4 words of the ChaCha20 state.
+const (
+ j0 uint32 = 0x61707865 // expa
+ j1 uint32 = 0x3320646e // nd 3
+ j2 uint32 = 0x79622d32 // 2-by
+ j3 uint32 = 0x6b206574 // te k
+)
+
+const blockSize = 64
+
+// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words.
+// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16
+// words each round, in columnar or diagonal groups of 4 at a time.
+func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
+ a += b
+ d ^= a
+ d = bits.RotateLeft32(d, 16)
+ c += d
+ b ^= c
+ b = bits.RotateLeft32(b, 12)
+ a += b
+ d ^= a
+ d = bits.RotateLeft32(d, 8)
+ c += d
+ b ^= c
+ b = bits.RotateLeft32(b, 7)
+ return a, b, c, d
+}
+
+// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will
+// behave as if (64 * counter) bytes had been encrypted so far.
+//
+// To prevent accidental counter reuse, SetCounter panics if counter is less
+// than the current value.
+//
+// Note that the execution time of XORKeyStream is not independent of the
+// counter value.
+func (s *Cipher) SetCounter(counter uint32) {
+ // Internally, s may buffer multiple blocks, which complicates this
+ // implementation slightly. When checking whether the counter has rolled
+ // back, we must use both s.counter and s.len to determine how many blocks
+ // we have already output.
+ outputCounter := s.counter - uint32(s.len)/blockSize
+ if s.overflow || counter < outputCounter {
+ panic("chacha20: SetCounter attempted to rollback counter")
+ }
+
+ // In the general case, we set the new counter value and reset s.len to 0,
+ // causing the next call to XORKeyStream to refill the buffer. However, if
+ // we're advancing within the existing buffer, we can save work by simply
+ // setting s.len.
+ if counter < s.counter {
+ s.len = int(s.counter-counter) * blockSize
+ } else {
+ s.counter = counter
+ s.len = 0
+ }
+}
+
+// XORKeyStream XORs each byte in the given slice with a byte from the
+// cipher's key stream. Dst and src must overlap entirely or not at all.
+//
+// If len(dst) < len(src), XORKeyStream will panic. It is acceptable
+// to pass a dst bigger than src, and in that case, XORKeyStream will
+// only update dst[:len(src)] and will not touch the rest of dst.
+//
+// Multiple calls to XORKeyStream behave as if the concatenation of
+// the src buffers was passed in a single run. That is, Cipher
+// maintains state and does not reset at each XORKeyStream call.
+func (s *Cipher) XORKeyStream(dst, src []byte) {
+ if len(src) == 0 {
+ return
+ }
+ if len(dst) < len(src) {
+ panic("chacha20: output smaller than input")
+ }
+ dst = dst[:len(src)]
+ if alias.InexactOverlap(dst, src) {
+ panic("chacha20: invalid buffer overlap")
+ }
+
+ // First, drain any remaining key stream from a previous XORKeyStream.
+ if s.len != 0 {
+ keyStream := s.buf[bufSize-s.len:]
+ if len(src) < len(keyStream) {
+ keyStream = keyStream[:len(src)]
+ }
+ _ = src[len(keyStream)-1] // bounds check elimination hint
+ for i, b := range keyStream {
+ dst[i] = src[i] ^ b
+ }
+ s.len -= len(keyStream)
+ dst, src = dst[len(keyStream):], src[len(keyStream):]
+ }
+ if len(src) == 0 {
+ return
+ }
+
+ // If we'd need to let the counter overflow and keep generating output,
+ // panic immediately. If instead we'd only reach the last block, remember
+ // not to generate any more output after the buffer is drained.
+ numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize
+ if s.overflow || uint64(s.counter)+numBlocks > 1<<32 {
+ panic("chacha20: counter overflow")
+ } else if uint64(s.counter)+numBlocks == 1<<32 {
+ s.overflow = true
+ }
+
+ // xorKeyStreamBlocks implementations expect input lengths that are a
+ // multiple of bufSize. Platform-specific ones process multiple blocks at a
+ // time, so have bufSizes that are a multiple of blockSize.
+
+ full := len(src) - len(src)%bufSize
+ if full > 0 {
+ s.xorKeyStreamBlocks(dst[:full], src[:full])
+ }
+ dst, src = dst[full:], src[full:]
+
+ // If using a multi-block xorKeyStreamBlocks would overflow, use the generic
+ // one that does one block at a time.
+ const blocksPerBuf = bufSize / blockSize
+ if uint64(s.counter)+blocksPerBuf > 1<<32 {
+ s.buf = [bufSize]byte{}
+ numBlocks := (len(src) + blockSize - 1) / blockSize
+ buf := s.buf[bufSize-numBlocks*blockSize:]
+ copy(buf, src)
+ s.xorKeyStreamBlocksGeneric(buf, buf)
+ s.len = len(buf) - copy(dst, buf)
+ return
+ }
+
+ // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and
+ // keep the leftover keystream for the next XORKeyStream invocation.
+ if len(src) > 0 {
+ s.buf = [bufSize]byte{}
+ copy(s.buf[:], src)
+ s.xorKeyStreamBlocks(s.buf[:], s.buf[:])
+ s.len = bufSize - copy(dst, s.buf[:])
+ }
+}
+
+func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
+ if len(dst) != len(src) || len(dst)%blockSize != 0 {
+ panic("chacha20: internal error: wrong dst and/or src length")
+ }
+
+ // To generate each block of key stream, the initial cipher state
+ // (represented below) is passed through 20 rounds of shuffling,
+	// alternately applying quarterRounds by columns (like 1, 5, 9, 13)
+ // or by diagonals (like 1, 6, 11, 12).
+ //
+ // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc
+ // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk
+ // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk
+ // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn
+ //
+ // c=constant k=key b=blockcount n=nonce
+ var (
+ c0, c1, c2, c3 = j0, j1, j2, j3
+ c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3]
+ c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7]
+ _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2]
+ )
+
+ // Three quarters of the first round don't depend on the counter, so we can
+ // calculate them here, and reuse them for multiple blocks in the loop, and
+ // for future XORKeyStream invocations.
+ if !s.precompDone {
+ s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13)
+ s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14)
+ s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15)
+ s.precompDone = true
+ }
+
+ // A condition of len(src) > 0 would be sufficient, but this also
+ // acts as a bounds check elimination hint.
+ for len(src) >= 64 && len(dst) >= 64 {
+ // The remainder of the first column round.
+ fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter)
+
+ // The second diagonal round.
+ x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15)
+ x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12)
+ x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13)
+ x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14)
+
+ // The remaining 18 rounds.
+ for i := 0; i < 9; i++ {
+ // Column round.
+ x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
+ x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
+ x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
+ x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
+
+ // Diagonal round.
+ x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
+ x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
+ x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
+ x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
+ }
+
+ // Add back the initial state to generate the key stream, then
+ // XOR the key stream with the source and write out the result.
+ addXor(dst[0:4], src[0:4], x0, c0)
+ addXor(dst[4:8], src[4:8], x1, c1)
+ addXor(dst[8:12], src[8:12], x2, c2)
+ addXor(dst[12:16], src[12:16], x3, c3)
+ addXor(dst[16:20], src[16:20], x4, c4)
+ addXor(dst[20:24], src[20:24], x5, c5)
+ addXor(dst[24:28], src[24:28], x6, c6)
+ addXor(dst[28:32], src[28:32], x7, c7)
+ addXor(dst[32:36], src[32:36], x8, c8)
+ addXor(dst[36:40], src[36:40], x9, c9)
+ addXor(dst[40:44], src[40:44], x10, c10)
+ addXor(dst[44:48], src[44:48], x11, c11)
+ addXor(dst[48:52], src[48:52], x12, s.counter)
+ addXor(dst[52:56], src[52:56], x13, c13)
+ addXor(dst[56:60], src[56:60], x14, c14)
+ addXor(dst[60:64], src[60:64], x15, c15)
+
+ s.counter += 1
+
+ src, dst = src[blockSize:], dst[blockSize:]
+ }
+}
+
+// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32-byte
+// key and a 16-byte nonce. It returns an error if key or nonce have any other
+// length. It is used as part of the XChaCha20 construction.
+func HChaCha20(key, nonce []byte) ([]byte, error) {
+ // This function is split into a wrapper so that the slice allocation will
+ // be inlined, and depending on how the caller uses the return value, won't
+ // escape to the heap.
+ out := make([]byte, 32)
+ return hChaCha20(out, key, nonce)
+}
+
+func hChaCha20(out, key, nonce []byte) ([]byte, error) {
+ if len(key) != KeySize {
+ return nil, errors.New("chacha20: wrong HChaCha20 key size")
+ }
+ if len(nonce) != 16 {
+ return nil, errors.New("chacha20: wrong HChaCha20 nonce size")
+ }
+
+ x0, x1, x2, x3 := j0, j1, j2, j3
+ x4 := binary.LittleEndian.Uint32(key[0:4])
+ x5 := binary.LittleEndian.Uint32(key[4:8])
+ x6 := binary.LittleEndian.Uint32(key[8:12])
+ x7 := binary.LittleEndian.Uint32(key[12:16])
+ x8 := binary.LittleEndian.Uint32(key[16:20])
+ x9 := binary.LittleEndian.Uint32(key[20:24])
+ x10 := binary.LittleEndian.Uint32(key[24:28])
+ x11 := binary.LittleEndian.Uint32(key[28:32])
+ x12 := binary.LittleEndian.Uint32(nonce[0:4])
+ x13 := binary.LittleEndian.Uint32(nonce[4:8])
+ x14 := binary.LittleEndian.Uint32(nonce[8:12])
+ x15 := binary.LittleEndian.Uint32(nonce[12:16])
+
+ for i := 0; i < 10; i++ {
+		// Column round.
+ x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
+ x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
+ x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
+ x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
+
+		// Diagonal round.
+ x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
+ x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
+ x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
+ x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
+ }
+
+ _ = out[31] // bounds check elimination hint
+ binary.LittleEndian.PutUint32(out[0:4], x0)
+ binary.LittleEndian.PutUint32(out[4:8], x1)
+ binary.LittleEndian.PutUint32(out[8:12], x2)
+ binary.LittleEndian.PutUint32(out[12:16], x3)
+ binary.LittleEndian.PutUint32(out[16:20], x12)
+ binary.LittleEndian.PutUint32(out[20:24], x13)
+ binary.LittleEndian.PutUint32(out[24:28], x14)
+ binary.LittleEndian.PutUint32(out[28:32], x15)
+ return out, nil
+}
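A minimal caller of the chacha20 package added above might look like the following sketch. It is illustrative only and not part of this commit; the 24-byte nonce selects the XChaCha20 construction described in the doc comments for NewUnauthenticatedCipher.

	package main

	import (
		"crypto/rand"
		"fmt"

		"golang.org/x/crypto/chacha20"
	)

	func main() {
		// Key and nonce sizes come from the constants defined in chacha_generic.go.
		key := make([]byte, chacha20.KeySize)
		nonce := make([]byte, chacha20.NonceSizeX) // 24 bytes selects XChaCha20
		rand.Read(key)
		rand.Read(nonce)

		c, err := chacha20.NewUnauthenticatedCipher(key, nonce)
		if err != nil {
			panic(err)
		}

		plaintext := []byte("example plaintext")
		ciphertext := make([]byte, len(plaintext))
		c.XORKeyStream(ciphertext, plaintext) // dst and src must overlap entirely or not at all
		fmt.Printf("%x\n", ciphertext)
	}

Note that this keystream is unauthenticated; as the package documentation says, most callers should prefer chacha20poly1305 below.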
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
new file mode 100644
index 0000000..db42e66
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego
+
+package chacha20
+
+const bufSize = blockSize
+
+func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) {
+ s.xorKeyStreamBlocksGeneric(dst, src)
+}
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
new file mode 100644
index 0000000..3a4287f
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+package chacha20
+
+const bufSize = 256
+
+//go:noescape
+func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
+
+func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) {
+ chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter)
+}
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
new file mode 100644
index 0000000..66aebae
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
@@ -0,0 +1,449 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on CRYPTOGAMS code with the following comment:
+// # ====================================================================
+// # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// # project. The module is, however, dual licensed under OpenSSL and
+// # CRYPTOGAMS licenses depending on where you obtain it. For further
+// # details see http://www.openssl.org/~appro/cryptogams/.
+// # ====================================================================
+
+// Code for the perl script that generates the ppc64 assembler
+// can be found in the cryptogams repository at the link below. It is based on
+// the original from openssl.
+
+// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91
+
+// The differences in this and the original implementation are
+// due to the calling conventions and initialization of constants.
+
+//go:build gc && !purego
+
+#include "textflag.h"
+
+#define OUT R3
+#define INP R4
+#define LEN R5
+#define KEY R6
+#define CNT R7
+#define TMP R15
+
+#define CONSTBASE R16
+#define BLOCKS R17
+
+DATA consts<>+0x00(SB)/8, $0x3320646e61707865
+DATA consts<>+0x08(SB)/8, $0x6b20657479622d32
+DATA consts<>+0x10(SB)/8, $0x0000000000000001
+DATA consts<>+0x18(SB)/8, $0x0000000000000000
+DATA consts<>+0x20(SB)/8, $0x0000000000000004
+DATA consts<>+0x28(SB)/8, $0x0000000000000000
+DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d
+DATA consts<>+0x38(SB)/8, $0x0203000106070405
+DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c
+DATA consts<>+0x48(SB)/8, $0x0102030005060704
+DATA consts<>+0x50(SB)/8, $0x6170786561707865
+DATA consts<>+0x58(SB)/8, $0x6170786561707865
+DATA consts<>+0x60(SB)/8, $0x3320646e3320646e
+DATA consts<>+0x68(SB)/8, $0x3320646e3320646e
+DATA consts<>+0x70(SB)/8, $0x79622d3279622d32
+DATA consts<>+0x78(SB)/8, $0x79622d3279622d32
+DATA consts<>+0x80(SB)/8, $0x6b2065746b206574
+DATA consts<>+0x88(SB)/8, $0x6b2065746b206574
+DATA consts<>+0x90(SB)/8, $0x0000000100000000
+DATA consts<>+0x98(SB)/8, $0x0000000300000002
+GLOBL consts<>(SB), RODATA, $0xa0
+
+//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
+TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
+ MOVD out+0(FP), OUT
+ MOVD inp+8(FP), INP
+ MOVD len+16(FP), LEN
+ MOVD key+24(FP), KEY
+ MOVD counter+32(FP), CNT
+
+ // Addressing for constants
+ MOVD $consts<>+0x00(SB), CONSTBASE
+ MOVD $16, R8
+ MOVD $32, R9
+ MOVD $48, R10
+ MOVD $64, R11
+ SRD $6, LEN, BLOCKS
+ // V16
+ LXVW4X (CONSTBASE)(R0), VS48
+ ADD $80,CONSTBASE
+
+ // Load key into V17,V18
+ LXVW4X (KEY)(R0), VS49
+ LXVW4X (KEY)(R8), VS50
+
+ // Load CNT, NONCE into V19
+ LXVW4X (CNT)(R0), VS51
+
+ // Clear V27
+ VXOR V27, V27, V27
+
+ // V28
+ LXVW4X (CONSTBASE)(R11), VS60
+
+ // splat slot from V19 -> V26
+ VSPLTW $0, V19, V26
+
+ VSLDOI $4, V19, V27, V19
+ VSLDOI $12, V27, V19, V19
+
+ VADDUWM V26, V28, V26
+
+ MOVD $10, R14
+ MOVD R14, CTR
+
+loop_outer_vsx:
+ // V0, V1, V2, V3
+ LXVW4X (R0)(CONSTBASE), VS32
+ LXVW4X (R8)(CONSTBASE), VS33
+ LXVW4X (R9)(CONSTBASE), VS34
+ LXVW4X (R10)(CONSTBASE), VS35
+
+ // splat values from V17, V18 into V4-V11
+ VSPLTW $0, V17, V4
+ VSPLTW $1, V17, V5
+ VSPLTW $2, V17, V6
+ VSPLTW $3, V17, V7
+ VSPLTW $0, V18, V8
+ VSPLTW $1, V18, V9
+ VSPLTW $2, V18, V10
+ VSPLTW $3, V18, V11
+
+ // VOR
+ VOR V26, V26, V12
+
+ // splat values from V19 -> V13, V14, V15
+ VSPLTW $1, V19, V13
+ VSPLTW $2, V19, V14
+ VSPLTW $3, V19, V15
+
+ // splat const values
+ VSPLTISW $-16, V27
+ VSPLTISW $12, V28
+ VSPLTISW $8, V29
+ VSPLTISW $7, V30
+
+loop_vsx:
+ VADDUWM V0, V4, V0
+ VADDUWM V1, V5, V1
+ VADDUWM V2, V6, V2
+ VADDUWM V3, V7, V3
+
+ VXOR V12, V0, V12
+ VXOR V13, V1, V13
+ VXOR V14, V2, V14
+ VXOR V15, V3, V15
+
+ VRLW V12, V27, V12
+ VRLW V13, V27, V13
+ VRLW V14, V27, V14
+ VRLW V15, V27, V15
+
+ VADDUWM V8, V12, V8
+ VADDUWM V9, V13, V9
+ VADDUWM V10, V14, V10
+ VADDUWM V11, V15, V11
+
+ VXOR V4, V8, V4
+ VXOR V5, V9, V5
+ VXOR V6, V10, V6
+ VXOR V7, V11, V7
+
+ VRLW V4, V28, V4
+ VRLW V5, V28, V5
+ VRLW V6, V28, V6
+ VRLW V7, V28, V7
+
+ VADDUWM V0, V4, V0
+ VADDUWM V1, V5, V1
+ VADDUWM V2, V6, V2
+ VADDUWM V3, V7, V3
+
+ VXOR V12, V0, V12
+ VXOR V13, V1, V13
+ VXOR V14, V2, V14
+ VXOR V15, V3, V15
+
+ VRLW V12, V29, V12
+ VRLW V13, V29, V13
+ VRLW V14, V29, V14
+ VRLW V15, V29, V15
+
+ VADDUWM V8, V12, V8
+ VADDUWM V9, V13, V9
+ VADDUWM V10, V14, V10
+ VADDUWM V11, V15, V11
+
+ VXOR V4, V8, V4
+ VXOR V5, V9, V5
+ VXOR V6, V10, V6
+ VXOR V7, V11, V7
+
+ VRLW V4, V30, V4
+ VRLW V5, V30, V5
+ VRLW V6, V30, V6
+ VRLW V7, V30, V7
+
+ VADDUWM V0, V5, V0
+ VADDUWM V1, V6, V1
+ VADDUWM V2, V7, V2
+ VADDUWM V3, V4, V3
+
+ VXOR V15, V0, V15
+ VXOR V12, V1, V12
+ VXOR V13, V2, V13
+ VXOR V14, V3, V14
+
+ VRLW V15, V27, V15
+ VRLW V12, V27, V12
+ VRLW V13, V27, V13
+ VRLW V14, V27, V14
+
+ VADDUWM V10, V15, V10
+ VADDUWM V11, V12, V11
+ VADDUWM V8, V13, V8
+ VADDUWM V9, V14, V9
+
+ VXOR V5, V10, V5
+ VXOR V6, V11, V6
+ VXOR V7, V8, V7
+ VXOR V4, V9, V4
+
+ VRLW V5, V28, V5
+ VRLW V6, V28, V6
+ VRLW V7, V28, V7
+ VRLW V4, V28, V4
+
+ VADDUWM V0, V5, V0
+ VADDUWM V1, V6, V1
+ VADDUWM V2, V7, V2
+ VADDUWM V3, V4, V3
+
+ VXOR V15, V0, V15
+ VXOR V12, V1, V12
+ VXOR V13, V2, V13
+ VXOR V14, V3, V14
+
+ VRLW V15, V29, V15
+ VRLW V12, V29, V12
+ VRLW V13, V29, V13
+ VRLW V14, V29, V14
+
+ VADDUWM V10, V15, V10
+ VADDUWM V11, V12, V11
+ VADDUWM V8, V13, V8
+ VADDUWM V9, V14, V9
+
+ VXOR V5, V10, V5
+ VXOR V6, V11, V6
+ VXOR V7, V8, V7
+ VXOR V4, V9, V4
+
+ VRLW V5, V30, V5
+ VRLW V6, V30, V6
+ VRLW V7, V30, V7
+ VRLW V4, V30, V4
+ BC 16, LT, loop_vsx
+
+ VADDUWM V12, V26, V12
+
+ WORD $0x13600F8C // VMRGEW V0, V1, V27
+ WORD $0x13821F8C // VMRGEW V2, V3, V28
+
+ WORD $0x10000E8C // VMRGOW V0, V1, V0
+ WORD $0x10421E8C // VMRGOW V2, V3, V2
+
+ WORD $0x13A42F8C // VMRGEW V4, V5, V29
+ WORD $0x13C63F8C // VMRGEW V6, V7, V30
+
+ XXPERMDI VS32, VS34, $0, VS33
+ XXPERMDI VS32, VS34, $3, VS35
+ XXPERMDI VS59, VS60, $0, VS32
+ XXPERMDI VS59, VS60, $3, VS34
+
+ WORD $0x10842E8C // VMRGOW V4, V5, V4
+ WORD $0x10C63E8C // VMRGOW V6, V7, V6
+
+ WORD $0x13684F8C // VMRGEW V8, V9, V27
+ WORD $0x138A5F8C // VMRGEW V10, V11, V28
+
+ XXPERMDI VS36, VS38, $0, VS37
+ XXPERMDI VS36, VS38, $3, VS39
+ XXPERMDI VS61, VS62, $0, VS36
+ XXPERMDI VS61, VS62, $3, VS38
+
+ WORD $0x11084E8C // VMRGOW V8, V9, V8
+ WORD $0x114A5E8C // VMRGOW V10, V11, V10
+
+ WORD $0x13AC6F8C // VMRGEW V12, V13, V29
+ WORD $0x13CE7F8C // VMRGEW V14, V15, V30
+
+ XXPERMDI VS40, VS42, $0, VS41
+ XXPERMDI VS40, VS42, $3, VS43
+ XXPERMDI VS59, VS60, $0, VS40
+ XXPERMDI VS59, VS60, $3, VS42
+
+ WORD $0x118C6E8C // VMRGOW V12, V13, V12
+ WORD $0x11CE7E8C // VMRGOW V14, V15, V14
+
+ VSPLTISW $4, V27
+ VADDUWM V26, V27, V26
+
+ XXPERMDI VS44, VS46, $0, VS45
+ XXPERMDI VS44, VS46, $3, VS47
+ XXPERMDI VS61, VS62, $0, VS44
+ XXPERMDI VS61, VS62, $3, VS46
+
+ VADDUWM V0, V16, V0
+ VADDUWM V4, V17, V4
+ VADDUWM V8, V18, V8
+ VADDUWM V12, V19, V12
+
+ CMPU LEN, $64
+ BLT tail_vsx
+
+ // Bottom of loop
+ LXVW4X (INP)(R0), VS59
+ LXVW4X (INP)(R8), VS60
+ LXVW4X (INP)(R9), VS61
+ LXVW4X (INP)(R10), VS62
+
+ VXOR V27, V0, V27
+ VXOR V28, V4, V28
+ VXOR V29, V8, V29
+ VXOR V30, V12, V30
+
+ STXVW4X VS59, (OUT)(R0)
+ STXVW4X VS60, (OUT)(R8)
+ ADD $64, INP
+ STXVW4X VS61, (OUT)(R9)
+ ADD $-64, LEN
+ STXVW4X VS62, (OUT)(R10)
+ ADD $64, OUT
+ BEQ done_vsx
+
+ VADDUWM V1, V16, V0
+ VADDUWM V5, V17, V4
+ VADDUWM V9, V18, V8
+ VADDUWM V13, V19, V12
+
+ CMPU LEN, $64
+ BLT tail_vsx
+
+ LXVW4X (INP)(R0), VS59
+ LXVW4X (INP)(R8), VS60
+ LXVW4X (INP)(R9), VS61
+ LXVW4X (INP)(R10), VS62
+ VXOR V27, V0, V27
+
+ VXOR V28, V4, V28
+ VXOR V29, V8, V29
+ VXOR V30, V12, V30
+
+ STXVW4X VS59, (OUT)(R0)
+ STXVW4X VS60, (OUT)(R8)
+ ADD $64, INP
+ STXVW4X VS61, (OUT)(R9)
+ ADD $-64, LEN
+	STXVW4X	VS62, (OUT)(R10)
+ ADD $64, OUT
+ BEQ done_vsx
+
+ VADDUWM V2, V16, V0
+ VADDUWM V6, V17, V4
+ VADDUWM V10, V18, V8
+ VADDUWM V14, V19, V12
+
+ CMPU LEN, $64
+ BLT tail_vsx
+
+ LXVW4X (INP)(R0), VS59
+ LXVW4X (INP)(R8), VS60
+ LXVW4X (INP)(R9), VS61
+ LXVW4X (INP)(R10), VS62
+
+ VXOR V27, V0, V27
+ VXOR V28, V4, V28
+ VXOR V29, V8, V29
+ VXOR V30, V12, V30
+
+ STXVW4X VS59, (OUT)(R0)
+ STXVW4X VS60, (OUT)(R8)
+ ADD $64, INP
+ STXVW4X VS61, (OUT)(R9)
+ ADD $-64, LEN
+ STXVW4X VS62, (OUT)(R10)
+ ADD $64, OUT
+ BEQ done_vsx
+
+ VADDUWM V3, V16, V0
+ VADDUWM V7, V17, V4
+ VADDUWM V11, V18, V8
+ VADDUWM V15, V19, V12
+
+ CMPU LEN, $64
+ BLT tail_vsx
+
+ LXVW4X (INP)(R0), VS59
+ LXVW4X (INP)(R8), VS60
+ LXVW4X (INP)(R9), VS61
+ LXVW4X (INP)(R10), VS62
+
+ VXOR V27, V0, V27
+ VXOR V28, V4, V28
+ VXOR V29, V8, V29
+ VXOR V30, V12, V30
+
+ STXVW4X VS59, (OUT)(R0)
+ STXVW4X VS60, (OUT)(R8)
+ ADD $64, INP
+ STXVW4X VS61, (OUT)(R9)
+ ADD $-64, LEN
+ STXVW4X VS62, (OUT)(R10)
+ ADD $64, OUT
+
+ MOVD $10, R14
+ MOVD R14, CTR
+ BNE loop_outer_vsx
+
+done_vsx:
+ // Increment counter by number of 64 byte blocks
+ MOVD (CNT), R14
+ ADD BLOCKS, R14
+ MOVD R14, (CNT)
+ RET
+
+tail_vsx:
+ ADD $32, R1, R11
+ MOVD LEN, CTR
+
+ // Save values on stack to copy from
+ STXVW4X VS32, (R11)(R0)
+ STXVW4X VS36, (R11)(R8)
+ STXVW4X VS40, (R11)(R9)
+ STXVW4X VS44, (R11)(R10)
+ ADD $-1, R11, R12
+ ADD $-1, INP
+ ADD $-1, OUT
+
+looptail_vsx:
+ // Copying the result to OUT
+ // in bytes.
+ MOVBZU 1(R12), KEY
+ MOVBZU 1(INP), TMP
+ XOR KEY, TMP, KEY
+ MOVBU KEY, 1(OUT)
+ BC 16, LT, looptail_vsx
+
+ // Clear the stack values
+ STXVW4X VS48, (R11)(R0)
+ STXVW4X VS48, (R11)(R8)
+ STXVW4X VS48, (R11)(R9)
+ STXVW4X VS48, (R11)(R10)
+ BR done_vsx
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
new file mode 100644
index 0000000..683ccfd
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+package chacha20
+
+import "golang.org/x/sys/cpu"
+
+var haveAsm = cpu.S390X.HasVX
+
+const bufSize = 256
+
+// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
+// be called when the vector facility is available. Implementation in asm_s390x.s.
+//
+//go:noescape
+func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
+
+func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) {
+ if cpu.S390X.HasVX {
+ xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter)
+ } else {
+ c.xorKeyStreamBlocksGeneric(dst, src)
+ }
+}
diff --git a/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
new file mode 100644
index 0000000..1eda91a
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
@@ -0,0 +1,224 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// This is an implementation of the ChaCha20 encryption algorithm as
+// specified in RFC 7539. It uses vector instructions to compute
+// 4 keystream blocks in parallel (256 bytes) which are then XORed
+// with the bytes in the input slice.
+
+GLOBL ·constants<>(SB), RODATA|NOPTR, $32
+// BSWAP: swap bytes in each 4-byte element
+DATA ·constants<>+0x00(SB)/4, $0x03020100
+DATA ·constants<>+0x04(SB)/4, $0x07060504
+DATA ·constants<>+0x08(SB)/4, $0x0b0a0908
+DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c
+// J0: [j0, j1, j2, j3]
+DATA ·constants<>+0x10(SB)/4, $0x61707865
+DATA ·constants<>+0x14(SB)/4, $0x3320646e
+DATA ·constants<>+0x18(SB)/4, $0x79622d32
+DATA ·constants<>+0x1c(SB)/4, $0x6b206574
+
+#define BSWAP V5
+#define J0 V6
+#define KEY0 V7
+#define KEY1 V8
+#define NONCE V9
+#define CTR V10
+#define M0 V11
+#define M1 V12
+#define M2 V13
+#define M3 V14
+#define INC V15
+#define X0 V16
+#define X1 V17
+#define X2 V18
+#define X3 V19
+#define X4 V20
+#define X5 V21
+#define X6 V22
+#define X7 V23
+#define X8 V24
+#define X9 V25
+#define X10 V26
+#define X11 V27
+#define X12 V28
+#define X13 V29
+#define X14 V30
+#define X15 V31
+
+#define NUM_ROUNDS 20
+
+#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \
+ VAF a1, a0, a0 \
+ VAF b1, b0, b0 \
+ VAF c1, c0, c0 \
+ VAF d1, d0, d0 \
+ VX a0, a2, a2 \
+ VX b0, b2, b2 \
+ VX c0, c2, c2 \
+ VX d0, d2, d2 \
+ VERLLF $16, a2, a2 \
+ VERLLF $16, b2, b2 \
+ VERLLF $16, c2, c2 \
+ VERLLF $16, d2, d2 \
+ VAF a2, a3, a3 \
+ VAF b2, b3, b3 \
+ VAF c2, c3, c3 \
+ VAF d2, d3, d3 \
+ VX a3, a1, a1 \
+ VX b3, b1, b1 \
+ VX c3, c1, c1 \
+ VX d3, d1, d1 \
+ VERLLF $12, a1, a1 \
+ VERLLF $12, b1, b1 \
+ VERLLF $12, c1, c1 \
+ VERLLF $12, d1, d1 \
+ VAF a1, a0, a0 \
+ VAF b1, b0, b0 \
+ VAF c1, c0, c0 \
+ VAF d1, d0, d0 \
+ VX a0, a2, a2 \
+ VX b0, b2, b2 \
+ VX c0, c2, c2 \
+ VX d0, d2, d2 \
+ VERLLF $8, a2, a2 \
+ VERLLF $8, b2, b2 \
+ VERLLF $8, c2, c2 \
+ VERLLF $8, d2, d2 \
+ VAF a2, a3, a3 \
+ VAF b2, b3, b3 \
+ VAF c2, c3, c3 \
+ VAF d2, d3, d3 \
+ VX a3, a1, a1 \
+ VX b3, b1, b1 \
+ VX c3, c1, c1 \
+ VX d3, d1, d1 \
+ VERLLF $7, a1, a1 \
+ VERLLF $7, b1, b1 \
+ VERLLF $7, c1, c1 \
+ VERLLF $7, d1, d1
+
+#define PERMUTE(mask, v0, v1, v2, v3) \
+ VPERM v0, v0, mask, v0 \
+ VPERM v1, v1, mask, v1 \
+ VPERM v2, v2, mask, v2 \
+ VPERM v3, v3, mask, v3
+
+#define ADDV(x, v0, v1, v2, v3) \
+ VAF x, v0, v0 \
+ VAF x, v1, v1 \
+ VAF x, v2, v2 \
+ VAF x, v3, v3
+
+#define XORV(off, dst, src, v0, v1, v2, v3) \
+ VLM off(src), M0, M3 \
+ PERMUTE(BSWAP, v0, v1, v2, v3) \
+ VX v0, M0, M0 \
+ VX v1, M1, M1 \
+ VX v2, M2, M2 \
+ VX v3, M3, M3 \
+ VSTM M0, M3, off(dst)
+
+#define SHUFFLE(a, b, c, d, t, u, v, w) \
+ VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]}
+ VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]}
+ VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]}
+ VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]}
+ VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]}
+ VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]}
+ VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]}
+ VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]}
+
+// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
+TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
+ MOVD $·constants<>(SB), R1
+ MOVD dst+0(FP), R2 // R2=&dst[0]
+ LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src)
+ MOVD key+48(FP), R5 // R5=key
+ MOVD nonce+56(FP), R6 // R6=nonce
+ MOVD counter+64(FP), R7 // R7=counter
+
+ // load BSWAP and J0
+ VLM (R1), BSWAP, J0
+
+ // setup
+ MOVD $95, R0
+ VLM (R5), KEY0, KEY1
+ VLL R0, (R6), NONCE
+ VZERO M0
+ VLEIB $7, $32, M0
+ VSRLB M0, NONCE, NONCE
+
+ // initialize counter values
+ VLREPF (R7), CTR
+ VZERO INC
+ VLEIF $1, $1, INC
+ VLEIF $2, $2, INC
+ VLEIF $3, $3, INC
+ VAF INC, CTR, CTR
+ VREPIF $4, INC
+
+chacha:
+ VREPF $0, J0, X0
+ VREPF $1, J0, X1
+ VREPF $2, J0, X2
+ VREPF $3, J0, X3
+ VREPF $0, KEY0, X4
+ VREPF $1, KEY0, X5
+ VREPF $2, KEY0, X6
+ VREPF $3, KEY0, X7
+ VREPF $0, KEY1, X8
+ VREPF $1, KEY1, X9
+ VREPF $2, KEY1, X10
+ VREPF $3, KEY1, X11
+ VLR CTR, X12
+ VREPF $1, NONCE, X13
+ VREPF $2, NONCE, X14
+ VREPF $3, NONCE, X15
+
+ MOVD $(NUM_ROUNDS/2), R1
+
+loop:
+ ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11)
+ ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9)
+
+ ADD $-1, R1
+ BNE loop
+
+ // decrement length
+ ADD $-256, R4
+
+ // rearrange vectors
+ SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3)
+ ADDV(J0, X0, X1, X2, X3)
+ SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3)
+ ADDV(KEY0, X4, X5, X6, X7)
+ SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3)
+ ADDV(KEY1, X8, X9, X10, X11)
+ VAF CTR, X12, X12
+ SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3)
+ ADDV(NONCE, X12, X13, X14, X15)
+
+ // increment counters
+ VAF INC, CTR, CTR
+
+ // xor keystream with plaintext
+ XORV(0*64, R2, R3, X0, X4, X8, X12)
+ XORV(1*64, R2, R3, X1, X5, X9, X13)
+ XORV(2*64, R2, R3, X2, X6, X10, X14)
+ XORV(3*64, R2, R3, X3, X7, X11, X15)
+
+ // increment pointers
+ MOVD $256(R2), R2
+ MOVD $256(R3), R3
+
+ CMPBNE R4, $0, chacha
+
+ VSTEF $0, CTR, (R7)
+ RET
diff --git a/src/vendor/golang.org/x/crypto/chacha20/xor.go b/src/vendor/golang.org/x/crypto/chacha20/xor.go
new file mode 100644
index 0000000..c2d0485
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20/xor.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chacha20
+
+import "runtime"
+
+// Platforms that have fast unaligned 32-bit little endian accesses.
+const unaligned = runtime.GOARCH == "386" ||
+ runtime.GOARCH == "amd64" ||
+ runtime.GOARCH == "arm64" ||
+ runtime.GOARCH == "ppc64le" ||
+ runtime.GOARCH == "s390x"
+
+// addXor reads a little endian uint32 from src, XORs it with (a + b) and
+// places the result in little endian byte order in dst.
+func addXor(dst, src []byte, a, b uint32) {
+ _, _ = src[3], dst[3] // bounds check elimination hint
+ if unaligned {
+ // The compiler should optimize this code into
+ // 32-bit unaligned little endian loads and stores.
+ // TODO: delete once the compiler does a reliably
+ // good job with the generic code below.
+ // See issue #25111 for more details.
+ v := uint32(src[0])
+ v |= uint32(src[1]) << 8
+ v |= uint32(src[2]) << 16
+ v |= uint32(src[3]) << 24
+ v ^= a + b
+ dst[0] = byte(v)
+ dst[1] = byte(v >> 8)
+ dst[2] = byte(v >> 16)
+ dst[3] = byte(v >> 24)
+ } else {
+ a += b
+ dst[0] = src[0] ^ byte(a)
+ dst[1] = src[1] ^ byte(a>>8)
+ dst[2] = src[2] ^ byte(a>>16)
+ dst[3] = src[3] ^ byte(a>>24)
+ }
+}
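addXor above is internal to the package, so it cannot be called directly; as a sketch of the semantics its comment describes, the same operation written with encoding/binary (hypothetical name addXorPortable, not part of the vendored code) would be:

	package example

	import "encoding/binary"

	// addXorPortable mirrors addXor: read a little-endian uint32 from src,
	// XOR it with a+b, and store the result little-endian into dst.
	func addXorPortable(dst, src []byte, a, b uint32) {
		v := binary.LittleEndian.Uint32(src[:4])
		binary.LittleEndian.PutUint32(dst[:4], v^(a+b))
	}

The hand-written byte loads in the vendored version exist only so the compiler emits unaligned 32-bit accesses on the listed architectures.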
diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
new file mode 100644
index 0000000..93da732
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
@@ -0,0 +1,98 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its
+// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and
+// draft-irtf-cfrg-xchacha-01.
+package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305"
+
+import (
+ "crypto/cipher"
+ "errors"
+)
+
+const (
+ // KeySize is the size of the key used by this AEAD, in bytes.
+ KeySize = 32
+
+ // NonceSize is the size of the nonce used with the standard variant of this
+ // AEAD, in bytes.
+ //
+ // Note that this is too short to be safely generated at random if the same
+ // key is reused more than 2³² times.
+ NonceSize = 12
+
+ // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305
+ // variant of this AEAD, in bytes.
+ NonceSizeX = 24
+
+ // Overhead is the size of the Poly1305 authentication tag, and the
+ // difference between a ciphertext length and its plaintext.
+ Overhead = 16
+)
+
+type chacha20poly1305 struct {
+ key [KeySize]byte
+}
+
+// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key.
+func New(key []byte) (cipher.AEAD, error) {
+ if len(key) != KeySize {
+ return nil, errors.New("chacha20poly1305: bad key length")
+ }
+ ret := new(chacha20poly1305)
+ copy(ret.key[:], key)
+ return ret, nil
+}
+
+func (c *chacha20poly1305) NonceSize() int {
+ return NonceSize
+}
+
+func (c *chacha20poly1305) Overhead() int {
+ return Overhead
+}
+
+func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
+ if len(nonce) != NonceSize {
+ panic("chacha20poly1305: bad nonce length passed to Seal")
+ }
+
+ if uint64(len(plaintext)) > (1<<38)-64 {
+ panic("chacha20poly1305: plaintext too large")
+ }
+
+ return c.seal(dst, nonce, plaintext, additionalData)
+}
+
+var errOpen = errors.New("chacha20poly1305: message authentication failed")
+
+func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+ if len(nonce) != NonceSize {
+ panic("chacha20poly1305: bad nonce length passed to Open")
+ }
+ if len(ciphertext) < 16 {
+ return nil, errOpen
+ }
+ if uint64(len(ciphertext)) > (1<<38)-48 {
+ panic("chacha20poly1305: ciphertext too large")
+ }
+
+ return c.open(dst, nonce, ciphertext, additionalData)
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
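The AEAD constructed by New above is consumed through the standard cipher.AEAD interface; a minimal encrypt/decrypt round trip might look like this sketch (illustrative only, not part of this commit):

	package main

	import (
		"crypto/rand"
		"fmt"

		"golang.org/x/crypto/chacha20poly1305"
	)

	func main() {
		key := make([]byte, chacha20poly1305.KeySize)
		rand.Read(key)

		aead, err := chacha20poly1305.New(key)
		if err != nil {
			panic(err)
		}

		nonce := make([]byte, aead.NonceSize()) // 12 bytes; never reuse a nonce with the same key
		rand.Read(nonce)

		additionalData := []byte("header")
		ciphertext := aead.Seal(nil, nonce, []byte("secret message"), additionalData)

		plaintext, err := aead.Open(nil, nonce, ciphertext, additionalData)
		if err != nil {
			panic(err) // authentication failed
		}
		fmt.Printf("%s\n", plaintext)
	}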
diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
new file mode 100644
index 0000000..50695a1
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
@@ -0,0 +1,86 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+package chacha20poly1305
+
+import (
+ "encoding/binary"
+
+ "golang.org/x/crypto/internal/alias"
+ "golang.org/x/sys/cpu"
+)
+
+//go:noescape
+func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool
+
+//go:noescape
+func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte)
+
+var (
+ useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2
+)
+
+// setupState writes a ChaCha20 input matrix to state. See
+// https://tools.ietf.org/html/rfc7539#section-2.3.
+func setupState(state *[16]uint32, key *[32]byte, nonce []byte) {
+ state[0] = 0x61707865
+ state[1] = 0x3320646e
+ state[2] = 0x79622d32
+ state[3] = 0x6b206574
+
+ state[4] = binary.LittleEndian.Uint32(key[0:4])
+ state[5] = binary.LittleEndian.Uint32(key[4:8])
+ state[6] = binary.LittleEndian.Uint32(key[8:12])
+ state[7] = binary.LittleEndian.Uint32(key[12:16])
+ state[8] = binary.LittleEndian.Uint32(key[16:20])
+ state[9] = binary.LittleEndian.Uint32(key[20:24])
+ state[10] = binary.LittleEndian.Uint32(key[24:28])
+ state[11] = binary.LittleEndian.Uint32(key[28:32])
+
+ state[12] = 0
+ state[13] = binary.LittleEndian.Uint32(nonce[0:4])
+ state[14] = binary.LittleEndian.Uint32(nonce[4:8])
+ state[15] = binary.LittleEndian.Uint32(nonce[8:12])
+}
+
+func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte {
+ if !cpu.X86.HasSSSE3 {
+ return c.sealGeneric(dst, nonce, plaintext, additionalData)
+ }
+
+ var state [16]uint32
+ setupState(&state, &c.key, nonce)
+
+ ret, out := sliceForAppend(dst, len(plaintext)+16)
+ if alias.InexactOverlap(out, plaintext) {
+ panic("chacha20poly1305: invalid buffer overlap")
+ }
+ chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData)
+ return ret
+}
+
+func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+ if !cpu.X86.HasSSSE3 {
+ return c.openGeneric(dst, nonce, ciphertext, additionalData)
+ }
+
+ var state [16]uint32
+ setupState(&state, &c.key, nonce)
+
+ ciphertext = ciphertext[:len(ciphertext)-16]
+ ret, out := sliceForAppend(dst, len(ciphertext))
+ if alias.InexactOverlap(out, ciphertext) {
+ panic("chacha20poly1305: invalid buffer overlap")
+ }
+ if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) {
+ for i := range out {
+ out[i] = 0
+ }
+ return nil, errOpen
+ }
+
+ return ret, nil
+}
diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
new file mode 100644
index 0000000..731d2ac
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
@@ -0,0 +1,2715 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare.
+
+//go:build gc && !purego
+
+#include "textflag.h"
+// General register allocation
+#define oup DI
+#define inp SI
+#define inl BX
+#define adp CX // free to reuse, after we hash the additional data
+#define keyp R8 // free to reuse, when we copy the key to stack
+#define itr2 R9 // general iterator
+#define itr1 CX // general iterator
+#define acc0 R10
+#define acc1 R11
+#define acc2 R12
+#define t0 R13
+#define t1 R14
+#define t2 R15
+#define t3 R8
+// Register and stack allocation for the SSE code
+#define rStore (0*16)(BP)
+#define sStore (1*16)(BP)
+#define state1Store (2*16)(BP)
+#define state2Store (3*16)(BP)
+#define tmpStore (4*16)(BP)
+#define ctr0Store (5*16)(BP)
+#define ctr1Store (6*16)(BP)
+#define ctr2Store (7*16)(BP)
+#define ctr3Store (8*16)(BP)
+#define A0 X0
+#define A1 X1
+#define A2 X2
+#define B0 X3
+#define B1 X4
+#define B2 X5
+#define C0 X6
+#define C1 X7
+#define C2 X8
+#define D0 X9
+#define D1 X10
+#define D2 X11
+#define T0 X12
+#define T1 X13
+#define T2 X14
+#define T3 X15
+#define A3 T0
+#define B3 T1
+#define C3 T2
+#define D3 T3
+// Register and stack allocation for the AVX2 code
+#define rsStoreAVX2 (0*32)(BP)
+#define state1StoreAVX2 (1*32)(BP)
+#define state2StoreAVX2 (2*32)(BP)
+#define ctr0StoreAVX2 (3*32)(BP)
+#define ctr1StoreAVX2 (4*32)(BP)
+#define ctr2StoreAVX2 (5*32)(BP)
+#define ctr3StoreAVX2 (6*32)(BP)
+#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack
+#define AA0 Y0
+#define AA1 Y5
+#define AA2 Y6
+#define AA3 Y7
+#define BB0 Y14
+#define BB1 Y9
+#define BB2 Y10
+#define BB3 Y11
+#define CC0 Y12
+#define CC1 Y13
+#define CC2 Y8
+#define CC3 Y15
+#define DD0 Y4
+#define DD1 Y1
+#define DD2 Y2
+#define DD3 Y3
+#define TT0 DD3
+#define TT1 AA3
+#define TT2 BB3
+#define TT3 CC3
+// ChaCha20 constants
+DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865
+DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e
+DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32
+DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574
+DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865
+DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e
+DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32
+DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574
+// <<< 16 with PSHUFB
+DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302
+DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
+DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302
+DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A
+// <<< 8 with PSHUFB
+DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003
+DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
+DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003
+DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B
+
+DATA ·avx2InitMask<>+0x00(SB)/8, $0x0
+DATA ·avx2InitMask<>+0x08(SB)/8, $0x0
+DATA ·avx2InitMask<>+0x10(SB)/8, $0x1
+DATA ·avx2InitMask<>+0x18(SB)/8, $0x0
+
+DATA ·avx2IncMask<>+0x00(SB)/8, $0x2
+DATA ·avx2IncMask<>+0x08(SB)/8, $0x0
+DATA ·avx2IncMask<>+0x10(SB)/8, $0x2
+DATA ·avx2IncMask<>+0x18(SB)/8, $0x0
+// Poly1305 key clamp
+DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
+DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
+DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF
+DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF
+
+DATA ·sseIncMask<>+0x00(SB)/8, $0x1
+DATA ·sseIncMask<>+0x08(SB)/8, $0x0
+// To load/store the last < 16 bytes in a buffer
+DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff
+DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff
+DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff
+DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff
+DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff
+DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff
+DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff
+DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff
+DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff
+DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff
+DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff
+DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff
+DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff
+DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff
+
+GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32
+GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32
+GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32
+GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16
+GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32
+GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32
+GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32
+GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240
+// No PALIGNR in Go ASM yet (but VPALIGNR is present).
+#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3
+#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4
+#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5
+#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13
+#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6
+#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7
+#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8
+#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14
+#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9
+#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10
+#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11
+#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15
+#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3
+#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4
+#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5
+#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X13, X13
+#define shiftC0Right shiftC0Left
+#define shiftC1Right shiftC1Left
+#define shiftC2Right shiftC2Left
+#define shiftC3Right shiftC3Left
+#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9
+#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10
+#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11
+#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15
+
+// Some macros
+
+// ROL rotates the uint32s in register R left by N bits, using temporary T.
+#define ROL(N, R, T) \
+ MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R
+
+// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed.
+#ifdef GOAMD64_v2
+#define ROL16(R, T) PSHUFB ·rol16<>(SB), R
+#else
+#define ROL16(R, T) ROL(16, R, T)
+#endif
+
+// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed.
+#ifdef GOAMD64_v2
+#define ROL8(R, T) PSHUFB ·rol8<>(SB), R
+#else
+#define ROL8(R, T) ROL(8, R, T)
+#endif
+
+#define chachaQR(A, B, C, D, T) \
+ PADDD B, A; PXOR A, D; ROL16(D, T) \
+ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \
+ PADDD B, A; PXOR A, D; ROL8(D, T) \
+ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B
+
+#define chachaQR_AVX2(A, B, C, D, T) \
+ VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \
+ VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \
+ VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \
+ VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B
+
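+// polyAdd folds the 16-byte little-endian block at S (plus the 2^128 padding bit) into the
+// Poly1305 accumulator held in acc0:acc1:acc2. The polyMulStage*/polyMulReduceStage macros
+// multiply that accumulator by the clamped key r stored at 0(BP) and 8(BP) (rStore) and reduce
+// the result modulo 2^130 - 5; the multiply is split into stages so it can be interleaved with
+// ChaCha20 quarter rounds.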
+#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2
+#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2
+#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX
+#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3
+#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2
+
+#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2
+#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3
+#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3
+
+#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage
+#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage
+// ----------------------------------------------------------------------------
+TEXT polyHashADInternal<>(SB), NOSPLIT, $0
+ // adp points to beginning of additional data
+ // itr2 holds ad length
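+	// The resulting Poly1305 accumulator is left in acc0:acc1:acc2 for the caller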
+ XORQ acc0, acc0
+ XORQ acc1, acc1
+ XORQ acc2, acc2
+ CMPQ itr2, $13
+ JNE hashADLoop
+
+openFastTLSAD:
+ // Special treatment for the TLS case of 13 bytes
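+	// acc0 = ad[0:8]; acc1 = ad[5:13]>>24, i.e. ad[8:13] in its low 40 bits; acc2 = 1 is the
+	// 2^128 pad bit of the zero-padded 16-byte block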
+ MOVQ (adp), acc0
+ MOVQ 5(adp), acc1
+ SHRQ $24, acc1
+ MOVQ $1, acc2
+ polyMul
+ RET
+
+hashADLoop:
+ // Hash in 16 byte chunks
+ CMPQ itr2, $16
+ JB hashADTail
+ polyAdd(0(adp))
+ LEAQ (1*16)(adp), adp
+ SUBQ $16, itr2
+ polyMul
+ JMP hashADLoop
+
+hashADTail:
+ CMPQ itr2, $0
+ JE hashADDone
+
+ // Hash last < 16 byte tail
+ XORQ t0, t0
+ XORQ t1, t1
+ XORQ t2, t2
+ ADDQ itr2, adp
+
+hashADTailLoop:
+ SHLQ $8, t0, t1
+ SHLQ $8, t0
+ MOVB -1(adp), t2
+ XORQ t2, t0
+ DECQ adp
+ DECQ itr2
+ JNE hashADTailLoop
+
+hashADTailFinish:
+ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
+ polyMul
+
+ // Finished AD
+hashADDone:
+ RET
+
+// ----------------------------------------------------------------------------
+// func chacha20Poly1305Open(dst, key, src, ad []byte) bool
+TEXT ·chacha20Poly1305Open(SB), 0, $288-97
+ // For aligned stack access
+ MOVQ SP, BP
+ ADDQ $32, BP
+ ANDQ $-32, BP
+ MOVQ dst+0(FP), oup
+ MOVQ key+24(FP), keyp
+ MOVQ src+48(FP), inp
+ MOVQ src_len+56(FP), inl
+ MOVQ ad+72(FP), adp
+
+ // Check for AVX2 support
+ CMPB ·useAVX2(SB), $1
+ JE chacha20Poly1305Open_AVX2
+
+	// Special optimization for very short buffers
+ CMPQ inl, $128
+ JBE openSSE128 // About 16% faster
+
+ // For long buffers, prepare the poly key first
+ MOVOU ·chacha20Constants<>(SB), A0
+ MOVOU (1*16)(keyp), B0
+ MOVOU (2*16)(keyp), C0
+ MOVOU (3*16)(keyp), D0
+ MOVO D0, T1
+
+ // Store state on stack for future use
+ MOVO B0, state1Store
+ MOVO C0, state2Store
+ MOVO D0, ctr3Store
+ MOVQ $10, itr2
+
+openSSEPreparePolyKey:
+ chachaQR(A0, B0, C0, D0, T0)
+ shiftB0Left; shiftC0Left; shiftD0Left
+ chachaQR(A0, B0, C0, D0, T0)
+ shiftB0Right; shiftC0Right; shiftD0Right
+ DECQ itr2
+ JNE openSSEPreparePolyKey
+
+ // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded
+ PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0
+
+ // Clamp and store the key
+ PAND ·polyClampMask<>(SB), A0
+ MOVO A0, rStore; MOVO B0, sStore
+
+ // Hash AAD
+ MOVQ ad_len+80(FP), itr2
+ CALL polyHashADInternal<>(SB)
+
+openSSEMainLoop:
+ CMPQ inl, $256
+ JB openSSEMainLoopDone
+
+ // Load state, increment counter blocks
+ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0
+ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
+
+ // Store counters
+ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
+
+ // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16
+ MOVQ $4, itr1
+ MOVQ inp, itr2
+
+openSSEInternalLoop:
+ MOVO C3, tmpStore
+ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+ MOVO tmpStore, C3
+ MOVO C1, tmpStore
+ chachaQR(A3, B3, C3, D3, C1)
+ MOVO tmpStore, C1
+ polyAdd(0(itr2))
+ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left
+ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left
+ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left
+ polyMulStage1
+ polyMulStage2
+ LEAQ (2*8)(itr2), itr2
+ MOVO C3, tmpStore
+ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+ MOVO tmpStore, C3
+ MOVO C1, tmpStore
+ polyMulStage3
+ chachaQR(A3, B3, C3, D3, C1)
+ MOVO tmpStore, C1
+ polyMulReduceStage
+ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
+ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
+ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
+ DECQ itr1
+ JGE openSSEInternalLoop
+
+ polyAdd(0(itr2))
+ polyMul
+ LEAQ (2*8)(itr2), itr2
+
+ CMPQ itr1, $-6
+ JG openSSEInternalLoop
+
+ // Add in the state
+ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
+ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
+ PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
+ PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
+
+ // Load - xor - store
+ MOVO D3, tmpStore
+ MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup)
+ MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup)
+ MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup)
+ MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup)
+ MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup)
+ MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup)
+ MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup)
+ MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup)
+ MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup)
+ MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup)
+ MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup)
+ MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup)
+ MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup)
+ MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup)
+ MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup)
+ MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup)
+ LEAQ 256(inp), inp
+ LEAQ 256(oup), oup
+ SUBQ $256, inl
+ JMP openSSEMainLoop
+
+openSSEMainLoopDone:
+ // Handle the various tail sizes efficiently
+ TESTQ inl, inl
+ JE openSSEFinalize
+ CMPQ inl, $64
+ JBE openSSETail64
+ CMPQ inl, $128
+ JBE openSSETail128
+ CMPQ inl, $192
+ JBE openSSETail192
+ JMP openSSETail256
+
+openSSEFinalize:
+ // Hash in the PT, AAD lengths
+ ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2
+ polyMul
+
+	// Final reduce: compute acc - p for p = 2^130 - 5, keeping the original acc if the subtraction borrows (acc < p)
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ MOVQ acc2, t2
+ SUBQ $-5, acc0
+ SBBQ $-1, acc1
+ SBBQ $3, acc2
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+ CMOVQCS t2, acc2
+
+ // Add in the "s" part of the key
+ ADDQ 0+sStore, acc0
+ ADCQ 8+sStore, acc1
+
+ // Finally, constant time compare to the tag at the end of the message
+ XORQ AX, AX
+ MOVQ $1, DX
+ XORQ (0*8)(inp), acc0
+ XORQ (1*8)(inp), acc1
+ ORQ acc1, acc0
+ CMOVQEQ DX, AX
+
+ // Return true iff tags are equal
+ MOVB AX, ret+96(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 129 bytes
+openSSE128:
+	// For up to 128 bytes of ciphertext and 64 bytes for the poly key, we need to process three blocks
+ MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0
+ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+ MOVO B0, T1; MOVO C0, T2; MOVO D1, T3
+ MOVQ $10, itr2
+
+openSSE128InnerCipherLoop:
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+ shiftB0Left; shiftB1Left; shiftB2Left
+ shiftC0Left; shiftC1Left; shiftC2Left
+ shiftD0Left; shiftD1Left; shiftD2Left
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+ shiftB0Right; shiftB1Right; shiftB2Right
+ shiftC0Right; shiftC1Right; shiftC2Right
+ shiftD0Right; shiftD1Right; shiftD2Right
+ DECQ itr2
+ JNE openSSE128InnerCipherLoop
+
+ // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded
+ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
+ PADDL T1, B0; PADDL T1, B1; PADDL T1, B2
+ PADDL T2, C1; PADDL T2, C2
+ PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2
+
+ // Clamp and store the key
+ PAND ·polyClampMask<>(SB), A0
+ MOVOU A0, rStore; MOVOU B0, sStore
+
+ // Hash
+ MOVQ ad_len+80(FP), itr2
+ CALL polyHashADInternal<>(SB)
+
+openSSE128Open:
+ CMPQ inl, $16
+ JB openSSETail16
+ SUBQ $16, inl
+
+ // Load for hashing
+ polyAdd(0(inp))
+
+ // Load for decryption
+ MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup)
+ LEAQ (1*16)(inp), inp
+ LEAQ (1*16)(oup), oup
+ polyMul
+
+ // Shift the stream "left"
+ MOVO B1, A1
+ MOVO C1, B1
+ MOVO D1, C1
+ MOVO A2, D1
+ MOVO B2, A2
+ MOVO C2, B2
+ MOVO D2, C2
+ JMP openSSE128Open
+
+openSSETail16:
+ TESTQ inl, inl
+ JE openSSEFinalize
+
+ // We can safely load the CT from the end, because it is padded with the MAC
+ MOVQ inl, itr2
+ SHLQ $4, itr2
+ LEAQ ·andMask<>(SB), t0
+ MOVOU (inp), T0
+ ADDQ inl, inp
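+	// -16(t0)(itr2*1) = ·andMask + (inl-1)*16, the mask that keeps only the low inl bytes of the block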
+ PAND -16(t0)(itr2*1), T0
+ MOVO T0, 0+tmpStore
+ MOVQ T0, t0
+ MOVQ 8+tmpStore, t1
+ PXOR A1, T0
+
+ // We can only store one byte at a time, since plaintext can be shorter than 16 bytes
+openSSETail16Store:
+ MOVQ T0, t3
+ MOVB t3, (oup)
+ PSRLDQ $1, T0
+ INCQ oup
+ DECQ inl
+ JNE openSSETail16Store
+ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
+ polyMul
+ JMP openSSEFinalize
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 64 bytes of ciphertext
+openSSETail64:
+ // Need to decrypt up to 64 bytes - prepare single block
+ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store
+ XORQ itr2, itr2
+ MOVQ inl, itr1
+ CMPQ itr1, $16
+ JB openSSETail64LoopB
+
+openSSETail64LoopA:
+ // Perform ChaCha rounds, while hashing the remaining input
+ polyAdd(0(inp)(itr2*1))
+ polyMul
+ SUBQ $16, itr1
+
+openSSETail64LoopB:
+ ADDQ $16, itr2
+ chachaQR(A0, B0, C0, D0, T0)
+ shiftB0Left; shiftC0Left; shiftD0Left
+ chachaQR(A0, B0, C0, D0, T0)
+ shiftB0Right; shiftC0Right; shiftD0Right
+
+ CMPQ itr1, $16
+ JAE openSSETail64LoopA
+
+ CMPQ itr2, $160
+ JNE openSSETail64LoopB
+
+ PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0
+
+openSSETail64DecLoop:
+ CMPQ inl, $16
+ JB openSSETail64DecLoopDone
+ SUBQ $16, inl
+ MOVOU (inp), T0
+ PXOR T0, A0
+ MOVOU A0, (oup)
+ LEAQ 16(inp), inp
+ LEAQ 16(oup), oup
+ MOVO B0, A0
+ MOVO C0, B0
+ MOVO D0, C0
+ JMP openSSETail64DecLoop
+
+openSSETail64DecLoopDone:
+ MOVO A0, A1
+ JMP openSSETail16
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 128 bytes of ciphertext
+openSSETail128:
+ // Need to decrypt up to 128 bytes - prepare two blocks
+ MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store
+ MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store
+ XORQ itr2, itr2
+ MOVQ inl, itr1
+ ANDQ $-16, itr1
+
+openSSETail128LoopA:
+ // Perform ChaCha rounds, while hashing the remaining input
+ polyAdd(0(inp)(itr2*1))
+ polyMul
+
+openSSETail128LoopB:
+ ADDQ $16, itr2
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
+ shiftB0Left; shiftC0Left; shiftD0Left
+ shiftB1Left; shiftC1Left; shiftD1Left
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
+ shiftB0Right; shiftC0Right; shiftD0Right
+ shiftB1Right; shiftC1Right; shiftD1Right
+
+ CMPQ itr2, itr1
+ JB openSSETail128LoopA
+
+ CMPQ itr2, $160
+ JNE openSSETail128LoopB
+
+ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1
+ PADDL state1Store, B0; PADDL state1Store, B1
+ PADDL state2Store, C0; PADDL state2Store, C1
+ PADDL ctr1Store, D0; PADDL ctr0Store, D1
+
+ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
+ PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1
+ MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup)
+
+ SUBQ $64, inl
+ LEAQ 64(inp), inp
+ LEAQ 64(oup), oup
+ JMP openSSETail64DecLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 192 bytes of ciphertext
+openSSETail192:
+ // Need to decrypt up to 192 bytes - prepare three blocks
+ MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store
+ MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store
+ MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store
+
+ MOVQ inl, itr1
+ MOVQ $160, itr2
+ CMPQ itr1, $160
+ CMOVQGT itr2, itr1
+ ANDQ $-16, itr1
+ XORQ itr2, itr2
+
+openSSLTail192LoopA:
+ // Perform ChaCha rounds, while hashing the remaining input
+ polyAdd(0(inp)(itr2*1))
+ polyMul
+
+openSSLTail192LoopB:
+ ADDQ $16, itr2
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+ shiftB0Left; shiftC0Left; shiftD0Left
+ shiftB1Left; shiftC1Left; shiftD1Left
+ shiftB2Left; shiftC2Left; shiftD2Left
+
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+ shiftB0Right; shiftC0Right; shiftD0Right
+ shiftB1Right; shiftC1Right; shiftD1Right
+ shiftB2Right; shiftC2Right; shiftD2Right
+
+ CMPQ itr2, itr1
+ JB openSSLTail192LoopA
+
+ CMPQ itr2, $160
+ JNE openSSLTail192LoopB
+
+ CMPQ inl, $176
+ JB openSSLTail192Store
+
+ polyAdd(160(inp))
+ polyMul
+
+ CMPQ inl, $192
+ JB openSSLTail192Store
+
+ polyAdd(176(inp))
+ polyMul
+
+openSSLTail192Store:
+ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
+ PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2
+ PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2
+ PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2
+
+ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
+ PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2
+ MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup)
+
+ MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3
+ PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1
+ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
+
+ SUBQ $128, inl
+ LEAQ 128(inp), inp
+ LEAQ 128(oup), oup
+ JMP openSSETail64DecLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 256 bytes of ciphertext
+openSSETail256:
+ // Need to decrypt up to 256 bytes - prepare four blocks
+ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0
+ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
+
+ // Store counters
+ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
+ XORQ itr2, itr2
+
+openSSETail256Loop:
+	// This loop interleaves 8 ChaCha quarter rounds with 1 poly multiplication
+ polyAdd(0(inp)(itr2*1))
+ MOVO C3, tmpStore
+ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+ MOVO tmpStore, C3
+ MOVO C1, tmpStore
+ chachaQR(A3, B3, C3, D3, C1)
+ MOVO tmpStore, C1
+ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left
+ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left
+ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left
+ polyMulStage1
+ polyMulStage2
+ MOVO C3, tmpStore
+ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+ MOVO tmpStore, C3
+ MOVO C1, tmpStore
+ chachaQR(A3, B3, C3, D3, C1)
+ MOVO tmpStore, C1
+ polyMulStage3
+ polyMulReduceStage
+ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
+ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
+ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
+ ADDQ $2*8, itr2
+ CMPQ itr2, $160
+ JB openSSETail256Loop
+ MOVQ inl, itr1
+ ANDQ $-16, itr1
+
+openSSETail256HashLoop:
+ polyAdd(0(inp)(itr2*1))
+ polyMul
+ ADDQ $2*8, itr2
+ CMPQ itr2, itr1
+ JB openSSETail256HashLoop
+
+ // Add in the state
+ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
+ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
+ PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
+ PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
+ MOVO D3, tmpStore
+
+ // Load - xor - store
+ MOVOU (0*16)(inp), D3; PXOR D3, A0
+ MOVOU (1*16)(inp), D3; PXOR D3, B0
+ MOVOU (2*16)(inp), D3; PXOR D3, C0
+ MOVOU (3*16)(inp), D3; PXOR D3, D0
+ MOVOU A0, (0*16)(oup)
+ MOVOU B0, (1*16)(oup)
+ MOVOU C0, (2*16)(oup)
+ MOVOU D0, (3*16)(oup)
+ MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0
+ PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1
+ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
+ MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0
+ PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2
+ MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup)
+ LEAQ 192(inp), inp
+ LEAQ 192(oup), oup
+ SUBQ $192, inl
+ MOVO A3, A0
+ MOVO B3, B0
+ MOVO C3, C0
+ MOVO tmpStore, D0
+
+ JMP openSSETail64DecLoop
+
+// ----------------------------------------------------------------------------
+// ------------------------- AVX2 Code ----------------------------------------
+chacha20Poly1305Open_AVX2:
+ VZEROUPPER
+ VMOVDQU ·chacha20Constants<>(SB), AA0
+ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14
+ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12
+ BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4
+ VPADDD ·avx2InitMask<>(SB), DD0, DD0
+
+	// Special optimization for very short buffers
+ CMPQ inl, $192
+ JBE openAVX2192
+ CMPQ inl, $320
+ JBE openAVX2320
+
+	// In the general case, prepare the poly key first - as a byproduct we have 64 bytes of cipher stream
+ VMOVDQA BB0, state1StoreAVX2
+ VMOVDQA CC0, state2StoreAVX2
+ VMOVDQA DD0, ctr3StoreAVX2
+ MOVQ $10, itr2
+
+openAVX2PreparePolyKey:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0
+ DECQ itr2
+ JNE openAVX2PreparePolyKey
+
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0
+ VPADDD state1StoreAVX2, BB0, BB0
+ VPADDD state2StoreAVX2, CC0, CC0
+ VPADDD ctr3StoreAVX2, DD0, DD0
+
+ VPERM2I128 $0x02, AA0, BB0, TT0
+
+ // Clamp and store poly key
+ VPAND ·polyClampMask<>(SB), TT0, TT0
+ VMOVDQA TT0, rsStoreAVX2
+
+ // Stream for the first 64 bytes
+ VPERM2I128 $0x13, AA0, BB0, AA0
+ VPERM2I128 $0x13, CC0, DD0, BB0
+
+ // Hash AD + first 64 bytes
+ MOVQ ad_len+80(FP), itr2
+ CALL polyHashADInternal<>(SB)
+ XORQ itr1, itr1
+
+openAVX2InitialHash64:
+ polyAdd(0(inp)(itr1*1))
+ polyMulAVX2
+ ADDQ $16, itr1
+ CMPQ itr1, $64
+ JNE openAVX2InitialHash64
+
+ // Decrypt the first 64 bytes
+ VPXOR (0*32)(inp), AA0, AA0
+ VPXOR (1*32)(inp), BB0, BB0
+ VMOVDQU AA0, (0*32)(oup)
+ VMOVDQU BB0, (1*32)(oup)
+ LEAQ (2*32)(inp), inp
+ LEAQ (2*32)(oup), oup
+ SUBQ $64, inl
+
+openAVX2MainLoop:
+ CMPQ inl, $512
+ JB openAVX2MainLoopDone
+
+ // Load state, increment counter blocks, store the incremented counters
+ VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+ VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+ XORQ itr1, itr1
+
+openAVX2InternalLoop:
+	// Let's just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications
+	// Effectively, per 512 bytes of stream we hash 480 bytes of ciphertext
+ polyAdd(0*8(inp)(itr1*1))
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ polyMulStage1_AVX2
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+ polyMulStage2_AVX2
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ polyMulStage3_AVX2
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyMulReduceStage
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+ polyAdd(2*8(inp)(itr1*1))
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ polyMulStage1_AVX2
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyMulStage2_AVX2
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ polyMulStage3_AVX2
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+ polyMulReduceStage
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ polyAdd(4*8(inp)(itr1*1))
+ LEAQ (6*8)(itr1), itr1
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyMulStage1_AVX2
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ polyMulStage2_AVX2
+ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ polyMulStage3_AVX2
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyMulReduceStage
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
+ CMPQ itr1, $480
+ JNE openAVX2InternalLoop
+
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+ VMOVDQA CC3, tmpStoreAVX2
+
+ // We only hashed 480 of the 512 bytes available - hash the remaining 32 here
+ polyAdd(480(inp))
+ polyMulAVX2
+ VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0
+ VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0
+ VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup)
+ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
+ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
+
+ // and here
+ polyAdd(496(inp))
+ polyMulAVX2
+ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
+ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
+ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
+ VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0
+ VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup)
+ LEAQ (32*16)(inp), inp
+ LEAQ (32*16)(oup), oup
+ SUBQ $(32*16), inl
+ JMP openAVX2MainLoop
+
+openAVX2MainLoopDone:
+ // Handle the various tail sizes efficiently
+ TESTQ inl, inl
+ JE openSSEFinalize
+ CMPQ inl, $128
+ JBE openAVX2Tail128
+ CMPQ inl, $256
+ JBE openAVX2Tail256
+ CMPQ inl, $384
+ JBE openAVX2Tail384
+ JMP openAVX2Tail512
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 193 bytes
+openAVX2192:
+ // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks
+ VMOVDQA AA0, AA1
+ VMOVDQA BB0, BB1
+ VMOVDQA CC0, CC1
+ VPADDD ·avx2IncMask<>(SB), DD0, DD1
+ VMOVDQA AA0, AA2
+ VMOVDQA BB0, BB2
+ VMOVDQA CC0, CC2
+ VMOVDQA DD0, DD2
+ VMOVDQA DD1, TT3
+ MOVQ $10, itr2
+
+openAVX2192InnerCipherLoop:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
+ DECQ itr2
+ JNE openAVX2192InnerCipherLoop
+ VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1
+ VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1
+ VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1
+ VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1
+ VPERM2I128 $0x02, AA0, BB0, TT0
+
+ // Clamp and store poly key
+ VPAND ·polyClampMask<>(SB), TT0, TT0
+ VMOVDQA TT0, rsStoreAVX2
+
+ // Stream for up to 192 bytes
+ VPERM2I128 $0x13, AA0, BB0, AA0
+ VPERM2I128 $0x13, CC0, DD0, BB0
+ VPERM2I128 $0x02, AA1, BB1, CC0
+ VPERM2I128 $0x02, CC1, DD1, DD0
+ VPERM2I128 $0x13, AA1, BB1, AA1
+ VPERM2I128 $0x13, CC1, DD1, BB1
+
+openAVX2ShortOpen:
+ // Hash
+ MOVQ ad_len+80(FP), itr2
+ CALL polyHashADInternal<>(SB)
+
+openAVX2ShortOpenLoop:
+ CMPQ inl, $32
+ JB openAVX2ShortTail32
+ SUBQ $32, inl
+
+ // Load for hashing
+ polyAdd(0*8(inp))
+ polyMulAVX2
+ polyAdd(2*8(inp))
+ polyMulAVX2
+
+ // Load for decryption
+ VPXOR (inp), AA0, AA0
+ VMOVDQU AA0, (oup)
+ LEAQ (1*32)(inp), inp
+ LEAQ (1*32)(oup), oup
+
+ // Shift stream left
+ VMOVDQA BB0, AA0
+ VMOVDQA CC0, BB0
+ VMOVDQA DD0, CC0
+ VMOVDQA AA1, DD0
+ VMOVDQA BB1, AA1
+ VMOVDQA CC1, BB1
+ VMOVDQA DD1, CC1
+ VMOVDQA AA2, DD1
+ VMOVDQA BB2, AA2
+ JMP openAVX2ShortOpenLoop
+
+openAVX2ShortTail32:
+ CMPQ inl, $16
+ VMOVDQA A0, A1
+ JB openAVX2ShortDone
+
+ SUBQ $16, inl
+
+ // Load for hashing
+ polyAdd(0*8(inp))
+ polyMulAVX2
+
+ // Load for decryption
+ VPXOR (inp), A0, T0
+ VMOVDQU T0, (oup)
+ LEAQ (1*16)(inp), inp
+ LEAQ (1*16)(oup), oup
+ VPERM2I128 $0x11, AA0, AA0, AA0
+ VMOVDQA A0, A1
+
+openAVX2ShortDone:
+ VZEROUPPER
+ JMP openSSETail16
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 321 bytes
+openAVX2320:
+ // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks
+ VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1
+ VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2
+ VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3
+ MOVQ $10, itr2
+
+openAVX2320InnerCipherLoop:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
+ DECQ itr2
+ JNE openAVX2320InnerCipherLoop
+
+ VMOVDQA ·chacha20Constants<>(SB), TT0
+ VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2
+ VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2
+ VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2
+ VMOVDQA ·avx2IncMask<>(SB), TT0
+ VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3
+ VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3
+ VPADDD TT3, DD2, DD2
+
+ // Clamp and store poly key
+ VPERM2I128 $0x02, AA0, BB0, TT0
+ VPAND ·polyClampMask<>(SB), TT0, TT0
+ VMOVDQA TT0, rsStoreAVX2
+
+ // Stream for up to 320 bytes
+ VPERM2I128 $0x13, AA0, BB0, AA0
+ VPERM2I128 $0x13, CC0, DD0, BB0
+ VPERM2I128 $0x02, AA1, BB1, CC0
+ VPERM2I128 $0x02, CC1, DD1, DD0
+ VPERM2I128 $0x13, AA1, BB1, AA1
+ VPERM2I128 $0x13, CC1, DD1, BB1
+ VPERM2I128 $0x02, AA2, BB2, CC1
+ VPERM2I128 $0x02, CC2, DD2, DD1
+ VPERM2I128 $0x13, AA2, BB2, AA2
+ VPERM2I128 $0x13, CC2, DD2, BB2
+ JMP openAVX2ShortOpen
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 128 bytes of ciphertext
+openAVX2Tail128:
+ // Need to decrypt up to 128 bytes - prepare two blocks
+ VMOVDQA ·chacha20Constants<>(SB), AA1
+ VMOVDQA state1StoreAVX2, BB1
+ VMOVDQA state2StoreAVX2, CC1
+ VMOVDQA ctr3StoreAVX2, DD1
+ VPADDD ·avx2IncMask<>(SB), DD1, DD1
+ VMOVDQA DD1, DD0
+
+ XORQ itr2, itr2
+ MOVQ inl, itr1
+ ANDQ $-16, itr1
+ TESTQ itr1, itr1
+ JE openAVX2Tail128LoopB
+
+openAVX2Tail128LoopA:
+ // Perform ChaCha rounds, while hashing the remaining input
+ polyAdd(0(inp)(itr2*1))
+ polyMulAVX2
+
+openAVX2Tail128LoopB:
+ ADDQ $16, itr2
+ chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ VPALIGNR $4, BB1, BB1, BB1
+ VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $12, DD1, DD1, DD1
+ chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ VPALIGNR $12, BB1, BB1, BB1
+ VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $4, DD1, DD1, DD1
+ CMPQ itr2, itr1
+ JB openAVX2Tail128LoopA
+ CMPQ itr2, $160
+ JNE openAVX2Tail128LoopB
+
+ VPADDD ·chacha20Constants<>(SB), AA1, AA1
+ VPADDD state1StoreAVX2, BB1, BB1
+ VPADDD state2StoreAVX2, CC1, CC1
+ VPADDD DD0, DD1, DD1
+ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+
+openAVX2TailLoop:
+ CMPQ inl, $32
+ JB openAVX2Tail
+ SUBQ $32, inl
+
+ // Load for decryption
+ VPXOR (inp), AA0, AA0
+ VMOVDQU AA0, (oup)
+ LEAQ (1*32)(inp), inp
+ LEAQ (1*32)(oup), oup
+ VMOVDQA BB0, AA0
+ VMOVDQA CC0, BB0
+ VMOVDQA DD0, CC0
+ JMP openAVX2TailLoop
+
+openAVX2Tail:
+ CMPQ inl, $16
+ VMOVDQA A0, A1
+ JB openAVX2TailDone
+ SUBQ $16, inl
+
+ // Load for decryption
+ VPXOR (inp), A0, T0
+ VMOVDQU T0, (oup)
+ LEAQ (1*16)(inp), inp
+ LEAQ (1*16)(oup), oup
+ VPERM2I128 $0x11, AA0, AA0, AA0
+ VMOVDQA A0, A1
+
+openAVX2TailDone:
+ VZEROUPPER
+ JMP openSSETail16
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 256 bytes of ciphertext
+openAVX2Tail256:
+ // Need to decrypt up to 256 bytes - prepare four blocks
+ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1
+ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1
+ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1
+ VMOVDQA ctr3StoreAVX2, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD1
+ VMOVDQA DD0, TT1
+ VMOVDQA DD1, TT2
+
+ // Compute the number of iterations that will hash data
+ MOVQ inl, tmpStoreAVX2
+ MOVQ inl, itr1
+ SUBQ $128, itr1
+ SHRQ $4, itr1
+ MOVQ $10, itr2
+ CMPQ itr1, $10
+ CMOVQGT itr2, itr1
+ MOVQ inp, inl
+ XORQ itr2, itr2
+
+openAVX2Tail256LoopA:
+ polyAdd(0(inl))
+ polyMulAVX2
+ LEAQ 16(inl), inl
+
+ // Perform ChaCha rounds, while hashing the remaining input
+openAVX2Tail256LoopB:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
+ INCQ itr2
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
+ CMPQ itr2, itr1
+ JB openAVX2Tail256LoopA
+
+ CMPQ itr2, $10
+ JNE openAVX2Tail256LoopB
+
+ MOVQ inl, itr2
+ SUBQ inp, inl
+ MOVQ inl, itr1
+ MOVQ tmpStoreAVX2, inl
+
+ // Hash the remainder of data (if any)
+openAVX2Tail256Hash:
+ ADDQ $16, itr1
+ CMPQ itr1, inl
+ JGT openAVX2Tail256HashEnd
+ polyAdd (0(itr2))
+ polyMulAVX2
+ LEAQ 16(itr2), itr2
+ JMP openAVX2Tail256Hash
+
+// Store 128 bytes safely, then go to store loop
+openAVX2Tail256HashEnd:
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1
+ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1
+ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1
+ VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1
+ VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2
+ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+
+ VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2
+ VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup)
+ LEAQ (4*32)(inp), inp
+ LEAQ (4*32)(oup), oup
+ SUBQ $4*32, inl
+
+ JMP openAVX2TailLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 384 bytes of ciphertext
+openAVX2Tail384:
+ // Need to decrypt up to 384 bytes - prepare six blocks
+ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2
+ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2
+ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2
+ VMOVDQA ctr3StoreAVX2, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD1
+ VPADDD ·avx2IncMask<>(SB), DD1, DD2
+ VMOVDQA DD0, ctr0StoreAVX2
+ VMOVDQA DD1, ctr1StoreAVX2
+ VMOVDQA DD2, ctr2StoreAVX2
+
+ // Compute the number of iterations that will hash two blocks of data
+ MOVQ inl, tmpStoreAVX2
+ MOVQ inl, itr1
+ SUBQ $256, itr1
+ SHRQ $4, itr1
+ ADDQ $6, itr1
+ MOVQ $10, itr2
+ CMPQ itr1, $10
+ CMOVQGT itr2, itr1
+ MOVQ inp, inl
+ XORQ itr2, itr2
+
+ // Perform ChaCha rounds, while hashing the remaining input
+openAVX2Tail384LoopB:
+ polyAdd(0(inl))
+ polyMulAVX2
+ LEAQ 16(inl), inl
+
+openAVX2Tail384LoopA:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
+ polyAdd(0(inl))
+ polyMulAVX2
+ LEAQ 16(inl), inl
+ INCQ itr2
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
+
+ CMPQ itr2, itr1
+ JB openAVX2Tail384LoopB
+
+ CMPQ itr2, $10
+ JNE openAVX2Tail384LoopA
+
+ MOVQ inl, itr2
+ SUBQ inp, inl
+ MOVQ inl, itr1
+ MOVQ tmpStoreAVX2, inl
+
+openAVX2Tail384Hash:
+ ADDQ $16, itr1
+ CMPQ itr1, inl
+ JGT openAVX2Tail384HashEnd
+ polyAdd(0(itr2))
+ polyMulAVX2
+ LEAQ 16(itr2), itr2
+ JMP openAVX2Tail384Hash
+
+// Store 256 bytes safely, then go to store loop
+openAVX2Tail384HashEnd:
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2
+ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2
+ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2
+ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2
+ VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3
+ VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3
+ VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup)
+ VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3
+ VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3
+ VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup)
+ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+ LEAQ (8*32)(inp), inp
+ LEAQ (8*32)(oup), oup
+ SUBQ $8*32, inl
+ JMP openAVX2TailLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 512 bytes of ciphertext
+openAVX2Tail512:
+ VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+ VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+ XORQ itr1, itr1
+ MOVQ inp, itr2
+
+openAVX2Tail512LoopB:
+ polyAdd(0(itr2))
+ polyMulAVX2
+ LEAQ (2*8)(itr2), itr2
+
+openAVX2Tail512LoopA:
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyAdd(0*8(itr2))
+ polyMulAVX2
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ polyAdd(2*8(itr2))
+ polyMulAVX2
+ LEAQ (4*8)(itr2), itr2
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
+ INCQ itr1
+ CMPQ itr1, $4
+ JLT openAVX2Tail512LoopB
+
+ CMPQ itr1, $10
+ JNE openAVX2Tail512LoopA
+
+ MOVQ inl, itr1
+ SUBQ $384, itr1
+ ANDQ $-16, itr1
+
+openAVX2Tail512HashLoop:
+ TESTQ itr1, itr1
+ JE openAVX2Tail512HashEnd
+ polyAdd(0(itr2))
+ polyMulAVX2
+ LEAQ 16(itr2), itr2
+ SUBQ $16, itr1
+ JMP openAVX2Tail512HashLoop
+
+openAVX2Tail512HashEnd:
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0
+ VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0
+ VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup)
+ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
+ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
+ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
+ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
+ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
+
+ LEAQ (12*32)(inp), inp
+ LEAQ (12*32)(oup), oup
+ SUBQ $12*32, inl
+
+ JMP openAVX2TailLoop
+
+// ----------------------------------------------------------------------------
+// ----------------------------------------------------------------------------
+// func chacha20Poly1305Seal(dst, key, src, ad []byte)
+TEXT ·chacha20Poly1305Seal(SB), 0, $288-96
+ // For aligned stack access
+ MOVQ SP, BP
+ ADDQ $32, BP
+ ANDQ $-32, BP
+ MOVQ dst+0(FP), oup
+ MOVQ key+24(FP), keyp
+ MOVQ src+48(FP), inp
+ MOVQ src_len+56(FP), inl
+ MOVQ ad+72(FP), adp
+
+ CMPB ·useAVX2(SB), $1
+ JE chacha20Poly1305Seal_AVX2
+
+	// Special optimization for very short buffers
+ CMPQ inl, $128
+ JBE sealSSE128 // About 15% faster
+
+ // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration
+ MOVOU ·chacha20Constants<>(SB), A0
+ MOVOU (1*16)(keyp), B0
+ MOVOU (2*16)(keyp), C0
+ MOVOU (3*16)(keyp), D0
+
+ // Store state on stack for future use
+ MOVO B0, state1Store
+ MOVO C0, state2Store
+
+ // Load state, increment counter blocks
+ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
+
+ // Store counters
+ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
+ MOVQ $10, itr2
+
+sealSSEIntroLoop:
+ MOVO C3, tmpStore
+ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+ MOVO tmpStore, C3
+ MOVO C1, tmpStore
+ chachaQR(A3, B3, C3, D3, C1)
+ MOVO tmpStore, C1
+ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left
+ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left
+ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left
+
+ MOVO C3, tmpStore
+ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+ MOVO tmpStore, C3
+ MOVO C1, tmpStore
+ chachaQR(A3, B3, C3, D3, C1)
+ MOVO tmpStore, C1
+ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
+ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
+ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
+ DECQ itr2
+ JNE sealSSEIntroLoop
+
+ // Add in the state
+ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
+ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
+ PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
+ PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
+
+ // Clamp and store the key
+ PAND ·polyClampMask<>(SB), A0
+ MOVO A0, rStore
+ MOVO B0, sStore
+
+ // Hash AAD
+ MOVQ ad_len+80(FP), itr2
+ CALL polyHashADInternal<>(SB)
+
+ MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0
+ PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1
+ MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup)
+ MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0
+ PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2
+ MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup)
+
+ MOVQ $128, itr1
+ SUBQ $128, inl
+ LEAQ 128(inp), inp
+
+ MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1
+
+ CMPQ inl, $64
+ JBE sealSSE128SealHash
+
+ MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0
+ PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3
+ MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup)
+
+ ADDQ $64, itr1
+ SUBQ $64, inl
+ LEAQ 64(inp), inp
+
+ MOVQ $2, itr1
+ MOVQ $8, itr2
+
+ CMPQ inl, $64
+ JBE sealSSETail64
+ CMPQ inl, $128
+ JBE sealSSETail128
+ CMPQ inl, $192
+ JBE sealSSETail192
+
+sealSSEMainLoop:
+ // Load state, increment counter blocks
+ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0
+ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+ MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
+
+ // Store counters
+ MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
+
+sealSSEInnerLoop:
+ MOVO C3, tmpStore
+ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+ MOVO tmpStore, C3
+ MOVO C1, tmpStore
+ chachaQR(A3, B3, C3, D3, C1)
+ MOVO tmpStore, C1
+ polyAdd(0(oup))
+ shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left
+ shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left
+ shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left
+ polyMulStage1
+ polyMulStage2
+ LEAQ (2*8)(oup), oup
+ MOVO C3, tmpStore
+ chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+ MOVO tmpStore, C3
+ MOVO C1, tmpStore
+ polyMulStage3
+ chachaQR(A3, B3, C3, D3, C1)
+ MOVO tmpStore, C1
+ polyMulReduceStage
+ shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
+ shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
+ shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
+ DECQ itr2
+ JGE sealSSEInnerLoop
+ polyAdd(0(oup))
+ polyMul
+ LEAQ (2*8)(oup), oup
+ DECQ itr1
+ JG sealSSEInnerLoop
+
+ // Add in the state
+ PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
+ PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
+ PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
+ PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
+ MOVO D3, tmpStore
+
+ // Load - xor - store
+ MOVOU (0*16)(inp), D3; PXOR D3, A0
+ MOVOU (1*16)(inp), D3; PXOR D3, B0
+ MOVOU (2*16)(inp), D3; PXOR D3, C0
+ MOVOU (3*16)(inp), D3; PXOR D3, D0
+ MOVOU A0, (0*16)(oup)
+ MOVOU B0, (1*16)(oup)
+ MOVOU C0, (2*16)(oup)
+ MOVOU D0, (3*16)(oup)
+ MOVO tmpStore, D3
+
+ MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0
+ PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1
+ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
+ MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0
+ PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2
+ MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup)
+ ADDQ $192, inp
+ MOVQ $192, itr1
+ SUBQ $192, inl
+ MOVO A3, A1
+ MOVO B3, B1
+ MOVO C3, C1
+ MOVO D3, D1
+ CMPQ inl, $64
+ JBE sealSSE128SealHash
+ MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0
+ PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3
+ MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup)
+ LEAQ 64(inp), inp
+ SUBQ $64, inl
+ MOVQ $6, itr1
+ MOVQ $4, itr2
+ CMPQ inl, $192
+ JG sealSSEMainLoop
+
+ MOVQ inl, itr1
+ TESTQ inl, inl
+ JE sealSSE128SealHash
+ MOVQ $6, itr1
+ CMPQ inl, $64
+ JBE sealSSETail64
+ CMPQ inl, $128
+ JBE sealSSETail128
+ JMP sealSSETail192
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 64 bytes of plaintext
+sealSSETail64:
+ // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes
+ MOVO ·chacha20Constants<>(SB), A1
+ MOVO state1Store, B1
+ MOVO state2Store, C1
+ MOVO ctr3Store, D1
+ PADDL ·sseIncMask<>(SB), D1
+ MOVO D1, ctr0Store
+
+sealSSETail64LoopA:
+ // Perform ChaCha rounds, while hashing the previously encrypted ciphertext
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+
+sealSSETail64LoopB:
+ chachaQR(A1, B1, C1, D1, T1)
+ shiftB1Left; shiftC1Left; shiftD1Left
+ chachaQR(A1, B1, C1, D1, T1)
+ shiftB1Right; shiftC1Right; shiftD1Right
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+
+ DECQ itr1
+ JG sealSSETail64LoopA
+
+ DECQ itr2
+ JGE sealSSETail64LoopB
+ PADDL ·chacha20Constants<>(SB), A1
+ PADDL state1Store, B1
+ PADDL state2Store, C1
+ PADDL ctr0Store, D1
+
+ JMP sealSSE128Seal
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 128 bytes of plaintext
+sealSSETail128:
+ // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes
+ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store
+ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store
+
+sealSSETail128LoopA:
+ // Perform ChaCha rounds, while hashing the previously encrypted ciphertext
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+
+sealSSETail128LoopB:
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
+ shiftB0Left; shiftC0Left; shiftD0Left
+ shiftB1Left; shiftC1Left; shiftD1Left
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
+ shiftB0Right; shiftC0Right; shiftD0Right
+ shiftB1Right; shiftC1Right; shiftD1Right
+
+ DECQ itr1
+ JG sealSSETail128LoopA
+
+ DECQ itr2
+ JGE sealSSETail128LoopB
+
+ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1
+ PADDL state1Store, B0; PADDL state1Store, B1
+ PADDL state2Store, C0; PADDL state2Store, C1
+ PADDL ctr0Store, D0; PADDL ctr1Store, D1
+
+ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
+ PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0
+ MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup)
+
+ MOVQ $64, itr1
+ LEAQ 64(inp), inp
+ SUBQ $64, inl
+
+ JMP sealSSE128SealHash
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 192 bytes of plaintext
+sealSSETail192:
+ // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes
+ MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store
+ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store
+ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store
+
+sealSSETail192LoopA:
+ // Perform ChaCha rounds, while hashing the previously encrypted ciphertext
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+
+sealSSETail192LoopB:
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+ shiftB0Left; shiftC0Left; shiftD0Left
+ shiftB1Left; shiftC1Left; shiftD1Left
+ shiftB2Left; shiftC2Left; shiftD2Left
+
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+ shiftB0Right; shiftC0Right; shiftD0Right
+ shiftB1Right; shiftC1Right; shiftD1Right
+ shiftB2Right; shiftC2Right; shiftD2Right
+
+ DECQ itr1
+ JG sealSSETail192LoopA
+
+ DECQ itr2
+ JGE sealSSETail192LoopB
+
+ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
+ PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2
+ PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2
+ PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2
+
+ MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
+ PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0
+ MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup)
+ MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3
+ PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1
+ MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
+
+ MOVO A2, A1
+ MOVO B2, B1
+ MOVO C2, C1
+ MOVO D2, D1
+ MOVQ $128, itr1
+ LEAQ 128(inp), inp
+ SUBQ $128, inl
+
+ JMP sealSSE128SealHash
+
+// ----------------------------------------------------------------------------
+// Special seal optimization for buffers smaller than 129 bytes
+sealSSE128:
+	// For up to 128 bytes of ciphertext and 64 bytes for the poly key, we need to process three blocks
+ MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0
+ MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+ MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+ MOVO B0, T1; MOVO C0, T2; MOVO D1, T3
+ MOVQ $10, itr2
+
+sealSSE128InnerCipherLoop:
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+ shiftB0Left; shiftB1Left; shiftB2Left
+ shiftC0Left; shiftC1Left; shiftC2Left
+ shiftD0Left; shiftD1Left; shiftD2Left
+ chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+ shiftB0Right; shiftB1Right; shiftB2Right
+ shiftC0Right; shiftC1Right; shiftC2Right
+ shiftD0Right; shiftD1Right; shiftD2Right
+ DECQ itr2
+ JNE sealSSE128InnerCipherLoop
+
+ // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded
+ PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
+ PADDL T1, B0; PADDL T1, B1; PADDL T1, B2
+ PADDL T2, C1; PADDL T2, C2
+ PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2
+ PAND ·polyClampMask<>(SB), A0
+ MOVOU A0, rStore
+ MOVOU B0, sStore
+
+ // Hash
+ MOVQ ad_len+80(FP), itr2
+ CALL polyHashADInternal<>(SB)
+ XORQ itr1, itr1
+
+sealSSE128SealHash:
+ // itr1 holds the number of bytes encrypted but not yet hashed
+ CMPQ itr1, $16
+ JB sealSSE128Seal
+ polyAdd(0(oup))
+ polyMul
+
+ SUBQ $16, itr1
+ ADDQ $16, oup
+
+ JMP sealSSE128SealHash
+
+sealSSE128Seal:
+ CMPQ inl, $16
+ JB sealSSETail
+ SUBQ $16, inl
+
+	// Load for encryption
+ MOVOU (inp), T0
+ PXOR T0, A1
+ MOVOU A1, (oup)
+ LEAQ (1*16)(inp), inp
+ LEAQ (1*16)(oup), oup
+
+ // Extract for hashing
+ MOVQ A1, t0
+ PSRLDQ $8, A1
+ MOVQ A1, t1
+ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
+ polyMul
+
+ // Shift the stream "left"
+ MOVO B1, A1
+ MOVO C1, B1
+ MOVO D1, C1
+ MOVO A2, D1
+ MOVO B2, A2
+ MOVO C2, B2
+ MOVO D2, C2
+ JMP sealSSE128Seal
+
+sealSSETail:
+ TESTQ inl, inl
+ JE sealSSEFinalize
+
+	// We can only load the plaintext one byte at a time, to avoid reading past the end of the buffer
+ MOVQ inl, itr2
+ SHLQ $4, itr2
+ LEAQ ·andMask<>(SB), t0
+ MOVQ inl, itr1
+ LEAQ -1(inp)(inl*1), inp
+ XORQ t2, t2
+ XORQ t3, t3
+ XORQ AX, AX
+
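+
+	// Assemble the trailing 1 to 15 plaintext bytes into t3:t2, reading backwards
+	// from the last byte so that the first remaining byte lands in the lowest
+	// byte of the little-endian 16-byte block.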
+sealSSETailLoadLoop:
+ SHLQ $8, t2, t3
+ SHLQ $8, t2
+ MOVB (inp), AX
+ XORQ AX, t2
+ LEAQ -1(inp), inp
+ DECQ itr1
+ JNE sealSSETailLoadLoop
+ MOVQ t2, 0+tmpStore
+ MOVQ t3, 8+tmpStore
+ PXOR 0+tmpStore, A1
+ MOVOU A1, (oup)
+ MOVOU -16(t0)(itr2*1), T0
+ PAND T0, A1
+ MOVQ A1, t0
+ PSRLDQ $8, A1
+ MOVQ A1, t1
+ ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
+ polyMul
+
+ ADDQ inl, oup
+
+sealSSEFinalize:
+ // Hash in the buffer lengths
+ ADDQ ad_len+80(FP), acc0
+ ADCQ src_len+56(FP), acc1
+ ADCQ $1, acc2
+ polyMul
+
+ // Final reduce
+ MOVQ acc0, t0
+ MOVQ acc1, t1
+ MOVQ acc2, t2
+ SUBQ $-5, acc0
+ SBBQ $-1, acc1
+ SBBQ $3, acc2
+ CMOVQCS t0, acc0
+ CMOVQCS t1, acc1
+ CMOVQCS t2, acc2
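+	// The SUBQ/SBBQ sequence above subtracts the Poly1305 prime 2^130 - 5 from the
+	// accumulator (the negated immediates let the borrow chain span all three
+	// limbs); the CMOVQCS instructions restore the saved value whenever the
+	// subtraction borrowed, i.e. whenever the accumulator was already below the
+	// prime.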
+
+ // Add in the "s" part of the key
+ ADDQ 0+sStore, acc0
+ ADCQ 8+sStore, acc1
+
+ // Finally store the tag at the end of the message
+ MOVQ acc0, (0*8)(oup)
+ MOVQ acc1, (1*8)(oup)
+ RET
+
+// ----------------------------------------------------------------------------
+// ------------------------- AVX2 Code ----------------------------------------
+chacha20Poly1305Seal_AVX2:
+ VZEROUPPER
+ VMOVDQU ·chacha20Constants<>(SB), AA0
+ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14
+ BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12
+ BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4
+ VPADDD ·avx2InitMask<>(SB), DD0, DD0
+
+	// Special optimizations for very short buffers
+ CMPQ inl, $192
+ JBE seal192AVX2 // 33% faster
+ CMPQ inl, $320
+ JBE seal320AVX2 // 17% faster
+
+ // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream
+ VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+ VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2
+ VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2
+ VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2
+ VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2
+ VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2
+ VMOVDQA DD3, ctr3StoreAVX2
+ MOVQ $10, itr2
+
+sealAVX2IntroLoop:
+ VMOVDQA CC3, tmpStoreAVX2
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
+ VMOVDQA tmpStoreAVX2, CC3
+ VMOVDQA CC1, tmpStoreAVX2
+ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
+ VMOVDQA tmpStoreAVX2, CC1
+
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0
+ VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1
+ VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2
+ VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3
+
+ VMOVDQA CC3, tmpStoreAVX2
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
+ VMOVDQA tmpStoreAVX2, CC3
+ VMOVDQA CC1, tmpStoreAVX2
+ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
+ VMOVDQA tmpStoreAVX2, CC1
+
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0
+ VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1
+ VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2
+ VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3
+ DECQ itr2
+ JNE sealAVX2IntroLoop
+
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+
+ VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127
+ VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key
+ VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95
+
+ // Clamp and store poly key
+ VPAND ·polyClampMask<>(SB), DD0, DD0
+ VMOVDQA DD0, rsStoreAVX2
+
+ // Hash AD
+ MOVQ ad_len+80(FP), itr2
+ CALL polyHashADInternal<>(SB)
+
+ // Can store at least 320 bytes
+ VPXOR (0*32)(inp), AA0, AA0
+ VPXOR (1*32)(inp), CC0, CC0
+ VMOVDQU AA0, (0*32)(oup)
+ VMOVDQU CC0, (1*32)(oup)
+
+ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+ VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0
+ VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup)
+ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+ VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0
+ VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup)
+
+ MOVQ $320, itr1
+ SUBQ $320, inl
+ LEAQ 320(inp), inp
+
+ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0
+ CMPQ inl, $128
+ JBE sealAVX2SealHash
+
+ VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0
+ VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup)
+ SUBQ $128, inl
+ LEAQ 128(inp), inp
+
+ MOVQ $8, itr1
+ MOVQ $2, itr2
+
+ CMPQ inl, $128
+ JBE sealAVX2Tail128
+ CMPQ inl, $256
+ JBE sealAVX2Tail256
+ CMPQ inl, $384
+ JBE sealAVX2Tail384
+ CMPQ inl, $512
+ JBE sealAVX2Tail512
+
+	// We have 448 bytes to hash, but the main loop hashes 512 bytes at a time - perform some rounds before entering the main loop
+ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+ VMOVDQA ctr3StoreAVX2, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+
+ VMOVDQA CC3, tmpStoreAVX2
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
+ VMOVDQA tmpStoreAVX2, CC3
+ VMOVDQA CC1, tmpStoreAVX2
+ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
+ VMOVDQA tmpStoreAVX2, CC1
+
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0
+ VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1
+ VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2
+ VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3
+
+ VMOVDQA CC3, tmpStoreAVX2
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
+ VMOVDQA tmpStoreAVX2, CC3
+ VMOVDQA CC1, tmpStoreAVX2
+ chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
+ VMOVDQA tmpStoreAVX2, CC1
+
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0
+ VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1
+ VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2
+ VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+
+ SUBQ $16, oup // Adjust the pointer
+ MOVQ $9, itr1
+ JMP sealAVX2InternalLoopStart
+
+sealAVX2MainLoop:
+ // Load state, increment counter blocks, store the incremented counters
+ VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+ VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+ MOVQ $10, itr1
+
+sealAVX2InternalLoop:
+ polyAdd(0*8(oup))
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ polyMulStage1_AVX2
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+ polyMulStage2_AVX2
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ polyMulStage3_AVX2
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyMulReduceStage
+
+sealAVX2InternalLoopStart:
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+ polyAdd(2*8(oup))
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ polyMulStage1_AVX2
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyMulStage2_AVX2
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ polyMulStage3_AVX2
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+ polyMulReduceStage
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ polyAdd(4*8(oup))
+ LEAQ (6*8)(oup), oup
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyMulStage1_AVX2
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ polyMulStage2_AVX2
+ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ polyMulStage3_AVX2
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyMulReduceStage
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
+ DECQ itr1
+ JNE sealAVX2InternalLoop
+
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+ VMOVDQA CC3, tmpStoreAVX2
+
+ // We only hashed 480 of the 512 bytes available - hash the remaining 32 here
+ polyAdd(0*8(oup))
+ polyMulAVX2
+ LEAQ (4*8)(oup), oup
+ VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0
+ VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0
+ VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup)
+ VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
+ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
+
+ // and here
+ polyAdd(-2*8(oup))
+ polyMulAVX2
+ VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
+ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
+ VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
+ VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0
+ VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup)
+ LEAQ (32*16)(inp), inp
+ SUBQ $(32*16), inl
+ CMPQ inl, $512
+ JG sealAVX2MainLoop
+
+ // Tail can only hash 480 bytes
+ polyAdd(0*8(oup))
+ polyMulAVX2
+ polyAdd(2*8(oup))
+ polyMulAVX2
+ LEAQ 32(oup), oup
+
+ MOVQ $10, itr1
+ MOVQ $0, itr2
+ CMPQ inl, $128
+ JBE sealAVX2Tail128
+ CMPQ inl, $256
+ JBE sealAVX2Tail256
+ CMPQ inl, $384
+ JBE sealAVX2Tail384
+ JMP sealAVX2Tail512
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 193 bytes
+seal192AVX2:
+ // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks
+ VMOVDQA AA0, AA1
+ VMOVDQA BB0, BB1
+ VMOVDQA CC0, CC1
+ VPADDD ·avx2IncMask<>(SB), DD0, DD1
+ VMOVDQA AA0, AA2
+ VMOVDQA BB0, BB2
+ VMOVDQA CC0, CC2
+ VMOVDQA DD0, DD2
+ VMOVDQA DD1, TT3
+ MOVQ $10, itr2
+
+sealAVX2192InnerCipherLoop:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
+ DECQ itr2
+ JNE sealAVX2192InnerCipherLoop
+ VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1
+ VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1
+ VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1
+ VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1
+ VPERM2I128 $0x02, AA0, BB0, TT0
+
+ // Clamp and store poly key
+ VPAND ·polyClampMask<>(SB), TT0, TT0
+ VMOVDQA TT0, rsStoreAVX2
+
+ // Stream for up to 192 bytes
+ VPERM2I128 $0x13, AA0, BB0, AA0
+ VPERM2I128 $0x13, CC0, DD0, BB0
+ VPERM2I128 $0x02, AA1, BB1, CC0
+ VPERM2I128 $0x02, CC1, DD1, DD0
+ VPERM2I128 $0x13, AA1, BB1, AA1
+ VPERM2I128 $0x13, CC1, DD1, BB1
+
+sealAVX2ShortSeal:
+ // Hash aad
+ MOVQ ad_len+80(FP), itr2
+ CALL polyHashADInternal<>(SB)
+ XORQ itr1, itr1
+
+sealAVX2SealHash:
+ // itr1 holds the number of bytes encrypted but not yet hashed
+ CMPQ itr1, $16
+ JB sealAVX2ShortSealLoop
+ polyAdd(0(oup))
+ polyMul
+ SUBQ $16, itr1
+ ADDQ $16, oup
+ JMP sealAVX2SealHash
+
+sealAVX2ShortSealLoop:
+ CMPQ inl, $32
+ JB sealAVX2ShortTail32
+ SUBQ $32, inl
+
+ // Load for encryption
+ VPXOR (inp), AA0, AA0
+ VMOVDQU AA0, (oup)
+ LEAQ (1*32)(inp), inp
+
+ // Now can hash
+ polyAdd(0*8(oup))
+ polyMulAVX2
+ polyAdd(2*8(oup))
+ polyMulAVX2
+ LEAQ (1*32)(oup), oup
+
+ // Shift stream left
+ VMOVDQA BB0, AA0
+ VMOVDQA CC0, BB0
+ VMOVDQA DD0, CC0
+ VMOVDQA AA1, DD0
+ VMOVDQA BB1, AA1
+ VMOVDQA CC1, BB1
+ VMOVDQA DD1, CC1
+ VMOVDQA AA2, DD1
+ VMOVDQA BB2, AA2
+ JMP sealAVX2ShortSealLoop
+
+sealAVX2ShortTail32:
+ CMPQ inl, $16
+ VMOVDQA A0, A1
+ JB sealAVX2ShortDone
+
+ SUBQ $16, inl
+
+ // Load for encryption
+ VPXOR (inp), A0, T0
+ VMOVDQU T0, (oup)
+ LEAQ (1*16)(inp), inp
+
+ // Hash
+ polyAdd(0*8(oup))
+ polyMulAVX2
+ LEAQ (1*16)(oup), oup
+ VPERM2I128 $0x11, AA0, AA0, AA0
+ VMOVDQA A0, A1
+
+sealAVX2ShortDone:
+ VZEROUPPER
+ JMP sealSSETail
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 321 bytes
+seal320AVX2:
+ // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks
+ VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1
+ VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2
+ VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3
+ MOVQ $10, itr2
+
+sealAVX2320InnerCipherLoop:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
+ DECQ itr2
+ JNE sealAVX2320InnerCipherLoop
+
+ VMOVDQA ·chacha20Constants<>(SB), TT0
+ VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2
+ VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2
+ VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2
+ VMOVDQA ·avx2IncMask<>(SB), TT0
+ VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3
+ VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3
+ VPADDD TT3, DD2, DD2
+
+ // Clamp and store poly key
+ VPERM2I128 $0x02, AA0, BB0, TT0
+ VPAND ·polyClampMask<>(SB), TT0, TT0
+ VMOVDQA TT0, rsStoreAVX2
+
+ // Stream for up to 320 bytes
+ VPERM2I128 $0x13, AA0, BB0, AA0
+ VPERM2I128 $0x13, CC0, DD0, BB0
+ VPERM2I128 $0x02, AA1, BB1, CC0
+ VPERM2I128 $0x02, CC1, DD1, DD0
+ VPERM2I128 $0x13, AA1, BB1, AA1
+ VPERM2I128 $0x13, CC1, DD1, BB1
+ VPERM2I128 $0x02, AA2, BB2, CC1
+ VPERM2I128 $0x02, CC2, DD2, DD1
+ VPERM2I128 $0x13, AA2, BB2, AA2
+ VPERM2I128 $0x13, CC2, DD2, BB2
+ JMP sealAVX2ShortSeal
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 128 bytes of ciphertext
+sealAVX2Tail128:
+	// Need to encrypt up to 128 bytes - prepare two blocks
+	// If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed
+	// If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed
+ VMOVDQA ·chacha20Constants<>(SB), AA0
+ VMOVDQA state1StoreAVX2, BB0
+ VMOVDQA state2StoreAVX2, CC0
+ VMOVDQA ctr3StoreAVX2, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD0
+ VMOVDQA DD0, DD1
+
+sealAVX2Tail128LoopA:
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+
+sealAVX2Tail128LoopB:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
+ polyAdd(0(oup))
+ polyMul
+ VPALIGNR $4, BB0, BB0, BB0
+ VPALIGNR $8, CC0, CC0, CC0
+ VPALIGNR $12, DD0, DD0, DD0
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
+ polyAdd(16(oup))
+ polyMul
+ LEAQ 32(oup), oup
+ VPALIGNR $12, BB0, BB0, BB0
+ VPALIGNR $8, CC0, CC0, CC0
+ VPALIGNR $4, DD0, DD0, DD0
+ DECQ itr1
+ JG sealAVX2Tail128LoopA
+ DECQ itr2
+ JGE sealAVX2Tail128LoopB
+
+ VPADDD ·chacha20Constants<>(SB), AA0, AA1
+ VPADDD state1StoreAVX2, BB0, BB1
+ VPADDD state2StoreAVX2, CC0, CC1
+ VPADDD DD1, DD0, DD1
+
+ VPERM2I128 $0x02, AA1, BB1, AA0
+ VPERM2I128 $0x02, CC1, DD1, BB0
+ VPERM2I128 $0x13, AA1, BB1, CC0
+ VPERM2I128 $0x13, CC1, DD1, DD0
+ JMP sealAVX2ShortSealLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 256 bytes of ciphertext
+sealAVX2Tail256:
+	// Need to encrypt up to 256 bytes - prepare four blocks
+	// If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed
+	// If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed
+ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1
+ VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1
+ VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1
+ VMOVDQA ctr3StoreAVX2, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD1
+ VMOVDQA DD0, TT1
+ VMOVDQA DD1, TT2
+
+sealAVX2Tail256LoopA:
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+
+sealAVX2Tail256LoopB:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ polyAdd(0(oup))
+ polyMul
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+ polyAdd(16(oup))
+ polyMul
+ LEAQ 32(oup), oup
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
+ DECQ itr1
+ JG sealAVX2Tail256LoopA
+ DECQ itr2
+ JGE sealAVX2Tail256LoopB
+
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1
+ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1
+ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1
+ VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1
+ VPERM2I128 $0x02, AA0, BB0, TT0
+ VPERM2I128 $0x02, CC0, DD0, TT1
+ VPERM2I128 $0x13, AA0, BB0, TT2
+ VPERM2I128 $0x13, CC0, DD0, TT3
+ VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3
+ VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup)
+ MOVQ $128, itr1
+ LEAQ 128(inp), inp
+ SUBQ $128, inl
+ VPERM2I128 $0x02, AA1, BB1, AA0
+ VPERM2I128 $0x02, CC1, DD1, BB0
+ VPERM2I128 $0x13, AA1, BB1, CC0
+ VPERM2I128 $0x13, CC1, DD1, DD0
+
+ JMP sealAVX2SealHash
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 384 bytes of ciphertext
+sealAVX2Tail384:
+	// Need to encrypt up to 384 bytes - prepare six blocks
+	// If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed
+	// If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed
+ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2
+ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2
+ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2
+ VMOVDQA ctr3StoreAVX2, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2
+ VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3
+
+sealAVX2Tail384LoopA:
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+
+sealAVX2Tail384LoopB:
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+ polyAdd(0(oup))
+ polyMul
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
+ chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+ polyAdd(16(oup))
+ polyMul
+ LEAQ 32(oup), oup
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
+ DECQ itr1
+ JG sealAVX2Tail384LoopA
+ DECQ itr2
+ JGE sealAVX2Tail384LoopB
+
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2
+ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2
+ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2
+ VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2
+ VPERM2I128 $0x02, AA0, BB0, TT0
+ VPERM2I128 $0x02, CC0, DD0, TT1
+ VPERM2I128 $0x13, AA0, BB0, TT2
+ VPERM2I128 $0x13, CC0, DD0, TT3
+ VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3
+ VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup)
+ VPERM2I128 $0x02, AA1, BB1, TT0
+ VPERM2I128 $0x02, CC1, DD1, TT1
+ VPERM2I128 $0x13, AA1, BB1, TT2
+ VPERM2I128 $0x13, CC1, DD1, TT3
+ VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3
+ VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup)
+ MOVQ $256, itr1
+ LEAQ 256(inp), inp
+ SUBQ $256, inl
+ VPERM2I128 $0x02, AA2, BB2, AA0
+ VPERM2I128 $0x02, CC2, DD2, BB0
+ VPERM2I128 $0x13, AA2, BB2, CC0
+ VPERM2I128 $0x13, CC2, DD2, DD0
+
+ JMP sealAVX2SealHash
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 512 bytes of ciphertext
+sealAVX2Tail512:
+	// Need to encrypt up to 512 bytes - prepare eight blocks
+	// If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed
+	// If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed
+ VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+ VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+ VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+ VMOVDQA ctr3StoreAVX2, DD0
+ VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+ VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+
+sealAVX2Tail512LoopA:
+ polyAdd(0(oup))
+ polyMul
+ LEAQ 16(oup), oup
+
+sealAVX2Tail512LoopB:
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ polyAdd(0*8(oup))
+ polyMulAVX2
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+ VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ polyAdd(2*8(oup))
+ polyMulAVX2
+ LEAQ (4*8)(oup), oup
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+ VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+ VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+ VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+ VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+ VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+ VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+ VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+ VMOVDQA tmpStoreAVX2, CC3
+ VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
+ VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+ VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
+
+ DECQ itr1
+ JG sealAVX2Tail512LoopA
+ DECQ itr2
+ JGE sealAVX2Tail512LoopB
+
+ VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+ VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+ VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+ VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+ VMOVDQA CC3, tmpStoreAVX2
+ VPERM2I128 $0x02, AA0, BB0, CC3
+ VPXOR (0*32)(inp), CC3, CC3
+ VMOVDQU CC3, (0*32)(oup)
+ VPERM2I128 $0x02, CC0, DD0, CC3
+ VPXOR (1*32)(inp), CC3, CC3
+ VMOVDQU CC3, (1*32)(oup)
+ VPERM2I128 $0x13, AA0, BB0, CC3
+ VPXOR (2*32)(inp), CC3, CC3
+ VMOVDQU CC3, (2*32)(oup)
+ VPERM2I128 $0x13, CC0, DD0, CC3
+ VPXOR (3*32)(inp), CC3, CC3
+ VMOVDQU CC3, (3*32)(oup)
+
+ VPERM2I128 $0x02, AA1, BB1, AA0
+ VPERM2I128 $0x02, CC1, DD1, BB0
+ VPERM2I128 $0x13, AA1, BB1, CC0
+ VPERM2I128 $0x13, CC1, DD1, DD0
+ VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
+ VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
+
+ VPERM2I128 $0x02, AA2, BB2, AA0
+ VPERM2I128 $0x02, CC2, DD2, BB0
+ VPERM2I128 $0x13, AA2, BB2, CC0
+ VPERM2I128 $0x13, CC2, DD2, DD0
+ VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
+ VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
+
+ MOVQ $384, itr1
+ LEAQ 384(inp), inp
+ SUBQ $384, inl
+ VPERM2I128 $0x02, AA3, BB3, AA0
+ VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0
+ VPERM2I128 $0x13, AA3, BB3, CC0
+ VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
+
+ JMP sealAVX2SealHash
diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
new file mode 100644
index 0000000..6313898
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
@@ -0,0 +1,81 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chacha20poly1305
+
+import (
+ "encoding/binary"
+
+ "golang.org/x/crypto/chacha20"
+ "golang.org/x/crypto/internal/alias"
+ "golang.org/x/crypto/internal/poly1305"
+)
+
+func writeWithPadding(p *poly1305.MAC, b []byte) {
+ p.Write(b)
+ if rem := len(b) % 16; rem != 0 {
+ var buf [16]byte
+ padLen := 16 - rem
+ p.Write(buf[:padLen])
+ }
+}
+
+func writeUint64(p *poly1305.MAC, n int) {
+ var buf [8]byte
+ binary.LittleEndian.PutUint64(buf[:], uint64(n))
+ p.Write(buf[:])
+}
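+
+// Together, writeWithPadding and writeUint64 produce the Poly1305 input used
+// below: the additional data padded to a 16-byte boundary, the ciphertext
+// padded to a 16-byte boundary, then the two lengths as little-endian 64-bit
+// integers.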
+
+func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte {
+ ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize)
+ ciphertext, tag := out[:len(plaintext)], out[len(plaintext):]
+ if alias.InexactOverlap(out, plaintext) {
+ panic("chacha20poly1305: invalid buffer overlap")
+ }
+
+ var polyKey [32]byte
+ s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce)
+ s.XORKeyStream(polyKey[:], polyKey[:])
+	s.SetCounter(1) // set the counter to 1, skipping the remaining 32 bytes of the first keystream block
+ s.XORKeyStream(ciphertext, plaintext)
+
+ p := poly1305.New(&polyKey)
+ writeWithPadding(p, additionalData)
+ writeWithPadding(p, ciphertext)
+ writeUint64(p, len(additionalData))
+ writeUint64(p, len(plaintext))
+ p.Sum(tag[:0])
+
+ return ret
+}
+
+func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+ tag := ciphertext[len(ciphertext)-16:]
+ ciphertext = ciphertext[:len(ciphertext)-16]
+
+ var polyKey [32]byte
+ s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce)
+ s.XORKeyStream(polyKey[:], polyKey[:])
+	s.SetCounter(1) // set the counter to 1, skipping the remaining 32 bytes of the first keystream block
+
+ p := poly1305.New(&polyKey)
+ writeWithPadding(p, additionalData)
+ writeWithPadding(p, ciphertext)
+ writeUint64(p, len(additionalData))
+ writeUint64(p, len(ciphertext))
+
+ ret, out := sliceForAppend(dst, len(ciphertext))
+ if alias.InexactOverlap(out, ciphertext) {
+ panic("chacha20poly1305: invalid buffer overlap")
+ }
+ if !p.Verify(tag) {
+ for i := range out {
+ out[i] = 0
+ }
+ return nil, errOpen
+ }
+
+ s.XORKeyStream(out, ciphertext)
+ return ret, nil
+}
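
A minimal usage sketch of the exported AEAD built on the code above (assuming only
the package's documented New, Seal and Open API plus the standard library):

	package main

	import (
		"crypto/rand"
		"fmt"

		"golang.org/x/crypto/chacha20poly1305"
	)

	func main() {
		key := make([]byte, chacha20poly1305.KeySize) // 32 bytes
		if _, err := rand.Read(key); err != nil {
			panic(err)
		}
		aead, err := chacha20poly1305.New(key)
		if err != nil {
			panic(err)
		}

		// 12-byte nonce; it must never repeat for the same key
		// (prefer NewX when nonces are generated randomly).
		nonce := make([]byte, aead.NonceSize())
		if _, err := rand.Read(nonce); err != nil {
			panic(err)
		}

		ad := []byte("header")
		ct := aead.Seal(nil, nonce, []byte("hello"), ad) // ciphertext || 16-byte tag
		pt, err := aead.Open(nil, nonce, ct, ad)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", pt)
	}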
diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go
new file mode 100644
index 0000000..34e6ab1
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || !gc || purego
+
+package chacha20poly1305
+
+func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte {
+ return c.sealGeneric(dst, nonce, plaintext, additionalData)
+}
+
+func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+ return c.openGeneric(dst, nonce, ciphertext, additionalData)
+}
diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
new file mode 100644
index 0000000..1cebfe9
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
@@ -0,0 +1,86 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chacha20poly1305
+
+import (
+ "crypto/cipher"
+ "errors"
+
+ "golang.org/x/crypto/chacha20"
+)
+
+type xchacha20poly1305 struct {
+ key [KeySize]byte
+}
+
+// NewX returns an XChaCha20-Poly1305 AEAD that uses the given 256-bit key.
+//
+// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce,
+// suitable to be generated randomly without risk of collisions. It should be
+// preferred when nonce uniqueness cannot be trivially ensured, or whenever
+// nonces are randomly generated.
+func NewX(key []byte) (cipher.AEAD, error) {
+ if len(key) != KeySize {
+ return nil, errors.New("chacha20poly1305: bad key length")
+ }
+ ret := new(xchacha20poly1305)
+ copy(ret.key[:], key)
+ return ret, nil
+}
+
+func (*xchacha20poly1305) NonceSize() int {
+ return NonceSizeX
+}
+
+func (*xchacha20poly1305) Overhead() int {
+ return Overhead
+}
+
+func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
+ if len(nonce) != NonceSizeX {
+ panic("chacha20poly1305: bad nonce length passed to Seal")
+ }
+
+ // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no
+ // size limit. However, since we reuse the ChaCha20-Poly1305 implementation,
+ // the second half of the counter is not available. This is unlikely to be
+ // an issue because the cipher.AEAD API requires the entire message to be in
+ // memory, and the counter overflows at 256 GB.
+ if uint64(len(plaintext)) > (1<<38)-64 {
+ panic("chacha20poly1305: plaintext too large")
+ }
+
+ c := new(chacha20poly1305)
+ hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16])
+ copy(c.key[:], hKey)
+
+ // The first 4 bytes of the final nonce are unused counter space.
+ cNonce := make([]byte, NonceSize)
+ copy(cNonce[4:12], nonce[16:24])
+
+ return c.seal(dst, cNonce[:], plaintext, additionalData)
+}
+
+func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+ if len(nonce) != NonceSizeX {
+ panic("chacha20poly1305: bad nonce length passed to Open")
+ }
+ if len(ciphertext) < 16 {
+ return nil, errOpen
+ }
+ if uint64(len(ciphertext)) > (1<<38)-48 {
+ panic("chacha20poly1305: ciphertext too large")
+ }
+
+ c := new(chacha20poly1305)
+ hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16])
+ copy(c.key[:], hKey)
+
+ // The first 4 bytes of the final nonce are unused counter space.
+ cNonce := make([]byte, NonceSize)
+ copy(cNonce[4:12], nonce[16:24])
+
+ return c.open(dst, cNonce[:], ciphertext, additionalData)
+}
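
For reference, a minimal sketch of how this AEAD is typically consumed through the public golang.org/x/crypto/chacha20poly1305 module path (an assumption here; the vendored copy above is only reachable from the standard library itself). NewX takes a 32-byte key and, unlike the 12-byte-nonce construction, tolerates randomly generated 24-byte nonces:

    package main

    import (
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/chacha20poly1305"
    )

    func main() {
        key := make([]byte, chacha20poly1305.KeySize)
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }
        aead, err := chacha20poly1305.NewX(key)
        if err != nil {
            panic(err)
        }
        // The 24-byte XChaCha20 nonce is large enough to draw at random.
        nonce := make([]byte, chacha20poly1305.NonceSizeX)
        if _, err := rand.Read(nonce); err != nil {
            panic(err)
        }
        ct := aead.Seal(nil, nonce, []byte("hello"), nil)
        pt, err := aead.Open(nil, nonce, ct, nil)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", pt) // hello
    }
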
diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
new file mode 100644
index 0000000..2492f79
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go
@@ -0,0 +1,825 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cryptobyte
+
+import (
+ encoding_asn1 "encoding/asn1"
+ "fmt"
+ "math/big"
+ "reflect"
+ "time"
+
+ "golang.org/x/crypto/cryptobyte/asn1"
+)
+
+// This file contains ASN.1-related methods for String and Builder.
+
+// Builder
+
+// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER.
+func (b *Builder) AddASN1Int64(v int64) {
+ b.addASN1Signed(asn1.INTEGER, v)
+}
+
+// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the
+// given tag.
+func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) {
+ b.addASN1Signed(tag, v)
+}
+
+// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION.
+func (b *Builder) AddASN1Enum(v int64) {
+ b.addASN1Signed(asn1.ENUM, v)
+}
+
+func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) {
+ b.AddASN1(tag, func(c *Builder) {
+ length := 1
+ for i := v; i >= 0x80 || i < -0x80; i >>= 8 {
+ length++
+ }
+
+ for ; length > 0; length-- {
+ i := v >> uint((length-1)*8) & 0xff
+ c.AddUint8(uint8(i))
+ }
+ })
+}
+
+// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER.
+func (b *Builder) AddASN1Uint64(v uint64) {
+ b.AddASN1(asn1.INTEGER, func(c *Builder) {
+ length := 1
+ for i := v; i >= 0x80; i >>= 8 {
+ length++
+ }
+
+ for ; length > 0; length-- {
+ i := v >> uint((length-1)*8) & 0xff
+ c.AddUint8(uint8(i))
+ }
+ })
+}
+
+// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER.
+func (b *Builder) AddASN1BigInt(n *big.Int) {
+ if b.err != nil {
+ return
+ }
+
+ b.AddASN1(asn1.INTEGER, func(c *Builder) {
+ if n.Sign() < 0 {
+ // A negative number has to be converted to two's-complement form. So we
+ // invert and subtract 1. If the most-significant-bit isn't set then
+ // we'll need to pad the beginning with 0xff in order to keep the number
+ // negative.
+ nMinus1 := new(big.Int).Neg(n)
+ nMinus1.Sub(nMinus1, bigOne)
+ bytes := nMinus1.Bytes()
+ for i := range bytes {
+ bytes[i] ^= 0xff
+ }
+ if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+ c.add(0xff)
+ }
+ c.add(bytes...)
+ } else if n.Sign() == 0 {
+ c.add(0)
+ } else {
+ bytes := n.Bytes()
+ if bytes[0]&0x80 != 0 {
+ c.add(0)
+ }
+ c.add(bytes...)
+ }
+ })
+}
+
+// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING.
+func (b *Builder) AddASN1OctetString(bytes []byte) {
+ b.AddASN1(asn1.OCTET_STRING, func(c *Builder) {
+ c.AddBytes(bytes)
+ })
+}
+
+const generalizedTimeFormatStr = "20060102150405Z0700"
+
+// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME.
+func (b *Builder) AddASN1GeneralizedTime(t time.Time) {
+ if t.Year() < 0 || t.Year() > 9999 {
+ b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t)
+ return
+ }
+ b.AddASN1(asn1.GeneralizedTime, func(c *Builder) {
+ c.AddBytes([]byte(t.Format(generalizedTimeFormatStr)))
+ })
+}
+
+// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime.
+func (b *Builder) AddASN1UTCTime(t time.Time) {
+ b.AddASN1(asn1.UTCTime, func(c *Builder) {
+ // As utilized by the X.509 profile, UTCTime can only
+ // represent the years 1950 through 2049.
+ if t.Year() < 1950 || t.Year() >= 2050 {
+ b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t)
+ return
+ }
+ c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr)))
+ })
+}
+
+// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not
+// support BIT STRINGs that are not a whole number of bytes.
+func (b *Builder) AddASN1BitString(data []byte) {
+ b.AddASN1(asn1.BIT_STRING, func(b *Builder) {
+ b.AddUint8(0)
+ b.AddBytes(data)
+ })
+}
+
+func (b *Builder) addBase128Int(n int64) {
+ var length int
+ if n == 0 {
+ length = 1
+ } else {
+ for i := n; i > 0; i >>= 7 {
+ length++
+ }
+ }
+
+ for i := length - 1; i >= 0; i-- {
+ o := byte(n >> uint(i*7))
+ o &= 0x7f
+ if i != 0 {
+ o |= 0x80
+ }
+
+ b.add(o)
+ }
+}
+
+func isValidOID(oid encoding_asn1.ObjectIdentifier) bool {
+ if len(oid) < 2 {
+ return false
+ }
+
+ if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) {
+ return false
+ }
+
+ for _, v := range oid {
+ if v < 0 {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) {
+ b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) {
+ if !isValidOID(oid) {
+ b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid)
+ return
+ }
+
+ b.addBase128Int(int64(oid[0])*40 + int64(oid[1]))
+ for _, v := range oid[2:] {
+ b.addBase128Int(int64(v))
+ }
+ })
+}
+
+func (b *Builder) AddASN1Boolean(v bool) {
+ b.AddASN1(asn1.BOOLEAN, func(b *Builder) {
+ if v {
+ b.AddUint8(0xff)
+ } else {
+ b.AddUint8(0)
+ }
+ })
+}
+
+func (b *Builder) AddASN1NULL() {
+ b.add(uint8(asn1.NULL), 0)
+}
+
+// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if
+// successful or records an error if one occurred.
+func (b *Builder) MarshalASN1(v interface{}) {
+ // NOTE(martinkr): This is somewhat of a hack to allow propagation of
+ // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a
+ // value embedded into a struct, its tag information is lost.
+ if b.err != nil {
+ return
+ }
+ bytes, err := encoding_asn1.Marshal(v)
+ if err != nil {
+ b.err = err
+ return
+ }
+ b.AddBytes(bytes)
+}
+
+// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag.
+// Tags greater than 30 are not supported and result in an error (i.e.
+// low-tag-number form only). The child builder passed to the
+// BuilderContinuation can be used to build the content of the ASN.1 object.
+func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) {
+ if b.err != nil {
+ return
+ }
+ // Identifiers with the low five bits set indicate high-tag-number format
+ // (two or more octets), which we don't support.
+ if tag&0x1f == 0x1f {
+ b.err = fmt.Errorf("cryptobyte: high-tag-number identifier octets not supported: 0x%x", tag)
+ return
+ }
+ b.AddUint8(uint8(tag))
+ b.addLengthPrefixed(1, true, f)
+}
+
+// String
+
+// ReadASN1Boolean decodes an ASN.1 BOOLEAN into out and advances. It reports
+// whether the read was successful.
+func (s *String) ReadASN1Boolean(out *bool) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 {
+ return false
+ }
+
+ switch bytes[0] {
+ case 0:
+ *out = false
+ case 0xff:
+ *out = true
+ default:
+ return false
+ }
+
+ return true
+}
+
+// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does
+// not point to an integer, to a big.Int, or to a []byte it panics. Only
+// positive and zero values can be decoded into []byte, and they are returned as
+// big-endian binary values that share memory with s. Positive values will have
+// no leading zeroes, and zero will be returned as a single zero byte.
+// ReadASN1Integer reports whether the read was successful.
+func (s *String) ReadASN1Integer(out interface{}) bool {
+ switch out := out.(type) {
+ case *int, *int8, *int16, *int32, *int64:
+ var i int64
+ if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) {
+ return false
+ }
+ reflect.ValueOf(out).Elem().SetInt(i)
+ return true
+ case *uint, *uint8, *uint16, *uint32, *uint64:
+ var u uint64
+ if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) {
+ return false
+ }
+ reflect.ValueOf(out).Elem().SetUint(u)
+ return true
+ case *big.Int:
+ return s.readASN1BigInt(out)
+ case *[]byte:
+ return s.readASN1Bytes(out)
+ default:
+ panic("out does not point to an integer type")
+ }
+}
+
+func checkASN1Integer(bytes []byte) bool {
+ if len(bytes) == 0 {
+ // An INTEGER is encoded with at least one octet.
+ return false
+ }
+ if len(bytes) == 1 {
+ return true
+ }
+ if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 {
+ // Value is not minimally encoded.
+ return false
+ }
+ return true
+}
+
+var bigOne = big.NewInt(1)
+
+func (s *String) readASN1BigInt(out *big.Int) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
+ return false
+ }
+ if bytes[0]&0x80 == 0x80 {
+ // Negative number.
+ neg := make([]byte, len(bytes))
+ for i, b := range bytes {
+ neg[i] = ^b
+ }
+ out.SetBytes(neg)
+ out.Add(out, bigOne)
+ out.Neg(out)
+ } else {
+ out.SetBytes(bytes)
+ }
+ return true
+}
+
+func (s *String) readASN1Bytes(out *[]byte) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
+ return false
+ }
+ if bytes[0]&0x80 == 0x80 {
+ return false
+ }
+ for len(bytes) > 1 && bytes[0] == 0 {
+ bytes = bytes[1:]
+ }
+ *out = bytes
+ return true
+}
+
+func (s *String) readASN1Int64(out *int64) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) {
+ return false
+ }
+ return true
+}
+
+func asn1Signed(out *int64, n []byte) bool {
+ length := len(n)
+ if length > 8 {
+ return false
+ }
+ for i := 0; i < length; i++ {
+ *out <<= 8
+ *out |= int64(n[i])
+ }
+ // Shift up and down in order to sign extend the result.
+ *out <<= 64 - uint8(length)*8
+ *out >>= 64 - uint8(length)*8
+ return true
+}
+
+func (s *String) readASN1Uint64(out *uint64) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) {
+ return false
+ }
+ return true
+}
+
+func asn1Unsigned(out *uint64, n []byte) bool {
+ length := len(n)
+ if length > 9 || length == 9 && n[0] != 0 {
+ // Too large for uint64.
+ return false
+ }
+ if n[0]&0x80 != 0 {
+ // Negative number.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ *out <<= 8
+ *out |= uint64(n[i])
+ }
+ return true
+}
+
+// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out
+// and advances. It reports whether the read was successful and resulted in a
+// value that can be represented in an int64.
+func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool {
+ var bytes String
+ return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes)
+}
+
+// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports
+// whether the read was successful.
+func (s *String) ReadASN1Enum(out *int) bool {
+ var bytes String
+ var i int64
+ if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) {
+ return false
+ }
+ if int64(int(i)) != i {
+ return false
+ }
+ *out = int(i)
+ return true
+}
+
+func (s *String) readBase128Int(out *int) bool {
+ ret := 0
+ for i := 0; len(*s) > 0; i++ {
+ if i == 5 {
+ return false
+ }
+ // Avoid overflowing int on a 32-bit platform.
+ // We don't want different behavior based on the architecture.
+ if ret >= 1<<(31-7) {
+ return false
+ }
+ ret <<= 7
+ b := s.read(1)[0]
+
+ // ITU-T X.690, section 8.19.2:
+ // The subidentifier shall be encoded in the fewest possible octets,
+ // that is, the leading octet of the subidentifier shall not have the value 0x80.
+ if i == 0 && b == 0x80 {
+ return false
+ }
+
+ ret |= int(b & 0x7f)
+ if b&0x80 == 0 {
+ *out = ret
+ return true
+ }
+ }
+ return false // truncated
+}
+
+// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and
+// advances. It reports whether the read was successful.
+func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 {
+ return false
+ }
+
+ // In the worst case, we get two elements from the first byte (which is
+ // encoded differently) and then every varint is a single byte long.
+ components := make([]int, len(bytes)+1)
+
+ // The first varint is 40*value1 + value2:
+ // According to this packing, value1 can take the values 0, 1 and 2 only.
+ // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+ // then there are no restrictions on value2.
+ var v int
+ if !bytes.readBase128Int(&v) {
+ return false
+ }
+ if v < 80 {
+ components[0] = v / 40
+ components[1] = v % 40
+ } else {
+ components[0] = 2
+ components[1] = v - 80
+ }
+
+ i := 2
+ for ; len(bytes) > 0; i++ {
+ if !bytes.readBase128Int(&v) {
+ return false
+ }
+ components[i] = v
+ }
+ *out = components[:i]
+ return true
+}
+
+// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and
+// advances. It reports whether the read was successful.
+func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.GeneralizedTime) {
+ return false
+ }
+ t := string(bytes)
+ res, err := time.Parse(generalizedTimeFormatStr, t)
+ if err != nil {
+ return false
+ }
+ if serialized := res.Format(generalizedTimeFormatStr); serialized != t {
+ return false
+ }
+ *out = res
+ return true
+}
+
+const defaultUTCTimeFormatStr = "060102150405Z0700"
+
+// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1UTCTime(out *time.Time) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.UTCTime) {
+ return false
+ }
+ t := string(bytes)
+
+ formatStr := defaultUTCTimeFormatStr
+ var err error
+ res, err := time.Parse(formatStr, t)
+ if err != nil {
+ // Fallback to minute precision if we can't parse second
+ // precision. If we are following X.509 or X.690 we shouldn't
+ // support this, but we do.
+ formatStr = "0601021504Z0700"
+ res, err = time.Parse(formatStr, t)
+ }
+ if err != nil {
+ return false
+ }
+
+ if serialized := res.Format(formatStr); serialized != t {
+ return false
+ }
+
+ if res.Year() >= 2050 {
+ // UTCTime interprets the low order digits 50-99 as 1950-99.
+ // This only applies to its use in the X.509 profile.
+ // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+ res = res.AddDate(-100, 0, 0)
+ }
+ *out = res
+ return true
+}
+
+// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
+ len(bytes)*8/8 != len(bytes) {
+ return false
+ }
+
+ paddingBits := bytes[0]
+ bytes = bytes[1:]
+ if paddingBits > 7 ||
+ len(bytes) == 0 && paddingBits != 0 ||
+ len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
+ return false
+ }
+
+ out.BitLength = len(bytes)*8 - int(paddingBits)
+ out.Bytes = bytes
+ return true
+}
+
+// ReadASN1BitStringAsBytes decodes an ASN.1 BIT STRING into out and advances. It is
+// an error if the BIT STRING is not a whole number of bytes. It reports
+// whether the read was successful.
+func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 {
+ return false
+ }
+
+ paddingBits := bytes[0]
+ if paddingBits != 0 {
+ return false
+ }
+ *out = bytes[1:]
+ return true
+}
+
+// ReadASN1Bytes reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+func (s *String) ReadASN1Bytes(out *[]byte, tag asn1.Tag) bool {
+ return s.ReadASN1((*String)(out), tag)
+}
+
+// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadASN1(out *String, tag asn1.Tag) bool {
+ var t asn1.Tag
+ if !s.ReadAnyASN1(out, &t) || t != tag {
+ return false
+ }
+ return true
+}
+
+// ReadASN1Element reads the contents of a DER-encoded ASN.1 element (including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadASN1Element(out *String, tag asn1.Tag) bool {
+ var t asn1.Tag
+ if !s.ReadAnyASN1Element(out, &t) || t != tag {
+ return false
+ }
+ return true
+}
+
+// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, sets outTag to its tag, and advances.
+// It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool {
+ return s.readASN1(out, outTag, true /* skip header */)
+}
+
+// ReadAnyASN1Element reads the contents of a DER-encoded ASN.1 element
+// (including tag and length bytes) into out, sets outTag to its tag, and
+// advances. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadAnyASN1Element(out *String, outTag *asn1.Tag) bool {
+ return s.readASN1(out, outTag, false /* include header */)
+}
+
+// PeekASN1Tag reports whether the next ASN.1 value on the string starts with
+// the given tag.
+func (s String) PeekASN1Tag(tag asn1.Tag) bool {
+ if len(s) == 0 {
+ return false
+ }
+ return asn1.Tag(s[0]) == tag
+}
+
+// SkipASN1 reads and discards an ASN.1 element with the given tag. It
+// reports whether the operation was successful.
+func (s *String) SkipASN1(tag asn1.Tag) bool {
+ var unused String
+ return s.ReadASN1(&unused, tag)
+}
+
+// ReadOptionalASN1 attempts to read the contents of a DER-encoded ASN.1
+// element (not including tag and length bytes) tagged with the given tag into
+// out. It stores whether an element with the tag was found in outPresent,
+// unless outPresent is nil. It reports whether the read was successful.
+func (s *String) ReadOptionalASN1(out *String, outPresent *bool, tag asn1.Tag) bool {
+ present := s.PeekASN1Tag(tag)
+ if outPresent != nil {
+ *outPresent = present
+ }
+ if present && !s.ReadASN1(out, tag) {
+ return false
+ }
+ return true
+}
+
+// SkipOptionalASN1 advances s over an ASN.1 element with the given tag, or
+// else leaves s unchanged. It reports whether the operation was successful.
+func (s *String) SkipOptionalASN1(tag asn1.Tag) bool {
+ if !s.PeekASN1Tag(tag) {
+ return true
+ }
+ var unused String
+ return s.ReadASN1(&unused, tag)
+}
+
+// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER explicitly
+// tagged with tag into out and advances. If no element with a matching tag is
+// present, it writes defaultValue into out instead. Otherwise, it behaves like
+// ReadASN1Integer.
+func (s *String) ReadOptionalASN1Integer(out interface{}, tag asn1.Tag, defaultValue interface{}) bool {
+ var present bool
+ var i String
+ if !s.ReadOptionalASN1(&i, &present, tag) {
+ return false
+ }
+ if !present {
+ switch out.(type) {
+ case *int, *int8, *int16, *int32, *int64,
+ *uint, *uint8, *uint16, *uint32, *uint64, *[]byte:
+ reflect.ValueOf(out).Elem().Set(reflect.ValueOf(defaultValue))
+ case *big.Int:
+ if defaultValue, ok := defaultValue.(*big.Int); ok {
+ out.(*big.Int).Set(defaultValue)
+ } else {
+ panic("out points to big.Int, but defaultValue does not")
+ }
+ default:
+ panic("invalid integer type")
+ }
+ return true
+ }
+ if !i.ReadASN1Integer(out) || !i.Empty() {
+ return false
+ }
+ return true
+}
+
+// ReadOptionalASN1OctetString attempts to read an optional ASN.1 OCTET STRING
+// explicitly tagged with tag into out and advances. If no element with a
+// matching tag is present, it sets "out" to nil instead. It reports
+// whether the read was successful.
+func (s *String) ReadOptionalASN1OctetString(out *[]byte, outPresent *bool, tag asn1.Tag) bool {
+ var present bool
+ var child String
+ if !s.ReadOptionalASN1(&child, &present, tag) {
+ return false
+ }
+ if outPresent != nil {
+ *outPresent = present
+ }
+ if present {
+ var oct String
+ if !child.ReadASN1(&oct, asn1.OCTET_STRING) || !child.Empty() {
+ return false
+ }
+ *out = oct
+ } else {
+ *out = nil
+ }
+ return true
+}
+
+// ReadOptionalASN1Boolean attempts to read an optional ASN.1 BOOLEAN
+// explicitly tagged with tag into out and advances. If no element with a
+// matching tag is present, it sets "out" to defaultValue instead. It reports
+// whether the read was successful.
+func (s *String) ReadOptionalASN1Boolean(out *bool, tag asn1.Tag, defaultValue bool) bool {
+ var present bool
+ var child String
+ if !s.ReadOptionalASN1(&child, &present, tag) {
+ return false
+ }
+
+ if !present {
+ *out = defaultValue
+ return true
+ }
+
+ return child.ReadASN1Boolean(out)
+}
+
+func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
+ if len(*s) < 2 {
+ return false
+ }
+ tag, lenByte := (*s)[0], (*s)[1]
+
+ if tag&0x1f == 0x1f {
+ // ITU-T X.690 section 8.1.2
+ //
+ // An identifier octet with a tag part of 0x1f indicates a high-tag-number
+ // form identifier with two or more octets. We only support tags less than
+ // 31 (i.e. low-tag-number form, single octet identifier).
+ return false
+ }
+
+ if outTag != nil {
+ *outTag = asn1.Tag(tag)
+ }
+
+ // ITU-T X.690 section 8.1.3
+ //
+ // Bit 8 of the first length byte indicates whether the length is short- or
+ // long-form.
+ var length, headerLen uint32 // length includes headerLen
+ if lenByte&0x80 == 0 {
+ // Short-form length (section 8.1.3.4), encoded in bits 1-7.
+ length = uint32(lenByte) + 2
+ headerLen = 2
+ } else {
+ // Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
+ // used to encode the length.
+ lenLen := lenByte & 0x7f
+ var len32 uint32
+
+ if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
+ return false
+ }
+
+ lenBytes := String((*s)[2 : 2+lenLen])
+ if !lenBytes.readUnsigned(&len32, int(lenLen)) {
+ return false
+ }
+
+ // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
+ // with the minimum number of octets.
+ if len32 < 128 {
+ // Length should have used short-form encoding.
+ return false
+ }
+ if len32>>((lenLen-1)*8) == 0 {
+ // Leading octet is 0. Length should have been at least one byte shorter.
+ return false
+ }
+
+ headerLen = 2 + uint32(lenLen)
+ if headerLen+len32 < len32 {
+ // Overflow.
+ return false
+ }
+ length = headerLen + len32
+ }
+
+ if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
+ return false
+ }
+ if skipHeader && !out.Skip(int(headerLen)) {
+ panic("cryptobyte: internal error")
+ }
+
+ return true
+}
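
A minimal sketch of the round trip the methods above enable, again via the public golang.org/x/crypto/cryptobyte module path rather than this vendored copy: a Builder serializes a SEQUENCE, and a String parses it back, with every read reporting success via its boolean return.

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
        "golang.org/x/crypto/cryptobyte/asn1"
    )

    func main() {
        // Build: SEQUENCE { INTEGER 42, BOOLEAN TRUE }
        var b cryptobyte.Builder
        b.AddASN1(asn1.SEQUENCE, func(seq *cryptobyte.Builder) {
            seq.AddASN1Int64(42)
            seq.AddASN1Boolean(true)
        })
        der, err := b.Bytes()
        if err != nil {
            panic(err)
        }

        // Parse it back.
        var (
            inner cryptobyte.String
            n     int64
            flag  bool
        )
        s := cryptobyte.String(der)
        if !s.ReadASN1(&inner, asn1.SEQUENCE) ||
            !inner.ReadASN1Integer(&n) ||
            !inner.ReadASN1Boolean(&flag) ||
            !inner.Empty() {
            panic("malformed input")
        }
        fmt.Println(n, flag) // 42 true
    }
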
diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
new file mode 100644
index 0000000..cda8e3e
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 contains supporting types for parsing and building ASN.1
+// messages with the cryptobyte package.
+package asn1 // import "golang.org/x/crypto/cryptobyte/asn1"
+
+// Tag represents an ASN.1 identifier octet, consisting of a tag number
+// (indicating a type) and class (such as context-specific or constructed).
+//
+// Methods in the cryptobyte package only support the low-tag-number form, i.e.
+// a single identifier octet with bits 7-8 encoding the class and bits 1-6
+// encoding the tag number.
+type Tag uint8
+
+const (
+ classConstructed = 0x20
+ classContextSpecific = 0x80
+)
+
+// Constructed returns t with the constructed class bit set.
+func (t Tag) Constructed() Tag { return t | classConstructed }
+
+// ContextSpecific returns t with the context-specific class bit set.
+func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
+
+// The following is a list of standard tag and class combinations.
+const (
+ BOOLEAN = Tag(1)
+ INTEGER = Tag(2)
+ BIT_STRING = Tag(3)
+ OCTET_STRING = Tag(4)
+ NULL = Tag(5)
+ OBJECT_IDENTIFIER = Tag(6)
+ ENUM = Tag(10)
+ UTF8String = Tag(12)
+ SEQUENCE = Tag(16 | classConstructed)
+ SET = Tag(17 | classConstructed)
+ PrintableString = Tag(19)
+ T61String = Tag(20)
+ IA5String = Tag(22)
+ UTCTime = Tag(23)
+ GeneralizedTime = Tag(24)
+ GeneralString = Tag(27)
+)
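
As a small illustration of the Tag helpers above (a sketch, not part of the vendored file): explicit, context-specific tags such as [2] in X.509-style structures are built by OR-ing in the class bits, then passed to the cryptobyte read and write methods.

    // [2] EXPLICIT ... uses a constructed, context-specific identifier octet.
    tag2 := asn1.Tag(2).Constructed().ContextSpecific() // 0xa2

    // e.g. reading an optional field tagged [2] from a cryptobyte.String s:
    //   var ext cryptobyte.String
    //   var present bool
    //   ok := s.ReadOptionalASN1(&ext, &present, tag2)
    _ = tag2
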
diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/builder.go b/src/vendor/golang.org/x/crypto/cryptobyte/builder.go
new file mode 100644
index 0000000..cf254f5
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/cryptobyte/builder.go
@@ -0,0 +1,350 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cryptobyte
+
+import (
+ "errors"
+ "fmt"
+)
+
+// A Builder builds byte strings from fixed-length and length-prefixed values.
+// Builders either allocate space as needed, or are ‘fixed’, which means that
+// they write into a given buffer and produce an error if it's exhausted.
+//
+// The zero value is a usable Builder that allocates space as needed.
+//
+// Simple values are marshaled and appended to a Builder using methods on the
+// Builder. Length-prefixed values are marshaled by providing a
+// BuilderContinuation, which is a function that writes the inner contents of
+// the value to a given Builder. See the documentation for BuilderContinuation
+// for details.
+type Builder struct {
+ err error
+ result []byte
+ fixedSize bool
+ child *Builder
+ offset int
+ pendingLenLen int
+ pendingIsASN1 bool
+ inContinuation *bool
+}
+
+// NewBuilder creates a Builder that appends its output to the given buffer.
+// Like append(), the slice will be reallocated if its capacity is exceeded.
+// Use Bytes to get the final buffer.
+func NewBuilder(buffer []byte) *Builder {
+ return &Builder{
+ result: buffer,
+ }
+}
+
+// NewFixedBuilder creates a Builder that appends its output into the given
+// buffer. This builder does not reallocate the output buffer. Writes that
+// would exceed the buffer's capacity are treated as an error.
+func NewFixedBuilder(buffer []byte) *Builder {
+ return &Builder{
+ result: buffer,
+ fixedSize: true,
+ }
+}
+
+// SetError sets the value to be returned as the error from Bytes. Writes
+// performed after calling SetError are ignored.
+func (b *Builder) SetError(err error) {
+ b.err = err
+}
+
+// Bytes returns the bytes written by the builder or an error if one has
+// occurred during building.
+func (b *Builder) Bytes() ([]byte, error) {
+ if b.err != nil {
+ return nil, b.err
+ }
+ return b.result[b.offset:], nil
+}
+
+// BytesOrPanic returns the bytes written by the builder or panics if an error
+// has occurred during building.
+func (b *Builder) BytesOrPanic() []byte {
+ if b.err != nil {
+ panic(b.err)
+ }
+ return b.result[b.offset:]
+}
+
+// AddUint8 appends an 8-bit value to the byte string.
+func (b *Builder) AddUint8(v uint8) {
+ b.add(byte(v))
+}
+
+// AddUint16 appends a big-endian, 16-bit value to the byte string.
+func (b *Builder) AddUint16(v uint16) {
+ b.add(byte(v>>8), byte(v))
+}
+
+// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest
+// byte of the 32-bit input value is silently truncated.
+func (b *Builder) AddUint24(v uint32) {
+ b.add(byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint32 appends a big-endian, 32-bit value to the byte string.
+func (b *Builder) AddUint32(v uint32) {
+ b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint48 appends a big-endian, 48-bit value to the byte string.
+func (b *Builder) AddUint48(v uint64) {
+ b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint64 appends a big-endian, 64-bit value to the byte string.
+func (b *Builder) AddUint64(v uint64) {
+ b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddBytes appends a sequence of bytes to the byte string.
+func (b *Builder) AddBytes(v []byte) {
+ b.add(v...)
+}
+
+// BuilderContinuation is a continuation-passing interface for building
+// length-prefixed byte sequences. Builder methods for length-prefixed
+// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
+// supplied to them. The child builder passed to the continuation can be used
+// to build the content of the length-prefixed sequence. For example:
+//
+// parent := cryptobyte.NewBuilder()
+// parent.AddUint8LengthPrefixed(func (child *Builder) {
+// child.AddUint8(42)
+// child.AddUint8LengthPrefixed(func (grandchild *Builder) {
+// grandchild.AddUint8(5)
+// })
+// })
+//
+// It is an error to write more bytes to the child than allowed by the reserved
+// length prefix. After the continuation returns, the child must be considered
+// invalid, i.e. users must not store any copies or references of the child
+// that outlive the continuation.
+//
+// If the continuation panics with a value of type BuildError then the inner
+// error will be returned as the error from Bytes. If the child panics
+// otherwise then Bytes will repanic with the same value.
+type BuilderContinuation func(child *Builder)
+
+// BuildError wraps an error. If a BuilderContinuation panics with this value,
+// the panic will be recovered and the inner error will be returned from
+// Builder.Bytes.
+type BuildError struct {
+ Err error
+}
+
+// AddUint8LengthPrefixed adds an 8-bit length-prefixed byte sequence.
+func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(1, false, f)
+}
+
+// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
+func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(2, false, f)
+}
+
+// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
+func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(3, false, f)
+}
+
+// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
+func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(4, false, f)
+}
+
+func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) {
+ if !*b.inContinuation {
+ *b.inContinuation = true
+
+ defer func() {
+ *b.inContinuation = false
+
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ if buildError, ok := r.(BuildError); ok {
+ b.err = buildError.Err
+ } else {
+ panic(r)
+ }
+ }()
+ }
+
+ f(arg)
+}
+
+func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) {
+ // Subsequent writes can be ignored if the builder has encountered an error.
+ if b.err != nil {
+ return
+ }
+
+ offset := len(b.result)
+ b.add(make([]byte, lenLen)...)
+
+ if b.inContinuation == nil {
+ b.inContinuation = new(bool)
+ }
+
+ b.child = &Builder{
+ result: b.result,
+ fixedSize: b.fixedSize,
+ offset: offset,
+ pendingLenLen: lenLen,
+ pendingIsASN1: isASN1,
+ inContinuation: b.inContinuation,
+ }
+
+ b.callContinuation(f, b.child)
+ b.flushChild()
+ if b.child != nil {
+ panic("cryptobyte: internal error")
+ }
+}
+
+func (b *Builder) flushChild() {
+ if b.child == nil {
+ return
+ }
+ b.child.flushChild()
+ child := b.child
+ b.child = nil
+
+ if child.err != nil {
+ b.err = child.err
+ return
+ }
+
+ length := len(child.result) - child.pendingLenLen - child.offset
+
+ if length < 0 {
+ panic("cryptobyte: internal error") // result unexpectedly shrunk
+ }
+
+ if child.pendingIsASN1 {
+ // For ASN.1, we reserved a single byte for the length. If that turned out
+ // to be incorrect, we have to move the contents along in order to make
+ // space.
+ if child.pendingLenLen != 1 {
+ panic("cryptobyte: internal error")
+ }
+ var lenLen, lenByte uint8
+ if int64(length) > 0xfffffffe {
+ b.err = errors.New("pending ASN.1 child too long")
+ return
+ } else if length > 0xffffff {
+ lenLen = 5
+ lenByte = 0x80 | 4
+ } else if length > 0xffff {
+ lenLen = 4
+ lenByte = 0x80 | 3
+ } else if length > 0xff {
+ lenLen = 3
+ lenByte = 0x80 | 2
+ } else if length > 0x7f {
+ lenLen = 2
+ lenByte = 0x80 | 1
+ } else {
+ lenLen = 1
+ lenByte = uint8(length)
+ length = 0
+ }
+
+ // Insert the initial length byte, make space for successive length bytes,
+ // and adjust the offset.
+ child.result[child.offset] = lenByte
+ extraBytes := int(lenLen - 1)
+ if extraBytes != 0 {
+ child.add(make([]byte, extraBytes)...)
+ childStart := child.offset + child.pendingLenLen
+ copy(child.result[childStart+extraBytes:], child.result[childStart:])
+ }
+ child.offset++
+ child.pendingLenLen = extraBytes
+ }
+
+ l := length
+ for i := child.pendingLenLen - 1; i >= 0; i-- {
+ child.result[child.offset+i] = uint8(l)
+ l >>= 8
+ }
+ if l != 0 {
+ b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen)
+ return
+ }
+
+ if b.fixedSize && &b.result[0] != &child.result[0] {
+ panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
+ }
+
+ b.result = child.result
+}
+
+func (b *Builder) add(bytes ...byte) {
+ if b.err != nil {
+ return
+ }
+ if b.child != nil {
+ panic("cryptobyte: attempted write while child is pending")
+ }
+ if len(b.result)+len(bytes) < len(bytes) {
+ b.err = errors.New("cryptobyte: length overflow")
+ }
+ if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) {
+ b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer")
+ return
+ }
+ b.result = append(b.result, bytes...)
+}
+
+// Unwrite rolls back non-negative n bytes written directly to the Builder.
+// An attempt by a child builder passed to a continuation to unwrite bytes
+// from its parent will panic.
+func (b *Builder) Unwrite(n int) {
+ if b.err != nil {
+ return
+ }
+ if b.child != nil {
+ panic("cryptobyte: attempted unwrite while child is pending")
+ }
+ length := len(b.result) - b.pendingLenLen - b.offset
+ if length < 0 {
+ panic("cryptobyte: internal error")
+ }
+ if n < 0 {
+ panic("cryptobyte: attempted to unwrite negative number of bytes")
+ }
+ if n > length {
+ panic("cryptobyte: attempted to unwrite more than was written")
+ }
+ b.result = b.result[:len(b.result)-n]
+}
+
+// A MarshalingValue marshals itself into a Builder.
+type MarshalingValue interface {
+ // Marshal is called by Builder.AddValue. It receives a pointer to a builder
+ // to marshal itself into. It may return an error that occurred during
+ // marshaling, such as unset or invalid values.
+ Marshal(b *Builder) error
+}
+
+// AddValue calls Marshal on v, passing a pointer to the builder to append to.
+// If Marshal returns an error, it is set on the Builder so that subsequent
+// appends don't have an effect.
+func (b *Builder) AddValue(v MarshalingValue) {
+ err := v.Marshal(b)
+ if err != nil {
+ b.err = err
+ }
+}
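
A short sketch of the two Builder flavours described above (public module path assumed as before): the zero-value Builder grows as needed, while NewFixedBuilder reports an error instead of reallocating.

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        // Growing builder: the uint16 length prefix is filled in once the
        // continuation returns, so the caller never computes it by hand.
        var b cryptobyte.Builder
        b.AddUint16LengthPrefixed(func(body *cryptobyte.Builder) {
            body.AddUint8(0x03)
            body.AddUint8(0x03)
        })
        out, err := b.Bytes()
        fmt.Printf("%x %v\n", out, err) // 00020303 <nil>

        // Fixed builder: exceeding the buffer's capacity surfaces as an
        // error from Bytes rather than triggering a reallocation.
        fixed := cryptobyte.NewFixedBuilder(make([]byte, 0, 1))
        fixed.AddUint32(1)
        _, err = fixed.Bytes()
        fmt.Println(err)
    }
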
diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/string.go b/src/vendor/golang.org/x/crypto/cryptobyte/string.go
new file mode 100644
index 0000000..10692a8
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -0,0 +1,183 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cryptobyte contains types that help with parsing and constructing
+// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
+// contains useful ASN.1 constants.)
+//
+// The String type is for parsing. It wraps a []byte slice and provides helper
+// functions for consuming structures, value by value.
+//
+// The Builder type is for constructing messages. It provides helper functions
+// for appending values and also for appending length-prefixed submessages –
+// without having to worry about calculating the length prefix ahead of time.
+//
+// See the documentation and examples for the Builder and String types to get
+// started.
+package cryptobyte // import "golang.org/x/crypto/cryptobyte"
+
+// String represents a string of bytes. It provides methods for parsing
+// fixed-length and length-prefixed values from it.
+type String []byte
+
+// read advances a String by n bytes and returns them. If less than n bytes
+// remain, it returns nil.
+func (s *String) read(n int) []byte {
+ if len(*s) < n || n < 0 {
+ return nil
+ }
+ v := (*s)[:n]
+ *s = (*s)[n:]
+ return v
+}
+
+// Skip advances the String by n bytes and reports whether it was successful.
+func (s *String) Skip(n int) bool {
+ return s.read(n) != nil
+}
+
+// ReadUint8 decodes an 8-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint8(out *uint8) bool {
+ v := s.read(1)
+ if v == nil {
+ return false
+ }
+ *out = uint8(v[0])
+ return true
+}
+
+// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint16(out *uint16) bool {
+ v := s.read(2)
+ if v == nil {
+ return false
+ }
+ *out = uint16(v[0])<<8 | uint16(v[1])
+ return true
+}
+
+// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint24(out *uint32) bool {
+ v := s.read(3)
+ if v == nil {
+ return false
+ }
+ *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2])
+ return true
+}
+
+// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint32(out *uint32) bool {
+ v := s.read(4)
+ if v == nil {
+ return false
+ }
+ *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3])
+ return true
+}
+
+// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint48(out *uint64) bool {
+ v := s.read(6)
+ if v == nil {
+ return false
+ }
+ *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5])
+ return true
+}
+
+// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint64(out *uint64) bool {
+ v := s.read(8)
+ if v == nil {
+ return false
+ }
+ *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7])
+ return true
+}
+
+func (s *String) readUnsigned(out *uint32, length int) bool {
+ v := s.read(length)
+ if v == nil {
+ return false
+ }
+ var result uint32
+ for i := 0; i < length; i++ {
+ result <<= 8
+ result |= uint32(v[i])
+ }
+ *out = result
+ return true
+}
+
+func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool {
+ lenBytes := s.read(lenLen)
+ if lenBytes == nil {
+ return false
+ }
+ var length uint32
+ for _, b := range lenBytes {
+ length = length << 8
+ length = length | uint32(b)
+ }
+ v := s.read(int(length))
+ if v == nil {
+ return false
+ }
+ *outChild = v
+ return true
+}
+
+// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value
+// into out and advances over it. It reports whether the read was successful.
+func (s *String) ReadUint8LengthPrefixed(out *String) bool {
+ return s.readLengthPrefixed(1, out)
+}
+
+// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit
+// length-prefixed value into out and advances over it. It reports whether the
+// read was successful.
+func (s *String) ReadUint16LengthPrefixed(out *String) bool {
+ return s.readLengthPrefixed(2, out)
+}
+
+// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit
+// length-prefixed value into out and advances over it. It reports whether
+// the read was successful.
+func (s *String) ReadUint24LengthPrefixed(out *String) bool {
+ return s.readLengthPrefixed(3, out)
+}
+
+// ReadBytes reads n bytes into out and advances over them. It reports
+// whether the read was successful.
+func (s *String) ReadBytes(out *[]byte, n int) bool {
+ v := s.read(n)
+ if v == nil {
+ return false
+ }
+ *out = v
+ return true
+}
+
+// CopyBytes copies len(out) bytes into out and advances over them. It reports
+// whether the copy operation was successful.
+func (s *String) CopyBytes(out []byte) bool {
+ n := len(out)
+ v := s.read(n)
+ if v == nil {
+ return false
+ }
+ return copy(out, v) == n
+}
+
+// Empty reports whether the string does not contain any bytes.
+func (s String) Empty() bool {
+ return len(s) == 0
+}
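
And the matching parse side, a sketch of consuming a length-prefixed value with String (same assumed module path):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        s := cryptobyte.String([]byte{0x00, 0x02, 0x03, 0x03})

        var body cryptobyte.String
        var ver uint16
        // Each read advances s and reports success; chaining the results
        // keeps malformed-input handling in one place.
        if !s.ReadUint16LengthPrefixed(&body) || !body.ReadUint16(&ver) || !s.Empty() {
            panic("malformed input")
        }
        fmt.Printf("0x%04x\n", ver) // 0x0303
    }
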
diff --git a/src/vendor/golang.org/x/crypto/hkdf/hkdf.go b/src/vendor/golang.org/x/crypto/hkdf/hkdf.go
new file mode 100644
index 0000000..f4ded5f
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/hkdf/hkdf.go
@@ -0,0 +1,95 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation
+// Function (HKDF) as defined in RFC 5869.
+//
+// HKDF is a cryptographic key derivation function (KDF) with the goal of
+// expanding limited input keying material into one or more cryptographically
+// strong secret keys.
+package hkdf // import "golang.org/x/crypto/hkdf"
+
+import (
+ "crypto/hmac"
+ "errors"
+ "hash"
+ "io"
+)
+
+// Extract generates a pseudorandom key for use with Expand from an input secret
+// and an optional independent salt.
+//
+// Only use this function if you need to reuse the extracted key with multiple
+// Expand invocations and different context values. Most common scenarios,
+// including the generation of multiple keys, should use New instead.
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte {
+ if salt == nil {
+ salt = make([]byte, hash().Size())
+ }
+ extractor := hmac.New(hash, salt)
+ extractor.Write(secret)
+ return extractor.Sum(nil)
+}
+
+type hkdf struct {
+ expander hash.Hash
+ size int
+
+ info []byte
+ counter byte
+
+ prev []byte
+ buf []byte
+}
+
+func (f *hkdf) Read(p []byte) (int, error) {
+ // Check whether enough data can be generated
+ need := len(p)
+ remains := len(f.buf) + int(255-f.counter+1)*f.size
+ if remains < need {
+ return 0, errors.New("hkdf: entropy limit reached")
+ }
+ // Read any leftover from the buffer
+ n := copy(p, f.buf)
+ p = p[n:]
+
+ // Fill the rest of the buffer
+ for len(p) > 0 {
+ if f.counter > 1 {
+ f.expander.Reset()
+ }
+ f.expander.Write(f.prev)
+ f.expander.Write(f.info)
+ f.expander.Write([]byte{f.counter})
+ f.prev = f.expander.Sum(f.prev[:0])
+ f.counter++
+
+ // Copy the new batch into p
+ f.buf = f.prev
+ n = copy(p, f.buf)
+ p = p[n:]
+ }
+ // Save leftovers for next run
+ f.buf = f.buf[n:]
+
+ return need, nil
+}
+
+// Expand returns a Reader, from which keys can be read, using the given
+// pseudorandom key and optional context info, skipping the extraction step.
+//
+// The pseudorandomKey should have been generated by Extract, or be a uniformly
+// random or pseudorandom cryptographically strong key. See RFC 5869, Section
+// 3.3. Most common scenarios will want to use New instead.
+func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader {
+ expander := hmac.New(hash, pseudorandomKey)
+ return &hkdf{expander, expander.Size(), info, 1, nil, nil}
+}
+
+// New returns a Reader, from which keys can be read, using the given hash,
+// secret, salt and context info. Salt and info can be nil.
+func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
+ prk := Extract(hash, secret, salt)
+ return Expand(hash, prk, info)
+}
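
A minimal sketch of deriving keys with this package through the public golang.org/x/crypto/hkdf path (New covers the common extract-then-expand case; Extract and Expand are only needed when the extracted key is reused across contexts):

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"

        "golang.org/x/crypto/hkdf"
    )

    func main() {
        secret := []byte("input keying material")
        salt := []byte("optional, ideally random, salt")
        info := []byte("application-specific context")

        r := hkdf.New(sha256.New, secret, salt, info)

        // Read as many independent keys as needed, up to 255 * Hash.Size() bytes.
        var k1, k2 [32]byte
        if _, err := io.ReadFull(r, k1[:]); err != nil {
            panic(err)
        }
        if _, err := io.ReadFull(r, k2[:]); err != nil {
            panic(err)
        }
        fmt.Printf("%x\n%x\n", k1, k2)
    }
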
diff --git a/src/vendor/golang.org/x/crypto/internal/alias/alias.go b/src/vendor/golang.org/x/crypto/internal/alias/alias.go
new file mode 100644
index 0000000..551ff0c
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/alias/alias.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !purego
+
+// Package alias implements memory aliasing tests.
+package alias
+
+import "unsafe"
+
+// AnyOverlap reports whether x and y share memory at any (not necessarily
+// corresponding) index. The memory beyond the slice length is ignored.
+func AnyOverlap(x, y []byte) bool {
+ return len(x) > 0 && len(y) > 0 &&
+ uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
+ uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
+}
+
+// InexactOverlap reports whether x and y share memory at any non-corresponding
+// index. The memory beyond the slice length is ignored. Note that x and y can
+// have different lengths and still not have any inexact overlap.
+//
+// InexactOverlap can be used to implement the requirements of the crypto/cipher
+// AEAD, Block, BlockMode and Stream interfaces.
+func InexactOverlap(x, y []byte) bool {
+ if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
+ return false
+ }
+ return AnyOverlap(x, y)
+}
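
A sketch of the pattern these helpers support in the sibling cipher packages (the package is internal, so this is illustrative only, and checkOverlap is a hypothetical wrapper rather than an existing API): in-place operation is allowed only when dst and src line up exactly, and any shifted overlap is rejected up front.

    // Hypothetical helper mirroring how AEAD and stream-cipher code in this
    // tree guards its buffers before encrypting in place.
    func checkOverlap(out, in []byte) {
        if alias.InexactOverlap(out, in) {
            panic("cipher: invalid buffer overlap")
        }
    }
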
diff --git a/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
new file mode 100644
index 0000000..6fe61b5
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build purego
+
+// Package alias implements memory aliasing tests.
+package alias
+
+// This is the Google App Engine standard variant based on reflect
+// because the unsafe package and cgo are disallowed.
+
+import "reflect"
+
+// AnyOverlap reports whether x and y share memory at any (not necessarily
+// corresponding) index. The memory beyond the slice length is ignored.
+func AnyOverlap(x, y []byte) bool {
+ return len(x) > 0 && len(y) > 0 &&
+ reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
+ reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
+}
+
+// InexactOverlap reports whether x and y share memory at any non-corresponding
+// index. The memory beyond the slice length is ignored. Note that x and y can
+// have different lengths and still not have any inexact overlap.
+//
+// InexactOverlap can be used to implement the requirements of the crypto/cipher
+// AEAD, Block, BlockMode and Stream interfaces.
+func InexactOverlap(x, y []byte) bool {
+ if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
+ return false
+ }
+ return AnyOverlap(x, y)
+}
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
new file mode 100644
index 0000000..d33c889
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.13
+
+package poly1305
+
+// Generic fallbacks for the math/bits intrinsics, copied from
+// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sub64 had
+// variable time fallbacks until Go 1.13.
+
+func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
+ sum = x + y + carry
+ carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
+ return
+}
+
+func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
+ diff = x - y - borrow
+ borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
+ return
+}
+
+func bitsMul64(x, y uint64) (hi, lo uint64) {
+ const mask32 = 1<<32 - 1
+ x0 := x & mask32
+ x1 := x >> 32
+ y0 := y & mask32
+ y1 := y >> 32
+ w0 := x0 * y0
+ t := x1*y0 + w0>>32
+ w1 := t & mask32
+ w2 := t >> 32
+ w1 += x0 * y1
+ hi = x1*y1 + w2 + w1>>32
+ lo = x * y
+ return
+}
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
new file mode 100644
index 0000000..495c1fa
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.13
+
+package poly1305
+
+import "math/bits"
+
+func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
+ return bits.Add64(x, y, carry)
+}
+
+func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
+ return bits.Sub64(x, y, borrow)
+}
+
+func bitsMul64(x, y uint64) (hi, lo uint64) {
+ return bits.Mul64(x, y)
+}
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
new file mode 100644
index 0000000..333da28
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
+
+package poly1305
+
+type mac struct{ macGeneric }
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go b/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
new file mode 100644
index 0000000..4aaea81
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
@@ -0,0 +1,99 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package poly1305 implements Poly1305 one-time message authentication code as
+// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
+//
+// Poly1305 is a fast, one-time authentication function. It is infeasible for an
+// attacker to generate an authenticator for a message without the key. However, a
+// key must only be used for a single message. Authenticating two different
+// messages with the same key allows an attacker to forge authenticators for other
+// messages with the same key.
+//
+// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+// used with a fixed key in order to generate one-time keys from a nonce.
+// However, in this package AES isn't used and the one-time key is specified
+// directly.
+package poly1305
+
+import "crypto/subtle"
+
+// TagSize is the size, in bytes, of a poly1305 authenticator.
+const TagSize = 16
+
+// Sum generates an authenticator for msg using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+ h := New(key)
+ h.Write(m)
+ h.Sum(out[:0])
+}
+
+// Verify returns true if mac is a valid authenticator for m with the given key.
+func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
+ var tmp [16]byte
+ Sum(&tmp, m, key)
+ return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
+}
+
+// New returns a new MAC computing an authentication
+// tag of all data written to it with the given key.
+// This allows writing the message progressively instead
+// of passing it as a single slice. Common users should use
+// the Sum function instead.
+//
+// The key must be unique for each message, as authenticating
+// two different messages with the same key allows an attacker
+// to forge messages at will.
+func New(key *[32]byte) *MAC {
+ m := &MAC{}
+ initialize(key, &m.macState)
+ return m
+}
+
+// MAC is an io.Writer computing an authentication tag
+// of the data written to it.
+//
+// MAC cannot be used like common hash.Hash implementations,
+// because using a poly1305 key twice breaks its security.
+// Therefore writing data to a running MAC after calling
+// Sum or Verify causes it to panic.
+type MAC struct {
+ mac // platform-dependent implementation
+
+ finalized bool
+}
+
+// Size returns the number of bytes Sum will return.
+func (h *MAC) Size() int { return TagSize }
+
+// Write adds more data to the running message authentication code.
+// It never returns an error.
+//
+// It must not be called after the first call of Sum or Verify.
+func (h *MAC) Write(p []byte) (n int, err error) {
+ if h.finalized {
+ panic("poly1305: write to MAC after Sum or Verify")
+ }
+ return h.mac.Write(p)
+}
+
+// Sum computes the authenticator of all data written to the
+// message authentication code.
+func (h *MAC) Sum(b []byte) []byte {
+ var mac [TagSize]byte
+ h.mac.Sum(&mac)
+ h.finalized = true
+ return append(b, mac[:]...)
+}
+
+// Verify returns whether the authenticator of all data written to
+// the message authentication code matches the expected value.
+func (h *MAC) Verify(expected []byte) bool {
+ var mac [TagSize]byte
+ h.mac.Sum(&mac)
+ h.finalized = true
+ return subtle.ConstantTimeCompare(expected, mac[:]) == 1
+}
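
A sketch of the one-shot and streaming forms of this MAC (the package is internal to x/crypto, so the snippet is illustrative rather than importable from application code; in practice the 32-byte one-time key is derived per message, for example from ChaCha20 keystream):

    var key [32]byte // one-time key: never reuse it for a second message
    msg := []byte("authenticated exactly once under this key")

    // One-shot.
    var tag [16]byte
    poly1305.Sum(&tag, msg, &key)
    ok := poly1305.Verify(&tag, msg, &key) // true

    // Streaming, for messages that arrive in pieces. Writing after Sum or
    // Verify panics, since the key must not authenticate a second message.
    m := poly1305.New(&key)
    m.Write(msg[:8])
    m.Write(msg[8:])
    sameTag := m.Sum(nil) // equal to tag[:]
    _, _ = ok, sameTag
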
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
new file mode 100644
index 0000000..164cd47
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
@@ -0,0 +1,47 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+package poly1305
+
+//go:noescape
+func update(state *macState, msg []byte)
+
+// mac is a wrapper for macGeneric that redirects calls that would have gone to
+// updateGeneric to update.
+//
+// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
+// using function pointers would carry a major performance cost.
+type mac struct{ macGeneric }
+
+func (h *mac) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < TagSize {
+ h.offset += n
+ return nn, nil
+ }
+ p = p[n:]
+ h.offset = 0
+ update(&h.macState, h.buffer[:])
+ }
+ if n := len(p) - (len(p) % TagSize); n > 0 {
+ update(&h.macState, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ h.offset += copy(h.buffer[h.offset:], p)
+ }
+ return nn, nil
+}
+
+func (h *mac) Sum(out *[16]byte) {
+ state := h.macState
+ if h.offset > 0 {
+ update(&state, h.buffer[:h.offset])
+ }
+ finalize(out, &state.h, &state.s)
+}
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
new file mode 100644
index 0000000..e0d3c64
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
@@ -0,0 +1,108 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+#include "textflag.h"
+
+#define POLY1305_ADD(msg, h0, h1, h2) \
+ ADDQ 0(msg), h0; \
+ ADCQ 8(msg), h1; \
+ ADCQ $1, h2; \
+ LEAQ 16(msg), msg
+
+#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
+ MOVQ r0, AX; \
+ MULQ h0; \
+ MOVQ AX, t0; \
+ MOVQ DX, t1; \
+ MOVQ r0, AX; \
+ MULQ h1; \
+ ADDQ AX, t1; \
+ ADCQ $0, DX; \
+ MOVQ r0, t2; \
+ IMULQ h2, t2; \
+ ADDQ DX, t2; \
+ \
+ MOVQ r1, AX; \
+ MULQ h0; \
+ ADDQ AX, t1; \
+ ADCQ $0, DX; \
+ MOVQ DX, h0; \
+ MOVQ r1, t3; \
+ IMULQ h2, t3; \
+ MOVQ r1, AX; \
+ MULQ h1; \
+ ADDQ AX, t2; \
+ ADCQ DX, t3; \
+ ADDQ h0, t2; \
+ ADCQ $0, t3; \
+ \
+ MOVQ t0, h0; \
+ MOVQ t1, h1; \
+ MOVQ t2, h2; \
+ ANDQ $3, h2; \
+ MOVQ t2, t0; \
+ ANDQ $0xFFFFFFFFFFFFFFFC, t0; \
+ ADDQ t0, h0; \
+ ADCQ t3, h1; \
+ ADCQ $0, h2; \
+ SHRQ $2, t3, t2; \
+ SHRQ $2, t3; \
+ ADDQ t2, h0; \
+ ADCQ t3, h1; \
+ ADCQ $0, h2
+
+// func update(state *[7]uint64, msg []byte)
+TEXT ·update(SB), $0-32
+ MOVQ state+0(FP), DI
+ MOVQ msg_base+8(FP), SI
+ MOVQ msg_len+16(FP), R15
+
+ MOVQ 0(DI), R8 // h0
+ MOVQ 8(DI), R9 // h1
+ MOVQ 16(DI), R10 // h2
+ MOVQ 24(DI), R11 // r0
+ MOVQ 32(DI), R12 // r1
+
+ CMPQ R15, $16
+ JB bytes_between_0_and_15
+
+loop:
+ POLY1305_ADD(SI, R8, R9, R10)
+
+multiply:
+ POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
+ SUBQ $16, R15
+ CMPQ R15, $16
+ JAE loop
+
+bytes_between_0_and_15:
+ TESTQ R15, R15
+ JZ done
+ MOVQ $1, BX
+ XORQ CX, CX
+ XORQ R13, R13
+ ADDQ R15, SI
+
+flush_buffer:
+ SHLQ $8, BX, CX
+ SHLQ $8, BX
+ MOVB -1(SI), R13
+ XORQ R13, BX
+ DECQ SI
+ DECQ R15
+ JNZ flush_buffer
+
+ ADDQ BX, R8
+ ADCQ CX, R9
+ ADCQ $0, R10
+ MOVQ $16, R15
+ JMP multiply
+
+done:
+ MOVQ R8, 0(DI)
+ MOVQ R9, 8(DI)
+ MOVQ R10, 16(DI)
+ RET
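
The flush_buffer loop above folds the trailing 1-15 message bytes into the BX:CX limb pair by scanning backwards from the last byte, keeping a 0x01 terminator just above the data — the same padding the generic code applies with its zero-filled buffer. A minimal Go sketch of that computation (illustrative only; tailLimbs is a made-up name, not code from this package):

// tailLimbs mirrors the flush_buffer loop: starting from lo = 1 (the
// terminator byte), shift the 128-bit value hi:lo left by 8 for each
// remaining byte, scanning from the last byte to the first, and OR the
// byte into the bottom. The result is the little-endian value of msg
// followed by a 0x01 byte, for 0 <= len(msg) <= 15.
func tailLimbs(msg []byte) (lo, hi uint64) {
	lo = 1
	for i := len(msg) - 1; i >= 0; i-- {
		hi = hi<<8 | lo>>56
		lo = lo<<8 | uint64(msg[i])
	}
	return lo, hi
}
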
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
new file mode 100644
index 0000000..e041da5
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
@@ -0,0 +1,309 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides the generic implementation of Sum and MAC. Other files
+// might provide optimized assembly implementations of some of this code.
+
+package poly1305
+
+import "encoding/binary"
+
+// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
+// for a 64-byte message is approximately
+//
+// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
+//
+// for some secret r and s. It can be computed sequentially like
+//
+// for len(msg) > 0:
+// h += read(msg, 16)
+// h *= r
+// h %= 2¹³⁰ - 5
+// return h + s
+//
+// All the complexity is about doing performant constant-time math on numbers
+// larger than any available numeric type.
+
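
The sequential description above maps almost line for line onto math/big. The sketch below is illustrative only — variable-time, allocating, and not the code in this package — and the names poly1305Ref and reverse are made up; the clamping masks are the rMask0/rMask1 constants defined further down in this file.

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"
)

// poly1305Ref follows the comment above directly: for each 16-byte block
// (with a 1 byte appended), h += block; h *= r; h %= 2¹³⁰ - 5; finally
// the tag is the low 128 bits of h + s, serialized little-endian.
func poly1305Ref(msg []byte, key *[32]byte) [16]byte {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	// Clamp r with the same masks initialize applies (rMask0, rMask1).
	r0 := binary.LittleEndian.Uint64(key[0:8]) & 0x0FFFFFFC0FFFFFFF
	r1 := binary.LittleEndian.Uint64(key[8:16]) & 0x0FFFFFFC0FFFFFFC
	r := new(big.Int).Lsh(new(big.Int).SetUint64(r1), 64)
	r.Or(r, new(big.Int).SetUint64(r0))
	s := new(big.Int).SetBytes(reverse(key[16:32]))

	h := new(big.Int)
	for len(msg) > 0 {
		n := 16
		if len(msg) < n {
			n = len(msg)
		}
		block := make([]byte, n+1)
		copy(block, msg[:n])
		block[n] = 1 // the bit set just above the message block
		h.Add(h, new(big.Int).SetBytes(reverse(block)))
		h.Mul(h, r).Mod(h, p)
		msg = msg[n:]
	}
	h.Add(h, s)

	// Keep only the low 128 bits, little-endian.
	var tag [16]byte
	hb := h.Bytes() // big-endian
	for i := 0; i < 16 && i < len(hb); i++ {
		tag[i] = hb[len(hb)-1-i]
	}
	return tag
}

// reverse returns a copy of b with the byte order flipped, so a
// little-endian buffer can be fed to big.Int.SetBytes (big-endian).
func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i, v := range b {
		out[len(b)-1-i] = v
	}
	return out
}

func main() {
	var key [32]byte
	for i := range key {
		key[i] = byte(i) // throwaway demo key
	}
	fmt.Printf("%x\n", poly1305Ref([]byte("hello poly1305"), &key))
}
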
+func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
+ h := newMACGeneric(key)
+ h.Write(msg)
+ h.Sum(out)
+}
+
+func newMACGeneric(key *[32]byte) macGeneric {
+ m := macGeneric{}
+ initialize(key, &m.macState)
+ return m
+}
+
+// macState holds numbers in saturated 64-bit little-endian limbs. That is,
+// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
+type macState struct {
+ // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
+ // can grow larger during and after rounds. It must, however, remain below
+ // 2 * (2¹³⁰ - 5).
+ h [3]uint64
+ // r and s are the private key components.
+ r [2]uint64
+ s [2]uint64
+}
+
+type macGeneric struct {
+ macState
+
+ buffer [TagSize]byte
+ offset int
+}
+
+// Write splits the incoming message into TagSize chunks, and passes them to
+// update. It buffers incomplete chunks.
+func (h *macGeneric) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < TagSize {
+ h.offset += n
+ return nn, nil
+ }
+ p = p[n:]
+ h.offset = 0
+ updateGeneric(&h.macState, h.buffer[:])
+ }
+ if n := len(p) - (len(p) % TagSize); n > 0 {
+ updateGeneric(&h.macState, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ h.offset += copy(h.buffer[h.offset:], p)
+ }
+ return nn, nil
+}
+
+// Sum flushes the last incomplete chunk from the buffer, if any, and generates
+// the MAC output. It does not modify its state, in order to allow for multiple
+// calls to Sum, even if no Write is allowed after Sum.
+func (h *macGeneric) Sum(out *[TagSize]byte) {
+ state := h.macState
+ if h.offset > 0 {
+ updateGeneric(&state, h.buffer[:h.offset])
+ }
+ finalize(out, &state.h, &state.s)
+}
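
Because Write buffers partial chunks and Sum works on a copy of the state, the tag must not depend on how the input is split across Write calls, and Sum may be called more than once. A test-style sketch of that property, assuming it sits in this package next to the code above (checkStreamingMatchesOneShot is a made-up name):

// checkStreamingMatchesOneShot feeds msg to a macGeneric in small, uneven
// pieces and confirms the result matches the one-shot sumGeneric, and that
// a second Sum call gives the same answer.
func checkStreamingMatchesOneShot(msg []byte, key *[32]byte) bool {
	var oneShot [TagSize]byte
	sumGeneric(&oneShot, msg, key)

	mac := newMACGeneric(key)
	for i := 0; i < len(msg); i += 3 { // arbitrary 3-byte split
		end := i + 3
		if end > len(msg) {
			end = len(msg)
		}
		mac.Write(msg[i:end])
	}
	var first, second [TagSize]byte
	mac.Sum(&first)
	mac.Sum(&second) // Sum copies the state, so this matches too
	return first == oneShot && second == oneShot
}
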
+
+// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
+// clears some bits of the secret coefficient to make it possible to implement
+// multiplication more efficiently.
+const (
+ rMask0 = 0x0FFFFFFC0FFFFFFF
+ rMask1 = 0x0FFFFFFC0FFFFFFC
+)
+
+// initialize loads the 256-bit key into the two 128-bit secret values r and s.
+func initialize(key *[32]byte, m *macState) {
+ m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
+ m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
+ m.s[0] = binary.LittleEndian.Uint64(key[16:24])
+ m.s[1] = binary.LittleEndian.Uint64(key[24:32])
+}
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+ lo, hi uint64
+}
+
+func mul64(a, b uint64) uint128 {
+ hi, lo := bitsMul64(a, b)
+ return uint128{lo, hi}
+}
+
+func add128(a, b uint128) uint128 {
+ lo, c := bitsAdd64(a.lo, b.lo, 0)
+ hi, c := bitsAdd64(a.hi, b.hi, c)
+ if c != 0 {
+ panic("poly1305: unexpected overflow")
+ }
+ return uint128{lo, hi}
+}
+
+func shiftRightBy2(a uint128) uint128 {
+ a.lo = a.lo>>2 | (a.hi&3)<<62
+ a.hi = a.hi >> 2
+ return a
+}
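
A quick way to check that mul64 and shiftRightBy2 behave as described is to compare them against math/big in an in-package test. A sketch under that assumption (toBig and checkUint128Helpers are made-up names; math/big must be imported):

import "math/big"

// toBig converts a uint128 to a *big.Int for comparison against
// reference arithmetic.
func toBig(x uint128) *big.Int {
	v := new(big.Int).Lsh(new(big.Int).SetUint64(x.hi), 64)
	return v.Or(v, new(big.Int).SetUint64(x.lo))
}

// checkUint128Helpers verifies mul64 and shiftRightBy2 for one input pair.
func checkUint128Helpers(a, b uint64) bool {
	prod := mul64(a, b)
	want := new(big.Int).Mul(new(big.Int).SetUint64(a), new(big.Int).SetUint64(b))
	if toBig(prod).Cmp(want) != 0 {
		return false
	}
	return toBig(shiftRightBy2(prod)).Cmp(want.Rsh(want, 2)) == 0
}
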
+
+// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
+// 128 bits of message, it computes
+//
+// h₊ = (h + m) * r mod 2¹³⁰ - 5
+//
+// If the msg length is not a multiple of TagSize, it assumes the last
+// incomplete chunk is the final one.
+func updateGeneric(state *macState, msg []byte) {
+ h0, h1, h2 := state.h[0], state.h[1], state.h[2]
+ r0, r1 := state.r[0], state.r[1]
+
+ for len(msg) > 0 {
+ var c uint64
+
+ // For the first step, h + m, we use a chain of bits.Add64 intrinsics.
+ // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
+ // reduced at the end of the multiplication below.
+ //
+ // The spec requires us to set a bit just above the message size, not to
+ // hide leading zeroes. For full chunks, that's 1 << 128, so we can just
+ // add 1 to the most significant (2¹²⁸) limb, h2.
+ if len(msg) >= TagSize {
+ h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
+ h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
+ h2 += c + 1
+
+ msg = msg[TagSize:]
+ } else {
+ var buf [TagSize]byte
+ copy(buf[:], msg)
+ buf[len(msg)] = 1
+
+ h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
+ h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
+ h2 += c
+
+ msg = nil
+ }
+
+ // Multiplication of big number limbs is similar to elementary school
+ // columnar multiplication. Instead of digits, there are 64-bit limbs.
+ //
+ // We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
+ //
+ // h2 h1 h0 x
+ // r1 r0 =
+ // ----------------
+ // h2r0 h1r0 h0r0 <-- individual 128-bit products
+ // + h2r1 h1r1 h0r1
+ // ------------------------
+ // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs
+ // ------------------------
+ // m3.hi m2.hi m1.hi m0.hi <-- carry propagation
+ // + m3.lo m2.lo m1.lo m0.lo
+ // -------------------------------
+ // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs
+ //
+ // The main difference from pen-and-paper multiplication is that we do
+ // carry propagation in a separate step, as if we wrote two digit sums
+ // at first (the 128-bit limbs), and then carried the tens all at once.
+
+ h0r0 := mul64(h0, r0)
+ h1r0 := mul64(h1, r0)
+ h2r0 := mul64(h2, r0)
+ h0r1 := mul64(h0, r1)
+ h1r1 := mul64(h1, r1)
+ h2r1 := mul64(h2, r1)
+
+ // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
+ // top 4 bits cleared by rMask{0,1}, we know that their product is not going
+ // to overflow 64 bits, so we can ignore the high part of the products.
+ //
+ // This also means that the product doesn't have a fifth limb (t4).
+ if h2r0.hi != 0 {
+ panic("poly1305: unexpected overflow")
+ }
+ if h2r1.hi != 0 {
+ panic("poly1305: unexpected overflow")
+ }
+
+ m0 := h0r0
+ m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
+ m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
+ m3 := h2r1
+
+ t0 := m0.lo
+ t1, c := bitsAdd64(m1.lo, m0.hi, 0)
+ t2, c := bitsAdd64(m2.lo, m1.hi, c)
+ t3, _ := bitsAdd64(m3.lo, m2.hi, c)
+
+ // Now we have the result as 4 64-bit limbs, and we need to reduce it
+ // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
+ // a cheap partial reduction according to the reduction identity
+ //
+ // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
+ //
+	// because 2¹³⁰ = 5 mod 2¹³⁰ - 5. This is only a partial reduction, since
+	// the result is likely to be larger than 2¹³⁰ - 5, but it is still small
+	// enough to fit the assumptions we make about h in the rest of the code.
+ //
+ // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
+
+ // We split the final result at the 2¹³⁰ mark into h and cc, the carry.
+ // Note that the carry bits are effectively shifted left by 2, in other
+ // words, cc = c * 4 for the c in the reduction identity.
+ h0, h1, h2 = t0, t1, t2&maskLow2Bits
+ cc := uint128{t2 & maskNotLow2Bits, t3}
+
+ // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
+
+ h0, c = bitsAdd64(h0, cc.lo, 0)
+ h1, c = bitsAdd64(h1, cc.hi, c)
+ h2 += c
+
+ cc = shiftRightBy2(cc)
+
+ h0, c = bitsAdd64(h0, cc.lo, 0)
+ h1, c = bitsAdd64(h1, cc.hi, c)
+ h2 += c
+
+ // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
+ //
+ // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
+ }
+
+ state.h[0], state.h[1], state.h[2] = h0, h1, h2
+}
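
The reduction identity used above, c · 2¹³⁰ + n = c · 5 + n (mod 2¹³⁰ - 5), is easy to spot-check numerically. A standalone math/big sketch (illustrative only):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2¹³⁰ - 5

	// Pick an arbitrary value x wider than 130 bits and split it at the
	// 2¹³⁰ mark into carry c and remainder n, as updateGeneric does.
	x, _ := new(big.Int).SetString("123456789abcdef0123456789abcdef0123456789", 16)
	c := new(big.Int).Rsh(x, 130)
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(1))
	n := new(big.Int).And(x, mask)

	// x mod p and (5c + n) mod p must agree.
	lhs := new(big.Int).Mod(x, p)
	rhs := new(big.Int).Add(new(big.Int).Mul(big.NewInt(5), c), n)
	rhs.Mod(rhs, p)
	fmt.Println(lhs.Cmp(rhs) == 0) // true
}
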
+
+const (
+ maskLow2Bits uint64 = 0x0000000000000003
+ maskNotLow2Bits uint64 = ^maskLow2Bits
+)
+
+// select64 returns x if v == 1 and y if v == 0, in constant time.
+func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
+
+// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
+const (
+ p0 = 0xFFFFFFFFFFFFFFFB
+ p1 = 0xFFFFFFFFFFFFFFFF
+ p2 = 0x0000000000000003
+)
+
+// finalize completes the modular reduction of h and computes
+//
+// out = h + s mod 2¹²⁸
+func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
+ h0, h1, h2 := h[0], h[1], h[2]
+
+ // After the partial reduction in updateGeneric, h might be more than
+ // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
+ // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
+ // result if the subtraction underflows, and t otherwise.
+
+ hMinusP0, b := bitsSub64(h0, p0, 0)
+ hMinusP1, b := bitsSub64(h1, p1, b)
+ _, b = bitsSub64(h2, p2, b)
+
+ // h = h if h < p else h - p
+ h0 = select64(b, h0, hMinusP0)
+ h1 = select64(b, h1, hMinusP1)
+
+ // Finally, we compute the last Poly1305 step
+ //
+ // tag = h + s mod 2¹²⁸
+ //
+ // by just doing a wide addition with the 128 low bits of h and discarding
+ // the overflow.
+ h0, c := bitsAdd64(h0, s[0], 0)
+ h1, _ = bitsAdd64(h1, s[1], c)
+
+ binary.LittleEndian.PutUint64(out[0:8], h0)
+ binary.LittleEndian.PutUint64(out[8:16], h1)
+}
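
The constant-time "h if h < p else h - p" step in finalize is a reusable pattern: perform the subtraction unconditionally, then let the final borrow select between the two candidates without branching on secret data. A standalone sketch of the same idea on a two-limb value (condSubtract is a made-up name; not code from this package):

package main

import (
	"fmt"
	"math/bits"
)

// select64 mirrors the helper above: x if v == 1, y if v == 0.
func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }

// condSubtract returns a mod m for a 128-bit value a < 2m, held as two
// little-endian limbs, without branching on the comparison.
func condSubtract(a0, a1, m0, m1 uint64) (uint64, uint64) {
	d0, b := bits.Sub64(a0, m0, 0)
	d1, b := bits.Sub64(a1, m1, b)
	// If the subtraction borrowed (b == 1), a < m and we keep a;
	// otherwise we keep a - m.
	return select64(b, a0, d0), select64(b, a1, d1)
}

func main() {
	lo, hi := condSubtract(7, 0, 5, 0)
	fmt.Println(lo, hi) // 2 0
}
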
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
new file mode 100644
index 0000000..4aec487
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
@@ -0,0 +1,47 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+package poly1305
+
+//go:noescape
+func update(state *macState, msg []byte)
+
+// mac is a wrapper for macGeneric that redirects calls that would have gone to
+// updateGeneric to update.
+//
+// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
+// using function pointers would carry a major performance cost.
+type mac struct{ macGeneric }
+
+func (h *mac) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < TagSize {
+ h.offset += n
+ return nn, nil
+ }
+ p = p[n:]
+ h.offset = 0
+ update(&h.macState, h.buffer[:])
+ }
+ if n := len(p) - (len(p) % TagSize); n > 0 {
+ update(&h.macState, p[:n])
+ p = p[n:]
+ }
+ if len(p) > 0 {
+ h.offset += copy(h.buffer[h.offset:], p)
+ }
+ return nn, nil
+}
+
+func (h *mac) Sum(out *[16]byte) {
+ state := h.macState
+ if h.offset > 0 {
+ update(&state, h.buffer[:h.offset])
+ }
+ finalize(out, &state.h, &state.s)
+}
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
new file mode 100644
index 0000000..d2ca5de
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
@@ -0,0 +1,181 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+#include "textflag.h"
+
+// This was ported from the amd64 implementation.
+
+#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \
+ MOVD (msg), t0; \
+ MOVD 8(msg), t1; \
+ MOVD $1, t2; \
+ ADDC t0, h0, h0; \
+ ADDE t1, h1, h1; \
+ ADDE t2, h2; \
+ ADD $16, msg
+
+#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \
+ MULLD r0, h0, t0; \
+ MULLD r0, h1, t4; \
+ MULHDU r0, h0, t1; \
+ MULHDU r0, h1, t5; \
+ ADDC t4, t1, t1; \
+ MULLD r0, h2, t2; \
+ ADDZE t5; \
+ MULHDU r1, h0, t4; \
+ MULLD r1, h0, h0; \
+ ADD t5, t2, t2; \
+ ADDC h0, t1, t1; \
+ MULLD h2, r1, t3; \
+ ADDZE t4, h0; \
+ MULHDU r1, h1, t5; \
+ MULLD r1, h1, t4; \
+ ADDC t4, t2, t2; \
+ ADDE t5, t3, t3; \
+ ADDC h0, t2, t2; \
+ MOVD $-4, t4; \
+ MOVD t0, h0; \
+ MOVD t1, h1; \
+ ADDZE t3; \
+ ANDCC $3, t2, h2; \
+ AND t2, t4, t0; \
+ ADDC t0, h0, h0; \
+ ADDE t3, h1, h1; \
+ SLD $62, t3, t4; \
+ SRD $2, t2; \
+ ADDZE h2; \
+ OR t4, t2, t2; \
+ SRD $2, t3; \
+ ADDC t2, h0, h0; \
+ ADDE t3, h1, h1; \
+ ADDZE h2
+
+DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
+DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
+GLOBL ·poly1305Mask<>(SB), RODATA, $16
+
+// func update(state *[7]uint64, msg []byte)
+TEXT ·update(SB), $0-32
+ MOVD state+0(FP), R3
+ MOVD msg_base+8(FP), R4
+ MOVD msg_len+16(FP), R5
+
+ MOVD 0(R3), R8 // h0
+ MOVD 8(R3), R9 // h1
+ MOVD 16(R3), R10 // h2
+ MOVD 24(R3), R11 // r0
+ MOVD 32(R3), R12 // r1
+
+ CMP R5, $16
+ BLT bytes_between_0_and_15
+
+loop:
+ POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22)
+
+multiply:
+ POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21)
+ ADD $-16, R5
+ CMP R5, $16
+ BGE loop
+
+bytes_between_0_and_15:
+ CMP R5, $0
+ BEQ done
+ MOVD $0, R16 // h0
+ MOVD $0, R17 // h1
+
+flush_buffer:
+ CMP R5, $8
+ BLE just1
+
+ MOVD $8, R21
+ SUB R21, R5, R21
+
+ // Greater than 8 -- load the rightmost remaining bytes in msg
+ // and put into R17 (h1)
+ MOVD (R4)(R21), R17
+ MOVD $16, R22
+
+ // Find the offset to those bytes
+ SUB R5, R22, R22
+ SLD $3, R22
+
+ // Shift to get only the bytes in msg
+ SRD R22, R17, R17
+
+ // Put 1 at high end
+ MOVD $1, R23
+ SLD $3, R21
+ SLD R21, R23, R23
+ OR R23, R17, R17
+
+ // Remainder is 8
+ MOVD $8, R5
+
+just1:
+ CMP R5, $8
+ BLT less8
+
+ // Exactly 8
+ MOVD (R4), R16
+
+ CMP R17, $0
+
+ // Check if we've already set R17; if not
+ // set 1 to indicate end of msg.
+ BNE carry
+ MOVD $1, R17
+ BR carry
+
+less8:
+ MOVD $0, R16 // h0
+ MOVD $0, R22 // shift count
+ CMP R5, $4
+ BLT less4
+ MOVWZ (R4), R16
+ ADD $4, R4
+ ADD $-4, R5
+ MOVD $32, R22
+
+less4:
+ CMP R5, $2
+ BLT less2
+ MOVHZ (R4), R21
+ SLD R22, R21, R21
+ OR R16, R21, R16
+ ADD $16, R22
+ ADD $-2, R5
+ ADD $2, R4
+
+less2:
+ CMP R5, $0
+ BEQ insert1
+ MOVBZ (R4), R21
+ SLD R22, R21, R21
+ OR R16, R21, R16
+ ADD $8, R22
+
+insert1:
+ // Insert 1 at end of msg
+ MOVD $1, R21
+ SLD R22, R21, R21
+ OR R16, R21, R16
+
+carry:
+ // Add new values to h0, h1, h2
+ ADDC R16, R8
+ ADDE R17, R9
+ ADDZE R10, R10
+ MOVD $16, R5
+ ADD R5, R4
+ BR multiply
+
+done:
+ // Save h0, h1, h2 in state
+ MOVD R8, 0(R3)
+ MOVD R9, 8(R3)
+ MOVD R10, 16(R3)
+ RET
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
new file mode 100644
index 0000000..e1d033a
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+package poly1305
+
+import (
+ "golang.org/x/sys/cpu"
+)
+
+// updateVX is an assembly implementation of Poly1305 that uses vector
+// instructions. It must only be called if the vector facility (vx) is
+// available.
+//
+//go:noescape
+func updateVX(state *macState, msg []byte)
+
+// mac is a replacement for macGeneric that uses a larger buffer and redirects
+// calls that would have gone to updateGeneric to updateVX if the vector
+// facility is installed.
+//
+// A larger buffer is required for good performance because the vector
+// implementation has a higher fixed cost per call than the generic
+// implementation.
+type mac struct {
+ macState
+
+ buffer [16 * TagSize]byte // size must be a multiple of block size (16)
+ offset int
+}
+
+func (h *mac) Write(p []byte) (int, error) {
+ nn := len(p)
+ if h.offset > 0 {
+ n := copy(h.buffer[h.offset:], p)
+ if h.offset+n < len(h.buffer) {
+ h.offset += n
+ return nn, nil
+ }
+ p = p[n:]
+ h.offset = 0
+ if cpu.S390X.HasVX {
+ updateVX(&h.macState, h.buffer[:])
+ } else {
+ updateGeneric(&h.macState, h.buffer[:])
+ }
+ }
+
+ tail := len(p) % len(h.buffer) // number of bytes to copy into buffer
+ body := len(p) - tail // number of bytes to process now
+ if body > 0 {
+ if cpu.S390X.HasVX {
+ updateVX(&h.macState, p[:body])
+ } else {
+ updateGeneric(&h.macState, p[:body])
+ }
+ }
+ h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0
+ return nn, nil
+}
+
+func (h *mac) Sum(out *[TagSize]byte) {
+ state := h.macState
+ remainder := h.buffer[:h.offset]
+
+ // Use the generic implementation if we have 2 or fewer blocks left
+ // to sum. The vector implementation has a higher startup time.
+ if cpu.S390X.HasVX && len(remainder) > 2*TagSize {
+ updateVX(&state, remainder)
+ } else if len(remainder) > 0 {
+ updateGeneric(&state, remainder)
+ }
+ finalize(out, &state.h, &state.s)
+}
diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
new file mode 100644
index 0000000..0fe3a7c
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
@@ -0,0 +1,503 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+#include "textflag.h"
+
+// This implementation of Poly1305 uses the vector facility (vx)
+// to process up to 2 blocks (32 bytes) per iteration using an
+// algorithm based on the one described in:
+//
+// NEON crypto, Daniel J. Bernstein & Peter Schwabe
+// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
+//
+// This algorithm uses 5 26-bit limbs to represent a 130-bit
+// value. These limbs are, for the most part, zero extended and
+// placed into 64-bit vector register elements. Each vector
+// register is 128-bits wide and so holds 2 of these elements.
+// Using 26-bit limbs allows us plenty of headroom to accommodate
+// accumulations before and after multiplication without
+// overflowing either 32-bits (before multiplication) or 64-bits
+// (after multiplication).
+//
+// In order to parallelise the operations required to calculate
+// the sum we use two separate accumulators and then sum those
+// in an extra final step. For compatibility with the generic
+// implementation we perform this summation at the end of every
+// updateVX call.
+//
+// To use two accumulators we must multiply the message blocks
+// by r² rather than r. Only the final message block should be
+// multiplied by r.
+//
+// Example:
+//
+// We want to calculate the sum (h) for a 64 byte message (m):
+//
+// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
+//
+// To do this we split the calculation into the even indices
+// and odd indices of the message. These form our SIMD 'lanes':
+//
+// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0
+// m[16:32]r³ + m[48:64]r <- lane 1
+//
+// To calculate this iteratively we refactor so that both lanes
+// are written in terms of r² and r:
+//
+// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
+// (m[16:32]r² + m[48:64])r <- lane 1
+// ^ ^
+// | coefficients for second iteration
+// coefficients for first iteration
+//
+// So in this case we would have two iterations. In the first
+// both lanes are multiplied by r². In the second only the
+// first lane is multiplied by r² and the second lane is
+// instead multiplied by r. This gives us the odd and even
+// powers of r that we need from the original equation.
+//
+// Notation:
+//
+// h - accumulator
+// r - key
+// m - message
+//
+// [a, b] - SIMD register holding two 64-bit values
+// [a, b, c, d] - SIMD register holding four 32-bit values
+// xᵢ[n] - limb n of variable x with bit width i
+//
+// Limbs are expressed in little endian order, so for 26-bit
+// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
+// will be the least significant limb.
+
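
The refactoring sketched above — even-indexed blocks in lane 0, odd-indexed blocks in lane 1, everything multiplied by r² except for one final multiply by r in lane 1 — evaluates exactly the same polynomial as the sequential rule. A small math/big check of that identity for the four-block example (illustrative only):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

	// Arbitrary stand-ins for r and the four blocks m[0:16] .. m[48:64].
	r := big.NewInt(1234567891011)
	m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)}
	r2 := new(big.Int).Mul(r, r)

	// Sequential: h = m0·r⁴ + m1·r³ + m2·r² + m3·r
	seq := new(big.Int)
	for _, mi := range m {
		seq.Add(seq, mi)
		seq.Mul(seq, r)
		seq.Mod(seq, p)
	}

	// Two lanes: lane0 = (m0·r² + m2)·r², lane1 = (m1·r² + m3)·r
	lane0 := new(big.Int).Mul(m[0], r2)
	lane0.Add(lane0, m[2]).Mul(lane0, r2)
	lane1 := new(big.Int).Mul(m[1], r2)
	lane1.Add(lane1, m[3]).Mul(lane1, r)
	twoLane := new(big.Int).Add(lane0, lane1)
	twoLane.Mod(twoLane, p)

	fmt.Println(seq.Cmp(twoLane) == 0) // true
}
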
+// masking constants
+#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
+#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits
+
+// expansion constants (see EXPAND macro)
+#define EX0 V2
+#define EX1 V3
+#define EX2 V4
+
+// key (r², r or 1 depending on context)
+#define R_0 V5
+#define R_1 V6
+#define R_2 V7
+#define R_3 V8
+#define R_4 V9
+
+// precalculated coefficients (5r², 5r or 0 depending on context)
+#define R5_1 V10
+#define R5_2 V11
+#define R5_3 V12
+#define R5_4 V13
+
+// message block (m)
+#define M_0 V14
+#define M_1 V15
+#define M_2 V16
+#define M_3 V17
+#define M_4 V18
+
+// accumulator (h)
+#define H_0 V19
+#define H_1 V20
+#define H_2 V21
+#define H_3 V22
+#define H_4 V23
+
+// temporary registers (for short-lived values)
+#define T_0 V24
+#define T_1 V25
+#define T_2 V26
+#define T_3 V27
+#define T_4 V28
+
+GLOBL ·constants<>(SB), RODATA, $0x30
+// EX0
+DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
+DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
+// EX1
+DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
+DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
+// EX2
+DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
+DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
+
+// MULTIPLY multiplies each lane of f and g, partially reduced
+// modulo 2¹³⁰ - 5. The result, h, consists of partial products
+// in each lane that need to be reduced further to produce the
+// final result.
+//
+// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
+//
+// Note that the multiplication by 5 of the high bits is
+// achieved by precalculating the multiplication of four of the
+// g coefficients by 5. These are g51-g54.
+#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
+ VMLOF f0, g0, h0 \
+ VMLOF f0, g3, h3 \
+ VMLOF f0, g1, h1 \
+ VMLOF f0, g4, h4 \
+ VMLOF f0, g2, h2 \
+ VMLOF f1, g54, T_0 \
+ VMLOF f1, g2, T_3 \
+ VMLOF f1, g0, T_1 \
+ VMLOF f1, g3, T_4 \
+ VMLOF f1, g1, T_2 \
+ VMALOF f2, g53, h0, h0 \
+ VMALOF f2, g1, h3, h3 \
+ VMALOF f2, g54, h1, h1 \
+ VMALOF f2, g2, h4, h4 \
+ VMALOF f2, g0, h2, h2 \
+ VMALOF f3, g52, T_0, T_0 \
+ VMALOF f3, g0, T_3, T_3 \
+ VMALOF f3, g53, T_1, T_1 \
+ VMALOF f3, g1, T_4, T_4 \
+ VMALOF f3, g54, T_2, T_2 \
+ VMALOF f4, g51, h0, h0 \
+ VMALOF f4, g54, h3, h3 \
+ VMALOF f4, g52, h1, h1 \
+ VMALOF f4, g0, h4, h4 \
+ VMALOF f4, g53, h2, h2 \
+ VAG T_0, h0, h0 \
+ VAG T_3, h3, h3 \
+ VAG T_1, h1, h1 \
+ VAG T_4, h4, h4 \
+ VAG T_2, h2, h2
+
+// REDUCE performs the following carry operations in four
+// stages, as specified in Bernstein & Schwabe:
+//
+// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4]
+// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0]
+// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3]
+// 4: h₂₆[3]->h₂₆[4]
+//
+// The result is that all of the limbs are limited to 26-bits
+// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
+//
+// Note that although each limb is aligned at 26-bit intervals
+// they may contain values that exceed 2²⁶ - 1, hence the need
+// to carry the excess bits in each limb.
+#define REDUCE(h0, h1, h2, h3, h4) \
+ VESRLG $26, h0, T_0 \
+ VESRLG $26, h3, T_1 \
+ VN MOD26, h0, h0 \
+ VN MOD26, h3, h3 \
+ VAG T_0, h1, h1 \
+ VAG T_1, h4, h4 \
+ VESRLG $26, h1, T_2 \
+ VESRLG $26, h4, T_3 \
+ VN MOD26, h1, h1 \
+ VN MOD26, h4, h4 \
+ VESLG $2, T_3, T_4 \
+ VAG T_3, T_4, T_4 \
+ VAG T_2, h2, h2 \
+ VAG T_4, h0, h0 \
+ VESRLG $26, h2, T_0 \
+ VESRLG $26, h0, T_1 \
+ VN MOD26, h2, h2 \
+ VN MOD26, h0, h0 \
+ VAG T_0, h3, h3 \
+ VAG T_1, h1, h1 \
+ VESRLG $26, h3, T_2 \
+ VN MOD26, h3, h3 \
+ VAG T_2, h4, h4
+
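
For readability, here is the same carry schedule as REDUCE written out in scalar Go for a single five-limb value (a made-up sketch, not code from this package): carries flow 0→1 and 3→4, then 1→2 and 4→0 (the latter multiplied by 5, since 2¹³⁰ ≡ 5 mod 2¹³⁰ - 5), then 0→1 and 2→3, and finally 3→4, leaving every limb except 1 and 4 below 2²⁶.

const mask26 = (1 << 26) - 1

// reduce26 carries a value held in five 26-bit limbs, following the same
// four stages as the REDUCE macro.
func reduce26(h *[5]uint64) {
	// stage 1: 0 -> 1, 3 -> 4
	h[1] += h[0] >> 26
	h[0] &= mask26
	h[4] += h[3] >> 26
	h[3] &= mask26
	// stage 2: 1 -> 2, 4 -> 0 (carry out of the top wraps around times 5)
	h[2] += h[1] >> 26
	h[1] &= mask26
	h[0] += 5 * (h[4] >> 26)
	h[4] &= mask26
	// stage 3: 0 -> 1, 2 -> 3
	h[1] += h[0] >> 26
	h[0] &= mask26
	h[3] += h[2] >> 26
	h[2] &= mask26
	// stage 4: 3 -> 4
	h[4] += h[3] >> 26
	h[3] &= mask26
}
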
+// EXPAND splits the 128-bit little-endian values in0 and in1
+// into 26-bit big-endian limbs and places the results into
+// the first and second lane of d₂₆[0:4] respectively.
+//
+// The EX0, EX1 and EX2 constants are arrays of byte indices
+// for permutation. The permutation both reverses the bytes
+// in the input and ensures the bytes are copied into the
+// destination limb ready to be shifted into their final
+// position.
+#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
+ VPERM in0, in1, EX0, d0 \
+ VPERM in0, in1, EX1, d2 \
+ VPERM in0, in1, EX2, d4 \
+ VESRLG $26, d0, d1 \
+ VESRLG $30, d2, d3 \
+ VESRLG $4, d2, d2 \
+ VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]]
+ VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]]
+ VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]]
+ VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]]
+ VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]]
+
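
What EXPAND computes for each 16-byte block, expressed as scalar Go: interpret the block as a 128-bit little-endian number and cut it into five 26-bit limbs, least significant first (expand26 is a made-up name; the real macro handles two blocks at once with byte permutes and shifts):

import "encoding/binary"

// expand26 splits a 16-byte little-endian block into five 26-bit limbs.
// The top limb only has 24 significant bits, which is why the vector code
// masks it with MOD24.
func expand26(block *[16]byte) [5]uint32 {
	lo := binary.LittleEndian.Uint64(block[0:8])
	hi := binary.LittleEndian.Uint64(block[8:16])
	return [5]uint32{
		uint32(lo) & 0x3ffffff,          // bits 0..25
		uint32(lo>>26) & 0x3ffffff,      // bits 26..51
		uint32(lo>>52|hi<<12) & 0x3ffffff, // bits 52..77
		uint32(hi>>14) & 0x3ffffff,      // bits 78..103
		uint32(hi >> 40),                // bits 104..127
	}
}
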
+// func updateVX(state *macState, msg []byte)
+TEXT ·updateVX(SB), NOSPLIT, $0
+ MOVD state+0(FP), R1
+ LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len
+
+ // load EX0, EX1 and EX2
+ MOVD $·constants<>(SB), R5
+ VLM (R5), EX0, EX2
+
+ // generate masks
+ VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
+ VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]
+
+ // load h (accumulator) and r (key) from state
+ VZERO T_1 // [0, 0]
+ VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]]
+ VLEG $0, 16(R1), T_1 // [h₆₄[2], 0]
+ VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]]
+ VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
+ VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]
+
+ // unpack h and r into 26-bit limbs
+ // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
+ VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]]
+ VZERO H_1 // [0, 0]
+ VZERO H_3 // [0, 0]
+ VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
+ VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0]
+ VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]]
+ VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only
+ VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[3], r₂₆[3]]
+ VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only
+ VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete
+ VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete
+
+ // replicate r across all 4 vector elements
+ VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
+ VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
+ VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
+ VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
+ VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]
+
+ // zero out lane 1 of h
+ VLEIG $1, $0, H_0 // [h₂₆[0], 0]
+ VLEIG $1, $0, H_1 // [h₂₆[1], 0]
+ VLEIG $1, $0, H_2 // [h₂₆[2], 0]
+ VLEIG $1, $0, H_3 // [h₂₆[3], 0]
+ VLEIG $1, $0, H_4 // [h₂₆[4], 0]
+
+ // calculate 5r (ignore least significant limb)
+ VREPIF $5, T_0
+ VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
+ VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
+ VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
+ VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]
+
+ // skip r² calculation if we are only calculating one block
+ CMPBLE R3, $16, skip
+
+ // calculate r²
+ MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
+ REDUCE(M_0, M_1, M_2, M_3, M_4)
+ VGBM $0x0f0f, T_0
+ VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
+ VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
+ VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
+ VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
+ VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]
+
+ // calculate 5r² (ignore least significant limb)
+ VREPIF $5, T_0
+ VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
+ VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
+ VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
+ VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]
+
+loop:
+ CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients
+
+ // load next 2 blocks from message
+ VLM (R2), T_0, T_1
+
+ // update message slice
+ SUB $32, R3
+ MOVD $32(R2), R2
+
+ // unpack message blocks into 26-bit big-endian limbs
+ EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+ // add 2¹²⁸ to each message block value
+ VLEIB $4, $1, M_4
+ VLEIB $12, $1, M_4
+
+multiply:
+ // accumulate the incoming message
+ VAG H_0, M_0, M_0
+ VAG H_3, M_3, M_3
+ VAG H_1, M_1, M_1
+ VAG H_4, M_4, M_4
+ VAG H_2, M_2, M_2
+
+ // multiply the accumulator by the key coefficient
+ MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
+
+ // carry and partially reduce the partial products
+ REDUCE(H_0, H_1, H_2, H_3, H_4)
+
+ CMPBNE R3, $0, loop
+
+finish:
+ // sum lane 0 and lane 1 and put the result in lane 1
+ VZERO T_0
+ VSUMQG H_0, T_0, H_0
+ VSUMQG H_3, T_0, H_3
+ VSUMQG H_1, T_0, H_1
+ VSUMQG H_4, T_0, H_4
+ VSUMQG H_2, T_0, H_2
+
+ // reduce again after summation
+ // TODO(mundaym): there might be a more efficient way to do this
+ // now that we only have 1 active lane. For example, we could
+ // simultaneously pack the values as we reduce them.
+ REDUCE(H_0, H_1, H_2, H_3, H_4)
+
+ // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
+ // TODO(mundaym): in testing this final carry was unnecessary.
+ // Needs a proof before it can be removed though.
+ VESRLG $26, H_1, T_1
+ VN MOD26, H_1, H_1
+ VAQ T_1, H_2, H_2
+ VESRLG $26, H_2, T_2
+ VN MOD26, H_2, H_2
+ VAQ T_2, H_3, H_3
+ VESRLG $26, H_3, T_3
+ VN MOD26, H_3, H_3
+ VAQ T_3, H_4, H_4
+
+ // h is now < 2(2¹³⁰ - 5)
+ // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
+ VESLG $26, H_1, H_1
+ VESLG $26, H_3, H_3
+ VO H_0, H_1, H_0
+ VO H_2, H_3, H_2
+ VESLG $4, H_2, H_2
+ VLEIB $7, $48, H_1
+ VSLB H_1, H_2, H_2
+ VO H_0, H_2, H_0
+ VLEIB $7, $104, H_1
+ VSLB H_1, H_4, H_3
+ VO H_3, H_0, H_0
+ VLEIB $7, $24, H_1
+ VSRLB H_1, H_4, H_1
+
+ // update state
+ VSTEG $1, H_0, 0(R1)
+ VSTEG $0, H_0, 8(R1)
+ VSTEG $1, H_1, 16(R1)
+ RET
+
+b2: // 2 or fewer blocks remaining
+ CMPBLE R3, $16, b1
+
+ // Load the 2 remaining blocks (17-32 bytes remaining).
+ MOVD $-17(R3), R0 // index of final byte to load modulo 16
+ VL (R2), T_0 // load full 16 byte block
+ VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes
+
+ // The Poly1305 algorithm requires that a 1 bit be appended to
+ // each message block. If the final block is less than 16 bytes
+ // long then it is easiest to insert the 1 before the message
+ // block is split into 26-bit limbs. If, on the other hand, the
+ // final message block is 16 bytes long then we append the 1 bit
+ // after expansion as normal.
+ MOVBZ $1, R0
+ MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16)
+ CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
+ VLVGB R3, R0, T_1 // insert 1 into the byte at index R3
+
+ // Split both blocks into 26-bit limbs in the appropriate lanes.
+ EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+ // Append a 1 byte to the end of the second to last block.
+ VLEIB $4, $1, M_4
+
+ // Append a 1 byte to the end of the last block only if it is a
+ // full 16 byte block.
+ CMPBNE R3, $16, 2(PC)
+ VLEIB $12, $1, M_4
+
+ // Finally, set up the coefficients for the final multiplication.
+ // We have previously saved r and 5r in the 32-bit even indexes
+ // of the R_[0-4] and R5_[1-4] coefficient registers.
+ //
+ // We want lane 0 to be multiplied by r² so that lane can be kept the
+ // same. We want lane 1 to be multiplied by r so we need to move
+ // the saved r value into the 32-bit odd index in lane 1 by
+ // rotating the 64-bit lane by 32.
+ VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only
+ VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]]
+ VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]]
+ VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]]
+ VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]]
+ VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]]
+ VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
+ VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
+ VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
+ VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]
+
+ MOVD $0, R3
+ BR multiply
+
+skip:
+ CMPBEQ R3, $0, finish
+
+b1: // 1 block remaining
+
+ // Load the final block (1-16 bytes). This will be placed into
+ // lane 0.
+ MOVD $-1(R3), R0
+ VLL R0, (R2), T_0 // pad to 16 bytes with zeros
+
+ // The Poly1305 algorithm requires that a 1 bit be appended to
+ // each message block. If the final block is less than 16 bytes
+ // long then it is easiest to insert the 1 before the message
+ // block is split into 26-bit limbs. If, on the other hand, the
+ // final message block is 16 bytes long then we append the 1 bit
+ // after expansion as normal.
+ MOVBZ $1, R0
+ CMPBEQ R3, $16, 2(PC)
+ VLVGB R3, R0, T_0
+
+ // Set the message block in lane 1 to the value 0 so that it
+ // can be accumulated without affecting the final result.
+ VZERO T_1
+
+ // Split the final message block into 26-bit limbs in lane 0.
+ // Lane 1 will contain 0.
+ EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
+
+ // Append a 1 byte to the end of the last block only if it is a
+ // full 16 byte block.
+ CMPBNE R3, $16, 2(PC)
+ VLEIB $4, $1, M_4
+
+ // We have previously saved r and 5r in the 32-bit even indexes
+ // of the R_[0-4] and R5_[1-4] coefficient registers.
+ //
+ // We want lane 0 to be multiplied by r so we need to move the
+ // saved r value into the 32-bit odd index in lane 0. We want
+ // lane 1 to be set to the value 1. This makes multiplication
+ // a no-op. We do this by setting lane 1 in every register to 0
+ // and then just setting the 32-bit index 3 in R_0 to 1.
+ VZERO T_0
+ MOVD $0, R0
+ MOVD $0x10111213, R12
+ VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000]
+ VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0]
+ VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0]
+ VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0]
+ VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0]
+ VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0]
+ VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
+ VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
+ VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
+ VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]
+
+ // Set the value of lane 1 to be 1.
+ VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]
+
+ MOVD $0, R3
+ BR multiply