Diffstat (limited to 'arch/s390/crypto')
-rw-r--r--  arch/s390/crypto/chacha-glue.c | 4
-rw-r--r--  arch/s390/crypto/chacha-s390.S | 2
-rw-r--r--  arch/s390/crypto/crc32-vx.c | 11
-rw-r--r--  arch/s390/crypto/crc32-vx.h | 12
-rw-r--r--  arch/s390/crypto/crc32be-vx.c (renamed from arch/s390/crypto/crc32be-vx.S) | 177
-rw-r--r--  arch/s390/crypto/crc32le-vx.c (renamed from arch/s390/crypto/crc32le-vx.S) | 249
-rw-r--r--  arch/s390/crypto/paes_s390.c | 11
7 files changed, 199 insertions, 267 deletions
diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c
index ed9959e6f7..f8b0c52e77 100644
--- a/arch/s390/crypto/chacha-glue.c
+++ b/arch/s390/crypto/chacha-glue.c
@@ -15,14 +15,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
-#include <asm/fpu/api.h>
+#include <asm/fpu.h>
#include "chacha-s390.h"
static void chacha20_crypt_s390(u32 *state, u8 *dst, const u8 *src,
unsigned int nbytes, const u32 *key,
u32 *counter)
{
- struct kernel_fpu vxstate;
+ DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
kernel_fpu_begin(&vxstate, KERNEL_VXR);
chacha20_vx(dst, src, nbytes, key, counter);
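
For context, a minimal sketch of how the wrapper reads once this hunk is applied; the kernel_fpu_end() call sits below the excerpt, so its exact placement here is an assumption based on the usual begin/end pairing:

	static void chacha20_crypt_s390(u32 *state, u8 *dst, const u8 *src,
					unsigned int nbytes, const u32 *key,
					u32 *counter)
	{
		/* Save area sized for the 32 vector registers used by chacha20_vx(). */
		DECLARE_KERNEL_FPU_ONSTACK32(vxstate);

		kernel_fpu_begin(&vxstate, KERNEL_VXR);
		chacha20_vx(dst, src, nbytes, key, counter);
		kernel_fpu_end(&vxstate, KERNEL_VXR);
	}
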
diff --git a/arch/s390/crypto/chacha-s390.S b/arch/s390/crypto/chacha-s390.S
index 37cb63f25b..63f3102678 100644
--- a/arch/s390/crypto/chacha-s390.S
+++ b/arch/s390/crypto/chacha-s390.S
@@ -8,7 +8,7 @@
#include <linux/linkage.h>
#include <asm/nospec-insn.h>
-#include <asm/vx-insn.h>
+#include <asm/fpu-insn.h>
#define SP %r15
#define FRAME (16 * 8 + 4 * 8)
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 017143e9ce..74f17c905d 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -13,8 +13,8 @@
#include <linux/cpufeature.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
-#include <asm/fpu/api.h>
-
+#include <asm/fpu.h>
+#include "crc32-vx.h"
#define CRC32_BLOCK_SIZE 1
#define CRC32_DIGEST_SIZE 4
@@ -31,11 +31,6 @@ struct crc_desc_ctx {
u32 crc;
};
-/* Prototypes for functions in assembly files */
-u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
-u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
-u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
-
/*
* DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension
*
@@ -49,8 +44,8 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
static u32 __pure ___fname(u32 crc, \
unsigned char const *data, size_t datalen) \
{ \
- struct kernel_fpu vxstate; \
unsigned long prealign, aligned, remaining; \
+ DECLARE_KERNEL_FPU_ONSTACK16(vxstate); \
\
if (datalen < VX_MIN_LEN + VX_ALIGN_MASK) \
return ___crc32_sw(crc, data, datalen); \
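
The remainder of the macro body is outside this hunk. As an illustration only, reconstructed from the prealign/aligned/remaining names visible above (the ___crc32_vx, VX_ALIGNMENT, and KERNEL_VXR_LOW identifiers are assumptions, not patch text), a generated function typically continues along these lines:

	/*
	 * Bring the start pointer to a 16-byte boundary in software, run the
	 * vector code over the aligned middle, and finish the tail in software.
	 */
	if ((unsigned long)data & VX_ALIGN_MASK) {
		prealign = VX_ALIGNMENT - ((unsigned long)data & VX_ALIGN_MASK);
		datalen -= prealign;
		crc = ___crc32_sw(crc, data, prealign);
		data += prealign;
	}

	aligned = datalen & ~VX_ALIGN_MASK;
	remaining = datalen & VX_ALIGN_MASK;

	/* KERNEL_VXR_LOW assumed here to match the 16-register save area above. */
	kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);
	crc = ___crc32_vx(crc, data, aligned);
	kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);

	if (remaining)
		crc = ___crc32_sw(crc, data + aligned, remaining);

	return crc;
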
diff --git a/arch/s390/crypto/crc32-vx.h b/arch/s390/crypto/crc32-vx.h
new file mode 100644
index 0000000000..652c96e1a8
--- /dev/null
+++ b/arch/s390/crypto/crc32-vx.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CRC32_VX_S390_H
+#define _CRC32_VX_S390_H
+
+#include <linux/types.h>
+
+u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+
+#endif /* _CRC32_VX_S390_H */
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.c
index 34ee479268..fed7c9c70d 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.c
@@ -12,20 +12,17 @@
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
-#include <linux/linkage.h>
-#include <asm/nospec-insn.h>
-#include <asm/vx-insn.h>
+#include <linux/types.h>
+#include <asm/fpu.h>
+#include "crc32-vx.h"
/* Vector register range containing CRC-32 constants */
-#define CONST_R1R2 %v9
-#define CONST_R3R4 %v10
-#define CONST_R5 %v11
-#define CONST_R6 %v12
-#define CONST_RU_POLY %v13
-#define CONST_CRC_POLY %v14
-
- .data
- .balign 8
+#define CONST_R1R2 9
+#define CONST_R3R4 10
+#define CONST_R5 11
+#define CONST_R6 12
+#define CONST_RU_POLY 13
+#define CONST_CRC_POLY 14
/*
* The CRC-32 constant block contains reduction constants to fold and
@@ -58,105 +55,74 @@
* P'(x) = 0xEDB88320
*/
-SYM_DATA_START_LOCAL(constants_CRC_32_BE)
- .quad 0x08833794c, 0x0e6228b11 # R1, R2
- .quad 0x0c5b9cd4c, 0x0e8a45605 # R3, R4
- .quad 0x0f200aa66, 1 << 32 # R5, x32
- .quad 0x0490d678d, 1 # R6, 1
- .quad 0x104d101df, 0 # u
- .quad 0x104C11DB7, 0 # P(x)
-SYM_DATA_END(constants_CRC_32_BE)
-
- .previous
-
- GEN_BR_THUNK %r14
-
- .text
-/*
- * The CRC-32 function(s) use these calling conventions:
- *
- * Parameters:
- *
- * %r2: Initial CRC value, typically ~0; and final CRC (return) value.
- * %r3: Input buffer pointer, performance might be improved if the
- * buffer is on a doubleword boundary.
- * %r4: Length of the buffer, must be 64 bytes or greater.
+static unsigned long constants_CRC_32_BE[] = {
+ 0x08833794c, 0x0e6228b11, /* R1, R2 */
+ 0x0c5b9cd4c, 0x0e8a45605, /* R3, R4 */
+ 0x0f200aa66, 1UL << 32, /* R5, x32 */
+ 0x0490d678d, 1, /* R6, 1 */
+ 0x104d101df, 0, /* u */
+ 0x104C11DB7, 0, /* P(x) */
+};
+
+/**
+ * crc32_be_vgfm_16 - Compute CRC-32 (BE variant) with vector registers
+ * @crc: Initial CRC value, typically ~0.
+ * @buf: Input buffer pointer, performance might be improved if the
+ * buffer is on a doubleword boundary.
+ * @size: Size of the buffer, must be 64 bytes or greater.
*
* Register usage:
- *
- * %r5: CRC-32 constant pool base pointer.
* V0: Initial CRC value and intermediate constants and results.
* V1..V4: Data for CRC computation.
* V5..V8: Next data chunks that are fetched from the input buffer.
- *
* V9..V14: CRC-32 constants.
*/
-SYM_FUNC_START(crc32_be_vgfm_16)
+u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
+{
/* Load CRC-32 constants */
- larl %r5,constants_CRC_32_BE
- VLM CONST_R1R2,CONST_CRC_POLY,0,%r5
+ fpu_vlm(CONST_R1R2, CONST_CRC_POLY, &constants_CRC_32_BE);
+ fpu_vzero(0);
/* Load the initial CRC value into the leftmost word of V0. */
- VZERO %v0
- VLVGF %v0,%r2,0
+ fpu_vlvgf(0, crc, 0);
/* Load a 64-byte data chunk and XOR with CRC */
- VLM %v1,%v4,0,%r3 /* 64-bytes into V1..V4 */
- VX %v1,%v0,%v1 /* V1 ^= CRC */
- aghi %r3,64 /* BUF = BUF + 64 */
- aghi %r4,-64 /* LEN = LEN - 64 */
-
- /* Check remaining buffer size and jump to proper folding method */
- cghi %r4,64
- jl .Lless_than_64bytes
-
-.Lfold_64bytes_loop:
- /* Load the next 64-byte data chunk into V5 to V8 */
- VLM %v5,%v8,0,%r3
+ fpu_vlm(1, 4, buf);
+ fpu_vx(1, 0, 1);
+ buf += 64;
+ size -= 64;
+
+ while (size >= 64) {
+ /* Load the next 64-byte data chunk into V5 to V8 */
+ fpu_vlm(5, 8, buf);
+
+ /*
+ * Perform a GF(2) multiplication of the doublewords in V1 with
+ * the reduction constants in V0. The intermediate result is
+ * then folded (accumulated) with the next data chunk in V5 and
+ * stored in V1. Repeat this step for the register contents
+ * in V2, V3, and V4 respectively.
+ */
+ fpu_vgfmag(1, CONST_R1R2, 1, 5);
+ fpu_vgfmag(2, CONST_R1R2, 2, 6);
+ fpu_vgfmag(3, CONST_R1R2, 3, 7);
+ fpu_vgfmag(4, CONST_R1R2, 4, 8);
+ buf += 64;
+ size -= 64;
+ }
- /*
- * Perform a GF(2) multiplication of the doublewords in V1 with
- * the reduction constants in V0. The intermediate result is
- * then folded (accumulated) with the next data chunk in V5 and
- * stored in V1. Repeat this step for the register contents
- * in V2, V3, and V4 respectively.
- */
- VGFMAG %v1,CONST_R1R2,%v1,%v5
- VGFMAG %v2,CONST_R1R2,%v2,%v6
- VGFMAG %v3,CONST_R1R2,%v3,%v7
- VGFMAG %v4,CONST_R1R2,%v4,%v8
-
- /* Adjust buffer pointer and length for next loop */
- aghi %r3,64 /* BUF = BUF + 64 */
- aghi %r4,-64 /* LEN = LEN - 64 */
-
- cghi %r4,64
- jnl .Lfold_64bytes_loop
-
-.Lless_than_64bytes:
/* Fold V1 to V4 into a single 128-bit value in V1 */
- VGFMAG %v1,CONST_R3R4,%v1,%v2
- VGFMAG %v1,CONST_R3R4,%v1,%v3
- VGFMAG %v1,CONST_R3R4,%v1,%v4
-
- /* Check whether to continue with 64-bit folding */
- cghi %r4,16
- jl .Lfinal_fold
+ fpu_vgfmag(1, CONST_R3R4, 1, 2);
+ fpu_vgfmag(1, CONST_R3R4, 1, 3);
+ fpu_vgfmag(1, CONST_R3R4, 1, 4);
-.Lfold_16bytes_loop:
+ while (size >= 16) {
+ fpu_vl(2, buf);
+ fpu_vgfmag(1, CONST_R3R4, 1, 2);
+ buf += 16;
+ size -= 16;
+ }
- VL %v2,0,,%r3 /* Load next data chunk */
- VGFMAG %v1,CONST_R3R4,%v1,%v2 /* Fold next data chunk */
-
- /* Adjust buffer pointer and size for folding next data chunk */
- aghi %r3,16
- aghi %r4,-16
-
- /* Process remaining data chunks */
- cghi %r4,16
- jnl .Lfold_16bytes_loop
-
-.Lfinal_fold:
/*
* The R5 constant is used to fold a 128-bit value into a 96-bit value
* that is XORed with the next 96-bit input data chunk. To use a single
@@ -164,7 +130,7 @@ SYM_FUNC_START(crc32_be_vgfm_16)
* form an intermediate 96-bit value (with appended zeros) which is then
* XORed with the intermediate reduction result.
*/
- VGFMG %v1,CONST_R5,%v1
+ fpu_vgfmg(1, CONST_R5, 1);
/*
* Further reduce the remaining 96-bit value to a 64-bit value using a
@@ -173,7 +139,7 @@ SYM_FUNC_START(crc32_be_vgfm_16)
* doubleword with R6. The result is a 64-bit value and is subject to
* the Barrett reduction.
*/
- VGFMG %v1,CONST_R6,%v1
+ fpu_vgfmg(1, CONST_R6, 1);
/*
* The input values to the Barrett reduction are the degree-63 polynomial
@@ -194,20 +160,15 @@ SYM_FUNC_START(crc32_be_vgfm_16)
*/
/* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
- VUPLLF %v2,%v1
- VGFMG %v2,CONST_RU_POLY,%v2
+ fpu_vupllf(2, 1);
+ fpu_vgfmg(2, CONST_RU_POLY, 2);
/*
* Compute the GF(2) product of the CRC polynomial in V0 with T1(x) in
* V2 and XOR the intermediate result, T2(x), with the value in V1.
* The final result is in the rightmost word of V2.
*/
- VUPLLF %v2,%v2
- VGFMAG %v2,CONST_CRC_POLY,%v2,%v1
-
-.Ldone:
- VLGVF %r2,%v2,3
- BR_EX %r14
-SYM_FUNC_END(crc32_be_vgfm_16)
-
-.previous
+ fpu_vupllf(2, 2);
+ fpu_vgfmag(2, CONST_CRC_POLY, 2, 1);
+ return fpu_vlgvf(2, 3);
+}
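
As a hedged sanity check (not part of the patch), the C entry point can now be compared directly against the generic crc32_be() from lib/crc32 for buffers of 64 bytes or more; the KERNEL_VXR_LOW mask and the 16-byte length granularity are assumptions taken from the register usage and folding loops above:

	#include <linux/crc32.h>
	#include <asm/fpu.h>
	#include "crc32-vx.h"

	static bool crc32be_vx_matches_sw(const unsigned char *buf, size_t len)
	{
		DECLARE_KERNEL_FPU_ONSTACK16(vxstate);
		u32 vx, sw;

		if (len < 64)
			return true;	/* contract: the vector path needs >= 64 bytes */

		len &= ~15UL;		/* the vector path consumes 16-byte chunks */

		kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);
		vx = crc32_be_vgfm_16(~0U, buf, len);
		kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);

		sw = crc32_be(~0U, buf, len);
		return vx == sw;
	}
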
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.c
index 5a819ae09a..2f629f394d 100644
--- a/arch/s390/crypto/crc32le-vx.S
+++ b/arch/s390/crypto/crc32le-vx.c
@@ -13,20 +13,17 @@
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
-#include <linux/linkage.h>
-#include <asm/nospec-insn.h>
-#include <asm/vx-insn.h>
+#include <linux/types.h>
+#include <asm/fpu.h>
+#include "crc32-vx.h"
/* Vector register range containing CRC-32 constants */
-#define CONST_PERM_LE2BE %v9
-#define CONST_R2R1 %v10
-#define CONST_R4R3 %v11
-#define CONST_R5 %v12
-#define CONST_RU_POLY %v13
-#define CONST_CRC_POLY %v14
-
- .data
- .balign 8
+#define CONST_PERM_LE2BE 9
+#define CONST_R2R1 10
+#define CONST_R4R3 11
+#define CONST_R5 12
+#define CONST_RU_POLY 13
+#define CONST_CRC_POLY 14
/*
* The CRC-32 constant block contains reduction constants to fold and
@@ -59,64 +56,43 @@
* P'(x) = 0x82F63B78
*/
-SYM_DATA_START_LOCAL(constants_CRC_32_LE)
- .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask
- .quad 0x1c6e41596, 0x154442bd4 # R2, R1
- .quad 0x0ccaa009e, 0x1751997d0 # R4, R3
- .octa 0x163cd6124 # R5
- .octa 0x1F7011641 # u'
- .octa 0x1DB710641 # P'(x) << 1
-SYM_DATA_END(constants_CRC_32_LE)
-
-SYM_DATA_START_LOCAL(constants_CRC_32C_LE)
- .octa 0x0F0E0D0C0B0A09080706050403020100 # BE->LE mask
- .quad 0x09e4addf8, 0x740eef02 # R2, R1
- .quad 0x14cd00bd6, 0xf20c0dfe # R4, R3
- .octa 0x0dd45aab8 # R5
- .octa 0x0dea713f1 # u'
- .octa 0x105ec76f0 # P'(x) << 1
-SYM_DATA_END(constants_CRC_32C_LE)
-
- .previous
-
- GEN_BR_THUNK %r14
-
- .text
-
-/*
- * The CRC-32 functions use these calling conventions:
- *
- * Parameters:
- *
- * %r2: Initial CRC value, typically ~0; and final CRC (return) value.
- * %r3: Input buffer pointer, performance might be improved if the
- * buffer is on a doubleword boundary.
- * %r4: Length of the buffer, must be 64 bytes or greater.
+static unsigned long constants_CRC_32_LE[] = {
+ 0x0f0e0d0c0b0a0908, 0x0706050403020100, /* BE->LE mask */
+ 0x1c6e41596, 0x154442bd4, /* R2, R1 */
+ 0x0ccaa009e, 0x1751997d0, /* R4, R3 */
+ 0x0, 0x163cd6124, /* R5 */
+ 0x0, 0x1f7011641, /* u' */
+ 0x0, 0x1db710641 /* P'(x) << 1 */
+};
+
+static unsigned long constants_CRC_32C_LE[] = {
+ 0x0f0e0d0c0b0a0908, 0x0706050403020100, /* BE->LE mask */
+ 0x09e4addf8, 0x740eef02, /* R2, R1 */
+ 0x14cd00bd6, 0xf20c0dfe, /* R4, R3 */
+ 0x0, 0x0dd45aab8, /* R5 */
+ 0x0, 0x0dea713f1, /* u' */
+ 0x0, 0x105ec76f0 /* P'(x) << 1 */
+};
+
+/**
+ * crc32_le_vgfm_generic - Compute CRC-32 (LE variant) with vector registers
+ * @crc: Initial CRC value, typically ~0.
+ * @buf: Input buffer pointer, performance might be improved if the
+ * buffer is on a doubleword boundary.
+ * @size: Size of the buffer, must be 64 bytes or greater.
+ * @constants: CRC-32 constant pool base pointer.
*
* Register usage:
- *
- * %r5: CRC-32 constant pool base pointer.
- * V0: Initial CRC value and intermediate constants and results.
- * V1..V4: Data for CRC computation.
- * V5..V8: Next data chunks that are fetched from the input buffer.
- * V9: Constant for BE->LE conversion and shift operations
- *
+ * V0: Initial CRC value and intermediate constants and results.
+ * V1..V4: Data for CRC computation.
+ * V5..V8: Next data chunks that are fetched from the input buffer.
+ * V9: Constant for BE->LE conversion and shift operations
* V10..V14: CRC-32 constants.
*/
-
-SYM_FUNC_START(crc32_le_vgfm_16)
- larl %r5,constants_CRC_32_LE
- j crc32_le_vgfm_generic
-SYM_FUNC_END(crc32_le_vgfm_16)
-
-SYM_FUNC_START(crc32c_le_vgfm_16)
- larl %r5,constants_CRC_32C_LE
- j crc32_le_vgfm_generic
-SYM_FUNC_END(crc32c_le_vgfm_16)
-
-SYM_FUNC_START(crc32_le_vgfm_generic)
+static u32 crc32_le_vgfm_generic(u32 crc, unsigned char const *buf, size_t size, unsigned long *constants)
+{
/* Load CRC-32 constants */
- VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
+ fpu_vlm(CONST_PERM_LE2BE, CONST_CRC_POLY, constants);
/*
* Load the initial CRC value.
@@ -125,90 +101,73 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
* vector register and is later XORed with the LSB portion
* of the loaded input data.
*/
- VZERO %v0 /* Clear V0 */
- VLVGF %v0,%r2,3 /* Load CRC into rightmost word */
+ fpu_vzero(0); /* Clear V0 */
+ fpu_vlvgf(0, crc, 3); /* Load CRC into rightmost word */
/* Load a 64-byte data chunk and XOR with CRC */
- VLM %v1,%v4,0,%r3 /* 64-bytes into V1..V4 */
- VPERM %v1,%v1,%v1,CONST_PERM_LE2BE
- VPERM %v2,%v2,%v2,CONST_PERM_LE2BE
- VPERM %v3,%v3,%v3,CONST_PERM_LE2BE
- VPERM %v4,%v4,%v4,CONST_PERM_LE2BE
+ fpu_vlm(1, 4, buf);
+ fpu_vperm(1, 1, 1, CONST_PERM_LE2BE);
+ fpu_vperm(2, 2, 2, CONST_PERM_LE2BE);
+ fpu_vperm(3, 3, 3, CONST_PERM_LE2BE);
+ fpu_vperm(4, 4, 4, CONST_PERM_LE2BE);
+
+ fpu_vx(1, 0, 1); /* V1 ^= CRC */
+ buf += 64;
+ size -= 64;
+
+ while (size >= 64) {
+ fpu_vlm(5, 8, buf);
+ fpu_vperm(5, 5, 5, CONST_PERM_LE2BE);
+ fpu_vperm(6, 6, 6, CONST_PERM_LE2BE);
+ fpu_vperm(7, 7, 7, CONST_PERM_LE2BE);
+ fpu_vperm(8, 8, 8, CONST_PERM_LE2BE);
+ /*
+ * Perform a GF(2) multiplication of the doublewords in V1 with
+ * the R1 and R2 reduction constants in V0. The intermediate
+ * result is then folded (accumulated) with the next data chunk
+ * in V5 and stored in V1. Repeat this step for the register
+ * contents in V2, V3, and V4 respectively.
+ */
+ fpu_vgfmag(1, CONST_R2R1, 1, 5);
+ fpu_vgfmag(2, CONST_R2R1, 2, 6);
+ fpu_vgfmag(3, CONST_R2R1, 3, 7);
+ fpu_vgfmag(4, CONST_R2R1, 4, 8);
+ buf += 64;
+ size -= 64;
+ }
- VX %v1,%v0,%v1 /* V1 ^= CRC */
- aghi %r3,64 /* BUF = BUF + 64 */
- aghi %r4,-64 /* LEN = LEN - 64 */
-
- cghi %r4,64
- jl .Lless_than_64bytes
-
-.Lfold_64bytes_loop:
- /* Load the next 64-byte data chunk into V5 to V8 */
- VLM %v5,%v8,0,%r3
- VPERM %v5,%v5,%v5,CONST_PERM_LE2BE
- VPERM %v6,%v6,%v6,CONST_PERM_LE2BE
- VPERM %v7,%v7,%v7,CONST_PERM_LE2BE
- VPERM %v8,%v8,%v8,CONST_PERM_LE2BE
-
- /*
- * Perform a GF(2) multiplication of the doublewords in V1 with
- * the R1 and R2 reduction constants in V0. The intermediate result
- * is then folded (accumulated) with the next data chunk in V5 and
- * stored in V1. Repeat this step for the register contents
- * in V2, V3, and V4 respectively.
- */
- VGFMAG %v1,CONST_R2R1,%v1,%v5
- VGFMAG %v2,CONST_R2R1,%v2,%v6
- VGFMAG %v3,CONST_R2R1,%v3,%v7
- VGFMAG %v4,CONST_R2R1,%v4,%v8
-
- aghi %r3,64 /* BUF = BUF + 64 */
- aghi %r4,-64 /* LEN = LEN - 64 */
-
- cghi %r4,64
- jnl .Lfold_64bytes_loop
-
-.Lless_than_64bytes:
/*
* Fold V1 to V4 into a single 128-bit value in V1. Multiply V1 with R3
* and R4 and accumulate the next 128-bit chunk until a single 128-bit
* value remains.
*/
- VGFMAG %v1,CONST_R4R3,%v1,%v2
- VGFMAG %v1,CONST_R4R3,%v1,%v3
- VGFMAG %v1,CONST_R4R3,%v1,%v4
-
- cghi %r4,16
- jl .Lfinal_fold
-
-.Lfold_16bytes_loop:
-
- VL %v2,0,,%r3 /* Load next data chunk */
- VPERM %v2,%v2,%v2,CONST_PERM_LE2BE
- VGFMAG %v1,CONST_R4R3,%v1,%v2 /* Fold next data chunk */
+ fpu_vgfmag(1, CONST_R4R3, 1, 2);
+ fpu_vgfmag(1, CONST_R4R3, 1, 3);
+ fpu_vgfmag(1, CONST_R4R3, 1, 4);
+
+ while (size >= 16) {
+ fpu_vl(2, buf);
+ fpu_vperm(2, 2, 2, CONST_PERM_LE2BE);
+ fpu_vgfmag(1, CONST_R4R3, 1, 2);
+ buf += 16;
+ size -= 16;
+ }
- aghi %r3,16
- aghi %r4,-16
-
- cghi %r4,16
- jnl .Lfold_16bytes_loop
-
-.Lfinal_fold:
/*
* Set up a vector register for byte shifts. The shift value must
* be loaded in bits 1-4 in byte element 7 of a vector register.
* Shift by 8 bytes: 0x40
* Shift by 4 bytes: 0x20
*/
- VLEIB %v9,0x40,7
+ fpu_vleib(9, 0x40, 7);
/*
* Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes
* to move R4 into the rightmost doubleword and set the leftmost
* doubleword to 0x1.
*/
- VSRLB %v0,CONST_R4R3,%v9
- VLEIG %v0,1,0
+ fpu_vsrlb(0, CONST_R4R3, 9);
+ fpu_vleig(0, 1, 0);
/*
* Compute GF(2) product of V1 and V0. The rightmost doubleword
@@ -216,7 +175,7 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
* multiplied by 0x1 and is then XORed with rightmost product.
* Implicitly, the intermediate leftmost product becomes padded
*/
- VGFMG %v1,%v0,%v1
+ fpu_vgfmg(1, 0, 1);
/*
* Now do the final 32-bit fold by multiplying the rightmost word
@@ -231,10 +190,10 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
* rightmost doubleword and the leftmost doubleword is zero to ignore
* the leftmost product of V1.
*/
- VLEIB %v9,0x20,7 /* Shift by words */
- VSRLB %v2,%v1,%v9 /* Store remaining bits in V2 */
- VUPLLF %v1,%v1 /* Split rightmost doubleword */
- VGFMAG %v1,CONST_R5,%v1,%v2 /* V1 = (V1 * R5) XOR V2 */
+ fpu_vleib(9, 0x20, 7); /* Shift by words */
+ fpu_vsrlb(2, 1, 9); /* Store remaining bits in V2 */
+ fpu_vupllf(1, 1); /* Split rightmost doubleword */
+ fpu_vgfmag(1, CONST_R5, 1, 2); /* V1 = (V1 * R5) XOR V2 */
/*
* Apply a Barrett reduction to compute the final 32-bit CRC value.
@@ -256,20 +215,26 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
*/
/* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
- VUPLLF %v2,%v1
- VGFMG %v2,CONST_RU_POLY,%v2
+ fpu_vupllf(2, 1);
+ fpu_vgfmg(2, CONST_RU_POLY, 2);
/*
* Compute the GF(2) product of the CRC polynomial with T1(x) in
* V2 and XOR the intermediate result, T2(x), with the value in V1.
* The final result is stored in word element 2 of V2.
*/
- VUPLLF %v2,%v2
- VGFMAG %v2,CONST_CRC_POLY,%v2,%v1
+ fpu_vupllf(2, 2);
+ fpu_vgfmag(2, CONST_CRC_POLY, 2, 1);
+
+ return fpu_vlgvf(2, 2);
+}
-.Ldone:
- VLGVF %r2,%v2,2
- BR_EX %r14
-SYM_FUNC_END(crc32_le_vgfm_generic)
+u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
+{
+ return crc32_le_vgfm_generic(crc, buf, size, &constants_CRC_32_LE[0]);
+}
-.previous
+u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
+{
+ return crc32_le_vgfm_generic(crc, buf, size, &constants_CRC_32C_LE[0]);
+}
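
One detail worth calling out: fpu_vlm() loads the six registers CONST_PERM_LE2BE through CONST_CRC_POLY as one contiguous 96-byte block, so each constant table must supply exactly two unsigned longs (one 128-bit register image) per register. A hedged compile-time check along these lines could make that coupling explicit (illustrative only; static_assert and ARRAY_SIZE via <linux/build_bug.h> and <linux/kernel.h> are assumed to be available):

	/* Six registers (9..14) at 16 bytes each must equal the table size. */
	static_assert(sizeof(constants_CRC_32_LE) ==
		      (CONST_CRC_POLY - CONST_PERM_LE2BE + 1) * 16);
	static_assert(sizeof(constants_CRC_32C_LE) == sizeof(constants_CRC_32_LE));
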
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 55ee5567a5..99ea3f12c5 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -125,17 +125,16 @@ struct s390_pxts_ctx {
static inline int __paes_keyblob2pkey(struct key_blob *kb,
struct pkey_protkey *pk)
{
- int i, ret;
+ int i, ret = -EIO;
- /* try three times in case of failure */
- for (i = 0; i < 3; i++) {
- if (i > 0 && ret == -EAGAIN && in_task())
+ /* try three times in case of busy card */
+ for (i = 0; ret && i < 3; i++) {
+ if (ret == -EBUSY && in_task()) {
if (msleep_interruptible(1000))
return -EINTR;
+ }
ret = pkey_keyblob2pkey(kb->key, kb->keylen,
pk->protkey, &pk->len, &pk->type);
- if (ret == 0)
- break;
}
return ret;
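
Assembled from the hunk above (the function's closing brace falls outside the excerpt), the retry helper now reads as follows: no sleep before the first attempt, an interruptible one-second sleep (in task context only) before retrying after -EBUSY, and at most three attempts overall:

	static inline int __paes_keyblob2pkey(struct key_blob *kb,
					      struct pkey_protkey *pk)
	{
		int i, ret = -EIO;

		/* try three times in case of busy card */
		for (i = 0; ret && i < 3; i++) {
			if (ret == -EBUSY && in_task()) {
				if (msleep_interruptible(1000))
					return -EINTR;
			}
			ret = pkey_keyblob2pkey(kb->key, kb->keylen,
						pk->protkey, &pk->len, &pk->type);
		}

		return ret;
	}
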