author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
commit     8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree       8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /arch/x86/crypto
parent     Adding debian version 6.7.12-1. (diff)
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r--  arch/x86/crypto/Kconfig                    |   8
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S          |   2
-rw-r--r--  arch/x86/crypto/aesni-intel_avx-x86_64.S   |   2
-rw-r--r--  arch/x86/crypto/crc32c-pcl-intel-asm_64.S  |   2
-rw-r--r--  arch/x86/crypto/sha1_ssse3_glue.c          |   7
-rw-r--r--  arch/x86/crypto/sha256_ssse3_glue.c        |   7
-rw-r--r--  arch/x86/crypto/sha512-avx-asm.S           |   2
-rw-r--r--  arch/x86/crypto/sha512-ssse3-asm.S         |   2
-rw-r--r--  arch/x86/crypto/sm4-aesni-avx-asm_64.S     |  52
-rw-r--r--  arch/x86/crypto/sm4-aesni-avx2-asm_64.S    |  55
-rw-r--r--  arch/x86/crypto/sm4-avx.h                  |   4
-rw-r--r--  arch/x86/crypto/sm4_aesni_avx2_glue.c      |  26
-rw-r--r--  arch/x86/crypto/sm4_aesni_avx_glue.c       | 130
13 files changed, 19 insertions, 280 deletions
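
Most of the removed lines below drop the SM4 CFB (cipher feedback) mode
implementations. For reference, CFB turns a block cipher into a stream
cipher: each ciphertext block is the plaintext XORed with the encryption
of the previous ciphertext block (the IV for the first block). A minimal
C sketch of the construction that the deleted sm4_cfb_encrypt() glue
implemented, assuming a hypothetical one-block primitive
sm4_encrypt_block() in place of the kernel's sm4_crypt_block()/round-key
API:

/*
 * Illustrative sketch only -- not kernel code.  sm4_encrypt_block()
 * is a hypothetical single-block SM4 primitive standing in for the
 * kernel's sm4_crypt_block(ctx->rkey_enc, ...).
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SM4_BLOCK_SIZE 16

void sm4_encrypt_block(const void *key, uint8_t out[SM4_BLOCK_SIZE],
		       const uint8_t in[SM4_BLOCK_SIZE]);

/* CFB encryption: C[i] = P[i] ^ E_K(C[i-1]), with C[-1] = IV. */
static void cfb_encrypt_sketch(const void *key, uint8_t *dst,
			       const uint8_t *src, size_t nblocks,
			       uint8_t iv[SM4_BLOCK_SIZE])
{
	uint8_t keystream[SM4_BLOCK_SIZE];
	size_t i;

	while (nblocks--) {
		/* Encrypt the previous ciphertext block (IV at first). */
		sm4_encrypt_block(key, keystream, iv);
		for (i = 0; i < SM4_BLOCK_SIZE; i++)
			dst[i] = src[i] ^ keystream[i];
		/* The ciphertext just produced feeds the next block. */
		memcpy(iv, dst, SM4_BLOCK_SIZE);
		src += SM4_BLOCK_SIZE;
		dst += SM4_BLOCK_SIZE;
	}
}

Decryption XORs the same keystream but feeds back the received
ciphertext, so all feedback blocks are known up front; that is why the
deleted sm4_avx_cfb_decrypt() below could batch 8 blocks through
sm4_aesni_avx_crypt8() while encryption stayed inherently serial.
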
diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index 9bbfd01cfa..c9e59589a1 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -189,7 +189,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
Processes 16 blocks in parallel.
config CRYPTO_SM4_AESNI_AVX_X86_64
- tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX)"
+ tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_SIMD
@@ -197,7 +197,7 @@ config CRYPTO_SM4_AESNI_AVX_X86_64
select CRYPTO_SM4
help
Length-preserving ciphers: SM4 cipher algorithms
- (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes
+ (OSCCA GB/T 32907-2016) with ECB, CBC, and CTR modes
Architecture: x86_64 using:
- AES-NI (AES New Instructions)
@@ -210,7 +210,7 @@ config CRYPTO_SM4_AESNI_AVX_X86_64
If unsure, say N.
config CRYPTO_SM4_AESNI_AVX2_X86_64
- tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX2)"
+ tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX2)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_SIMD
@@ -219,7 +219,7 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64
select CRYPTO_SM4_AESNI_AVX_X86_64
help
Length-preserving ciphers: SM4 cipher algorithms
- (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes
+ (OSCCA GB/T 32907-2016) with ECB, CBC, and CTR modes
Architecture: x86_64 using:
- AES-NI (AES New Instructions)
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 187f913cc2..411d8c83e8 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -666,7 +666,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.ifc \operation, dec
movdqa %xmm1, %xmm3
- pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn)
+ pxor %xmm1, %xmm9 # Ciphertext XOR E(K, Yn)
mov \PLAIN_CYPH_LEN, %r10
add %r13, %r10
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 74dd230973..8c9749ed06 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -747,7 +747,7 @@ VARIABLE_OFFSET = 16*8
.if \ENC_DEC == DEC
vmovdqa %xmm1, %xmm3
- pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn)
+ pxor %xmm1, %xmm9 # Ciphertext XOR E(K, Yn)
mov \PLAIN_CYPH_LEN, %r10
add %r13, %r10
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 81ce0f4db5..bbcff1fb78 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -184,7 +184,7 @@ SYM_FUNC_START(crc_pcl)
xor crc1,crc1
xor crc2,crc2
- # Fall thruogh into top of crc array (crc_128)
+ # Fall through into top of crc array (crc_128)
################################################################
## 3) CRC Array:
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 959afa705e..ab8bc54f25 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -2,8 +2,8 @@
/*
* Cryptographic API.
*
- * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
- * Supplemental SSE3 instructions.
+ * Glue code for the SHA1 Secure Hash Algorithm assembler implementations
+ * using SSSE3, AVX, AVX2, and SHA-NI instructions.
*
* This file is based on sha1_generic.c
*
@@ -28,6 +28,9 @@
#include <asm/simd.h>
static const struct x86_cpu_id module_cpu_ids[] = {
+#ifdef CONFIG_AS_SHA1_NI
+ X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL),
+#endif
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 4c0383a90e..e04a43d9f7 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -1,8 +1,8 @@
/*
* Cryptographic API.
*
- * Glue code for the SHA256 Secure Hash Algorithm assembler
- * implementation using supplemental SSE3 / AVX / AVX2 instructions.
+ * Glue code for the SHA256 Secure Hash Algorithm assembler implementations
+ * using SSSE3, AVX, AVX2, and SHA-NI instructions.
*
* This file is based on sha256_generic.c
*
@@ -45,6 +45,9 @@ asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
const u8 *data, int blocks);
static const struct x86_cpu_id module_cpu_ids[] = {
+#ifdef CONFIG_AS_SHA256_NI
+ X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL),
+#endif
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index d902b8ea07..5bfce4b045 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -84,7 +84,7 @@ frame_size = frame_WK + WK_SIZE
# Useful QWORD "arrays" for simpler memory references
# MSG, DIGEST, K_t, W_t are arrays
-# WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even
+# WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even
# Input message (arg1)
#define MSG(i) 8*i(msg)
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index 65be301568..30a2c4777f 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -82,7 +82,7 @@ frame_size = frame_WK + WK_SIZE
# Useful QWORD "arrays" for simpler memory references
# MSG, DIGEST, K_t, W_t are arrays
-# WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even
+# WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even
# Input message (arg1)
#define MSG(i) 8*i(msg)
diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
index e2668d2fe6..2bf611eaa1 100644
--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
@@ -534,55 +534,3 @@ SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
FRAME_END
RET;
SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
-
-/*
- * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
- * const u8 *src, u8 *iv)
- */
-SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
- /* input:
- * %rdi: round key array, CTX
- * %rsi: dst (8 blocks)
- * %rdx: src (8 blocks)
- * %rcx: iv
- */
- FRAME_BEGIN
-
- /* Load input */
- vmovdqu (%rcx), RA0;
- vmovdqu 0 * 16(%rdx), RA1;
- vmovdqu 1 * 16(%rdx), RA2;
- vmovdqu 2 * 16(%rdx), RA3;
- vmovdqu 3 * 16(%rdx), RB0;
- vmovdqu 4 * 16(%rdx), RB1;
- vmovdqu 5 * 16(%rdx), RB2;
- vmovdqu 6 * 16(%rdx), RB3;
-
- /* Update IV */
- vmovdqu 7 * 16(%rdx), RNOT;
- vmovdqu RNOT, (%rcx);
-
- call __sm4_crypt_blk8;
-
- vpxor (0 * 16)(%rdx), RA0, RA0;
- vpxor (1 * 16)(%rdx), RA1, RA1;
- vpxor (2 * 16)(%rdx), RA2, RA2;
- vpxor (3 * 16)(%rdx), RA3, RA3;
- vpxor (4 * 16)(%rdx), RB0, RB0;
- vpxor (5 * 16)(%rdx), RB1, RB1;
- vpxor (6 * 16)(%rdx), RB2, RB2;
- vpxor (7 * 16)(%rdx), RB3, RB3;
-
- vmovdqu RA0, (0 * 16)(%rsi);
- vmovdqu RA1, (1 * 16)(%rsi);
- vmovdqu RA2, (2 * 16)(%rsi);
- vmovdqu RA3, (3 * 16)(%rsi);
- vmovdqu RB0, (4 * 16)(%rsi);
- vmovdqu RB1, (5 * 16)(%rsi);
- vmovdqu RB2, (6 * 16)(%rsi);
- vmovdqu RB3, (7 * 16)(%rsi);
-
- vzeroall;
- FRAME_END
- RET;
-SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)
diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
index 98ede94592..9ff5ba0755 100644
--- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
@@ -439,58 +439,3 @@ SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
FRAME_END
RET;
SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
-
-/*
- * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
- * const u8 *src, u8 *iv)
- */
-SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
- /* input:
- * %rdi: round key array, CTX
- * %rsi: dst (16 blocks)
- * %rdx: src (16 blocks)
- * %rcx: iv
- */
- FRAME_BEGIN
-
- vzeroupper;
-
- /* Load input */
- vmovdqu (%rcx), RNOTx;
- vinserti128 $1, (%rdx), RNOT, RA0;
- vmovdqu (0 * 32 + 16)(%rdx), RA1;
- vmovdqu (1 * 32 + 16)(%rdx), RA2;
- vmovdqu (2 * 32 + 16)(%rdx), RA3;
- vmovdqu (3 * 32 + 16)(%rdx), RB0;
- vmovdqu (4 * 32 + 16)(%rdx), RB1;
- vmovdqu (5 * 32 + 16)(%rdx), RB2;
- vmovdqu (6 * 32 + 16)(%rdx), RB3;
-
- /* Update IV */
- vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
- vmovdqu RNOTx, (%rcx);
-
- call __sm4_crypt_blk16;
-
- vpxor (0 * 32)(%rdx), RA0, RA0;
- vpxor (1 * 32)(%rdx), RA1, RA1;
- vpxor (2 * 32)(%rdx), RA2, RA2;
- vpxor (3 * 32)(%rdx), RA3, RA3;
- vpxor (4 * 32)(%rdx), RB0, RB0;
- vpxor (5 * 32)(%rdx), RB1, RB1;
- vpxor (6 * 32)(%rdx), RB2, RB2;
- vpxor (7 * 32)(%rdx), RB3, RB3;
-
- vmovdqu RA0, (0 * 32)(%rsi);
- vmovdqu RA1, (1 * 32)(%rsi);
- vmovdqu RA2, (2 * 32)(%rsi);
- vmovdqu RA3, (3 * 32)(%rsi);
- vmovdqu RB0, (4 * 32)(%rsi);
- vmovdqu RB1, (5 * 32)(%rsi);
- vmovdqu RB2, (6 * 32)(%rsi);
- vmovdqu RB3, (7 * 32)(%rsi);
-
- vzeroall;
- FRAME_END
- RET;
-SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
diff --git a/arch/x86/crypto/sm4-avx.h b/arch/x86/crypto/sm4-avx.h
index 1bceab7516..b5b5e67e40 100644
--- a/arch/x86/crypto/sm4-avx.h
+++ b/arch/x86/crypto/sm4-avx.h
@@ -14,10 +14,6 @@ int sm4_cbc_encrypt(struct skcipher_request *req);
int sm4_avx_cbc_decrypt(struct skcipher_request *req,
unsigned int bsize, sm4_crypt_func func);
-int sm4_cfb_encrypt(struct skcipher_request *req);
-int sm4_avx_cfb_decrypt(struct skcipher_request *req,
- unsigned int bsize, sm4_crypt_func func);
-
int sm4_avx_ctr_crypt(struct skcipher_request *req,
unsigned int bsize, sm4_crypt_func func);
diff --git a/arch/x86/crypto/sm4_aesni_avx2_glue.c b/arch/x86/crypto/sm4_aesni_avx2_glue.c
index 84bc718f49..1148fd4cd5 100644
--- a/arch/x86/crypto/sm4_aesni_avx2_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx2_glue.c
@@ -23,8 +23,6 @@ asmlinkage void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
-asmlinkage void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
- const u8 *src, u8 *iv);
static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
@@ -41,12 +39,6 @@ static int cbc_decrypt(struct skcipher_request *req)
}
-static int cfb_decrypt(struct skcipher_request *req)
-{
- return sm4_avx_cfb_decrypt(req, SM4_CRYPT16_BLOCK_SIZE,
- sm4_aesni_avx2_cfb_dec_blk16);
-}
-
static int ctr_crypt(struct skcipher_request *req)
{
return sm4_avx_ctr_crypt(req, SM4_CRYPT16_BLOCK_SIZE,
@@ -89,24 +81,6 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
.decrypt = cbc_decrypt,
}, {
.base = {
- .cra_name = "__cfb(sm4)",
- .cra_driver_name = "__cfb-sm4-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct sm4_ctx),
- .cra_module = THIS_MODULE,
- },
- .min_keysize = SM4_KEY_SIZE,
- .max_keysize = SM4_KEY_SIZE,
- .ivsize = SM4_BLOCK_SIZE,
- .chunksize = SM4_BLOCK_SIZE,
- .walksize = 16 * SM4_BLOCK_SIZE,
- .setkey = sm4_skcipher_setkey,
- .encrypt = sm4_cfb_encrypt,
- .decrypt = cfb_decrypt,
- }, {
- .base = {
.cra_name = "__ctr(sm4)",
.cra_driver_name = "__ctr-sm4-aesni-avx2",
.cra_priority = 500,
diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
index 7800f77d68..85b4ca78b4 100644
--- a/arch/x86/crypto/sm4_aesni_avx_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx_glue.c
@@ -27,8 +27,6 @@ asmlinkage void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
asmlinkage void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
const u8 *src, u8 *iv);
-asmlinkage void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
- const u8 *src, u8 *iv);
static int sm4_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
@@ -188,116 +186,6 @@ static int cbc_decrypt(struct skcipher_request *req)
sm4_aesni_avx_cbc_dec_blk8);
}
-int sm4_cfb_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes) > 0) {
- u8 keystream[SM4_BLOCK_SIZE];
- const u8 *iv = walk.iv;
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
-
- while (nbytes >= SM4_BLOCK_SIZE) {
- sm4_crypt_block(ctx->rkey_enc, keystream, iv);
- crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE);
- iv = dst;
- src += SM4_BLOCK_SIZE;
- dst += SM4_BLOCK_SIZE;
- nbytes -= SM4_BLOCK_SIZE;
- }
- if (iv != walk.iv)
- memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
-
- /* tail */
- if (walk.nbytes == walk.total && nbytes > 0) {
- sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
- crypto_xor_cpy(dst, src, keystream, nbytes);
- nbytes = 0;
- }
-
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(sm4_cfb_encrypt);
-
-int sm4_avx_cfb_decrypt(struct skcipher_request *req,
- unsigned int bsize, sm4_crypt_func func)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes) > 0) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
-
- kernel_fpu_begin();
-
- while (nbytes >= bsize) {
- func(ctx->rkey_enc, dst, src, walk.iv);
- dst += bsize;
- src += bsize;
- nbytes -= bsize;
- }
-
- while (nbytes >= SM4_BLOCK_SIZE) {
- u8 keystream[SM4_BLOCK_SIZE * 8];
- unsigned int nblocks = min(nbytes >> 4, 8u);
-
- memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
- if (nblocks > 1)
- memcpy(&keystream[SM4_BLOCK_SIZE], src,
- (nblocks - 1) * SM4_BLOCK_SIZE);
- memcpy(walk.iv, src + (nblocks - 1) * SM4_BLOCK_SIZE,
- SM4_BLOCK_SIZE);
-
- sm4_aesni_avx_crypt8(ctx->rkey_enc, keystream,
- keystream, nblocks);
-
- crypto_xor_cpy(dst, src, keystream,
- nblocks * SM4_BLOCK_SIZE);
- dst += nblocks * SM4_BLOCK_SIZE;
- src += nblocks * SM4_BLOCK_SIZE;
- nbytes -= nblocks * SM4_BLOCK_SIZE;
- }
-
- kernel_fpu_end();
-
- /* tail */
- if (walk.nbytes == walk.total && nbytes > 0) {
- u8 keystream[SM4_BLOCK_SIZE];
-
- sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv);
- crypto_xor_cpy(dst, src, keystream, nbytes);
- nbytes = 0;
- }
-
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(sm4_avx_cfb_decrypt);
-
-static int cfb_decrypt(struct skcipher_request *req)
-{
- return sm4_avx_cfb_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
- sm4_aesni_avx_cfb_dec_blk8);
-}
-
int sm4_avx_ctr_crypt(struct skcipher_request *req,
unsigned int bsize, sm4_crypt_func func)
{
@@ -408,24 +296,6 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
.decrypt = cbc_decrypt,
}, {
.base = {
- .cra_name = "__cfb(sm4)",
- .cra_driver_name = "__cfb-sm4-aesni-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct sm4_ctx),
- .cra_module = THIS_MODULE,
- },
- .min_keysize = SM4_KEY_SIZE,
- .max_keysize = SM4_KEY_SIZE,
- .ivsize = SM4_BLOCK_SIZE,
- .chunksize = SM4_BLOCK_SIZE,
- .walksize = 8 * SM4_BLOCK_SIZE,
- .setkey = sm4_skcipher_setkey,
- .encrypt = sm4_cfb_encrypt,
- .decrypt = cfb_decrypt,
- }, {
- .base = {
.cra_name = "__ctr(sm4)",
.cra_driver_name = "__ctr-sm4-aesni-avx",
.cra_priority = 400,