commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/crypto/caam
parent     Initial commit.
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/crypto/caam')
41 files changed, 26683 insertions, 0 deletions
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
new file mode 100644
index 000000000..84ea7cba5
--- /dev/null
+++ b/drivers/crypto/caam/Kconfig
@@ -0,0 +1,177 @@
+# SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_FSL_CAAM_COMMON
+	tristate
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	tristate
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+	tristate
+
+config CRYPTO_DEV_FSL_CAAM
+	tristate "Freescale CAAM-Multicore platform driver backend"
+	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+	select SOC_BUS
+	select CRYPTO_DEV_FSL_CAAM_COMMON
+	imply FSL_MC_BUS
+	help
+	  Enables the driver module for Freescale's Cryptographic Accelerator
+	  and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+	  This module creates job ring devices, and configures h/w
+	  to operate as a DPAA component automatically, depending
+	  on h/w feature availability.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called caam.
+
+if CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+	bool "Enable debug output in CAAM driver"
+	help
+	  Selecting this will enable printing of various debug
+	  information in the CAAM driver.
+
+menuconfig CRYPTO_DEV_FSL_CAAM_JR
+	tristate "Freescale CAAM Job Ring driver backend"
+	select CRYPTO_ENGINE
+	default y
+	help
+	  Enables the driver module for Job Rings which are part of
+	  Freescale's Cryptographic Accelerator
+	  and Assurance Module (CAAM). This module adds a job ring operation
+	  interface.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called caam_jr.
+
+if CRYPTO_DEV_FSL_CAAM_JR
+
+config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+	int "Job Ring size"
+	range 2 9
+	default "9"
+	help
+	  Select size of Job Rings as a power of 2, within the
+	  range 2-9 (ring size 4-512).
+	  Examples:
+		2 => 4
+		3 => 8
+		4 => 16
+		5 => 32
+		6 => 64
+		7 => 128
+		8 => 256
+		9 => 512
+
+config CRYPTO_DEV_FSL_CAAM_INTC
+	bool "Job Ring interrupt coalescing"
+	help
+	  Enable the Job Ring's interrupt coalescing feature.
+
+	  Note: the driver already provides adequate
+	  interrupt coalescing in software.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
+	int "Job Ring interrupt coalescing count threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 255
+	default 255
+	help
+	  Select number of descriptor completions to queue before
+	  raising an interrupt, in the range 1-255. Note that a selection
+	  of 1 functionally defeats the coalescing feature, and a selection
+	  equal or greater than the job ring size will force timeouts.
+
+config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
+	int "Job Ring interrupt coalescing timer threshold"
+	depends on CRYPTO_DEV_FSL_CAAM_INTC
+	range 1 65535
+	default 2048
+	help
+	  Select number of bus clocks/64 to timeout in the case that one or
+	  more descriptor completions are queued without reaching the count
+	  threshold. Range is 1-65535.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+	bool "Register algorithm implementations with the Crypto API"
+	default y
+	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_SKCIPHER
+	select CRYPTO_LIB_DES
+	select CRYPTO_XTS
+	help
+	  Selecting this will offload crypto for users of the
+	  scatterlist crypto API (such as the linux native IPSec
+	  stack) to the SEC4 via job ring.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+	bool "Queue Interface as Crypto API backend"
+	depends on FSL_DPAA && NET
+	default y
+	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	select CRYPTO_AUTHENC
+	select CRYPTO_SKCIPHER
+	select CRYPTO_DES
+	select CRYPTO_XTS
+	help
+	  Selecting this will use CAAM Queue Interface (QI) for sending
+	  & receiving crypto jobs to/from CAAM. This gives better performance
+	  than job ring interface when the number of cores are more than the
+	  number of job rings assigned to the kernel. The number of portals
+	  assigned to the kernel should also be more than the number of
+	  job rings.
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API
+	bool "Register hash algorithm implementations with Crypto API"
+	default y
+	select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+	select CRYPTO_HASH
+	help
+	  Selecting this will offload ahash for users of the
+	  scatterlist crypto API to the SEC4 via job ring.
+
+config CRYPTO_DEV_FSL_CAAM_PKC_API
+	bool "Register public key cryptography implementations with Crypto API"
+	default y
+	select CRYPTO_RSA
+	help
+	  Selecting this will allow SEC Public key support for RSA.
+	  Supported cryptographic primitives: encryption, decryption,
+	  signature and verification.
+
+config CRYPTO_DEV_FSL_CAAM_RNG_API
+	bool "Register caam device for hwrng API"
+	default y
+	select CRYPTO_RNG
+	select HW_RANDOM
+	help
+	  Selecting this will register the SEC4 hardware rng to
+	  the hw_random API for supplying the kernel entropy pool.
+
+endif # CRYPTO_DEV_FSL_CAAM_JR
+
+endif # CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+	tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+	depends on FSL_MC_DPIO
+	depends on NETDEVICES
+	select CRYPTO_DEV_FSL_CAAM_COMMON
+	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+	select CRYPTO_SKCIPHER
+	select CRYPTO_AUTHENC
+	select CRYPTO_AEAD
+	select CRYPTO_HASH
+	select CRYPTO_DES
+	select CRYPTO_XTS
+	help
+	  CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+	  It handles DPSECI DPAA2 objects that sit on the Management Complex
+	  (MC) fsl-mc bus.
+
+	  To compile this as a module, choose M here: the module
+	  will be called dpaa2_caam.
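The CRYPTO_DEV_FSL_CAAM_CRYPTO_API help text above says the job-ring backend serves users of the scatterlist crypto API. Below is a minimal sketch of such a user; it is not part of this commit, the function name example_cbc_aes_one_block is made up, and buf, key and iv are assumed to point at kmalloc'd (DMA-able) memory. With this option enabled, a request for "cbc(aes)" typically resolves to the CAAM implementation ("cbc-aes-caam", registered at priority 3000 in caamalg.c), since the crypto core selects the highest-priority implementation of a given algorithm name.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Encrypt one 16-byte block of buf in place with AES-128-CBC (sketch only). */
static int example_cbc_aes_one_block(u8 *buf, const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Picks the highest-priority "cbc(aes)" provider, e.g. cbc-aes-caam. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, 16);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, iv);

	/* Wait synchronously for the (possibly hardware-offloaded) request. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}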
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
new file mode 100644
index 000000000..3570286eb
--- /dev/null
+++ b/drivers/crypto/caam/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the CAAM backend and dependent components
+#
+ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
+	ccflags-y := -DDEBUG
+endif
+
+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
+
+caam-y := ctrl.o
+caam_jr-y := jr.o key_gen.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
+
+caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
+ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
+	ccflags-y += -DCONFIG_CAAM_QI
+endif
+
+caam-$(CONFIG_DEBUG_FS) += debugfs.o
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
+dpaa2_caam-$(CONFIG_DEBUG_FS) += dpseci-debugfs.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
new file mode 100644
index 000000000..6b513555d
--- /dev/null
+++ b/drivers/crypto/caam/caamalg.c
@@ -0,0 +1,3655 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * caam - Freescale FSL CAAM support for crypto API
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016-2019 NXP
+ *
+ * Based on talitos crypto API driver.
+ *
+ * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
+ *
+ * ---------------                     ---------------
+ * | JobDesc #1  |-------------------->|  ShareDesc  |
+ * | *(packet 1) |                     |   (PDB)     |
+ * ---------------      |------------->|  (hashKey)  |
+ *       .              |              | (cipherKey) |
+ *       .              |    |-------->| (operation) |
+ * ---------------      |    |         ---------------
+ * | JobDesc #2  |------|    |
+ * | *(packet 2) |           |
+ * ---------------           |
+ *       .                   |
+ *       .                   |
+ * ---------------           |
+ * | JobDesc #3  |------------
+ * | *(packet 3) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ * + * So, a job desc looks like: + * + * --------------------- + * | Header | + * | ShareDesc Pointer | + * | SEQ_OUT_PTR | + * | (output buffer) | + * | (output length) | + * | SEQ_IN_PTR | + * | (input buffer) | + * | (input length) | + * --------------------- + */ + +#include "compat.h" + +#include "regs.h" +#include "intern.h" +#include "desc_constr.h" +#include "jr.h" +#include "error.h" +#include "sg_sw_sec4.h" +#include "key_gen.h" +#include "caamalg_desc.h" +#include <crypto/engine.h> +#include <crypto/xts.h> +#include <asm/unaligned.h> + +/* + * crypto alg + */ +#define CAAM_CRA_PRIORITY 3000 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */ +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ + CTR_RFC3686_NONCE_SIZE + \ + SHA512_DIGEST_SIZE * 2) + +#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) +#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ + CAAM_CMD_SZ * 4) +#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ + CAAM_CMD_SZ * 5) + +#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6) + +#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) + +struct caam_alg_entry { + int class1_alg_type; + int class2_alg_type; + bool rfc3686; + bool geniv; + bool nodkp; +}; + +struct caam_aead_alg { + struct aead_alg aead; + struct caam_alg_entry caam; + bool registered; +}; + +struct caam_skcipher_alg { + struct skcipher_alg skcipher; + struct caam_alg_entry caam; + bool registered; +}; + +/* + * per-session context + */ +struct caam_ctx { + struct crypto_engine_ctx enginectx; + u32 sh_desc_enc[DESC_MAX_USED_LEN]; + u32 sh_desc_dec[DESC_MAX_USED_LEN]; + u8 key[CAAM_MAX_KEY_SIZE]; + dma_addr_t sh_desc_enc_dma; + dma_addr_t sh_desc_dec_dma; + dma_addr_t key_dma; + enum dma_data_direction dir; + struct device *jrdev; + struct alginfo adata; + struct alginfo cdata; + unsigned int authsize; + bool xts_key_fallback; + struct crypto_skcipher *fallback; +}; + +struct caam_skcipher_req_ctx { + struct skcipher_edesc *edesc; + struct skcipher_request fallback_req; +}; + +struct caam_aead_req_ctx { + struct aead_edesc *edesc; +}; + +static int aead_null_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + u32 *desc; + int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - + ctx->adata.keylen_pad; + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) { + ctx->adata.key_inline = true; + ctx->adata.key_virt = ctx->key; + } else { + ctx->adata.key_inline = false; + ctx->adata.key_dma = ctx->key_dma; + } + + /* aead_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize, + ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) { + ctx->adata.key_inline = true; + ctx->adata.key_virt = ctx->key; + } else { + ctx->adata.key_inline = false; + ctx->adata.key_dma = ctx->key_dma; + } + + /* aead_decrypt shared descriptor */ + desc = ctx->sh_desc_dec; + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize, + ctrlpriv->era); + dma_sync_single_for_device(jrdev, 
ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + return 0; +} + +static int aead_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + struct caam_aead_alg, aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + u32 ctx1_iv_off = 0; + u32 *desc, *nonce = NULL; + u32 inl_mask; + unsigned int data_len[2]; + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = alg->caam.rfc3686; + + if (!ctx->authsize) + return 0; + + /* NULL encryption / decryption */ + if (!ctx->cdata.keylen) + return aead_null_set_sh_desc(aead); + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + if (ctr_mode) + ctx1_iv_off = 16; + + /* + * RFC3686 specific: + * CONTEXT1[255:128] = {NONCE, IV, COUNTER} + */ + if (is_rfc3686) { + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); + } + + /* + * In case |user key| > |derived key|, using DKP<imm,imm> + * would result in invalid opcodes (last bytes of user key) in + * the resulting descriptor. Use DKP<ptr,imm> instead => both + * virtual and dma key addresses are needed. + */ + ctx->adata.key_virt = ctx->key; + ctx->adata.key_dma = ctx->key_dma; + + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + + data_len[0] = ctx->adata.keylen_pad; + data_len[1] = ctx->cdata.keylen; + + if (alg->caam.geniv) + goto skip_enc; + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (desc_inline_query(DESC_AEAD_ENC_LEN + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, + ARRAY_SIZE(data_len)) < 0) + return -EINVAL; + + ctx->adata.key_inline = !!(inl_mask & 1); + ctx->cdata.key_inline = !!(inl_mask & 2); + + /* aead_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, + false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + +skip_enc: + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (desc_inline_query(DESC_AEAD_DEC_LEN + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, + ARRAY_SIZE(data_len)) < 0) + return -EINVAL; + + ctx->adata.key_inline = !!(inl_mask & 1); + ctx->cdata.key_inline = !!(inl_mask & 2); + + /* aead_decrypt shared descriptor */ + desc = ctx->sh_desc_dec; + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, alg->caam.geniv, is_rfc3686, + nonce, ctx1_iv_off, false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + if (!alg->caam.geniv) + goto skip_givenc; + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (desc_inline_query(DESC_AEAD_GIVENC_LEN + + (is_rfc3686 ? 
DESC_AEAD_CTR_RFC3686_LEN : 0), + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, + ARRAY_SIZE(data_len)) < 0) + return -EINVAL; + + ctx->adata.key_inline = !!(inl_mask & 1); + ctx->cdata.key_inline = !!(inl_mask & 2); + + /* aead_givencrypt shared descriptor */ + desc = ctx->sh_desc_enc; + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, is_rfc3686, nonce, + ctx1_iv_off, false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + +skip_givenc: + return 0; +} + +static int aead_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + aead_set_sh_desc(authenc); + + return 0; +} + +static int gcm_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc; + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - + ctx->cdata.keylen; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* + * AES GCM encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_GCM_ENC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + desc = ctx->sh_desc_enc; + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_GCM_DEC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + desc = ctx->sh_desc_dec; + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + return 0; +} + +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_gcm_check_authsize(authsize); + if (err) + return err; + + ctx->authsize = authsize; + gcm_set_sh_desc(authenc); + + return 0; +} + +static int rfc4106_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc; + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - + ctx->cdata.keylen; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* + * RFC4106 encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_RFC4106_ENC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + desc = ctx->sh_desc_enc; + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, + false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_RFC4106_DEC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } 
else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + desc = ctx->sh_desc_dec; + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, + false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + return 0; +} + +static int rfc4106_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_rfc4106_check_authsize(authsize); + if (err) + return err; + + ctx->authsize = authsize; + rfc4106_set_sh_desc(authenc); + + return 0; +} + +static int rfc4543_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc; + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - + ctx->cdata.keylen; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* + * RFC4543 encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_RFC4543_ENC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + desc = ctx->sh_desc_enc; + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, + false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_RFC4543_DEC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + desc = ctx->sh_desc_dec; + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, + false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + return 0; +} + +static int rfc4543_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + if (authsize != 16) + return -EINVAL; + + ctx->authsize = authsize; + rfc4543_set_sh_desc(authenc); + + return 0; +} + +static int chachapoly_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + desc = ctx->sh_desc_enc; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, true, false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + + desc = ctx->sh_desc_dec; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, false, false); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + return 0; +} + +static int chachapoly_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + + if (authsize != POLY1305_DIGEST_SIZE) + return -EINVAL; + + ctx->authsize = authsize; + return chachapoly_set_sh_desc(aead); +} + +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; + + if (keylen != CHACHA_KEY_SIZE + 
saltlen) + return -EINVAL; + + memcpy(ctx->key, key, keylen); + ctx->cdata.key_virt = ctx->key; + ctx->cdata.keylen = keylen - saltlen; + + return chachapoly_set_sh_desc(aead); +} + +static int aead_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + struct crypto_authenc_keys keys; + int ret = 0; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + + dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", + keys.authkeylen + keys.enckeylen, keys.enckeylen, + keys.authkeylen); + print_hex_dump_debug("key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + /* + * If DKP is supported, use it in the shared descriptor to generate + * the split key. + */ + if (ctrlpriv->era >= 6) { + ctx->adata.keylen = keys.authkeylen; + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & + OP_ALG_ALGSEL_MASK); + + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) + goto badkey; + + memcpy(ctx->key, keys.authkey, keys.authkeylen); + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, + keys.enckeylen); + dma_sync_single_for_device(jrdev, ctx->key_dma, + ctx->adata.keylen_pad + + keys.enckeylen, ctx->dir); + goto skip_split_key; + } + + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, + keys.authkeylen, CAAM_MAX_KEY_SIZE - + keys.enckeylen); + if (ret) { + goto badkey; + } + + /* postpend encryption key to auth split key */ + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + + keys.enckeylen, ctx->dir); + + print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, + ctx->adata.keylen_pad + keys.enckeylen, 1); + +skip_split_key: + ctx->cdata.keylen = keys.enckeylen; + memzero_explicit(&keys, sizeof(keys)); + return aead_set_sh_desc(aead); +badkey: + memzero_explicit(&keys, sizeof(keys)); + return -EINVAL; +} + +static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct crypto_authenc_keys keys; + int err; + + err = crypto_authenc_extractkeys(&keys, key, keylen); + if (unlikely(err)) + return err; + + err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: + aead_setkey(aead, key, keylen); + + memzero_explicit(&keys, sizeof(keys)); + return err; +} + +static int gcm_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int err; + + err = aes_check_keylen(keylen); + if (err) + return err; + + print_hex_dump_debug("key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + memcpy(ctx->key, key, keylen); + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); + ctx->cdata.keylen = keylen; + + return gcm_set_sh_desc(aead); +} + +static int rfc4106_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int err; + + err = aes_check_keylen(keylen - 4); + if (err) + return err; + + print_hex_dump_debug("key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + memcpy(ctx->key, key, keylen); + + /* + * The last four bytes of the key material are used as the salt value + * in the nonce. 
Update the AES key length. + */ + ctx->cdata.keylen = keylen - 4; + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, + ctx->dir); + return rfc4106_set_sh_desc(aead); +} + +static int rfc4543_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int err; + + err = aes_check_keylen(keylen - 4); + if (err) + return err; + + print_hex_dump_debug("key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + memcpy(ctx->key, key, keylen); + + /* + * The last four bytes of the key material are used as the salt value + * in the nonce. Update the AES key length. + */ + ctx->cdata.keylen = keylen - 4; + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, + ctx->dir); + return rfc4543_set_sh_desc(aead); +} + +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + unsigned int keylen, const u32 ctx1_iv_off) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_skcipher_alg *alg = + container_of(crypto_skcipher_alg(skcipher), typeof(*alg), + skcipher); + struct device *jrdev = ctx->jrdev; + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + u32 *desc; + const bool is_rfc3686 = alg->caam.rfc3686; + + print_hex_dump_debug("key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; + + /* skcipher_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, + ctx1_iv_off); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + + /* skcipher_decrypt shared descriptor */ + desc = ctx->sh_desc_dec; + cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, + ctx1_iv_off); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + return 0; +} + +static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + int err; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, 0); +} + +static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * RFC3686 specific: + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} + * | *key = {KEY, NONCE} + */ + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + keylen -= CTR_RFC3686_NONCE_SIZE; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + ctx1_iv_off = 16; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int des_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); +} + +static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des3_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); +} + 
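/*
 * Editor's sketch, not part of this commit: a summary of the key-blob
 * layouts the setkey handlers above rely on.
 *
 * aead_setkey() packs ctx->key as
 *
 *     | split (padded) auth key, adata.keylen_pad bytes | enc key |
 *
 * and for rfc3686 authenc templates the last CTR_RFC3686_NONCE_SIZE (4)
 * bytes of the enc key are the nonce that aead_set_sh_desc() points at.
 *
 * rfc3686_skcipher_setkey() receives the user key as {AES key, nonce};
 * it strips the nonce from keylen before the AES key-length check, and
 * the shared descriptor later loads {nonce, IV, counter} into CONTEXT1
 * starting at byte offset 16 (ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE).
 *
 * The helper below only illustrates that split; its name is made up and
 * nothing in the driver calls it.
 */
static inline const u8 *rfc3686_nonce_of(const u8 *key, unsigned int keylen,
					 unsigned int *aes_keylen)
{
	*aes_keylen = keylen - CTR_RFC3686_NONCE_SIZE;	/* 16, 24 or 32 */
	return key + *aes_keylen;			/* trailing 4-byte nonce */
}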
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct device *jrdev = ctx->jrdev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + u32 *desc; + int err; + + err = xts_verify_key(skcipher, key, keylen); + if (err) { + dev_dbg(jrdev, "key size mismatch\n"); + return err; + } + + if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) + ctx->xts_key_fallback = true; + + if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) { + err = crypto_skcipher_setkey(ctx->fallback, key, keylen); + if (err) + return err; + } + + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; + + /* xts_skcipher_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), ctx->dir); + + /* xts_skcipher_decrypt shared descriptor */ + desc = ctx->sh_desc_dec; + cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), ctx->dir); + + return 0; +} + +/* + * aead_edesc - s/w-extended aead descriptor + * @src_nents: number of segments in input s/w scatterlist + * @dst_nents: number of segments in output s/w scatterlist + * @mapped_src_nents: number of segments in input h/w link table + * @mapped_dst_nents: number of segments in output h/w link table + * @sec4_sg_bytes: length of dma mapped sec4_sg space + * @bklog: stored to determine if the request needs backlog + * @sec4_sg_dma: bus physical mapped address of h/w link table + * @sec4_sg: pointer to h/w link table + * @hw_desc: the h/w job descriptor followed by any referenced link tables + */ +struct aead_edesc { + int src_nents; + int dst_nents; + int mapped_src_nents; + int mapped_dst_nents; + int sec4_sg_bytes; + bool bklog; + dma_addr_t sec4_sg_dma; + struct sec4_sg_entry *sec4_sg; + u32 hw_desc[]; +}; + +/* + * skcipher_edesc - s/w-extended skcipher descriptor + * @src_nents: number of segments in input s/w scatterlist + * @dst_nents: number of segments in output s/w scatterlist + * @mapped_src_nents: number of segments in input h/w link table + * @mapped_dst_nents: number of segments in output h/w link table + * @iv_dma: dma address of iv for checking continuity and link table + * @sec4_sg_bytes: length of dma mapped sec4_sg space + * @bklog: stored to determine if the request needs backlog + * @sec4_sg_dma: bus physical mapped address of h/w link table + * @sec4_sg: pointer to h/w link table + * @hw_desc: the h/w job descriptor followed by any referenced link tables + * and IV + */ +struct skcipher_edesc { + int src_nents; + int dst_nents; + int mapped_src_nents; + int mapped_dst_nents; + dma_addr_t iv_dma; + int sec4_sg_bytes; + bool bklog; + dma_addr_t sec4_sg_dma; + struct sec4_sg_entry *sec4_sg; + u32 hw_desc[]; +}; + +static void caam_unmap(struct device *dev, struct scatterlist *src, + struct scatterlist *dst, int src_nents, + int dst_nents, + dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, + int sec4_sg_bytes) +{ + if (dst != src) { + if (src_nents) + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); + if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + } else { + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); + } + + if (iv_dma) + dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL); + if (sec4_sg_bytes) + dma_unmap_single(dev, sec4_sg_dma, 
sec4_sg_bytes, + DMA_TO_DEVICE); +} + +static void aead_unmap(struct device *dev, + struct aead_edesc *edesc, + struct aead_request *req) +{ + caam_unmap(dev, req->src, req->dst, + edesc->src_nents, edesc->dst_nents, 0, 0, + edesc->sec4_sg_dma, edesc->sec4_sg_bytes); +} + +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, + struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + int ivsize = crypto_skcipher_ivsize(skcipher); + + caam_unmap(dev, req->src, req->dst, + edesc->src_nents, edesc->dst_nents, + edesc->iv_dma, ivsize, + edesc->sec4_sg_dma, edesc->sec4_sg_bytes); +} + +static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct aead_request *req = context; + struct caam_aead_req_ctx *rctx = aead_request_ctx(req); + struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); + struct aead_edesc *edesc; + int ecode = 0; + bool has_bklog; + + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + + edesc = rctx->edesc; + has_bklog = edesc->bklog; + + if (err) + ecode = caam_jr_strstatus(jrdev, err); + + aead_unmap(jrdev, edesc, req); + + kfree(edesc); + + /* + * If no backlog flag, the completion of the request is done + * by CAAM, not crypto engine. + */ + if (!has_bklog) + aead_request_complete(req, ecode); + else + crypto_finalize_aead_request(jrp->engine, req, ecode); +} + +static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct skcipher_request *req = context; + struct skcipher_edesc *edesc; + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); + int ivsize = crypto_skcipher_ivsize(skcipher); + int ecode = 0; + bool has_bklog; + + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + + edesc = rctx->edesc; + has_bklog = edesc->bklog; + if (err) + ecode = caam_jr_strstatus(jrdev, err); + + skcipher_unmap(jrdev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->iv) to the last + * ciphertext block (CBC mode) or last counter (CTR mode). + * This is used e.g. by the CTS mode. + */ + if (ivsize && !ecode) { + memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, + ivsize); + + print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, + ivsize, 1); + } + + caam_dump_sg("dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); + + kfree(edesc); + + /* + * If no backlog flag, the completion of the request is done + * by CAAM, not crypto engine. + */ + if (!has_bklog) + skcipher_request_complete(req, ecode); + else + crypto_finalize_skcipher_request(jrp->engine, req, ecode); +} + +/* + * Fill in aead job descriptor + */ +static void init_aead_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int authsize = ctx->authsize; + u32 *desc = edesc->hw_desc; + u32 out_options, in_options; + dma_addr_t dst_dma, src_dma; + int len, sec4_sg_index = 0; + dma_addr_t ptr; + u32 *sh_desc; + + sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; + ptr = encrypt ? 
ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (all_contig) { + src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) : + 0; + in_options = 0; + } else { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->mapped_src_nents; + in_options = LDST_SGF; + } + + append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen, + in_options); + + dst_dma = src_dma; + out_options = in_options; + + if (unlikely(req->src != req->dst)) { + if (!edesc->mapped_dst_nents) { + dst_dma = 0; + out_options = 0; + } else if (edesc->mapped_dst_nents == 1) { + dst_dma = sg_dma_address(req->dst); + out_options = 0; + } else { + dst_dma = edesc->sec4_sg_dma + + sec4_sg_index * + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + } + + if (encrypt) + append_seq_out_ptr(desc, dst_dma, + req->assoclen + req->cryptlen + authsize, + out_options); + else + append_seq_out_ptr(desc, dst_dma, + req->assoclen + req->cryptlen - authsize, + out_options); +} + +static void init_gcm_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc = edesc->hw_desc; + bool generic_gcm = (ivsize == GCM_AES_IV_SIZE); + unsigned int last; + + init_aead_job(req, edesc, all_contig, encrypt); + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); + + /* BUG This should not be specific to generic GCM. */ + last = 0; + if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen)) + last = FIFOLD_TYPE_LAST1; + + /* Read GCM IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last); + /* Append Salt */ + if (!generic_gcm) + append_data(desc, ctx->key + ctx->cdata.keylen, 4); + /* Append IV */ + append_data(desc, req->iv, ivsize); + /* End of blank commands */ +} + +static void init_chachapoly_job(struct aead_request *req, + struct aead_edesc *edesc, bool all_contig, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int assoclen = req->assoclen; + u32 *desc = edesc->hw_desc; + u32 ctx_iv_off = 4; + + init_aead_job(req, edesc, all_contig, encrypt); + + if (ivsize != CHACHAPOLY_IV_SIZE) { + /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */ + ctx_iv_off += 4; + + /* + * The associated data comes already with the IV but we need + * to skip it when we authenticate or encrypt... + */ + assoclen -= ivsize; + } + + append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen); + + /* + * For IPsec load the IV further in the same register. 
+ * For RFC7539 simply load the 12 bytes nonce in a single operation + */ + append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ctx_iv_off << LDST_OFFSET_SHIFT); +} + +static void init_authenc_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + struct caam_aead_alg, aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = alg->caam.rfc3686; + u32 *desc = edesc->hw_desc; + u32 ivoffset = 0; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + if (ctr_mode) + ivoffset = 16; + + /* + * RFC3686 specific: + * CONTEXT1[255:128] = {NONCE, IV, COUNTER} + */ + if (is_rfc3686) + ivoffset = 16 + CTR_RFC3686_NONCE_SIZE; + + init_aead_job(req, edesc, all_contig, encrypt); + + /* + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports + * having DPOVRD as destination. + */ + if (ctrlpriv->era < 3) + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); + else + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen); + + if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) + append_load_as_imm(desc, req->iv, ivsize, + LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + (ivoffset << LDST_OFFSET_SHIFT)); +} + +/* + * Fill in skcipher job descriptor + */ +static void init_skcipher_job(struct skcipher_request *req, + struct skcipher_edesc *edesc, + const bool encrypt) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct device *jrdev = ctx->jrdev; + int ivsize = crypto_skcipher_ivsize(skcipher); + u32 *desc = edesc->hw_desc; + u32 *sh_desc; + u32 in_options = 0, out_options = 0; + dma_addr_t src_dma, dst_dma, ptr; + int len, sec4_sg_index = 0; + + print_hex_dump_debug("presciv@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); + dev_dbg(jrdev, "asked=%d, cryptlen%d\n", + (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen); + + caam_dump_sg("src @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + edesc->src_nents > 1 ? 100 : req->cryptlen, 1); + + sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; + ptr = encrypt ? 
ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (ivsize || edesc->mapped_src_nents > 1) { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index = edesc->mapped_src_nents + !!ivsize; + in_options = LDST_SGF; + } else { + src_dma = sg_dma_address(req->src); + } + + append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options); + + if (likely(req->src == req->dst)) { + dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); + out_options = in_options; + } else if (!ivsize && edesc->mapped_dst_nents == 1) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->sec4_sg_dma + sec4_sg_index * + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + + append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options); +} + +/* + * allocate and map the aead extended descriptor + */ +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, + int desc_bytes, bool *all_contig_ptr, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + struct caam_aead_req_ctx *rctx = aead_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; + int src_len, dst_len = 0; + struct aead_edesc *edesc; + int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; + unsigned int authsize = ctx->authsize; + + if (unlikely(req->dst != req->src)) { + src_len = req->assoclen + req->cryptlen; + dst_len = src_len + (encrypt ? authsize : (-authsize)); + + src_nents = sg_nents_for_len(req->src, src_len); + if (unlikely(src_nents < 0)) { + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", + src_len); + return ERR_PTR(src_nents); + } + + dst_nents = sg_nents_for_len(req->dst, dst_len); + if (unlikely(dst_nents < 0)) { + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", + dst_len); + return ERR_PTR(dst_nents); + } + } else { + src_len = req->assoclen + req->cryptlen + + (encrypt ? authsize : 0); + + src_nents = sg_nents_for_len(req->src, src_len); + if (unlikely(src_nents < 0)) { + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", + src_len); + return ERR_PTR(src_nents); + } + } + + if (likely(req->src == req->dst)) { + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_BIDIRECTIONAL); + if (unlikely(!mapped_src_nents)) { + dev_err(jrdev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + } else { + /* Cover also the case of null (zero length) input data */ + if (src_nents) { + mapped_src_nents = dma_map_sg(jrdev, req->src, + src_nents, DMA_TO_DEVICE); + if (unlikely(!mapped_src_nents)) { + dev_err(jrdev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_src_nents = 0; + } + + /* Cover also the case of null (zero length) output data */ + if (dst_nents) { + mapped_dst_nents = dma_map_sg(jrdev, req->dst, + dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(jrdev, "unable to map destination\n"); + dma_unmap_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; + } + } + + /* + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond + * the end of the table by allocating more S/G entries. + */ + sec4_sg_len = mapped_src_nents > 1 ? 
mapped_src_nents : 0; + if (mapped_dst_nents > 1) + sec4_sg_len += pad_sg_nents(mapped_dst_nents); + else + sec4_sg_len = pad_sg_nents(sec4_sg_len); + + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + GFP_DMA | flags); + if (!edesc) { + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->mapped_src_nents = mapped_src_nents; + edesc->mapped_dst_nents = mapped_dst_nents; + edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + + desc_bytes; + + rctx->edesc = edesc; + + *all_contig_ptr = !(mapped_src_nents > 1); + + sec4_sg_index = 0; + if (mapped_src_nents > 1) { + sg_to_sec4_sg_last(req->src, src_len, + edesc->sec4_sg + sec4_sg_index, 0); + sec4_sg_index += mapped_src_nents; + } + if (mapped_dst_nents > 1) { + sg_to_sec4_sg_last(req->dst, dst_len, + edesc->sec4_sg + sec4_sg_index, 0); + } + + if (!sec4_sg_bytes) + return edesc; + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + aead_unmap(jrdev, edesc, req); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + + edesc->sec4_sg_bytes = sec4_sg_bytes; + + return edesc; +} + +static int aead_enqueue_req(struct device *jrdev, struct aead_request *req) +{ + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); + struct caam_aead_req_ctx *rctx = aead_request_ctx(req); + struct aead_edesc *edesc = rctx->edesc; + u32 *desc = edesc->hw_desc; + int ret; + + /* + * Only the backlog request are sent to crypto-engine since the others + * can be handled by CAAM, if free, especially since JR has up to 1024 + * entries (more than the 10 entries from crypto-engine). 
+ */ + if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + ret = crypto_transfer_aead_request_to_engine(jrpriv->engine, + req); + else + ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req); + + if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { + aead_unmap(jrdev, edesc, req); + kfree(rctx->edesc); + } + + return ret; +} + +static inline int chachapoly_crypt(struct aead_request *req, bool encrypt) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig, + encrypt); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + desc = edesc->hw_desc; + + init_chachapoly_job(req, edesc, all_contig, encrypt); + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + return aead_enqueue_req(jrdev, req); +} + +static int chachapoly_encrypt(struct aead_request *req) +{ + return chachapoly_crypt(req, true); +} + +static int chachapoly_decrypt(struct aead_request *req) +{ + return chachapoly_crypt(req, false); +} + +static inline int aead_crypt(struct aead_request *req, bool encrypt) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, + &all_contig, encrypt); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor */ + init_authenc_job(req, edesc, all_contig, encrypt); + + print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); + + return aead_enqueue_req(jrdev, req); +} + +static int aead_encrypt(struct aead_request *req) +{ + return aead_crypt(req, true); +} + +static int aead_decrypt(struct aead_request *req) +{ + return aead_crypt(req, false); +} + +static int aead_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct aead_request *req = aead_request_cast(areq); + struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct caam_aead_req_ctx *rctx = aead_request_ctx(req); + u32 *desc = rctx->edesc->hw_desc; + int ret; + + rctx->edesc->bklog = true; + + ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req); + + if (ret != -EINPROGRESS) { + aead_unmap(ctx->jrdev, rctx->edesc, req); + kfree(rctx->edesc); + } else { + ret = 0; + } + + return ret; +} + +static inline int gcm_crypt(struct aead_request *req, bool encrypt) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, + encrypt); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor */ + init_gcm_job(req, edesc, all_contig, encrypt); + + print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); + + return aead_enqueue_req(jrdev, req); +} + +static int gcm_encrypt(struct aead_request *req) +{ + return gcm_crypt(req, true); +} + +static int gcm_decrypt(struct aead_request *req) +{ + return gcm_crypt(req, false); +} + +static int 
ipsec_gcm_encrypt(struct aead_request *req) +{ + return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req); +} + +static int ipsec_gcm_decrypt(struct aead_request *req) +{ + return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req); +} + +/* + * allocate and map the skcipher extended descriptor for skcipher + */ +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, + int desc_bytes) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; + struct skcipher_edesc *edesc; + dma_addr_t iv_dma = 0; + u8 *iv; + int ivsize = crypto_skcipher_ivsize(skcipher); + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; + + src_nents = sg_nents_for_len(req->src, req->cryptlen); + if (unlikely(src_nents < 0)) { + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", + req->cryptlen); + return ERR_PTR(src_nents); + } + + if (req->dst != req->src) { + dst_nents = sg_nents_for_len(req->dst, req->cryptlen); + if (unlikely(dst_nents < 0)) { + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", + req->cryptlen); + return ERR_PTR(dst_nents); + } + } + + if (likely(req->src == req->dst)) { + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_BIDIRECTIONAL); + if (unlikely(!mapped_src_nents)) { + dev_err(jrdev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (unlikely(!mapped_src_nents)) { + dev_err(jrdev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(jrdev, "unable to map destination\n"); + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + return ERR_PTR(-ENOMEM); + } + } + + if (!ivsize && mapped_src_nents == 1) + sec4_sg_ents = 0; // no need for an input hw s/g table + else + sec4_sg_ents = mapped_src_nents + !!ivsize; + dst_sg_idx = sec4_sg_ents; + + /* + * Input, output HW S/G tables: [IV, src][dst, IV] + * IV entries point to the same buffer + * If src == dst, S/G entries are reused (S/G tables overlap) + * + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond + * the end of the table by allocating more S/G entries. Logic: + * if (output S/G) + * pad output S/G, if needed + * else if (input S/G) ... 
+ * pad input S/G, if needed + */ + if (ivsize || mapped_dst_nents > 1) { + if (req->src == req->dst) + sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents); + else + sec4_sg_ents += pad_sg_nents(mapped_dst_nents + + !!ivsize); + } else { + sec4_sg_ents = pad_sg_nents(sec4_sg_ents); + } + + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); + + /* + * allocate space for base edesc and hw desc commands, link tables, IV + */ + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, + GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->mapped_src_nents = mapped_src_nents; + edesc->mapped_dst_nents = mapped_dst_nents; + edesc->sec4_sg_bytes = sec4_sg_bytes; + edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc + + desc_bytes); + rctx->edesc = edesc; + + /* Make sure IV is located in a DMAable area */ + if (ivsize) { + iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes; + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, + dst_nents, 0, 0, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); + } + if (dst_sg_idx) + sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg + + !!ivsize, 0); + + if (req->src != req->dst && (ivsize || mapped_dst_nents > 1)) + sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg + + dst_sg_idx, 0); + + if (ivsize) + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx + + mapped_dst_nents, iv_dma, ivsize, 0); + + if (ivsize || mapped_dst_nents > 1) + sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx + + mapped_dst_nents - 1 + !!ivsize); + + if (sec4_sg_bytes) { + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, + dst_nents, iv_dma, ivsize, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + } + + edesc->iv_dma = iv_dma; + + print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, + sec4_sg_bytes, 1); + + return edesc; +} + +static int skcipher_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct skcipher_request *req = skcipher_request_cast(areq); + struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); + u32 *desc = rctx->edesc->hw_desc; + int ret; + + rctx->edesc->bklog = true; + + ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req); + + if (ret != -EINPROGRESS) { + skcipher_unmap(ctx->jrdev, rctx->edesc, req); + kfree(rctx->edesc); + } else { + ret = 0; + } + + return ret; +} + +static inline bool xts_skcipher_ivsize(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + + return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); +} + +static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) +{ + struct skcipher_edesc *edesc; + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx 
*ctx = crypto_skcipher_ctx(skcipher); + struct device *jrdev = ctx->jrdev; + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + u32 *desc; + int ret = 0; + + /* + * XTS is expected to return an error even for input length = 0 + * Note that the case input length < block size will be caught during + * HW offloading and return an error. + */ + if (!req->cryptlen && !ctx->fallback) + return 0; + + if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) || + ctx->xts_key_fallback)) { + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); + + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + + return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); + } + + /* allocate extended descriptor */ + edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor*/ + init_skcipher_job(req, edesc, encrypt); + + print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); + + desc = edesc->hw_desc; + /* + * Only the backlog request are sent to crypto-engine since the others + * can be handled by CAAM, if free, especially since JR has up to 1024 + * entries (more than the 10 entries from crypto-engine). + */ + if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine, + req); + else + ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req); + + if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { + skcipher_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int skcipher_encrypt(struct skcipher_request *req) +{ + return skcipher_crypt(req, true); +} + +static int skcipher_decrypt(struct skcipher_request *req) +{ + return skcipher_crypt(req, false); +} + +static struct caam_skcipher_alg driver_algs[] = { + { + .skcipher = { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aes_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + }, + { + .skcipher = { + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-3des-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + }, + { + .skcipher = { + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = des_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + }, + { + .skcipher = { + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = 
"ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = ctr_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .chunksize = AES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + }, + { + .skcipher = { + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = rfc3686_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .chunksize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .rfc3686 = true, + }, + }, + { + .skcipher = { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-caam", + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = xts_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, + }, + { + .skcipher = { + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = des_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB, + }, + { + .skcipher = { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aes_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB, + }, + { + .skcipher = { + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, + }, +}; + +static struct caam_aead_alg driver_aeads[] = { + { + .aead = { + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = rfc4106_setkey, + .setauthsize = rfc4106_setauthsize, + .encrypt = ipsec_gcm_encrypt, + .decrypt = ipsec_gcm_decrypt, + .ivsize = GCM_RFC4106_IV_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc4543(gcm(aes))", + .cra_driver_name = "rfc4543-gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = rfc4543_setkey, + .setauthsize = rfc4543_setauthsize, + .encrypt = ipsec_gcm_encrypt, + .decrypt = ipsec_gcm_decrypt, + .ivsize = GCM_RFC4543_IV_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, + }, + }, + /* Galois Counter Mode */ + { + .aead = { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = gcm_setkey, + 
.setauthsize = gcm_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, + .ivsize = GCM_AES_IV_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, + }, + }, + /* single-pass ipsec_esp descriptor */ + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-md5-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha1-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha224-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha256-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha384-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "ecb(cipher_null))", + .cra_driver_name = "authenc-hmac-sha512-" + "ecb-cipher_null-caam", + .cra_blocksize = NULL_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + 
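/*
 * In each authenc entry the class 1 type selects the cipher CHA and mode
 * (here AES-CBC) while the class 2 type selects the hash CHA running the
 * HMAC. caam_init_common() below simply folds them into the operation
 * words used when building the shared descriptors:
 *
 *	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
 *	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
 */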
.class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = 
aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-cbc-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + 
"cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = 
aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-" + "cbc-des3_ede-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, 
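/*
 * Illustrative sketch (not taken from this driver) of how a kernel user
 * reaches one of the AEADs registered in this table through the generic
 * crypto API; "gcm(aes)" resolves to "gcm-aes-caam" whenever the CAAM entry
 * has the highest priority. Variable names are placeholders and error
 * handling is omitted; buf must be kmalloc()'d (the driver DMA-maps it) and
 * holds AAD || plaintext with 16 spare bytes at the end for the tag.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *	struct scatterlist sg;
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, assoclen + ptlen + 16);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
 *	aead_request_set_ad(req, assoclen);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 */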
+ { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(des))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = 
SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(des))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-cbc-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-md5-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(md5),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-md5-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha1-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(sha1),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha1-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = 
"authenc(hmac(sha224)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha224-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(sha224),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha224-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha256-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha256)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha256-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha384-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha384)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha384-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha512-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, 
+ }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha512)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha512-" + "rfc3686-ctr-aes-caam", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc7539(chacha20,poly1305)", + .cra_driver_name = "rfc7539-chacha20-poly1305-" + "caam", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = chachapoly_encrypt, + .decrypt = chachapoly_decrypt, + .ivsize = CHACHAPOLY_IV_SIZE, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + .nodkp = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc7539esp(chacha20,poly1305)", + .cra_driver_name = "rfc7539esp-chacha20-" + "poly1305-caam", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = chachapoly_encrypt, + .decrypt = chachapoly_decrypt, + .ivsize = 8, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + .nodkp = true, + }, + }, +}; + +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, + bool uses_dkp) +{ + dma_addr_t dma_addr; + struct caam_drv_private *priv; + const size_t sh_desc_enc_offset = offsetof(struct caam_ctx, + sh_desc_enc); + + ctx->jrdev = caam_jr_alloc(); + if (IS_ERR(ctx->jrdev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } + + priv = dev_get_drvdata(ctx->jrdev->parent); + if (priv->era >= 6 && uses_dkp) + ctx->dir = DMA_BIDIRECTIONAL; + else + ctx->dir = DMA_TO_DEVICE; + + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, + offsetof(struct caam_ctx, + sh_desc_enc_dma) - + sh_desc_enc_offset, + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->jrdev, dma_addr)) { + dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); + caam_jr_free(ctx->jrdev); + return -ENOMEM; + } + + ctx->sh_desc_enc_dma = dma_addr; + ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx, + sh_desc_dec) - + sh_desc_enc_offset; + ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) - + sh_desc_enc_offset; + + /* copy descriptor header template value */ + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; + + return 0; +} + +static int caam_cra_init(struct crypto_skcipher *tfm) +{ + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct caam_skcipher_alg *caam_alg = + container_of(alg, typeof(*caam_alg), skcipher); + struct 
caam_ctx *ctx = crypto_skcipher_ctx(tfm); + u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; + int ret = 0; + + ctx->enginectx.op.do_one_request = skcipher_do_one_req; + + if (alg_aai == OP_ALG_AAI_XTS) { + const char *tfm_name = crypto_tfm_alg_name(&tfm->base); + struct crypto_skcipher *fallback; + + fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + pr_err("Failed to allocate %s fallback: %ld\n", + tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + + ctx->fallback = fallback; + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + + crypto_skcipher_reqsize(fallback)); + } else { + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx)); + } + + ret = caam_init_common(ctx, &caam_alg->caam, false); + if (ret && ctx->fallback) + crypto_free_skcipher(ctx->fallback); + + return ret; +} + +static int caam_aead_init(struct crypto_aead *tfm) +{ + struct aead_alg *alg = crypto_aead_alg(tfm); + struct caam_aead_alg *caam_alg = + container_of(alg, struct caam_aead_alg, aead); + struct caam_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx)); + + ctx->enginectx.op.do_one_request = aead_do_one_req; + + return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); +} + +static void caam_exit_common(struct caam_ctx *ctx) +{ + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, + offsetof(struct caam_ctx, sh_desc_enc_dma) - + offsetof(struct caam_ctx, sh_desc_enc), + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); + caam_jr_free(ctx->jrdev); +} + +static void caam_cra_exit(struct crypto_skcipher *tfm) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (ctx->fallback) + crypto_free_skcipher(ctx->fallback); + caam_exit_common(ctx); +} + +static void caam_aead_exit(struct crypto_aead *tfm) +{ + caam_exit_common(crypto_aead_ctx(tfm)); +} + +void caam_algapi_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + + if (t_alg->registered) + crypto_unregister_aead(&t_alg->aead); + } + + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + struct caam_skcipher_alg *t_alg = driver_algs + i; + + if (t_alg->registered) + crypto_unregister_skcipher(&t_alg->skcipher); + } +} + +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) +{ + struct skcipher_alg *alg = &t_alg->skcipher; + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY); + + alg->init = caam_cra_init; + alg->exit = caam_cra_exit; +} + +static void caam_aead_alg_init(struct caam_aead_alg *t_alg) +{ + struct aead_alg *alg = &t_alg->aead; + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; + + alg->init = caam_aead_init; + alg->exit = caam_aead_exit; +} + +int caam_algapi_init(struct device *ctrldev) +{ + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); + int i = 0, err = 0; + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; + unsigned int md_limit = SHA512_DIGEST_SIZE; + bool registered = false, gcm_support; + + /* + * Register crypto algorithms the device supports. 
+ * First, detect presence and attributes of DES, AES, and MD blocks. + */ + if (priv->era < 10) { + u32 cha_vid, cha_inst, aes_rn; + + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); + aes_vid = cha_vid & CHA_ID_LS_AES_MASK; + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> + CHA_ID_LS_DES_SHIFT; + aes_inst = cha_inst & CHA_ID_LS_AES_MASK; + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + ccha_inst = 0; + ptha_inst = 0; + + aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) & + CHA_ID_LS_AES_MASK; + gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8); + } else { + u32 aesa, mdha; + + aesa = rd_reg32(&priv->ctrl->vreg.aesa); + mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; + aes_inst = aesa & CHA_VER_NUM_MASK; + md_inst = mdha & CHA_VER_NUM_MASK; + ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK; + ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK; + + gcm_support = aesa & CHA_VER_MISC_AES_GCM; + } + + /* If MD is present, limit digest size based on LP256 */ + if (md_inst && md_vid == CHA_VER_VID_MD_LP256) + md_limit = SHA256_DIGEST_SIZE; + + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + struct caam_skcipher_alg *t_alg = driver_algs + i; + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; + + /* Skip DES algorithms if not supported by device */ + if (!des_inst && + ((alg_sel == OP_ALG_ALGSEL_3DES) || + (alg_sel == OP_ALG_ALGSEL_DES))) + continue; + + /* Skip AES algorithms if not supported by device */ + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) + continue; + + /* + * Check support for AES modes not available + * on LP devices. + */ + if (aes_vid == CHA_VER_VID_AES_LP && + (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) == + OP_ALG_AAI_XTS) + continue; + + caam_skcipher_alg_init(t_alg); + + err = crypto_register_skcipher(&t_alg->skcipher); + if (err) { + pr_warn("%s alg registration failed\n", + t_alg->skcipher.base.cra_driver_name); + continue; + } + + t_alg->registered = true; + registered = true; + } + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + u32 c1_alg_sel = t_alg->caam.class1_alg_type & + OP_ALG_ALGSEL_MASK; + u32 c2_alg_sel = t_alg->caam.class2_alg_type & + OP_ALG_ALGSEL_MASK; + u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; + + /* Skip DES algorithms if not supported by device */ + if (!des_inst && + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || + (c1_alg_sel == OP_ALG_ALGSEL_DES))) + continue; + + /* Skip AES algorithms if not supported by device */ + if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) + continue; + + /* Skip CHACHA20 algorithms if not supported by device */ + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst) + continue; + + /* Skip POLY1305 algorithms if not supported by device */ + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst) + continue; + + /* Skip GCM algorithms if not supported by device */ + if (c1_alg_sel == OP_ALG_ALGSEL_AES && + alg_aai == OP_ALG_AAI_GCM && !gcm_support) + continue; + + /* + * Skip algorithms requiring message digests + * if MD or MD size is not supported by device. 
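 * For example, when the MDHA detected above is the LP256 variant
 * (md_vid == CHA_VER_VID_MD_LP256), md_limit stays at SHA256_DIGEST_SIZE
 * and every authenc template built on SHA-384/SHA-512 is skipped by this
 * test.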
+ */ + if (is_mdha(c2_alg_sel) && + (!md_inst || t_alg->aead.maxauthsize > md_limit)) + continue; + + caam_aead_alg_init(t_alg); + + err = crypto_register_aead(&t_alg->aead); + if (err) { + pr_warn("%s alg registration failed\n", + t_alg->aead.base.cra_driver_name); + continue; + } + + t_alg->registered = true; + registered = true; + } + + if (registered) + pr_info("caam algorithms registered in /proc/crypto\n"); + + return err; +} diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c new file mode 100644 index 000000000..7571e1ac9 --- /dev/null +++ b/drivers/crypto/caam/caamalg_desc.c @@ -0,0 +1,1644 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Shared descriptors for aead, skcipher algorithms + * + * Copyright 2016-2019 NXP + */ + +#include "compat.h" +#include "desc_constr.h" +#include "caamalg_desc.h" + +/* + * For aead functions, read payload and write payload, + * both of which are specified in req->src and req->dst + */ +static inline void aead_append_src_dst(u32 *desc, u32 msg_type) +{ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | + KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); +} + +/* Set DK bit in class 1 operation if shared */ +static inline void append_dec_op1(u32 *desc, u32 type) +{ + u32 *jump_cmd, *uncond_jump_cmd; + + /* DK bit is valid only for AES */ + if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) { + append_operation(desc, type | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT); + return; + } + + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); + append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT); + uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); + set_jump_tgt_here(desc, jump_cmd); + append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT | + OP_ALG_AAI_DK); + set_jump_tgt_here(desc, uncond_jump_cmd); +} + +/** + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor + * (non-protocol) with no (null) encryption. + * @desc: pointer to buffer used for descriptor construction + * @adata: pointer to authentication transform definitions. + * A split key is required for SEC Era < 6; the size of the split key + * is specified in this case. Valid algorithm values - one of + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed + * with OP_ALG_AAI_HMAC_PRECOMP. + * @icvsize: integrity check value (ICV) size (truncated or full) + * @era: SEC Era + */ +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, + unsigned int icvsize, int era) +{ + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (era < 6) { + if (adata->key_inline) + append_key_as_imm(desc, adata->key_virt, + adata->keylen_pad, adata->keylen, + CLASS_2 | KEY_DEST_MDHA_SPLIT | + KEY_ENC); + else + append_key(desc, adata->key_dma, adata->keylen, + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); + } else { + append_proto_dkp(desc, adata); + } + set_jump_tgt_here(desc, key_jump_cmd); + + /* assoclen + cryptlen = seqinlen */ + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Prepare to read and write cryptlen + assoclen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. 
self-patch the descriptor + * buffer. + */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | + MOVE_DEST_MATH3 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | + MOVE_DEST_DESCBUF | + MOVE_WAITCOMP | + (0x8 << MOVE_LEN_SHIFT)); + + /* Class 2 operation */ + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + + /* Read and write cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | + MOVE_AUX_LS); + + /* Write ICV */ + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); + +/** + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor + * (non-protocol) with no (null) decryption. + * @desc: pointer to buffer used for descriptor construction + * @adata: pointer to authentication transform definitions. + * A split key is required for SEC Era < 6; the size of the split key + * is specified in this case. Valid algorithm values - one of + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed + * with OP_ALG_AAI_HMAC_PRECOMP. + * @icvsize: integrity check value (ICV) size (truncated or full) + * @era: SEC Era + */ +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, + unsigned int icvsize, int era) +{ + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (era < 6) { + if (adata->key_inline) + append_key_as_imm(desc, adata->key_virt, + adata->keylen_pad, adata->keylen, + CLASS_2 | KEY_DEST_MDHA_SPLIT | + KEY_ENC); + else + append_key(desc, adata->key_dma, adata->keylen, + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); + } else { + append_proto_dkp(desc, adata); + } + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 2 operation */ + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + /* assoclen + cryptlen = seqoutlen */ + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* Prepare to read and write cryptlen + assoclen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. + */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | + MOVE_DEST_MATH2 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | + MOVE_DEST_DESCBUF | + MOVE_WAITCOMP | + (0x8 << MOVE_LEN_SHIFT)); + + /* Read and write cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + /* + * Insert a NOP here, since we need at least 4 instructions between + * code patching the descriptor buffer and the location being patched. 
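 * The unconditional JUMP appended just below, with its target set to the
 * very next command, is that NOP: a jump to the immediately following
 * command does nothing except occupy a descriptor word, which provides the
 * required spacing.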
+ */ + jump_cmd = append_jump(desc, JUMP_TEST_ALL); + set_jump_tgt_here(desc, jump_cmd); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | + MOVE_AUX_LS); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Load ICV */ + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + + print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); + +static void init_sh_desc_key_aead(u32 * const desc, + struct alginfo * const cdata, + struct alginfo * const adata, + const bool is_rfc3686, u32 *nonce, int era) +{ + u32 *key_jump_cmd; + unsigned int enckeylen = cdata->keylen; + + /* Note: Context registers are saved. */ + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* + * RFC3686 specific: + * | key = {AUTH_KEY, ENC_KEY, NONCE} + * | enckeylen = encryption key size + nonce size + */ + if (is_rfc3686) + enckeylen -= CTR_RFC3686_NONCE_SIZE; + + if (era < 6) { + if (adata->key_inline) + append_key_as_imm(desc, adata->key_virt, + adata->keylen_pad, adata->keylen, + CLASS_2 | KEY_DEST_MDHA_SPLIT | + KEY_ENC); + else + append_key(desc, adata->key_dma, adata->keylen, + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); + } else { + append_proto_dkp(desc, adata); + } + + if (cdata->key_inline) + append_key_as_imm(desc, cdata->key_virt, enckeylen, + enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, cdata->key_dma, enckeylen, CLASS_1 | + KEY_DEST_CLASS_REG); + + /* Load Counter into CONTEXT1 reg */ + if (is_rfc3686) { + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + append_move(desc, + MOVE_SRC_OUTFIFO | + MOVE_DEST_CLASS1CTX | + (16 << MOVE_OFFSET_SHIFT) | + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); + } + + set_jump_tgt_here(desc, key_jump_cmd); +} + +/** + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor + * (non-protocol). + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. + * @adata: pointer to authentication transform definitions. + * A split key is required for SEC Era < 6; the size of the split key + * is specified in this case. Valid algorithm values - one of + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed + * with OP_ALG_AAI_HMAC_PRECOMP. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template + * @nonce: pointer to rfc3686 nonce + * @ctx1_iv_off: IV offset in CONTEXT1 register + * @is_qi: true when called from caam/qi + * @era: SEC Era + */ +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool is_rfc3686, + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi, + int era) +{ + /* Note: Context registers are saved. 
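 *
 * A rough sketch of how a caller (e.g. aead_set_sh_desc() in caamalg.c)
 * feeds this constructor; local names are placeholders and the QI /
 * RFC3686 variants are left out:
 *
 *	struct alginfo cdata = {
 *		.algtype    = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 *		.keylen     = enckeylen,
 *		.key_virt   = enckey,
 *		.key_inline = true,
 *	};
 *	struct alginfo adata = {
 *		.algtype    = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
 *		.keylen     = split_keylen,
 *		.keylen_pad = split_keylen_pad,
 *		.key_virt   = authkey,
 *		.key_inline = true,
 *	};
 *
 *	cnstr_shdsc_aead_encap(sh_desc_enc, &cdata, &adata, ivsize, authsize,
 *			       false, NULL, 0, false, era);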
*/ + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); + + /* Class 2 operation */ + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + + if (is_qi) { + u32 *wait_load_cmd; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + (4 << LDST_OFFSET_SHIFT)); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + (ctx1_iv_off << LDST_OFFSET_SHIFT)); + } + + /* Read and write assoclen bytes */ + if (is_qi || era < 3) { + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + } else { + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); + } + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + FIFOLDST_VLF); + + /* Load Counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Class 1 operation */ + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + + /* Write ICV */ + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_aead_encap); + +/** + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor + * (non-protocol). + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. + * @adata: pointer to authentication transform definitions. + * A split key is required for SEC Era < 6; the size of the split key + * is specified in this case. Valid algorithm values - one of + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed + * with OP_ALG_AAI_HMAC_PRECOMP. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @geniv: whether to generate Encrypted Chain IV + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template + * @nonce: pointer to rfc3686 nonce + * @ctx1_iv_off: IV offset in CONTEXT1 register + * @is_qi: true when called from caam/qi + * @era: SEC Era + */ +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool geniv, + const bool is_rfc3686, u32 *nonce, + const u32 ctx1_iv_off, const bool is_qi, int era) +{ + /* Note: Context registers are saved. 
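 *
 * Decapsulation mirrors the encap descriptor above, except that the class 2
 * OPERATION runs with OP_ALG_ICV_ON so the ICV fifo-loaded at the end is
 * verified, and in the geniv case the IV is seq-loaded into CONTEXT1 and
 * then moved into the class 2 input FIFO so the check covers it as well.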
*/ + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); + + /* Class 2 operation */ + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + if (is_qi) { + u32 *wait_load_cmd; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + (4 << LDST_OFFSET_SHIFT)); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + + if (!geniv) + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + (ctx1_iv_off << LDST_OFFSET_SHIFT)); + } + + /* Read and write assoclen bytes */ + if (is_qi || era < 3) { + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + if (geniv) + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, + ivsize); + else + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, + CAAM_CMD_SZ); + } else { + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); + if (geniv) + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM, + ivsize); + else + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, + CAAM_CMD_SZ); + } + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + if (geniv) { + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + (ctx1_iv_off << LDST_OFFSET_SHIFT)); + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize); + } + + /* Load Counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Choose operation */ + if (ctx1_iv_off) + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT); + else + append_dec_op1(desc, cdata->algtype); + + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG); + + /* Load ICV */ + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + + print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_aead_decap); + +/** + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor + * (non-protocol) with HW-generated initialization + * vector. + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. + * @adata: pointer to authentication transform definitions. + * A split key is required for SEC Era < 6; the size of the split key + * is specified in this case. Valid algorithm values - one of + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed + * with OP_ALG_AAI_HMAC_PRECOMP. 
+ * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template + * @nonce: pointer to rfc3686 nonce + * @ctx1_iv_off: IV offset in CONTEXT1 register + * @is_qi: true when called from caam/qi + * @era: SEC Era + */ +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool is_rfc3686, + u32 *nonce, const u32 ctx1_iv_off, + const bool is_qi, int era) +{ + u32 geniv, moveiv; + u32 *wait_cmd; + + /* Note: Context registers are saved. */ + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); + + if (is_qi) { + u32 *wait_load_cmd; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + (4 << LDST_OFFSET_SHIFT)); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + } + + if (is_rfc3686) { + if (is_qi) + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + (ctx1_iv_off << LDST_OFFSET_SHIFT)); + + goto copy_iv; + } + + /* Generate IV */ + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_WAITCOMP | + MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | + (ivsize << MOVE_LEN_SHIFT)); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + +copy_iv: + /* Copy IV to class 1 context */ + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | + (ivsize << MOVE_LEN_SHIFT)); + + /* Return to encryption */ + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + + /* Read and write assoclen bytes */ + if (is_qi || era < 3) { + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + } else { + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); + } + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + /* Copy iv from outfifo to class 2 fifo */ + moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | + NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT); + append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); + append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); + + /* Load Counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Class 1 operation */ + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + + /* Will write ivsize + cryptlen */ + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Not need to reload iv */ + append_seq_fifo_load(desc, ivsize, + FIFOLD_CLASS_SKIP); + + /* Will read 
cryptlen */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* + * Wait for IV transfer (ofifo -> class2) to finish before starting + * ciphertext transfer (ofifo -> external memory). + */ + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_cmd); + + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); + + /* Write ICV */ + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); + +/** + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_qi: true when called from caam/qi + */ +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi) +{ + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1, + *zero_assoc_jump_cmd2; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (cdata->key_inline) + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* class 1 operation */ + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + + if (is_qi) { + u32 *wait_load_cmd; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + (4 << LDST_OFFSET_SHIFT)); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM, + ivsize); + } else { + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, + CAAM_CMD_SZ); + } + + /* if assoclen + cryptlen is ZERO, skip to ICV write */ + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + if (is_qi) + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + + /* if assoclen is ZERO, skip reading the assoc data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* cryptlen = seqinlen - assoclen */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* if cryptlen is ZERO jump to zero-payload commands */ + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + /* read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + set_jump_tgt_here(desc, 
zero_assoc_jump_cmd1); + + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* write encrypted data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* read payload data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); + + /* jump to ICV writing */ + if (is_qi) + append_jump(desc, JUMP_TEST_ALL | 4); + else + append_jump(desc, JUMP_TEST_ALL | 2); + + /* zero-payload commands */ + set_jump_tgt_here(desc, zero_payload_jump_cmd); + + /* read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); + if (is_qi) + /* jump to ICV writing */ + append_jump(desc, JUMP_TEST_ALL | 2); + + /* There is no input data */ + set_jump_tgt_here(desc, zero_assoc_jump_cmd2); + + if (is_qi) + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | + FIFOLD_TYPE_LAST1); + + /* write ICV */ + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); + +/** + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_qi: true when called from caam/qi + */ +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi) +{ + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | + JUMP_TEST_ALL | JUMP_COND_SHRD); + if (cdata->key_inline) + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* class 1 operation */ + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + if (is_qi) { + u32 *wait_load_cmd; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + (4 << LDST_OFFSET_SHIFT)); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + } + + /* if assoclen is ZERO, skip reading the assoc data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + + set_jump_tgt_here(desc, zero_assoc_jump_cmd1); + + /* cryptlen = seqoutlen - assoclen */ + append_math_sub(desc, VARSEQINLEN, 
SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* jump to zero-payload command if cryptlen is zero */ + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* store encrypted data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* read payload data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + /* zero-payload command */ + set_jump_tgt_here(desc, zero_payload_jump_cmd); + + /* read ICV */ + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); + + print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); + +/** + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor + * (non-protocol). + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_qi: true when called from caam/qi + * + * Input sequence: AAD | PTXT + * Output sequence: AAD | CTXT | ICV + * AAD length (assoclen), which includes the IV length, is available in Math3. + */ +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi) +{ + u32 *key_jump_cmd, *zero_cryptlen_jump_cmd, *skip_instructions; + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (cdata->key_inline) + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 1 operation */ + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + + if (is_qi) { + u32 *wait_load_cmd; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + (4 << LDST_OFFSET_SHIFT)); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + + /* Read salt and IV */ + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV); + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + } + + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* Skip AAD */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* Read cryptlen and set this value into VARSEQOUTLEN */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* If cryptlen is ZERO jump to AAD command */ + zero_cryptlen_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + /* Read AAD data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ + append_seq_fifo_store(desc, 
0, FIFOST_TYPE_MESSAGE_DATA); + + /* Skip IV */ + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); + append_math_add(desc, VARSEQINLEN, VARSEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* Write encrypted data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* Read payload data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); + + /* Jump instructions to avoid double reading of AAD */ + skip_instructions = append_jump(desc, JUMP_TEST_ALL); + + /* There is no input data, cryptlen = 0 */ + set_jump_tgt_here(desc, zero_cryptlen_jump_cmd); + + /* Read AAD */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); + + set_jump_tgt_here(desc, skip_instructions); + + /* Write ICV */ + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); + +/** + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor + * (non-protocol). + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_qi: true when called from caam/qi + */ +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi) +{ + u32 *key_jump_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (cdata->key_inline) + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + else + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 1 operation */ + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + if (is_qi) { + u32 *wait_load_cmd; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + (4 << LDST_OFFSET_SHIFT)); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + + /* Read salt and IV */ + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV); + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + } + + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* Read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + + /* Skip IV */ + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); + + /* Will read cryptlen bytes */ + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); + + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, 
FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* Will write cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* Store payload data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* Read encrypted data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + /* Read ICV */ + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); + + print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); + +/** + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor + * (non-protocol). + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_qi: true when called from caam/qi + */ +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi) +{ + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (cdata->key_inline) + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 1 operation */ + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + + if (is_qi) { + /* assoclen is not needed, skip it */ + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP); + + /* Read salt and IV */ + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV); + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + } + + /* assoclen + cryptlen = seqinlen */ + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. 
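+ * The two MOVE commands appended below (read_move_cmd/write_move_cmd)
+ * are therefore patched afterwards with set_move_tgt_here(), once the
+ * final location of their targets in the descriptor is known.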
+ */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP); + + /* Will read assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Will write assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Read and write assoclen + cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_AAD); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + /* Move payload data to OFIFO */ + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); + + /* Write ICV */ + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); + +/** + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor + * (non-protocol). + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_qi: true when called from caam/qi + */ +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi) +{ + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip key loading if it is loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (cdata->key_inline) + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 1 operation */ + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + if (is_qi) { + /* assoclen is not needed, skip it */ + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP); + + /* Read salt and IV */ + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV); + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + } + + /* assoclen + cryptlen = seqoutlen */ + append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. 
+ */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP); + + /* Will read assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* Will write assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* Store payload data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* In-snoop assoclen + cryptlen data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + /* Move payload data to OFIFO */ + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Read ICV */ + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); + + print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); + +/** + * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and + * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared + * descriptor (non-protocol). + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with + * OP_ALG_AAI_AEAD. + * @adata: pointer to authentication transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with + * OP_ALG_AAI_AEAD. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @encap: true if encapsulation, false if decapsulation + * @is_qi: true when called from caam/qi + */ +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool encap, + const bool is_qi) +{ + u32 *key_jump_cmd, *wait_cmd; + u32 nfifo; + const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE); + + /* Note: Context registers are saved. */ + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, + CLASS_1 | KEY_DEST_CLASS_REG); + + /* For IPsec load the salt from keymat in the context register */ + if (is_ipsec) + append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4, + LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | + 4 << LDST_OFFSET_SHIFT); + + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 2 and 1 operations: Poly & ChaCha */ + if (encap) { + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + } else { + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_ICV_ON); + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT); + } + + if (is_qi) { + u32 *wait_load_cmd; + u32 ctx1_iv_off = is_ipsec ? 
8 : 4; + + /* REG3 = assoclen */ + append_seq_load(desc, 4, LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH3 | + 4 << LDST_OFFSET_SHIFT); + + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_CALM | JUMP_COND_NCP | + JUMP_COND_NOP | JUMP_COND_NIP | + JUMP_COND_NIFP); + set_jump_tgt_here(desc, wait_load_cmd); + + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ctx1_iv_off << LDST_OFFSET_SHIFT); + } + + /* + * MAGIC with NFIFO + * Read associated data from the input and send them to class1 and + * class2 alignment blocks. From class1 send data to output fifo and + * then write it to memory since we don't need to encrypt AD. + */ + nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 | + NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND; + append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3); + + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO | + FIFOLD_CLASS_CLASS1 | LDST_VLF); + append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK | + MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3); + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); + + /* IPsec - copy IV at the output */ + if (is_ipsec) + append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA | + 0x2 << 25); + + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL | + JUMP_COND_NOP | JUMP_TEST_ALL); + set_jump_tgt_here(desc, wait_cmd); + + if (encap) { + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, + CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + + /* Write ICV */ + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + } else { + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, + CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, + CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG); + + /* Load ICV for verification */ + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + } + + print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_chachapoly); + +/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */ +static inline void skcipher_append_src_dst(u32 *desc) +{ + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | + KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); +} + +/** + * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128 + * - OP_ALG_ALGSEL_CHACHA20 + * @ivsize: initialization vector size + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template + * @ctx1_iv_off: IV offset in CONTEXT1 register + */ +void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, const bool is_rfc3686, + const 
u32 ctx1_iv_off) +{ + u32 *key_jump_cmd; + u32 options = cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT; + bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_CHACHA20); + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* Load class1 key only */ + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + + /* Load nonce into CONTEXT1 reg */ + if (is_rfc3686) { + const u8 *nonce = cdata->key_virt + cdata->keylen; + + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); + } + + set_jump_tgt_here(desc, key_jump_cmd); + + /* Load IV, if there is one */ + if (ivsize) + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); + + /* Load counter into CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Load operation */ + if (is_chacha20) + options |= OP_ALG_AS_FINALIZE; + append_operation(desc, options); + + /* Perform operation */ + skcipher_append_src_dst(desc); + + /* Store IV */ + if (!is_chacha20 && ivsize) + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); + + print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); + +/** + * cnstr_shdsc_skcipher_decap - skcipher decapsulation shared descriptor + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128 + * - OP_ALG_ALGSEL_CHACHA20 + * @ivsize: initialization vector size + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template + * @ctx1_iv_off: IV offset in CONTEXT1 register + */ +void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, const bool is_rfc3686, + const u32 ctx1_iv_off) +{ + u32 *key_jump_cmd; + bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_CHACHA20); + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* Load class1 key only */ + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + + /* Load nonce into CONTEXT1 reg */ + if (is_rfc3686) { + const u8 *nonce = cdata->key_virt + cdata->keylen; + + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); + } + + set_jump_tgt_here(desc, key_jump_cmd); + + /* Load IV, if there is one */ + if (ivsize) + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); + + /* Load counter into 
CONTEXT1 reg */ + if (is_rfc3686) + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); + + /* Choose operation */ + if (ctx1_iv_off) + append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | + OP_ALG_DECRYPT); + else + append_dec_op1(desc, cdata->algtype); + + /* Perform operation */ + skcipher_append_src_dst(desc); + + /* Store IV */ + if (!is_chacha20 && ivsize) + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); + + print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); +} +EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); + +/** + * cnstr_shdsc_xts_skcipher_encap - xts skcipher encapsulation shared descriptor + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS. + */ +void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata) +{ + /* + * Set sector size to a big value, practically disabling + * sector size segmentation in xts implementation. We cannot + * take full advantage of this HW feature with existing + * crypto API / dm-crypt SW architecture. + */ + __be64 sector_size = cpu_to_be64(BIT(15)); + u32 *key_jump_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* Load class1 keys only */ + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + + /* Load sector size with index 40 bytes (0x28) */ + append_load_as_imm(desc, (void *)§or_size, 8, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + (0x28 << LDST_OFFSET_SHIFT)); + + set_jump_tgt_here(desc, key_jump_cmd); + + /* + * create sequence for loading the sector index / 16B tweak value + * Lower 8B of IV - sector index / tweak lower half + * Upper 8B of IV - upper half of 16B tweak + */ + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | + (0x20 << LDST_OFFSET_SHIFT)); + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | + (0x30 << LDST_OFFSET_SHIFT)); + + /* Load operation */ + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | + OP_ALG_ENCRYPT); + + /* Perform operation */ + skcipher_append_src_dst(desc); + + /* Store lower 8B and upper 8B of IV */ + append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | + (0x20 << LDST_OFFSET_SHIFT)); + append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | + (0x30 << LDST_OFFSET_SHIFT)); + + print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__) + ": ", DUMP_PREFIX_ADDRESS, 16, 4, + desc, desc_bytes(desc), 1); +} +EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); + +/** + * cnstr_shdsc_xts_skcipher_decap - xts skcipher decapsulation shared descriptor + * @desc: pointer to buffer used for descriptor construction + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS. + */ +void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata) +{ + /* + * Set sector size to a big value, practically disabling + * sector size segmentation in xts implementation. 
We cannot + * take full advantage of this HW feature with existing + * crypto API / dm-crypt SW architecture. + */ + __be64 sector_size = cpu_to_be64(BIT(15)); + u32 *key_jump_cmd; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* Load class1 key only */ + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + + /* Load sector size with index 40 bytes (0x28) */ + append_load_as_imm(desc, (void *)§or_size, 8, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + (0x28 << LDST_OFFSET_SHIFT)); + + set_jump_tgt_here(desc, key_jump_cmd); + + /* + * create sequence for loading the sector index / 16B tweak value + * Lower 8B of IV - sector index / tweak lower half + * Upper 8B of IV - upper half of 16B tweak + */ + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | + (0x20 << LDST_OFFSET_SHIFT)); + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | + (0x30 << LDST_OFFSET_SHIFT)); + /* Load operation */ + append_dec_op1(desc, cdata->algtype); + + /* Perform operation */ + skcipher_append_src_dst(desc); + + /* Store lower 8B and upper 8B of IV */ + append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | + (0x20 << LDST_OFFSET_SHIFT)); + append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | + (0x30 << LDST_OFFSET_SHIFT)); + + print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__) + ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +} +EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM descriptor support"); +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h new file mode 100644 index 000000000..f2893393b --- /dev/null +++ b/drivers/crypto/caam/caamalg_desc.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Shared descriptors for aead, skcipher algorithms + * + * Copyright 2016 NXP + */ + +#ifndef _CAAMALG_DESC_H_ +#define _CAAMALG_DESC_H_ + +/* length of descriptors text */ +#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ) +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ) +#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ) +#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ) +#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ) + +/* Note: Nonce is counted in cdata.keylen */ +#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ) + +#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ) + +#define DESC_GCM_BASE (3 * CAAM_CMD_SZ) +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ) +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ) + +#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 16 * CAAM_CMD_SZ) +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ) +#define 
DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ) + +#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ) +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ) +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ) + +#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ) +#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \ + 21 * CAAM_CMD_SZ) +#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \ + 16 * CAAM_CMD_SZ) + +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, + unsigned int icvsize, int era); + +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, + unsigned int icvsize, int era); + +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool is_rfc3686, + u32 *nonce, const u32 ctx1_iv_off, + const bool is_qi, int era); + +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool geniv, + const bool is_rfc3686, u32 *nonce, + const u32 ctx1_iv_off, const bool is_qi, int era); + +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool is_rfc3686, + u32 *nonce, const u32 ctx1_iv_off, + const bool is_qi, int era); + +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi); + +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi); + +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi); + +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi); + +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi); + +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, unsigned int icvsize, + const bool is_qi); + +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool encap, + const bool is_qi); + +void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, const bool is_rfc3686, + const u32 ctx1_iv_off); + +void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, const bool is_rfc3686, + const u32 ctx1_iv_off); + +void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata); + +void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata); + +#endif /* _CAAMALG_DESC_H_ */ diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c new file mode 100644 index 000000000..a24ae966d --- /dev/null +++ b/drivers/crypto/caam/caamalg_qi.c @@ -0,0 +1,2727 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Freescale FSL CAAM support for crypto API over QI backend. + * Based on caamalg.c + * + * Copyright 2013-2016 Freescale Semiconductor, Inc. 
+ * Copyright 2016-2019 NXP + */ + +#include "compat.h" +#include "ctrl.h" +#include "regs.h" +#include "intern.h" +#include "desc_constr.h" +#include "error.h" +#include "sg_sw_qm.h" +#include "key_gen.h" +#include "qi.h" +#include "jr.h" +#include "caamalg_desc.h" +#include <crypto/xts.h> +#include <asm/unaligned.h> + +/* + * crypto alg + */ +#define CAAM_CRA_PRIORITY 2000 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */ +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ + SHA512_DIGEST_SIZE * 2) + +#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \ + CAAM_MAX_KEY_SIZE) +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) + +struct caam_alg_entry { + int class1_alg_type; + int class2_alg_type; + bool rfc3686; + bool geniv; + bool nodkp; +}; + +struct caam_aead_alg { + struct aead_alg aead; + struct caam_alg_entry caam; + bool registered; +}; + +struct caam_skcipher_alg { + struct skcipher_alg skcipher; + struct caam_alg_entry caam; + bool registered; +}; + +/* + * per-session context + */ +struct caam_ctx { + struct device *jrdev; + u32 sh_desc_enc[DESC_MAX_USED_LEN]; + u32 sh_desc_dec[DESC_MAX_USED_LEN]; + u8 key[CAAM_MAX_KEY_SIZE]; + dma_addr_t key_dma; + enum dma_data_direction dir; + struct alginfo adata; + struct alginfo cdata; + unsigned int authsize; + struct device *qidev; + spinlock_t lock; /* Protects multiple init of driver context */ + struct caam_drv_ctx *drv_ctx[NUM_OP]; + bool xts_key_fallback; + struct crypto_skcipher *fallback; +}; + +struct caam_skcipher_req_ctx { + struct skcipher_request fallback_req; +}; + +static int aead_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + typeof(*alg), aead); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 ctx1_iv_off = 0; + u32 *nonce = NULL; + unsigned int data_len[2]; + u32 inl_mask; + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = alg->caam.rfc3686; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + if (ctr_mode) + ctx1_iv_off = 16; + + /* + * RFC3686 specific: + * CONTEXT1[255:128] = {NONCE, IV, COUNTER} + */ + if (is_rfc3686) { + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); + } + + /* + * In case |user key| > |derived key|, using DKP<imm,imm> would result + * in invalid opcodes (last bytes of user key) in the resulting + * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key + * addresses are needed. + */ + ctx->adata.key_virt = ctx->key; + ctx->adata.key_dma = ctx->key_dma; + + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + + data_len[0] = ctx->adata.keylen_pad; + data_len[1] = ctx->cdata.keylen; + + if (alg->caam.geniv) + goto skip_enc; + + /* aead_encrypt shared descriptor */ + if (desc_inline_query(DESC_QI_AEAD_ENC_LEN + + (is_rfc3686 ? 
DESC_AEAD_CTR_RFC3686_LEN : 0), + DESC_JOB_IO_LEN, data_len, &inl_mask, + ARRAY_SIZE(data_len)) < 0) + return -EINVAL; + + ctx->adata.key_inline = !!(inl_mask & 1); + ctx->cdata.key_inline = !!(inl_mask & 2); + + cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, is_rfc3686, nonce, + ctx1_iv_off, true, ctrlpriv->era); + +skip_enc: + /* aead_decrypt shared descriptor */ + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), + DESC_JOB_IO_LEN, data_len, &inl_mask, + ARRAY_SIZE(data_len)) < 0) + return -EINVAL; + + ctx->adata.key_inline = !!(inl_mask & 1); + ctx->cdata.key_inline = !!(inl_mask & 2); + + cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, alg->caam.geniv, + is_rfc3686, nonce, ctx1_iv_off, true, + ctrlpriv->era); + + if (!alg->caam.geniv) + goto skip_givenc; + + /* aead_givencrypt shared descriptor */ + if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), + DESC_JOB_IO_LEN, data_len, &inl_mask, + ARRAY_SIZE(data_len)) < 0) + return -EINVAL; + + ctx->adata.key_inline = !!(inl_mask & 1); + ctx->cdata.key_inline = !!(inl_mask & 2); + + cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, is_rfc3686, nonce, + ctx1_iv_off, true, ctrlpriv->era); + +skip_givenc: + return 0; +} + +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + aead_set_sh_desc(authenc); + + return 0; +} + +static int aead_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + struct crypto_authenc_keys keys; + int ret = 0; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + + dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", + keys.authkeylen + keys.enckeylen, keys.enckeylen, + keys.authkeylen); + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + /* + * If DKP is supported, use it in the shared descriptor to generate + * the split key. 
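+ * (SEC Era >= 6). In that case only the raw authentication key is
+ * copied ahead of the encryption key here, and gen_split_key() is
+ * skipped: the split key is derived by the DKP command when the
+ * shared descriptor itself runs.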
+ */ + if (ctrlpriv->era >= 6) { + ctx->adata.keylen = keys.authkeylen; + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & + OP_ALG_ALGSEL_MASK); + + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) + goto badkey; + + memcpy(ctx->key, keys.authkey, keys.authkeylen); + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, + keys.enckeylen); + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, + ctx->adata.keylen_pad + + keys.enckeylen, ctx->dir); + goto skip_split_key; + } + + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, + keys.authkeylen, CAAM_MAX_KEY_SIZE - + keys.enckeylen); + if (ret) + goto badkey; + + /* postpend encryption key to auth split key */ + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, + ctx->adata.keylen_pad + keys.enckeylen, + ctx->dir); + + print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, + ctx->adata.keylen_pad + keys.enckeylen, 1); + +skip_split_key: + ctx->cdata.keylen = keys.enckeylen; + + ret = aead_set_sh_desc(aead); + if (ret) + goto badkey; + + /* Now update the driver contexts with the new shared descriptor */ + if (ctx->drv_ctx[ENCRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], + ctx->sh_desc_enc); + if (ret) { + dev_err(jrdev, "driver enc context update failed\n"); + goto badkey; + } + } + + if (ctx->drv_ctx[DECRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], + ctx->sh_desc_dec); + if (ret) { + dev_err(jrdev, "driver dec context update failed\n"); + goto badkey; + } + } + + memzero_explicit(&keys, sizeof(keys)); + return ret; +badkey: + memzero_explicit(&keys, sizeof(keys)); + return -EINVAL; +} + +static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct crypto_authenc_keys keys; + int err; + + err = crypto_authenc_extractkeys(&keys, key, keylen); + if (unlikely(err)) + return err; + + err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: + aead_setkey(aead, key, keylen); + + memzero_explicit(&keys, sizeof(keys)); + return err; +} + +static int gcm_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - + ctx->cdata.keylen; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, + ctx->authsize, true); + + /* + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, + ctx->authsize, true); + + return 0; +} + +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_gcm_check_authsize(authsize); + if (err) + return err; + + ctx->authsize = authsize; + gcm_set_sh_desc(authenc); + + return 0; +} + +static int 
gcm_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int ret; + + ret = aes_check_keylen(keylen); + if (ret) + return ret; + + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + memcpy(ctx->key, key, keylen); + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen, + ctx->dir); + ctx->cdata.keylen = keylen; + + ret = gcm_set_sh_desc(aead); + if (ret) + return ret; + + /* Now update the driver contexts with the new shared descriptor */ + if (ctx->drv_ctx[ENCRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], + ctx->sh_desc_enc); + if (ret) { + dev_err(jrdev, "driver enc context update failed\n"); + return ret; + } + } + + if (ctx->drv_ctx[DECRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], + ctx->sh_desc_dec); + if (ret) { + dev_err(jrdev, "driver dec context update failed\n"); + return ret; + } + } + + return 0; +} + +static int rfc4106_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - + ctx->cdata.keylen; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + ctx->cdata.key_virt = ctx->key; + + /* + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { + ctx->cdata.key_inline = true; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, + ctx->authsize, true); + + /* + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) { + ctx->cdata.key_inline = true; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, + ctx->authsize, true); + + return 0; +} + +static int rfc4106_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_rfc4106_check_authsize(authsize); + if (err) + return err; + + ctx->authsize = authsize; + rfc4106_set_sh_desc(authenc); + + return 0; +} + +static int rfc4106_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int ret; + + ret = aes_check_keylen(keylen - 4); + if (ret) + return ret; + + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + memcpy(ctx->key, key, keylen); + /* + * The last four bytes of the key material are used as the salt value + * in the nonce. Update the AES key length. 
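+ * ctx->key keeps the full key material, so the shared descriptor can
+ * pick up the 4-byte salt from just past the AES key (the "Read salt
+ * and IV" load in cnstr_shdsc_rfc4106_encap()/_decap()).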
+ */ + ctx->cdata.keylen = keylen - 4; + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, + ctx->cdata.keylen, ctx->dir); + + ret = rfc4106_set_sh_desc(aead); + if (ret) + return ret; + + /* Now update the driver contexts with the new shared descriptor */ + if (ctx->drv_ctx[ENCRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], + ctx->sh_desc_enc); + if (ret) { + dev_err(jrdev, "driver enc context update failed\n"); + return ret; + } + } + + if (ctx->drv_ctx[DECRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], + ctx->sh_desc_dec); + if (ret) { + dev_err(jrdev, "driver dec context update failed\n"); + return ret; + } + } + + return 0; +} + +static int rfc4543_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - + ctx->cdata.keylen; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + ctx->cdata.key_virt = ctx->key; + + /* + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) { + ctx->cdata.key_inline = true; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, + ctx->authsize, true); + + /* + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) { + ctx->cdata.key_inline = true; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, + ctx->authsize, true); + + return 0; +} + +static int rfc4543_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + if (authsize != 16) + return -EINVAL; + + ctx->authsize = authsize; + rfc4543_set_sh_desc(authenc); + + return 0; +} + +static int rfc4543_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int ret; + + ret = aes_check_keylen(keylen - 4); + if (ret) + return ret; + + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + memcpy(ctx->key, key, keylen); + /* + * The last four bytes of the key material are used as the salt value + * in the nonce. Update the AES key length. 
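+ * (rfc4543 uses the same {AES key, 4-byte salt} layout as rfc4106,
+ * so the length bookkeeping below is identical.)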
+ */ + ctx->cdata.keylen = keylen - 4; + dma_sync_single_for_device(jrdev->parent, ctx->key_dma, + ctx->cdata.keylen, ctx->dir); + + ret = rfc4543_set_sh_desc(aead); + if (ret) + return ret; + + /* Now update the driver contexts with the new shared descriptor */ + if (ctx->drv_ctx[ENCRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], + ctx->sh_desc_enc); + if (ret) { + dev_err(jrdev, "driver enc context update failed\n"); + return ret; + } + } + + if (ctx->drv_ctx[DECRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], + ctx->sh_desc_dec); + if (ret) { + dev_err(jrdev, "driver dec context update failed\n"); + return ret; + } + } + + return 0; +} + +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + unsigned int keylen, const u32 ctx1_iv_off) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_skcipher_alg *alg = + container_of(crypto_skcipher_alg(skcipher), typeof(*alg), + skcipher); + struct device *jrdev = ctx->jrdev; + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + const bool is_rfc3686 = alg->caam.rfc3686; + int ret = 0; + + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; + + /* skcipher encrypt, decrypt shared descriptors */ + cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, + is_rfc3686, ctx1_iv_off); + cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, + is_rfc3686, ctx1_iv_off); + + /* Now update the driver contexts with the new shared descriptor */ + if (ctx->drv_ctx[ENCRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], + ctx->sh_desc_enc); + if (ret) { + dev_err(jrdev, "driver enc context update failed\n"); + return -EINVAL; + } + } + + if (ctx->drv_ctx[DECRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], + ctx->sh_desc_dec); + if (ret) { + dev_err(jrdev, "driver dec context update failed\n"); + return -EINVAL; + } + } + + return ret; +} + +static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + int err; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, 0); +} + +static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * RFC3686 specific: + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} + * | *key = {KEY, NONCE} + */ + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + keylen -= CTR_RFC3686_NONCE_SIZE; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + ctx1_iv_off = 16; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des3_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); +} + +static int des_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des_key(skcipher, key) ?: + 
skcipher_setkey(skcipher, key, keylen, 0); +} + +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct device *jrdev = ctx->jrdev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + int ret = 0; + int err; + + err = xts_verify_key(skcipher, key, keylen); + if (err) { + dev_dbg(jrdev, "key size mismatch\n"); + return err; + } + + if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) + ctx->xts_key_fallback = true; + + if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) { + err = crypto_skcipher_setkey(ctx->fallback, key, keylen); + if (err) + return err; + } + + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; + + /* xts skcipher encrypt, decrypt shared descriptors */ + cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata); + cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata); + + /* Now update the driver contexts with the new shared descriptor */ + if (ctx->drv_ctx[ENCRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], + ctx->sh_desc_enc); + if (ret) { + dev_err(jrdev, "driver enc context update failed\n"); + return -EINVAL; + } + } + + if (ctx->drv_ctx[DECRYPT]) { + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], + ctx->sh_desc_dec); + if (ret) { + dev_err(jrdev, "driver dec context update failed\n"); + return -EINVAL; + } + } + + return ret; +} + +/* + * aead_edesc - s/w-extended aead descriptor + * @src_nents: number of segments in input scatterlist + * @dst_nents: number of segments in output scatterlist + * @iv_dma: dma address of iv for checking continuity and link table + * @qm_sg_bytes: length of dma mapped h/w link table + * @qm_sg_dma: bus physical mapped address of h/w link table + * @assoclen: associated data length, in CAAM endianness + * @assoclen_dma: bus physical mapped address of req->assoclen + * @drv_req: driver-specific request structure + * @sgt: the h/w link table, followed by IV + */ +struct aead_edesc { + int src_nents; + int dst_nents; + dma_addr_t iv_dma; + int qm_sg_bytes; + dma_addr_t qm_sg_dma; + unsigned int assoclen; + dma_addr_t assoclen_dma; + struct caam_drv_req drv_req; + struct qm_sg_entry sgt[]; +}; + +/* + * skcipher_edesc - s/w-extended skcipher descriptor + * @src_nents: number of segments in input scatterlist + * @dst_nents: number of segments in output scatterlist + * @iv_dma: dma address of iv for checking continuity and link table + * @qm_sg_bytes: length of dma mapped h/w link table + * @qm_sg_dma: bus physical mapped address of h/w link table + * @drv_req: driver-specific request structure + * @sgt: the h/w link table, followed by IV + */ +struct skcipher_edesc { + int src_nents; + int dst_nents; + dma_addr_t iv_dma; + int qm_sg_bytes; + dma_addr_t qm_sg_dma; + struct caam_drv_req drv_req; + struct qm_sg_entry sgt[]; +}; + +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, + enum optype type) +{ + /* + * This function is called on the fast path with values of 'type' + * known at compile time. Invalid arguments are not expected and + * thus no checks are made. 
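+ * The per-direction driver context is created lazily: the lock below
+ * is taken only when drv_ctx[type] is still NULL, and the pointer is
+ * re-read under the lock in case another core initialized it first.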
+ */ + struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type]; + u32 *desc; + + if (unlikely(!drv_ctx)) { + spin_lock(&ctx->lock); + + /* Read again to check if some other core init drv_ctx */ + drv_ctx = ctx->drv_ctx[type]; + if (!drv_ctx) { + int cpu; + + if (type == ENCRYPT) + desc = ctx->sh_desc_enc; + else /* (type == DECRYPT) */ + desc = ctx->sh_desc_dec; + + cpu = smp_processor_id(); + drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); + if (!IS_ERR_OR_NULL(drv_ctx)) + drv_ctx->op_type = type; + + ctx->drv_ctx[type] = drv_ctx; + } + + spin_unlock(&ctx->lock); + } + + return drv_ctx; +} + +static void caam_unmap(struct device *dev, struct scatterlist *src, + struct scatterlist *dst, int src_nents, + int dst_nents, dma_addr_t iv_dma, int ivsize, + enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, + int qm_sg_bytes) +{ + if (dst != src) { + if (src_nents) + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); + if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + } else { + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); + } + + if (iv_dma) + dma_unmap_single(dev, iv_dma, ivsize, iv_dir); + if (qm_sg_bytes) + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); +} + +static void aead_unmap(struct device *dev, + struct aead_edesc *edesc, + struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + int ivsize = crypto_aead_ivsize(aead); + + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, + edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, + edesc->qm_sg_bytes); + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); +} + +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, + struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + int ivsize = crypto_skcipher_ivsize(skcipher); + + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, + edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, + edesc->qm_sg_bytes); +} + +static void aead_done(struct caam_drv_req *drv_req, u32 status) +{ + struct device *qidev; + struct aead_edesc *edesc; + struct aead_request *aead_req = drv_req->app_ctx; + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead); + int ecode = 0; + + qidev = caam_ctx->qidev; + + if (unlikely(status)) + ecode = caam_jr_strstatus(qidev, status); + + edesc = container_of(drv_req, typeof(*edesc), drv_req); + aead_unmap(qidev, edesc, aead_req); + + aead_request_complete(aead_req, ecode); + qi_cache_free(edesc); +} + +/* + * allocate and map the aead extended descriptor + */ +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + typeof(*alg), aead); + struct device *qidev = ctx->qidev; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; + int src_len, dst_len = 0; + struct aead_edesc *edesc; + dma_addr_t qm_sg_dma, iv_dma = 0; + int ivsize = 0; + unsigned int authsize = ctx->authsize; + int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes; + int in_len, out_len; + struct qm_sg_entry *sg_table, *fd_sgt; + struct caam_drv_ctx *drv_ctx; + + drv_ctx = get_drv_ctx(ctx, encrypt ? 
ENCRYPT : DECRYPT); + if (IS_ERR_OR_NULL(drv_ctx)) + return (struct aead_edesc *)drv_ctx; + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = qi_cache_alloc(GFP_DMA | flags); + if (unlikely(!edesc)) { + dev_err(qidev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + if (likely(req->src == req->dst)) { + src_len = req->assoclen + req->cryptlen + + (encrypt ? authsize : 0); + + src_nents = sg_nents_for_len(req->src, src_len); + if (unlikely(src_nents < 0)) { + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", + src_len); + qi_cache_free(edesc); + return ERR_PTR(src_nents); + } + + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, + DMA_BIDIRECTIONAL); + if (unlikely(!mapped_src_nents)) { + dev_err(qidev, "unable to map source\n"); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + src_len = req->assoclen + req->cryptlen; + dst_len = src_len + (encrypt ? authsize : (-authsize)); + + src_nents = sg_nents_for_len(req->src, src_len); + if (unlikely(src_nents < 0)) { + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", + src_len); + qi_cache_free(edesc); + return ERR_PTR(src_nents); + } + + dst_nents = sg_nents_for_len(req->dst, dst_len); + if (unlikely(dst_nents < 0)) { + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", + dst_len); + qi_cache_free(edesc); + return ERR_PTR(dst_nents); + } + + if (src_nents) { + mapped_src_nents = dma_map_sg(qidev, req->src, + src_nents, DMA_TO_DEVICE); + if (unlikely(!mapped_src_nents)) { + dev_err(qidev, "unable to map source\n"); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_src_nents = 0; + } + + if (dst_nents) { + mapped_dst_nents = dma_map_sg(qidev, req->dst, + dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(qidev, "unable to map destination\n"); + dma_unmap_sg(qidev, req->src, src_nents, + DMA_TO_DEVICE); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; + } + } + + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) + ivsize = crypto_aead_ivsize(aead); + + /* + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. + * Input is not contiguous. + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond + * the end of the table by allocating more S/G entries. Logic: + * if (src != dst && output S/G) + * pad output S/G, if needed + * else if (src == dst && S/G) + * overlapping S/Gs; pad one of them + * else if (input S/G) ... 
+ * pad input S/G, if needed + */ + qm_sg_ents = 1 + !!ivsize + mapped_src_nents; + if (mapped_dst_nents > 1) + qm_sg_ents += pad_sg_nents(mapped_dst_nents); + else if ((req->src == req->dst) && (mapped_src_nents > 1)) + qm_sg_ents = max(pad_sg_nents(qm_sg_ents), + 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); + else + qm_sg_ents = pad_sg_nents(qm_sg_ents); + + sg_table = &edesc->sgt[0]; + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > + CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + if (ivsize) { + u8 *iv = (u8 *)(sg_table + qm_sg_ents); + + /* Make sure IV is located in a DMAable area */ + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, + dst_nents, 0, 0, DMA_NONE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } + + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->iv_dma = iv_dma; + edesc->drv_req.app_ctx = req; + edesc->drv_req.cbk = aead_done; + edesc->drv_req.drv_ctx = drv_ctx; + + edesc->assoclen = cpu_to_caam32(req->assoclen); + edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, + DMA_TO_DEVICE); + if (dma_mapping_error(qidev, edesc->assoclen_dma)) { + dev_err(qidev, "unable to map assoclen\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); + qm_sg_index++; + if (ivsize) { + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); + qm_sg_index++; + } + sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); + qm_sg_index += mapped_src_nents; + + if (mapped_dst_nents > 1) + sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); + + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(qidev, qm_sg_dma)) { + dev_err(qidev, "unable to map S/G table\n"); + dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + edesc->qm_sg_dma = qm_sg_dma; + edesc->qm_sg_bytes = qm_sg_bytes; + + out_len = req->assoclen + req->cryptlen + + (encrypt ? 
ctx->authsize : (-ctx->authsize)); + in_len = 4 + ivsize + req->assoclen + req->cryptlen; + + fd_sgt = &edesc->drv_req.fd_sgt[0]; + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0); + + if (req->dst == req->src) { + if (mapped_src_nents == 1) + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), + out_len, 0); + else + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + + (1 + !!ivsize) * sizeof(*sg_table), + out_len, 0); + } else if (mapped_dst_nents <= 1) { + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, + 0); + } else { + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) * + qm_sg_index, out_len, 0); + } + + return edesc; +} + +static inline int aead_crypt(struct aead_request *req, bool encrypt) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ret; + + if (unlikely(caam_congested)) + return -EAGAIN; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, encrypt); + if (IS_ERR_OR_NULL(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor */ + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(ctx->qidev, edesc, req); + qi_cache_free(edesc); + } + + return ret; +} + +static int aead_encrypt(struct aead_request *req) +{ + return aead_crypt(req, true); +} + +static int aead_decrypt(struct aead_request *req) +{ + return aead_crypt(req, false); +} + +static int ipsec_gcm_encrypt(struct aead_request *req) +{ + return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, + true); +} + +static int ipsec_gcm_decrypt(struct aead_request *req) +{ + return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, + false); +} + +static void skcipher_done(struct caam_drv_req *drv_req, u32 status) +{ + struct skcipher_edesc *edesc; + struct skcipher_request *req = drv_req->app_ctx; + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); + struct device *qidev = caam_ctx->qidev; + int ivsize = crypto_skcipher_ivsize(skcipher); + int ecode = 0; + + dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); + + edesc = container_of(drv_req, typeof(*edesc), drv_req); + + if (status) + ecode = caam_jr_strstatus(qidev, status); + + print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, + edesc->src_nents > 1 ? 100 : ivsize, 1); + caam_dump_sg("dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); + + skcipher_unmap(qidev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->iv) to the last + * ciphertext block (CBC mode) or last counter (CTR mode). + * This is used e.g. by the CTS mode. + */ + if (!ecode) + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, + ivsize); + + qi_cache_free(edesc); + skcipher_request_complete(req, ecode); +} + +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, + bool encrypt) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct device *qidev = ctx->qidev; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; + struct skcipher_edesc *edesc; + dma_addr_t iv_dma; + u8 *iv; + int ivsize = crypto_skcipher_ivsize(skcipher); + int dst_sg_idx, qm_sg_ents, qm_sg_bytes; + struct qm_sg_entry *sg_table, *fd_sgt; + struct caam_drv_ctx *drv_ctx; + + drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); + if (IS_ERR_OR_NULL(drv_ctx)) + return (struct skcipher_edesc *)drv_ctx; + + src_nents = sg_nents_for_len(req->src, req->cryptlen); + if (unlikely(src_nents < 0)) { + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", + req->cryptlen); + return ERR_PTR(src_nents); + } + + if (unlikely(req->src != req->dst)) { + dst_nents = sg_nents_for_len(req->dst, req->cryptlen); + if (unlikely(dst_nents < 0)) { + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", + req->cryptlen); + return ERR_PTR(dst_nents); + } + + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, + DMA_TO_DEVICE); + if (unlikely(!mapped_src_nents)) { + dev_err(qidev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(qidev, "unable to map destination\n"); + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, + DMA_BIDIRECTIONAL); + if (unlikely(!mapped_src_nents)) { + dev_err(qidev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + } + + qm_sg_ents = 1 + mapped_src_nents; + dst_sg_idx = qm_sg_ents; + + /* + * Input, output HW S/G tables: [IV, src][dst, IV] + * IV entries point to the same buffer + * If src == dst, S/G entries are reused (S/G tables overlap) + * + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond + * the end of the table by allocating more S/G entries. 
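+ * pad_sg_nents() below rounds the entry count up so that such a
+ * 4-entry burst read never runs past the allocated table.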
+ */ + if (req->src != req->dst) + qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); + else + qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); + + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + + ivsize > CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* allocate space for base edesc, link tables and IV */ + edesc = qi_cache_alloc(GFP_DMA | flags); + if (unlikely(!edesc)) { + dev_err(qidev, "could not allocate extended descriptor\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* Make sure IV is located in a DMAable area */ + sg_table = &edesc->sgt[0]; + iv = (u8 *)(sg_table + qm_sg_ents); + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->iv_dma = iv_dma; + edesc->qm_sg_bytes = qm_sg_bytes; + edesc->drv_req.app_ctx = req; + edesc->drv_req.cbk = skcipher_done; + edesc->drv_req.drv_ctx = drv_ctx; + + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); + sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); + + if (req->src != req->dst) + sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); + + dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, + ivsize, 0); + + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { + dev_err(qidev, "unable to map S/G table\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + fd_sgt = &edesc->drv_req.fd_sgt[0]; + + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, + ivsize + req->cryptlen, 0); + + if (req->src == req->dst) + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + + sizeof(*sg_table), req->cryptlen + ivsize, + 0); + else + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * + sizeof(*sg_table), req->cryptlen + ivsize, + 0); + + return edesc; +} + +static inline bool xts_skcipher_ivsize(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + + return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); +} + +static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) +{ + struct skcipher_edesc *edesc; + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); + int ret; + + /* + * XTS is expected to return an error even for input length = 0 + * Note that the case input length < block size will be caught during + * HW offloading and return an error. 
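+ * Hence zero-length requests are completed here with status 0 only
+ * when no software fallback has been set up (i.e. for the non-XTS
+ * algorithms).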
+ */ + if (!req->cryptlen && !ctx->fallback) + return 0; + + if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) || + ctx->xts_key_fallback)) { + struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); + + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + + return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) : + crypto_skcipher_decrypt(&rctx->fallback_req); + } + + if (unlikely(caam_congested)) + return -EAGAIN; + + /* allocate extended descriptor */ + edesc = skcipher_edesc_alloc(req, encrypt); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); + if (!ret) { + ret = -EINPROGRESS; + } else { + skcipher_unmap(ctx->qidev, edesc, req); + qi_cache_free(edesc); + } + + return ret; +} + +static int skcipher_encrypt(struct skcipher_request *req) +{ + return skcipher_crypt(req, true); +} + +static int skcipher_decrypt(struct skcipher_request *req) +{ + return skcipher_crypt(req, false); +} + +static struct caam_skcipher_alg driver_algs[] = { + { + .skcipher = { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aes_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + }, + { + .skcipher = { + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-3des-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + }, + { + .skcipher = { + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = des_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + }, + { + .skcipher = { + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-caam-qi", + .cra_blocksize = 1, + }, + .setkey = ctr_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .chunksize = AES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + }, + { + .skcipher = { + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686-ctr-aes-caam-qi", + .cra_blocksize = 1, + }, + .setkey = rfc3686_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .chunksize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .rfc3686 = true, + }, + }, + { + .skcipher = { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-caam-qi", + 
.cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = xts_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, + }, +}; + +static struct caam_aead_alg driver_aeads[] = { + { + .aead = { + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-caam-qi", + .cra_blocksize = 1, + }, + .setkey = rfc4106_setkey, + .setauthsize = rfc4106_setauthsize, + .encrypt = ipsec_gcm_encrypt, + .decrypt = ipsec_gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc4543(gcm(aes))", + .cra_driver_name = "rfc4543-gcm-aes-caam-qi", + .cra_blocksize = 1, + }, + .setkey = rfc4543_setkey, + .setauthsize = rfc4543_setauthsize, + .encrypt = ipsec_gcm_encrypt, + .decrypt = ipsec_gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, + }, + }, + /* Galois Counter Mode */ + { + .aead = { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-caam-qi", + .cra_blocksize = 1, + }, + .setkey = gcm_setkey, + .setauthsize = gcm_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, + } + }, + /* single-pass ipsec_esp descriptor */ + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = 
aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-cbc-aes-" + "caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-cbc-aes-" + "caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + 
.cra_driver_name = "authenc-hmac-sha512-" + "cbc-aes-caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-cbc-aes-" + "caam-qi", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = 
DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = 
OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-" + "cbc-des3_ede-caam-qi", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-cbc-des-" + "caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = 
aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-cbc-des-" + "caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(des))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-cbc-des-" + "caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(des))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des-caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-cbc-des-" + "caam-qi", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, +}; + +static int caam_init_common(struct caam_ctx 
*ctx, struct caam_alg_entry *caam, + bool uses_dkp) +{ + struct caam_drv_private *priv; + struct device *dev; + + /* + * distribute tfms across job rings to ensure in-order + * crypto request processing per tfm + */ + ctx->jrdev = caam_jr_alloc(); + if (IS_ERR(ctx->jrdev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } + + dev = ctx->jrdev->parent; + priv = dev_get_drvdata(dev); + if (priv->era >= 6 && uses_dkp) + ctx->dir = DMA_BIDIRECTIONAL; + else + ctx->dir = DMA_TO_DEVICE; + + ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), + ctx->dir); + if (dma_mapping_error(dev, ctx->key_dma)) { + dev_err(dev, "unable to map key\n"); + caam_jr_free(ctx->jrdev); + return -ENOMEM; + } + + /* copy descriptor header template value */ + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; + + ctx->qidev = dev; + + spin_lock_init(&ctx->lock); + ctx->drv_ctx[ENCRYPT] = NULL; + ctx->drv_ctx[DECRYPT] = NULL; + + return 0; +} + +static int caam_cra_init(struct crypto_skcipher *tfm) +{ + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct caam_skcipher_alg *caam_alg = + container_of(alg, typeof(*caam_alg), skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; + int ret = 0; + + if (alg_aai == OP_ALG_AAI_XTS) { + const char *tfm_name = crypto_tfm_alg_name(&tfm->base); + struct crypto_skcipher *fallback; + + fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + pr_err("Failed to allocate %s fallback: %ld\n", + tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + + ctx->fallback = fallback; + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + + crypto_skcipher_reqsize(fallback)); + } + + ret = caam_init_common(ctx, &caam_alg->caam, false); + if (ret && ctx->fallback) + crypto_free_skcipher(ctx->fallback); + + return ret; +} + +static int caam_aead_init(struct crypto_aead *tfm) +{ + struct aead_alg *alg = crypto_aead_alg(tfm); + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), + aead); + struct caam_ctx *ctx = crypto_aead_ctx(tfm); + + return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); +} + +static void caam_exit_common(struct caam_ctx *ctx) +{ + caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); + caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); + + dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), + ctx->dir); + + caam_jr_free(ctx->jrdev); +} + +static void caam_cra_exit(struct crypto_skcipher *tfm) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (ctx->fallback) + crypto_free_skcipher(ctx->fallback); + caam_exit_common(ctx); +} + +static void caam_aead_exit(struct crypto_aead *tfm) +{ + caam_exit_common(crypto_aead_ctx(tfm)); +} + +void caam_qi_algapi_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + + if (t_alg->registered) + crypto_unregister_aead(&t_alg->aead); + } + + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + struct caam_skcipher_alg *t_alg = driver_algs + i; + + if (t_alg->registered) + crypto_unregister_skcipher(&t_alg->skcipher); + } +} + +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) +{ + struct skcipher_alg *alg = &t_alg->skcipher; + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + 
alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY); + + alg->init = caam_cra_init; + alg->exit = caam_cra_exit; +} + +static void caam_aead_alg_init(struct caam_aead_alg *t_alg) +{ + struct aead_alg *alg = &t_alg->aead; + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; + + alg->init = caam_aead_init; + alg->exit = caam_aead_exit; +} + +int caam_qi_algapi_init(struct device *ctrldev) +{ + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); + int i = 0, err = 0; + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; + unsigned int md_limit = SHA512_DIGEST_SIZE; + bool registered = false; + + /* Make sure this runs only on (DPAA 1.x) QI */ + if (!priv->qi_present || caam_dpaa2) + return 0; + + /* + * Register crypto algorithms the device supports. + * First, detect presence and attributes of DES, AES, and MD blocks. + */ + if (priv->era < 10) { + u32 cha_vid, cha_inst; + + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); + aes_vid = cha_vid & CHA_ID_LS_AES_MASK; + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> + CHA_ID_LS_DES_SHIFT; + aes_inst = cha_inst & CHA_ID_LS_AES_MASK; + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + } else { + u32 aesa, mdha; + + aesa = rd_reg32(&priv->ctrl->vreg.aesa); + mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; + aes_inst = aesa & CHA_VER_NUM_MASK; + md_inst = mdha & CHA_VER_NUM_MASK; + } + + /* If MD is present, limit digest size based on LP256 */ + if (md_inst && md_vid == CHA_VER_VID_MD_LP256) + md_limit = SHA256_DIGEST_SIZE; + + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + struct caam_skcipher_alg *t_alg = driver_algs + i; + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; + + /* Skip DES algorithms if not supported by device */ + if (!des_inst && + ((alg_sel == OP_ALG_ALGSEL_3DES) || + (alg_sel == OP_ALG_ALGSEL_DES))) + continue; + + /* Skip AES algorithms if not supported by device */ + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) + continue; + + caam_skcipher_alg_init(t_alg); + + err = crypto_register_skcipher(&t_alg->skcipher); + if (err) { + dev_warn(ctrldev, "%s alg registration failed\n", + t_alg->skcipher.base.cra_driver_name); + continue; + } + + t_alg->registered = true; + registered = true; + } + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + u32 c1_alg_sel = t_alg->caam.class1_alg_type & + OP_ALG_ALGSEL_MASK; + u32 c2_alg_sel = t_alg->caam.class2_alg_type & + OP_ALG_ALGSEL_MASK; + u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; + + /* Skip DES algorithms if not supported by device */ + if (!des_inst && + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || + (c1_alg_sel == OP_ALG_ALGSEL_DES))) + continue; + + /* Skip AES algorithms if not supported by device */ + if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) + continue; + + /* + * Check support for AES algorithms not available + * on LP devices. 
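+ * (In practice this skips the GCM-based algorithms when only the
+ * low-power AES block is instantiated.)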
+ */ + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) + continue; + + /* + * Skip algorithms requiring message digests + * if MD or MD size is not supported by device. + */ + if (c2_alg_sel && + (!md_inst || (t_alg->aead.maxauthsize > md_limit))) + continue; + + caam_aead_alg_init(t_alg); + + err = crypto_register_aead(&t_alg->aead); + if (err) { + pr_warn("%s alg registration failed\n", + t_alg->aead.base.cra_driver_name); + continue; + } + + t_alg->registered = true; + registered = true; + } + + if (registered) + dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); + + return err; +} diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c new file mode 100644 index 000000000..43bed47ce --- /dev/null +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -0,0 +1,5518 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright 2015-2016 Freescale Semiconductor Inc. + * Copyright 2017-2019 NXP + */ + +#include "compat.h" +#include "regs.h" +#include "caamalg_qi2.h" +#include "dpseci_cmd.h" +#include "desc_constr.h" +#include "error.h" +#include "sg_sw_sec4.h" +#include "sg_sw_qm2.h" +#include "key_gen.h" +#include "caamalg_desc.h" +#include "caamhash_desc.h" +#include "dpseci-debugfs.h" +#include <linux/fsl/mc.h> +#include <soc/fsl/dpaa2-io.h> +#include <soc/fsl/dpaa2-fd.h> +#include <crypto/xts.h> +#include <asm/unaligned.h> + +#define CAAM_CRA_PRIORITY 2000 + +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */ +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \ + SHA512_DIGEST_SIZE * 2) + +/* + * This is a a cache of buffers, from which the users of CAAM QI driver + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath. + * NOTE: A more elegant solution would be to have some headroom in the frames + * being processed. This can be added by the dpaa2-eth driver. This would + * pose a problem for userspace application processing which cannot + * know of this limitation. So for now, this will work. + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here + */ +static struct kmem_cache *qi_cache; + +struct caam_alg_entry { + struct device *dev; + int class1_alg_type; + int class2_alg_type; + bool rfc3686; + bool geniv; + bool nodkp; +}; + +struct caam_aead_alg { + struct aead_alg aead; + struct caam_alg_entry caam; + bool registered; +}; + +struct caam_skcipher_alg { + struct skcipher_alg skcipher; + struct caam_alg_entry caam; + bool registered; +}; + +/** + * struct caam_ctx - per-session context + * @flc: Flow Contexts array + * @key: [authentication key], encryption key + * @flc_dma: I/O virtual addresses of the Flow Contexts + * @key_dma: I/O virtual address of the key + * @dir: DMA direction for mapping key and Flow Contexts + * @dev: dpseci device + * @adata: authentication algorithm details + * @cdata: encryption algorithm details + * @authsize: authentication tag (a.k.a. ICV / MAC) size + */ +struct caam_ctx { + struct caam_flc flc[NUM_OP]; + u8 key[CAAM_MAX_KEY_SIZE]; + dma_addr_t flc_dma[NUM_OP]; + dma_addr_t key_dma; + enum dma_data_direction dir; + struct device *dev; + struct alginfo adata; + struct alginfo cdata; + unsigned int authsize; + bool xts_key_fallback; + struct crypto_skcipher *fallback; +}; + +static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv, + dma_addr_t iova_addr) +{ + phys_addr_t phys_addr; + + phys_addr = priv->domain ? 
iommu_iova_to_phys(priv->domain, iova_addr) : + iova_addr; + + return phys_to_virt(phys_addr); +} + +/* + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache + * + * Allocate data on the hotpath. Instead of using kzalloc, one can use the + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for + * hosting 16 SG entries. + * + * @flags - flags that would be used for the equivalent kmalloc(..) call + * + * Returns a pointer to a retrieved buffer on success or NULL on failure. + */ +static inline void *qi_cache_zalloc(gfp_t flags) +{ + return kmem_cache_zalloc(qi_cache, flags); +} + +/* + * qi_cache_free - Frees buffers allocated from CAAM-QI cache + * + * @obj - buffer previously allocated by qi_cache_zalloc + * + * No checking is being done, the call is a passthrough call to + * kmem_cache_free(...) + */ +static inline void qi_cache_free(void *obj) +{ + kmem_cache_free(qi_cache, obj); +} + +static struct caam_request *to_caam_req(struct crypto_async_request *areq) +{ + switch (crypto_tfm_alg_type(areq->tfm)) { + case CRYPTO_ALG_TYPE_SKCIPHER: + return skcipher_request_ctx(skcipher_request_cast(areq)); + case CRYPTO_ALG_TYPE_AEAD: + return aead_request_ctx(container_of(areq, struct aead_request, + base)); + case CRYPTO_ALG_TYPE_AHASH: + return ahash_request_ctx(ahash_request_cast(areq)); + default: + return ERR_PTR(-EINVAL); + } +} + +static void caam_unmap(struct device *dev, struct scatterlist *src, + struct scatterlist *dst, int src_nents, + int dst_nents, dma_addr_t iv_dma, int ivsize, + enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, + int qm_sg_bytes) +{ + if (dst != src) { + if (src_nents) + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); + if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + } else { + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); + } + + if (iv_dma) + dma_unmap_single(dev, iv_dma, ivsize, iv_dir); + + if (qm_sg_bytes) + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); +} + +static int aead_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + typeof(*alg), aead); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + struct device *dev = ctx->dev; + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); + struct caam_flc *flc; + u32 *desc; + u32 ctx1_iv_off = 0; + u32 *nonce = NULL; + unsigned int data_len[2]; + u32 inl_mask; + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = alg->caam.rfc3686; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + if (ctr_mode) + ctx1_iv_off = 16; + + /* + * RFC3686 specific: + * CONTEXT1[255:128] = {NONCE, IV, COUNTER} + */ + if (is_rfc3686) { + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); + } + + /* + * In case |user key| > |derived key|, using DKP<imm,imm> would result + * in invalid opcodes (last bytes of user key) in the resulting + * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key + * addresses are needed. 
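+ *
+ * (Editor's illustration, not part of the upstream comment.) ctx->key is a
+ * single buffer holding both keys, laid out by aead_setkey():
+ *
+ *	ctx->key: [ split auth key, adata.keylen_pad bytes ][ enc key ]
+ *	          ^ adata.key_virt / key_dma                ^ cdata.key_virt / key_dma
+ *
+ * (for rfc3686 the nonce sits at the tail of the enc key region), which is
+ * why the virtual and DMA pointers below are simple offsets into it.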
+ */ + ctx->adata.key_virt = ctx->key; + ctx->adata.key_dma = ctx->key_dma; + + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + + data_len[0] = ctx->adata.keylen_pad; + data_len[1] = ctx->cdata.keylen; + + /* aead_encrypt shared descriptor */ + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN : + DESC_QI_AEAD_ENC_LEN) + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), + DESC_JOB_IO_LEN, data_len, &inl_mask, + ARRAY_SIZE(data_len)) < 0) + return -EINVAL; + + ctx->adata.key_inline = !!(inl_mask & 1); + ctx->cdata.key_inline = !!(inl_mask & 2); + + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + + if (alg->caam.geniv) + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, is_rfc3686, + nonce, ctx1_iv_off, true, + priv->sec_attr.era); + else + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, is_rfc3686, nonce, + ctx1_iv_off, true, priv->sec_attr.era); + + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + /* aead_decrypt shared descriptor */ + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), + DESC_JOB_IO_LEN, data_len, &inl_mask, + ARRAY_SIZE(data_len)) < 0) + return -EINVAL; + + ctx->adata.key_inline = !!(inl_mask & 1); + ctx->cdata.key_inline = !!(inl_mask & 2); + + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, alg->caam.geniv, + is_rfc3686, nonce, ctx1_iv_off, true, + priv->sec_attr.era); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + return 0; +} + +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + aead_set_sh_desc(authenc); + + return 0; +} + +static int aead_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; + struct crypto_authenc_keys keys; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + + dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n", + keys.authkeylen + keys.enckeylen, keys.enckeylen, + keys.authkeylen); + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + ctx->adata.keylen = keys.authkeylen; + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & + OP_ALG_ALGSEL_MASK); + + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) + goto badkey; + + memcpy(ctx->key, keys.authkey, keys.authkeylen); + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad + + keys.enckeylen, ctx->dir); + print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, + ctx->adata.keylen_pad + keys.enckeylen, 1); + + ctx->cdata.keylen = keys.enckeylen; + + memzero_explicit(&keys, sizeof(keys)); + return aead_set_sh_desc(aead); +badkey: + memzero_explicit(&keys, sizeof(keys)); + return -EINVAL; +} + +static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct crypto_authenc_keys keys; + int err; + + err = 
crypto_authenc_extractkeys(&keys, key, keylen); + if (unlikely(err)) + goto out; + + err = -EINVAL; + if (keys.enckeylen != DES3_EDE_KEY_SIZE) + goto out; + + err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?: + aead_setkey(aead, key, keylen); + +out: + memzero_explicit(&keys, sizeof(keys)); + return err; +} + +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_request *req_ctx = aead_request_ctx(req); + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + typeof(*alg), aead); + struct device *dev = ctx->dev; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; + int src_len, dst_len = 0; + struct aead_edesc *edesc; + dma_addr_t qm_sg_dma, iv_dma = 0; + int ivsize = 0; + unsigned int authsize = ctx->authsize; + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes; + int in_len, out_len; + struct dpaa2_sg_entry *sg_table; + + /* allocate space for base edesc, link tables and IV */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (unlikely(!edesc)) { + dev_err(dev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + if (unlikely(req->dst != req->src)) { + src_len = req->assoclen + req->cryptlen; + dst_len = src_len + (encrypt ? authsize : (-authsize)); + + src_nents = sg_nents_for_len(req->src, src_len); + if (unlikely(src_nents < 0)) { + dev_err(dev, "Insufficient bytes (%d) in src S/G\n", + src_len); + qi_cache_free(edesc); + return ERR_PTR(src_nents); + } + + dst_nents = sg_nents_for_len(req->dst, dst_len); + if (unlikely(dst_nents < 0)) { + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", + dst_len); + qi_cache_free(edesc); + return ERR_PTR(dst_nents); + } + + if (src_nents) { + mapped_src_nents = dma_map_sg(dev, req->src, src_nents, + DMA_TO_DEVICE); + if (unlikely(!mapped_src_nents)) { + dev_err(dev, "unable to map source\n"); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_src_nents = 0; + } + + if (dst_nents) { + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(dev, "unable to map destination\n"); + dma_unmap_sg(dev, req->src, src_nents, + DMA_TO_DEVICE); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; + } + } else { + src_len = req->assoclen + req->cryptlen + + (encrypt ? authsize : 0); + + src_nents = sg_nents_for_len(req->src, src_len); + if (unlikely(src_nents < 0)) { + dev_err(dev, "Insufficient bytes (%d) in src S/G\n", + src_len); + qi_cache_free(edesc); + return ERR_PTR(src_nents); + } + + mapped_src_nents = dma_map_sg(dev, req->src, src_nents, + DMA_BIDIRECTIONAL); + if (unlikely(!mapped_src_nents)) { + dev_err(dev, "unable to map source\n"); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } + + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) + ivsize = crypto_aead_ivsize(aead); + + /* + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. + * Input is not contiguous. + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond + * the end of the table by allocating more S/G entries. 
Logic: + * if (src != dst && output S/G) + * pad output S/G, if needed + * else if (src == dst && S/G) + * overlapping S/Gs; pad one of them + * else if (input S/G) ... + * pad input S/G, if needed + */ + qm_sg_nents = 1 + !!ivsize + mapped_src_nents; + if (mapped_dst_nents > 1) + qm_sg_nents += pad_sg_nents(mapped_dst_nents); + else if ((req->src == req->dst) && (mapped_src_nents > 1)) + qm_sg_nents = max(pad_sg_nents(qm_sg_nents), + 1 + !!ivsize + + pad_sg_nents(mapped_src_nents)); + else + qm_sg_nents = pad_sg_nents(qm_sg_nents); + + sg_table = &edesc->sgt[0]; + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > + CAAM_QI_MEMCACHE_SIZE)) { + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_nents, ivsize); + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + if (ivsize) { + u8 *iv = (u8 *)(sg_table + qm_sg_nents); + + /* Make sure IV is located in a DMAable area */ + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(dev, iv_dma)) { + dev_err(dev, "unable to map IV\n"); + caam_unmap(dev, req->src, req->dst, src_nents, + dst_nents, 0, 0, DMA_NONE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } + + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->iv_dma = iv_dma; + + if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE) + /* + * The associated data comes already with the IV but we need + * to skip it when we authenticate or encrypt... + */ + edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize); + else + edesc->assoclen = cpu_to_caam32(req->assoclen); + edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, edesc->assoclen_dma)) { + dev_err(dev, "unable to map assoclen\n"); + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); + qm_sg_index++; + if (ivsize) { + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); + qm_sg_index++; + } + sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); + qm_sg_index += mapped_src_nents; + + if (mapped_dst_nents > 1) + sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); + + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(dev, qm_sg_dma)) { + dev_err(dev, "unable to map S/G table\n"); + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + edesc->qm_sg_dma = qm_sg_dma; + edesc->qm_sg_bytes = qm_sg_bytes; + + out_len = req->assoclen + req->cryptlen + + (encrypt ? 
ctx->authsize : (-ctx->authsize)); + in_len = 4 + ivsize + req->assoclen + req->cryptlen; + + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(in_fle, qm_sg_dma); + dpaa2_fl_set_len(in_fle, in_len); + + if (req->dst == req->src) { + if (mapped_src_nents == 1) { + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src)); + } else { + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(out_fle, qm_sg_dma + + (1 + !!ivsize) * sizeof(*sg_table)); + } + } else if (!mapped_dst_nents) { + /* + * crypto engine requires the output entry to be present when + * "frame list" FD is used. + * Since engine does not support FMT=2'b11 (unused entry type), + * leaving out_fle zeroized is the best option. + */ + goto skip_out_fle; + } else if (mapped_dst_nents == 1) { + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); + } else { + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index * + sizeof(*sg_table)); + } + + dpaa2_fl_set_len(out_fle, out_len); + +skip_out_fle: + return edesc; +} + +static int chachapoly_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + struct device *dev = ctx->dev; + struct caam_flc *flc; + u32 *desc; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, true, true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, false, true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + return 0; +} + +static int chachapoly_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + + if (authsize != POLY1305_DIGEST_SIZE) + return -EINVAL; + + ctx->authsize = authsize; + return chachapoly_set_sh_desc(aead); +} + +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; + + if (keylen != CHACHA_KEY_SIZE + saltlen) + return -EINVAL; + + memcpy(ctx->key, key, keylen); + ctx->cdata.key_virt = ctx->key; + ctx->cdata.keylen = keylen - saltlen; + + return chachapoly_set_sh_desc(aead); +} + +static int gcm_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; + unsigned int ivsize = crypto_aead_ivsize(aead); + struct caam_flc *flc; + u32 *desc; + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - + ctx->cdata.keylen; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + /* + * AES GCM encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } else { + 
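+ /*
+ * (Editor's note, not part of the upstream source.) An inlined key is
+ * embedded as immediate data in the shared descriptor, sparing a separate
+ * DMA fetch of the key when the job runs; once the 64-word descriptor
+ * buffer cannot fit it, fall back to referencing the key by DMA address:
+ */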
ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) { + ctx->cdata.key_inline = true; + ctx->cdata.key_virt = ctx->key; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + return 0; +} + +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = crypto_gcm_check_authsize(authsize); + if (err) + return err; + + ctx->authsize = authsize; + gcm_set_sh_desc(authenc); + + return 0; +} + +static int gcm_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; + int ret; + + ret = aes_check_keylen(keylen); + if (ret) + return ret; + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + memcpy(ctx->key, key, keylen); + dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir); + ctx->cdata.keylen = keylen; + + return gcm_set_sh_desc(aead); +} + +static int rfc4106_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; + unsigned int ivsize = crypto_aead_ivsize(aead); + struct caam_flc *flc; + u32 *desc; + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - + ctx->cdata.keylen; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + ctx->cdata.key_virt = ctx->key; + + /* + * RFC4106 encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { + ctx->cdata.key_inline = true; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, + true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) { + ctx->cdata.key_inline = true; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, + true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + return 0; +} + +static int rfc4106_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + int err; + + err = 
crypto_rfc4106_check_authsize(authsize); + if (err) + return err; + + ctx->authsize = authsize; + rfc4106_set_sh_desc(authenc); + + return 0; +} + +static int rfc4106_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; + int ret; + + ret = aes_check_keylen(keylen - 4); + if (ret) + return ret; + + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + memcpy(ctx->key, key, keylen); + /* + * The last four bytes of the key material are used as the salt value + * in the nonce. Update the AES key length. + */ + ctx->cdata.keylen = keylen - 4; + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, + ctx->dir); + + return rfc4106_set_sh_desc(aead); +} + +static int rfc4543_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; + unsigned int ivsize = crypto_aead_ivsize(aead); + struct caam_flc *flc; + u32 *desc; + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - + ctx->cdata.keylen; + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + ctx->cdata.key_virt = ctx->key; + + /* + * RFC4543 encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) { + ctx->cdata.key_inline = true; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, + true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) { + ctx->cdata.key_inline = true; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } + + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, + true); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + return 0; +} + +static int rfc4543_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + if (authsize != 16) + return -EINVAL; + + ctx->authsize = authsize; + rfc4543_set_sh_desc(authenc); + + return 0; +} + +static int rfc4543_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; + int ret; + + ret = aes_check_keylen(keylen - 4); + if (ret) + return ret; + + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + memcpy(ctx->key, key, keylen); + /* + * The last four bytes of the key material are used as the salt value + * in the nonce. Update the AES key length. 
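+ *
+ * (Editor's note.) In other words a 20-, 28- or 36-byte input maps to
+ * AES-128/192/256 plus a 4-byte salt; only the AES portion counts toward
+ * cdata.keylen, while the salt stays at the tail of ctx->key.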
+ */ + ctx->cdata.keylen = keylen - 4; + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, + ctx->dir); + + return rfc4543_set_sh_desc(aead); +} + +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + unsigned int keylen, const u32 ctx1_iv_off) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_skcipher_alg *alg = + container_of(crypto_skcipher_alg(skcipher), + struct caam_skcipher_alg, skcipher); + struct device *dev = ctx->dev; + struct caam_flc *flc; + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + u32 *desc; + const bool is_rfc3686 = alg->caam.rfc3686; + + print_hex_dump_debug("key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; + + /* skcipher_encrypt shared descriptor */ + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, + ctx1_iv_off); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + /* skcipher_decrypt shared descriptor */ + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, + ctx1_iv_off); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + return 0; +} + +static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + int err; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, 0); +} + +static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * RFC3686 specific: + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} + * | *key = {KEY, NONCE} + */ + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; + keylen -= CTR_RFC3686_NONCE_SIZE; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 ctx1_iv_off; + int err; + + /* + * AES-CTR needs to load IV in CONTEXT1 reg + * at an offset of 128bits (16bytes) + * CONTEXT1[255:128] = IV + */ + ctx1_iv_off = 16; + + err = aes_check_keylen(keylen); + if (err) + return err; + + return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); +} + +static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + if (keylen != CHACHA_KEY_SIZE) + return -EINVAL; + + return skcipher_setkey(skcipher, key, keylen, 0); +} + +static int des_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); +} + +static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + return verify_skcipher_des3_key(skcipher, key) ?: + skcipher_setkey(skcipher, key, keylen, 0); +} + +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct device *dev = ctx->dev; + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); + 
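+ /*
+ * (Editor's summary, not part of the upstream source.) The xts(aes)
+ * software fallback allocated at tfm init time ends up being used when:
+ *  - the key is not a 2 x AES-128 or 2 x AES-256 pair (flagged below via
+ *    ctx->xts_key_fallback), or
+ *  - the SEC era is <= 8 and the request IV has a non-zero upper 64 bits
+ *    (checked per request in skcipher_encrypt()/skcipher_decrypt()).
+ */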
struct caam_flc *flc; + u32 *desc; + int err; + + err = xts_verify_key(skcipher, key, keylen); + if (err) { + dev_dbg(dev, "key size mismatch\n"); + return err; + } + + if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) + ctx->xts_key_fallback = true; + + if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) { + err = crypto_skcipher_setkey(ctx->fallback, key, keylen); + if (err) + return err; + } + + ctx->cdata.keylen = keylen; + ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; + + /* xts_skcipher_encrypt shared descriptor */ + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + /* xts_skcipher_decrypt shared descriptor */ + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], + sizeof(flc->flc) + desc_bytes(desc), + ctx->dir); + + return 0; +} + +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_request *req_ctx = skcipher_request_ctx(req); + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct device *dev = ctx->dev; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; + struct skcipher_edesc *edesc; + dma_addr_t iv_dma; + u8 *iv; + int ivsize = crypto_skcipher_ivsize(skcipher); + int dst_sg_idx, qm_sg_ents, qm_sg_bytes; + struct dpaa2_sg_entry *sg_table; + + src_nents = sg_nents_for_len(req->src, req->cryptlen); + if (unlikely(src_nents < 0)) { + dev_err(dev, "Insufficient bytes (%d) in src S/G\n", + req->cryptlen); + return ERR_PTR(src_nents); + } + + if (unlikely(req->dst != req->src)) { + dst_nents = sg_nents_for_len(req->dst, req->cryptlen); + if (unlikely(dst_nents < 0)) { + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", + req->cryptlen); + return ERR_PTR(dst_nents); + } + + mapped_src_nents = dma_map_sg(dev, req->src, src_nents, + DMA_TO_DEVICE); + if (unlikely(!mapped_src_nents)) { + dev_err(dev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(dev, "unable to map destination\n"); + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_src_nents = dma_map_sg(dev, req->src, src_nents, + DMA_BIDIRECTIONAL); + if (unlikely(!mapped_src_nents)) { + dev_err(dev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + } + + qm_sg_ents = 1 + mapped_src_nents; + dst_sg_idx = qm_sg_ents; + + /* + * Input, output HW S/G tables: [IV, src][dst, IV] + * IV entries point to the same buffer + * If src == dst, S/G entries are reused (S/G tables overlap) + * + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond + * the end of the table by allocating more S/G entries. 
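+ *
+ * (Editor's worked example, assuming pad_sg_nents() rounds up to the
+ * 4-entry HW read burst.) With src != dst and two mapped entries on each
+ * side: 1 + 2 = 3 input entries plus pad_sg_nents(2 + 1) = 4 output
+ * entries, i.e. qm_sg_ents = 7. With src == dst and two mapped entries:
+ * qm_sg_ents = 1 + pad_sg_nents(1 + 2) = 5.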
+ */ + if (req->src != req->dst) + qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); + else + qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); + + qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry); + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + + ivsize > CAAM_QI_MEMCACHE_SIZE)) { + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* allocate space for base edesc, link tables and IV */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (unlikely(!edesc)) { + dev_err(dev, "could not allocate extended descriptor\n"); + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* Make sure IV is located in a DMAable area */ + sg_table = &edesc->sgt[0]; + iv = (u8 *)(sg_table + qm_sg_ents); + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, iv_dma)) { + dev_err(dev, "unable to map IV\n"); + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->iv_dma = iv_dma; + edesc->qm_sg_bytes = qm_sg_bytes; + + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); + sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); + + if (req->src != req->dst) + sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); + + dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, + ivsize, 0); + + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, edesc->qm_sg_dma)) { + dev_err(dev, "unable to map S/G table\n"); + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, + iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); + dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize); + + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); + + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); + + if (req->src == req->dst) + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + + sizeof(*sg_table)); + else + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * + sizeof(*sg_table)); + + return edesc; +} + +static void aead_unmap(struct device *dev, struct aead_edesc *edesc, + struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + int ivsize = crypto_aead_ivsize(aead); + + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, + edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, + edesc->qm_sg_bytes); + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); +} + +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, + struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + int ivsize = crypto_skcipher_ivsize(skcipher); + + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, + edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, + edesc->qm_sg_bytes); +} + +static void aead_encrypt_done(void *cbk_ctx, u32 status) +{ + struct crypto_async_request *areq = cbk_ctx; + struct aead_request *req = container_of(areq, struct aead_request, + 
base); + struct caam_request *req_ctx = to_caam_req(areq); + struct aead_edesc *edesc = req_ctx->edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ecode = 0; + + dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); + + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); + + aead_unmap(ctx->dev, edesc, req); + qi_cache_free(edesc); + aead_request_complete(req, ecode); +} + +static void aead_decrypt_done(void *cbk_ctx, u32 status) +{ + struct crypto_async_request *areq = cbk_ctx; + struct aead_request *req = container_of(areq, struct aead_request, + base); + struct caam_request *req_ctx = to_caam_req(areq); + struct aead_edesc *edesc = req_ctx->edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ecode = 0; + + dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); + + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); + + aead_unmap(ctx->dev, edesc, req); + qi_cache_free(edesc); + aead_request_complete(req, ecode); +} + +static int aead_encrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_request *caam_req = aead_request_ctx(req); + int ret; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[ENCRYPT]; + caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; + caam_req->cbk = aead_encrypt_done; + caam_req->ctx = &req->base; + caam_req->edesc = edesc; + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); + if (ret != -EINPROGRESS && + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + aead_unmap(ctx->dev, edesc, req); + qi_cache_free(edesc); + } + + return ret; +} + +static int aead_decrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct caam_request *caam_req = aead_request_ctx(req); + int ret; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, false); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[DECRYPT]; + caam_req->flc_dma = ctx->flc_dma[DECRYPT]; + caam_req->cbk = aead_decrypt_done; + caam_req->ctx = &req->base; + caam_req->edesc = edesc; + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); + if (ret != -EINPROGRESS && + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + aead_unmap(ctx->dev, edesc, req); + qi_cache_free(edesc); + } + + return ret; +} + +static int ipsec_gcm_encrypt(struct aead_request *req) +{ + return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req); +} + +static int ipsec_gcm_decrypt(struct aead_request *req) +{ + return crypto_ipsec_check_assoclen(req->assoclen) ? 
: aead_decrypt(req); +} + +static void skcipher_encrypt_done(void *cbk_ctx, u32 status) +{ + struct crypto_async_request *areq = cbk_ctx; + struct skcipher_request *req = skcipher_request_cast(areq); + struct caam_request *req_ctx = to_caam_req(areq); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct skcipher_edesc *edesc = req_ctx->edesc; + int ecode = 0; + int ivsize = crypto_skcipher_ivsize(skcipher); + + dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); + + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); + + print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, + edesc->src_nents > 1 ? 100 : ivsize, 1); + caam_dump_sg("dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); + + skcipher_unmap(ctx->dev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->iv) to the last + * ciphertext block (CBC mode) or last counter (CTR mode). + * This is used e.g. by the CTS mode. + */ + if (!ecode) + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, + ivsize); + + qi_cache_free(edesc); + skcipher_request_complete(req, ecode); +} + +static void skcipher_decrypt_done(void *cbk_ctx, u32 status) +{ + struct crypto_async_request *areq = cbk_ctx; + struct skcipher_request *req = skcipher_request_cast(areq); + struct caam_request *req_ctx = to_caam_req(areq); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct skcipher_edesc *edesc = req_ctx->edesc; + int ecode = 0; + int ivsize = crypto_skcipher_ivsize(skcipher); + + dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); + + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); + + print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, + edesc->src_nents > 1 ? 100 : ivsize, 1); + caam_dump_sg("dst @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); + + skcipher_unmap(ctx->dev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->iv) to the last + * ciphertext block (CBC mode) or last counter (CTR mode). + * This is used e.g. by the CTS mode. + */ + if (!ecode) + memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, + ivsize); + + qi_cache_free(edesc); + skcipher_request_complete(req, ecode); +} + +static inline bool xts_skcipher_ivsize(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + + return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); +} + +static int skcipher_encrypt(struct skcipher_request *req) +{ + struct skcipher_edesc *edesc; + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_request *caam_req = skcipher_request_ctx(req); + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); + int ret; + + /* + * XTS is expected to return an error even for input length = 0 + * Note that the case input length < block size will be caught during + * HW offloading and return an error. 
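+ *
+ * (Editor's note.) ctx->fallback is only non-NULL for xts(aes), so for
+ * every other skcipher a zero-length request simply completes with 0
+ * here; for XTS the request is forwarded (to the fallback or the engine)
+ * so that the expected error is still produced.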
+ */ + if (!req->cryptlen && !ctx->fallback) + return 0; + + if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || + ctx->xts_key_fallback)) { + skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); + skcipher_request_set_callback(&caam_req->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&caam_req->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + + return crypto_skcipher_encrypt(&caam_req->fallback_req); + } + + /* allocate extended descriptor */ + edesc = skcipher_edesc_alloc(req); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[ENCRYPT]; + caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; + caam_req->cbk = skcipher_encrypt_done; + caam_req->ctx = &req->base; + caam_req->edesc = edesc; + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); + if (ret != -EINPROGRESS && + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + skcipher_unmap(ctx->dev, edesc, req); + qi_cache_free(edesc); + } + + return ret; +} + +static int skcipher_decrypt(struct skcipher_request *req) +{ + struct skcipher_edesc *edesc; + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); + struct caam_request *caam_req = skcipher_request_ctx(req); + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); + int ret; + + /* + * XTS is expected to return an error even for input length = 0 + * Note that the case input length < block size will be caught during + * HW offloading and return an error. + */ + if (!req->cryptlen && !ctx->fallback) + return 0; + + if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || + ctx->xts_key_fallback)) { + skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); + skcipher_request_set_callback(&caam_req->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&caam_req->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + + return crypto_skcipher_decrypt(&caam_req->fallback_req); + } + + /* allocate extended descriptor */ + edesc = skcipher_edesc_alloc(req); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[DECRYPT]; + caam_req->flc_dma = ctx->flc_dma[DECRYPT]; + caam_req->cbk = skcipher_decrypt_done; + caam_req->ctx = &req->base; + caam_req->edesc = edesc; + ret = dpaa2_caam_enqueue(ctx->dev, caam_req); + if (ret != -EINPROGRESS && + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { + skcipher_unmap(ctx->dev, edesc, req); + qi_cache_free(edesc); + } + + return ret; +} + +static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam, + bool uses_dkp) +{ + dma_addr_t dma_addr; + int i; + + /* copy descriptor header template value */ + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; + + ctx->dev = caam->dev; + ctx->dir = uses_dkp ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, + offsetof(struct caam_ctx, flc_dma), + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->dev, dma_addr)) { + dev_err(ctx->dev, "unable to map key, shared descriptors\n"); + return -ENOMEM; + } + + for (i = 0; i < NUM_OP; i++) + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]); + + return 0; +} + +static int caam_cra_init_skcipher(struct crypto_skcipher *tfm) +{ + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct caam_skcipher_alg *caam_alg = + container_of(alg, typeof(*caam_alg), skcipher); + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; + int ret = 0; + + if (alg_aai == OP_ALG_AAI_XTS) { + const char *tfm_name = crypto_tfm_alg_name(&tfm->base); + struct crypto_skcipher *fallback; + + fallback = crypto_alloc_skcipher(tfm_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + dev_err(caam_alg->caam.dev, + "Failed to allocate %s fallback: %ld\n", + tfm_name, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + + ctx->fallback = fallback; + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) + + crypto_skcipher_reqsize(fallback)); + } else { + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request)); + } + + ret = caam_cra_init(ctx, &caam_alg->caam, false); + if (ret && ctx->fallback) + crypto_free_skcipher(ctx->fallback); + + return ret; +} + +static int caam_cra_init_aead(struct crypto_aead *tfm) +{ + struct aead_alg *alg = crypto_aead_alg(tfm); + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), + aead); + + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request)); + return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam, + !caam_alg->caam.nodkp); +} + +static void caam_exit_common(struct caam_ctx *ctx) +{ + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], + offsetof(struct caam_ctx, flc_dma), ctx->dir, + DMA_ATTR_SKIP_CPU_SYNC); +} + +static void caam_cra_exit(struct crypto_skcipher *tfm) +{ + struct caam_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (ctx->fallback) + crypto_free_skcipher(ctx->fallback); + caam_exit_common(ctx); +} + +static void caam_cra_exit_aead(struct crypto_aead *tfm) +{ + caam_exit_common(crypto_aead_ctx(tfm)); +} + +static struct caam_skcipher_alg driver_algs[] = { + { + .skcipher = { + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aes_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + }, + { + .skcipher = { + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-3des-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + }, + { + .skcipher = { + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = des_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + 
.ivsize = DES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + }, + { + .skcipher = { + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = ctr_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .chunksize = AES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + }, + { + .skcipher = { + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = rfc3686_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .chunksize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .rfc3686 = true, + }, + }, + { + .skcipher = { + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-caam-qi2", + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = xts_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, + }, + { + .skcipher = { + .base = { + .cra_name = "chacha20", + .cra_driver_name = "chacha20-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = chacha20_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20, + }, +}; + +static struct caam_aead_alg driver_aeads[] = { + { + .aead = { + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = rfc4106_setkey, + .setauthsize = rfc4106_setauthsize, + .encrypt = ipsec_gcm_encrypt, + .decrypt = ipsec_gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc4543(gcm(aes))", + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = rfc4543_setkey, + .setauthsize = rfc4543_setauthsize, + .encrypt = ipsec_gcm_encrypt, + .decrypt = ipsec_gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, + }, + }, + /* Galois Counter Mode */ + { + .aead = { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = gcm_setkey, + .setauthsize = gcm_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .nodkp = true, + } + }, + /* single-pass ipsec_esp descriptor */ + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = 
AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-cbc-aes-" + 
"caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-cbc-aes-" + "caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-aes-caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(aes)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-cbc-aes-" + "caam-qi2", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | 
OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = 
"authenc(hmac(sha384)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha384-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "cbc(des3_ede))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(des3_ede)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-" + "cbc-des3_ede-caam-qi2", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des3_aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_driver_name = "authenc-hmac-md5-" + "cbc-des-caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(md5)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-hmac-md5-" + "cbc-des-caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = "authenc-hmac-sha1-" + "cbc-des-caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = 
aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha1)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha1-cbc-des-caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des))", + .cra_driver_name = "authenc-hmac-sha224-" + "cbc-des-caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha224)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha224-cbc-des-" + "caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = "authenc-hmac-sha256-" + "cbc-des-caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha256)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha256-cbc-des-" + "caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384),cbc(des))", + .cra_driver_name = "authenc-hmac-sha384-" + "cbc-des-caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + }, + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha384)," + "cbc(des)))", + 
.cra_driver_name = "echainiv-authenc-" + "hmac-sha384-cbc-des-" + "caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512),cbc(des))", + .cra_driver_name = "authenc-hmac-sha512-" + "cbc-des-caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + } + }, + { + .aead = { + .base = { + .cra_name = "echainiv(authenc(hmac(sha512)," + "cbc(des)))", + .cra_driver_name = "echainiv-authenc-" + "hmac-sha512-cbc-des-" + "caam-qi2", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .geniv = true, + } + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(md5)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-md5-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(md5),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-md5-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha1)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha1-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(sha1),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha1-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = 
CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha224)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha224-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(" + "hmac(sha224),rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha224-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha256)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha256-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha256)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha256-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha384)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha384-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha384)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha384-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + 
OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc7539(chacha20,poly1305)", + .cra_driver_name = "rfc7539-chacha20-poly1305-" + "caam-qi2", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CHACHAPOLY_IV_SIZE, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + .nodkp = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc7539esp(chacha20,poly1305)", + .cra_driver_name = "rfc7539esp-chacha20-" + "poly1305-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = chachapoly_setkey, + .setauthsize = chachapoly_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = 8, + .maxauthsize = POLY1305_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | + OP_ALG_AAI_AEAD, + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | + OP_ALG_AAI_AEAD, + .nodkp = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "authenc(hmac(sha512)," + "rfc3686(ctr(aes)))", + .cra_driver_name = "authenc-hmac-sha512-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + }, + }, + { + .aead = { + .base = { + .cra_name = "seqiv(authenc(hmac(sha512)," + "rfc3686(ctr(aes))))", + .cra_driver_name = "seqiv-authenc-hmac-sha512-" + "rfc3686-ctr-aes-caam-qi2", + .cra_blocksize = 1, + }, + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | + OP_ALG_AAI_CTR_MOD128, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .rfc3686 = true, + .geniv = true, + }, + }, +}; + +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) +{ + struct skcipher_alg *alg = &t_alg->skcipher; + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY); + + alg->init = caam_cra_init_skcipher; + alg->exit = caam_cra_exit; +} + +static void caam_aead_alg_init(struct caam_aead_alg *t_alg) +{ + struct aead_alg *alg = &t_alg->aead; + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_KERN_DRIVER_ONLY; + + alg->init = caam_cra_init_aead; + alg->exit = caam_cra_exit_aead; +} + +/* max hash key is max split key size */ +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) + +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE + +/* caam context sizes for hashes: running digest + 8 */ +#define HASH_MSG_LEN 8 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) + +enum hash_optype { + UPDATE = 0, + UPDATE_FIRST, + 
FINALIZE, + DIGEST, + HASH_NUM_OP +}; + +/** + * struct caam_hash_ctx - ahash per-session context + * @flc: Flow Contexts array + * @key: authentication key + * @flc_dma: I/O virtual addresses of the Flow Contexts + * @dev: dpseci device + * @ctx_len: size of Context Register + * @adata: hashing algorithm details + */ +struct caam_hash_ctx { + struct caam_flc flc[HASH_NUM_OP]; + u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; + dma_addr_t flc_dma[HASH_NUM_OP]; + struct device *dev; + int ctx_len; + struct alginfo adata; +}; + +/* ahash state */ +struct caam_hash_state { + struct caam_request caam_req; + dma_addr_t buf_dma; + dma_addr_t ctx_dma; + int ctx_dma_len; + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; + int buflen; + int next_buflen; + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); +}; + +struct caam_export_state { + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; + u8 caam_ctx[MAX_CTX_LEN]; + int buflen; + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); +}; + +/* Map current buffer in state (if length > 0) and put it in link table */ +static inline int buf_map_to_qm_sg(struct device *dev, + struct dpaa2_sg_entry *qm_sg, + struct caam_hash_state *state) +{ + int buflen = state->buflen; + + if (!buflen) + return 0; + + state->buf_dma = dma_map_single(dev, state->buf, buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, state->buf_dma)) { + dev_err(dev, "unable to map buf\n"); + state->buf_dma = 0; + return -ENOMEM; + } + + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0); + + return 0; +} + +/* Map state->caam_ctx, and add it to link table */ +static inline int ctx_map_to_qm_sg(struct device *dev, + struct caam_hash_state *state, int ctx_len, + struct dpaa2_sg_entry *qm_sg, u32 flag) +{ + state->ctx_dma_len = ctx_len; + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag); + if (dma_mapping_error(dev, state->ctx_dma)) { + dev_err(dev, "unable to map ctx\n"); + state->ctx_dma = 0; + return -ENOMEM; + } + + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0); + + return 0; +} + +static int ahash_set_sh_desc(struct crypto_ahash *ahash) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); + struct caam_flc *flc; + u32 *desc; + + /* ahash_update shared descriptor */ + flc = &ctx->flc[UPDATE]; + desc = flc->sh_desc; + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, + ctx->ctx_len, true, priv->sec_attr.era); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE], + desc_bytes(desc), DMA_BIDIRECTIONAL); + print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* ahash_update_first shared descriptor */ + flc = &ctx->flc[UPDATE_FIRST]; + desc = flc->sh_desc; + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, + ctx->ctx_len, false, priv->sec_attr.era); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST], + desc_bytes(desc), DMA_BIDIRECTIONAL); + print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* ahash_final 
shared descriptor */ + flc = &ctx->flc[FINALIZE]; + desc = flc->sh_desc; + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, + ctx->ctx_len, true, priv->sec_attr.era); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE], + desc_bytes(desc), DMA_BIDIRECTIONAL); + print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* ahash_digest shared descriptor */ + flc = &ctx->flc[DIGEST]; + desc = flc->sh_desc; + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, + ctx->ctx_len, false, priv->sec_attr.era); + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST], + desc_bytes(desc), DMA_BIDIRECTIONAL); + print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + return 0; +} + +struct split_key_sh_result { + struct completion completion; + int err; + struct device *dev; +}; + +static void split_key_sh_done(void *cbk_ctx, u32 err) +{ + struct split_key_sh_result *res = cbk_ctx; + + dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + + res->err = err ? caam_qi2_strstatus(res->dev, err) : 0; + complete(&res->completion); +} + +/* Digest hash size if it is too large */ +static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, + u32 digestsize) +{ + struct caam_request *req_ctx; + u32 *desc; + struct split_key_sh_result result; + dma_addr_t key_dma; + struct caam_flc *flc; + dma_addr_t flc_dma; + int ret = -ENOMEM; + struct dpaa2_fl_entry *in_fle, *out_fle; + + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA); + if (!req_ctx) + return -ENOMEM; + + in_fle = &req_ctx->fd_flt[1]; + out_fle = &req_ctx->fd_flt[0]; + + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA); + if (!flc) + goto err_flc; + + key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL); + if (dma_mapping_error(ctx->dev, key_dma)) { + dev_err(ctx->dev, "unable to map key memory\n"); + goto err_key_dma; + } + + desc = flc->sh_desc; + + init_sh_desc(desc, 0); + + /* descriptor to perform unkeyed hash on key_in */ + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | + OP_ALG_AS_INITFINAL); + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) + + desc_bytes(desc), DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, flc_dma)) { + dev_err(ctx->dev, "unable to map shared descriptor\n"); + goto err_flc_dma; + } + + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_format(in_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(in_fle, key_dma); + dpaa2_fl_set_len(in_fle, *keylen); + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, key_dma); + dpaa2_fl_set_len(out_fle, digestsize); + + print_hex_dump_debug("key_in@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); + print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + result.err = 0; + init_completion(&result.completion); + result.dev = ctx->dev; + + req_ctx->flc = flc; + req_ctx->flc_dma = flc_dma; + req_ctx->cbk = split_key_sh_done; + req_ctx->ctx = &result; + + ret = 
dpaa2_caam_enqueue(ctx->dev, req_ctx); + if (ret == -EINPROGRESS) { + /* in progress */ + wait_for_completion(&result.completion); + ret = result.err; + print_hex_dump_debug("digested key@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, + digestsize, 1); + } + + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc), + DMA_TO_DEVICE); +err_flc_dma: + dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL); +err_key_dma: + kfree(flc); +err_flc: + kfree(req_ctx); + + *keylen = digestsize; + + return ret; +} + +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base); + unsigned int digestsize = crypto_ahash_digestsize(ahash); + int ret; + u8 *hashed_key = NULL; + + dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); + + if (keylen > blocksize) { + hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + if (!hashed_key) + return -ENOMEM; + ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); + if (ret) + goto bad_free_key; + key = hashed_key; + } + + ctx->adata.keylen = keylen; + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & + OP_ALG_ALGSEL_MASK); + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) + goto bad_free_key; + + ctx->adata.key_virt = key; + ctx->adata.key_inline = true; + + /* + * In case |user key| > |derived key|, using DKP<imm,imm> would result + * in invalid opcodes (last bytes of user key) in the resulting + * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key + * addresses are needed. + */ + if (keylen > ctx->adata.keylen_pad) { + memcpy(ctx->key, key, keylen); + dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma, + ctx->adata.keylen_pad, + DMA_TO_DEVICE); + } + + ret = ahash_set_sh_desc(ahash); + kfree(hashed_key); + return ret; +bad_free_key: + kfree(hashed_key); + return -EINVAL; +} + +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, + struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + if (edesc->src_nents) + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); + + if (edesc->qm_sg_bytes) + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes, + DMA_TO_DEVICE); + + if (state->buf_dma) { + dma_unmap_single(dev, state->buf_dma, state->buflen, + DMA_TO_DEVICE); + state->buf_dma = 0; + } +} + +static inline void ahash_unmap_ctx(struct device *dev, + struct ahash_edesc *edesc, + struct ahash_request *req, u32 flag) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + if (state->ctx_dma) { + dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); + state->ctx_dma = 0; + } + ahash_unmap(dev, edesc, req); +} + +static void ahash_done(void *cbk_ctx, u32 status) +{ + struct crypto_async_request *areq = cbk_ctx; + struct ahash_request *req = ahash_request_cast(areq); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_state *state = ahash_request_ctx(req); + struct ahash_edesc *edesc = state->caam_req.edesc; + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + int ecode = 0; + + dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); + + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); + + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); + memcpy(req->result, state->caam_ctx, digestsize); + qi_cache_free(edesc); 
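/*
 * Editorial aside (not part of the original patch): the key handling in
 * ahash_setkey() above follows the standard HMAC rule -- a key longer than
 * the underlying block size is first hashed down to the digest size by
 * hash_digest_key(), and only then expanded into the padded split key.
 * A minimal sketch of just the length rule; the helper name
 * hmac_effective_keylen() is hypothetical and for illustration only.
 */
static inline unsigned int hmac_effective_keylen(unsigned int keylen,
						 unsigned int blocksize,
						 unsigned int digestsize)
{
	/*
	 * e.g. SHA-256: blocksize 64, digestsize 32; a 100-byte user key is
	 * pre-hashed, so 32 bytes feed the split-key (IPAD/OPAD) derivation,
	 * while a 20-byte key is used as-is.
	 */
	return keylen > blocksize ? digestsize : keylen;
}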
+ + print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + + req->base.complete(&req->base, ecode); +} + +static void ahash_done_bi(void *cbk_ctx, u32 status) +{ + struct crypto_async_request *areq = cbk_ctx; + struct ahash_request *req = ahash_request_cast(areq); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_state *state = ahash_request_ctx(req); + struct ahash_edesc *edesc = state->caam_req.edesc; + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int ecode = 0; + + dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); + + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); + + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); + qi_cache_free(edesc); + + scatterwalk_map_and_copy(state->buf, req->src, + req->nbytes - state->next_buflen, + state->next_buflen, 0); + state->buflen = state->next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->buf, + state->buflen, 1); + + print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + if (req->result) + print_hex_dump_debug("result@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->result, + crypto_ahash_digestsize(ahash), 1); + + req->base.complete(&req->base, ecode); +} + +static void ahash_done_ctx_src(void *cbk_ctx, u32 status) +{ + struct crypto_async_request *areq = cbk_ctx; + struct ahash_request *req = ahash_request_cast(areq); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_state *state = ahash_request_ctx(req); + struct ahash_edesc *edesc = state->caam_req.edesc; + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + int ecode = 0; + + dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); + + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); + + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); + memcpy(req->result, state->caam_ctx, digestsize); + qi_cache_free(edesc); + + print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + + req->base.complete(&req->base, ecode); +} + +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) +{ + struct crypto_async_request *areq = cbk_ctx; + struct ahash_request *req = ahash_request_cast(areq); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_state *state = ahash_request_ctx(req); + struct ahash_edesc *edesc = state->caam_req.edesc; + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int ecode = 0; + + dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); + + if (unlikely(status)) + ecode = caam_qi2_strstatus(ctx->dev, status); + + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); + qi_cache_free(edesc); + + scatterwalk_map_and_copy(state->buf, req->src, + req->nbytes - state->next_buflen, + state->next_buflen, 0); + state->buflen = state->next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->buf, + state->buflen, 1); + + print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + if (req->result) + print_hex_dump_debug("result@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->result, + crypto_ahash_digestsize(ahash), 1); + + 
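/*
 * Editorial aside (not part of the original patch): the update paths below
 * (ahash_update_ctx() and ahash_update_no_ctx()) only hand whole blocks to
 * the accelerator and carry the remainder in state->buf for the next call.
 * A minimal sketch of that split, with a worked example in the comment;
 * the helper name split_update_len() is hypothetical.
 */
static inline void split_update_len(unsigned int buflen, unsigned int nbytes,
				    unsigned int blocksize,
				    unsigned int *to_hash,
				    unsigned int *next_buflen)
{
	unsigned int in_len = buflen + nbytes;

	/*
	 * blocksize is a power of two, so the mask keeps the tail bytes:
	 * e.g. buflen = 10, nbytes = 100, blocksize = 64 (SHA-256)
	 *   -> in_len = 110, *next_buflen = 46, *to_hash = 64
	 */
	*next_buflen = in_len & (blocksize - 1);
	*to_hash = in_len - *next_buflen;
}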
req->base.complete(&req->base, ecode); +} + +static int ahash_update_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_request *req_ctx = &state->caam_req; + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; + int in_len = *buflen + req->nbytes, to_hash; + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index; + struct ahash_edesc *edesc; + int ret = 0; + + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); + to_hash = in_len - *next_buflen; + + if (to_hash) { + struct dpaa2_sg_entry *sg_table; + int src_len = req->nbytes - *next_buflen; + + src_nents = sg_nents_for_len(req->src, src_len); + if (src_nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(ctx->dev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + /* allocate space for base edesc and link tables */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (!edesc) { + dma_unmap_sg(ctx->dev, req->src, src_nents, + DMA_TO_DEVICE); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + qm_sg_src_index = 1 + (*buflen ? 1 : 0); + qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * + sizeof(*sg_table); + sg_table = &edesc->sgt[0]; + + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, + DMA_BIDIRECTIONAL); + if (ret) + goto unmap_ctx; + + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); + if (ret) + goto unmap_ctx; + + if (mapped_nents) { + sg_to_qm_sg_last(req->src, src_len, + sg_table + qm_sg_src_index, 0); + } else { + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, + true); + } + + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, + qm_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { + dev_err(ctx->dev, "unable to map S/G table\n"); + ret = -ENOMEM; + goto unmap_ctx; + } + edesc->qm_sg_bytes = qm_sg_bytes; + + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash); + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, state->ctx_dma); + dpaa2_fl_set_len(out_fle, ctx->ctx_len); + + req_ctx->flc = &ctx->flc[UPDATE]; + req_ctx->flc_dma = ctx->flc_dma[UPDATE]; + req_ctx->cbk = ahash_done_bi; + req_ctx->ctx = &req->base; + req_ctx->edesc = edesc; + + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); + if (ret != -EINPROGRESS && + !(ret == -EBUSY && + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + goto unmap_ctx; + } else if (*next_buflen) { + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, + req->nbytes, 0); + *buflen = *next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } + + return ret; +unmap_ctx: + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); + qi_cache_free(edesc); + return ret; +} + +static int ahash_final_ctx(struct ahash_request *req) +{ + struct 
crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_request *req_ctx = &state->caam_req; + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + int buflen = state->buflen; + int qm_sg_bytes; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + struct dpaa2_sg_entry *sg_table; + int ret; + + /* allocate space for base edesc and link tables */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (!edesc) + return -ENOMEM; + + qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table); + sg_table = &edesc->sgt[0]; + + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, + DMA_BIDIRECTIONAL); + if (ret) + goto unmap_ctx; + + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); + if (ret) + goto unmap_ctx; + + dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true); + + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { + dev_err(ctx->dev, "unable to map S/G table\n"); + ret = -ENOMEM; + goto unmap_ctx; + } + edesc->qm_sg_bytes = qm_sg_bytes; + + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen); + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, state->ctx_dma); + dpaa2_fl_set_len(out_fle, digestsize); + + req_ctx->flc = &ctx->flc[FINALIZE]; + req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; + req_ctx->cbk = ahash_done_ctx_src; + req_ctx->ctx = &req->base; + req_ctx->edesc = edesc; + + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); + if (ret == -EINPROGRESS || + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return ret; + +unmap_ctx: + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); + qi_cache_free(edesc); + return ret; +} + +static int ahash_finup_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_request *req_ctx = &state->caam_req; + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + int buflen = state->buflen; + int qm_sg_bytes, qm_sg_src_index; + int src_nents, mapped_nents; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + struct dpaa2_sg_entry *sg_table; + int ret; + + src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(ctx->dev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + /* allocate space for base edesc and link tables */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (!edesc) { + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + qm_sg_src_index = 1 + (buflen ? 
1 : 0); + qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * + sizeof(*sg_table); + sg_table = &edesc->sgt[0]; + + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, + DMA_BIDIRECTIONAL); + if (ret) + goto unmap_ctx; + + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); + if (ret) + goto unmap_ctx; + + sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0); + + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { + dev_err(ctx->dev, "unable to map S/G table\n"); + ret = -ENOMEM; + goto unmap_ctx; + } + edesc->qm_sg_bytes = qm_sg_bytes; + + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes); + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, state->ctx_dma); + dpaa2_fl_set_len(out_fle, digestsize); + + req_ctx->flc = &ctx->flc[FINALIZE]; + req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; + req_ctx->cbk = ahash_done_ctx_src; + req_ctx->ctx = &req->base; + req_ctx->edesc = edesc; + + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); + if (ret == -EINPROGRESS || + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return ret; + +unmap_ctx: + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); + qi_cache_free(edesc); + return ret; +} + +static int ahash_digest(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_request *req_ctx = &state->caam_req; + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + int digestsize = crypto_ahash_digestsize(ahash); + int src_nents, mapped_nents; + struct ahash_edesc *edesc; + int ret = -ENOMEM; + + state->buf_dma = 0; + + src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(ctx->dev, "unable to map source for DMA\n"); + return ret; + } + } else { + mapped_nents = 0; + } + + /* allocate space for base edesc and link tables */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (!edesc) { + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); + return ret; + } + + edesc->src_nents = src_nents; + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + + if (mapped_nents > 1) { + int qm_sg_bytes; + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; + + qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table); + sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0); + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, + qm_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { + dev_err(ctx->dev, "unable to map S/G table\n"); + goto unmap; + } + edesc->qm_sg_bytes = qm_sg_bytes; + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); + } else { + dpaa2_fl_set_format(in_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); + } + + state->ctx_dma_len = digestsize; + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, + DMA_FROM_DEVICE); + if (dma_mapping_error(ctx->dev, state->ctx_dma)) { + dev_err(ctx->dev, "unable to map ctx\n"); + state->ctx_dma = 0; + goto unmap; + } + + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_len(in_fle, req->nbytes); + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, state->ctx_dma); + dpaa2_fl_set_len(out_fle, digestsize); + + req_ctx->flc = &ctx->flc[DIGEST]; + req_ctx->flc_dma = ctx->flc_dma[DIGEST]; + req_ctx->cbk = ahash_done; + req_ctx->ctx = &req->base; + req_ctx->edesc = edesc; + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); + if (ret == -EINPROGRESS || + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return ret; + +unmap: + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); + qi_cache_free(edesc); + return ret; +} + +static int ahash_final_no_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_request *req_ctx = &state->caam_req; + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->buf; + int buflen = state->buflen; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + int ret = -ENOMEM; + + /* allocate space for base edesc and link tables */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (!edesc) + return ret; + + if (buflen) { + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, state->buf_dma)) { + dev_err(ctx->dev, "unable to map src\n"); + goto unmap; + } + } + + state->ctx_dma_len = digestsize; + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, + DMA_FROM_DEVICE); + if (dma_mapping_error(ctx->dev, state->ctx_dma)) { + dev_err(ctx->dev, "unable to map ctx\n"); + state->ctx_dma = 0; + goto unmap; + } + + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + dpaa2_fl_set_final(in_fle, true); + /* + * crypto engine requires the input entry to be present when + * "frame list" FD is used. + * Since engine does not support FMT=2'b11 (unused entry type), leaving + * in_fle zeroized (except for "Final" flag) is the best option. + */ + if (buflen) { + dpaa2_fl_set_format(in_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(in_fle, state->buf_dma); + dpaa2_fl_set_len(in_fle, buflen); + } + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, state->ctx_dma); + dpaa2_fl_set_len(out_fle, digestsize); + + req_ctx->flc = &ctx->flc[DIGEST]; + req_ctx->flc_dma = ctx->flc_dma[DIGEST]; + req_ctx->cbk = ahash_done; + req_ctx->ctx = &req->base; + req_ctx->edesc = edesc; + + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); + if (ret == -EINPROGRESS || + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return ret; + +unmap: + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); + qi_cache_free(edesc); + return ret; +} + +static int ahash_update_no_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_request *req_ctx = &state->caam_req; + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; + int in_len = *buflen + req->nbytes, to_hash; + int qm_sg_bytes, src_nents, mapped_nents; + struct ahash_edesc *edesc; + int ret = 0; + + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); + to_hash = in_len - *next_buflen; + + if (to_hash) { + struct dpaa2_sg_entry *sg_table; + int src_len = req->nbytes - *next_buflen; + + src_nents = sg_nents_for_len(req->src, src_len); + if (src_nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(ctx->dev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + /* allocate space for base edesc and link tables */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (!edesc) { + dma_unmap_sg(ctx->dev, req->src, src_nents, + DMA_TO_DEVICE); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + qm_sg_bytes = pad_sg_nents(1 + mapped_nents) * + sizeof(*sg_table); + sg_table = &edesc->sgt[0]; + + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); + if (ret) + goto unmap_ctx; + + sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0); + + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, + qm_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { + dev_err(ctx->dev, "unable to map S/G table\n"); + ret = -ENOMEM; + goto unmap_ctx; + } + edesc->qm_sg_bytes = qm_sg_bytes; + + state->ctx_dma_len = ctx->ctx_len; + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, + ctx->ctx_len, DMA_FROM_DEVICE); + if (dma_mapping_error(ctx->dev, state->ctx_dma)) { + dev_err(ctx->dev, "unable to map ctx\n"); + state->ctx_dma = 0; + ret = -ENOMEM; + goto unmap_ctx; + } + + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); + dpaa2_fl_set_len(in_fle, to_hash); + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, state->ctx_dma); + dpaa2_fl_set_len(out_fle, ctx->ctx_len); + + req_ctx->flc = &ctx->flc[UPDATE_FIRST]; + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; + req_ctx->cbk = ahash_done_ctx_dst; + req_ctx->ctx = &req->base; + req_ctx->edesc = edesc; + + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); + if (ret != -EINPROGRESS && + !(ret == -EBUSY && + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + goto unmap_ctx; + + state->update = ahash_update_ctx; + state->finup = ahash_finup_ctx; + state->final = ahash_final_ctx; + } else if (*next_buflen) { + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, + req->nbytes, 0); + *buflen = *next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } + + return ret; +unmap_ctx: + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE); + qi_cache_free(edesc); + return ret; +} + +static int ahash_finup_no_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_request *req_ctx = &state->caam_req; + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + int buflen = state->buflen; + int qm_sg_bytes, src_nents, mapped_nents; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + struct dpaa2_sg_entry *sg_table; + int ret = -ENOMEM; + + src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(ctx->dev, "unable to DMA map source\n"); + return ret; + } + } else { + mapped_nents = 0; + } + + /* allocate space for base edesc and link tables */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (!edesc) { + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); + return ret; + } + + edesc->src_nents = src_nents; + qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table); + sg_table = &edesc->sgt[0]; + + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); + if (ret) + goto unmap; + + sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0); + + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { + dev_err(ctx->dev, "unable to map S/G table\n"); + ret = -ENOMEM; + goto unmap; + } + edesc->qm_sg_bytes = qm_sg_bytes; + + state->ctx_dma_len = digestsize; + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, + DMA_FROM_DEVICE); + if (dma_mapping_error(ctx->dev, state->ctx_dma)) { + dev_err(ctx->dev, "unable to map ctx\n"); + state->ctx_dma = 0; + ret = -ENOMEM; + goto unmap; + } + + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); + dpaa2_fl_set_len(in_fle, buflen + req->nbytes); + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, state->ctx_dma); + dpaa2_fl_set_len(out_fle, digestsize); + + req_ctx->flc = &ctx->flc[DIGEST]; + req_ctx->flc_dma = ctx->flc_dma[DIGEST]; + req_ctx->cbk = ahash_done; + req_ctx->ctx = &req->base; + req_ctx->edesc = edesc; + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); + if (ret != -EINPROGRESS && + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + goto unmap; + + return ret; +unmap: + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); + qi_cache_free(edesc); + return ret; +} + +static int ahash_update_first(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_request *req_ctx = &state->caam_req; + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; + int to_hash; + int src_nents, mapped_nents; + struct ahash_edesc *edesc; + int ret = 0; + + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - + 1); + to_hash = req->nbytes - *next_buflen; + + if (to_hash) { + struct dpaa2_sg_entry *sg_table; + int src_len = req->nbytes - *next_buflen; + + src_nents = sg_nents_for_len(req->src, src_len); + if (src_nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(ctx->dev, "unable to map source for DMA\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + /* allocate space for base edesc and link tables */ + edesc = qi_cache_zalloc(GFP_DMA | flags); + if (!edesc) { + dma_unmap_sg(ctx->dev, req->src, src_nents, + DMA_TO_DEVICE); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + sg_table = &edesc->sgt[0]; + + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); + dpaa2_fl_set_final(in_fle, true); + dpaa2_fl_set_len(in_fle, to_hash); + + if (mapped_nents > 1) { + int qm_sg_bytes; + + sg_to_qm_sg_last(req->src, src_len, sg_table, 0); + qm_sg_bytes = pad_sg_nents(mapped_nents) * + sizeof(*sg_table); + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, + qm_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { + dev_err(ctx->dev, "unable to map S/G table\n"); + ret = -ENOMEM; + goto unmap_ctx; + } + edesc->qm_sg_bytes = qm_sg_bytes; + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); + } else { + dpaa2_fl_set_format(in_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); + } + + state->ctx_dma_len = ctx->ctx_len; + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, + ctx->ctx_len, DMA_FROM_DEVICE); + if (dma_mapping_error(ctx->dev, state->ctx_dma)) { + dev_err(ctx->dev, "unable to map ctx\n"); + state->ctx_dma = 0; + ret = -ENOMEM; + goto unmap_ctx; + } + + dpaa2_fl_set_format(out_fle, dpaa2_fl_single); + dpaa2_fl_set_addr(out_fle, state->ctx_dma); + dpaa2_fl_set_len(out_fle, ctx->ctx_len); + + req_ctx->flc = &ctx->flc[UPDATE_FIRST]; + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; + req_ctx->cbk = ahash_done_ctx_dst; + req_ctx->ctx = &req->base; + req_ctx->edesc = edesc; + + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); + if (ret != -EINPROGRESS && + !(ret == -EBUSY && req->base.flags & + CRYPTO_TFM_REQ_MAY_BACKLOG)) + goto unmap_ctx; + + state->update = ahash_update_ctx; + state->finup = ahash_finup_ctx; + state->final = ahash_final_ctx; + } else if (*next_buflen) { + state->update = ahash_update_no_ctx; + state->finup = ahash_finup_no_ctx; + state->final = ahash_final_no_ctx; + scatterwalk_map_and_copy(buf, req->src, 0, + req->nbytes, 0); + *buflen = *next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } + + return ret; +unmap_ctx: + ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE); + qi_cache_free(edesc); + return ret; +} + +static int ahash_finup_first(struct ahash_request *req) +{ + return ahash_digest(req); +} + +static int ahash_init(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + state->update = ahash_update_first; + state->finup = ahash_finup_first; + state->final = ahash_final_no_ctx; + + 
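/* fresh request: no data buffered and no DMA mappings held yet */ + 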
state->ctx_dma = 0; + state->ctx_dma_len = 0; + state->buf_dma = 0; + state->buflen = 0; + state->next_buflen = 0; + + return 0; +} + +static int ahash_update(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + return state->update(req); +} + +static int ahash_finup(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + return state->finup(req); +} + +static int ahash_final(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + return state->final(req); +} + +static int ahash_export(struct ahash_request *req, void *out) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_export_state *export = out; + u8 *buf = state->buf; + int len = state->buflen; + + memcpy(export->buf, buf, len); + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); + export->buflen = len; + export->update = state->update; + export->final = state->final; + export->finup = state->finup; + + return 0; +} + +static int ahash_import(struct ahash_request *req, const void *in) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + const struct caam_export_state *export = in; + + memset(state, 0, sizeof(*state)); + memcpy(state->buf, export->buf, export->buflen); + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); + state->buflen = export->buflen; + state->update = export->update; + state->final = export->final; + state->finup = export->finup; + + return 0; +} + +struct caam_hash_template { + char name[CRYPTO_MAX_ALG_NAME]; + char driver_name[CRYPTO_MAX_ALG_NAME]; + char hmac_name[CRYPTO_MAX_ALG_NAME]; + char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; + unsigned int blocksize; + struct ahash_alg template_ahash; + u32 alg_type; +}; + +/* ahash descriptors */ +static struct caam_hash_template driver_hash[] = { + { + .name = "sha1", + .driver_name = "sha1-caam-qi2", + .hmac_name = "hmac(sha1)", + .hmac_driver_name = "hmac-sha1-caam-qi2", + .blocksize = SHA1_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA1, + }, { + .name = "sha224", + .driver_name = "sha224-caam-qi2", + .hmac_name = "hmac(sha224)", + .hmac_driver_name = "hmac-sha224-caam-qi2", + .blocksize = SHA224_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA224, + }, { + .name = "sha256", + .driver_name = "sha256-caam-qi2", + .hmac_name = "hmac(sha256)", + .hmac_driver_name = "hmac-sha256-caam-qi2", + .blocksize = SHA256_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA256, + }, { + .name = "sha384", + .driver_name = "sha384-caam-qi2", + .hmac_name = 
"hmac(sha384)", + .hmac_driver_name = "hmac-sha384-caam-qi2", + .blocksize = SHA384_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA384, + }, { + .name = "sha512", + .driver_name = "sha512-caam-qi2", + .hmac_name = "hmac(sha512)", + .hmac_driver_name = "hmac-sha512-caam-qi2", + .blocksize = SHA512_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA512, + }, { + .name = "md5", + .driver_name = "md5-caam-qi2", + .hmac_name = "hmac(md5)", + .hmac_driver_name = "hmac-md5-caam-qi2", + .blocksize = MD5_BLOCK_WORDS * 4, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_MD5, + } +}; + +struct caam_hash_alg { + struct list_head entry; + struct device *dev; + int alg_type; + struct ahash_alg ahash_alg; +}; + +static int caam_hash_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct crypto_alg *base = tfm->__crt_alg; + struct hash_alg_common *halg = + container_of(base, struct hash_alg_common, base); + struct ahash_alg *alg = + container_of(halg, struct ahash_alg, halg); + struct caam_hash_alg *caam_hash = + container_of(alg, struct caam_hash_alg, ahash_alg); + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, + HASH_MSG_LEN + SHA1_DIGEST_SIZE, + HASH_MSG_LEN + 32, + HASH_MSG_LEN + SHA256_DIGEST_SIZE, + HASH_MSG_LEN + 64, + HASH_MSG_LEN + SHA512_DIGEST_SIZE }; + dma_addr_t dma_addr; + int i; + + ctx->dev = caam_hash->dev; + + if (alg->setkey) { + ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key, + ARRAY_SIZE(ctx->key), + DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) { + dev_err(ctx->dev, "unable to map key\n"); + return -ENOMEM; + } + } + + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->dev, dma_addr)) { + dev_err(ctx->dev, "unable to map shared descriptors\n"); + if (ctx->adata.key_dma) + dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, + ARRAY_SIZE(ctx->key), + DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + return -ENOMEM; + } + + for (i = 0; i < HASH_NUM_OP; i++) + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); + + /* copy descriptor header template value */ + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; + + ctx->ctx_len = runninglen[(ctx->adata.algtype & + OP_ALG_ALGSEL_SUBMASK) >> + OP_ALG_ALGSEL_SHIFT]; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct caam_hash_state)); + + 
/* + * For keyed hash algorithms shared descriptors + * will be created later in setkey() callback + */ + return alg->setkey ? 0 : ahash_set_sh_desc(ahash); +} + +static void caam_hash_cra_exit(struct crypto_tfm *tfm) +{ + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); + if (ctx->adata.key_dma) + dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, + ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); +} + +static struct caam_hash_alg *caam_hash_alloc(struct device *dev, + struct caam_hash_template *template, bool keyed) +{ + struct caam_hash_alg *t_alg; + struct ahash_alg *halg; + struct crypto_alg *alg; + + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); + if (!t_alg) + return ERR_PTR(-ENOMEM); + + t_alg->ahash_alg = template->template_ahash; + halg = &t_alg->ahash_alg; + alg = &halg->halg.base; + + if (keyed) { + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", + template->hmac_name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->hmac_driver_name); + } else { + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", + template->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->driver_name); + t_alg->ahash_alg.setkey = NULL; + } + alg->cra_module = THIS_MODULE; + alg->cra_init = caam_hash_cra_init; + alg->cra_exit = caam_hash_cra_exit; + alg->cra_ctxsize = sizeof(struct caam_hash_ctx); + alg->cra_priority = CAAM_CRA_PRIORITY; + alg->cra_blocksize = template->blocksize; + alg->cra_alignmask = 0; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; + + t_alg->alg_type = template->alg_type; + t_alg->dev = dev; + + return t_alg; +} + +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) +{ + struct dpaa2_caam_priv_per_cpu *ppriv; + + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx); + napi_schedule_irqoff(&ppriv->napi); +} + +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv) +{ + struct device *dev = priv->dev; + struct dpaa2_io_notification_ctx *nctx; + struct dpaa2_caam_priv_per_cpu *ppriv; + int err, i = 0, cpu; + + for_each_online_cpu(cpu) { + ppriv = per_cpu_ptr(priv->ppriv, cpu); + ppriv->priv = priv; + nctx = &ppriv->nctx; + nctx->is_cdan = 0; + nctx->id = ppriv->rsp_fqid; + nctx->desired_cpu = cpu; + nctx->cb = dpaa2_caam_fqdan_cb; + + /* Register notification callbacks */ + ppriv->dpio = dpaa2_io_service_select(cpu); + err = dpaa2_io_service_register(ppriv->dpio, nctx, dev); + if (unlikely(err)) { + dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu); + nctx->cb = NULL; + /* + * If no affine DPIO for this core, there's probably + * none available for next cores either. Signal we want + * to retry later, in case the DPIO devices weren't + * probed yet. 
+ */ + err = -EPROBE_DEFER; + goto err; + } + + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE, + dev); + if (unlikely(!ppriv->store)) { + dev_err(dev, "dpaa2_io_store_create() failed\n"); + err = -ENOMEM; + goto err; + } + + if (++i == priv->num_pairs) + break; + } + + return 0; + +err: + for_each_online_cpu(cpu) { + ppriv = per_cpu_ptr(priv->ppriv, cpu); + if (!ppriv->nctx.cb) + break; + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev); + } + + for_each_online_cpu(cpu) { + ppriv = per_cpu_ptr(priv->ppriv, cpu); + if (!ppriv->store) + break; + dpaa2_io_store_destroy(ppriv->store); + } + + return err; +} + +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv) +{ + struct dpaa2_caam_priv_per_cpu *ppriv; + int i = 0, cpu; + + for_each_online_cpu(cpu) { + ppriv = per_cpu_ptr(priv->ppriv, cpu); + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, + priv->dev); + dpaa2_io_store_destroy(ppriv->store); + + if (++i == priv->num_pairs) + return; + } +} + +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv) +{ + struct dpseci_rx_queue_cfg rx_queue_cfg; + struct device *dev = priv->dev; + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); + struct dpaa2_caam_priv_per_cpu *ppriv; + int err = 0, i = 0, cpu; + + /* Configure Rx queues */ + for_each_online_cpu(cpu) { + ppriv = per_cpu_ptr(priv->ppriv, cpu); + + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST | + DPSECI_QUEUE_OPT_USER_CTX; + rx_queue_cfg.order_preservation_en = 0; + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO; + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; + /* + * Rx priority (WQ) doesn't really matter, since we use + * pull mode, i.e. volatile dequeues from specific FQs + */ + rx_queue_cfg.dest_cfg.priority = 0; + rx_queue_cfg.user_ctx = ppriv->nctx.qman64; + + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, + &rx_queue_cfg); + if (err) { + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n", + err); + return err; + } + + if (++i == priv->num_pairs) + break; + } + + return err; +} + +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv) +{ + struct device *dev = priv->dev; + + if (!priv->cscn_mem) + return; + + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); + kfree(priv->cscn_mem); +} + +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv) +{ + struct device *dev = priv->dev; + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); + int err; + + if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) + dev_err(dev, "dpseci_reset() failed\n"); + } + + dpaa2_dpseci_congestion_free(priv); + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); +} + +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv, + const struct dpaa2_fd *fd) +{ + struct caam_request *req; + u32 fd_err; + + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) { + dev_err(priv->dev, "Only Frame List FD format is supported!\n"); + return; + } + + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK; + if (unlikely(fd_err)) + dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err); + + /* + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported + * in FD[ERR] or FD[FRC]. 
+ */ + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd)); + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt), + DMA_BIDIRECTIONAL); + req->cbk(req->ctx, dpaa2_fd_get_frc(fd)); +} + +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv) +{ + int err; + + /* Retry while portal is busy */ + do { + err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid, + ppriv->store); + } while (err == -EBUSY); + + if (unlikely(err)) + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err); + + return err; +} + +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv) +{ + struct dpaa2_dq *dq; + int cleaned = 0, is_last; + + do { + dq = dpaa2_io_store_next(ppriv->store, &is_last); + if (unlikely(!dq)) { + if (unlikely(!is_last)) { + dev_dbg(ppriv->priv->dev, + "FQ %d returned no valid frames\n", + ppriv->rsp_fqid); + /* + * MUST retry until we get some sort of + * valid response token (be it "empty dequeue" + * or a valid frame). + */ + continue; + } + break; + } + + /* Process FD */ + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq)); + cleaned++; + } while (!is_last); + + return cleaned; +} + +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget) +{ + struct dpaa2_caam_priv_per_cpu *ppriv; + struct dpaa2_caam_priv *priv; + int err, cleaned = 0, store_cleaned; + + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi); + priv = ppriv->priv; + + if (unlikely(dpaa2_caam_pull_fq(ppriv))) + return 0; + + do { + store_cleaned = dpaa2_caam_store_consume(ppriv); + cleaned += store_cleaned; + + if (store_cleaned == 0 || + cleaned > budget - DPAA2_CAAM_STORE_SIZE) + break; + + /* Try to dequeue some more */ + err = dpaa2_caam_pull_fq(ppriv); + if (unlikely(err)) + break; + } while (1); + + if (cleaned < budget) { + napi_complete_done(napi, cleaned); + err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx); + if (unlikely(err)) + dev_err(priv->dev, "Notification rearm failed: %d\n", + err); + } + + return cleaned; +} + +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv, + u16 token) +{ + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 }; + struct device *dev = priv->dev; + int err; + + /* + * Congestion group feature supported starting with DPSECI API v5.1 + * and only when object has been created with this capability. 
+ */ + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) || + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG)) + return 0; + + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN, + GFP_KERNEL | GFP_DMA); + if (!priv->cscn_mem) + return -ENOMEM; + + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN); + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned, + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, priv->cscn_dma)) { + dev_err(dev, "Error mapping CSCN memory area\n"); + err = -ENOMEM; + goto err_dma_map; + } + + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES; + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH; + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH; + cong_notif_cfg.message_ctx = (uintptr_t)priv; + cong_notif_cfg.message_iova = priv->cscn_dma; + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER | + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT | + DPSECI_CGN_MODE_COHERENT_WRITE; + + err = dpseci_set_congestion_notification(priv->mc_io, 0, token, + &cong_notif_cfg); + if (err) { + dev_err(dev, "dpseci_set_congestion_notification failed\n"); + goto err_set_cong; + } + + return 0; + +err_set_cong: + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); +err_dma_map: + kfree(priv->cscn_mem); + + return err; +} + +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) +{ + struct device *dev = &ls_dev->dev; + struct dpaa2_caam_priv *priv; + struct dpaa2_caam_priv_per_cpu *ppriv; + int err, cpu; + u8 i; + + priv = dev_get_drvdata(dev); + + priv->dev = dev; + priv->dpsec_id = ls_dev->obj_desc.id; + + /* Get a handle for the DPSECI this interface is associate with */ + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle); + if (err) { + dev_err(dev, "dpseci_open() failed: %d\n", err); + goto err_open; + } + + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver, + &priv->minor_ver); + if (err) { + dev_err(dev, "dpseci_get_api_version() failed\n"); + goto err_get_vers; + } + + dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver); + + if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { + err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) { + dev_err(dev, "dpseci_reset() failed\n"); + goto err_get_vers; + } + } + + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, + &priv->dpseci_attr); + if (err) { + dev_err(dev, "dpseci_get_attributes() failed\n"); + goto err_get_vers; + } + + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle, + &priv->sec_attr); + if (err) { + dev_err(dev, "dpseci_get_sec_attr() failed\n"); + goto err_get_vers; + } + + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle); + if (err) { + dev_err(dev, "setup_congestion() failed\n"); + goto err_get_vers; + } + + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues, + priv->dpseci_attr.num_tx_queues); + if (priv->num_pairs > num_online_cpus()) { + dev_warn(dev, "%d queues won't be used\n", + priv->num_pairs - num_online_cpus()); + priv->num_pairs = num_online_cpus(); + } + + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) { + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, + &priv->rx_queue_attr[i]); + if (err) { + dev_err(dev, "dpseci_get_rx_queue() failed\n"); + goto err_get_rx_queue; + } + } + + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) { + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, + &priv->tx_queue_attr[i]); + if (err) { 
+ dev_err(dev, "dpseci_get_tx_queue() failed\n"); + goto err_get_rx_queue; + } + } + + i = 0; + for_each_online_cpu(cpu) { + u8 j; + + j = i % priv->num_pairs; + + ppriv = per_cpu_ptr(priv->ppriv, cpu); + ppriv->req_fqid = priv->tx_queue_attr[j].fqid; + + /* + * Allow all cores to enqueue, while only some of them + * will take part in dequeuing. + */ + if (++i > priv->num_pairs) + continue; + + ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid; + ppriv->prio = j; + + dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j, + priv->rx_queue_attr[j].fqid, + priv->tx_queue_attr[j].fqid); + + ppriv->net_dev.dev = *dev; + INIT_LIST_HEAD(&ppriv->net_dev.napi_list); + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, + DPAA2_CAAM_NAPI_WEIGHT); + } + + return 0; + +err_get_rx_queue: + dpaa2_dpseci_congestion_free(priv); +err_get_vers: + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); +err_open: + return err; +} + +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv) +{ + struct device *dev = priv->dev; + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); + struct dpaa2_caam_priv_per_cpu *ppriv; + int i; + + for (i = 0; i < priv->num_pairs; i++) { + ppriv = per_cpu_ptr(priv->ppriv, i); + napi_enable(&ppriv->napi); + } + + return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle); +} + +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv) +{ + struct device *dev = priv->dev; + struct dpaa2_caam_priv_per_cpu *ppriv; + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); + int i, err = 0, enabled; + + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle); + if (err) { + dev_err(dev, "dpseci_disable() failed\n"); + return err; + } + + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled); + if (err) { + dev_err(dev, "dpseci_is_enabled() failed\n"); + return err; + } + + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true"); + + for (i = 0; i < priv->num_pairs; i++) { + ppriv = per_cpu_ptr(priv->ppriv, i); + napi_disable(&ppriv->napi); + netif_napi_del(&ppriv->napi); + } + + return 0; +} + +static struct list_head hash_list; + +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) +{ + struct device *dev; + struct dpaa2_caam_priv *priv; + int i, err = 0; + bool registered = false; + + /* + * There is no way to get CAAM endianness - there is no direct register + * space access and MC f/w does not provide this attribute. + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this + * property. 
+ */ + caam_little_end = true; + + caam_imx = false; + + dev = &dpseci_dev->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev_set_drvdata(dev, priv); + + priv->domain = iommu_get_domain_for_dev(dev); + + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE, + 0, SLAB_CACHE_DMA, NULL); + if (!qi_cache) { + dev_err(dev, "Can't allocate SEC cache\n"); + return -ENOMEM; + } + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49)); + if (err) { + dev_err(dev, "dma_set_mask_and_coherent() failed\n"); + goto err_dma_mask; + } + + /* Obtain a MC portal */ + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io); + if (err) { + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "MC portal allocation failed\n"); + + goto err_dma_mask; + } + + priv->ppriv = alloc_percpu(*priv->ppriv); + if (!priv->ppriv) { + dev_err(dev, "alloc_percpu() failed\n"); + err = -ENOMEM; + goto err_alloc_ppriv; + } + + /* DPSECI initialization */ + err = dpaa2_dpseci_setup(dpseci_dev); + if (err) { + dev_err(dev, "dpaa2_dpseci_setup() failed\n"); + goto err_dpseci_setup; + } + + /* DPIO */ + err = dpaa2_dpseci_dpio_setup(priv); + if (err) { + dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n"); + goto err_dpio_setup; + } + + /* DPSECI binding to DPIO */ + err = dpaa2_dpseci_bind(priv); + if (err) { + dev_err(dev, "dpaa2_dpseci_bind() failed\n"); + goto err_bind; + } + + /* DPSECI enable */ + err = dpaa2_dpseci_enable(priv); + if (err) { + dev_err(dev, "dpaa2_dpseci_enable() failed\n"); + goto err_bind; + } + + dpaa2_dpseci_debugfs_init(priv); + + /* register crypto algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + struct caam_skcipher_alg *t_alg = driver_algs + i; + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; + + /* Skip DES algorithms if not supported by device */ + if (!priv->sec_attr.des_acc_num && + (alg_sel == OP_ALG_ALGSEL_3DES || + alg_sel == OP_ALG_ALGSEL_DES)) + continue; + + /* Skip AES algorithms if not supported by device */ + if (!priv->sec_attr.aes_acc_num && + alg_sel == OP_ALG_ALGSEL_AES) + continue; + + /* Skip CHACHA20 algorithms if not supported by device */ + if (alg_sel == OP_ALG_ALGSEL_CHACHA20 && + !priv->sec_attr.ccha_acc_num) + continue; + + t_alg->caam.dev = dev; + caam_skcipher_alg_init(t_alg); + + err = crypto_register_skcipher(&t_alg->skcipher); + if (err) { + dev_warn(dev, "%s alg registration failed: %d\n", + t_alg->skcipher.base.cra_driver_name, err); + continue; + } + + t_alg->registered = true; + registered = true; + } + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + u32 c1_alg_sel = t_alg->caam.class1_alg_type & + OP_ALG_ALGSEL_MASK; + u32 c2_alg_sel = t_alg->caam.class2_alg_type & + OP_ALG_ALGSEL_MASK; + + /* Skip DES algorithms if not supported by device */ + if (!priv->sec_attr.des_acc_num && + (c1_alg_sel == OP_ALG_ALGSEL_3DES || + c1_alg_sel == OP_ALG_ALGSEL_DES)) + continue; + + /* Skip AES algorithms if not supported by device */ + if (!priv->sec_attr.aes_acc_num && + c1_alg_sel == OP_ALG_ALGSEL_AES) + continue; + + /* Skip CHACHA20 algorithms if not supported by device */ + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && + !priv->sec_attr.ccha_acc_num) + continue; + + /* Skip POLY1305 algorithms if not supported by device */ + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && + !priv->sec_attr.ptha_acc_num) + continue; + + /* + * Skip algorithms requiring message digests + * if MD not 
supported by device. + */ + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && + !priv->sec_attr.md_acc_num) + continue; + + t_alg->caam.dev = dev; + caam_aead_alg_init(t_alg); + + err = crypto_register_aead(&t_alg->aead); + if (err) { + dev_warn(dev, "%s alg registration failed: %d\n", + t_alg->aead.base.cra_driver_name, err); + continue; + } + + t_alg->registered = true; + registered = true; + } + if (registered) + dev_info(dev, "algorithms registered in /proc/crypto\n"); + + /* register hash algorithms the device supports */ + INIT_LIST_HEAD(&hash_list); + + /* + * Skip registration of any hashing algorithms if MD block + * is not present. + */ + if (!priv->sec_attr.md_acc_num) + return 0; + + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { + struct caam_hash_alg *t_alg; + struct caam_hash_template *alg = driver_hash + i; + + /* register hmac version */ + t_alg = caam_hash_alloc(dev, alg, true); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + dev_warn(dev, "%s hash alg allocation failed: %d\n", + alg->hmac_driver_name, err); + continue; + } + + err = crypto_register_ahash(&t_alg->ahash_alg); + if (err) { + dev_warn(dev, "%s alg registration failed: %d\n", + t_alg->ahash_alg.halg.base.cra_driver_name, + err); + kfree(t_alg); + } else { + list_add_tail(&t_alg->entry, &hash_list); + } + + /* register unkeyed version */ + t_alg = caam_hash_alloc(dev, alg, false); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + dev_warn(dev, "%s alg allocation failed: %d\n", + alg->driver_name, err); + continue; + } + + err = crypto_register_ahash(&t_alg->ahash_alg); + if (err) { + dev_warn(dev, "%s alg registration failed: %d\n", + t_alg->ahash_alg.halg.base.cra_driver_name, + err); + kfree(t_alg); + } else { + list_add_tail(&t_alg->entry, &hash_list); + } + } + if (!list_empty(&hash_list)) + dev_info(dev, "hash algorithms registered in /proc/crypto\n"); + + return err; + +err_bind: + dpaa2_dpseci_dpio_free(priv); +err_dpio_setup: + dpaa2_dpseci_free(priv); +err_dpseci_setup: + free_percpu(priv->ppriv); +err_alloc_ppriv: + fsl_mc_portal_free(priv->mc_io); +err_dma_mask: + kmem_cache_destroy(qi_cache); + + return err; +} + +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev) +{ + struct device *dev; + struct dpaa2_caam_priv *priv; + int i; + + dev = &ls_dev->dev; + priv = dev_get_drvdata(dev); + + dpaa2_dpseci_debugfs_exit(priv); + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + + if (t_alg->registered) + crypto_unregister_aead(&t_alg->aead); + } + + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + struct caam_skcipher_alg *t_alg = driver_algs + i; + + if (t_alg->registered) + crypto_unregister_skcipher(&t_alg->skcipher); + } + + if (hash_list.next) { + struct caam_hash_alg *t_hash_alg, *p; + + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) { + crypto_unregister_ahash(&t_hash_alg->ahash_alg); + list_del(&t_hash_alg->entry); + kfree(t_hash_alg); + } + } + + dpaa2_dpseci_disable(priv); + dpaa2_dpseci_dpio_free(priv); + dpaa2_dpseci_free(priv); + free_percpu(priv->ppriv); + fsl_mc_portal_free(priv->mc_io); + kmem_cache_destroy(qi_cache); + + return 0; +} + +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) +{ + struct dpaa2_fd fd; + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); + struct dpaa2_caam_priv_per_cpu *ppriv; + int err = 0, i; + + if (IS_ERR(req)) + return PTR_ERR(req); + + if (priv->cscn_mem) { + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma, + DPAA2_CSCN_SIZE, + DMA_FROM_DEVICE); + if 
(unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) { + dev_dbg_ratelimited(dev, "Dropping request\n"); + return -EBUSY; + } + } + + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma); + + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, req->fd_flt_dma)) { + dev_err(dev, "DMA mapping error for QI enqueue request\n"); + goto err_out; + } + + memset(&fd, 0, sizeof(fd)); + dpaa2_fd_set_format(&fd, dpaa2_fd_list); + dpaa2_fd_set_addr(&fd, req->fd_flt_dma); + dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); + dpaa2_fd_set_flc(&fd, req->flc_dma); + + ppriv = raw_cpu_ptr(priv->ppriv); + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { + err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, + &fd); + if (err != -EBUSY) + break; + + cpu_relax(); + } + + if (unlikely(err)) { + dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err); + goto err_out; + } + + return -EINPROGRESS; + +err_out: + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt), + DMA_BIDIRECTIONAL); + return -EIO; +} +EXPORT_SYMBOL(dpaa2_caam_enqueue); + +static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dpseci", + }, + { .vendor = 0x0 } +}; +MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table); + +static struct fsl_mc_driver dpaa2_caam_driver = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = dpaa2_caam_probe, + .remove = dpaa2_caam_remove, + .match_id_table = dpaa2_caam_match_id_table +}; + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver"); + +module_fsl_mc_driver(dpaa2_caam_driver); diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h new file mode 100644 index 000000000..d35253407 --- /dev/null +++ b/drivers/crypto/caam/caamalg_qi2.h @@ -0,0 +1,200 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright 2015-2016 Freescale Semiconductor Inc. + * Copyright 2017-2018 NXP + */ + +#ifndef _CAAMALG_QI2_H_ +#define _CAAMALG_QI2_H_ + +#include <soc/fsl/dpaa2-io.h> +#include <soc/fsl/dpaa2-fd.h> +#include <linux/threads.h> +#include <linux/netdevice.h> +#include "dpseci.h" +#include "desc_constr.h" +#include <crypto/skcipher.h> + +#define DPAA2_CAAM_STORE_SIZE 16 +/* NAPI weight *must* be a multiple of the store size. 
*/ +#define DPAA2_CAAM_NAPI_WEIGHT 512 + +/* The congestion entrance threshold was chosen so that on LS2088 + * we support the maximum throughput for the available memory + */ +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024) +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10) + +/** + * dpaa2_caam_priv - driver private data + * @dpsec_id: DPSECI object unique ID + * @major_ver: DPSECI major version + * @minor_ver: DPSECI minor version + * @dpseci_attr: DPSECI attributes + * @sec_attr: SEC engine attributes + * @rx_queue_attr: array of Rx queue attributes + * @tx_queue_attr: array of Tx queue attributes + * @cscn_mem: pointer to memory region containing the congestion SCN; + * its size is larger than strictly needed, to accommodate alignment + * @cscn_mem_aligned: pointer to congestion SCN; it is computed as + * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN) + * @cscn_dma: dma address used by the QMAN to write CSCN messages + * @dev: device associated with the DPSECI object + * @mc_io: pointer to MC portal's I/O object + * @domain: IOMMU domain + * @ppriv: per CPU pointers to private data + */ +struct dpaa2_caam_priv { + int dpsec_id; + + u16 major_ver; + u16 minor_ver; + + struct dpseci_attr dpseci_attr; + struct dpseci_sec_attr sec_attr; + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM]; + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM]; + int num_pairs; + + /* congestion */ + void *cscn_mem; + void *cscn_mem_aligned; + dma_addr_t cscn_dma; + + struct device *dev; + struct fsl_mc_io *mc_io; + struct iommu_domain *domain; + + struct dpaa2_caam_priv_per_cpu __percpu *ppriv; + struct dentry *dfs_root; +}; + +/** + * dpaa2_caam_priv_per_cpu - per CPU private data + * @napi: napi structure + * @net_dev: netdev used by napi + * @req_fqid: (virtual) request (Tx / enqueue) FQID + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr + * @nctx: notification context of response FQ + * @store: where dequeued frames are stored + * @priv: backpointer to dpaa2_caam_priv + * @dpio: portal used for data path operations + */ +struct dpaa2_caam_priv_per_cpu { + struct napi_struct napi; + struct net_device net_dev; + int req_fqid; + int rsp_fqid; + int prio; + struct dpaa2_io_notification_ctx nctx; + struct dpaa2_io_store *store; + struct dpaa2_caam_priv *priv; + struct dpaa2_io *dpio; +}; + +/* Length of a single buffer in the QI driver memory cache */ +#define CAAM_QI_MEMCACHE_SIZE 512 + +/* + * aead_edesc - s/w-extended aead descriptor + * @src_nents: number of segments in input scatterlist + * @dst_nents: number of segments in output scatterlist + * @iv_dma: dma address of iv for checking continuity and link table + * @qm_sg_bytes: length of dma mapped h/w link table + * @qm_sg_dma: bus physical mapped address of h/w link table + * @assoclen: associated data length, in CAAM endianness + * @assoclen_dma: bus physical mapped address of req->assoclen + * @sgt: the h/w link table, followed by IV + */ +struct aead_edesc { + int src_nents; + int dst_nents; + dma_addr_t iv_dma; + int qm_sg_bytes; + dma_addr_t qm_sg_dma; + unsigned int assoclen; + dma_addr_t assoclen_dma; + struct dpaa2_sg_entry sgt[]; +}; + +/* + * skcipher_edesc - s/w-extended skcipher descriptor + * @src_nents: number of segments in input scatterlist + * @dst_nents: number of segments in output scatterlist + * @iv_dma: dma address of iv for checking continuity and link table + * @qm_sg_bytes: length of dma mapped qm_sg space + * 
@qm_sg_dma: I/O virtual address of h/w link table + * @sgt: the h/w link table, followed by IV + */ +struct skcipher_edesc { + int src_nents; + int dst_nents; + dma_addr_t iv_dma; + int qm_sg_bytes; + dma_addr_t qm_sg_dma; + struct dpaa2_sg_entry sgt[]; +}; + +/* + * ahash_edesc - s/w-extended ahash descriptor + * @qm_sg_dma: I/O virtual address of h/w link table + * @src_nents: number of segments in input scatterlist + * @qm_sg_bytes: length of dma mapped qm_sg space + * @sgt: pointer to h/w link table + */ +struct ahash_edesc { + dma_addr_t qm_sg_dma; + int src_nents; + int qm_sg_bytes; + struct dpaa2_sg_entry sgt[]; +}; + +/** + * caam_flc - Flow Context (FLC) + * @flc: Flow Context options + * @sh_desc: Shared Descriptor + */ +struct caam_flc { + u32 flc[16]; + u32 sh_desc[MAX_SDLEN]; +} ____cacheline_aligned; + +enum optype { + ENCRYPT = 0, + DECRYPT, + NUM_OP +}; + +/** + * caam_request - the request structure the driver application should fill while + * submitting a job to driver. + * @fd_flt: Frame list table defining input and output + * fd_flt[0] - FLE pointing to output buffer + * fd_flt[1] - FLE pointing to input buffer + * @fd_flt_dma: DMA address for the frame list table + * @flc: Flow Context + * @flc_dma: I/O virtual address of Flow Context + * @cbk: Callback function to invoke when job is completed + * @ctx: arbit context attached with request by the application + * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc + */ +struct caam_request { + struct dpaa2_fl_entry fd_flt[2]; + dma_addr_t fd_flt_dma; + struct caam_flc *flc; + dma_addr_t flc_dma; + void (*cbk)(void *ctx, u32 err); + void *ctx; + void *edesc; + struct skcipher_request fallback_req; +}; + +/** + * dpaa2_caam_enqueue() - enqueue a crypto request + * @dev: device associated with the DPSECI object + * @req: pointer to caam_request + */ +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req); + +#endif /* _CAAMALG_QI2_H_ */ diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c new file mode 100644 index 000000000..e8a6d8bc4 --- /dev/null +++ b/drivers/crypto/caam/caamhash.c @@ -0,0 +1,2023 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * caam - Freescale FSL CAAM support for ahash functions of crypto API + * + * Copyright 2011 Freescale Semiconductor, Inc. + * Copyright 2018-2019 NXP + * + * Based on caamalg.c crypto API driver. + * + * relationship of digest job descriptor or first job descriptor after init to + * shared descriptors: + * + * --------------- --------------- + * | JobDesc #1 |-------------------->| ShareDesc | + * | *(packet 1) | | (hashKey) | + * --------------- | (operation) | + * --------------- + * + * relationship of subsequent job descriptors to shared descriptors: + * + * --------------- --------------- + * | JobDesc #2 |-------------------->| ShareDesc | + * | *(packet 2) | |------------->| (hashKey) | + * --------------- | |-------->| (operation) | + * . | | | (load ctx2) | + * . | | --------------- + * --------------- | | + * | JobDesc #3 |------| | + * | *(packet 3) | | + * --------------- | + * . | + * . | + * --------------- | + * | JobDesc #4 |------------ + * | *(packet 4) | + * --------------- + * + * The SharedDesc never changes for a connection unless rekeyed, but + * each packet will likely be in a different place. So all we need + * to know to process the packet is where the input is, where the + * output goes, and what context we want to process with. Context is + * in the SharedDesc, packet references in the JobDesc. 
+ * + * So, a job desc looks like: + * + * --------------------- + * | Header | + * | ShareDesc Pointer | + * | SEQ_OUT_PTR | + * | (output buffer) | + * | (output length) | + * | SEQ_IN_PTR | + * | (input buffer) | + * | (input length) | + * --------------------- + */ + +#include "compat.h" + +#include "regs.h" +#include "intern.h" +#include "desc_constr.h" +#include "jr.h" +#include "error.h" +#include "sg_sw_sec4.h" +#include "key_gen.h" +#include "caamhash_desc.h" +#include <crypto/engine.h> + +#define CAAM_CRA_PRIORITY 3000 + +/* max hash key is max split key size */ +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) + +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE + +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ + CAAM_MAX_HASH_KEY_SIZE) +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) + +/* caam context sizes for hashes: running digest + 8 */ +#define HASH_MSG_LEN 8 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) + +static struct list_head hash_list; + +/* ahash per-session context */ +struct caam_hash_ctx { + struct crypto_engine_ctx enginectx; + u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned; + dma_addr_t sh_desc_update_dma ____cacheline_aligned; + dma_addr_t sh_desc_update_first_dma; + dma_addr_t sh_desc_fin_dma; + dma_addr_t sh_desc_digest_dma; + enum dma_data_direction dir; + enum dma_data_direction key_dir; + struct device *jrdev; + int ctx_len; + struct alginfo adata; +}; + +/* ahash state */ +struct caam_hash_state { + dma_addr_t buf_dma; + dma_addr_t ctx_dma; + int ctx_dma_len; + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; + int buflen; + int next_buflen; + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; + int (*update)(struct ahash_request *req) ____cacheline_aligned; + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + struct ahash_edesc *edesc; + void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err, + void *context); +}; + +struct caam_export_state { + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; + u8 caam_ctx[MAX_CTX_LEN]; + int buflen; + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); +}; + +static inline bool is_cmac_aes(u32 algtype) +{ + return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == + (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC); +} +/* Common job descriptor seq in/out ptr routines */ + +/* Map state->caam_ctx, and append seq_out_ptr command that points to it */ +static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, + struct caam_hash_state *state, + int ctx_len) +{ + state->ctx_dma_len = ctx_len; + state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, + ctx_len, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, state->ctx_dma)) { + dev_err(jrdev, "unable to map ctx\n"); + state->ctx_dma = 0; + return -ENOMEM; + } + + append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); + + return 0; +} + +/* Map current buffer in state (if length > 0) and put it in link table */ +static inline int buf_map_to_sec4_sg(struct device *jrdev, + struct sec4_sg_entry *sec4_sg, + struct caam_hash_state *state) +{ + int buflen = state->buflen; + + if 
(!buflen) + return 0; + + state->buf_dma = dma_map_single(jrdev, state->buf, buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, state->buf_dma)) { + dev_err(jrdev, "unable to map buf\n"); + state->buf_dma = 0; + return -ENOMEM; + } + + dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); + + return 0; +} + +/* Map state->caam_ctx, and add it to link table */ +static inline int ctx_map_to_sec4_sg(struct device *jrdev, + struct caam_hash_state *state, int ctx_len, + struct sec4_sg_entry *sec4_sg, u32 flag) +{ + state->ctx_dma_len = ctx_len; + state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); + if (dma_mapping_error(jrdev, state->ctx_dma)) { + dev_err(jrdev, "unable to map ctx\n"); + state->ctx_dma = 0; + return -ENOMEM; + } + + dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); + + return 0; +} + +static int ahash_set_sh_desc(struct crypto_ahash *ahash) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct device *jrdev = ctx->jrdev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + u32 *desc; + + ctx->adata.key_virt = ctx->key; + + /* ahash_update shared descriptor */ + desc = ctx->sh_desc_update; + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, + ctx->ctx_len, true, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, + desc_bytes(desc), ctx->dir); + + print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* ahash_update_first shared descriptor */ + desc = ctx->sh_desc_update_first; + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, + ctx->ctx_len, false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__) + ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* ahash_final shared descriptor */ + desc = ctx->sh_desc_fin; + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, + ctx->ctx_len, true, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, + desc_bytes(desc), ctx->dir); + + print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* ahash_digest shared descriptor */ + desc = ctx->sh_desc_digest; + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, + ctx->ctx_len, false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, + desc_bytes(desc), ctx->dir); + + print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + return 0; +} + +static int axcbc_set_sh_desc(struct crypto_ahash *ahash) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct device *jrdev = ctx->jrdev; + u32 *desc; + + /* shared descriptor for ahash_update */ + desc = ctx->sh_desc_update; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, + ctx->ctx_len, ctx->ctx_len); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* shared descriptor for ahash_{final,finup} */ + desc = ctx->sh_desc_fin; + cnstr_shdsc_sk_hash(desc, &ctx->adata, 
OP_ALG_AS_FINALIZE, + digestsize, ctx->ctx_len); + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* key is immediate data for INIT and INITFINAL states */ + ctx->adata.key_virt = ctx->key; + + /* shared descriptor for first invocation of ahash_update */ + desc = ctx->sh_desc_update_first; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, + ctx->ctx_len); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) + " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for ahash_digest */ + desc = ctx->sh_desc_digest; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, + digestsize, ctx->ctx_len); + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + return 0; +} + +static int acmac_set_sh_desc(struct crypto_ahash *ahash) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct device *jrdev = ctx->jrdev; + u32 *desc; + + /* shared descriptor for ahash_update */ + desc = ctx->sh_desc_update; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, + ctx->ctx_len, ctx->ctx_len); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for ahash_{final,finup} */ + desc = ctx->sh_desc_fin; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, + digestsize, ctx->ctx_len); + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for first invocation of ahash_update */ + desc = ctx->sh_desc_update_first; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, + ctx->ctx_len); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) + " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for ahash_digest */ + desc = ctx->sh_desc_digest; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, + digestsize, ctx->ctx_len); + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + return 0; +} + +/* Digest hash size if it is too large */ +static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, + u32 digestsize) +{ + struct device *jrdev = ctx->jrdev; + u32 *desc; + struct split_key_result result; + dma_addr_t key_dma; + int ret; + + desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); + if (!desc) { + dev_err(jrdev, "unable to allocate key input memory\n"); + return -ENOMEM; + } + + init_job_desc(desc, 0); + + key_dma = dma_map_single(jrdev, key, *keylen, 
DMA_BIDIRECTIONAL); + if (dma_mapping_error(jrdev, key_dma)) { + dev_err(jrdev, "unable to map key memory\n"); + kfree(desc); + return -ENOMEM; + } + + /* Job descriptor to perform unkeyed hash on key_in */ + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | + OP_ALG_AS_INITFINAL); + append_seq_in_ptr(desc, key_dma, *keylen, 0); + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); + append_seq_out_ptr(desc, key_dma, digestsize, 0); + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + print_hex_dump_debug("key_in@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + result.err = 0; + init_completion(&result.completion); + + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); + if (ret == -EINPROGRESS) { + /* in progress */ + wait_for_completion(&result.completion); + ret = result.err; + + print_hex_dump_debug("digested key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, + digestsize, 1); + } + dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); + + *keylen = digestsize; + + kfree(desc); + + return ret; +} + +static int ahash_setkey(struct crypto_ahash *ahash, + const u8 *key, unsigned int keylen) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct device *jrdev = ctx->jrdev; + int blocksize = crypto_tfm_alg_blocksize(&ahash->base); + int digestsize = crypto_ahash_digestsize(ahash); + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); + int ret; + u8 *hashed_key = NULL; + + dev_dbg(jrdev, "keylen %d\n", keylen); + + if (keylen > blocksize) { + hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + if (!hashed_key) + return -ENOMEM; + ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); + if (ret) + goto bad_free_key; + key = hashed_key; + } + + /* + * If DKP is supported, use it in the shared descriptor to generate + * the split key. + */ + if (ctrlpriv->era >= 6) { + ctx->adata.key_inline = true; + ctx->adata.keylen = keylen; + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & + OP_ALG_ALGSEL_MASK); + + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) + goto bad_free_key; + + memcpy(ctx->key, key, keylen); + + /* + * In case |user key| > |derived key|, using DKP<imm,imm> + * would result in invalid opcodes (last bytes of user key) in + * the resulting descriptor. Use DKP<ptr,imm> instead => both + * virtual and dma key addresses are needed. 
+ */ + if (keylen > ctx->adata.keylen_pad) + dma_sync_single_for_device(ctx->jrdev, + ctx->adata.key_dma, + ctx->adata.keylen_pad, + DMA_TO_DEVICE); + } else { + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, + keylen, CAAM_MAX_HASH_KEY_SIZE); + if (ret) + goto bad_free_key; + } + + kfree(hashed_key); + return ahash_set_sh_desc(ahash); + bad_free_key: + kfree(hashed_key); + return -EINVAL; +} + +static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct device *jrdev = ctx->jrdev; + + if (keylen != AES_KEYSIZE_128) + return -EINVAL; + + memcpy(ctx->key, key, keylen); + dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen, + DMA_TO_DEVICE); + ctx->adata.keylen = keylen; + + print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1); + + return axcbc_set_sh_desc(ahash); +} + +static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int err; + + err = aes_check_keylen(keylen); + if (err) + return err; + + /* key is immediate data for all cmac shared descriptors */ + ctx->adata.key_virt = key; + ctx->adata.keylen = keylen; + + print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + return acmac_set_sh_desc(ahash); +} + +/* + * ahash_edesc - s/w-extended ahash descriptor + * @sec4_sg_dma: physical mapped address of h/w link table + * @src_nents: number of segments in input scatterlist + * @sec4_sg_bytes: length of dma mapped sec4_sg space + * @bklog: stored to determine if the request needs backlog + * @hw_desc: the h/w job descriptor followed by any referenced link tables + * @sec4_sg: h/w link table + */ +struct ahash_edesc { + dma_addr_t sec4_sg_dma; + int src_nents; + int sec4_sg_bytes; + bool bklog; + u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned; + struct sec4_sg_entry sec4_sg[]; +}; + +static inline void ahash_unmap(struct device *dev, + struct ahash_edesc *edesc, + struct ahash_request *req, int dst_len) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + if (edesc->src_nents) + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); + + if (edesc->sec4_sg_bytes) + dma_unmap_single(dev, edesc->sec4_sg_dma, + edesc->sec4_sg_bytes, DMA_TO_DEVICE); + + if (state->buf_dma) { + dma_unmap_single(dev, state->buf_dma, state->buflen, + DMA_TO_DEVICE); + state->buf_dma = 0; + } +} + +static inline void ahash_unmap_ctx(struct device *dev, + struct ahash_edesc *edesc, + struct ahash_request *req, int dst_len, u32 flag) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + if (state->ctx_dma) { + dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); + state->ctx_dma = 0; + } + ahash_unmap(dev, edesc, req, dst_len); +} + +static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err, + void *context, enum dma_data_direction dir) +{ + struct ahash_request *req = context; + struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); + struct ahash_edesc *edesc; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + int digestsize = crypto_ahash_digestsize(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int ecode = 0; + bool has_bklog; + + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + + edesc = state->edesc; + 
has_bklog = edesc->bklog; + + if (err) + ecode = caam_jr_strstatus(jrdev, err); + + ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir); + memcpy(req->result, state->caam_ctx, digestsize); + kfree(edesc); + + print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + + /* + * If no backlog flag, the completion of the request is done + * by CAAM, not crypto engine. + */ + if (!has_bklog) + req->base.complete(&req->base, ecode); + else + crypto_finalize_hash_request(jrp->engine, req, ecode); +} + +static void ahash_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE); +} + +static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL); +} + +static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err, + void *context, enum dma_data_direction dir) +{ + struct ahash_request *req = context; + struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); + struct ahash_edesc *edesc; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + int digestsize = crypto_ahash_digestsize(ahash); + int ecode = 0; + bool has_bklog; + + dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + + edesc = state->edesc; + has_bklog = edesc->bklog; + if (err) + ecode = caam_jr_strstatus(jrdev, err); + + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir); + kfree(edesc); + + scatterwalk_map_and_copy(state->buf, req->src, + req->nbytes - state->next_buflen, + state->next_buflen, 0); + state->buflen = state->next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->buf, + state->buflen, 1); + + print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, + ctx->ctx_len, 1); + if (req->result) + print_hex_dump_debug("result@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->result, + digestsize, 1); + + /* + * If no backlog flag, the completion of the request is done + * by CAAM, not crypto engine. + */ + if (!has_bklog) + req->base.complete(&req->base, ecode); + else + crypto_finalize_hash_request(jrp->engine, req, ecode); + +} + +static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL); +} + +static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE); +} + +/* + * Allocate an enhanced descriptor, which contains the hardware descriptor + * and space for hardware scatter table containing sg_num entries. + */ +static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, + int sg_num, u32 *sh_desc, + dma_addr_t sh_desc_dma) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + struct ahash_edesc *edesc; + unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); + + edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); + if (!edesc) { + dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); + return NULL; + } + + state->edesc = edesc; + + init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), + HDR_SHARE_DEFER | HDR_REVERSE); + + return edesc; +} + +static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, + struct ahash_edesc *edesc, + struct ahash_request *req, int nents, + unsigned int first_sg, + unsigned int first_bytes, size_t to_hash) +{ + dma_addr_t src_dma; + u32 options; + + if (nents > 1 || first_sg) { + struct sec4_sg_entry *sg = edesc->sec4_sg; + unsigned int sgsize = sizeof(*sg) * + pad_sg_nents(first_sg + nents); + + sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0); + + src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); + if (dma_mapping_error(ctx->jrdev, src_dma)) { + dev_err(ctx->jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } + + edesc->sec4_sg_bytes = sgsize; + edesc->sec4_sg_dma = src_dma; + options = LDST_SGF; + } else { + src_dma = sg_dma_address(req->src); + options = 0; + } + + append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, + options); + + return 0; +} + +static int ahash_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct ahash_request *req = ahash_request_cast(areq); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + u32 *desc = state->edesc->hw_desc; + int ret; + + state->edesc->bklog = true; + + ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req); + + if (ret != -EINPROGRESS) { + ahash_unmap(jrdev, state->edesc, req, 0); + kfree(state->edesc); + } else { + ret = 0; + } + + return ret; +} + +static int ahash_enqueue_req(struct device *jrdev, + void (*cbk)(struct device *jrdev, u32 *desc, + u32 err, void *context), + struct ahash_request *req, + int dst_len, enum dma_data_direction dir) +{ + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); + struct caam_hash_state *state = ahash_request_ctx(req); + struct ahash_edesc *edesc = state->edesc; + u32 *desc = edesc->hw_desc; + int ret; + + state->ahash_op_done = cbk; + + /* + * Only the backlog request are sent to crypto-engine since the others + * can be handled by CAAM, if free, especially since JR has up to 1024 + * entries (more than the 10 entries from crypto-engine). 
+ */ + if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + ret = crypto_transfer_hash_request_to_engine(jrpriv->engine, + req); + else + ret = caam_jr_enqueue(jrdev, desc, cbk, req); + + if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { + ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir); + kfree(edesc); + } + + return ret; +} + +/* submit update job descriptor */ +static int ahash_update_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; + int blocksize = crypto_ahash_blocksize(ahash); + int in_len = *buflen + req->nbytes, to_hash; + u32 *desc; + int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; + struct ahash_edesc *edesc; + int ret = 0; + + *next_buflen = in_len & (blocksize - 1); + to_hash = in_len - *next_buflen; + + /* + * For XCBC and CMAC, if to_hash is multiple of block size, + * keep last block in internal buffer + */ + if ((is_xcbc_aes(ctx->adata.algtype) || + is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && + (*next_buflen == 0)) { + *next_buflen = blocksize; + to_hash -= blocksize; + } + + if (to_hash) { + int pad_nents; + int src_len = req->nbytes - *next_buflen; + + src_nents = sg_nents_for_len(req->src, src_len); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + sec4_sg_src_index = 1 + (*buflen ? 
1 : 0); + pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents); + sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); + + /* + * allocate space for base edesc and hw desc commands, + * link tables + */ + edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update, + ctx->sh_desc_update_dma); + if (!edesc) { + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + edesc->sec4_sg_bytes = sec4_sg_bytes; + + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_BIDIRECTIONAL); + if (ret) + goto unmap_ctx; + + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); + if (ret) + goto unmap_ctx; + + if (mapped_nents) + sg_to_sec4_sg_last(req->src, src_len, + edesc->sec4_sg + sec4_sg_src_index, + 0); + else + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - + 1); + + desc = edesc->hw_desc; + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + ret = -ENOMEM; + goto unmap_ctx; + } + + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + + to_hash, LDST_SGF); + + append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); + + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + ret = ahash_enqueue_req(jrdev, ahash_done_bi, req, + ctx->ctx_len, DMA_BIDIRECTIONAL); + } else if (*next_buflen) { + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, + req->nbytes, 0); + *buflen = *next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } + + return ret; +unmap_ctx: + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); + kfree(edesc); + return ret; +} + +static int ahash_final_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + int buflen = state->buflen; + u32 *desc; + int sec4_sg_bytes; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + int ret; + + sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * + sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin, + ctx->sh_desc_fin_dma); + if (!edesc) + return -ENOMEM; + + desc = edesc->hw_desc; + + edesc->sec4_sg_bytes = sec4_sg_bytes; + + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_BIDIRECTIONAL); + if (ret) + goto unmap_ctx; + + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); + if (ret) + goto unmap_ctx; + + sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 
1 : 0)); + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + ret = -ENOMEM; + goto unmap_ctx; + } + + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, + LDST_SGF); + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); + + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req, + digestsize, DMA_BIDIRECTIONAL); + unmap_ctx: + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); + kfree(edesc); + return ret; +} + +static int ahash_finup_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + int buflen = state->buflen; + u32 *desc; + int sec4_sg_src_index; + int src_nents, mapped_nents; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + int ret; + + src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + sec4_sg_src_index = 1 + (buflen ? 1 : 0); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, + ctx->sh_desc_fin, ctx->sh_desc_fin_dma); + if (!edesc) { + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + return -ENOMEM; + } + + desc = edesc->hw_desc; + + edesc->src_nents = src_nents; + + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_BIDIRECTIONAL); + if (ret) + goto unmap_ctx; + + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); + if (ret) + goto unmap_ctx; + + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, + sec4_sg_src_index, ctx->ctx_len + buflen, + req->nbytes); + if (ret) + goto unmap_ctx; + + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); + + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req, + digestsize, DMA_BIDIRECTIONAL); + unmap_ctx: + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); + kfree(edesc); + return ret; +} + +static int ahash_digest(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + u32 *desc; + int digestsize = crypto_ahash_digestsize(ahash); + int src_nents, mapped_nents; + struct ahash_edesc *edesc; + int ret; + + state->buf_dma = 0; + + src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to map source for DMA\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + /* allocate space for base edesc and hw desc 
commands, link tables */ + edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0, + ctx->sh_desc_digest, ctx->sh_desc_digest_dma); + if (!edesc) { + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, + req->nbytes); + if (ret) { + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + return ret; + } + + desc = edesc->hw_desc; + + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) { + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + return -ENOMEM; + } + + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + return ahash_enqueue_req(jrdev, ahash_done, req, digestsize, + DMA_FROM_DEVICE); +} + +/* submit ahash final if it the first job descriptor */ +static int ahash_final_no_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + u8 *buf = state->buf; + int buflen = state->buflen; + u32 *desc; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + int ret; + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest, + ctx->sh_desc_digest_dma); + if (!edesc) + return -ENOMEM; + + desc = edesc->hw_desc; + + if (buflen) { + state->buf_dma = dma_map_single(jrdev, buf, buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, state->buf_dma)) { + dev_err(jrdev, "unable to map src\n"); + goto unmap; + } + + append_seq_in_ptr(desc, state->buf_dma, buflen, 0); + } + + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) + goto unmap; + + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + return ahash_enqueue_req(jrdev, ahash_done, req, + digestsize, DMA_FROM_DEVICE); + unmap: + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + return -ENOMEM; +} + +/* submit ahash update if it the first job descriptor after update */ +static int ahash_update_no_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; + int blocksize = crypto_ahash_blocksize(ahash); + int in_len = *buflen + req->nbytes, to_hash; + int sec4_sg_bytes, src_nents, mapped_nents; + struct ahash_edesc *edesc; + u32 *desc; + int ret = 0; + + *next_buflen = in_len & (blocksize - 1); + to_hash = in_len - *next_buflen; + + /* + * For XCBC and CMAC, if to_hash is multiple of block size, + * keep last block in internal buffer + */ + if ((is_xcbc_aes(ctx->adata.algtype) || + is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && + (*next_buflen == 0)) { + *next_buflen = blocksize; + to_hash -= blocksize; + } + + if (to_hash) { + int pad_nents; + int src_len = req->nbytes - *next_buflen; + + src_nents = sg_nents_for_len(req->src, src_len); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if 
(!mapped_nents) { + dev_err(jrdev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + pad_nents = pad_sg_nents(1 + mapped_nents); + sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); + + /* + * allocate space for base edesc and hw desc commands, + * link tables + */ + edesc = ahash_edesc_alloc(req, pad_nents, + ctx->sh_desc_update_first, + ctx->sh_desc_update_first_dma); + if (!edesc) { + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + edesc->sec4_sg_bytes = sec4_sg_bytes; + + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); + if (ret) + goto unmap_ctx; + + sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); + + desc = edesc->hw_desc; + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + ret = -ENOMEM; + goto unmap_ctx; + } + + append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); + + ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); + if (ret) + goto unmap_ctx; + + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req, + ctx->ctx_len, DMA_TO_DEVICE); + if ((ret != -EINPROGRESS) && (ret != -EBUSY)) + return ret; + state->update = ahash_update_ctx; + state->finup = ahash_finup_ctx; + state->final = ahash_final_ctx; + } else if (*next_buflen) { + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, + req->nbytes, 0); + *buflen = *next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } + + return ret; + unmap_ctx: + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); + kfree(edesc); + return ret; +} + +/* submit ahash finup if it the first job descriptor after update */ +static int ahash_finup_no_ctx(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + int buflen = state->buflen; + u32 *desc; + int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + int ret; + + src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + sec4_sg_src_index = 2; + sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * + sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, + ctx->sh_desc_digest, ctx->sh_desc_digest_dma); + if (!edesc) { + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + return -ENOMEM; + } + + desc = edesc->hw_desc; + + edesc->src_nents = src_nents; + edesc->sec4_sg_bytes = sec4_sg_bytes; + + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); + if (ret) + goto unmap; + + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, + req->nbytes); + if (ret) { + dev_err(jrdev, "unable 
to map S/G table\n"); + goto unmap; + } + + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) + goto unmap; + + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + return ahash_enqueue_req(jrdev, ahash_done, req, + digestsize, DMA_FROM_DEVICE); + unmap: + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + return -ENOMEM; + +} + +/* submit first update job descriptor after init */ +static int ahash_update_first(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); + struct device *jrdev = ctx->jrdev; + u8 *buf = state->buf; + int *buflen = &state->buflen; + int *next_buflen = &state->next_buflen; + int to_hash; + int blocksize = crypto_ahash_blocksize(ahash); + u32 *desc; + int src_nents, mapped_nents; + struct ahash_edesc *edesc; + int ret = 0; + + *next_buflen = req->nbytes & (blocksize - 1); + to_hash = req->nbytes - *next_buflen; + + /* + * For XCBC and CMAC, if to_hash is multiple of block size, + * keep last block in internal buffer + */ + if ((is_xcbc_aes(ctx->adata.algtype) || + is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && + (*next_buflen == 0)) { + *next_buflen = blocksize; + to_hash -= blocksize; + } + + if (to_hash) { + src_nents = sg_nents_for_len(req->src, + req->nbytes - *next_buflen); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to map source for DMA\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + /* + * allocate space for base edesc and hw desc commands, + * link tables + */ + edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? 
+ mapped_nents : 0, + ctx->sh_desc_update_first, + ctx->sh_desc_update_first_dma); + if (!edesc) { + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + return -ENOMEM; + } + + edesc->src_nents = src_nents; + + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, + to_hash); + if (ret) + goto unmap_ctx; + + desc = edesc->hw_desc; + + ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); + if (ret) + goto unmap_ctx; + + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req, + ctx->ctx_len, DMA_TO_DEVICE); + if ((ret != -EINPROGRESS) && (ret != -EBUSY)) + return ret; + state->update = ahash_update_ctx; + state->finup = ahash_finup_ctx; + state->final = ahash_final_ctx; + } else if (*next_buflen) { + state->update = ahash_update_no_ctx; + state->finup = ahash_finup_no_ctx; + state->final = ahash_final_no_ctx; + scatterwalk_map_and_copy(buf, req->src, 0, + req->nbytes, 0); + *buflen = *next_buflen; + + print_hex_dump_debug("buf@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, buf, + *buflen, 1); + } + + return ret; + unmap_ctx: + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); + kfree(edesc); + return ret; +} + +static int ahash_finup_first(struct ahash_request *req) +{ + return ahash_digest(req); +} + +static int ahash_init(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + state->update = ahash_update_first; + state->finup = ahash_finup_first; + state->final = ahash_final_no_ctx; + + state->ctx_dma = 0; + state->ctx_dma_len = 0; + state->buf_dma = 0; + state->buflen = 0; + state->next_buflen = 0; + + return 0; +} + +static int ahash_update(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + return state->update(req); +} + +static int ahash_finup(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + return state->finup(req); +} + +static int ahash_final(struct ahash_request *req) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + + return state->final(req); +} + +static int ahash_export(struct ahash_request *req, void *out) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + struct caam_export_state *export = out; + u8 *buf = state->buf; + int len = state->buflen; + + memcpy(export->buf, buf, len); + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); + export->buflen = len; + export->update = state->update; + export->final = state->final; + export->finup = state->finup; + + return 0; +} + +static int ahash_import(struct ahash_request *req, const void *in) +{ + struct caam_hash_state *state = ahash_request_ctx(req); + const struct caam_export_state *export = in; + + memset(state, 0, sizeof(*state)); + memcpy(state->buf, export->buf, export->buflen); + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); + state->buflen = export->buflen; + state->update = export->update; + state->final = export->final; + state->finup = export->finup; + + return 0; +} + +struct caam_hash_template { + char name[CRYPTO_MAX_ALG_NAME]; + char driver_name[CRYPTO_MAX_ALG_NAME]; + char hmac_name[CRYPTO_MAX_ALG_NAME]; + char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; + unsigned int blocksize; + struct ahash_alg template_ahash; + u32 alg_type; +}; + +/* ahash descriptors */ +static struct caam_hash_template driver_hash[] = { + { + .name = "sha1", + .driver_name = "sha1-caam", + 
.hmac_name = "hmac(sha1)", + .hmac_driver_name = "hmac-sha1-caam", + .blocksize = SHA1_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA1, + }, { + .name = "sha224", + .driver_name = "sha224-caam", + .hmac_name = "hmac(sha224)", + .hmac_driver_name = "hmac-sha224-caam", + .blocksize = SHA224_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA224, + }, { + .name = "sha256", + .driver_name = "sha256-caam", + .hmac_name = "hmac(sha256)", + .hmac_driver_name = "hmac-sha256-caam", + .blocksize = SHA256_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA256, + }, { + .name = "sha384", + .driver_name = "sha384-caam", + .hmac_name = "hmac(sha384)", + .hmac_driver_name = "hmac-sha384-caam", + .blocksize = SHA384_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA384, + }, { + .name = "sha512", + .driver_name = "sha512-caam", + .hmac_name = "hmac(sha512)", + .hmac_driver_name = "hmac-sha512-caam", + .blocksize = SHA512_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_SHA512, + }, { + .name = "md5", + .driver_name = "md5-caam", + .hmac_name = "hmac(md5)", + .hmac_driver_name = "hmac-md5-caam", + .blocksize = MD5_BLOCK_WORDS * 4, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = ahash_setkey, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_MD5, + }, { + .hmac_name = "xcbc(aes)", + .hmac_driver_name = "xcbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = axcbc_setkey, + .halg = { + .digestsize = AES_BLOCK_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = 
OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, + }, { + .hmac_name = "cmac(aes)", + .hmac_driver_name = "cmac-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = acmac_setkey, + .halg = { + .digestsize = AES_BLOCK_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, + }, +}; + +struct caam_hash_alg { + struct list_head entry; + int alg_type; + struct ahash_alg ahash_alg; +}; + +static int caam_hash_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct crypto_alg *base = tfm->__crt_alg; + struct hash_alg_common *halg = + container_of(base, struct hash_alg_common, base); + struct ahash_alg *alg = + container_of(halg, struct ahash_alg, halg); + struct caam_hash_alg *caam_hash = + container_of(alg, struct caam_hash_alg, ahash_alg); + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, + HASH_MSG_LEN + SHA1_DIGEST_SIZE, + HASH_MSG_LEN + 32, + HASH_MSG_LEN + SHA256_DIGEST_SIZE, + HASH_MSG_LEN + 64, + HASH_MSG_LEN + SHA512_DIGEST_SIZE }; + const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx, + sh_desc_update); + dma_addr_t dma_addr; + struct caam_drv_private *priv; + + /* + * Get a Job ring from Job Ring driver to ensure in-order + * crypto request processing per tfm + */ + ctx->jrdev = caam_jr_alloc(); + if (IS_ERR(ctx->jrdev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->jrdev); + } + + priv = dev_get_drvdata(ctx->jrdev->parent); + + if (is_xcbc_aes(caam_hash->alg_type)) { + ctx->dir = DMA_TO_DEVICE; + ctx->key_dir = DMA_BIDIRECTIONAL; + ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; + ctx->ctx_len = 48; + } else if (is_cmac_aes(caam_hash->alg_type)) { + ctx->dir = DMA_TO_DEVICE; + ctx->key_dir = DMA_NONE; + ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; + ctx->ctx_len = 32; + } else { + if (priv->era >= 6) { + ctx->dir = DMA_BIDIRECTIONAL; + ctx->key_dir = alg->setkey ? 
DMA_TO_DEVICE : DMA_NONE; + } else { + ctx->dir = DMA_TO_DEVICE; + ctx->key_dir = DMA_NONE; + } + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; + ctx->ctx_len = runninglen[(ctx->adata.algtype & + OP_ALG_ALGSEL_SUBMASK) >> + OP_ALG_ALGSEL_SHIFT]; + } + + if (ctx->key_dir != DMA_NONE) { + ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, + ARRAY_SIZE(ctx->key), + ctx->key_dir, + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { + dev_err(ctx->jrdev, "unable to map key\n"); + caam_jr_free(ctx->jrdev); + return -ENOMEM; + } + } + + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, + offsetof(struct caam_hash_ctx, key) - + sh_desc_update_offset, + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->jrdev, dma_addr)) { + dev_err(ctx->jrdev, "unable to map shared descriptors\n"); + + if (ctx->key_dir != DMA_NONE) + dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, + ARRAY_SIZE(ctx->key), + ctx->key_dir, + DMA_ATTR_SKIP_CPU_SYNC); + + caam_jr_free(ctx->jrdev); + return -ENOMEM; + } + + ctx->sh_desc_update_dma = dma_addr; + ctx->sh_desc_update_first_dma = dma_addr + + offsetof(struct caam_hash_ctx, + sh_desc_update_first) - + sh_desc_update_offset; + ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, + sh_desc_fin) - + sh_desc_update_offset; + ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, + sh_desc_digest) - + sh_desc_update_offset; + + ctx->enginectx.op.do_one_request = ahash_do_one_req; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct caam_hash_state)); + + /* + * For keyed hash algorithms shared descriptors + * will be created later in setkey() callback + */ + return alg->setkey ? 0 : ahash_set_sh_desc(ahash); +} + +static void caam_hash_cra_exit(struct crypto_tfm *tfm) +{ + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, + offsetof(struct caam_hash_ctx, key) - + offsetof(struct caam_hash_ctx, sh_desc_update), + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); + if (ctx->key_dir != DMA_NONE) + dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, + ARRAY_SIZE(ctx->key), ctx->key_dir, + DMA_ATTR_SKIP_CPU_SYNC); + caam_jr_free(ctx->jrdev); +} + +void caam_algapi_hash_exit(void) +{ + struct caam_hash_alg *t_alg, *n; + + if (!hash_list.next) + return; + + list_for_each_entry_safe(t_alg, n, &hash_list, entry) { + crypto_unregister_ahash(&t_alg->ahash_alg); + list_del(&t_alg->entry); + kfree(t_alg); + } +} + +static struct caam_hash_alg * +caam_hash_alloc(struct caam_hash_template *template, + bool keyed) +{ + struct caam_hash_alg *t_alg; + struct ahash_alg *halg; + struct crypto_alg *alg; + + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); + if (!t_alg) { + pr_err("failed to allocate t_alg\n"); + return ERR_PTR(-ENOMEM); + } + + t_alg->ahash_alg = template->template_ahash; + halg = &t_alg->ahash_alg; + alg = &halg->halg.base; + + if (keyed) { + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", + template->hmac_name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->hmac_driver_name); + } else { + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", + template->name); + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + template->driver_name); + t_alg->ahash_alg.setkey = NULL; + } + alg->cra_module = THIS_MODULE; + alg->cra_init = caam_hash_cra_init; + alg->cra_exit = caam_hash_cra_exit; + alg->cra_ctxsize = sizeof(struct caam_hash_ctx); + alg->cra_priority = 
CAAM_CRA_PRIORITY; + alg->cra_blocksize = template->blocksize; + alg->cra_alignmask = 0; + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; + + t_alg->alg_type = template->alg_type; + + return t_alg; +} + +int caam_algapi_hash_init(struct device *ctrldev) +{ + int i = 0, err = 0; + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); + unsigned int md_limit = SHA512_DIGEST_SIZE; + u32 md_inst, md_vid; + + /* + * Register crypto algorithms the device supports. First, identify + * presence and attributes of MD block. + */ + if (priv->era < 10) { + md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) & + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + } else { + u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha); + + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; + md_inst = mdha & CHA_VER_NUM_MASK; + } + + /* + * Skip registration of any hashing algorithms if MD block + * is not present. + */ + if (!md_inst) + return 0; + + /* Limit digest size based on LP256 */ + if (md_vid == CHA_VER_VID_MD_LP256) + md_limit = SHA256_DIGEST_SIZE; + + INIT_LIST_HEAD(&hash_list); + + /* register crypto algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { + struct caam_hash_alg *t_alg; + struct caam_hash_template *alg = driver_hash + i; + + /* If MD size is not supported by device, skip registration */ + if (is_mdha(alg->alg_type) && + alg->template_ahash.halg.digestsize > md_limit) + continue; + + /* register hmac version */ + t_alg = caam_hash_alloc(alg, true); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + pr_warn("%s alg allocation failed\n", + alg->hmac_driver_name); + continue; + } + + err = crypto_register_ahash(&t_alg->ahash_alg); + if (err) { + pr_warn("%s alg registration failed: %d\n", + t_alg->ahash_alg.halg.base.cra_driver_name, + err); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &hash_list); + + if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) + continue; + + /* register unkeyed version */ + t_alg = caam_hash_alloc(alg, false); + if (IS_ERR(t_alg)) { + err = PTR_ERR(t_alg); + pr_warn("%s alg allocation failed\n", alg->driver_name); + continue; + } + + err = crypto_register_ahash(&t_alg->ahash_alg); + if (err) { + pr_warn("%s alg registration failed: %d\n", + t_alg->ahash_alg.halg.base.cra_driver_name, + err); + kfree(t_alg); + } else + list_add_tail(&t_alg->entry, &hash_list); + } + + return err; +} diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c new file mode 100644 index 000000000..78383d77d --- /dev/null +++ b/drivers/crypto/caam/caamhash_desc.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Shared descriptors for ahash algorithms + * + * Copyright 2017-2019 NXP + */ + +#include "compat.h" +#include "desc_constr.h" +#include "caamhash_desc.h" + +/** + * cnstr_shdsc_ahash - ahash shared descriptor + * @desc: pointer to buffer used for descriptor construction + * @adata: pointer to authentication transform definitions. + * A split key is required for SEC Era < 6; the size of the split key + * is specified in this case. + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, + * SHA256, SHA384, SHA512}. 
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of Context Register
+ * @import_ctx: true if previous Context Register needs to be restored
+ *		must be true for ahash update and final
+ *		must be false for ahash first and digest
+ * @era: SEC Era
+ */
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+		       int digestsize, int ctx_len, bool import_ctx, int era)
+{
+	u32 op = adata->algtype;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Append key if it has been set; ahash update excluded */
+	if (state != OP_ALG_AS_UPDATE && adata->keylen) {
+		u32 *skip_key_load;
+
+		/* Skip key loading if already shared */
+		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_SHRD);
+
+		if (era < 6)
+			append_key_as_imm(desc, adata->key_virt,
+					  adata->keylen_pad,
+					  adata->keylen, CLASS_2 |
+					  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		else
+			append_proto_dkp(desc, adata);
+
+		set_jump_tgt_here(desc, skip_key_load);
+
+		op |= OP_ALG_AAI_HMAC_PRECOMP;
+	}
+
+	/* If needed, import context from software */
+	if (import_ctx)
+		append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
+				LDST_SRCDST_BYTE_CONTEXT);
+
+	/* Class 2 operation */
+	append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+	/*
+	 * Load from buf and/or src and write to req->result or state->context
+	 * Calculate remaining bytes to read
+	 */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	/* Read remaining bytes */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+			     FIFOLD_TYPE_MSG | KEY_VLF);
+	/* Store class2 context bytes */
+	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+}
+EXPORT_SYMBOL(cnstr_shdsc_ahash);
+
+/**
+ * cnstr_shdsc_sk_hash - shared descriptor for symmetric key cipher-based
+ *			 hash algorithms
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} + * @digestsize: algorithm's digest size + * @ctx_len: size of Context Register + */ +void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, + int digestsize, int ctx_len) +{ + u32 *skip_key_load; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + + /* Skip loading of key, context if already shared */ + skip_key_load = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); + + if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) { + append_key_as_imm(desc, adata->key_virt, adata->keylen, + adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + } else { /* UPDATE, FINALIZE */ + if (is_xcbc_aes(adata->algtype)) + /* Load K1 */ + append_key(desc, adata->key_dma, adata->keylen, + CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC); + else /* CMAC */ + append_key_as_imm(desc, adata->key_virt, adata->keylen, + adata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + /* Restore context */ + append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + } + + set_jump_tgt_here(desc, skip_key_load); + + /* Class 1 operation */ + append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT); + + /* + * Load from buf and/or src and write to req->result or state->context + * Calculate remaining bytes to read + */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Read remaining bytes */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 | + FIFOLD_TYPE_MSG | FIFOLDST_VLF); + + /* + * Save context: + * - xcbc: partial hash, keys K2 and K3 + * - cmac: partial hash, constant L = E(K,0) + */ + append_seq_store(desc, digestsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT) + /* Save K1 */ + append_fifo_store(desc, adata->key_dma, adata->keylen, + LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK); +} +EXPORT_SYMBOL(cnstr_shdsc_sk_hash); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support"); +MODULE_AUTHOR("NXP Semiconductors"); diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h new file mode 100644 index 000000000..4f369b8cb --- /dev/null +++ b/drivers/crypto/caam/caamhash_desc.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Shared descriptors for ahash algorithms + * + * Copyright 2017 NXP + */ + +#ifndef _CAAMHASH_DESC_H_ +#define _CAAMHASH_DESC_H_ + +/* length of descriptors text */ +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ) +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) + +static inline bool is_xcbc_aes(u32 algtype) +{ + return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == + (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC); +} + +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, + int digestsize, int ctx_len, bool import_ctx, int era); + +void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, + int digestsize, int ctx_len); +#endif /* _CAAMHASH_DESC_H_ */ diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c new file mode 100644 index 000000000..5bd70a59f --- /dev/null +++ b/drivers/crypto/caam/caampkc.c @@ -0,0 +1,1213 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * caam - Freescale FSL CAAM support 
for Public Key Cryptography + * + * Copyright 2016 Freescale Semiconductor, Inc. + * Copyright 2018-2019 NXP + * + * There is no Shared Descriptor for PKC so that the Job Descriptor must carry + * all the desired key parameters, input and output pointers. + */ +#include "compat.h" +#include "regs.h" +#include "intern.h" +#include "jr.h" +#include "error.h" +#include "desc_constr.h" +#include "sg_sw_sec4.h" +#include "caampkc.h" + +#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB) +#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ + SIZEOF_RSA_PRIV_F1_PDB) +#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ + SIZEOF_RSA_PRIV_F2_PDB) +#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ + SIZEOF_RSA_PRIV_F3_PDB) +#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ + +/* buffer filled with zeros, used for padding */ +static u8 *zero_buffer; + +/* + * variable used to avoid double free of resources in case + * algorithm registration was unsuccessful + */ +static bool init_done; + +struct caam_akcipher_alg { + struct akcipher_alg akcipher; + bool registered; +}; + +static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, + struct akcipher_request *req) +{ + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + + dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); + dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE); + + if (edesc->sec4_sg_bytes) + dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, + DMA_TO_DEVICE); +} + +static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc, + struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct rsa_pub_pdb *pdb = &edesc->pdb.pub; + + dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE); +} + +static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc, + struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; + + dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); +} + +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, + struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; + size_t p_sz = key->p_sz; + size_t q_sz = key->q_sz; + + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); +} + +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, + struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; + size_t p_sz = key->p_sz; + size_t q_sz = key->q_sz; + + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); + 
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); +} + +/* RSA Job Completion handler */ +static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) +{ + struct akcipher_request *req = context; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + struct rsa_edesc *edesc; + int ecode = 0; + bool has_bklog; + + if (err) + ecode = caam_jr_strstatus(dev, err); + + edesc = req_ctx->edesc; + has_bklog = edesc->bklog; + + rsa_pub_unmap(dev, edesc, req); + rsa_io_unmap(dev, edesc, req); + kfree(edesc); + + /* + * If no backlog flag, the completion of the request is done + * by CAAM, not crypto engine. + */ + if (!has_bklog) + akcipher_request_complete(req, ecode); + else + crypto_finalize_akcipher_request(jrp->engine, req, ecode); +} + +static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err, + void *context) +{ + struct akcipher_request *req = context; + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct rsa_edesc *edesc; + int ecode = 0; + bool has_bklog; + + if (err) + ecode = caam_jr_strstatus(dev, err); + + edesc = req_ctx->edesc; + has_bklog = edesc->bklog; + + switch (key->priv_form) { + case FORM1: + rsa_priv_f1_unmap(dev, edesc, req); + break; + case FORM2: + rsa_priv_f2_unmap(dev, edesc, req); + break; + case FORM3: + rsa_priv_f3_unmap(dev, edesc, req); + } + + rsa_io_unmap(dev, edesc, req); + kfree(edesc); + + /* + * If no backlog flag, the completion of the request is done + * by CAAM, not crypto engine. 
+ */ + if (!has_bklog) + akcipher_request_complete(req, ecode); + else + crypto_finalize_akcipher_request(jrp->engine, req, ecode); +} + +/** + * Count leading zeros, need it to strip, from a given scatterlist + * + * @sgl : scatterlist to count zeros from + * @nbytes: number of zeros, in bytes, to strip + * @flags : operation flags + */ +static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, + unsigned int nbytes, + unsigned int flags) +{ + struct sg_mapping_iter miter; + int lzeros, ents; + unsigned int len; + unsigned int tbytes = nbytes; + const u8 *buff; + + ents = sg_nents_for_len(sgl, nbytes); + if (ents < 0) + return ents; + + sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags); + + lzeros = 0; + len = 0; + while (nbytes > 0) { + /* do not strip more than given bytes */ + while (len && !*buff && lzeros < nbytes) { + lzeros++; + len--; + buff++; + } + + if (len && *buff) + break; + + if (!sg_miter_next(&miter)) + break; + + buff = miter.addr; + len = miter.length; + + nbytes -= lzeros; + lzeros = 0; + } + + miter.consumed = lzeros; + sg_miter_stop(&miter); + nbytes -= lzeros; + + return tbytes - nbytes; +} + +static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, + size_t desclen) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *dev = ctx->dev; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct caam_rsa_key *key = &ctx->key; + struct rsa_edesc *edesc; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0; + int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; + int src_nents, dst_nents; + int mapped_src_nents, mapped_dst_nents; + unsigned int diff_size = 0; + int lzeros; + + if (req->src_len > key->n_sz) { + /* + * strip leading zeros and + * return the number of zeros to skip + */ + lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len - + key->n_sz, sg_flags); + if (lzeros < 0) + return ERR_PTR(lzeros); + + req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src, + lzeros); + req_ctx->fixup_src_len = req->src_len - lzeros; + } else { + /* + * input src is less then n key modulus, + * so there will be zero padding + */ + diff_size = key->n_sz - req->src_len; + req_ctx->fixup_src = req->src; + req_ctx->fixup_src_len = req->src_len; + } + + src_nents = sg_nents_for_len(req_ctx->fixup_src, + req_ctx->fixup_src_len); + dst_nents = sg_nents_for_len(req->dst, req->dst_len); + + mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents, + DMA_TO_DEVICE); + if (unlikely(!mapped_src_nents)) { + dev_err(dev, "unable to map source\n"); + return ERR_PTR(-ENOMEM); + } + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(dev, "unable to map destination\n"); + goto src_fail; + } + + if (!diff_size && mapped_src_nents == 1) + sec4_sg_len = 0; /* no need for an input hw s/g table */ + else + sec4_sg_len = mapped_src_nents + !!diff_size; + sec4_sg_index = sec4_sg_len; + + if (mapped_dst_nents > 1) + sec4_sg_len += pad_sg_nents(mapped_dst_nents); + else + sec4_sg_len = pad_sg_nents(sec4_sg_len); + + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc, hw desc commands and link tables */ + edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, + GFP_DMA | flags); + if (!edesc) + goto dst_fail; + + edesc->sec4_sg = (void *)edesc + 
sizeof(*edesc) + desclen; + if (diff_size) + dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, + 0); + + if (sec4_sg_index) + sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len, + edesc->sec4_sg + !!diff_size, 0); + + if (mapped_dst_nents > 1) + sg_to_sec4_sg_last(req->dst, req->dst_len, + edesc->sec4_sg + sec4_sg_index, 0); + + /* Save nents for later use in Job Descriptor */ + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + + req_ctx->edesc = edesc; + + if (!sec4_sg_bytes) + return edesc; + + edesc->mapped_src_nents = mapped_src_nents; + edesc->mapped_dst_nents = mapped_dst_nents; + + edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(dev, edesc->sec4_sg_dma)) { + dev_err(dev, "unable to map S/G table\n"); + goto sec4_sg_fail; + } + + edesc->sec4_sg_bytes = sec4_sg_bytes; + + print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, + edesc->sec4_sg_bytes, 1); + + return edesc; + +sec4_sg_fail: + kfree(edesc); +dst_fail: + dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); +src_fail: + dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); + return ERR_PTR(-ENOMEM); +} + +static int akcipher_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct akcipher_request *req = container_of(areq, + struct akcipher_request, + base); + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *jrdev = ctx->dev; + u32 *desc = req_ctx->edesc->hw_desc; + int ret; + + req_ctx->edesc->bklog = true; + + ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req); + + if (ret != -EINPROGRESS) { + rsa_pub_unmap(jrdev, req_ctx->edesc, req); + rsa_io_unmap(jrdev, req_ctx->edesc, req); + kfree(req_ctx->edesc); + } else { + ret = 0; + } + + return ret; +} + +static int set_rsa_pub_pdb(struct akcipher_request *req, + struct rsa_edesc *edesc) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct device *dev = ctx->dev; + struct rsa_pub_pdb *pdb = &edesc->pdb.pub; + int sec4_sg_index = 0; + + pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->n_dma)) { + dev_err(dev, "Unable to map RSA modulus memory\n"); + return -ENOMEM; + } + + pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->e_dma)) { + dev_err(dev, "Unable to map RSA public exponent memory\n"); + dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); + return -ENOMEM; + } + + if (edesc->mapped_src_nents > 1) { + pdb->sgf |= RSA_PDB_SGF_F; + pdb->f_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->mapped_src_nents; + } else { + pdb->f_dma = sg_dma_address(req_ctx->fixup_src); + } + + if (edesc->mapped_dst_nents > 1) { + pdb->sgf |= RSA_PDB_SGF_G; + pdb->g_dma = edesc->sec4_sg_dma + + sec4_sg_index * sizeof(struct sec4_sg_entry); + } else { + pdb->g_dma = sg_dma_address(req->dst); + } + + pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; + pdb->f_len = req_ctx->fixup_src_len; + + return 0; +} + +static int set_rsa_priv_f1_pdb(struct akcipher_request *req, + struct rsa_edesc *edesc) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct 
caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct device *dev = ctx->dev; + struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; + int sec4_sg_index = 0; + + pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->n_dma)) { + dev_err(dev, "Unable to map modulus memory\n"); + return -ENOMEM; + } + + pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->d_dma)) { + dev_err(dev, "Unable to map RSA private exponent memory\n"); + dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); + return -ENOMEM; + } + + if (edesc->mapped_src_nents > 1) { + pdb->sgf |= RSA_PRIV_PDB_SGF_G; + pdb->g_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->mapped_src_nents; + + } else { + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); + } + + if (edesc->mapped_dst_nents > 1) { + pdb->sgf |= RSA_PRIV_PDB_SGF_F; + pdb->f_dma = edesc->sec4_sg_dma + + sec4_sg_index * sizeof(struct sec4_sg_entry); + } else { + pdb->f_dma = sg_dma_address(req->dst); + } + + pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz; + + return 0; +} + +static int set_rsa_priv_f2_pdb(struct akcipher_request *req, + struct rsa_edesc *edesc) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct device *dev = ctx->dev; + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; + int sec4_sg_index = 0; + size_t p_sz = key->p_sz; + size_t q_sz = key->q_sz; + + pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->d_dma)) { + dev_err(dev, "Unable to map RSA private exponent memory\n"); + return -ENOMEM; + } + + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->p_dma)) { + dev_err(dev, "Unable to map RSA prime factor p memory\n"); + goto unmap_d; + } + + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->q_dma)) { + dev_err(dev, "Unable to map RSA prime factor q memory\n"); + goto unmap_p; + } + + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, pdb->tmp1_dma)) { + dev_err(dev, "Unable to map RSA tmp1 memory\n"); + goto unmap_q; + } + + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, pdb->tmp2_dma)) { + dev_err(dev, "Unable to map RSA tmp2 memory\n"); + goto unmap_tmp1; + } + + if (edesc->mapped_src_nents > 1) { + pdb->sgf |= RSA_PRIV_PDB_SGF_G; + pdb->g_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->mapped_src_nents; + } else { + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); + } + + if (edesc->mapped_dst_nents > 1) { + pdb->sgf |= RSA_PRIV_PDB_SGF_F; + pdb->f_dma = edesc->sec4_sg_dma + + sec4_sg_index * sizeof(struct sec4_sg_entry); + } else { + pdb->f_dma = sg_dma_address(req->dst); + } + + pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz; + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz; + + return 0; + +unmap_tmp1: + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); +unmap_q: + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); +unmap_p: + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); +unmap_d: + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); + + return -ENOMEM; +} + 
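The PDB setters in this file only run once a caller submits work through the generic akcipher API. The sketch below is illustrative only and not part of this patch: the helper name caam_rsa_pub_example and the pre-encoded ber_pub_key blob are assumptions, the key is assumed to be in the BER form parsed by rsa_parse_pub_key(), the msg/out buffers are assumed to be kmalloc'd so they can be DMA-mapped, and crypto_alloc_akcipher("rsa", 0, 0) is expected to select "rsa-caam" (cra_priority 3000) once it is registered. Setting CRYPTO_TFM_REQ_MAY_BACKLOG is what makes akcipher_enqueue_req() route the request through the crypto engine instead of enqueueing directly on the job ring.

/*
 * Illustrative sketch (not part of this patch): how a kernel caller might
 * reach caam_rsa_enc() -> set_rsa_pub_pdb() via the generic akcipher API.
 */
#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int caam_rsa_pub_example(const void *ber_pub_key, unsigned int keylen,
				const void *msg, unsigned int msg_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	unsigned int out_len;
	void *out;
	int ret;

	/* Highest-priority "rsa" implementation; rsa-caam when registered */
	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* BER-encoded public key, as expected by rsa_parse_pub_key() */
	ret = crypto_akcipher_set_pub_key(tfm, ber_pub_key, keylen);
	if (ret)
		goto free_tfm;

	/* caam_rsa_max_size() reports key->n_sz */
	out_len = crypto_akcipher_maxsize(tfm);
	out = kzalloc(out_len, GFP_KERNEL);
	if (!out) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_out;
	}

	sg_init_one(&src, msg, msg_len);
	sg_init_one(&dst, out, out_len);
	akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);
	/* MAY_BACKLOG routes the request through the crypto engine */
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	/* Ends up in caam_rsa_enc() -> set_rsa_pub_pdb() -> job ring */
	ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

	akcipher_request_free(req);
free_out:
	kfree(out);
free_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}

For the private-key side the same flow would apply with crypto_akcipher_set_priv_key() and crypto_akcipher_decrypt(), which land in caam_rsa_dec() and dispatch to the FORM1/FORM2/FORM3 PDB setters shown in this file.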
+static int set_rsa_priv_f3_pdb(struct akcipher_request *req, + struct rsa_edesc *edesc) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct device *dev = ctx->dev; + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; + int sec4_sg_index = 0; + size_t p_sz = key->p_sz; + size_t q_sz = key->q_sz; + + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->p_dma)) { + dev_err(dev, "Unable to map RSA prime factor p memory\n"); + return -ENOMEM; + } + + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->q_dma)) { + dev_err(dev, "Unable to map RSA prime factor q memory\n"); + goto unmap_p; + } + + pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->dp_dma)) { + dev_err(dev, "Unable to map RSA exponent dp memory\n"); + goto unmap_q; + } + + pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->dq_dma)) { + dev_err(dev, "Unable to map RSA exponent dq memory\n"); + goto unmap_dp; + } + + pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE); + if (dma_mapping_error(dev, pdb->c_dma)) { + dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n"); + goto unmap_dq; + } + + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, pdb->tmp1_dma)) { + dev_err(dev, "Unable to map RSA tmp1 memory\n"); + goto unmap_qinv; + } + + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, pdb->tmp2_dma)) { + dev_err(dev, "Unable to map RSA tmp2 memory\n"); + goto unmap_tmp1; + } + + if (edesc->mapped_src_nents > 1) { + pdb->sgf |= RSA_PRIV_PDB_SGF_G; + pdb->g_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->mapped_src_nents; + } else { + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + + pdb->g_dma = sg_dma_address(req_ctx->fixup_src); + } + + if (edesc->mapped_dst_nents > 1) { + pdb->sgf |= RSA_PRIV_PDB_SGF_F; + pdb->f_dma = edesc->sec4_sg_dma + + sec4_sg_index * sizeof(struct sec4_sg_entry); + } else { + pdb->f_dma = sg_dma_address(req->dst); + } + + pdb->sgf |= key->n_sz; + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz; + + return 0; + +unmap_tmp1: + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); +unmap_qinv: + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); +unmap_dq: + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); +unmap_dp: + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); +unmap_q: + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); +unmap_p: + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); + + return -ENOMEM; +} + +static int akcipher_enqueue_req(struct device *jrdev, + void (*cbk)(struct device *jrdev, u32 *desc, + u32 err, void *context), + struct akcipher_request *req) +{ + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); + struct rsa_edesc *edesc = req_ctx->edesc; + u32 *desc = edesc->hw_desc; + int ret; + + req_ctx->akcipher_op_done = cbk; + /* + * Only the backlog request are sent to crypto-engine since the others + * can be handled by CAAM, if free, especially since JR has up to 1024 + * entries (more 
than the 10 entries from crypto-engine). + */ + if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine, + req); + else + ret = caam_jr_enqueue(jrdev, desc, cbk, req); + + if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { + switch (key->priv_form) { + case FORM1: + rsa_priv_f1_unmap(jrdev, edesc, req); + break; + case FORM2: + rsa_priv_f2_unmap(jrdev, edesc, req); + break; + case FORM3: + rsa_priv_f3_unmap(jrdev, edesc, req); + break; + default: + rsa_pub_unmap(jrdev, edesc, req); + } + rsa_io_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int caam_rsa_enc(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + struct device *jrdev = ctx->dev; + struct rsa_edesc *edesc; + int ret; + + if (unlikely(!key->n || !key->e)) + return -EINVAL; + + if (req->dst_len < key->n_sz) { + req->dst_len = key->n_sz; + dev_err(jrdev, "Output buffer length less than parameter n\n"); + return -EOVERFLOW; + } + + /* Allocate extended descriptor */ + edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Set RSA Encrypt Protocol Data Block */ + ret = set_rsa_pub_pdb(req, edesc); + if (ret) + goto init_fail; + + /* Initialize Job Descriptor */ + init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub); + + return akcipher_enqueue_req(jrdev, rsa_pub_done, req); + +init_fail: + rsa_io_unmap(jrdev, edesc, req); + kfree(edesc); + return ret; +} + +static int caam_rsa_dec_priv_f1(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *jrdev = ctx->dev; + struct rsa_edesc *edesc; + int ret; + + /* Allocate extended descriptor */ + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */ + ret = set_rsa_priv_f1_pdb(req, edesc); + if (ret) + goto init_fail; + + /* Initialize Job Descriptor */ + init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1); + + return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); + +init_fail: + rsa_io_unmap(jrdev, edesc, req); + kfree(edesc); + return ret; +} + +static int caam_rsa_dec_priv_f2(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *jrdev = ctx->dev; + struct rsa_edesc *edesc; + int ret; + + /* Allocate extended descriptor */ + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */ + ret = set_rsa_priv_f2_pdb(req, edesc); + if (ret) + goto init_fail; + + /* Initialize Job Descriptor */ + init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2); + + return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); + +init_fail: + rsa_io_unmap(jrdev, edesc, req); + kfree(edesc); + return ret; +} + +static int caam_rsa_dec_priv_f3(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *jrdev = ctx->dev; + struct rsa_edesc *edesc; + int ret; + + /* Allocate extended descriptor */ + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Set RSA 
Decrypt Protocol Data Block - Private Key Form #3 */ + ret = set_rsa_priv_f3_pdb(req, edesc); + if (ret) + goto init_fail; + + /* Initialize Job Descriptor */ + init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3); + + return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); + +init_fail: + rsa_io_unmap(jrdev, edesc, req); + kfree(edesc); + return ret; +} + +static int caam_rsa_dec(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + int ret; + + if (unlikely(!key->n || !key->d)) + return -EINVAL; + + if (req->dst_len < key->n_sz) { + req->dst_len = key->n_sz; + dev_err(ctx->dev, "Output buffer length less than parameter n\n"); + return -EOVERFLOW; + } + + if (key->priv_form == FORM3) + ret = caam_rsa_dec_priv_f3(req); + else if (key->priv_form == FORM2) + ret = caam_rsa_dec_priv_f2(req); + else + ret = caam_rsa_dec_priv_f1(req); + + return ret; +} + +static void caam_rsa_free_key(struct caam_rsa_key *key) +{ + kfree_sensitive(key->d); + kfree_sensitive(key->p); + kfree_sensitive(key->q); + kfree_sensitive(key->dp); + kfree_sensitive(key->dq); + kfree_sensitive(key->qinv); + kfree_sensitive(key->tmp1); + kfree_sensitive(key->tmp2); + kfree(key->e); + kfree(key->n); + memset(key, 0, sizeof(*key)); +} + +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes) +{ + while (!**ptr && *nbytes) { + (*ptr)++; + (*nbytes)--; + } +} + +/** + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members. + * dP, dQ and qInv could decode to less than the corresponding p, q length, as + * the BER-encoding requires that the minimum number of bytes be used to encode + * the integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate + * length. + * + * @ptr : pointer to {dP, dQ, qInv} CRT member + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member + * @dstlen: length in bytes of corresponding p or q prime factor + */ +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen) +{ + u8 *dst; + + caam_rsa_drop_leading_zeros(&ptr, &nbytes); + if (!nbytes) + return NULL; + + dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL); + if (!dst) + return NULL; + + memcpy(dst + (dstlen - nbytes), ptr, nbytes); + + return dst; +} + +/** + * caam_read_raw_data - Read a raw byte stream as a positive integer. + * The function skips the buffer's leading zeros, copies the remaining data + * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns + * the address of the new buffer. 
+ * + * @buf : The data to read + * @nbytes: The amount of data to read + */ +static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes) +{ + + caam_rsa_drop_leading_zeros(&buf, nbytes); + if (!*nbytes) + return NULL; + + return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL); +} + +static int caam_rsa_check_key_length(unsigned int len) +{ + if (len > 4096) + return -EINVAL; + return 0; +} + +static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct rsa_key raw_key = {NULL}; + struct caam_rsa_key *rsa_key = &ctx->key; + int ret; + + /* Free the old RSA key if any */ + caam_rsa_free_key(rsa_key); + + ret = rsa_parse_pub_key(&raw_key, key, keylen); + if (ret) + return ret; + + /* Copy key in DMA zone */ + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); + if (!rsa_key->e) + goto err; + + /* + * Skip leading zeros and copy the positive integer to a buffer + * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor + * expects a positive integer for the RSA modulus and uses its length as + * decryption output length. + */ + rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz); + if (!rsa_key->n) + goto err; + + if (caam_rsa_check_key_length(raw_key.n_sz << 3)) { + caam_rsa_free_key(rsa_key); + return -EINVAL; + } + + rsa_key->e_sz = raw_key.e_sz; + rsa_key->n_sz = raw_key.n_sz; + + return 0; +err: + caam_rsa_free_key(rsa_key); + return -ENOMEM; +} + +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, + struct rsa_key *raw_key) +{ + struct caam_rsa_key *rsa_key = &ctx->key; + size_t p_sz = raw_key->p_sz; + size_t q_sz = raw_key->q_sz; + + rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz); + if (!rsa_key->p) + return; + rsa_key->p_sz = p_sz; + + rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz); + if (!rsa_key->q) + goto free_p; + rsa_key->q_sz = q_sz; + + rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL); + if (!rsa_key->tmp1) + goto free_q; + + rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL); + if (!rsa_key->tmp2) + goto free_tmp1; + + rsa_key->priv_form = FORM2; + + rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz); + if (!rsa_key->dp) + goto free_tmp2; + + rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz); + if (!rsa_key->dq) + goto free_dp; + + rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz, + q_sz); + if (!rsa_key->qinv) + goto free_dq; + + rsa_key->priv_form = FORM3; + + return; + +free_dq: + kfree_sensitive(rsa_key->dq); +free_dp: + kfree_sensitive(rsa_key->dp); +free_tmp2: + kfree_sensitive(rsa_key->tmp2); +free_tmp1: + kfree_sensitive(rsa_key->tmp1); +free_q: + kfree_sensitive(rsa_key->q); +free_p: + kfree_sensitive(rsa_key->p); +} + +static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct rsa_key raw_key = {NULL}; + struct caam_rsa_key *rsa_key = &ctx->key; + int ret; + + /* Free the old RSA key if any */ + caam_rsa_free_key(rsa_key); + + ret = rsa_parse_priv_key(&raw_key, key, keylen); + if (ret) + return ret; + + /* Copy key in DMA zone */ + rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL); + if (!rsa_key->d) + goto err; + + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); + if (!rsa_key->e) + goto err; + + /* + * Skip leading zeros and copy the positive integer to a buffer + * allocated in the 
GFP_DMA | GFP_KERNEL zone. The decryption descriptor + * expects a positive integer for the RSA modulus and uses its length as + * decryption output length. + */ + rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz); + if (!rsa_key->n) + goto err; + + if (caam_rsa_check_key_length(raw_key.n_sz << 3)) { + caam_rsa_free_key(rsa_key); + return -EINVAL; + } + + rsa_key->d_sz = raw_key.d_sz; + rsa_key->e_sz = raw_key.e_sz; + rsa_key->n_sz = raw_key.n_sz; + + caam_rsa_set_priv_key_form(ctx, &raw_key); + + return 0; + +err: + caam_rsa_free_key(rsa_key); + return -ENOMEM; +} + +static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm) +{ + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ctx->key.n_sz; +} + +/* Per session pkc's driver context creation function */ +static int caam_rsa_init_tfm(struct crypto_akcipher *tfm) +{ + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + + ctx->dev = caam_jr_alloc(); + + if (IS_ERR(ctx->dev)) { + pr_err("Job Ring Device allocation for transform failed\n"); + return PTR_ERR(ctx->dev); + } + + ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer, + CAAM_RSA_MAX_INPUT_SIZE - 1, + DMA_TO_DEVICE); + if (dma_mapping_error(ctx->dev, ctx->padding_dma)) { + dev_err(ctx->dev, "unable to map padding\n"); + caam_jr_free(ctx->dev); + return -ENOMEM; + } + + ctx->enginectx.op.do_one_request = akcipher_do_one_req; + + return 0; +} + +/* Per session pkc's driver context cleanup function */ +static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct caam_rsa_key *key = &ctx->key; + + dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE - + 1, DMA_TO_DEVICE); + caam_rsa_free_key(key); + caam_jr_free(ctx->dev); +} + +static struct caam_akcipher_alg caam_rsa = { + .akcipher = { + .encrypt = caam_rsa_enc, + .decrypt = caam_rsa_dec, + .set_pub_key = caam_rsa_set_pub_key, + .set_priv_key = caam_rsa_set_priv_key, + .max_size = caam_rsa_max_size, + .init = caam_rsa_init_tfm, + .exit = caam_rsa_exit_tfm, + .reqsize = sizeof(struct caam_rsa_req_ctx), + .base = { + .cra_name = "rsa", + .cra_driver_name = "rsa-caam", + .cra_priority = 3000, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct caam_rsa_ctx), + }, + } +}; + +/* Public Key Cryptography module initialization handler */ +int caam_pkc_init(struct device *ctrldev) +{ + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); + u32 pk_inst, pkha; + int err; + init_done = false; + + /* Determine public key hardware accelerator presence. */ + if (priv->era < 10) { + pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; + } else { + pkha = rd_reg32(&priv->ctrl->vreg.pkha); + pk_inst = pkha & CHA_VER_NUM_MASK; + + /* + * Newer CAAMs support partially disabled functionality. If this is the + * case, the number is non-zero, but this bit is set to indicate that + * no encryption or decryption is supported. Only signing and verifying + * is supported. + */ + if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT) + pk_inst = 0; + } + + /* Do not register algorithms if PKHA is not present. 
 */ + if (!pk_inst) + return 0; + + /* allocate zero buffer, used for padding input */ + zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA | + GFP_KERNEL); + if (!zero_buffer) + return -ENOMEM; + + err = crypto_register_akcipher(&caam_rsa.akcipher); + + if (err) { + kfree(zero_buffer); + dev_warn(ctrldev, "%s alg registration failed\n", + caam_rsa.akcipher.base.cra_driver_name); + } else { + init_done = true; + caam_rsa.registered = true; + dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); + } + + return err; +} + +void caam_pkc_exit(void) +{ + if (!init_done) + return; + + if (caam_rsa.registered) + crypto_unregister_akcipher(&caam_rsa.akcipher); + + kfree(zero_buffer); +} diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h new file mode 100644 index 000000000..cc889a525 --- /dev/null +++ b/drivers/crypto/caam/caampkc.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors + * + * Copyright 2016 Freescale Semiconductor, Inc. + * + * There is no Shared Descriptor for PKC, so the Job Descriptor must carry + * all the desired key parameters, input and output pointers. + */ + +#ifndef _PKC_DESC_H_ +#define _PKC_DESC_H_ +#include "compat.h" +#include "pdb.h" +#include <crypto/engine.h> + +/** + * caam_priv_key_form - CAAM RSA private key representation + * CAAM RSA private key may have one of three forms. + * + * 1. The first representation consists of the pair (n, d), where the + * components have the following meanings: + * n the RSA modulus + * d the RSA private exponent + * + * 2. The second representation consists of the triplet (p, q, d), where the + * components have the following meanings: + * p the first prime factor of the RSA modulus n + * q the second prime factor of the RSA modulus n + * d the RSA private exponent + * + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv), + * where the components have the following meanings: + * p the first prime factor of the RSA modulus n + * q the second prime factor of the RSA modulus n + * dP the first factor's CRT exponent + * dQ the second factor's CRT exponent + * qInv the (first) CRT coefficient + * + * The benefit of using the third or the second key form is lower computational + * cost for the decryption and signature operations. + */ +enum caam_priv_key_form { + FORM1, + FORM2, + FORM3 +}; + +/** + * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone. + * @n : RSA modulus raw byte stream + * @e : RSA public exponent raw byte stream + * @d : RSA private exponent raw byte stream + * @p : RSA prime factor p of RSA modulus n + * @q : RSA prime factor q of RSA modulus n + * @dp : RSA CRT exponent of p + * @dq : RSA CRT exponent of q + * @qinv : RSA CRT coefficient + * @tmp1 : CAAM uses this temporary buffer as internal state buffer. + * It is assumed to be as long as p. + * @tmp2 : CAAM uses this temporary buffer as internal state buffer. + * It is assumed to be as long as q. 
+ * @n_sz : length in bytes of RSA modulus n + * @e_sz : length in bytes of RSA public exponent + * @d_sz : length in bytes of RSA private exponent + * @p_sz : length in bytes of RSA prime factor p of RSA modulus n + * @q_sz : length in bytes of RSA prime factor q of RSA modulus n + * @priv_form : CAAM RSA private key representation + */ +struct caam_rsa_key { + u8 *n; + u8 *e; + u8 *d; + u8 *p; + u8 *q; + u8 *dp; + u8 *dq; + u8 *qinv; + u8 *tmp1; + u8 *tmp2; + size_t n_sz; + size_t e_sz; + size_t d_sz; + size_t p_sz; + size_t q_sz; + enum caam_priv_key_form priv_form; +}; + +/** + * caam_rsa_ctx - per session context. + * @enginectx : crypto engine context + * @key : RSA key in DMA zone + * @dev : device structure + * @padding_dma : dma address of padding, for adding it to the input + */ +struct caam_rsa_ctx { + struct crypto_engine_ctx enginectx; + struct caam_rsa_key key; + struct device *dev; + dma_addr_t padding_dma; + +}; + +/** + * caam_rsa_req_ctx - per request context. + * @src : input scatterlist (stripped of leading zeros) + * @fixup_src : input scatterlist (that might be stripped of leading zeros) + * @fixup_src_len : length of the fixup_src input scatterlist + * @edesc : s/w-extended rsa descriptor + * @akcipher_op_done : callback used when operation is done + */ +struct caam_rsa_req_ctx { + struct scatterlist src[2]; + struct scatterlist *fixup_src; + unsigned int fixup_src_len; + struct rsa_edesc *edesc; + void (*akcipher_op_done)(struct device *jrdev, u32 *desc, u32 err, + void *context); +}; + +/** + * rsa_edesc - s/w-extended rsa descriptor + * @src_nents : number of segments in input s/w scatterlist + * @dst_nents : number of segments in output s/w scatterlist + * @mapped_src_nents: number of segments in input h/w link table + * @mapped_dst_nents: number of segments in output h/w link table + * @sec4_sg_bytes : length of h/w link table + * @bklog : stored to determine if the request needs backlog + * @sec4_sg_dma : dma address of h/w link table + * @sec4_sg : pointer to h/w link table + * @pdb : specific RSA Protocol Data Block (PDB) + * @hw_desc : descriptor followed by link tables if any + */ +struct rsa_edesc { + int src_nents; + int dst_nents; + int mapped_src_nents; + int mapped_dst_nents; + int sec4_sg_bytes; + bool bklog; + dma_addr_t sec4_sg_dma; + struct sec4_sg_entry *sec4_sg; + union { + struct rsa_pub_pdb pub; + struct rsa_priv_f1_pdb priv_f1; + struct rsa_priv_f2_pdb priv_f2; + struct rsa_priv_f3_pdb priv_f3; + } pdb; + u32 hw_desc[]; +}; + +/* Descriptor construction primitives. */ +void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb); +void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb); +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb); +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb); + +#endif diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c new file mode 100644 index 000000000..77d048dfe --- /dev/null +++ b/drivers/crypto/caam/caamrng.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * caam - Freescale FSL CAAM support for hw_random + * + * Copyright 2011 Freescale Semiconductor, Inc. + * Copyright 2018-2019 NXP + * + * Based on caamalg.c crypto API driver. 
+ * + */ + +#include <linux/hw_random.h> +#include <linux/completion.h> +#include <linux/atomic.h> +#include <linux/kfifo.h> + +#include "compat.h" + +#include "regs.h" +#include "intern.h" +#include "desc_constr.h" +#include "jr.h" +#include "error.h" + +#define CAAM_RNG_MAX_FIFO_STORE_SIZE 16 + +/* + * Length of used descriptors, see caam_init_desc() + */ +#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ + \ + CAAM_CMD_SZ + \ + CAAM_CMD_SZ + CAAM_PTR_SZ_MAX) + +/* rng per-device context */ +struct caam_rng_ctx { + struct hwrng rng; + struct device *jrdev; + struct device *ctrldev; + void *desc_async; + void *desc_sync; + struct work_struct worker; + struct kfifo fifo; +}; + +struct caam_rng_job_ctx { + struct completion *done; + int *err; +}; + +static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r) +{ + return (struct caam_rng_ctx *)r->priv; +} + +static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct caam_rng_job_ctx *jctx = context; + + if (err) + *jctx->err = caam_jr_strstatus(jrdev, err); + + complete(jctx->done); +} + +static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma) +{ + init_job_desc(desc, 0); /* + 1 cmd_sz */ + /* Generate random bytes: + 1 cmd_sz */ + append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG | + OP_ALG_PR_ON); + /* Store bytes: + 1 cmd_sz + caam_ptr_sz */ + append_fifo_store(desc, dst_dma, + CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE); + + print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, + 16, 4, desc, desc_bytes(desc), 1); + + return desc; +} + +static int caam_rng_read_one(struct device *jrdev, + void *dst, int len, + void *desc, + struct completion *done) +{ + dma_addr_t dst_dma; + int err, ret = 0; + struct caam_rng_job_ctx jctx = { + .done = done, + .err = &ret, + }; + + len = CAAM_RNG_MAX_FIFO_STORE_SIZE; + + dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, dst_dma)) { + dev_err(jrdev, "unable to map destination memory\n"); + return -ENOMEM; + } + + init_completion(done); + err = caam_jr_enqueue(jrdev, + caam_init_desc(desc, dst_dma), + caam_rng_done, &jctx); + if (err == -EINPROGRESS) { + wait_for_completion(done); + err = 0; + } + + dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE); + + return err ?: (ret ?: len); +} + +static void caam_rng_fill_async(struct caam_rng_ctx *ctx) +{ + struct scatterlist sg[1]; + struct completion done; + int len, nents; + + sg_init_table(sg, ARRAY_SIZE(sg)); + nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg), + CAAM_RNG_MAX_FIFO_STORE_SIZE); + if (!nents) + return; + + len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]), + sg[0].length, + ctx->desc_async, + &done); + if (len < 0) + return; + + kfifo_dma_in_finish(&ctx->fifo, len); +} + +static void caam_rng_worker(struct work_struct *work) +{ + struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx, + worker); + caam_rng_fill_async(ctx); +} + +static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait) +{ + struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng); + int out; + + if (wait) { + struct completion done; + + return caam_rng_read_one(ctx->jrdev, dst, max, + ctx->desc_sync, &done); + } + + out = kfifo_out(&ctx->fifo, dst, max); + if (kfifo_is_empty(&ctx->fifo)) + schedule_work(&ctx->worker); + + return out; +} + +static void caam_cleanup(struct hwrng *rng) +{ + struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng); + + flush_work(&ctx->worker); + caam_jr_free(ctx->jrdev); + kfifo_free(&ctx->fifo); +} + +static int 
caam_init(struct hwrng *rng) +{ + struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng); + int err; + + ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN, + GFP_DMA | GFP_KERNEL); + if (!ctx->desc_sync) + return -ENOMEM; + + ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN, + GFP_DMA | GFP_KERNEL); + if (!ctx->desc_async) + return -ENOMEM; + + if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE, + GFP_DMA | GFP_KERNEL)) + return -ENOMEM; + + INIT_WORK(&ctx->worker, caam_rng_worker); + + ctx->jrdev = caam_jr_alloc(); + err = PTR_ERR_OR_ZERO(ctx->jrdev); + if (err) { + kfifo_free(&ctx->fifo); + pr_err("Job Ring Device allocation for transform failed\n"); + return err; + } + + /* + * Fill async buffer to have early randomness data for + * hw_random + */ + caam_rng_fill_async(ctx); + + return 0; +} + +int caam_rng_init(struct device *ctrldev); + +void caam_rng_exit(struct device *ctrldev) +{ + devres_release_group(ctrldev, caam_rng_init); +} + +int caam_rng_init(struct device *ctrldev) +{ + struct caam_rng_ctx *ctx; + u32 rng_inst; + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); + int ret; + + /* Check for an instantiated RNG before registration */ + if (priv->era < 10) + rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; + else + rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; + + if (!rng_inst) + return 0; + + if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL)) + return -ENOMEM; + + ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->ctrldev = ctrldev; + + ctx->rng.name = "rng-caam"; + ctx->rng.init = caam_init; + ctx->rng.cleanup = caam_cleanup; + ctx->rng.read = caam_read; + ctx->rng.priv = (unsigned long)ctx; + ctx->rng.quality = 1024; + + dev_info(ctrldev, "registering rng-caam\n"); + + ret = devm_hwrng_register(ctrldev, &ctx->rng); + if (ret) { + caam_rng_exit(ctrldev); + return ret; + } + + devres_close_group(ctrldev, caam_rng_init); + return 0; +} diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h new file mode 100644 index 000000000..c3c22a8de --- /dev/null +++ b/drivers/crypto/caam/compat.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2008-2011 Freescale Semiconductor, Inc. 
+ */ + +#ifndef CAAM_COMPAT_H +#define CAAM_COMPAT_H + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/crypto.h> +#include <linux/hash.h> +#include <linux/hw_random.h> +#include <linux/of_platform.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/iommu.h> +#include <linux/spinlock.h> +#include <linux/rtnetlink.h> +#include <linux/in.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/debugfs.h> +#include <linux/circ_buf.h> +#include <linux/clk.h> +#include <net/xfrm.h> + +#include <crypto/algapi.h> +#include <crypto/null.h> +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/internal/des.h> +#include <crypto/gcm.h> +#include <crypto/sha.h> +#include <crypto/md5.h> +#include <crypto/chacha.h> +#include <crypto/poly1305.h> +#include <crypto/internal/aead.h> +#include <crypto/authenc.h> +#include <crypto/akcipher.h> +#include <crypto/scatterwalk.h> +#include <crypto/skcipher.h> +#include <crypto/internal/skcipher.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/rsa.h> +#include <crypto/internal/akcipher.h> + +#endif /* !defined(CAAM_COMPAT_H) */ diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c new file mode 100644 index 000000000..f9a1ec3c8 --- /dev/null +++ b/drivers/crypto/caam/ctrl.c @@ -0,0 +1,945 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* * CAAM control-plane driver backend + * Controller-level driver, kernel property detection, initialization + * + * Copyright 2008-2012 Freescale Semiconductor, Inc. + * Copyright 2018-2019 NXP + */ + +#include <linux/device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sys_soc.h> +#include <linux/fsl/mc.h> + +#include "compat.h" +#include "debugfs.h" +#include "regs.h" +#include "intern.h" +#include "jr.h" +#include "desc_constr.h" +#include "ctrl.h" + +bool caam_dpaa2; +EXPORT_SYMBOL(caam_dpaa2); + +#ifdef CONFIG_CAAM_QI +#include "qi.h" +#endif + +/* + * Descriptor to instantiate RNG State Handle 0 in normal mode and + * load the JDKEK, TDKEK and TDSK registers + */ +static void build_instantiation_desc(u32 *desc, int handle, int do_sk) +{ + u32 *jump_cmd, op_flags; + + init_job_desc(desc, 0); + + op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | + (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT | + OP_ALG_PR_ON; + + /* INIT RNG in non-test mode */ + append_operation(desc, op_flags); + + if (!handle && do_sk) { + /* + * For SH0, Secure Keys must be generated as well + */ + + /* wait for done */ + jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); + set_jump_tgt_here(desc, jump_cmd); + + /* + * load 1 to clear written reg: + * resets the done interrupt and returns the RNG to idle. + */ + append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); + + /* Initialize State Handle */ + append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | + OP_ALG_AAI_RNG4_SK); + } + + append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); +} + +/* Descriptor for deinstantiation of State Handle 0 of the RNG block. 
*/ +static void build_deinstantiation_desc(u32 *desc, int handle) +{ + init_job_desc(desc, 0); + + /* Uninstantiate State Handle 0 */ + append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | + (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL); + + append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); +} + +/* + * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of + * the software (no JR/QI used). + * @ctrldev - pointer to device + * @status - descriptor status, after being run + * + * Return: - 0 if no error occurred + * - -ENODEV if the DECO couldn't be acquired + * - -EAGAIN if an error occurred while executing the descriptor + */ +static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, + u32 *status) +{ + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); + struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; + struct caam_deco __iomem *deco = ctrlpriv->deco; + unsigned int timeout = 100000; + u32 deco_dbg_reg, deco_state, flags; + int i; + + + if (ctrlpriv->virt_en == 1 || + /* + * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1 + * and the following steps should be performed regardless + */ + of_machine_is_compatible("fsl,imx8mq") || + of_machine_is_compatible("fsl,imx8mm") || + of_machine_is_compatible("fsl,imx8mn") || + of_machine_is_compatible("fsl,imx8mp")) { + clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0); + + while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && + --timeout) + cpu_relax(); + + timeout = 100000; + } + + clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE); + + while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) && + --timeout) + cpu_relax(); + + if (!timeout) { + dev_err(ctrldev, "failed to acquire DECO 0\n"); + clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0); + return -ENODEV; + } + + for (i = 0; i < desc_len(desc); i++) + wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i))); + + flags = DECO_JQCR_WHL; + /* + * If the descriptor length is longer than 4 words, then the + * FOUR bit in JRCTRL register must be set. + */ + if (desc_len(desc) >= 4) + flags |= DECO_JQCR_FOUR; + + /* Instruct the DECO to execute it */ + clrsetbits_32(&deco->jr_ctl_hi, 0, flags); + + timeout = 10000000; + do { + deco_dbg_reg = rd_reg32(&deco->desc_dbg); + + if (ctrlpriv->era < 10) + deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >> + DESC_DBG_DECO_STAT_SHIFT; + else + deco_state = (rd_reg32(&deco->dbg_exec) & + DESC_DER_DECO_STAT_MASK) >> + DESC_DER_DECO_STAT_SHIFT; + + /* + * If an error occurred in the descriptor, then + * the DECO status field will be set to 0x0D + */ + if (deco_state == DECO_STAT_HOST_ERR) + break; + + cpu_relax(); + } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout); + + *status = rd_reg32(&deco->op_status_hi) & + DECO_OP_STATUS_HI_ERR_MASK; + + if (ctrlpriv->virt_en == 1) + clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0); + + /* Mark the DECO as free */ + clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0); + + if (!timeout) + return -EAGAIN; + + return 0; +} + +/* + * deinstantiate_rng - builds and executes a descriptor on DECO0, + * which deinitializes the RNG block. 
+ * @ctrldev - pointer to device + * @state_handle_mask - bitmask containing the instantiation status + * for the RNG4 state handles which exist in + * the RNG4 block: 1 if it's been instantiated + * + * Return: - 0 if no error occurred + * - -ENOMEM if there isn't enough memory to allocate the descriptor + * - -ENODEV if DECO0 couldn't be acquired + * - -EAGAIN if an error occurred when executing the descriptor + */ +static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) +{ + u32 *desc, status; + int sh_idx, ret = 0; + + desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA); + if (!desc) + return -ENOMEM; + + for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { + /* + * If the corresponding bit is set, then it means the state + * handle was initialized by us, and thus it needs to be + * deinitialized as well + */ + if ((1 << sh_idx) & state_handle_mask) { + /* + * Create the descriptor for deinstantating this state + * handle + */ + build_deinstantiation_desc(desc, sh_idx); + + /* Try to run it through DECO0 */ + ret = run_descriptor_deco0(ctrldev, desc, &status); + + if (ret || + (status && status != JRSTA_SSRC_JUMP_HALT_CC)) { + dev_err(ctrldev, + "Failed to deinstantiate RNG4 SH%d\n", + sh_idx); + break; + } + dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx); + } + } + + kfree(desc); + + return ret; +} + +static void devm_deinstantiate_rng(void *data) +{ + struct device *ctrldev = data; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); + + /* + * De-initialize RNG state handles initialized by this driver. + * In case of SoCs with Management Complex, RNG is managed by MC f/w. + */ + if (ctrlpriv->rng4_sh_init) + deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); +} + +/* + * instantiate_rng - builds and executes a descriptor on DECO0, + * which initializes the RNG block. + * @ctrldev - pointer to device + * @state_handle_mask - bitmask containing the instantiation status + * for the RNG4 state handles which exist in + * the RNG4 block: 1 if it's been instantiated + * by an external entry, 0 otherwise. + * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK; + * Caution: this can be done only once; if the keys need to be + * regenerated, a POR is required + * + * Return: - 0 if no error occurred + * - -ENOMEM if there isn't enough memory to allocate the descriptor + * - -ENODEV if DECO0 couldn't be acquired + * - -EAGAIN if an error occurred when executing the descriptor + * f.i. there was a RNG hardware error due to not "good enough" + * entropy being acquired. + */ +static int instantiate_rng(struct device *ctrldev, int state_handle_mask, + int gen_sk) +{ + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); + struct caam_ctrl __iomem *ctrl; + u32 *desc, status = 0, rdsta_val; + int ret = 0, sh_idx; + + ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; + desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA); + if (!desc) + return -ENOMEM; + + for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { + const u32 rdsta_if = RDSTA_IF0 << sh_idx; + const u32 rdsta_pr = RDSTA_PR0 << sh_idx; + const u32 rdsta_mask = rdsta_if | rdsta_pr; + + /* Clear the contents before using the descriptor */ + memset(desc, 0x00, CAAM_CMD_SZ * 7); + + /* + * If the corresponding bit is set, this state handle + * was initialized by somebody else, so it's left alone. 
+ */ + if (rdsta_if & state_handle_mask) { + if (rdsta_pr & state_handle_mask) + continue; + + dev_info(ctrldev, + "RNG4 SH%d was previously instantiated without prediction resistance. Tearing it down\n", + sh_idx); + + ret = deinstantiate_rng(ctrldev, rdsta_if); + if (ret) + break; + } + + /* Create the descriptor for instantiating RNG State Handle */ + build_instantiation_desc(desc, sh_idx, gen_sk); + + /* Try to run it through DECO0 */ + ret = run_descriptor_deco0(ctrldev, desc, &status); + + /* + * If ret is not 0, or descriptor status is not 0, then + * something went wrong. No need to try the next state + * handle (if available), bail out here. + * Also, if for some reason, the State Handle didn't get + * instantiated although the descriptor has finished + * without any error (HW optimizations for later + * CAAM eras), then try again. + */ + if (ret) + break; + + rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK; + if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || + (rdsta_val & rdsta_mask) != rdsta_mask) { + ret = -EAGAIN; + break; + } + + dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); + } + + kfree(desc); + + if (ret) + return ret; + + return devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng, ctrldev); +} + +/* + * kick_trng - sets the various parameters for enabling the initialization + * of the RNG4 block in CAAM + * @pdev - pointer to the platform device + * @ent_delay - Defines the length (in system clocks) of each entropy sample. + */ +static void kick_trng(struct platform_device *pdev, int ent_delay) +{ + struct device *ctrldev = &pdev->dev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); + struct caam_ctrl __iomem *ctrl; + struct rng4tst __iomem *r4tst; + u32 val; + + ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; + r4tst = &ctrl->r4tst[0]; + + /* + * Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to + * properly invalidate the entropy in the entropy register and + * force re-generation. + */ + clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC); + + /* + * Performance-wise, it does not make sense to + * set the delay to a value that is lower + * than the last one that worked (i.e. the state handles + * were instantiated properly. Thus, instead of wasting + * time trying to set the values controlling the sample + * frequency, the function simply returns. + */ + val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) + >> RTSDCTL_ENT_DLY_SHIFT; + if (ent_delay <= val) + goto start_rng; + + val = rd_reg32(&r4tst->rtsdctl); + val = (val & ~RTSDCTL_ENT_DLY_MASK) | + (ent_delay << RTSDCTL_ENT_DLY_SHIFT); + wr_reg32(&r4tst->rtsdctl, val); + /* min. freq. 
count, equal to 1/4 of the entropy sample length */ + wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2); + /* disable maximum frequency count */ + wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE); + /* read the control register */ + val = rd_reg32(&r4tst->rtmctl); +start_rng: + /* + * select raw sampling in both entropy shifter + * and statistical checker; ; put RNG4 into run mode + */ + clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC, + RTMCTL_SAMP_MODE_RAW_ES_SC); +} + +static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl) +{ + static const struct { + u16 ip_id; + u8 maj_rev; + u8 era; + } id[] = { + {0x0A10, 1, 1}, + {0x0A10, 2, 2}, + {0x0A12, 1, 3}, + {0x0A14, 1, 3}, + {0x0A14, 2, 4}, + {0x0A16, 1, 4}, + {0x0A10, 3, 4}, + {0x0A11, 1, 4}, + {0x0A18, 1, 4}, + {0x0A11, 2, 5}, + {0x0A12, 2, 5}, + {0x0A13, 1, 5}, + {0x0A1C, 1, 5} + }; + u32 ccbvid, id_ms; + u8 maj_rev, era; + u16 ip_id; + int i; + + ccbvid = rd_reg32(&ctrl->perfmon.ccb_id); + era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT; + if (era) /* This is '0' prior to CAAM ERA-6 */ + return era; + + id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms); + ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT; + maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT; + + for (i = 0; i < ARRAY_SIZE(id); i++) + if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev) + return id[i].era; + + return -ENOTSUPP; +} + +/** + * caam_get_era() - Return the ERA of the SEC on SoC, based + * on "sec-era" optional property in the DTS. This property is updated + * by u-boot. + * In case this property is not passed an attempt to retrieve the CAAM + * era via register reads will be made. + * + * @ctrl: controller region + */ +static int caam_get_era(struct caam_ctrl __iomem *ctrl) +{ + struct device_node *caam_node; + int ret; + u32 prop; + + caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); + ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop); + of_node_put(caam_node); + + if (!ret) + return prop; + else + return caam_get_era_from_hw(ctrl); +} + +/* + * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP) + * have an issue wherein AXI bus transactions may not occur in the correct + * order. This isn't a problem running single descriptors, but can be if + * running multiple concurrent descriptors. Reworking the driver to throttle + * to single requests is impractical, thus the workaround is to limit the AXI + * pipeline to a depth of 1 (from it's default of 4) to preclude this situation + * from occurring. 
+ */ +static void handle_imx6_err005766(u32 __iomem *mcr) +{ + if (of_machine_is_compatible("fsl,imx6q") || + of_machine_is_compatible("fsl,imx6dl") || + of_machine_is_compatible("fsl,imx6qp")) + clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK, + 1 << MCFGR_AXIPIPE_SHIFT); +} + +static const struct of_device_id caam_match[] = { + { + .compatible = "fsl,sec-v4.0", + }, + { + .compatible = "fsl,sec4.0", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, caam_match); + +struct caam_imx_data { + const struct clk_bulk_data *clks; + int num_clks; +}; + +static const struct clk_bulk_data caam_imx6_clks[] = { + { .id = "ipg" }, + { .id = "mem" }, + { .id = "aclk" }, + { .id = "emi_slow" }, +}; + +static const struct caam_imx_data caam_imx6_data = { + .clks = caam_imx6_clks, + .num_clks = ARRAY_SIZE(caam_imx6_clks), +}; + +static const struct clk_bulk_data caam_imx7_clks[] = { + { .id = "ipg" }, + { .id = "aclk" }, +}; + +static const struct caam_imx_data caam_imx7_data = { + .clks = caam_imx7_clks, + .num_clks = ARRAY_SIZE(caam_imx7_clks), +}; + +static const struct clk_bulk_data caam_imx6ul_clks[] = { + { .id = "ipg" }, + { .id = "mem" }, + { .id = "aclk" }, +}; + +static const struct caam_imx_data caam_imx6ul_data = { + .clks = caam_imx6ul_clks, + .num_clks = ARRAY_SIZE(caam_imx6ul_clks), +}; + +static const struct clk_bulk_data caam_vf610_clks[] = { + { .id = "ipg" }, +}; + +static const struct caam_imx_data caam_vf610_data = { + .clks = caam_vf610_clks, + .num_clks = ARRAY_SIZE(caam_vf610_clks), +}; + +static const struct soc_device_attribute caam_imx_soc_table[] = { + { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, + { .soc_id = "i.MX6*", .data = &caam_imx6_data }, + { .soc_id = "i.MX7*", .data = &caam_imx7_data }, + { .soc_id = "i.MX8M*", .data = &caam_imx7_data }, + { .soc_id = "VF*", .data = &caam_vf610_data }, + { .family = "Freescale i.MX" }, + { /* sentinel */ } +}; + +static void disable_clocks(void *data) +{ + struct caam_drv_private *ctrlpriv = data; + + clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks); +} + +static int init_clocks(struct device *dev, const struct caam_imx_data *data) +{ + struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); + int ret; + + ctrlpriv->num_clks = data->num_clks; + ctrlpriv->clks = devm_kmemdup(dev, data->clks, + data->num_clks * sizeof(data->clks[0]), + GFP_KERNEL); + if (!ctrlpriv->clks) + return -ENOMEM; + + ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks); + if (ret) { + dev_err(dev, + "Failed to request all necessary clocks\n"); + return ret; + } + + ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks); + if (ret) { + dev_err(dev, + "Failed to prepare/enable all necessary clocks\n"); + return ret; + } + + return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv); +} + +static void caam_remove_debugfs(void *root) +{ + debugfs_remove_recursive(root); +} + +#ifdef CONFIG_FSL_MC_BUS +static bool check_version(struct fsl_mc_version *mc_version, u32 major, + u32 minor, u32 revision) +{ + if (mc_version->major > major) + return true; + + if (mc_version->major == major) { + if (mc_version->minor > minor) + return true; + + if (mc_version->minor == minor && + mc_version->revision > revision) + return true; + } + + return false; +} +#endif + +static bool needs_entropy_delay_adjustment(void) +{ + if (of_machine_is_compatible("fsl,imx6sx")) + return true; + return false; +} + +/* Probe routine for CAAM top (controller) level */ +static int caam_probe(struct platform_device *pdev) +{ + int ret, ring, gen_sk, ent_delay = 
RTSDCTL_ENT_DLY_MIN; + u64 caam_id; + const struct soc_device_attribute *imx_soc_match; + struct device *dev; + struct device_node *nprop, *np; + struct caam_ctrl __iomem *ctrl; + struct caam_drv_private *ctrlpriv; + struct dentry *dfs_root; + u32 scfgr, comp_params; + u8 rng_vid; + int pg_size; + int BLOCK_OFFSET = 0; + bool pr_support = false; + + ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL); + if (!ctrlpriv) + return -ENOMEM; + + dev = &pdev->dev; + dev_set_drvdata(dev, ctrlpriv); + nprop = pdev->dev.of_node; + + imx_soc_match = soc_device_match(caam_imx_soc_table); + caam_imx = (bool)imx_soc_match; + + if (imx_soc_match) { + if (!imx_soc_match->data) { + dev_err(dev, "No clock data provided for i.MX SoC"); + return -EINVAL; + } + + ret = init_clocks(dev, imx_soc_match->data); + if (ret) + return ret; + } + + + /* Get configuration properties from device tree */ + /* First, get register page */ + ctrl = devm_of_iomap(dev, nprop, 0, NULL); + ret = PTR_ERR_OR_ZERO(ctrl); + if (ret) { + dev_err(dev, "caam: of_iomap() failed\n"); + return ret; + } + + caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) & + (CSTA_PLEND | CSTA_ALT_PLEND)); + comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); + if (comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR) + caam_ptr_sz = sizeof(u64); + else + caam_ptr_sz = sizeof(u32); + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); + +#ifdef CONFIG_CAAM_QI + /* If (DPAA 1.x) QI present, check whether dependencies are available */ + if (ctrlpriv->qi_present && !caam_dpaa2) { + ret = qman_is_probed(); + if (!ret) { + return -EPROBE_DEFER; + } else if (ret < 0) { + dev_err(dev, "failing probe due to qman probe error\n"); + return -ENODEV; + } + + ret = qman_portals_probed(); + if (!ret) { + return -EPROBE_DEFER; + } else if (ret < 0) { + dev_err(dev, "failing probe due to qman portals probe error\n"); + return -ENODEV; + } + } +#endif + + /* Allocating the BLOCK_OFFSET based on the supported page size on + * the platform + */ + pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; + if (pg_size == 0) + BLOCK_OFFSET = PG_SIZE_4K; + else + BLOCK_OFFSET = PG_SIZE_64K; + + ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl; + ctrlpriv->assure = (struct caam_assurance __iomem __force *) + ((__force uint8_t *)ctrl + + BLOCK_OFFSET * ASSURE_BLOCK_NUMBER + ); + ctrlpriv->deco = (struct caam_deco __iomem __force *) + ((__force uint8_t *)ctrl + + BLOCK_OFFSET * DECO_BLOCK_NUMBER + ); + + /* Get the IRQ of the controller (for security violations only) */ + ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0); + np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc"); + ctrlpriv->mc_en = !!np; + of_node_put(np); + +#ifdef CONFIG_FSL_MC_BUS + if (ctrlpriv->mc_en) { + struct fsl_mc_version *mc_version; + + mc_version = fsl_mc_get_version(); + if (mc_version) + pr_support = check_version(mc_version, 10, 20, 0); + else + return -EPROBE_DEFER; + } +#endif + + /* + * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, + * long pointers in master configuration register. + * In case of SoCs with Management Complex, MC f/w performs + * the configuration. 
+ */ + if (!ctrlpriv->mc_en) + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, + MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | + MCFGR_WDENABLE | MCFGR_LARGE_BURST); + + handle_imx6_err005766(&ctrl->mcr); + + /* + * Read the Compile Time parameters and SCFGR to determine + * if virtualization is enabled for this platform + */ + scfgr = rd_reg32(&ctrl->scfgr); + + ctrlpriv->virt_en = 0; + if (comp_params & CTPR_MS_VIRT_EN_INCL) { + /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or + * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1 + */ + if ((comp_params & CTPR_MS_VIRT_EN_POR) || + (!(comp_params & CTPR_MS_VIRT_EN_POR) && + (scfgr & SCFGR_VIRT_EN))) + ctrlpriv->virt_en = 1; + } else { + /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */ + if (comp_params & CTPR_MS_VIRT_EN_POR) + ctrlpriv->virt_en = 1; + } + + if (ctrlpriv->virt_en == 1) + clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START | + JRSTART_JR1_START | JRSTART_JR2_START | + JRSTART_JR3_START); + + ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev)); + if (ret) { + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); + return ret; + } + + ctrlpriv->era = caam_get_era(ctrl); + ctrlpriv->domain = iommu_get_domain_for_dev(dev); + + dfs_root = debugfs_create_dir(dev_name(dev), NULL); + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + ret = devm_add_action_or_reset(dev, caam_remove_debugfs, + dfs_root); + if (ret) + return ret; + } + + caam_debugfs_init(ctrlpriv, dfs_root); + + /* Check to see if (DPAA 1.x) QI present. If so, enable */ + if (ctrlpriv->qi_present && !caam_dpaa2) { + ctrlpriv->qi = (struct caam_queue_if __iomem __force *) + ((__force uint8_t *)ctrl + + BLOCK_OFFSET * QI_BLOCK_NUMBER + ); + /* This is all that's required to physically enable QI */ + wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN); + + /* If QMAN driver is present, init CAAM-QI backend */ +#ifdef CONFIG_CAAM_QI + ret = caam_qi_init(pdev); + if (ret) + dev_err(dev, "caam qi i/f init failed: %d\n", ret); +#endif + } + + ring = 0; + for_each_available_child_of_node(nprop, np) + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) + ((__force uint8_t *)ctrl + + (ring + JR_BLOCK_NUMBER) * + BLOCK_OFFSET + ); + ctrlpriv->total_jobrs++; + ring++; + } + + /* If no QI and no rings specified, quit and go home */ + if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { + dev_err(dev, "no queues configured, terminating\n"); + return -ENOMEM; + } + + if (ctrlpriv->era < 10) + rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) & + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; + else + rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >> + CHA_VER_VID_SHIFT; + + /* + * If SEC has RNG version >= 4 and RNG state handle has not been + * already instantiated, do RNG instantiation + * In case of SoCs with Management Complex, RNG is managed by MC f/w. + */ + if (!(ctrlpriv->mc_en && pr_support) && rng_vid >= 4) { + ctrlpriv->rng4_sh_init = + rd_reg32(&ctrl->r4tst[0].rdsta); + /* + * If the secure keys (TDKEK, JDKEK, TDSK), were already + * generated, signal this to the function that is instantiating + * the state handles. An error would occur if RNG4 attempts + * to regenerate these keys before the next POR. + */ + gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1; + ctrlpriv->rng4_sh_init &= RDSTA_MASK; + do { + int inst_handles = + rd_reg32(&ctrl->r4tst[0].rdsta) & + RDSTA_MASK; + /* + * If either SH were instantiated by somebody else + * (e.g. 
u-boot) then it is assumed that the entropy + * parameters are properly set and thus the function + * setting these (kick_trng(...)) is skipped. + * Also, if a handle was instantiated, do not change + * the TRNG parameters. + */ + if (needs_entropy_delay_adjustment()) + ent_delay = 12000; + if (!(ctrlpriv->rng4_sh_init || inst_handles)) { + dev_info(dev, + "Entropy delay = %u\n", + ent_delay); + kick_trng(pdev, ent_delay); + ent_delay += 400; + } + /* + * if instantiate_rng(...) fails, the loop will rerun + * and the kick_trng(...) function will modify the + * upper and lower limits of the entropy sampling + * interval, leading to a successful initialization of + * the RNG. + */ + ret = instantiate_rng(dev, inst_handles, + gen_sk); + /* + * Entropy delay is determined via TRNG characterization. + * TRNG characterization is run across different voltages + * and temperatures. + * If worst case value for ent_dly is identified, + * the loop can be skipped for that platform. + */ + if (needs_entropy_delay_adjustment()) + break; + if (ret == -EAGAIN) + /* + * if here, the loop will rerun, + * so don't hog the CPU + */ + cpu_relax(); + } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); + if (ret) { + dev_err(dev, "failed to instantiate RNG"); + return ret; + } + /* + * Set handles initialized by this module as the complement of + * the already initialized ones + */ + ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK; + + /* Enable RDB bit so that RNG works faster */ + clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE); + } + + /* NOTE: RTIC detection ought to go here, around Si time */ + + caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 | + (u64)rd_reg32(&ctrl->perfmon.caam_id_ls); + + /* Report "alive" for developer to see */ + dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, + ctrlpriv->era); + dev_info(dev, "job rings = %d, qi = %d\n", + ctrlpriv->total_jobrs, ctrlpriv->qi_present); + + ret = devm_of_platform_populate(dev); + if (ret) + dev_err(dev, "JR platform devices creation error\n"); + + return ret; +} + +static struct platform_driver caam_driver = { + .driver = { + .name = "caam", + .of_match_table = caam_match, + }, + .probe = caam_probe, +}; + +module_platform_driver(caam_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM request backend"); +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h new file mode 100644 index 000000000..f3ecd6792 --- /dev/null +++ b/drivers/crypto/caam/ctrl.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CAAM control-plane driver backend public-level include definitions + * + * Copyright 2012 Freescale Semiconductor, Inc. 
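Before moving on, it is worth restating the virtualization check in caam_probe() above in a compact form: virt_en ends up set whenever VIRT_EN_POR is set, or when VIRT_EN_INCL is set and SCFGR_VIRT_EN is set. The following standalone C sketch is an editor's illustration of that truth table only; the helper name and the user-space harness are invented for the example, and the actual register bit masks live in the driver's register header rather than here.

#include <stdbool.h>
#include <stdio.h>

/* Same decision as the nested if/else in caam_probe() */
static bool caam_virt_enabled(bool virt_en_incl, bool virt_en_por,
			      bool scfgr_virt_en)
{
	if (virt_en_incl)
		return virt_en_por || scfgr_virt_en;
	return virt_en_por;
}

int main(void)
{
	/* Walk the whole truth table; only INCL=1, POR=0 consults SCFGR */
	for (int i = 0; i < 8; i++) {
		bool incl = i & 4, por = i & 2, scfgr = i & 1;

		printf("INCL=%d POR=%d SCFGR_VIRT_EN=%d -> virt_en=%d\n",
		       incl, por, scfgr,
		       caam_virt_enabled(incl, por, scfgr));
	}
	return 0;
}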
+ */ + +#ifndef CTRL_H +#define CTRL_H + +/* Prototypes for backend-level services exposed to APIs */ +extern bool caam_dpaa2; + +#endif /* CTRL_H */ diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c new file mode 100644 index 000000000..8ebf18398 --- /dev/null +++ b/drivers/crypto/caam/debugfs.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include <linux/debugfs.h> +#include "compat.h" +#include "debugfs.h" +#include "regs.h" +#include "intern.h" + +static int caam_debugfs_u64_get(void *data, u64 *val) +{ + *val = caam64_to_cpu(*(u64 *)data); + return 0; +} + +static int caam_debugfs_u32_get(void *data, u64 *val) +{ + *val = caam32_to_cpu(*(u32 *)data); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); + +#ifdef CONFIG_CAAM_QI +/* + * This is a counter for the number of times the congestion group (where all + * the request and response queueus are) reached congestion. Incremented + * each time the congestion callback is called with congested == true. + */ +static u64 times_congested; + +void caam_debugfs_qi_congested(void) +{ + times_congested++; +} + +void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv) +{ + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, + ×_congested, &caam_fops_u64_ro); +} +#endif + +void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root) +{ + struct caam_perfmon *perfmon; + + /* + * FIXME: needs better naming distinction, as some amalgamation of + * "caam" and nprop->full_name. The OF name isn't distinctive, + * but does separate instances + */ + perfmon = (struct caam_perfmon __force *)&ctrlpriv->ctrl->perfmon; + + ctrlpriv->ctl = debugfs_create_dir("ctl", root); + + debugfs_create_file("rq_dequeued", 0444, ctrlpriv->ctl, + &perfmon->req_dequeued, &caam_fops_u64_ro); + debugfs_create_file("ob_rq_encrypted", 0444, ctrlpriv->ctl, + &perfmon->ob_enc_req, &caam_fops_u64_ro); + debugfs_create_file("ib_rq_decrypted", 0444, ctrlpriv->ctl, + &perfmon->ib_dec_req, &caam_fops_u64_ro); + debugfs_create_file("ob_bytes_encrypted", 0444, ctrlpriv->ctl, + &perfmon->ob_enc_bytes, &caam_fops_u64_ro); + debugfs_create_file("ob_bytes_protected", 0444, ctrlpriv->ctl, + &perfmon->ob_prot_bytes, &caam_fops_u64_ro); + debugfs_create_file("ib_bytes_decrypted", 0444, ctrlpriv->ctl, + &perfmon->ib_dec_bytes, &caam_fops_u64_ro); + debugfs_create_file("ib_bytes_validated", 0444, ctrlpriv->ctl, + &perfmon->ib_valid_bytes, &caam_fops_u64_ro); + + /* Controller level - global status values */ + debugfs_create_file("fault_addr", 0444, ctrlpriv->ctl, + &perfmon->faultaddr, &caam_fops_u32_ro); + debugfs_create_file("fault_detail", 0444, ctrlpriv->ctl, + &perfmon->faultdetail, &caam_fops_u32_ro); + debugfs_create_file("fault_status", 0444, ctrlpriv->ctl, + &perfmon->status, &caam_fops_u32_ro); + + /* Internal covering keys (useful in non-secure mode only) */ + ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0]; + ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); + debugfs_create_blob("kek", 0444, ctrlpriv->ctl, + &ctrlpriv->ctl_kek_wrap); + + ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0]; + ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); + debugfs_create_blob("tkek", 0444, ctrlpriv->ctl, + &ctrlpriv->ctl_tkek_wrap); + + ctrlpriv->ctl_tdsk_wrap.data = (__force void 
*)&ctrlpriv->ctrl->tdsk[0]; + ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); + debugfs_create_blob("tdsk", 0444, ctrlpriv->ctl, + &ctrlpriv->ctl_tdsk_wrap); +} diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h new file mode 100644 index 000000000..661d768ac --- /dev/null +++ b/drivers/crypto/caam/debugfs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2019 NXP */ + +#ifndef CAAM_DEBUGFS_H +#define CAAM_DEBUGFS_H + +struct dentry; +struct caam_drv_private; + +#ifdef CONFIG_DEBUG_FS +void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root); +#else +static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv, + struct dentry *root) +{} +#endif + +#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CAAM_QI) +void caam_debugfs_qi_congested(void); +void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv); +#else +static inline void caam_debugfs_qi_congested(void) {} +static inline void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv) {} +#endif + +#endif /* CAAM_DEBUGFS_H */ diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h new file mode 100644 index 000000000..e13470901 --- /dev/null +++ b/drivers/crypto/caam/desc.h @@ -0,0 +1,1687 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CAAM descriptor composition header + * Definitions to support CAAM descriptor instruction generation + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + * Copyright 2018 NXP + */ + +#ifndef DESC_H +#define DESC_H + +/* + * 16-byte hardware scatter/gather table + * An 8-byte table exists in the hardware spec, but has never been + * implemented to date. The 8/16 option is selected at RTL-compile-time. + * and this selection is visible in the Compile Time Parameters Register + */ + +#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */ +#define SEC4_SG_LEN_FIN 0x40000000 /* Last entry in table */ +#define SEC4_SG_BPID_MASK 0x000000ff +#define SEC4_SG_BPID_SHIFT 16 +#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */ +#define SEC4_SG_OFFSET_MASK 0x00001fff + +/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ +#define MAX_CAAM_DESCSIZE 64 + +/* Block size of any entity covered/uncovered with a KEK/TKEK */ +#define KEK_BLOCKSIZE 16 + +/* + * Supported descriptor command types as they show up + * inside a descriptor command word. 
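The SEC4_SG_* masks above fully describe the control words of a hardware scatter/gather entry. The following standalone sketch decodes them, assuming the usual split where one 32-bit word carries the EXT/FIN flags plus the length and a second word carries the buffer-pool ID and offset; the function below is an editor's illustration, not driver code, and the macro values are copied from the defines just shown.

#include <stdint.h>
#include <stdio.h>

#define SEC4_SG_LEN_EXT		0x80000000	/* Entry points to table */
#define SEC4_SG_LEN_FIN		0x40000000	/* Last entry in table */
#define SEC4_SG_BPID_MASK	0x000000ff
#define SEC4_SG_BPID_SHIFT	16
#define SEC4_SG_LEN_MASK	0x3fffffff	/* Excludes EXT and FINAL */
#define SEC4_SG_OFFSET_MASK	0x00001fff

static void decode_sec4_sg(uint32_t len_word, uint32_t bpid_offset_word)
{
	unsigned int ext = !!(len_word & SEC4_SG_LEN_EXT);
	unsigned int fin = !!(len_word & SEC4_SG_LEN_FIN);
	unsigned int len = len_word & SEC4_SG_LEN_MASK;
	unsigned int bpid = (bpid_offset_word >> SEC4_SG_BPID_SHIFT) &
			    SEC4_SG_BPID_MASK;
	unsigned int offset = bpid_offset_word & SEC4_SG_OFFSET_MASK;

	printf("ext=%u fin=%u len=%u bpid=%u offset=0x%x\n",
	       ext, fin, len, bpid, offset);
}

int main(void)
{
	/* A final (FIN) entry of 4096 bytes, buffer pool 3, offset 0x40 */
	decode_sec4_sg(SEC4_SG_LEN_FIN | 4096,
		       (3u << SEC4_SG_BPID_SHIFT) | 0x40);
	return 0;
}

Built with any C compiler, the example prints ext=0 fin=1 len=4096 bpid=3 offset=0x40 for that entry.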
+ */ +#define CMD_SHIFT 27 +#define CMD_MASK 0xf8000000 + +#define CMD_KEY (0x00 << CMD_SHIFT) +#define CMD_SEQ_KEY (0x01 << CMD_SHIFT) +#define CMD_LOAD (0x02 << CMD_SHIFT) +#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT) +#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT) +#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT) +#define CMD_STORE (0x0a << CMD_SHIFT) +#define CMD_SEQ_STORE (0x0b << CMD_SHIFT) +#define CMD_FIFO_STORE (0x0c << CMD_SHIFT) +#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT) +#define CMD_MOVE_LEN (0x0e << CMD_SHIFT) +#define CMD_MOVE (0x0f << CMD_SHIFT) +#define CMD_OPERATION (0x10 << CMD_SHIFT) +#define CMD_SIGNATURE (0x12 << CMD_SHIFT) +#define CMD_JUMP (0x14 << CMD_SHIFT) +#define CMD_MATH (0x15 << CMD_SHIFT) +#define CMD_DESC_HDR (0x16 << CMD_SHIFT) +#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT) +#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT) +#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT) + +/* General-purpose class selector for all commands */ +#define CLASS_SHIFT 25 +#define CLASS_MASK (0x03 << CLASS_SHIFT) + +#define CLASS_NONE (0x00 << CLASS_SHIFT) +#define CLASS_1 (0x01 << CLASS_SHIFT) +#define CLASS_2 (0x02 << CLASS_SHIFT) +#define CLASS_BOTH (0x03 << CLASS_SHIFT) + +/* + * Descriptor header command constructs + * Covers shared, job, and trusted descriptor headers + */ + +/* + * Do Not Run - marks a descriptor inexecutable if there was + * a preceding error somewhere + */ +#define HDR_DNR 0x01000000 + +/* + * ONE - should always be set. Combination of ONE (always + * set) and ZRO (always clear) forms an endianness sanity check + */ +#define HDR_ONE 0x00800000 +#define HDR_ZRO 0x00008000 + +/* Start Index or SharedDesc Length */ +#define HDR_START_IDX_SHIFT 16 +#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT) + +/* If shared descriptor header, 6-bit length */ +#define HDR_DESCLEN_SHR_MASK 0x3f + +/* If non-shared header, 7-bit length */ +#define HDR_DESCLEN_MASK 0x7f + +/* This is a TrustedDesc (if not SharedDesc) */ +#define HDR_TRUSTED 0x00004000 + +/* Make into TrustedDesc (if not SharedDesc) */ +#define HDR_MAKE_TRUSTED 0x00002000 + +/* Save context if self-shared (if SharedDesc) */ +#define HDR_SAVECTX 0x00001000 + +/* Next item points to SharedDesc */ +#define HDR_SHARED 0x00001000 + +/* + * Reverse Execution Order - execute JobDesc first, then + * execute SharedDesc (normally SharedDesc goes first). 
+ */ +#define HDR_REVERSE 0x00000800 + +/* Propagate DNR property to SharedDesc */ +#define HDR_PROP_DNR 0x00000800 + +/* JobDesc/SharedDesc share property */ +#define HDR_SD_SHARE_SHIFT 8 +#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT) +#define HDR_JD_SHARE_SHIFT 8 +#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT) + +#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT) +#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT) +#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT) +#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT) +#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT) + +/* JobDesc/SharedDesc descriptor length */ +#define HDR_JD_LENGTH_MASK 0x7f +#define HDR_SD_LENGTH_MASK 0x3f + +/* + * KEY/SEQ_KEY Command Constructs + */ + +/* Key Destination Class: 01 = Class 1, 02 - Class 2 */ +#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */ +#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT) + +/* Scatter-Gather Table/Variable Length Field */ +#define KEY_SGF 0x01000000 +#define KEY_VLF 0x01000000 + +/* Immediate - Key follows command in the descriptor */ +#define KEY_IMM 0x00800000 + +/* + * Encrypted - Key is encrypted either with the KEK, or + * with the TDKEK if TK is set + */ +#define KEY_ENC 0x00400000 + +/* + * No Write Back - Do not allow key to be FIFO STOREd + */ +#define KEY_NWB 0x00200000 + +/* + * Enhanced Encryption of Key + */ +#define KEY_EKT 0x00100000 + +/* + * Encrypted with Trusted Key + */ +#define KEY_TK 0x00008000 + +/* + * KDEST - Key Destination: 0 - class key register, + * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key + */ +#define KEY_DEST_SHIFT 16 +#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT) + +#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT) +#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT) +#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT) +#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT) + +/* Length in bytes */ +#define KEY_LENGTH_MASK 0x000003ff + +/* + * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs + */ + +/* + * Load/Store Destination: 0 = class independent CCB, + * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO + */ +#define LDST_CLASS_SHIFT 25 +#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT) +#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT) +#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT) +#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT) +#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT) + +/* Scatter-Gather Table/Variable Length Field */ +#define LDST_SGF 0x01000000 +#define LDST_VLF LDST_SGF + +/* Immediate - Key follows this command in descriptor */ +#define LDST_IMM_MASK 1 +#define LDST_IMM_SHIFT 23 +#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT) + +/* SRC/DST - Destination for LOAD, Source for STORE */ +#define LDST_SRCDST_SHIFT 16 +#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT) + +#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT) + +#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT) +#define 
LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT) +#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT) + +/* Offset in source/destination */ +#define LDST_OFFSET_SHIFT 8 +#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT) + +/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */ +/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */ +#define LDOFF_CHG_SHARE_SHIFT 0 +#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT) +#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT) +#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT) +#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT) + +#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2) +#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3) + +#define LDOFF_CHG_NONSEQLIODN_SHIFT 4 +#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT) +#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT) +#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT) +#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT) + +#define LDOFF_CHG_SEQLIODN_SHIFT 6 +#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT) +#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT) +#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT) +#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT) + +/* Data length in bytes */ +#define LDST_LEN_SHIFT 0 +#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT) + +/* Special Length definitions when dst=deco-ctrl */ +#define LDLEN_ENABLE_OSL_COUNT (1 << 7) +#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6) +#define LDLEN_RST_OFIFO (1 << 5) +#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4) +#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3) +#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0 +#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT) + +/* Special Length definitions when dst=sm, nfifo-{sm,m} */ +#define LDLEN_MATH0 0 +#define LDLEN_MATH1 1 +#define LDLEN_MATH2 2 +#define LDLEN_MATH3 3 + +/* + * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE + * Command Constructs + */ + +/* + * Load Destination: 0 = skip (SEQ_FIFO_LOAD only), + * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both + * 
Store Source: 0 = normal, 1 = Class1key, 2 = Class2key + */ +#define FIFOLD_CLASS_SHIFT 25 +#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT) +#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT) +#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT) +#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT) +#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT) + +#define FIFOST_CLASS_SHIFT 25 +#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT) +#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT) +#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT) +#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT) + +/* + * Scatter-Gather Table/Variable Length Field + * If set for FIFO_LOAD, refers to a SG table. Within + * SEQ_FIFO_LOAD, is variable input sequence + */ +#define FIFOLDST_SGF_SHIFT 24 +#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT) +#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT) +#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT) +#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT) + +/* Immediate - Data follows command in descriptor */ +#define FIFOLD_IMM_SHIFT 23 +#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT) +#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT) + +/* Continue - Not the last FIFO store to come */ +#define FIFOST_CONT_SHIFT 23 +#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT) + +/* + * Extended Length - use 32-bit extended length that + * follows the pointer field. Illegal with IMM set + */ +#define FIFOLDST_EXT_SHIFT 22 +#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT) +#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT) + +/* Input data type.*/ +#define FIFOLD_TYPE_SHIFT 16 +#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */ +#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT) + +/* PK types */ +#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT) + +/* Other types. 
Need to OR in last/flush bits as desired */ +#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT) + +/* Last/Flush bits for use with "other" types above */ +#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT) +#define FIFOLD_TYPE_NOINFOFIFO (0x0F << FIFOLD_TYPE_SHIFT) + +#define FIFOLDST_LEN_MASK 0xffff +#define FIFOLDST_EXT_LEN_MASK 0xffffffff + +/* Output data types */ +#define FIFOST_TYPE_SHIFT 16 +#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT) + +#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT) +#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT) + +/* + * OPERATION Command Constructs + */ + +/* Operation type selectors - OP TYPE */ +#define OP_TYPE_SHIFT 24 +#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT) + +#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT) +#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT) +#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT) +#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT) +#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT) +#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT) + +/* ProtocolID selectors - PROTID */ +#define OP_PCLID_SHIFT 16 +#define OP_PCLID_MASK (0xff << 16) + +/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */ +#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT) +#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT) +#define OP_PCLID_SSL30_PRF (0x08 << 
OP_PCLID_SHIFT) +#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT) +#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT) +#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT) +#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT) +#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT) +#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT) +#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT) +#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT) +#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) +#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) +#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT) +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT) + +/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ +#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) +#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT) +#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT) +#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT) +#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT) +#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT) +#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT) +#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT) +#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT) +#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT) + +/* + * ProtocolInfo selectors + */ +#define OP_PCLINFO_MASK 0xffff + +/* for OP_PCLID_IPSEC */ +#define OP_PCL_IPSEC_CIPHER_MASK 0xff00 +#define OP_PCL_IPSEC_AUTH_MASK 0x00ff + +#define OP_PCL_IPSEC_DES_IV64 0x0100 +#define OP_PCL_IPSEC_DES 0x0200 +#define OP_PCL_IPSEC_3DES 0x0300 +#define OP_PCL_IPSEC_AES_CBC 0x0c00 +#define OP_PCL_IPSEC_AES_CTR 0x0d00 +#define OP_PCL_IPSEC_AES_XTS 0x1600 +#define OP_PCL_IPSEC_AES_CCM8 0x0e00 +#define OP_PCL_IPSEC_AES_CCM12 0x0f00 +#define OP_PCL_IPSEC_AES_CCM16 0x1000 +#define OP_PCL_IPSEC_AES_GCM8 0x1200 +#define OP_PCL_IPSEC_AES_GCM12 0x1300 +#define OP_PCL_IPSEC_AES_GCM16 0x1400 + +#define OP_PCL_IPSEC_HMAC_NULL 0x0000 +#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001 +#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002 +#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005 +#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006 +#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007 +#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c +#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d +#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e + +/* For SRTP - OP_PCLID_SRTP */ +#define OP_PCL_SRTP_CIPHER_MASK 0xff00 +#define OP_PCL_SRTP_AUTH_MASK 0x00ff + +#define OP_PCL_SRTP_AES_CTR 0x0d00 + +#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007 + +/* For SSL 3.0 - OP_PCLID_SSL30 */ +#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f +#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004 +#define 
OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e +#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035 +#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a +#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019 +#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022 + +#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023 + +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c + +#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029 + +#define OP_PCL_SSL30_DES_CBC_MD5 0x0022 + +#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008 +#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b +#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e +#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026 + +#define OP_PCL_SSL30_DES_CBC_SHA 0x001e +#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009 +#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c +#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f +#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012 +#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015 +#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a + +#define OP_PCL_SSL30_RC4_128_MD5 0x0024 +#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004 +#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018 + +#define OP_PCL_SSL30_RC4_40_MD5 0x002b +#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003 +#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017 + +#define OP_PCL_SSL30_RC4_128_SHA 0x0020 +#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a +#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e +#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092 +#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005 +#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002 +#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007 +#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c +#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011 +#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016 + +#define OP_PCL_SSL30_RC4_40_SHA 0x0028 + + +/* For TLS 1.0 - OP_PCLID_TLS10 */ +#define 
OP_PCL_TLS10_AES_128_CBC_SHA 0x002f +#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004 +#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e +#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035 +#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a +#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019 +#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022 + +/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */ + +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c + +#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029 + +#define OP_PCL_TLS10_DES_CBC_MD5 0x0022 + +#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008 +#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b +#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e +#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026 + + +#define OP_PCL_TLS10_DES_CBC_SHA 0x001e +#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009 +#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c +#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f +#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012 +#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015 +#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a + +#define OP_PCL_TLS10_RC4_128_MD5 0x0024 +#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004 +#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018 + +#define OP_PCL_TLS10_RC4_40_MD5 0x002b +#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003 +#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017 + +#define OP_PCL_TLS10_RC4_128_SHA 0x0020 +#define 
OP_PCL_TLS10_RC4_128_SHA_2 0x008a +#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e +#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092 +#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005 +#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002 +#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007 +#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c +#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011 +#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016 + +#define OP_PCL_TLS10_RC4_40_SHA 0x0028 + +#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33 +#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35 +#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80 +#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84 +#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86 +#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83 +#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85 +#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20 +#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24 +#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26 +#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23 +#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25 +#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60 +#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64 +#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66 +#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63 +#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65 + + + +/* For TLS 1.1 - OP_PCLID_TLS11 */ +#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f +#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004 +#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e +#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035 +#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a +#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019 +#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022 + +/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */ + +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013 +#define 
OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c + +#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029 + +#define OP_PCL_TLS11_DES_CBC_MD5 0x0022 + +#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008 +#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b +#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e +#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026 + +#define OP_PCL_TLS11_DES_CBC_SHA 0x001e +#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009 +#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c +#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f +#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012 +#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015 +#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a + +#define OP_PCL_TLS11_RC4_128_MD5 0x0024 +#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004 +#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018 + +#define OP_PCL_TLS11_RC4_40_MD5 0x002b +#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003 +#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017 + +#define OP_PCL_TLS11_RC4_128_SHA 0x0020 +#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a +#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e +#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092 +#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005 +#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002 +#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007 +#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c +#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011 +#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016 + +#define OP_PCL_TLS11_RC4_40_SHA 0x0028 + +#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33 +#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35 +#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80 +#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84 +#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86 +#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83 +#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85 +#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20 +#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24 +#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26 +#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23 +#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25 +#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60 +#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64 +#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66 +#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63 +#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65 + + +/* For TLS 1.2 - OP_PCLID_TLS12 */ +#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f +#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004 +#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e +#define 
OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035 +#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a +#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019 +#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022 + +/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */ + +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c + +#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029 + +#define OP_PCL_TLS12_DES_CBC_MD5 0x0022 + +#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008 +#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b +#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e +#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026 + +#define OP_PCL_TLS12_DES_CBC_SHA 0x001e +#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009 +#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c +#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f +#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012 +#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015 +#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a + +#define OP_PCL_TLS12_RC4_128_MD5 0x0024 +#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004 +#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018 + +#define OP_PCL_TLS12_RC4_40_MD5 0x002b +#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003 +#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017 + +#define OP_PCL_TLS12_RC4_128_SHA 0x0020 +#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a +#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e +#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092 +#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005 +#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002 +#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007 +#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c +#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011 +#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016 + +#define OP_PCL_TLS12_RC4_40_SHA 0x0028 + +/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */ +#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e +#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f 
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040 +#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067 +#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c + +/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */ +#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068 +#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069 +#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a +#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b +#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d + +/* AEAD_AES_xxx_CCM/GCM remain to be defined... */ + +#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33 +#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35 +#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80 +#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84 +#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86 +#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83 +#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85 +#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20 +#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24 +#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26 +#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23 +#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25 +#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60 +#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64 +#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66 +#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63 +#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65 + +/* For DTLS - OP_PCLID_DTLS */ + +#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f +#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030 +#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031 +#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032 +#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033 +#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034 +#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c +#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090 +#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094 +#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004 +#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009 +#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e +#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013 +#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018 +#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d +#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e +#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f + +#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035 +#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036 +#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037 +#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038 +#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039 +#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a +#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d +#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091 +#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095 +#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005 +#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a +#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f +#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014 +#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019 +#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020 +#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021 +#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022 + +/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */ + +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010 +#define 
OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c + +#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029 + +#define OP_PCL_DTLS_DES_CBC_MD5 0x0022 + +#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008 +#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b +#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e +#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011 +#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014 +#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019 +#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026 + + +#define OP_PCL_DTLS_DES_CBC_SHA 0x001e +#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009 +#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c +#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f +#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012 +#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015 +#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a + + +#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33 +#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35 +#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80 +#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84 +#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86 +#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83 +#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85 +#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20 +#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24 +#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26 +#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23 +#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25 +#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60 +#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64 +#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66 +#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63 +#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65 + +/* 802.16 WiMAX protinfos */ +#define OP_PCL_WIMAX_OFDM 0x0201 +#define OP_PCL_WIMAX_OFDMA 0x0231 + +/* 802.11 WiFi protinfos */ +#define OP_PCL_WIFI 0xac04 + +/* MacSec protinfos */ +#define OP_PCL_MACSEC 0x0001 + +/* Derived Key Protocol (DKP) Protinfo */ +#define OP_PCL_DKP_SRC_SHIFT 14 +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT) +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT) +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT) +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT) +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT) +#define OP_PCL_DKP_DST_SHIFT 12 +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT) +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT) +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT) +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT) +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT) +#define OP_PCL_DKP_KEY_SHIFT 0 +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT) + +/* PKI unidirectional protocol protinfo bits */ +#define OP_PCL_PKPROT_TEST 0x0008 +#define OP_PCL_PKPROT_DECRYPT 0x0004 +#define OP_PCL_PKPROT_ECC 0x0002 +#define OP_PCL_PKPROT_F2M 0x0001 + +/* For non-protocol/alg-only op commands */ +#define OP_ALG_TYPE_SHIFT 24 +#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT) +#define OP_ALG_TYPE_CLASS1 (2 << 
OP_ALG_TYPE_SHIFT) +#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT) + +/* version register fields */ +#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */ +#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */ +#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */ +#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */ + +#define OP_ALG_ALGSEL_SHIFT 16 +#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SNOW (0x60 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT) +#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT) + +#define OP_ALG_AAI_SHIFT 4 +#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT) + +/* blockcipher AAI set */ +#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT) + +/* randomizer AAI set */ +#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT) + +/* 
RNG4 AAI set */ +#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT) + +/* Chacha20 AAI set */ +#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT) + +/* hmac/smac AAI set */ +#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT) + +/* CRC AAI set*/ +#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT) + +/* Kasumi/SNOW AAI set */ +#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) +#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) + +#define OP_ALG_AS_SHIFT 2 +#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) +#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) +#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT) +#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT) +#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT) + +#define OP_ALG_ICV_SHIFT 1 +#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT) +#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT) +#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT) + +#define OP_ALG_PR_ON BIT(1) + +#define OP_ALG_DIR_SHIFT 0 +#define OP_ALG_DIR_MASK 1 +#define OP_ALG_DECRYPT 0 +#define OP_ALG_ENCRYPT 1 + +/* PKHA algorithm type set */ +#define OP_ALG_PK 0x00800000 +#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */ + +/* PKHA mode clear memory functions */ +#define OP_ALG_PKMODE_A_RAM 0x80000 +#define OP_ALG_PKMODE_B_RAM 0x40000 +#define OP_ALG_PKMODE_E_RAM 0x20000 +#define OP_ALG_PKMODE_N_RAM 0x10000 +#define OP_ALG_PKMODE_CLEARMEM 0x00001 + +/* PKHA mode modular-arithmetic functions */ +#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000 +#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000 +#define OP_ALG_PKMODE_MOD_F2M 0x20000 +#define OP_ALG_PKMODE_MOD_R2_IN 0x10000 +#define OP_ALG_PKMODE_PRJECTV 0x00800 +#define OP_ALG_PKMODE_TIME_EQ 0x400 +#define OP_ALG_PKMODE_OUT_B 0x000 +#define OP_ALG_PKMODE_OUT_A 0x100 +#define OP_ALG_PKMODE_MOD_ADD 0x002 +#define OP_ALG_PKMODE_MOD_SUB_AB 0x003 +#define OP_ALG_PKMODE_MOD_SUB_BA 0x004 +#define OP_ALG_PKMODE_MOD_MULT 0x005 +#define OP_ALG_PKMODE_MOD_EXPO 0x006 +#define OP_ALG_PKMODE_MOD_REDUCT 0x007 +#define OP_ALG_PKMODE_MOD_INV 0x008 +#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009 +#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a +#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b +#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c +#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d +#define OP_ALG_PKMODE_MOD_GCD 0x00e +#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f + +/* PKHA mode copy-memory functions */ +#define OP_ALG_PKMODE_SRC_REG_SHIFT 17 +#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_SHIFT 10 +#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8 +#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define 
OP_ALG_PKMODE_DST_SEG_SHIFT 6 +#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT) + +#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT) +#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT) +#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT) +#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT) +#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT) +#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT) +#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT) +#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80 +#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81 + +/* + * SEQ_IN_PTR Command Constructs + */ + +/* Release Buffers */ +#define SQIN_RBS 0x04000000 + +/* Sequence pointer is really a descriptor */ +#define SQIN_INL 0x02000000 + +/* Sequence pointer is a scatter-gather table */ +#define SQIN_SGF 0x01000000 + +/* Appends to a previous pointer */ +#define SQIN_PRE 0x00800000 + +/* Use extended length following pointer */ +#define SQIN_EXT 0x00400000 + +/* Restore sequence with pointer/length */ +#define SQIN_RTO 0x00200000 + +/* Replace job descriptor */ +#define SQIN_RJD 0x00100000 + +#define SQIN_LEN_SHIFT 0 +#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT) + +/* + * SEQ_OUT_PTR Command Constructs + */ + +/* Sequence pointer is a scatter-gather table */ +#define SQOUT_SGF 0x01000000 + +/* Appends to a previous pointer */ +#define SQOUT_PRE SQIN_PRE + +/* Restore sequence with pointer/length */ +#define SQOUT_RTO SQIN_RTO + +/* Use extended length following pointer */ +#define SQOUT_EXT 0x00400000 + +#define SQOUT_LEN_SHIFT 0 +#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT) + + +/* + * SIGNATURE Command Constructs + */ + +/* TYPE field is all that's relevant */ +#define SIGN_TYPE_SHIFT 16 +#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT) + +#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT) +#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT) + +/* + * MOVE Command Constructs + */ + +#define MOVE_AUX_SHIFT 25 +#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT) +#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT) +#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT) + +#define MOVE_WAITCOMP_SHIFT 24 +#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT) +#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT) + +#define MOVE_SRC_SHIFT 20 +#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT) +#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT) +#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT) +#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT) +#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT) +#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT) +#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT) +#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT) +#define MOVE_SRC_MATH3 (0x07 << 
MOVE_SRC_SHIFT) +#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT) +#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT) +#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT) + +#define MOVE_DEST_SHIFT 16 +#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT) +#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT) +#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT) +#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT) +#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT) +#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT) +#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT) +#define MOVE_DEST_INFIFO_NOINFO (0x0a << MOVE_DEST_SHIFT) +#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT) +#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT) + +#define MOVE_OFFSET_SHIFT 8 +#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT) + +#define MOVE_LEN_SHIFT 0 +#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT) + +#define MOVELEN_MRSEL_SHIFT 0 +#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT) +#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT) +#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT) +#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT) +#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT) + +/* + * MATH Command Constructs + */ + +#define MATH_IFB_SHIFT 26 +#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT) +#define MATH_IFB (1 << MATH_IFB_SHIFT) + +#define MATH_NFU_SHIFT 25 +#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT) +#define MATH_NFU (1 << MATH_NFU_SHIFT) + +#define MATH_STL_SHIFT 24 +#define MATH_STL_MASK (1 << MATH_STL_SHIFT) +#define MATH_STL (1 << MATH_STL_SHIFT) + +/* Function selectors */ +#define MATH_FUN_SHIFT 20 +#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT) +#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT) +#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT) +#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT) +#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT) +#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT) +#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT) +#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT) +#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT) +#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT) +#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT) +#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT) + +/* Source 0 selectors */ +#define MATH_SRC0_SHIFT 16 +#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT) +#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT) +#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT) +#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT) +#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT) +#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT) +#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT) +#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT) +#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT) +#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT) +#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT) +#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT) + +/* Source 1 selectors */ +#define MATH_SRC1_SHIFT 12 +#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT) +#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT) +#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT) +#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT) +#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT) +#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT) +#define MATH_SRC1_DPOVRD (0x07 
<< MATH_SRC1_SHIFT) +#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT) +#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT) +#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT) + +/* Destination selectors */ +#define MATH_DEST_SHIFT 8 +#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT) +#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT) +#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) +#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) +#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT) +#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) +#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) +#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT) +#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT) +#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT) + +/* Length selectors */ +#define MATH_LEN_SHIFT 0 +#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT) +#define MATH_LEN_1BYTE 0x01 +#define MATH_LEN_2BYTE 0x02 +#define MATH_LEN_4BYTE 0x04 +#define MATH_LEN_8BYTE 0x08 + +/* + * JUMP Command Constructs + */ + +#define JUMP_CLASS_SHIFT 25 +#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT) +#define JUMP_CLASS_NONE 0 +#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT) +#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT) +#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT) + +#define JUMP_JSL_SHIFT 24 +#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT) +#define JUMP_JSL (1 << JUMP_JSL_SHIFT) + +#define JUMP_TYPE_SHIFT 22 +#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT) +#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT) +#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT) +#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT) + +#define JUMP_TEST_SHIFT 16 +#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT) +#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT) +#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT) +#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT) +#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT) + +/* Condition codes. 
JSL bit is factored in */ +#define JUMP_COND_SHIFT 8 +#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT) +#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT) +#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT) +#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT) +#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT) +#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT) +#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT) +#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT) + +#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL) +#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL) + +#define JUMP_OFFSET_SHIFT 0 +#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT) + +/* + * NFIFO ENTRY + * Data Constructs + * + */ +#define NFIFOENTRY_DEST_SHIFT 30 +#define NFIFOENTRY_DEST_MASK (3 << NFIFOENTRY_DEST_SHIFT) +#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT) +#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT) +#define NFIFOENTRY_DEST_CLASS2 (2 << NFIFOENTRY_DEST_SHIFT) +#define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT) + +#define NFIFOENTRY_LC2_SHIFT 29 +#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT) +#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT) + +#define NFIFOENTRY_LC1_SHIFT 28 +#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT) +#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT) + +#define NFIFOENTRY_FC2_SHIFT 27 +#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT) +#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT) + +#define NFIFOENTRY_FC1_SHIFT 26 +#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT) +#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT) + +#define NFIFOENTRY_STYPE_SHIFT 24 +#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT) +#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT) +#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT) +#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT) +#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT) + +#define NFIFOENTRY_DTYPE_SHIFT 20 +#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT) + +#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT) + +#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT) +#define 
NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT) +#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT) + + +#define NFIFOENTRY_BND_SHIFT 19 +#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT) +#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT) + +#define NFIFOENTRY_PTYPE_SHIFT 16 +#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT) + +#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT) +#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT) + +#define NFIFOENTRY_OC_SHIFT 15 +#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT) +#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT) + +#define NFIFOENTRY_AST_SHIFT 14 +#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_OC_SHIFT) +#define NFIFOENTRY_AST (1 << NFIFOENTRY_OC_SHIFT) + +#define NFIFOENTRY_BM_SHIFT 11 +#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT) +#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT) + +#define NFIFOENTRY_PS_SHIFT 10 +#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT) +#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT) + +#define NFIFOENTRY_DLEN_SHIFT 0 +#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT) + +#define NFIFOENTRY_PLEN_SHIFT 0 +#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT) + +/* Append Load Immediate Command */ +#define FD_CMD_APPEND_LOAD_IMMEDIATE 0x80000000 + +/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */ +#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN 0x40000000 + +/* Frame Descriptor Command for Replacement Job Descriptor */ +#define FD_CMD_REPLACE_JOB_DESC 0x20000000 + +#endif /* DESC_H */ diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h new file mode 100644 index 000000000..62ce6421b --- /dev/null +++ b/drivers/crypto/caam/desc_constr.h @@ -0,0 +1,605 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * caam descriptor construction helper functions + * + * Copyright 2008-2012 Freescale Semiconductor, Inc. + * Copyright 2019 NXP + */ + +#ifndef DESC_CONSTR_H +#define DESC_CONSTR_H + +#include "desc.h" +#include "regs.h" + +#define IMMEDIATE (1 << 23) +#define CAAM_CMD_SZ sizeof(u32) +#define CAAM_PTR_SZ caam_ptr_sz +#define CAAM_PTR_SZ_MAX sizeof(dma_addr_t) +#define CAAM_PTR_SZ_MIN sizeof(u32) +#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) +#define __DESC_JOB_IO_LEN(n) (CAAM_CMD_SZ * 5 + (n) * 3) +#define DESC_JOB_IO_LEN __DESC_JOB_IO_LEN(CAAM_PTR_SZ) +#define DESC_JOB_IO_LEN_MAX __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MAX) +#define DESC_JOB_IO_LEN_MIN __DESC_JOB_IO_LEN(CAAM_PTR_SZ_MIN) + +/* + * The CAAM QI hardware constructs a job descriptor which points + * to shared descriptor (as pointed by context_a of FQ to CAAM). + * When the job descriptor is executed by deco, the whole job + * descriptor together with shared descriptor gets loaded in + * deco buffer which is 64 words long (each 32-bit). 
+ * + * The job descriptor constructed by QI hardware has layout: + * + * HEADER (1 word) + * Shdesc ptr (1 or 2 words) + * SEQ_OUT_PTR (1 word) + * Out ptr (1 or 2 words) + * Out length (1 word) + * SEQ_IN_PTR (1 word) + * In ptr (1 or 2 words) + * In length (1 word) + * + * The shdesc ptr is used to fetch shared descriptor contents + * into deco buffer. + * + * Apart from shdesc contents, the total number of words that + * get loaded in deco buffer are '8' or '11'. The remaining words + * in deco buffer can be used for storing shared descriptor. + */ +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) / CAAM_CMD_SZ) + +#ifdef DEBUG +#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ + &__func__[sizeof("append")]); } while (0) +#else +#define PRINT_POS +#endif + +#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \ + LDST_SRCDST_WORD_DECOCTRL | \ + (LDOFF_CHG_SHARE_OK_NO_PROP << \ + LDST_OFFSET_SHIFT)) +#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ + LDST_SRCDST_WORD_DECOCTRL | \ + (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) +#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ + LDST_SRCDST_WORD_DECOCTRL | \ + (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) + +extern bool caam_little_end; +extern size_t caam_ptr_sz; + +/* + * HW fetches 4 S/G table entries at a time, irrespective of how many entries + * are in the table. It's SW's responsibility to make sure these accesses + * do not have side effects. + */ +static inline int pad_sg_nents(int sg_nents) +{ + return ALIGN(sg_nents, 4); +} + +static inline int desc_len(u32 * const desc) +{ + return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; +} + +static inline int desc_bytes(void * const desc) +{ + return desc_len(desc) * CAAM_CMD_SZ; +} + +static inline u32 *desc_end(u32 * const desc) +{ + return desc + desc_len(desc); +} + +static inline void *sh_desc_pdb(u32 * const desc) +{ + return desc + 1; +} + +static inline void init_desc(u32 * const desc, u32 options) +{ + *desc = cpu_to_caam32((options | HDR_ONE) + 1); +} + +static inline void init_sh_desc(u32 * const desc, u32 options) +{ + PRINT_POS; + init_desc(desc, CMD_SHARED_DESC_HDR | options); +} + +static inline void init_sh_desc_pdb(u32 * const desc, u32 options, + size_t pdb_bytes) +{ + u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; + + init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) | + options); +} + +static inline void init_job_desc(u32 * const desc, u32 options) +{ + init_desc(desc, CMD_DESC_HDR | options); +} + +static inline void init_job_desc_pdb(u32 * const desc, u32 options, + size_t pdb_bytes) +{ + u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; + + init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options); +} + +static inline void append_ptr(u32 * const desc, dma_addr_t ptr) +{ + if (caam_ptr_sz == sizeof(dma_addr_t)) { + dma_addr_t *offset = (dma_addr_t *)desc_end(desc); + + *offset = cpu_to_caam_dma(ptr); + } else { + u32 *offset = (u32 *)desc_end(desc); + + *offset = cpu_to_caam_dma(ptr); + } + + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + + CAAM_PTR_SZ / CAAM_CMD_SZ); +} + +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr, + int len, u32 options) +{ + PRINT_POS; + init_job_desc(desc, HDR_SHARED | options | + (len << HDR_START_IDX_SHIFT)); + append_ptr(desc, ptr); +} + +static inline void append_data(u32 * const desc, const void *data, int len) +{ + u32 *offset = desc_end(desc); + + if (len) /* avoid sparse 
warning: memcpy with byte count of 0 */ + memcpy(offset, data, len); + + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + + (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ); +} + +static inline void append_cmd(u32 * const desc, u32 command) +{ + u32 *cmd = desc_end(desc); + + *cmd = cpu_to_caam32(command); + + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 1); +} + +#define append_u32 append_cmd + +static inline void append_u64(u32 * const desc, u64 data) +{ + u32 *offset = desc_end(desc); + + /* Only 32-bit alignment is guaranteed in descriptor buffer */ + if (caam_little_end) { + *offset = cpu_to_caam32(lower_32_bits(data)); + *(++offset) = cpu_to_caam32(upper_32_bits(data)); + } else { + *offset = cpu_to_caam32(upper_32_bits(data)); + *(++offset) = cpu_to_caam32(lower_32_bits(data)); + } + + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 2); +} + +/* Write command without affecting header, and return pointer to next word */ +static inline u32 *write_cmd(u32 * const desc, u32 command) +{ + *desc = cpu_to_caam32(command); + + return desc + 1; +} + +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len, + u32 command) +{ + append_cmd(desc, command | len); + append_ptr(desc, ptr); +} + +/* Write length after pointer, rather than inside command */ +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr, + unsigned int len, u32 command) +{ + append_cmd(desc, command); + if (!(command & (SQIN_RTO | SQIN_PRE))) + append_ptr(desc, ptr); + append_cmd(desc, len); +} + +static inline void append_cmd_data(u32 * const desc, const void *data, int len, + u32 command) +{ + append_cmd(desc, command | IMMEDIATE | len); + append_data(desc, data, len); +} + +#define APPEND_CMD_RET(cmd, op) \ +static inline u32 *append_##cmd(u32 * const desc, u32 options) \ +{ \ + u32 *cmd = desc_end(desc); \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | options); \ + return cmd; \ +} +APPEND_CMD_RET(jump, JUMP) +APPEND_CMD_RET(move, MOVE) +APPEND_CMD_RET(move_len, MOVE_LEN) + +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd) +{ + *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) | + (desc_len(desc) - (jump_cmd - desc))); +} + +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd) +{ + u32 val = caam32_to_cpu(*move_cmd); + + val &= ~MOVE_OFFSET_MASK; + val |= (desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK; + *move_cmd = cpu_to_caam32(val); +} + +#define APPEND_CMD(cmd, op) \ +static inline void append_##cmd(u32 * const desc, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | options); \ +} +APPEND_CMD(operation, OPERATION) + +#define APPEND_CMD_LEN(cmd, op) \ +static inline void append_##cmd(u32 * const desc, unsigned int len, \ + u32 options) \ +{ \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | len | options); \ +} + +APPEND_CMD_LEN(seq_load, SEQ_LOAD) +APPEND_CMD_LEN(seq_store, SEQ_STORE) +APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD) +APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE) + +#define APPEND_CMD_PTR(cmd, op) \ +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \ + unsigned int len, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd_ptr(desc, ptr, len, CMD_##op | options); \ +} +APPEND_CMD_PTR(key, KEY) +APPEND_CMD_PTR(load, LOAD) +APPEND_CMD_PTR(fifo_load, FIFO_LOAD) +APPEND_CMD_PTR(fifo_store, FIFO_STORE) + +static inline void append_store(u32 * const desc, dma_addr_t ptr, + unsigned int len, u32 options) +{ + u32 cmd_src; + + cmd_src = options & LDST_SRCDST_MASK; + + append_cmd(desc, CMD_STORE | options | 
len); + + /* The following options do not require pointer */ + if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED || + cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB || + cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE || + cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE)) + append_ptr(desc, ptr); +} + +#define APPEND_SEQ_PTR_INTLEN(cmd, op) \ +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \ + dma_addr_t ptr, \ + unsigned int len, \ + u32 options) \ +{ \ + PRINT_POS; \ + if (options & (SQIN_RTO | SQIN_PRE)) \ + append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \ + else \ + append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \ +} +APPEND_SEQ_PTR_INTLEN(in, IN) +APPEND_SEQ_PTR_INTLEN(out, OUT) + +#define APPEND_CMD_PTR_TO_IMM(cmd, op) \ +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ + unsigned int len, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd_data(desc, data, len, CMD_##op | options); \ +} +APPEND_CMD_PTR_TO_IMM(load, LOAD); +APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD); + +#define APPEND_CMD_PTR_EXTLEN(cmd, op) \ +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \ + unsigned int len, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \ +} +APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR) +APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR) + +/* + * Determine whether to store length internally or externally depending on + * the size of its type + */ +#define APPEND_CMD_PTR_LEN(cmd, op, type) \ +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \ + type len, u32 options) \ +{ \ + PRINT_POS; \ + if (sizeof(type) > sizeof(u16)) \ + append_##cmd##_extlen(desc, ptr, len, options); \ + else \ + append_##cmd##_intlen(desc, ptr, len, options); \ +} +APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32) +APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32) + +/* + * 2nd variant for commands whose specified immediate length differs + * from length of immediate data provided, e.g., split keys + */ +#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ + unsigned int data_len, \ + unsigned int len, u32 options) \ +{ \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \ + append_data(desc, data, data_len); \ +} +APPEND_CMD_PTR_TO_IMM2(key, KEY); + +#define APPEND_CMD_RAW_IMM(cmd, op, type) \ +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \ + u32 options) \ +{ \ + PRINT_POS; \ + if (options & LDST_LEN_MASK) \ + append_cmd(desc, CMD_##op | IMMEDIATE | options); \ + else \ + append_cmd(desc, CMD_##op | IMMEDIATE | options | \ + sizeof(type)); \ + append_cmd(desc, immediate); \ +} +APPEND_CMD_RAW_IMM(load, LOAD, u32); + +/* + * ee - endianness + * size - size of immediate type in bytes + */ +#define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \ +static inline void append_##cmd##_imm_##ee##size(u32 *desc, \ + u##size immediate, \ + u32 options) \ +{ \ + __##ee##size data = cpu_to_##ee##size(immediate); \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \ + append_data(desc, &data, sizeof(data)); \ +} + +APPEND_CMD_RAW_IMM2(load, LOAD, be, 32); + +/* + * Append math command. 
Only the last part of destination and source need to + * be specified + */ +#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \ +append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \ + MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len); + +#define append_math_add(desc, dest, src0, src1, len) \ + APPEND_MATH(ADD, desc, dest, src0, src1, len) +#define append_math_sub(desc, dest, src0, src1, len) \ + APPEND_MATH(SUB, desc, dest, src0, src1, len) +#define append_math_add_c(desc, dest, src0, src1, len) \ + APPEND_MATH(ADDC, desc, dest, src0, src1, len) +#define append_math_sub_b(desc, dest, src0, src1, len) \ + APPEND_MATH(SUBB, desc, dest, src0, src1, len) +#define append_math_and(desc, dest, src0, src1, len) \ + APPEND_MATH(AND, desc, dest, src0, src1, len) +#define append_math_or(desc, dest, src0, src1, len) \ + APPEND_MATH(OR, desc, dest, src0, src1, len) +#define append_math_xor(desc, dest, src0, src1, len) \ + APPEND_MATH(XOR, desc, dest, src0, src1, len) +#define append_math_lshift(desc, dest, src0, src1, len) \ + APPEND_MATH(LSHIFT, desc, dest, src0, src1, len) +#define append_math_rshift(desc, dest, src0, src1, len) \ + APPEND_MATH(RSHIFT, desc, dest, src0, src1, len) +#define append_math_ldshift(desc, dest, src0, src1, len) \ + APPEND_MATH(SHLD, desc, dest, src0, src1, len) + +/* Exactly one source is IMM. Data is passed in as u32 value */ +#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \ +do { \ + APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \ + append_cmd(desc, data); \ +} while (0) + +#define append_math_add_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data) +#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data) +#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data) +#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data) +#define append_math_and_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data) +#define append_math_or_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data) +#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data) +#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data) +#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data) + +/* Exactly one source is IMM. Data is passed in as u64 value */ +#define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \ +do { \ + u32 upper = (data >> 16) >> 16; \ + APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \ + (upper ? 
0 : MATH_IFB)); \ + if (upper) \ + append_u64(desc, data); \ + else \ + append_u32(desc, lower_32_bits(data)); \ +} while (0) + +#define append_math_add_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data) +#define append_math_sub_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data) +#define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data) +#define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data) +#define append_math_and_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data) +#define append_math_or_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data) +#define append_math_xor_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data) +#define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data) +#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data) + +/** + * struct alginfo - Container for algorithm details + * @algtype: algorithm selector; for valid values, see documentation of the + * functions where it is used. + * @keylen: length of the provided algorithm key, in bytes + * @keylen_pad: padded length of the provided algorithm key, in bytes + * @key_dma: dma (bus) address where algorithm key resides + * @key_virt: virtual address where algorithm key resides + * @key_inline: true - key can be inlined in the descriptor; false - key is + * referenced by the descriptor + */ +struct alginfo { + u32 algtype; + unsigned int keylen; + unsigned int keylen_pad; + dma_addr_t key_dma; + const void *key_virt; + bool key_inline; +}; + +/** + * desc_inline_query() - Provide indications on which data items can be inlined + * and which shall be referenced in a shared descriptor. + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands, + * excluding the data items to be inlined (or corresponding + * pointer if an item is not inlined). Each cnstr_* function that + * generates descriptors should have a define mentioning + * corresponding length. + * @jd_len: Maximum length of the job descriptor(s) that will be used + * together with the shared descriptor. + * @data_len: Array of lengths of the data items trying to be inlined + * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0 + * otherwise. + * @count: Number of data items (size of @data_len array); must be <= 32 + * + * Return: 0 if data can be inlined / referenced, negative value if not. If 0, + * check @inl_mask for details. + */ +static inline int desc_inline_query(unsigned int sd_base_len, + unsigned int jd_len, unsigned int *data_len, + u32 *inl_mask, unsigned int count) +{ + int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len); + unsigned int i; + + *inl_mask = 0; + for (i = 0; (i < count) && (rem_bytes > 0); i++) { + if (rem_bytes - (int)(data_len[i] + + (count - i - 1) * CAAM_PTR_SZ) >= 0) { + rem_bytes -= data_len[i]; + *inl_mask |= (1 << i); + } else { + rem_bytes -= CAAM_PTR_SZ; + } + } + + return (rem_bytes >= 0) ? 
0 : -1; +} + +/** + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key + * @desc: pointer to buffer used for descriptor construction + * @adata: pointer to authentication transform definitions. + * keylen should be the length of initial key, while keylen_pad + * the length of the derived (split) key. + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, + * SHA256, SHA384, SHA512}. + */ +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata) +{ + u32 protid; + + /* + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*} + * to OP_PCLID_DKP_{MD5, SHA*} + */ + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) | + (0x20 << OP_ALG_ALGSEL_SHIFT); + + if (adata->key_inline) { + int words; + + if (adata->keylen > adata->keylen_pad) { + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | + OP_PCL_DKP_SRC_PTR | + OP_PCL_DKP_DST_IMM | adata->keylen); + append_ptr(desc, adata->key_dma); + + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - + CAAM_PTR_SZ) / CAAM_CMD_SZ; + } else { + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | + OP_PCL_DKP_SRC_IMM | + OP_PCL_DKP_DST_IMM | adata->keylen); + append_data(desc, adata->key_virt, adata->keylen); + + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - + ALIGN(adata->keylen, CAAM_CMD_SZ)) / + CAAM_CMD_SZ; + } + + /* Reserve space in descriptor buffer for the derived key */ + if (words) + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); + } else { + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR | + adata->keylen); + append_ptr(desc, adata->key_dma); + } +} + +#endif /* DESC_CONSTR_H */ diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c new file mode 100644 index 000000000..0eca8c2fd --- /dev/null +++ b/drivers/crypto/caam/dpseci-debugfs.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include "dpseci-debugfs.h" + +static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset) +{ + struct dpaa2_caam_priv *priv = (struct dpaa2_caam_priv *)file->private; + u32 fqid, fcnt, bcnt; + int i, err; + + seq_printf(file, "FQ stats for %s:\n", dev_name(priv->dev)); + seq_printf(file, "%s%16s%16s\n", + "Rx-VFQID", + "Pending frames", + "Pending bytes"); + + for (i = 0; i < priv->num_pairs; i++) { + fqid = priv->rx_queue_attr[i].fqid; + err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt); + if (err) + continue; + + seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt); + } + + seq_printf(file, "%s%16s%16s\n", + "Tx-VFQID", + "Pending frames", + "Pending bytes"); + + for (i = 0; i < priv->num_pairs; i++) { + fqid = priv->tx_queue_attr[i].fqid; + err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt); + if (err) + continue; + + seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dpseci_dbg_fqs); + +void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) +{ + priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL); + + debugfs_create_file("fq_stats", 0444, priv->dfs_root, priv, + &dpseci_dbg_fqs_fops); +} + +void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) +{ + debugfs_remove_recursive(priv->dfs_root); +} diff --git a/drivers/crypto/caam/dpseci-debugfs.h b/drivers/crypto/caam/dpseci-debugfs.h new file mode 100644 index 000000000..bc22af7be --- /dev/null +++ b/drivers/crypto/caam/dpseci-debugfs.h @@ -0,0 
+1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2019 NXP */ + +#ifndef DPSECI_DEBUGFS_H +#define DPSECI_DEBUGFS_H + +#include <linux/dcache.h> +#include "caamalg_qi2.h" + +#ifdef CONFIG_DEBUG_FS +void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv); +void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv); +#else +static inline void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) {} +static inline void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) {} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* DPSECI_DEBUGFS_H */ diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c new file mode 100644 index 000000000..039df6c57 --- /dev/null +++ b/drivers/crypto/caam/dpseci.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2017-2018 NXP + */ + +#include <linux/fsl/mc.h> +#include "dpseci.h" +#include "dpseci_cmd.h" + +/** + * dpseci_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpseci_id: DPSECI unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an already created + * object; an object may have been declared statically in the DPL + * or created dynamically. + * This function returns a unique authentication token, associated with the + * specific object ID and the specific MC portal; this token must be used in all + * subsequent commands for this specific object. + * + * Return: '0' on success, error code otherwise + */ +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id, + u16 *token) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_cmd_open *cmd_params; + int err; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpseci_cmd_open *)cmd.params; + cmd_params->dpseci_id = cpu_to_le32(dpseci_id); + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpseci_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * + * After this function is called, no further operations are allowed on the + * object without opening a new control session. 
+ * + * Return: '0' on success, error code otherwise + */ +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE, + cmd_flags, + token); + return mc_send_command(mc_io, &cmd); +} + +/** + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * + * Return: '0' on success, error code otherwise + */ +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE, + cmd_flags, + token); + return mc_send_command(mc_io, &cmd); +} + +/** + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * + * Return: '0' on success, error code otherwise + */ +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE, + cmd_flags, + token); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpseci_reset() - Reset the DPSECI, returns the object to initial state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * + * Return: '0' on success, error code otherwise + */ +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET, + cmd_flags, + token); + return mc_send_command(mc_io, &cmd); +} + +/** + * dpseci_is_enabled() - Check if the DPSECI is enabled. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on success, error code otherwise + */ +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + int *en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_rsp_is_enabled *rsp_params; + int err; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED, + cmd_flags, + token); + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params; + *en = dpseci_get_field(rsp_params->is_enabled, ENABLE); + + return 0; +} + +/** + * dpseci_get_attributes() - Retrieve DPSECI attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @attr: Returned object's attributes + * + * Return: '0' on success, error code otherwise + */ +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + struct dpseci_attr *attr) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_rsp_get_attributes *rsp_params; + int err; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR, + cmd_flags, + token); + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params; + attr->id = le32_to_cpu(rsp_params->id); + attr->num_tx_queues = rsp_params->num_tx_queues; + attr->num_rx_queues = rsp_params->num_rx_queues; + attr->options = le32_to_cpu(rsp_params->options); + + return 0; +} + +/** + * dpseci_set_rx_queue() - Set Rx queue configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @queue: Select the queue relative to number of priorities configured at + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all + * Rx queues identically. 
+ * @cfg: Rx queue configuration + * + * Return: '0' on success, error code otherwise + */ +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 queue, const struct dpseci_rx_queue_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_cmd_queue *cmd_params; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpseci_cmd_queue *)cmd.params; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->priority = cfg->dest_cfg.priority; + cmd_params->queue = queue; + dpseci_set_field(cmd_params->dest_type, DEST_TYPE, + cfg->dest_cfg.dest_type); + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); + cmd_params->options = cpu_to_le32(cfg->options); + dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION, + cfg->order_preservation_en); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpseci_get_rx_queue() - Retrieve Rx queue attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @queue: Select the queue relative to number of priorities configured at + * DPSECI creation + * @attr: Returned Rx queue attributes + * + * Return: '0' on success, error code otherwise + */ +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 queue, struct dpseci_rx_queue_attr *attr) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_cmd_queue *cmd_params; + int err; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpseci_cmd_queue *)cmd.params; + cmd_params->queue = queue; + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); + attr->dest_cfg.priority = cmd_params->priority; + attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type, + DEST_TYPE); + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); + attr->fqid = le32_to_cpu(cmd_params->fqid); + attr->order_preservation_en = + dpseci_get_field(cmd_params->order_preservation_en, + ORDER_PRESERVATION); + + return 0; +} + +/** + * dpseci_get_tx_queue() - Retrieve Tx queue attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @queue: Select the queue relative to number of priorities configured at + * DPSECI creation + * @attr: Returned Tx queue attributes + * + * Return: '0' on success, error code otherwise + */ +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 queue, struct dpseci_tx_queue_attr *attr) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_cmd_queue *cmd_params; + struct dpseci_rsp_get_tx_queue *rsp_params; + int err; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpseci_cmd_queue *)cmd.params; + cmd_params->queue = queue; + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params; + attr->fqid = le32_to_cpu(rsp_params->fqid); + attr->priority = rsp_params->priority; + + return 0; +} + +/** + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @attr: Returned SEC attributes + * + * Return: '0' on success, error code otherwise + */ +int 
dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + struct dpseci_sec_attr *attr) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_rsp_get_sec_attr *rsp_params; + int err; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR, + cmd_flags, + token); + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params; + attr->ip_id = le16_to_cpu(rsp_params->ip_id); + attr->major_rev = rsp_params->major_rev; + attr->minor_rev = rsp_params->minor_rev; + attr->era = rsp_params->era; + attr->deco_num = rsp_params->deco_num; + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num; + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num; + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num; + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num; + attr->crc_acc_num = rsp_params->crc_acc_num; + attr->pk_acc_num = rsp_params->pk_acc_num; + attr->kasumi_acc_num = rsp_params->kasumi_acc_num; + attr->rng_acc_num = rsp_params->rng_acc_num; + attr->md_acc_num = rsp_params->md_acc_num; + attr->arc4_acc_num = rsp_params->arc4_acc_num; + attr->des_acc_num = rsp_params->des_acc_num; + attr->aes_acc_num = rsp_params->aes_acc_num; + attr->ccha_acc_num = rsp_params->ccha_acc_num; + attr->ptha_acc_num = rsp_params->ptha_acc_num; + + return 0; +} + +/** + * dpseci_get_api_version() - Get Data Path SEC Interface API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path sec API + * @minor_ver: Minor version of data path sec API + * + * Return: '0' on success, error code otherwise + */ +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 *major_ver, u16 *minor_ver) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_rsp_get_api_version *rsp_params; + int err; + + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION, + cmd_flags, 0); + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} + +/** + * dpseci_set_congestion_notification() - Set congestion group + * notification configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @cfg: congestion notification configuration + * + * Return: '0' on success, error code otherwise + */ +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, const struct dpseci_congestion_notification_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_cmd_congestion_notification *cmd_params; + + cmd.header = mc_encode_cmd_header( + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); + cmd_params->priority = cfg->dest_cfg.priority; + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE, + cfg->dest_cfg.dest_type); + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units); + cmd_params->message_iova = cpu_to_le64(cfg->message_iova); + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); + cmd_params->threshold_exit = 
cpu_to_le32(cfg->threshold_exit); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpseci_get_congestion_notification() - Get congestion group notification + * configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSECI object + * @cfg: congestion notification configuration + * + * Return: '0' on success, error code otherwise + */ +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, struct dpseci_congestion_notification_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpseci_cmd_congestion_notification *rsp_params; + int err; + + cmd.header = mc_encode_cmd_header( + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params; + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); + cfg->dest_cfg.priority = rsp_params->priority; + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options, + CGN_DEST_TYPE); + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS); + cfg->message_iova = le64_to_cpu(rsp_params->message_iova); + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); + + return 0; +} diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h new file mode 100644 index 000000000..6dcd9be81 --- /dev/null +++ b/drivers/crypto/caam/dpseci.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2017-2018 NXP + */ +#ifndef _DPSECI_H_ +#define _DPSECI_H_ + +/* + * Data Path SEC Interface API + * Contains initialization APIs and runtime control APIs for DPSECI + */ + +struct fsl_mc_io; + +/** + * General DPSECI macros + */ + +/** + * Maximum number of Tx/Rx queues per DPSECI object + */ +#define DPSECI_MAX_QUEUE_NUM 16 + +/** + * All queues considered; see dpseci_set_rx_queue() + */ +#define DPSECI_ALL_QUEUES (u8)(-1) + +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id, + u16 *token); + +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +/** + * Enable the Congestion Group support + */ +#define DPSECI_OPT_HAS_CG 0x000020 + +/** + * struct dpseci_cfg - Structure representing DPSECI configuration + * @options: Any combination of the following flags: + * DPSECI_OPT_HAS_CG + * @num_tx_queues: num of queues towards the SEC + * @num_rx_queues: num of queues back from the SEC + * @priorities: Priorities for the SEC hardware processing; + * each place in the array is the priority of the tx queue + * towards the SEC; + * valid priorities are configured with values 1-8; + */ +struct dpseci_cfg { + u32 options; + u8 num_tx_queues; + u8 num_rx_queues; + u8 priorities[DPSECI_MAX_QUEUE_NUM]; +}; + +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); + +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + int *en); + +/** + * struct dpseci_attr - Structure representing DPSECI attributes + * @id: DPSECI object ID + * @num_tx_queues: number of queues towards the SEC + * @num_rx_queues: number of queues back from the SEC + * @options: any combination of the following flags: + * DPSECI_OPT_HAS_CG + */ +struct dpseci_attr { + int id; + u8 num_tx_queues; + u8 num_rx_queues; + u32 options; +}; + +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + struct dpseci_attr *attr); + +/** + * enum dpseci_dest - DPSECI destination types + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode + * and does not generate FQDAN notifications; user is expected to dequeue + * from the queue based on polling or other user-defined method + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN + * notifications to the specified DPIO; user is expected to dequeue from + * the queue only after notification is received + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate + * FQDAN notifications, but is connected to the specified DPCON object; + * user is expected to dequeue from the DPCON channel + */ +enum dpseci_dest { + DPSECI_DEST_NONE = 0, + DPSECI_DEST_DPIO, + DPSECI_DEST_DPCON +}; + +/** + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters + * @dest_type: Destination type + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type + * @priority: Priority selection within the DPIO or DPCON channel; valid values + * are 0-1 or 0-7, depending on the number of priorities in that channel; + * not relevant for 'DPSECI_DEST_NONE' option + */ +struct dpseci_dest_cfg { + enum dpseci_dest dest_type; + int dest_id; + u8 priority; +}; + +/** + * DPSECI queue modification options + */ + +/** + * Select to modify the user's context associated with the queue + */ +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001 + +/** + * Select to modify the queue's 
destination + */ +#define DPSECI_QUEUE_OPT_DEST 0x00000002 + +/** + * Select to modify the queue's order preservation + */ +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004 + +/** + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration + * @options: Flags representing the suggested modifications to the queue; + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags + * @order_preservation_en: order preservation configuration for the rx queue + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options' + * @user_ctx: User context value provided in the frame descriptor of each + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained + * in 'options' + * @dest_cfg: Queue destination parameters; valid only if + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options' + */ +struct dpseci_rx_queue_cfg { + u32 options; + int order_preservation_en; + u64 user_ctx; + struct dpseci_dest_cfg dest_cfg; +}; + +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 queue, const struct dpseci_rx_queue_cfg *cfg); + +/** + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues + * @user_ctx: User context value provided in the frame descriptor of each + * dequeued frame + * @order_preservation_en: Status of the order preservation configuration on the + * queue + * @dest_cfg: Queue destination configuration + * @fqid: Virtual FQID value to be used for dequeue operations + */ +struct dpseci_rx_queue_attr { + u64 user_ctx; + int order_preservation_en; + struct dpseci_dest_cfg dest_cfg; + u32 fqid; +}; + +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 queue, struct dpseci_rx_queue_attr *attr); + +/** + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues + * @fqid: Virtual FQID to be used for sending frames to SEC hardware + * @priority: SEC hardware processing priority for the queue + */ +struct dpseci_tx_queue_attr { + u32 fqid; + u8 priority; +}; + +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 queue, struct dpseci_tx_queue_attr *attr); + +/** + * struct dpseci_sec_attr - Structure representing attributes of the SEC + * hardware accelerator + * @ip_id: ID for SEC + * @major_rev: Major revision number for SEC + * @minor_rev: Minor revision number for SEC + * @era: SEC Era + * @deco_num: The number of copies of the DECO that are implemented in this + * version of SEC + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this + * version of SEC + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this + * version of SEC + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are + * implemented in this version of SEC + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are + * implemented in this version of SEC + * @crc_acc_num: The number of copies of the CRC module that are implemented in + * this version of SEC + * @pk_acc_num: The number of copies of the Public Key module that are + * implemented in this version of SEC + * @kasumi_acc_num: The number of copies of the Kasumi module that are + * implemented in this version of SEC + * @rng_acc_num: The number of copies of the Random Number Generator that are + * implemented in this version of SEC + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are + * implemented in this version of SEC + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented + * in this 
version of SEC + * @des_acc_num: The number of copies of the DES module that are implemented in + * this version of SEC + * @aes_acc_num: The number of copies of the AES module that are implemented in + * this version of SEC + * @ccha_acc_num: The number of copies of the ChaCha20 module that are + * implemented in this version of SEC. + * @ptha_acc_num: The number of copies of the Poly1305 module that are + * implemented in this version of SEC. + **/ +struct dpseci_sec_attr { + u16 ip_id; + u8 major_rev; + u8 minor_rev; + u8 era; + u8 deco_num; + u8 zuc_auth_acc_num; + u8 zuc_enc_acc_num; + u8 snow_f8_acc_num; + u8 snow_f9_acc_num; + u8 crc_acc_num; + u8 pk_acc_num; + u8 kasumi_acc_num; + u8 rng_acc_num; + u8 md_acc_num; + u8 arc4_acc_num; + u8 des_acc_num; + u8 aes_acc_num; + u8 ccha_acc_num; + u8 ptha_acc_num; +}; + +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + struct dpseci_sec_attr *attr); + +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 *major_ver, u16 *minor_ver); + +/** + * enum dpseci_congestion_unit - DPSECI congestion units + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units + */ +enum dpseci_congestion_unit { + DPSECI_CONGESTION_UNIT_BYTES = 0, + DPSECI_CONGESTION_UNIT_FRAMES +}; + +/** + * CSCN message is written to message_iova once entering a + * congestion state (see 'threshold_entry') + */ +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001 + +/** + * CSCN message is written to message_iova once exiting a + * congestion state (see 'threshold_exit') + */ +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002 + +/** + * CSCN write will attempt to allocate into a cache (coherent write); + * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected + */ +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004 + +/** + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to + * DPIO/DPCON's WQ channel once entering a congestion state + * (see 'threshold_entry') + */ +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008 + +/** + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to + * DPIO/DPCON's WQ channel once exiting a congestion state + * (see 'threshold_exit') + */ +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010 + +/** + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written + * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately + * (if enabled) + */ +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020 + +/** + * struct dpseci_congestion_notification_cfg - congestion notification + * configuration + * @units: units type + * @threshold_entry: above this threshold we enter a congestion state. + * set it to '0' to disable it + * @threshold_exit: below this threshold we exit the congestion state. 
+ * @message_ctx: The context that will be part of the CSCN message + * @message_iova: I/O virtual address (must be in DMA-able memory), + * must be 16B aligned; + * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>' + * values + */ +struct dpseci_congestion_notification_cfg { + enum dpseci_congestion_unit units; + u32 threshold_entry; + u32 threshold_exit; + u64 message_ctx; + u64 message_iova; + struct dpseci_dest_cfg dest_cfg; + u16 notification_mode; +}; + +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, const struct dpseci_congestion_notification_cfg *cfg); + +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, struct dpseci_congestion_notification_cfg *cfg); + +#endif /* _DPSECI_H_ */ diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h new file mode 100644 index 000000000..71a007c85 --- /dev/null +++ b/drivers/crypto/caam/dpseci_cmd.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2017-2018 NXP + */ + +#ifndef _DPSECI_CMD_H_ +#define _DPSECI_CMD_H_ + +/* DPSECI Version */ +#define DPSECI_VER_MAJOR 5 +#define DPSECI_VER_MINOR 3 + +#define DPSECI_VER(maj, min) (((maj) << 16) | (min)) +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR) + +/* Command versioning */ +#define DPSECI_CMD_BASE_VERSION 1 +#define DPSECI_CMD_BASE_VERSION_V2 2 +#define DPSECI_CMD_ID_OFFSET 4 + +#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \ + DPSECI_CMD_BASE_VERSION) + +#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \ + DPSECI_CMD_BASE_VERSION_V2) + +/* Command IDs */ +#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800) +#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809) +#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09) + +#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002) +#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003) +#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004) +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005) +#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006) + +#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194) +#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196) +#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197) +#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198) +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170) +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171) + +/* Macros for accessing command fields smaller than 1 byte */ +#define DPSECI_MASK(field) \ + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \ + DPSECI_##field##_SHIFT) + +#define dpseci_set_field(var, field, val) \ + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field))) + +#define dpseci_get_field(var, field) \ + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT) + +struct dpseci_cmd_open { + __le32 dpseci_id; +}; + +#define DPSECI_ENABLE_SHIFT 0 +#define DPSECI_ENABLE_SIZE 1 + +struct dpseci_rsp_is_enabled { + u8 is_enabled; +}; + +struct dpseci_rsp_get_attributes { + __le32 id; + __le32 pad0; + u8 num_tx_queues; + u8 num_rx_queues; + u8 pad1[6]; + __le32 options; +}; + +#define DPSECI_DEST_TYPE_SHIFT 0 +#define DPSECI_DEST_TYPE_SIZE 4 + +#define DPSECI_ORDER_PRESERVATION_SHIFT 0 +#define DPSECI_ORDER_PRESERVATION_SIZE 1 + +struct dpseci_cmd_queue { + __le32 dest_id; + u8 priority; + u8 queue; + u8 
dest_type; + u8 pad; + __le64 user_ctx; + union { + __le32 options; + __le32 fqid; + }; + u8 order_preservation_en; +}; + +struct dpseci_rsp_get_tx_queue { + __le32 pad; + __le32 fqid; + u8 priority; +}; + +struct dpseci_rsp_get_sec_attr { + __le16 ip_id; + u8 major_rev; + u8 minor_rev; + u8 era; + u8 pad0[3]; + u8 deco_num; + u8 zuc_auth_acc_num; + u8 zuc_enc_acc_num; + u8 pad1; + u8 snow_f8_acc_num; + u8 snow_f9_acc_num; + u8 crc_acc_num; + u8 pad2; + u8 pk_acc_num; + u8 kasumi_acc_num; + u8 rng_acc_num; + u8 pad3; + u8 md_acc_num; + u8 arc4_acc_num; + u8 des_acc_num; + u8 aes_acc_num; + u8 ccha_acc_num; + u8 ptha_acc_num; +}; + +struct dpseci_rsp_get_api_version { + __le16 major; + __le16 minor; +}; + +#define DPSECI_CGN_DEST_TYPE_SHIFT 0 +#define DPSECI_CGN_DEST_TYPE_SIZE 4 +#define DPSECI_CGN_UNITS_SHIFT 4 +#define DPSECI_CGN_UNITS_SIZE 2 + +struct dpseci_cmd_congestion_notification { + __le32 dest_id; + __le16 notification_mode; + u8 priority; + u8 options; + __le64 message_iova; + __le64 message_ctx; + __le32 threshold_entry; + __le32 threshold_exit; +}; + +#endif /* _DPSECI_CMD_H_ */ diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c new file mode 100644 index 000000000..72db90176 --- /dev/null +++ b/drivers/crypto/caam/error.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CAAM Error Reporting + * + * Copyright 2009-2011 Freescale Semiconductor, Inc. + */ + +#include "compat.h" +#include "regs.h" +#include "desc.h" +#include "error.h" + +#ifdef DEBUG +#include <linux/highmem.h> + +void caam_dump_sg(const char *prefix_str, int prefix_type, + int rowsize, int groupsize, struct scatterlist *sg, + size_t tlen, bool ascii) +{ + struct scatterlist *it; + void *it_page; + size_t len; + void *buf; + + for (it = sg; it && tlen > 0 ; it = sg_next(it)) { + /* + * make sure the scatterlist's page + * has a valid virtual memory mapping + */ + it_page = kmap_atomic(sg_page(it)); + if (unlikely(!it_page)) { + pr_err("caam_dump_sg: kmap failed\n"); + return; + } + + buf = it_page + it->offset; + len = min_t(size_t, tlen, it->length); + print_hex_dump_debug(prefix_str, prefix_type, rowsize, + groupsize, buf, len, ascii); + tlen -= len; + + kunmap_atomic(it_page); + } +} +#else +void caam_dump_sg(const char *prefix_str, int prefix_type, + int rowsize, int groupsize, struct scatterlist *sg, + size_t tlen, bool ascii) +{} +#endif /* DEBUG */ +EXPORT_SYMBOL(caam_dump_sg); + +bool caam_little_end; +EXPORT_SYMBOL(caam_little_end); + +bool caam_imx; +EXPORT_SYMBOL(caam_imx); + +size_t caam_ptr_sz; +EXPORT_SYMBOL(caam_ptr_sz); + +static const struct { + u8 value; + const char *error_text; +} desc_error_list[] = { + { 0x00, "No error." }, + { 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." }, + { 0x02, "SGT Null Entry Error." }, + { 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." }, + { 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." }, + { 0x05, "Reserved." }, + { 0x06, "Invalid KEY Command" }, + { 0x07, "Invalid LOAD Command" }, + { 0x08, "Invalid STORE Command" }, + { 0x09, "Invalid OPERATION Command" }, + { 0x0A, "Invalid FIFO LOAD Command" }, + { 0x0B, "Invalid FIFO STORE Command" }, + { 0x0C, "Invalid MOVE/MOVE_LEN Command" }, + { 0x0D, "Invalid JUMP Command. 
A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." }, + { 0x0E, "Invalid MATH Command" }, + { 0x0F, "Invalid SIGNATURE Command" }, + { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." }, + { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."}, + { 0x12, "Shared Descriptor Header Error" }, + { 0x13, "Header Error. Invalid length or parity, or certain other problems." }, + { 0x14, "Burster Error. Burster has gotten to an illegal state" }, + { 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." }, + { 0x16, "DMA Error" }, + { 0x17, "Reserved." }, + { 0x1A, "Job failed due to JR reset" }, + { 0x1B, "Job failed due to Fail Mode" }, + { 0x1C, "DECO Watchdog timer timeout error" }, + { 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" }, + { 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" }, + { 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." }, + { 0x20, "DECO has completed a reset initiated via the DRR register" }, + { 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." }, + { 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." }, + { 0x23, "Read Input Frame error" }, + { 0x24, "JDKEK, TDKEK or TDSK not loaded error" }, + { 0x80, "DNR (do not run) error" }, + { 0x81, "undefined protocol command" }, + { 0x82, "invalid setting in PDB" }, + { 0x83, "Anti-replay LATE error" }, + { 0x84, "Anti-replay REPLAY error" }, + { 0x85, "Sequence number overflow" }, + { 0x86, "Sigver invalid signature" }, + { 0x87, "DSA Sign Illegal test descriptor" }, + { 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." }, + { 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." 
}, + { 0xC1, "Blob Command error: Undefined mode" }, + { 0xC2, "Blob Command error: Secure Memory Blob mode error" }, + { 0xC4, "Blob Command error: Black Blob key or input size error" }, + { 0xC5, "Blob Command error: Invalid key destination" }, + { 0xC8, "Blob Command error: Trusted/Secure mode error" }, + { 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" }, + { 0xF1, "3GPP HFN matches or exceeds the Threshold" }, +}; + +static const struct { + u8 value; + const char *error_text; +} qi_error_list[] = { + { 0x00, "No error" }, + { 0x1F, "Job terminated by FQ or ICID flush" }, + { 0x20, "FD format error"}, + { 0x21, "FD command format error"}, + { 0x23, "FL format error"}, + { 0x25, "CRJD specified in FD, but not enabled in FLC"}, + { 0x30, "Max. buffer size too small"}, + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"}, + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"}, + { 0x33, "Size over/underflow (allocate mode)"}, + { 0x34, "Size over/underflow (reuse mode)"}, + { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"}, + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G/ format)"}, + { 0x41, "SBC frame format not supported (allocate mode)"}, + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"}, + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"}, + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"}, + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"}, + { 0x46, "Annotation length exceeds offset (reuse mode)"}, + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"}, + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"}, + { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"}, + { 0x51, "Unsupported IF reuse mode"}, + { 0x52, "Unsupported FL use mode"}, + { 0x53, "Unsupported RJD use mode"}, + { 0x54, "Unsupported inline descriptor use mode"}, + { 0xC0, "Table buffer pool 0 depletion"}, + { 0xC1, "Table buffer pool 1 depletion"}, + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"}, + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"}, + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"}, + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"}, + { 0xD0, "FLC read error"}, + { 0xD1, "FL read error"}, + { 0xD2, "FL write error"}, + { 0xD3, "OF SGT write error"}, + { 0xD4, "PTA read error"}, + { 0xD5, "PTA write error"}, + { 0xD6, "OF SGT F-bit write error"}, + { 0xD7, "ASA write error"}, + { 0xE1, "FLC[ICR]=0 ICID error"}, + { 0xE2, "FLC[ICR]=1 ICID error"}, + { 0xE4, "source of ICID flush not trusted (BDI = 0)"}, +}; + +static const char * const cha_id_list[] = { + "", + "AES", + "DES", + "ARC4", + "MDHA", + "RNG", + "SNOW f8", + "Kasumi f8/9", + "PKHA", + "CRCA", + "SNOW f9", + "ZUCE", + "ZUCA", +}; + +static const char * const err_id_list[] = { + "No error.", + "Mode error.", + "Data size error.", + "Key size error.", + "PKHA A memory size error.", + "PKHA B memory size error.", + "Data arrived out of sequence error.", + "PKHA divide-by-zero error.", + "PKHA modulus even error.", + "DES key parity error.", + "ICV check failed.", + "Hardware error.", + "Unsupported CCM AAD size.", + "Class 1 CHA is not reset", + "Invalid CHA combination was selected", + "Invalid CHA selected.", +}; + +static const char * const rng_err_id_list[] = { + "", + "", + "", + "Instantiate", + "Not instantiated", + "Test 
instantiate", + "Prediction resistance", + "Prediction resistance and test request", + "Uninstantiate", + "Secure key generation", + "", + "Hardware error", + "Continuous check" +}; + +static int report_ccb_status(struct device *jrdev, const u32 status, + const char *error) +{ + u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> + JRSTA_CCBERR_CHAID_SHIFT; + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; + u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> + JRSTA_DECOERR_INDEX_SHIFT; + char *idx_str; + const char *cha_str = "unidentified cha_id value 0x"; + char cha_err_code[3] = { 0 }; + const char *err_str = "unidentified err_id value 0x"; + char err_err_code[3] = { 0 }; + + if (status & JRSTA_DECOERR_JUMP) + idx_str = "jump tgt desc idx"; + else + idx_str = "desc idx"; + + if (cha_id < ARRAY_SIZE(cha_id_list)) + cha_str = cha_id_list[cha_id]; + else + snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id); + + if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG && + err_id < ARRAY_SIZE(rng_err_id_list) && + strlen(rng_err_id_list[err_id])) { + /* RNG-only error */ + err_str = rng_err_id_list[err_id]; + } else { + err_str = err_id_list[err_id]; + } + + /* + * CCB ICV check failures are part of normal operation life; + * we leave the upper layers to do what they want with them. + */ + if (err_id == JRSTA_CCBERR_ERRID_ICVCHK) + return -EBADMSG; + + dev_err_ratelimited(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", status, + error, idx_str, idx, cha_str, cha_err_code, + err_str, err_err_code); + + return -EINVAL; +} + +static int report_jump_status(struct device *jrdev, const u32 status, + const char *error) +{ + dev_err(jrdev, "%08x: %s: %s() not implemented\n", + status, error, __func__); + + return -EINVAL; +} + +static int report_deco_status(struct device *jrdev, const u32 status, + const char *error) +{ + u8 err_id = status & JRSTA_DECOERR_ERROR_MASK; + u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> + JRSTA_DECOERR_INDEX_SHIFT; + char *idx_str; + const char *err_str = "unidentified error value 0x"; + char err_err_code[3] = { 0 }; + int i; + + if (status & JRSTA_DECOERR_JUMP) + idx_str = "jump tgt desc idx"; + else + idx_str = "desc idx"; + + for (i = 0; i < ARRAY_SIZE(desc_error_list); i++) + if (desc_error_list[i].value == err_id) + break; + + if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) + err_str = desc_error_list[i].error_text; + else + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); + + dev_err(jrdev, "%08x: %s: %s %d: %s%s\n", + status, error, idx_str, idx, err_str, err_err_code); + + return -EINVAL; +} + +static int report_qi_status(struct device *qidev, const u32 status, + const char *error) +{ + u8 err_id = status & JRSTA_QIERR_ERROR_MASK; + const char *err_str = "unidentified error value 0x"; + char err_err_code[3] = { 0 }; + int i; + + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++) + if (qi_error_list[i].value == err_id) + break; + + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text) + err_str = qi_error_list[i].error_text; + else + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); + + dev_err(qidev, "%08x: %s: %s%s\n", + status, error, err_str, err_err_code); + + return -EINVAL; +} + +static int report_jr_status(struct device *jrdev, const u32 status, + const char *error) +{ + dev_err(jrdev, "%08x: %s: %s() not implemented\n", + status, error, __func__); + + return -EINVAL; +} + +static int report_cond_code_status(struct device *jrdev, const u32 status, + const char *error) +{ + dev_err(jrdev, 
"%08x: %s: %s() not implemented\n", + status, error, __func__); + + return -EINVAL; +} + +int caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) +{ + static const struct stat_src { + int (*report_ssed)(struct device *jrdev, const u32 status, + const char *error); + const char *error; + } status_src[16] = { + { NULL, "No error" }, + { NULL, NULL }, + { report_ccb_status, "CCB" }, + { report_jump_status, "Jump" }, + { report_deco_status, "DECO" }, + { report_qi_status, "Queue Manager Interface" }, + { report_jr_status, "Job Ring" }, + { report_cond_code_status, "Condition Code" }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + { NULL, NULL }, + }; + u32 ssrc = status >> JRSTA_SSRC_SHIFT; + const char *error = status_src[ssrc].error; + + /* + * If there is an error handling function, call it to report the error. + * Otherwise print the error source name. + */ + if (status_src[ssrc].report_ssed) + return status_src[ssrc].report_ssed(jrdev, status, error); + + if (error) + dev_err(jrdev, "%d: %s\n", ssrc, error); + else + dev_err(jrdev, "%d: unknown error source\n", ssrc); + + return -EINVAL; +} +EXPORT_SYMBOL(caam_strstatus); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM error reporting"); +MODULE_AUTHOR("Freescale Semiconductor"); diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h new file mode 100644 index 000000000..16809fa8f --- /dev/null +++ b/drivers/crypto/caam/error.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CAAM Error Reporting code header + * + * Copyright 2009-2011 Freescale Semiconductor, Inc. + */ + +#ifndef CAAM_ERROR_H +#define CAAM_ERROR_H + +#include "desc.h" + +#define CAAM_ERROR_STR_MAX 302 + +int caam_strstatus(struct device *dev, u32 status, bool qi_v2); + +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) + +void caam_dump_sg(const char *prefix_str, int prefix_type, + int rowsize, int groupsize, struct scatterlist *sg, + size_t tlen, bool ascii); + +static inline bool is_mdha(u32 algtype) +{ + return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) == + OP_ALG_CHA_MDHA; +} +#endif /* CAAM_ERROR_H */ diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h new file mode 100644 index 000000000..9112279a4 --- /dev/null +++ b/drivers/crypto/caam/intern.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CAAM/SEC 4.x driver backend + * Private/internal definitions between modules + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. 
+ * Copyright 2019 NXP + */ + +#ifndef INTERN_H +#define INTERN_H + +#include "ctrl.h" +#include <crypto/engine.h> + +/* Currently comes from Kconfig param as a ^2 (driver-required) */ +#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE) + +/* Kconfig params for interrupt coalescing if selected (else zero) */ +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC +#define JOBR_INTC JRCFG_ICEN +#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD +#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD +#else +#define JOBR_INTC 0 +#define JOBR_INTC_TIME_THLD 0 +#define JOBR_INTC_COUNT_THLD 0 +#endif + +/* + * Storage for tracking each in-process entry moving across a ring + * Each entry on an output ring needs one of these + */ +struct caam_jrentry_info { + void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg); + void *cbkarg; /* Argument per ring entry */ + u32 *desc_addr_virt; /* Stored virt addr for postprocessing */ + dma_addr_t desc_addr_dma; /* Stored bus addr for done matching */ + u32 desc_size; /* Stored size for postprocessing, header derived */ +}; + +/* Private sub-storage for a single JobR */ +struct caam_drv_private_jr { + struct list_head list_node; /* Job Ring device list */ + struct device *dev; + int ridx; + struct caam_job_ring __iomem *rregs; /* JobR's register space */ + struct tasklet_struct irqtask; + int irq; /* One per queue */ + bool hwrng; + + /* Number of scatterlist crypt transforms active on the JobR */ + atomic_t tfm_count ____cacheline_aligned; + + /* Job ring info */ + struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ + spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */ + u32 inpring_avail; /* Number of free entries in input ring */ + int head; /* entinfo (s/w ring) head index */ + void *inpring; /* Base of input ring, alloc + * DMA-safe */ + int out_ring_read_index; /* Output index "tail" */ + int tail; /* entinfo (s/w ring) tail index */ + void *outring; /* Base of output ring, DMA-safe */ + struct crypto_engine *engine; +}; + +/* + * Driver-private storage for a single CAAM block instance + */ +struct caam_drv_private { + /* Physical-presence section */ + struct caam_ctrl __iomem *ctrl; /* controller region */ + struct caam_deco __iomem *deco; /* DECO/CCB views */ + struct caam_assurance __iomem *assure; + struct caam_queue_if __iomem *qi; /* QI control region */ + struct caam_job_ring __iomem *jr[4]; /* JobR's register space */ + + struct iommu_domain *domain; + + /* + * Detected geometry block. Filled in from device tree if powerpc, + * or from register-based version detection code + */ + u8 total_jobrs; /* Total Job Rings in device */ + u8 qi_present; /* Nonzero if QI present in device */ + u8 mc_en; /* Nonzero if MC f/w is active */ + int secvio_irq; /* Security violation interrupt number */ + int virt_en; /* Virtualization enabled in CAAM */ + int era; /* CAAM Era (internal HW revision) */ + +#define RNG4_MAX_HANDLES 2 + /* RNG4 block */ + u32 rng4_sh_init; /* This bitmap shows which of the State + Handles of the RNG4 block are initialized + by this driver */ + + struct clk_bulk_data *clks; + int num_clks; + /* + * debugfs entries for developer view into driver/device + * variables at runtime. 
+ */ +#ifdef CONFIG_DEBUG_FS + struct dentry *ctl; /* controller dir */ + struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap; +#endif +}; + +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API + +int caam_algapi_init(struct device *dev); +void caam_algapi_exit(void); + +#else + +static inline int caam_algapi_init(struct device *dev) +{ + return 0; +} + +static inline void caam_algapi_exit(void) +{ +} + +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */ + +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API + +int caam_algapi_hash_init(struct device *dev); +void caam_algapi_hash_exit(void); + +#else + +static inline int caam_algapi_hash_init(struct device *dev) +{ + return 0; +} + +static inline void caam_algapi_hash_exit(void) +{ +} + +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */ + +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API + +int caam_pkc_init(struct device *dev); +void caam_pkc_exit(void); + +#else + +static inline int caam_pkc_init(struct device *dev) +{ + return 0; +} + +static inline void caam_pkc_exit(void) +{ +} + +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */ + +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API + +int caam_rng_init(struct device *dev); +void caam_rng_exit(struct device *dev); + +#else + +static inline int caam_rng_init(struct device *dev) +{ + return 0; +} + +static inline void caam_rng_exit(struct device *dev) {} + +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */ + +#ifdef CONFIG_CAAM_QI + +int caam_qi_algapi_init(struct device *dev); +void caam_qi_algapi_exit(void); + +#else + +static inline int caam_qi_algapi_init(struct device *dev) +{ + return 0; +} + +static inline void caam_qi_algapi_exit(void) +{ +} + +#endif /* CONFIG_CAAM_QI */ + +static inline u64 caam_get_dma_mask(struct device *dev) +{ + struct device_node *nprop = dev->of_node; + + if (caam_ptr_sz != sizeof(u64)) + return DMA_BIT_MASK(32); + + if (caam_dpaa2) + return DMA_BIT_MASK(49); + + if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring") || + of_device_is_compatible(nprop, "fsl,sec-v5.0")) + return DMA_BIT_MASK(40); + + return DMA_BIT_MASK(36); +} + + +#endif /* INTERN_H */ diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c new file mode 100644 index 000000000..6f669966b --- /dev/null +++ b/drivers/crypto/caam/jr.c @@ -0,0 +1,637 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * CAAM/SEC 4.x transport/backend driver + * JobR backend functionality + * + * Copyright 2008-2012 Freescale Semiconductor, Inc. 
+ * Copyright 2019 NXP + */ + +#include <linux/of_irq.h> +#include <linux/of_address.h> + +#include "compat.h" +#include "ctrl.h" +#include "regs.h" +#include "jr.h" +#include "desc.h" +#include "intern.h" + +struct jr_driver_data { + /* List of Physical JobR's with the Driver */ + struct list_head jr_list; + spinlock_t jr_alloc_lock; /* jr_list lock */ +} ____cacheline_aligned; + +static struct jr_driver_data driver_data; +static DEFINE_MUTEX(algs_lock); +static unsigned int active_devs; + +static void register_algs(struct caam_drv_private_jr *jrpriv, + struct device *dev) +{ + mutex_lock(&algs_lock); + + if (++active_devs != 1) + goto algs_unlock; + + caam_algapi_init(dev); + caam_algapi_hash_init(dev); + caam_pkc_init(dev); + jrpriv->hwrng = !caam_rng_init(dev); + caam_qi_algapi_init(dev); + +algs_unlock: + mutex_unlock(&algs_lock); +} + +static void unregister_algs(void) +{ + mutex_lock(&algs_lock); + + if (--active_devs != 0) + goto algs_unlock; + + caam_qi_algapi_exit(); + + caam_pkc_exit(); + caam_algapi_hash_exit(); + caam_algapi_exit(); + +algs_unlock: + mutex_unlock(&algs_lock); +} + +static void caam_jr_crypto_engine_exit(void *data) +{ + struct device *jrdev = data; + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); + + /* Free the resources of crypto-engine */ + crypto_engine_exit(jrpriv->engine); +} + +static int caam_reset_hw_jr(struct device *dev) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + unsigned int timeout = 100000; + + /* + * mask interrupts since we are going to poll + * for reset completion status + */ + clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK); + + /* initiate flush (required prior to reset) */ + wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); + while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == + JRINT_ERR_HALT_INPROGRESS) && --timeout) + cpu_relax(); + + if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != + JRINT_ERR_HALT_COMPLETE || timeout == 0) { + dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); + return -EIO; + } + + /* initiate reset */ + timeout = 100000; + wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); + while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) + cpu_relax(); + + if (timeout == 0) { + dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); + return -EIO; + } + + /* unmask interrupts */ + clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); + + return 0; +} + +/* + * Shutdown JobR independent of platform property code + */ +static int caam_jr_shutdown(struct device *dev) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + int ret; + + ret = caam_reset_hw_jr(dev); + + tasklet_kill(&jrp->irqtask); + + return ret; +} + +static int caam_jr_remove(struct platform_device *pdev) +{ + int ret; + struct device *jrdev; + struct caam_drv_private_jr *jrpriv; + + jrdev = &pdev->dev; + jrpriv = dev_get_drvdata(jrdev); + + if (jrpriv->hwrng) + caam_rng_exit(jrdev->parent); + + /* + * Return EBUSY if job ring already allocated. 
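+ * That is, some crypto transform still holds a reference taken through
+ * caam_jr_alloc(), as reflected in the ring's tfm_count.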
+ */ + if (atomic_read(&jrpriv->tfm_count)) { + dev_err(jrdev, "Device is busy\n"); + return -EBUSY; + } + + /* Unregister JR-based RNG & crypto algorithms */ + unregister_algs(); + + /* Remove the node from Physical JobR list maintained by driver */ + spin_lock(&driver_data.jr_alloc_lock); + list_del(&jrpriv->list_node); + spin_unlock(&driver_data.jr_alloc_lock); + + /* Release ring */ + ret = caam_jr_shutdown(jrdev); + if (ret) + dev_err(jrdev, "Failed to shut down job ring\n"); + + return ret; +} + +/* Main per-ring interrupt handler */ +static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) +{ + struct device *dev = st_dev; + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + u32 irqstate; + + /* + * Check the output ring for ready responses, kick + * tasklet if jobs done. + */ + irqstate = rd_reg32(&jrp->rregs->jrintstatus); + if (!irqstate) + return IRQ_NONE; + + /* + * If JobR error, we got more development work to do + * Flag a bug now, but we really need to shut down and + * restart the queue (and fix code). + */ + if (irqstate & JRINT_JR_ERROR) { + dev_err(dev, "job ring error: irqstate: %08x\n", irqstate); + BUG(); + } + + /* mask valid interrupts */ + clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK); + + /* Have valid interrupt at this point, just ACK and trigger */ + wr_reg32(&jrp->rregs->jrintstatus, irqstate); + + preempt_disable(); + tasklet_schedule(&jrp->irqtask); + preempt_enable(); + + return IRQ_HANDLED; +} + +/* Deferred service handler, run as interrupt-fired tasklet */ +static void caam_jr_dequeue(unsigned long devarg) +{ + int hw_idx, sw_idx, i, head, tail; + struct device *dev = (struct device *)devarg; + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); + u32 *userdesc, userstatus; + void *userarg; + u32 outring_used = 0; + + while (outring_used || + (outring_used = rd_reg32(&jrp->rregs->outring_used))) { + + head = READ_ONCE(jrp->head); + + sw_idx = tail = jrp->tail; + hw_idx = jrp->out_ring_read_index; + + for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { + sw_idx = (tail + i) & (JOBR_DEPTH - 1); + + if (jr_outentry_desc(jrp->outring, hw_idx) == + caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma)) + break; /* found */ + } + /* we should never fail to find a matching descriptor */ + BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); + + /* Unmap just-run descriptor so we can post-process */ + dma_unmap_single(dev, + caam_dma_to_cpu(jr_outentry_desc(jrp->outring, + hw_idx)), + jrp->entinfo[sw_idx].desc_size, + DMA_TO_DEVICE); + + /* mark completed, avoid matching on a recycled desc addr */ + jrp->entinfo[sw_idx].desc_addr_dma = 0; + + /* Stash callback params */ + usercall = jrp->entinfo[sw_idx].callbk; + userarg = jrp->entinfo[sw_idx].cbkarg; + userdesc = jrp->entinfo[sw_idx].desc_addr_virt; + userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring, + hw_idx)); + + /* + * Make sure all information from the job has been obtained + * before telling CAAM that the job has been removed from the + * output ring. + */ + mb(); + + /* set done */ + wr_reg32(&jrp->rregs->outring_rmvd, 1); + + jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & + (JOBR_DEPTH - 1); + + /* + * if this job completed out-of-order, do not increment + * the tail. 
Otherwise, increment tail by 1 plus the + * number of subsequent jobs already completed out-of-order + */ + if (sw_idx == tail) { + do { + tail = (tail + 1) & (JOBR_DEPTH - 1); + } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && + jrp->entinfo[tail].desc_addr_dma == 0); + + jrp->tail = tail; + } + + /* Finally, execute user's callback */ + usercall(dev, userdesc, userstatus, userarg); + outring_used--; + } + + /* reenable / unmask IRQs */ + clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); +} + +/** + * caam_jr_alloc() - Alloc a job ring for someone to use as needed. + * + * returns : pointer to the newly allocated physical + * JobR dev can be written to if successful. + **/ +struct device *caam_jr_alloc(void) +{ + struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; + struct device *dev = ERR_PTR(-ENODEV); + int min_tfm_cnt = INT_MAX; + int tfm_cnt; + + spin_lock(&driver_data.jr_alloc_lock); + + if (list_empty(&driver_data.jr_list)) { + spin_unlock(&driver_data.jr_alloc_lock); + return ERR_PTR(-ENODEV); + } + + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { + tfm_cnt = atomic_read(&jrpriv->tfm_count); + if (tfm_cnt < min_tfm_cnt) { + min_tfm_cnt = tfm_cnt; + min_jrpriv = jrpriv; + } + if (!min_tfm_cnt) + break; + } + + if (min_jrpriv) { + atomic_inc(&min_jrpriv->tfm_count); + dev = min_jrpriv->dev; + } + spin_unlock(&driver_data.jr_alloc_lock); + + return dev; +} +EXPORT_SYMBOL(caam_jr_alloc); + +/** + * caam_jr_free() - Free the Job Ring + * @rdev: points to the dev that identifies the Job ring to + * be released. + **/ +void caam_jr_free(struct device *rdev) +{ + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); + + atomic_dec(&jrpriv->tfm_count); +} +EXPORT_SYMBOL(caam_jr_free); + +/** + * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS + * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's + * descriptor. + * @dev: struct device of the job ring to be used + * @desc: points to a job descriptor that execute our request. All + * descriptors (and all referenced data) must be in a DMAable + * region, and all data references must be physical addresses + * accessible to CAAM (i.e. within a PAMU window granted + * to it). + * @cbk: pointer to a callback function to be invoked upon completion + * of this request. This has the form: + * callback(struct device *dev, u32 *desc, u32 stat, void *arg) + * where: + * dev: contains the job ring device that processed this + * response. + * desc: descriptor that initiated the request, same as + * "desc" being argued to caam_jr_enqueue(). + * status: untranslated status received from CAAM. See the + * reference manual for a detailed description of + * error meaning, or see the JRSTA definitions in the + * register header file + * areq: optional pointer to an argument passed with the + * original request + * @areq: optional pointer to a user argument for use at callback + * time. 
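+ *
+ * A minimal caller-side sketch, with hypothetical names (my_done,
+ * my_result); gen_split_key() in key_gen.c follows the same pattern:
+ *
+ *	ret = caam_jr_enqueue(jrdev, desc, my_done, &my_result);
+ *	if (ret == -EINPROGRESS) {
+ *		wait_for_completion(&my_result.completion);
+ *		ret = my_result.err;
+ *	}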
+ **/ +int caam_jr_enqueue(struct device *dev, u32 *desc, + void (*cbk)(struct device *dev, u32 *desc, + u32 status, void *areq), + void *areq) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + struct caam_jrentry_info *head_entry; + int head, tail, desc_size; + dma_addr_t desc_dma; + + desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32); + desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, desc_dma)) { + dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n"); + return -EIO; + } + + spin_lock_bh(&jrp->inplock); + + head = jrp->head; + tail = READ_ONCE(jrp->tail); + + if (!jrp->inpring_avail || + CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { + spin_unlock_bh(&jrp->inplock); + dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); + return -ENOSPC; + } + + head_entry = &jrp->entinfo[head]; + head_entry->desc_addr_virt = desc; + head_entry->desc_size = desc_size; + head_entry->callbk = (void *)cbk; + head_entry->cbkarg = areq; + head_entry->desc_addr_dma = desc_dma; + + jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma)); + + /* + * Guarantee that the descriptor's DMA address has been written to + * the next slot in the ring before the write index is updated, since + * other cores may update this index independently. + */ + smp_wmb(); + + jrp->head = (head + 1) & (JOBR_DEPTH - 1); + + /* + * Ensure that all job information has been written before + * notifying CAAM that a new job was added to the input ring + * using a memory barrier. The wr_reg32() uses api iowrite32() + * to do the register write. iowrite32() issues a memory barrier + * before the write operation. + */ + + wr_reg32(&jrp->rregs->inpring_jobadd, 1); + + jrp->inpring_avail--; + if (!jrp->inpring_avail) + jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail); + + spin_unlock_bh(&jrp->inplock); + + return -EINPROGRESS; +} +EXPORT_SYMBOL(caam_jr_enqueue); + +/* + * Init JobR independent of platform property detection + */ +static int caam_jr_init(struct device *dev) +{ + struct caam_drv_private_jr *jrp; + dma_addr_t inpbusaddr, outbusaddr; + int i, error; + + jrp = dev_get_drvdata(dev); + + error = caam_reset_hw_jr(dev); + if (error) + return error; + + jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY * + JOBR_DEPTH, &inpbusaddr, + GFP_KERNEL); + if (!jrp->inpring) + return -ENOMEM; + + jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY * + JOBR_DEPTH, &outbusaddr, + GFP_KERNEL); + if (!jrp->outring) + return -ENOMEM; + + jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo), + GFP_KERNEL); + if (!jrp->entinfo) + return -ENOMEM; + + for (i = 0; i < JOBR_DEPTH; i++) + jrp->entinfo[i].desc_addr_dma = !0; + + /* Setup rings */ + jrp->out_ring_read_index = 0; + jrp->head = 0; + jrp->tail = 0; + + wr_reg64(&jrp->rregs->inpring_base, inpbusaddr); + wr_reg64(&jrp->rregs->outring_base, outbusaddr); + wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH); + wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH); + + jrp->inpring_avail = JOBR_DEPTH; + + spin_lock_init(&jrp->inplock); + + /* Select interrupt coalescing parameters */ + clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC | + (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | + (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); + + tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); + + /* Connect job ring interrupt handler. 
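+ * The handler itself only acks and masks the interrupt and schedules
+ * irqtask; completed jobs are processed by caam_jr_dequeue().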
*/ + error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED, + dev_name(dev), dev); + if (error) { + dev_err(dev, "can't connect JobR %d interrupt (%d)\n", + jrp->ridx, jrp->irq); + tasklet_kill(&jrp->irqtask); + } + + return error; +} + +static void caam_jr_irq_dispose_mapping(void *data) +{ + irq_dispose_mapping((unsigned long)data); +} + +/* + * Probe routine for each detected JobR subsystem. + */ +static int caam_jr_probe(struct platform_device *pdev) +{ + struct device *jrdev; + struct device_node *nprop; + struct caam_job_ring __iomem *ctrl; + struct caam_drv_private_jr *jrpriv; + static int total_jobrs; + struct resource *r; + int error; + + jrdev = &pdev->dev; + jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL); + if (!jrpriv) + return -ENOMEM; + + dev_set_drvdata(jrdev, jrpriv); + + /* save ring identity relative to detection */ + jrpriv->ridx = total_jobrs++; + + nprop = pdev->dev.of_node; + /* Get configuration properties from device tree */ + /* First, get register page */ + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(jrdev, "platform_get_resource() failed\n"); + return -ENOMEM; + } + + ctrl = devm_ioremap(jrdev, r->start, resource_size(r)); + if (!ctrl) { + dev_err(jrdev, "devm_ioremap() failed\n"); + return -ENOMEM; + } + + jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; + + error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev)); + if (error) { + dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n", + error); + return error; + } + + /* Initialize crypto engine */ + jrpriv->engine = crypto_engine_alloc_init(jrdev, false); + if (!jrpriv->engine) { + dev_err(jrdev, "Could not init crypto-engine\n"); + return -ENOMEM; + } + + error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit, + jrdev); + if (error) + return error; + + /* Start crypto engine */ + error = crypto_engine_start(jrpriv->engine); + if (error) { + dev_err(jrdev, "Could not start crypto-engine\n"); + return error; + } + + /* Identify the interrupt */ + jrpriv->irq = irq_of_parse_and_map(nprop, 0); + if (!jrpriv->irq) { + dev_err(jrdev, "irq_of_parse_and_map failed\n"); + return -EINVAL; + } + + error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping, + (void *)(unsigned long)jrpriv->irq); + if (error) + return error; + + /* Now do the platform independent part */ + error = caam_jr_init(jrdev); /* now turn on hardware */ + if (error) + return error; + + jrpriv->dev = jrdev; + spin_lock(&driver_data.jr_alloc_lock); + list_add_tail(&jrpriv->list_node, &driver_data.jr_list); + spin_unlock(&driver_data.jr_alloc_lock); + + atomic_set(&jrpriv->tfm_count, 0); + + register_algs(jrpriv, jrdev->parent); + + return 0; +} + +static const struct of_device_id caam_jr_match[] = { + { + .compatible = "fsl,sec-v4.0-job-ring", + }, + { + .compatible = "fsl,sec4.0-job-ring", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, caam_jr_match); + +static struct platform_driver caam_jr_driver = { + .driver = { + .name = "caam_jr", + .of_match_table = caam_jr_match, + }, + .probe = caam_jr_probe, + .remove = caam_jr_remove, +}; + +static int __init jr_driver_init(void) +{ + spin_lock_init(&driver_data.jr_alloc_lock); + INIT_LIST_HEAD(&driver_data.jr_list); + return platform_driver_register(&caam_jr_driver); +} + +static void __exit jr_driver_exit(void) +{ + platform_driver_unregister(&caam_jr_driver); +} + +module_init(jr_driver_init); +module_exit(jr_driver_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("FSL CAAM JR request backend"); 
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h new file mode 100644 index 000000000..eab611530 --- /dev/null +++ b/drivers/crypto/caam/jr.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CAAM public-level include definitions for the JobR backend + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + */ + +#ifndef JR_H +#define JR_H + +/* Prototypes for backend-level services exposed to APIs */ +struct device *caam_jr_alloc(void); +void caam_jr_free(struct device *rdev); +int caam_jr_enqueue(struct device *dev, u32 *desc, + void (*cbk)(struct device *dev, u32 *desc, u32 status, + void *areq), + void *areq); + +#endif /* JR_H */ diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c new file mode 100644 index 000000000..b0e8a4939 --- /dev/null +++ b/drivers/crypto/caam/key_gen.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CAAM/SEC 4.x functions for handling key-generation jobs + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + * + */ +#include "compat.h" +#include "jr.h" +#include "error.h" +#include "desc_constr.h" +#include "key_gen.h" + +void split_key_done(struct device *dev, u32 *desc, u32 err, + void *context) +{ + struct split_key_result *res = context; + int ecode = 0; + + dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); + + if (err) + ecode = caam_jr_strstatus(dev, err); + + res->err = ecode; + + complete(&res->completion); +} +EXPORT_SYMBOL(split_key_done); +/* +get a split ipad/opad key + +Split key generation----------------------------------------------- + +[00] 0xb0810008 jobdesc: stidx=1 share=never len=8 +[01] 0x04000014 key: class2->keyreg len=20 + @0xffe01000 +[03] 0x84410014 operation: cls2-op sha1 hmac init dec +[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm +[05] 0xa4000001 jump: class2 local all ->1 [06] +[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 + @0xffe04000 +*/ +int gen_split_key(struct device *jrdev, u8 *key_out, + struct alginfo * const adata, const u8 *key_in, u32 keylen, + int max_keylen) +{ + u32 *desc; + struct split_key_result result; + dma_addr_t dma_addr; + unsigned int local_max; + int ret = -ENOMEM; + + adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); + adata->keylen_pad = split_key_pad_len(adata->algtype & + OP_ALG_ALGSEL_MASK); + local_max = max(keylen, adata->keylen_pad); + + dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", + adata->keylen, adata->keylen_pad); + print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); + + if (local_max > max_keylen) + return -EINVAL; + + desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); + if (!desc) { + dev_err(jrdev, "unable to allocate key input memory\n"); + return ret; + } + + memcpy(key_out, key_in, keylen); + + dma_addr = dma_map_single(jrdev, key_out, local_max, DMA_BIDIRECTIONAL); + if (dma_mapping_error(jrdev, dma_addr)) { + dev_err(jrdev, "unable to map key memory\n"); + goto out_free; + } + + init_job_desc(desc, 0); + append_key(desc, dma_addr, keylen, CLASS_2 | KEY_DEST_CLASS_REG); + + /* Sets MDHA up into an HMAC-INIT */ + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) | + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT | + OP_ALG_AS_INIT); + + /* + * do a FIFO_LOAD of zero, this will trigger the internal key expansion + * into both pads inside MDHA + */ + append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | + 
FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); + + /* + * FIFO_STORE with the explicit split-key content store + * (0x26 output type) + */ + append_fifo_store(desc, dma_addr, adata->keylen, + LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); + + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + result.err = 0; + init_completion(&result.completion); + + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); + if (ret == -EINPROGRESS) { + /* in progress */ + wait_for_completion(&result.completion); + ret = result.err; + + print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key_out, + adata->keylen_pad, 1); + } + + dma_unmap_single(jrdev, dma_addr, local_max, DMA_BIDIRECTIONAL); +out_free: + kfree(desc); + return ret; +} +EXPORT_SYMBOL(gen_split_key); diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h new file mode 100644 index 000000000..818f78f6f --- /dev/null +++ b/drivers/crypto/caam/key_gen.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CAAM/SEC 4.x definitions for handling key-generation jobs + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + * + */ + +/** + * split_key_len - Compute MDHA split key length for a given algorithm + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, + * SHA224, SHA384, SHA512. + * + * Return: MDHA split key length + */ +static inline u32 split_key_len(u32 hash) +{ + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; + u32 idx; + + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; + + return (u32)(mdpadlen[idx] * 2); +} + +/** + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, + * SHA224, SHA384, SHA512. + * + * Return: MDHA split key pad length + */ +static inline u32 split_key_pad_len(u32 hash) +{ + return ALIGN(split_key_len(hash), 16); +} + +struct split_key_result { + struct completion completion; + int err; +}; + +void split_key_done(struct device *dev, u32 *desc, u32 err, void *context); + +int gen_split_key(struct device *jrdev, u8 *key_out, + struct alginfo * const adata, const u8 *key_in, u32 keylen, + int max_keylen); diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h new file mode 100644 index 000000000..8ccc22075 --- /dev/null +++ b/drivers/crypto/caam/pdb.h @@ -0,0 +1,599 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CAAM Protocol Data Block (PDB) definition header file + * + * Copyright 2008-2016 Freescale Semiconductor, Inc. + * + */ + +#ifndef CAAM_PDB_H +#define CAAM_PDB_H +#include "compat.h" + +/* + * PDB- IPSec ESP Header Modification Options + */ +#define PDBHMO_ESP_DECAP_SHIFT 28 +#define PDBHMO_ESP_ENCAP_SHIFT 28 +/* + * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the + * Options Byte IP version (IPvsn) field: + * if IPv4, decrement the inner IP header TTL field (byte 8); + * if IPv6 decrement the inner IP header Hop Limit field (byte 7). +*/ +#define PDBHMO_ESP_DECAP_DEC_TTL (0x02 << PDBHMO_ESP_DECAP_SHIFT) +#define PDBHMO_ESP_ENCAP_DEC_TTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT) +/* + * Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte + * from the outer IP header to the inner IP header. 
+ */ +#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT) +/* + * Encap- Copy DF bit -if an IPv4 tunnel mode outer IP header is coming from + * the PDB, copy the DF bit from the inner IP header to the outer IP header. + */ +#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT) + +#define PDBNH_ESP_ENCAP_SHIFT 16 +#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT) + +#define PDBHDRLEN_ESP_DECAP_SHIFT 16 +#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT) + +#define PDB_NH_OFFSET_SHIFT 8 +#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT) + +/* + * PDB - IPSec ESP Encap/Decap Options + */ +#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */ +#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */ +#define PDBOPTS_ESP_ARS128 0x80 /* 128-entry antireplay window */ +#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */ +#define PDBOPTS_ESP_ARS_MASK 0xc0 /* antireplay window mask */ +#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */ +#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */ +#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */ +#define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */ +#define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */ +#define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */ +#define PDBOPTS_ESP_AOFL 0x04 /* adjust out frame len (decap, SEC>=5.3)*/ +#define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */ +#define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */ +#define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */ +#define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */ +#define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */ + +/* + * General IPSec encap/decap PDB definitions + */ + +/** + * ipsec_encap_cbc - PDB part for IPsec CBC encapsulation + * @iv: 16-byte array initialization vector + */ +struct ipsec_encap_cbc { + u8 iv[16]; +}; + +/** + * ipsec_encap_ctr - PDB part for IPsec CTR encapsulation + * @ctr_nonce: 4-byte array nonce + * @ctr_initial: initial count constant + * @iv: initialization vector + */ +struct ipsec_encap_ctr { + u8 ctr_nonce[4]; + u32 ctr_initial; + u64 iv; +}; + +/** + * ipsec_encap_ccm - PDB part for IPsec CCM encapsulation + * @salt: 3-byte array salt (lower 24 bits) + * @ccm_opt: CCM algorithm options - MSB-LSB description: + * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV, + * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610) + * ctr_flags (8b) - counter flags; constant equal to 0x3 + * ctr_initial (16b) - initial count constant + * @iv: initialization vector + */ +struct ipsec_encap_ccm { + u8 salt[4]; + u32 ccm_opt; + u64 iv; +}; + +/** + * ipsec_encap_gcm - PDB part for IPsec GCM encapsulation + * @salt: 3-byte array salt (lower 24 bits) + * @rsvd: reserved, do not use + * @iv: initialization vector + */ +struct ipsec_encap_gcm { + u8 salt[4]; + u32 rsvd1; + u64 iv; +}; + +/** + * ipsec_encap_pdb - PDB for IPsec encapsulation + * @options: MSB-LSB description + * hmo (header manipulation options) - 4b + * reserved - 4b + * next header - 8b + * next header offset - 8b + * option flags (depend on selected algorithm) - 8b + * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN) + * @seq_num: IPsec sequence number + * @spi: IPsec SPI (Security Parameters Index) + * @ip_hdr_len: optional IP Header length (in bytes) + * reserved - 16b + * Opt. 
IP Hdr Len - 16b + * @ip_hdr: optional IP Header content + */ +struct ipsec_encap_pdb { + u32 options; + u32 seq_num_ext_hi; + u32 seq_num; + union { + struct ipsec_encap_cbc cbc; + struct ipsec_encap_ctr ctr; + struct ipsec_encap_ccm ccm; + struct ipsec_encap_gcm gcm; + }; + u32 spi; + u32 ip_hdr_len; + u32 ip_hdr[0]; +}; + +/** + * ipsec_decap_cbc - PDB part for IPsec CBC decapsulation + * @rsvd: reserved, do not use + */ +struct ipsec_decap_cbc { + u32 rsvd[2]; +}; + +/** + * ipsec_decap_ctr - PDB part for IPsec CTR decapsulation + * @ctr_nonce: 4-byte array nonce + * @ctr_initial: initial count constant + */ +struct ipsec_decap_ctr { + u8 ctr_nonce[4]; + u32 ctr_initial; +}; + +/** + * ipsec_decap_ccm - PDB part for IPsec CCM decapsulation + * @salt: 3-byte salt (lower 24 bits) + * @ccm_opt: CCM algorithm options - MSB-LSB description: + * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV, + * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610) + * ctr_flags (8b) - counter flags; constant equal to 0x3 + * ctr_initial (16b) - initial count constant + */ +struct ipsec_decap_ccm { + u8 salt[4]; + u32 ccm_opt; +}; + +/** + * ipsec_decap_gcm - PDB part for IPsec GCN decapsulation + * @salt: 4-byte salt + * @rsvd: reserved, do not use + */ +struct ipsec_decap_gcm { + u8 salt[4]; + u32 resvd; +}; + +/** + * ipsec_decap_pdb - PDB for IPsec decapsulation + * @options: MSB-LSB description + * hmo (header manipulation options) - 4b + * IP header length - 12b + * next header offset - 8b + * option flags (depend on selected algorithm) - 8b + * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN) + * @seq_num: IPsec sequence number + * @anti_replay: Anti-replay window; size depends on ARS (option flags) + */ +struct ipsec_decap_pdb { + u32 options; + union { + struct ipsec_decap_cbc cbc; + struct ipsec_decap_ctr ctr; + struct ipsec_decap_ccm ccm; + struct ipsec_decap_gcm gcm; + }; + u32 seq_num_ext_hi; + u32 seq_num; + __be32 anti_replay[4]; +}; + +/* + * IPSec ESP Datapath Protocol Override Register (DPOVRD) + */ +struct ipsec_deco_dpovrd { +#define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80 + u8 ovrd_ecn; + u8 ip_hdr_len; + u8 nh_offset; + u8 next_header; /* reserved if decap */ +}; + +/* + * IEEE 802.11i WiFi Protocol Data Block + */ +#define WIFI_PDBOPTS_FCS 0x01 +#define WIFI_PDBOPTS_AR 0x40 + +struct wifi_encap_pdb { + u16 mac_hdr_len; + u8 rsvd; + u8 options; + u8 iv_flags; + u8 pri; + u16 pn1; + u32 pn2; + u16 frm_ctrl_mask; + u16 seq_ctrl_mask; + u8 rsvd1[2]; + u8 cnst; + u8 key_id; + u8 ctr_flags; + u8 rsvd2; + u16 ctr_init; +}; + +struct wifi_decap_pdb { + u16 mac_hdr_len; + u8 rsvd; + u8 options; + u8 iv_flags; + u8 pri; + u16 pn1; + u32 pn2; + u16 frm_ctrl_mask; + u16 seq_ctrl_mask; + u8 rsvd1[4]; + u8 ctr_flags; + u8 rsvd2; + u16 ctr_init; +}; + +/* + * IEEE 802.16 WiMAX Protocol Data Block + */ +#define WIMAX_PDBOPTS_FCS 0x01 +#define WIMAX_PDBOPTS_AR 0x40 /* decap only */ + +struct wimax_encap_pdb { + u8 rsvd[3]; + u8 options; + u32 nonce; + u8 b0_flags; + u8 ctr_flags; + u16 ctr_init; + /* begin DECO writeback region */ + u32 pn; + /* end DECO writeback region */ +}; + +struct wimax_decap_pdb { + u8 rsvd[3]; + u8 options; + u32 nonce; + u8 iv_flags; + u8 ctr_flags; + u16 ctr_init; + /* begin DECO writeback region */ + u32 pn; + u8 rsvd1[2]; + u16 antireplay_len; + u64 antireplay_scorecard; + /* end DECO writeback region */ +}; + +/* + * IEEE 801.AE MacSEC Protocol Data Block + */ +#define MACSEC_PDBOPTS_FCS 0x01 +#define MACSEC_PDBOPTS_AR 0x40 /* used in decap only */ 
+ +struct macsec_encap_pdb { + u16 aad_len; + u8 rsvd; + u8 options; + u64 sci; + u16 ethertype; + u8 tci_an; + u8 rsvd1; + /* begin DECO writeback region */ + u32 pn; + /* end DECO writeback region */ +}; + +struct macsec_decap_pdb { + u16 aad_len; + u8 rsvd; + u8 options; + u64 sci; + u8 rsvd1[3]; + /* begin DECO writeback region */ + u8 antireplay_len; + u32 pn; + u64 antireplay_scorecard; + /* end DECO writeback region */ +}; + +/* + * SSL/TLS/DTLS Protocol Data Blocks + */ + +#define TLS_PDBOPTS_ARS32 0x40 +#define TLS_PDBOPTS_ARS64 0xc0 +#define TLS_PDBOPTS_OUTFMT 0x08 +#define TLS_PDBOPTS_IV_WRTBK 0x02 /* 1.1/1.2/DTLS only */ +#define TLS_PDBOPTS_EXP_RND_IV 0x01 /* 1.1/1.2/DTLS only */ + +struct tls_block_encap_pdb { + u8 type; + u8 version[2]; + u8 options; + u64 seq_num; + u32 iv[4]; +}; + +struct tls_stream_encap_pdb { + u8 type; + u8 version[2]; + u8 options; + u64 seq_num; + u8 i; + u8 j; + u8 rsvd1[2]; +}; + +struct dtls_block_encap_pdb { + u8 type; + u8 version[2]; + u8 options; + u16 epoch; + u16 seq_num[3]; + u32 iv[4]; +}; + +struct tls_block_decap_pdb { + u8 rsvd[3]; + u8 options; + u64 seq_num; + u32 iv[4]; +}; + +struct tls_stream_decap_pdb { + u8 rsvd[3]; + u8 options; + u64 seq_num; + u8 i; + u8 j; + u8 rsvd1[2]; +}; + +struct dtls_block_decap_pdb { + u8 rsvd[3]; + u8 options; + u16 epoch; + u16 seq_num[3]; + u32 iv[4]; + u64 antireplay_scorecard; +}; + +/* + * SRTP Protocol Data Blocks + */ +#define SRTP_PDBOPTS_MKI 0x08 +#define SRTP_PDBOPTS_AR 0x40 + +struct srtp_encap_pdb { + u8 x_len; + u8 mki_len; + u8 n_tag; + u8 options; + u32 cnst0; + u8 rsvd[2]; + u16 cnst1; + u16 salt[7]; + u16 cnst2; + u32 rsvd1; + u32 roc; + u32 opt_mki; +}; + +struct srtp_decap_pdb { + u8 x_len; + u8 mki_len; + u8 n_tag; + u8 options; + u32 cnst0; + u8 rsvd[2]; + u16 cnst1; + u16 salt[7]; + u16 cnst2; + u16 rsvd1; + u16 seq_num; + u32 roc; + u64 antireplay_scorecard; +}; + +/* + * DSA/ECDSA Protocol Data Blocks + * Two of these exist: DSA-SIGN, and DSA-VERIFY. They are similar + * except for the treatment of "w" for verify, "s" for sign, + * and the placement of "a,b". 
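+ *
+ * Editor's illustrative sketch (assumptions, not normative): the sgf_ln word
+ * in the PDBs below is packed with the DSA_PDB_* masks defined underneath,
+ * e.g. for hypothetical lengths l_bytes and n_bytes:
+ *
+ *	sgf_ln  = (l_bytes << DSA_PDB_L_SHIFT) & DSA_PDB_L_MASK;
+ *	sgf_ln |= n_bytes & DSA_PDB_N_MASK;
+ *
+ * Exact field semantics are defined by the SEC reference manual.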
+ */ +#define DSA_PDB_SGF_SHIFT 24 +#define DSA_PDB_SGF_MASK (0xff << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_Q (0x80 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_R (0x40 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_G (0x20 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_W (0x10 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_S (0x10 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_F (0x08 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_C (0x04 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_D (0x02 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_AB_SIGN (0x02 << DSA_PDB_SGF_SHIFT) +#define DSA_PDB_SGF_AB_VERIFY (0x01 << DSA_PDB_SGF_SHIFT) + +#define DSA_PDB_L_SHIFT 7 +#define DSA_PDB_L_MASK (0x3ff << DSA_PDB_L_SHIFT) + +#define DSA_PDB_N_MASK 0x7f + +struct dsa_sign_pdb { + u32 sgf_ln; /* Use DSA_PDB_ definitions per above */ + u8 *q; + u8 *r; + u8 *g; /* or Gx,y */ + u8 *s; + u8 *f; + u8 *c; + u8 *d; + u8 *ab; /* ECC only */ + u8 *u; +}; + +struct dsa_verify_pdb { + u32 sgf_ln; + u8 *q; + u8 *r; + u8 *g; /* or Gx,y */ + u8 *w; /* or Wx,y */ + u8 *f; + u8 *c; + u8 *d; + u8 *tmp; /* temporary data block */ + u8 *ab; /* only used if ECC processing */ +}; + +/* RSA Protocol Data Block */ +#define RSA_PDB_SGF_SHIFT 28 +#define RSA_PDB_E_SHIFT 12 +#define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT) +#define RSA_PDB_D_SHIFT 12 +#define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT) +#define RSA_PDB_Q_SHIFT 12 +#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT) + +#define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT) +#define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT) +#define RSA_PRIV_PDB_SGF_F (0x4 << RSA_PDB_SGF_SHIFT) +#define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT) + +#define RSA_PRIV_KEY_FRM_1 0 +#define RSA_PRIV_KEY_FRM_2 1 +#define RSA_PRIV_KEY_FRM_3 2 + +/** + * RSA Encrypt Protocol Data Block + * @sgf: scatter-gather field + * @f_dma: dma address of input data + * @g_dma: dma address of encrypted output data + * @n_dma: dma address of RSA modulus + * @e_dma: dma address of RSA public exponent + * @f_len: length in octets of the input data + */ +struct rsa_pub_pdb { + u32 sgf; + dma_addr_t f_dma; + dma_addr_t g_dma; + dma_addr_t n_dma; + dma_addr_t e_dma; + u32 f_len; +}; + +#define SIZEOF_RSA_PUB_PDB (2 * sizeof(u32) + 4 * caam_ptr_sz) + +/** + * RSA Decrypt PDB - Private Key Form #1 + * @sgf: scatter-gather field + * @g_dma: dma address of encrypted input data + * @f_dma: dma address of output data + * @n_dma: dma address of RSA modulus + * @d_dma: dma address of RSA private exponent + */ +struct rsa_priv_f1_pdb { + u32 sgf; + dma_addr_t g_dma; + dma_addr_t f_dma; + dma_addr_t n_dma; + dma_addr_t d_dma; +}; + +#define SIZEOF_RSA_PRIV_F1_PDB (sizeof(u32) + 4 * caam_ptr_sz) + +/** + * RSA Decrypt PDB - Private Key Form #2 + * @sgf : scatter-gather field + * @g_dma : dma address of encrypted input data + * @f_dma : dma address of output data + * @d_dma : dma address of RSA private exponent + * @p_dma : dma address of RSA prime factor p of RSA modulus n + * @q_dma : dma address of RSA prime factor q of RSA modulus n + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer + * as internal state buffer. It is assumed to be as long as p. + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer + * as internal state buffer. It is assumed to be as long as q. 
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f2_pdb {
+ u32 sgf;
+ dma_addr_t g_dma;
+ dma_addr_t f_dma;
+ dma_addr_t d_dma;
+ dma_addr_t p_dma;
+ dma_addr_t q_dma;
+ dma_addr_t tmp1_dma;
+ dma_addr_t tmp2_dma;
+ u32 p_q_len;
+};
+
+#define SIZEOF_RSA_PRIV_F2_PDB (2 * sizeof(u32) + 7 * caam_ptr_sz)
+
+/**
+ * RSA Decrypt PDB - Private Key Form #3
+ * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
+ * the RSA modulus.
+ * @sgf : scatter-gather field
+ * @g_dma : dma address of encrypted input data
+ * @f_dma : dma address of output data
+ * @c_dma : dma address of RSA CRT coefficient
+ * @p_dma : dma address of RSA prime factor p of RSA modulus n
+ * @q_dma : dma address of RSA prime factor q of RSA modulus n
+ * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
+ * @dq_dma : dma address of RSA CRT exponent of RSA prime factor q
+ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ * as internal state buffer. It is assumed to be as long as p.
+ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ * as internal state buffer. It is assumed to be as long as q.
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f3_pdb {
+ u32 sgf;
+ dma_addr_t g_dma;
+ dma_addr_t f_dma;
+ dma_addr_t c_dma;
+ dma_addr_t p_dma;
+ dma_addr_t q_dma;
+ dma_addr_t dp_dma;
+ dma_addr_t dq_dma;
+ dma_addr_t tmp1_dma;
+ dma_addr_t tmp2_dma;
+ u32 p_q_len;
+};
+
+#define SIZEOF_RSA_PRIV_F3_PDB (2 * sizeof(u32) + 9 * caam_ptr_sz)
+
+#endif
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
new file mode 100644
index 000000000..0d5ee762e
--- /dev/null
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
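+ *
+ * As a rough, editor-added illustration (a sketch, not normative), the job
+ * descriptor built by init_rsa_pub_desc() below ends up laid out as:
+ *
+ *	[ job header, PDB length encoded ]
+ *	[ sgf ][ f_dma ][ g_dma ][ n_dma ][ e_dma ][ f_len ]   <- struct rsa_pub_pdb
+ *	[ OPERATION: OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY ]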
+ */ +#include "caampkc.h" +#include "desc_constr.h" + +/* Descriptor for RSA Public operation */ +void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb) +{ + init_job_desc_pdb(desc, 0, SIZEOF_RSA_PUB_PDB); + append_cmd(desc, pdb->sgf); + append_ptr(desc, pdb->f_dma); + append_ptr(desc, pdb->g_dma); + append_ptr(desc, pdb->n_dma); + append_ptr(desc, pdb->e_dma); + append_cmd(desc, pdb->f_len); + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY); +} + +/* Descriptor for RSA Private operation - Private Key Form #1 */ +void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb) +{ + init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F1_PDB); + append_cmd(desc, pdb->sgf); + append_ptr(desc, pdb->g_dma); + append_ptr(desc, pdb->f_dma); + append_ptr(desc, pdb->n_dma); + append_ptr(desc, pdb->d_dma); + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY | + RSA_PRIV_KEY_FRM_1); +} + +/* Descriptor for RSA Private operation - Private Key Form #2 */ +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb) +{ + init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F2_PDB); + append_cmd(desc, pdb->sgf); + append_ptr(desc, pdb->g_dma); + append_ptr(desc, pdb->f_dma); + append_ptr(desc, pdb->d_dma); + append_ptr(desc, pdb->p_dma); + append_ptr(desc, pdb->q_dma); + append_ptr(desc, pdb->tmp1_dma); + append_ptr(desc, pdb->tmp2_dma); + append_cmd(desc, pdb->p_q_len); + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY | + RSA_PRIV_KEY_FRM_2); +} + +/* Descriptor for RSA Private operation - Private Key Form #3 */ +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb) +{ + init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F3_PDB); + append_cmd(desc, pdb->sgf); + append_ptr(desc, pdb->g_dma); + append_ptr(desc, pdb->f_dma); + append_ptr(desc, pdb->c_dma); + append_ptr(desc, pdb->p_dma); + append_ptr(desc, pdb->q_dma); + append_ptr(desc, pdb->dp_dma); + append_ptr(desc, pdb->dq_dma); + append_ptr(desc, pdb->tmp1_dma); + append_ptr(desc, pdb->tmp2_dma); + append_cmd(desc, pdb->p_q_len); + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY | + RSA_PRIV_KEY_FRM_3); +} diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c new file mode 100644 index 000000000..ec53528d8 --- /dev/null +++ b/drivers/crypto/caam/qi.c @@ -0,0 +1,777 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CAAM/SEC 4.x QI transport/backend driver + * Queue Interface backend functionality + * + * Copyright 2013-2016 Freescale Semiconductor, Inc. + * Copyright 2016-2017, 2019-2020 NXP + */ + +#include <linux/cpumask.h> +#include <linux/kthread.h> +#include <soc/fsl/qman.h> + +#include "debugfs.h" +#include "regs.h" +#include "qi.h" +#include "desc.h" +#include "intern.h" +#include "desc_constr.h" + +#define PREHDR_RSLS_SHIFT 31 +#define PREHDR_ABS BIT(25) + +/* + * Use a reasonable backlog of frames (per CPU) as congestion threshold, + * so that resources used by the in-flight buffers do not become a memory hog. + */ +#define MAX_RSP_FQ_BACKLOG_PER_CPU 256 + +#define CAAM_QI_ENQUEUE_RETRIES 10000 + +#define CAAM_NAPI_WEIGHT 63 + +/* + * caam_napi - struct holding CAAM NAPI-related params + * @irqtask: IRQ task for QI backend + * @p: QMan portal + */ +struct caam_napi { + struct napi_struct irqtask; + struct qman_portal *p; +}; + +/* + * caam_qi_pcpu_priv - percpu private data structure to main list of pending + * responses expected on each cpu. 
+ * @caam_napi: CAAM NAPI params + * @net_dev: netdev used by NAPI + * @rsp_fq: response FQ from CAAM + */ +struct caam_qi_pcpu_priv { + struct caam_napi caam_napi; + struct net_device net_dev; + struct qman_fq *rsp_fq; +} ____cacheline_aligned; + +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv); +static DEFINE_PER_CPU(int, last_cpu); + +/* + * caam_qi_priv - CAAM QI backend private params + * @cgr: QMan congestion group + */ +struct caam_qi_priv { + struct qman_cgr cgr; +}; + +static struct caam_qi_priv qipriv ____cacheline_aligned; + +/* + * This is written by only one core - the one that initialized the CGR - and + * read by multiple cores (all the others). + */ +bool caam_congested __read_mostly; +EXPORT_SYMBOL(caam_congested); + +/* + * This is a a cache of buffers, from which the users of CAAM QI driver + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than + * doing malloc on the hotpath. + * NOTE: A more elegant solution would be to have some headroom in the frames + * being processed. This could be added by the dpaa-ethernet driver. + * This would pose a problem for userspace application processing which + * cannot know of this limitation. So for now, this will work. + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here + */ +static struct kmem_cache *qi_cache; + +static void *caam_iova_to_virt(struct iommu_domain *domain, + dma_addr_t iova_addr) +{ + phys_addr_t phys_addr; + + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; + + return phys_to_virt(phys_addr); +} + +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) +{ + struct qm_fd fd; + dma_addr_t addr; + int ret; + int num_retries = 0; + + qm_fd_clear_fd(&fd); + qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1])); + + addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(qidev, addr)) { + dev_err(qidev, "DMA mapping error for QI enqueue request\n"); + return -EIO; + } + qm_fd_addr_set64(&fd, addr); + + do { + ret = qman_enqueue(req->drv_ctx->req_fq, &fd); + if (likely(!ret)) { + refcount_inc(&req->drv_ctx->refcnt); + return 0; + } + + if (ret != -EBUSY) + break; + num_retries++; + } while (num_retries < CAAM_QI_ENQUEUE_RETRIES); + + dev_err(qidev, "qman_enqueue failed: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL(caam_qi_enqueue); + +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, + const union qm_mr_entry *msg) +{ + const struct qm_fd *fd; + struct caam_drv_req *drv_req; + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); + struct caam_drv_private *priv = dev_get_drvdata(qidev); + + fd = &msg->ern.fd; + + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); + if (!drv_req) { + dev_err(qidev, + "Can't find original request for CAAM response\n"); + return; + } + + refcount_dec(&drv_req->drv_ctx->refcnt); + + if (qm_fd_get_format(fd) != qm_fd_compound) { + dev_err(qidev, "Non-compound FD from CAAM\n"); + return; + } + + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); + + if (fd->status) + drv_req->cbk(drv_req, be32_to_cpu(fd->status)); + else + drv_req->cbk(drv_req, JRSTA_SSRC_QI); +} + +static struct qman_fq *create_caam_req_fq(struct device *qidev, + struct qman_fq *rsp_fq, + dma_addr_t hwdesc, + int fq_sched_flag) +{ + int ret; + struct qman_fq *req_fq; + struct qm_mcc_initfq opts; + + req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC); + if (!req_fq) + return 
ERR_PTR(-ENOMEM); + + req_fq->cb.ern = caam_fq_ern_cb; + req_fq->cb.fqs = NULL; + + ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | + QMAN_FQ_FLAG_TO_DCPORTAL, req_fq); + if (ret) { + dev_err(qidev, "Failed to create session req FQ\n"); + goto create_req_fq_fail; + } + + memset(&opts, 0, sizeof(opts)); + opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ | + QM_INITFQ_WE_CONTEXTB | + QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID); + opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE); + qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2); + opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq)); + qm_fqd_context_a_set64(&opts.fqd, hwdesc); + opts.fqd.cgid = qipriv.cgr.cgrid; + + ret = qman_init_fq(req_fq, fq_sched_flag, &opts); + if (ret) { + dev_err(qidev, "Failed to init session req FQ\n"); + goto init_req_fq_fail; + } + + dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid, + smp_processor_id()); + return req_fq; + +init_req_fq_fail: + qman_destroy_fq(req_fq); +create_req_fq_fail: + kfree(req_fq); + return ERR_PTR(ret); +} + +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq) +{ + int ret; + + ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT | + QMAN_VOLATILE_FLAG_FINISH, + QM_VDQCR_PRECEDENCE_VDQCR | + QM_VDQCR_NUMFRAMES_TILLEMPTY); + if (ret) { + dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid); + return ret; + } + + do { + struct qman_portal *p; + + p = qman_get_affine_portal(smp_processor_id()); + qman_p_poll_dqrr(p, 16); + } while (fq->flags & QMAN_FQ_STATE_NE); + + return 0; +} + +static int kill_fq(struct device *qidev, struct qman_fq *fq) +{ + u32 flags; + int ret; + + ret = qman_retire_fq(fq, &flags); + if (ret < 0) { + dev_err(qidev, "qman_retire_fq failed: %d\n", ret); + return ret; + } + + if (!ret) + goto empty_fq; + + /* Async FQ retirement condition */ + if (ret == 1) { + /* Retry till FQ gets in retired state */ + do { + msleep(20); + } while (fq->state != qman_fq_state_retired); + + WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS); + WARN_ON(fq->flags & QMAN_FQ_STATE_ORL); + } + +empty_fq: + if (fq->flags & QMAN_FQ_STATE_NE) { + ret = empty_retired_fq(qidev, fq); + if (ret) { + dev_err(qidev, "empty_retired_fq fail for FQ: %u\n", + fq->fqid); + return ret; + } + } + + ret = qman_oos_fq(fq); + if (ret) + dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid); + + qman_destroy_fq(fq); + kfree(fq); + + return ret; +} + +static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx) +{ + int ret; + int retries = 10; + struct qm_mcr_queryfq_np np; + + /* Wait till the older CAAM FQ get empty */ + do { + ret = qman_query_fq_np(fq, &np); + if (ret) + return ret; + + if (!qm_mcr_np_get(&np, frm_cnt)) + break; + + msleep(20); + } while (1); + + /* Wait until pending jobs from this FQ are processed by CAAM */ + do { + if (refcount_read(&drv_ctx->refcnt) == 1) + break; + + msleep(20); + } while (--retries); + + if (!retries) + dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n", + refcount_read(&drv_ctx->refcnt), fq->fqid); + + return 0; +} + +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) +{ + int ret; + u32 num_words; + struct qman_fq *new_fq, *old_fq; + struct device *qidev = drv_ctx->qidev; + + num_words = desc_len(sh_desc); + if (num_words > MAX_SDLEN) { + dev_err(qidev, "Invalid descriptor len: %d words\n", num_words); + return -EINVAL; + } + + /* Note down older req FQ */ + old_fq = drv_ctx->req_fq; + + /* Create a new req FQ in 
parked state */ + new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq, + drv_ctx->context_a, 0); + if (IS_ERR(new_fq)) { + dev_err(qidev, "FQ allocation for shdesc update failed\n"); + return PTR_ERR(new_fq); + } + + /* Hook up new FQ to context so that new requests keep queuing */ + drv_ctx->req_fq = new_fq; + + /* Empty and remove the older FQ */ + ret = empty_caam_fq(old_fq, drv_ctx); + if (ret) { + dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret); + + /* We can revert to older FQ */ + drv_ctx->req_fq = old_fq; + + if (kill_fq(qidev, new_fq)) + dev_warn(qidev, "New CAAM FQ kill failed\n"); + + return ret; + } + + /* + * Re-initialise pre-header. Set RSLS and SDLEN. + * Update the shared descriptor for driver context. + */ + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | + num_words); + drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); + dma_sync_single_for_device(qidev, drv_ctx->context_a, + sizeof(drv_ctx->sh_desc) + + sizeof(drv_ctx->prehdr), + DMA_BIDIRECTIONAL); + + /* Put the new FQ in scheduled state */ + ret = qman_schedule_fq(new_fq); + if (ret) { + dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret); + + /* + * We can kill new FQ and revert to old FQ. + * Since the desc is already modified, it is success case + */ + + drv_ctx->req_fq = old_fq; + + if (kill_fq(qidev, new_fq)) + dev_warn(qidev, "New CAAM FQ kill failed\n"); + } else if (kill_fq(qidev, old_fq)) { + dev_warn(qidev, "Old CAAM FQ kill failed\n"); + } + + return 0; +} +EXPORT_SYMBOL(caam_drv_ctx_update); + +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, + int *cpu, + u32 *sh_desc) +{ + size_t size; + u32 num_words; + dma_addr_t hwdesc; + struct caam_drv_ctx *drv_ctx; + const cpumask_t *cpus = qman_affine_cpus(); + + num_words = desc_len(sh_desc); + if (num_words > MAX_SDLEN) { + dev_err(qidev, "Invalid descriptor len: %d words\n", + num_words); + return ERR_PTR(-EINVAL); + } + + drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC); + if (!drv_ctx) + return ERR_PTR(-ENOMEM); + + /* + * Initialise pre-header - set RSLS and SDLEN - and shared descriptor + * and dma-map them. 
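+ * The resulting DMA handle (stored in @context_a) is later programmed as the
+ * request FQ's context_a in create_caam_req_fq(), which is where CAAM picks
+ * up the preheader + shared descriptor for every frame enqueued on that FQ
+ * (editor's note, derived from the code below).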
+ */ + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | + num_words); + drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); + size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); + hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(qidev, hwdesc)) { + dev_err(qidev, "DMA map error for preheader + shdesc\n"); + kfree(drv_ctx); + return ERR_PTR(-ENOMEM); + } + drv_ctx->context_a = hwdesc; + + /* If given CPU does not own the portal, choose another one that does */ + if (!cpumask_test_cpu(*cpu, cpus)) { + int *pcpu = &get_cpu_var(last_cpu); + + *pcpu = cpumask_next(*pcpu, cpus); + if (*pcpu >= nr_cpu_ids) + *pcpu = cpumask_first(cpus); + *cpu = *pcpu; + + put_cpu_var(last_cpu); + } + drv_ctx->cpu = *cpu; + + /* Find response FQ hooked with this CPU */ + drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu); + + /* Attach request FQ */ + drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc, + QMAN_INITFQ_FLAG_SCHED); + if (IS_ERR(drv_ctx->req_fq)) { + dev_err(qidev, "create_caam_req_fq failed\n"); + dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL); + kfree(drv_ctx); + return ERR_PTR(-ENOMEM); + } + + /* init reference counter used to track references to request FQ */ + refcount_set(&drv_ctx->refcnt, 1); + + drv_ctx->qidev = qidev; + return drv_ctx; +} +EXPORT_SYMBOL(caam_drv_ctx_init); + +void *qi_cache_alloc(gfp_t flags) +{ + return kmem_cache_alloc(qi_cache, flags); +} +EXPORT_SYMBOL(qi_cache_alloc); + +void qi_cache_free(void *obj) +{ + kmem_cache_free(qi_cache, obj); +} +EXPORT_SYMBOL(qi_cache_free); + +static int caam_qi_poll(struct napi_struct *napi, int budget) +{ + struct caam_napi *np = container_of(napi, struct caam_napi, irqtask); + + int cleaned = qman_p_poll_dqrr(np->p, budget); + + if (cleaned < budget) { + napi_complete(napi); + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); + } + + return cleaned; +} + +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx) +{ + if (IS_ERR_OR_NULL(drv_ctx)) + return; + + /* Remove request FQ */ + if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq)) + dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n"); + + dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a, + sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr), + DMA_BIDIRECTIONAL); + kfree(drv_ctx); +} +EXPORT_SYMBOL(caam_drv_ctx_rel); + +static void caam_qi_shutdown(void *data) +{ + int i; + struct device *qidev = data; + struct caam_qi_priv *priv = &qipriv; + const cpumask_t *cpus = qman_affine_cpus(); + + for_each_cpu(i, cpus) { + struct napi_struct *irqtask; + + irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask; + napi_disable(irqtask); + netif_napi_del(irqtask); + + if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i))) + dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); + } + + qman_delete_cgr_safe(&priv->cgr); + qman_release_cgrid(priv->cgr.cgrid); + + kmem_cache_destroy(qi_cache); +} + +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) +{ + caam_congested = congested; + + if (congested) { + caam_debugfs_qi_congested(); + + pr_debug_ratelimited("CAAM entered congestion\n"); + + } else { + pr_debug_ratelimited("CAAM exited congestion\n"); + } +} + +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np) +{ + /* + * In case of threaded ISR, for RT kernels in_irq() does not return + * appropriate value, so use in_serving_softirq to distinguish between + * softirq and irq contexts. 
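+ *
+ * (Editor's note, from the surrounding code: when this path fires, the DQRR
+ * interrupt source is masked here and re-enabled by caam_qi_poll() above
+ * once a poll consumes less than its budget.)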
+ */ + if (unlikely(in_irq() || !in_serving_softirq())) { + /* Disable QMan IRQ source and invoke NAPI */ + qman_p_irqsource_remove(p, QM_PIRQ_DQRI); + np->p = p; + napi_schedule(&np->irqtask); + return 1; + } + return 0; +} + +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, + struct qman_fq *rsp_fq, + const struct qm_dqrr_entry *dqrr) +{ + struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi); + struct caam_drv_req *drv_req; + const struct qm_fd *fd; + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); + struct caam_drv_private *priv = dev_get_drvdata(qidev); + u32 status; + + if (caam_qi_napi_schedule(p, caam_napi)) + return qman_cb_dqrr_stop; + + fd = &dqrr->fd; + + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); + if (unlikely(!drv_req)) { + dev_err(qidev, + "Can't find original request for caam response\n"); + return qman_cb_dqrr_consume; + } + + refcount_dec(&drv_req->drv_ctx->refcnt); + + status = be32_to_cpu(fd->status); + if (unlikely(status)) { + u32 ssrc = status & JRSTA_SSRC_MASK; + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; + + if (ssrc != JRSTA_SSRC_CCB_ERROR || + err_id != JRSTA_CCBERR_ERRID_ICVCHK) + dev_err_ratelimited(qidev, + "Error: %#x in CAAM response FD\n", + status); + } + + if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) { + dev_err(qidev, "Non-compound FD from CAAM\n"); + return qman_cb_dqrr_consume; + } + + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); + + drv_req->cbk(drv_req, status); + return qman_cb_dqrr_consume; +} + +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu) +{ + struct qm_mcc_initfq opts; + struct qman_fq *fq; + int ret; + + fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA); + if (!fq) + return -ENOMEM; + + fq->cb.dqrr = caam_rsp_fq_dqrr_cb; + + ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE | + QMAN_FQ_FLAG_DYNAMIC_FQID, fq); + if (ret) { + dev_err(qidev, "Rsp FQ create failed\n"); + kfree(fq); + return -ENODEV; + } + + memset(&opts, 0, sizeof(opts)); + opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ | + QM_INITFQ_WE_CONTEXTB | + QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID); + opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING | + QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE); + qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3); + opts.fqd.cgid = qipriv.cgr.cgrid; + opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX | + QM_STASHING_EXCL_DATA; + qm_fqd_set_stashing(&opts.fqd, 0, 1, 1); + + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); + if (ret) { + dev_err(qidev, "Rsp FQ init failed\n"); + kfree(fq); + return -ENODEV; + } + + per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq; + + dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu); + return 0; +} + +static int init_cgr(struct device *qidev) +{ + int ret; + struct qm_mcc_initcgr opts; + const u64 val = (u64)cpumask_weight(qman_affine_cpus()) * + MAX_RSP_FQ_BACKLOG_PER_CPU; + + ret = qman_alloc_cgrid(&qipriv.cgr.cgrid); + if (ret) { + dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret); + return ret; + } + + qipriv.cgr.cb = cgr_cb; + memset(&opts, 0, sizeof(opts)); + opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | + QM_CGR_WE_MODE); + opts.cgr.cscn_en = QM_CGR_EN; + opts.cgr.mode = QMAN_CGR_MODE_FRAME; + qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1); + + ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts); + if (ret) { + dev_err(qidev, "Error %d creating CAAM 
CGRID: %u\n", ret, + qipriv.cgr.cgrid); + return ret; + } + + dev_dbg(qidev, "Congestion threshold set to %llu\n", val); + return 0; +} + +static int alloc_rsp_fqs(struct device *qidev) +{ + int ret, i; + const cpumask_t *cpus = qman_affine_cpus(); + + /*Now create response FQs*/ + for_each_cpu(i, cpus) { + ret = alloc_rsp_fq_cpu(qidev, i); + if (ret) { + dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i); + return ret; + } + } + + return 0; +} + +static void free_rsp_fqs(void) +{ + int i; + const cpumask_t *cpus = qman_affine_cpus(); + + for_each_cpu(i, cpus) + kfree(per_cpu(pcpu_qipriv.rsp_fq, i)); +} + +int caam_qi_init(struct platform_device *caam_pdev) +{ + int err, i; + struct device *ctrldev = &caam_pdev->dev, *qidev; + struct caam_drv_private *ctrlpriv; + const cpumask_t *cpus = qman_affine_cpus(); + + ctrlpriv = dev_get_drvdata(ctrldev); + qidev = ctrldev; + + /* Initialize the congestion detection */ + err = init_cgr(qidev); + if (err) { + dev_err(qidev, "CGR initialization failed: %d\n", err); + return err; + } + + /* Initialise response FQs */ + err = alloc_rsp_fqs(qidev); + if (err) { + dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); + free_rsp_fqs(); + return err; + } + + /* + * Enable the NAPI contexts on each of the core which has an affine + * portal. + */ + for_each_cpu(i, cpus) { + struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i); + struct caam_napi *caam_napi = &priv->caam_napi; + struct napi_struct *irqtask = &caam_napi->irqtask; + struct net_device *net_dev = &priv->net_dev; + + net_dev->dev = *qidev; + INIT_LIST_HEAD(&net_dev->napi_list); + + netif_napi_add(net_dev, irqtask, caam_qi_poll, + CAAM_NAPI_WEIGHT); + + napi_enable(irqtask); + } + + qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, + SLAB_CACHE_DMA, NULL); + if (!qi_cache) { + dev_err(qidev, "Can't allocate CAAM cache\n"); + free_rsp_fqs(); + return -ENOMEM; + } + + caam_debugfs_qi_init(ctrlpriv); + + err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv); + if (err) + return err; + + dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); + return 0; +} diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h new file mode 100644 index 000000000..5894f16f8 --- /dev/null +++ b/drivers/crypto/caam/qi.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Public definitions for the CAAM/QI (Queue Interface) backend. + * + * Copyright 2013-2016 Freescale Semiconductor, Inc. + * Copyright 2016-2017, 2020 NXP + */ + +#ifndef __QI_H__ +#define __QI_H__ + +#include <soc/fsl/qman.h> +#include "compat.h" +#include "desc.h" +#include "desc_constr.h" + +/* Length of a single buffer in the QI driver memory cache */ +#define CAAM_QI_MEMCACHE_SIZE 768 + +extern bool caam_congested __read_mostly; + +/* + * This is the request structure the driver application should fill while + * submitting a job to driver. + */ +struct caam_drv_req; + +/* + * caam_qi_cbk - application's callback function invoked by the driver when the + * request has been successfully processed. + * @drv_req: original request that was submitted + * @status: completion status of request (0 - success, non-zero - error code) + */ +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status); + +enum optype { + ENCRYPT, + DECRYPT, + NUM_OP +}; + +/** + * caam_drv_ctx - CAAM/QI backend driver context + * + * The jobs are processed by the driver against a driver context. + * With every cryptographic context, a driver context is attached. 
+ * The driver context contains data for private use by driver. + * For the applications, this is an opaque structure. + * + * @prehdr: preheader placed before shrd desc + * @sh_desc: shared descriptor + * @context_a: shared descriptor dma address + * @req_fq: to-CAAM request frame queue + * @rsp_fq: from-CAAM response frame queue + * @refcnt: reference counter incremented for each frame enqueued in to-CAAM FQ + * @cpu: cpu on which to receive CAAM response + * @op_type: operation type + * @qidev: device pointer for CAAM/QI backend + */ +struct caam_drv_ctx { + u32 prehdr[2]; + u32 sh_desc[MAX_SDLEN]; + dma_addr_t context_a; + struct qman_fq *req_fq; + struct qman_fq *rsp_fq; + refcount_t refcnt; + int cpu; + enum optype op_type; + struct device *qidev; +} ____cacheline_aligned; + +/** + * caam_drv_req - The request structure the driver application should fill while + * submitting a job to driver. + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1]) + * buffers. + * @cbk: callback function to invoke when job is completed + * @app_ctx: arbitrary context attached with request by the application + * + * The fields mentioned below should not be used by application. + * These are for private use by driver. + * + * @hdr__: linked list header to maintain list of outstanding requests to CAAM + * @hwaddr: DMA address for the S/G table. + */ +struct caam_drv_req { + struct qm_sg_entry fd_sgt[2]; + struct caam_drv_ctx *drv_ctx; + caam_qi_cbk cbk; + void *app_ctx; +} ____cacheline_aligned; + +/** + * caam_drv_ctx_init - Initialise a CAAM/QI driver context + * + * A CAAM/QI driver context must be attached with each cryptographic context. + * This function allocates memory for CAAM/QI context and returns a handle to + * the application. This handle must be submitted along with each enqueue + * request to the driver by the application. + * + * @cpu: CPU where the application prefers to the driver to receive CAAM + * responses. The request completion callback would be issued from this + * CPU. + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver + * context. + * + * Returns a driver context on success or negative error code on failure. + */ +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu, + u32 *sh_desc); + +/** + * caam_qi_enqueue - Submit a request to QI backend driver. + * + * The request structure must be properly filled as described above. + * + * @qidev: device pointer for QI backend + * @req: CAAM QI request structure + * + * Returns 0 on success or negative error code on failure. + */ +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req); + +/** + * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM + * or too many CAAM responses are pending to be processed. + * @drv_ctx: driver context for which job is to be submitted + * + * Returns caam congestion status 'true/false' + */ +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx); + +/** + * caam_drv_ctx_update - Update QI driver context + * + * Invoked when shared descriptor is required to be change in driver context. + * + * @drv_ctx: driver context to be updated + * @sh_desc: new shared descriptor pointer to be updated in QI driver context + * + * Returns 0 on success or negative error code on failure. 
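+ *
+ * Minimal usage sketch (editor-added; assumes the caller has just rebuilt
+ * @sh_desc, e.g. from a setkey path - the variable names are illustrative):
+ *
+ *	ret = caam_drv_ctx_update(drv_ctx, sh_desc);
+ *	if (ret)
+ *		dev_err(qidev, "shared descriptor update failed: %d\n", ret);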
+ */ +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc); + +/** + * caam_drv_ctx_rel - Release a QI driver context + * @drv_ctx: context to be released + */ +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx); + +int caam_qi_init(struct platform_device *pdev); + +/** + * qi_cache_alloc - Allocate buffers from CAAM-QI cache + * + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has + * to be allocated on the hotpath. Instead of using malloc, one can use the + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers + * will have a size of 256B, which is sufficient for hosting 16 SG entries. + * + * @flags: flags that would be used for the equivalent malloc(..) call + * + * Returns a pointer to a retrieved buffer on success or NULL on failure. + */ +void *qi_cache_alloc(gfp_t flags); + +/** + * qi_cache_free - Frees buffers allocated from CAAM-QI cache + * + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs + * the buffer previously allocated by a qi_cache_alloc call. + * No checking is being done, the call is a passthrough call to + * kmem_cache_free(...) + * + * @obj: object previously allocated using qi_cache_alloc() + */ +void qi_cache_free(void *obj); + +#endif /* __QI_H__ */ diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h new file mode 100644 index 000000000..3738625c0 --- /dev/null +++ b/drivers/crypto/caam/regs.h @@ -0,0 +1,1028 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CAAM hardware register-level view + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + * Copyright 2018 NXP + */ + +#ifndef REGS_H +#define REGS_H + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/io-64-nonatomic-hi-lo.h> + +/* + * Architecture-specific register access methods + * + * CAAM's bus-addressable registers are 64 bits internally. + * They have been wired to be safely accessible on 32-bit + * architectures, however. Registers were organized such + * that (a) they can be contained in 32 bits, (b) if not, then they + * can be treated as two 32-bit entities, or finally (c) if they + * must be treated as a single 64-bit value, then this can safely + * be done with two 32-bit cycles. + * + * For 32-bit operations on 64-bit values, CAAM follows the same + * 64-bit register access conventions as it's predecessors, in that + * writes are "triggered" by a write to the register at the numerically + * higher address, thus, a full 64-bit write cycle requires a write + * to the lower address, followed by a write to the higher address, + * which will latch/execute the write cycle. + * + * For example, let's assume a SW reset of CAAM through the master + * configuration register. + * - SWRST is in bit 31 of MCFG. + * - MCFG begins at base+0x0000. + * - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower) + * - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher) + * + * (and on Power, the convention is 0-31, 32-63, I know...) + * + * Assuming a 64-bit write to this MCFG to perform a software reset + * would then require a write of 0 to base+0x0000, followed by a + * write of 0x80000000 to base+0x0004, which would "execute" the + * reset. + * + * Of course, since MCFG 63-32 is all zero, we could cheat and simply + * write 0x8000000 to base+0x0004, and the reset would work fine. 
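+ *
+ * As an editor-added, purely illustrative sketch (using the 32-bit accessor
+ * wr_reg32() and the MCFGR_SWRESET definition found later in this file),
+ * that full 64-bit reset write sequence amounts to:
+ *
+ *	wr_reg32(base + 0x0000, 0);             /* MCFG bits 63-32 */
+ *	wr_reg32(base + 0x0004, MCFGR_SWRESET); /* bits 31-0: latches SWRST */
+ *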
+ * However, since CAAM does contain some write-and-read-intended + * 64-bit registers, this code defines 64-bit access methods for + * the sake of internal consistency and simplicity, and so that a + * clean transition to 64-bit is possible when it becomes necessary. + * + * There are limitations to this that the developer must recognize. + * 32-bit architectures cannot enforce an atomic-64 operation, + * Therefore: + * + * - On writes, since the HW is assumed to latch the cycle on the + * write of the higher-numeric-address word, then ordered + * writes work OK. + * + * - For reads, where a register contains a relevant value of more + * that 32 bits, the hardware employs logic to latch the other + * "half" of the data until read, ensuring an accurate value. + * This is of particular relevance when dealing with CAAM's + * performance counters. + * + */ + +extern bool caam_little_end; +extern bool caam_imx; +extern size_t caam_ptr_sz; + +#define caam_to_cpu(len) \ +static inline u##len caam##len ## _to_cpu(u##len val) \ +{ \ + if (caam_little_end) \ + return le##len ## _to_cpu((__force __le##len)val); \ + else \ + return be##len ## _to_cpu((__force __be##len)val); \ +} + +#define cpu_to_caam(len) \ +static inline u##len cpu_to_caam##len(u##len val) \ +{ \ + if (caam_little_end) \ + return (__force u##len)cpu_to_le##len(val); \ + else \ + return (__force u##len)cpu_to_be##len(val); \ +} + +caam_to_cpu(16) +caam_to_cpu(32) +caam_to_cpu(64) +cpu_to_caam(16) +cpu_to_caam(32) +cpu_to_caam(64) + +static inline void wr_reg32(void __iomem *reg, u32 data) +{ + if (caam_little_end) + iowrite32(data, reg); + else + iowrite32be(data, reg); +} + +static inline u32 rd_reg32(void __iomem *reg) +{ + if (caam_little_end) + return ioread32(reg); + + return ioread32be(reg); +} + +static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set) +{ + if (caam_little_end) + iowrite32((ioread32(reg) & ~clear) | set, reg); + else + iowrite32be((ioread32be(reg) & ~clear) | set, reg); +} + +/* + * The only users of these wr/rd_reg64 functions is the Job Ring (JR). + * The DMA address registers in the JR are handled differently depending on + * platform: + * + * 1. All BE CAAM platforms and i.MX platforms (LE CAAM): + * + * base + 0x0000 : most-significant 32 bits + * base + 0x0004 : least-significant 32 bits + * + * The 32-bit version of this core therefore has to write to base + 0x0004 + * to set the 32-bit wide DMA address. + * + * 2. All other LE CAAM platforms (LS1021A etc.) 
+ * base + 0x0000 : least-significant 32 bits + * base + 0x0004 : most-significant 32 bits + */ +static inline void wr_reg64(void __iomem *reg, u64 data) +{ + if (caam_little_end) { + if (caam_imx) { + iowrite32(data >> 32, (u32 __iomem *)(reg)); + iowrite32(data, (u32 __iomem *)(reg) + 1); + } else { + iowrite64(data, reg); + } + } else { + iowrite64be(data, reg); + } +} + +static inline u64 rd_reg64(void __iomem *reg) +{ + if (caam_little_end) { + if (caam_imx) { + u32 low, high; + + high = ioread32(reg); + low = ioread32(reg + sizeof(u32)); + + return low + ((u64)high << 32); + } else { + return ioread64(reg); + } + } else { + return ioread64be(reg); + } +} + +static inline u64 cpu_to_caam_dma64(dma_addr_t value) +{ + if (caam_imx) { + u64 ret_val = (u64)cpu_to_caam32(lower_32_bits(value)) << 32; + + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + ret_val |= (u64)cpu_to_caam32(upper_32_bits(value)); + + return ret_val; + } + + return cpu_to_caam64(value); +} + +static inline u64 caam_dma64_to_cpu(u64 value) +{ + if (caam_imx) + return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | + (u64)caam32_to_cpu(upper_32_bits(value))); + + return caam64_to_cpu(value); +} + +static inline u64 cpu_to_caam_dma(u64 value) +{ + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && + caam_ptr_sz == sizeof(u64)) + return cpu_to_caam_dma64(value); + else + return cpu_to_caam32(value); +} + +static inline u64 caam_dma_to_cpu(u64 value) +{ + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && + caam_ptr_sz == sizeof(u64)) + return caam_dma64_to_cpu(value); + else + return caam32_to_cpu(value); +} + +/* + * jr_outentry + * Represents each entry in a JobR output ring + */ + +static inline void jr_outentry_get(void *outring, int hw_idx, dma_addr_t *desc, + u32 *jrstatus) +{ + + if (caam_ptr_sz == sizeof(u32)) { + struct { + u32 desc; + u32 jrstatus; + } __packed *outentry = outring; + + *desc = outentry[hw_idx].desc; + *jrstatus = outentry[hw_idx].jrstatus; + } else { + struct { + dma_addr_t desc;/* Pointer to completed descriptor */ + u32 jrstatus; /* Status for completed descriptor */ + } __packed *outentry = outring; + + *desc = outentry[hw_idx].desc; + *jrstatus = outentry[hw_idx].jrstatus; + } +} + +#define SIZEOF_JR_OUTENTRY (caam_ptr_sz + sizeof(u32)) + +static inline dma_addr_t jr_outentry_desc(void *outring, int hw_idx) +{ + dma_addr_t desc; + u32 unused; + + jr_outentry_get(outring, hw_idx, &desc, &unused); + + return desc; +} + +static inline u32 jr_outentry_jrstatus(void *outring, int hw_idx) +{ + dma_addr_t unused; + u32 jrstatus; + + jr_outentry_get(outring, hw_idx, &unused, &jrstatus); + + return jrstatus; +} + +static inline void jr_inpentry_set(void *inpring, int hw_idx, dma_addr_t val) +{ + if (caam_ptr_sz == sizeof(u32)) { + u32 *inpentry = inpring; + + inpentry[hw_idx] = val; + } else { + dma_addr_t *inpentry = inpring; + + inpentry[hw_idx] = val; + } +} + +#define SIZEOF_JR_INPENTRY caam_ptr_sz + + +/* Version registers (Era 10+) e80-eff */ +struct version_regs { + u32 crca; /* CRCA_VERSION */ + u32 afha; /* AFHA_VERSION */ + u32 kfha; /* KFHA_VERSION */ + u32 pkha; /* PKHA_VERSION */ + u32 aesa; /* AESA_VERSION */ + u32 mdha; /* MDHA_VERSION */ + u32 desa; /* DESA_VERSION */ + u32 snw8a; /* SNW8A_VERSION */ + u32 snw9a; /* SNW9A_VERSION */ + u32 zuce; /* ZUCE_VERSION */ + u32 zuca; /* ZUCA_VERSION */ + u32 ccha; /* CCHA_VERSION */ + u32 ptha; /* PTHA_VERSION */ + u32 rng; /* RNG_VERSION */ + u32 trng; /* TRNG_VERSION */ + u32 aaha; /* AAHA_VERSION */ + u32 rsvd[10]; + u32 sr; /* SR_VERSION */ 
+ u32 dma; /* DMA_VERSION */ + u32 ai; /* AI_VERSION */ + u32 qi; /* QI_VERSION */ + u32 jr; /* JR_VERSION */ + u32 deco; /* DECO_VERSION */ +}; + +/* Version registers bitfields */ + +/* Number of CHAs instantiated */ +#define CHA_VER_NUM_MASK 0xffull +/* CHA Miscellaneous Information */ +#define CHA_VER_MISC_SHIFT 8 +#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT) +/* CHA Revision Number */ +#define CHA_VER_REV_SHIFT 16 +#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT) +/* CHA Version ID */ +#define CHA_VER_VID_SHIFT 24 +#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT) + +/* CHA Miscellaneous Information - AESA_MISC specific */ +#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT) + +/* CHA Miscellaneous Information - PKHA_MISC specific */ +#define CHA_VER_MISC_PKHA_NO_CRYPT BIT(7 + CHA_VER_MISC_SHIFT) + +/* + * caam_perfmon - Performance Monitor/Secure Memory Status/ + * CAAM Global Status/Component Version IDs + * + * Spans f00-fff wherever instantiated + */ + +/* Number of DECOs */ +#define CHA_NUM_MS_DECONUM_SHIFT 24 +#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT) + +/* + * CHA version IDs / instantiation bitfields (< Era 10) + * Defined for use with the cha_id fields in perfmon, but the same shift/mask + * selectors can be used to pull out the number of instantiated blocks within + * cha_num fields in perfmon because the locations are the same. + */ +#define CHA_ID_LS_AES_SHIFT 0 +#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT) + +#define CHA_ID_LS_DES_SHIFT 4 +#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT) + +#define CHA_ID_LS_ARC4_SHIFT 8 +#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT) + +#define CHA_ID_LS_MD_SHIFT 12 +#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT) + +#define CHA_ID_LS_RNG_SHIFT 16 +#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT) + +#define CHA_ID_LS_SNW8_SHIFT 20 +#define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT) + +#define CHA_ID_LS_KAS_SHIFT 24 +#define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT) + +#define CHA_ID_LS_PK_SHIFT 28 +#define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT) + +#define CHA_ID_MS_CRC_SHIFT 0 +#define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT) + +#define CHA_ID_MS_SNW9_SHIFT 4 +#define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT) + +#define CHA_ID_MS_DECO_SHIFT 24 +#define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT) + +#define CHA_ID_MS_JR_SHIFT 28 +#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT) + +/* Specific CHA version IDs */ +#define CHA_VER_VID_AES_LP 0x3ull +#define CHA_VER_VID_AES_HP 0x4ull +#define CHA_VER_VID_MD_LP256 0x0ull +#define CHA_VER_VID_MD_LP512 0x1ull +#define CHA_VER_VID_MD_HP 0x2ull + +struct sec_vid { + u16 ip_id; + u8 maj_rev; + u8 min_rev; +}; + +struct caam_perfmon { + /* Performance Monitor Registers f00-f9f */ + u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */ + u64 ob_enc_req; /* PC_OB_ENC_REQ - Outbound Encrypt Requests */ + u64 ib_dec_req; /* PC_IB_DEC_REQ - Inbound Decrypt Requests */ + u64 ob_enc_bytes; /* PC_OB_ENCRYPT - Outbound Bytes Encrypted */ + u64 ob_prot_bytes; /* PC_OB_PROTECT - Outbound Bytes Protected */ + u64 ib_dec_bytes; /* PC_IB_DECRYPT - Inbound Bytes Decrypted */ + u64 ib_valid_bytes; /* PC_IB_VALIDATED Inbound Bytes Validated */ + u64 rsvd[13]; + + /* CAAM Hardware Instantiation Parameters fa0-fbf */ + u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/ + u32 cha_rev_ls; /* CRNR - CHA Rev No. 
Least significant half*/ +#define CTPR_MS_QI_SHIFT 25 +#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT) +#define CTPR_MS_PS BIT(17) +#define CTPR_MS_DPAA2 BIT(13) +#define CTPR_MS_VIRT_EN_INCL 0x00000001 +#define CTPR_MS_VIRT_EN_POR 0x00000002 +#define CTPR_MS_PG_SZ_MASK 0x10 +#define CTPR_MS_PG_SZ_SHIFT 4 + u32 comp_parms_ms; /* CTPR - Compile Parameters Register */ + u32 comp_parms_ls; /* CTPR - Compile Parameters Register */ + u64 rsvd1[2]; + + /* CAAM Global Status fc0-fdf */ + u64 faultaddr; /* FAR - Fault Address */ + u32 faultliodn; /* FALR - Fault Address LIODN */ + u32 faultdetail; /* FADR - Fault Addr Detail */ + u32 rsvd2; +#define CSTA_PLEND BIT(10) +#define CSTA_ALT_PLEND BIT(18) + u32 status; /* CSTA - CAAM Status */ + u64 rsvd3; + + /* Component Instantiation Parameters fe0-fff */ + u32 rtic_id; /* RVID - RTIC Version ID */ +#define CCBVID_ERA_MASK 0xff000000 +#define CCBVID_ERA_SHIFT 24 + u32 ccb_id; /* CCBVID - CCB Version ID */ + u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/ + u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/ + u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */ + u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/ +#define SECVID_MS_IPID_MASK 0xffff0000 +#define SECVID_MS_IPID_SHIFT 16 +#define SECVID_MS_MAJ_REV_MASK 0x0000ff00 +#define SECVID_MS_MAJ_REV_SHIFT 8 + u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */ + u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */ +}; + +/* LIODN programming for DMA configuration */ +#define MSTRID_LOCK_LIODN 0x80000000 +#define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */ + +#define MSTRID_LIODN_MASK 0x0fff +struct masterid { + u32 liodn_ms; /* lock and make-trusted control bits */ + u32 liodn_ls; /* LIODN for non-sequence and seq access */ +}; + +/* Partition ID for DMA configuration */ +struct partid { + u32 rsvd1; + u32 pidr; /* partition ID, DECO */ +}; + +/* RNGB test mode (replicated twice in some configurations) */ +/* Padded out to 0x100 */ +struct rngtst { + u32 mode; /* RTSTMODEx - Test mode */ + u32 rsvd1[3]; + u32 reset; /* RTSTRESETx - Test reset control */ + u32 rsvd2[3]; + u32 status; /* RTSTSSTATUSx - Test status */ + u32 rsvd3; + u32 errstat; /* RTSTERRSTATx - Test error status */ + u32 rsvd4; + u32 errctl; /* RTSTERRCTLx - Test error control */ + u32 rsvd5; + u32 entropy; /* RTSTENTROPYx - Test entropy */ + u32 rsvd6[15]; + u32 verifctl; /* RTSTVERIFCTLx - Test verification control */ + u32 rsvd7; + u32 verifstat; /* RTSTVERIFSTATx - Test verification status */ + u32 rsvd8; + u32 verifdata; /* RTSTVERIFDx - Test verification data */ + u32 rsvd9; + u32 xkey; /* RTSTXKEYx - Test XKEY */ + u32 rsvd10; + u32 oscctctl; /* RTSTOSCCTCTLx - Test osc. 
counter control */ + u32 rsvd11; + u32 oscct; /* RTSTOSCCTx - Test oscillator counter */ + u32 rsvd12; + u32 oscctstat; /* RTSTODCCTSTATx - Test osc counter status */ + u32 rsvd13[2]; + u32 ofifo[4]; /* RTSTOFIFOx - Test output FIFO */ + u32 rsvd14[15]; +}; + +/* RNG4 TRNG test registers */ +struct rng4tst { +#define RTMCTL_ACC BIT(5) /* TRNG access mode */ +#define RTMCTL_PRGM BIT(16) /* 1 -> program mode, 0 -> run mode */ +#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in + both entropy shifter and + statistical checker */ +#define RTMCTL_SAMP_MODE_RAW_ES_SC 1 /* use raw data in both + entropy shifter and + statistical checker */ +#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC 2 /* use von Neumann data in + entropy shifter, raw data + in statistical checker */ +#define RTMCTL_SAMP_MODE_INVALID 3 /* invalid combination */ + u32 rtmctl; /* misc. control register */ + u32 rtscmisc; /* statistical check misc. register */ + u32 rtpkrrng; /* poker range register */ + union { + u32 rtpkrmax; /* PRGM=1: poker max. limit register */ + u32 rtpkrsq; /* PRGM=0: poker square calc. result register */ + }; +#define RTSDCTL_ENT_DLY_SHIFT 16 +#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) +#define RTSDCTL_ENT_DLY_MIN 3200 +#define RTSDCTL_ENT_DLY_MAX 12800 + u32 rtsdctl; /* seed control register */ + union { + u32 rtsblim; /* PRGM=1: sparse bit limit register */ + u32 rttotsam; /* PRGM=0: total samples register */ + }; + u32 rtfrqmin; /* frequency count min. limit register */ +#define RTFRQMAX_DISABLE (1 << 20) + union { + u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */ + u32 rtfrqcnt; /* PRGM=0: freq. count register */ + }; + u32 rsvd1[40]; +#define RDSTA_SKVT 0x80000000 +#define RDSTA_SKVN 0x40000000 +#define RDSTA_PR0 BIT(4) +#define RDSTA_PR1 BIT(5) +#define RDSTA_IF0 0x00000001 +#define RDSTA_IF1 0x00000002 +#define RDSTA_MASK (RDSTA_PR1 | RDSTA_PR0 | RDSTA_IF1 | RDSTA_IF0) + u32 rdsta; + u32 rsvd2[15]; +}; + +/* + * caam_ctrl - basic core configuration + * starts base + 0x0000 padded out to 0x1000 + */ + +#define KEK_KEY_SIZE 8 +#define TKEK_KEY_SIZE 8 +#define TDSK_KEY_SIZE 8 + +#define DECO_RESET 1 /* Use with DECO reset/availability regs */ +#define DECO_RESET_0 (DECO_RESET << 0) +#define DECO_RESET_1 (DECO_RESET << 1) +#define DECO_RESET_2 (DECO_RESET << 2) +#define DECO_RESET_3 (DECO_RESET << 3) +#define DECO_RESET_4 (DECO_RESET << 4) + +struct caam_ctrl { + /* Basic Configuration Section 000-01f */ + /* Read/Writable */ + u32 rsvd1; + u32 mcr; /* MCFG Master Config Register */ + u32 rsvd2; + u32 scfgr; /* SCFGR, Security Config Register */ + + /* Bus Access Configuration Section 010-11f */ + /* Read/Writable */ + struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */ + u32 rsvd3[11]; + u32 jrstart; /* JRSTART - Job Ring Start Register */ + struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */ + u32 rsvd4[5]; + u32 deco_rsr; /* DECORSR - Deco Request Source */ + u32 rsvd11; + u32 deco_rq; /* DECORR - DECO Request */ + struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ + u32 rsvd5[22]; + + /* DECO Availability/Reset Section 120-3ff */ + u32 deco_avail; /* DAR - DECO availability */ + u32 deco_reset; /* DRR - DECO reset */ + u32 rsvd6[182]; + + /* Key Encryption/Decryption Configuration 400-5ff */ + /* Read/Writable only while in Non-secure mode */ + u32 kek[KEK_KEY_SIZE]; /* JDKEKR - Key Encryption Key */ + u32 tkek[TKEK_KEY_SIZE]; /* TDKEKR - Trusted Desc KEK */ + u32 tdsk[TDSK_KEY_SIZE]; /* TDSKR - Trusted Desc 
Signing Key */ + u32 rsvd7[32]; + u64 sknonce; /* SKNR - Secure Key Nonce */ + u32 rsvd8[70]; + + /* RNG Test/Verification/Debug Access 600-7ff */ + /* (Useful in Test/Debug modes only...) */ + union { + struct rngtst rtst[2]; + struct rng4tst r4tst[2]; + }; + + u32 rsvd9[416]; + + /* Version registers - introduced with era 10 e80-eff */ + struct version_regs vreg; + /* Performance Monitor f00-fff */ + struct caam_perfmon perfmon; +}; + +/* + * Controller master config register defs + */ +#define MCFGR_SWRESET 0x80000000 /* software reset */ +#define MCFGR_WDENABLE 0x40000000 /* DECO watchdog enable */ +#define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */ +#define MCFGR_DMA_RESET 0x10000000 +#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */ +#define SCFGR_RDBENABLE 0x00000400 +#define SCFGR_VIRT_EN 0x00008000 +#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */ +#define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */ +#define DECORSR_VALID 0x80000000 +#define DECORR_DEN0 0x00010000 /* DECO0 available for access*/ + +/* AXI read cache control */ +#define MCFGR_ARCACHE_SHIFT 12 +#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT) +#define MCFGR_ARCACHE_BUFF (0x1 << MCFGR_ARCACHE_SHIFT) +#define MCFGR_ARCACHE_CACH (0x2 << MCFGR_ARCACHE_SHIFT) +#define MCFGR_ARCACHE_RALL (0x4 << MCFGR_ARCACHE_SHIFT) + +/* AXI write cache control */ +#define MCFGR_AWCACHE_SHIFT 8 +#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT) +#define MCFGR_AWCACHE_BUFF (0x1 << MCFGR_AWCACHE_SHIFT) +#define MCFGR_AWCACHE_CACH (0x2 << MCFGR_AWCACHE_SHIFT) +#define MCFGR_AWCACHE_WALL (0x8 << MCFGR_AWCACHE_SHIFT) + +/* AXI pipeline depth */ +#define MCFGR_AXIPIPE_SHIFT 4 +#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT) + +#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ +#define MCFGR_LARGE_BURST 0x00000004 /* 128/256-byte burst size */ +#define MCFGR_BURST_64 0x00000001 /* 64-byte burst size */ + +/* JRSTART register offsets */ +#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */ +#define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */ +#define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */ +#define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */ + +/* + * caam_job_ring - direct job ring setup + * 1-4 possible per instantiation, base + 1000/2000/3000/4000 + * Padded out to 0x1000 + */ +struct caam_job_ring { + /* Input ring */ + u64 inpring_base; /* IRBAx - Input desc ring baseaddr */ + u32 rsvd1; + u32 inpring_size; /* IRSx - Input ring size */ + u32 rsvd2; + u32 inpring_avail; /* IRSAx - Input ring room remaining */ + u32 rsvd3; + u32 inpring_jobadd; /* IRJAx - Input ring jobs added */ + + /* Output Ring */ + u64 outring_base; /* ORBAx - Output status ring base addr */ + u32 rsvd4; + u32 outring_size; /* ORSx - Output ring size */ + u32 rsvd5; + u32 outring_rmvd; /* ORJRx - Output ring jobs removed */ + u32 rsvd6; + u32 outring_used; /* ORSFx - Output ring slots full */ + + /* Status/Configuration */ + u32 rsvd7; + u32 jroutstatus; /* JRSTAx - JobR output status */ + u32 rsvd8; + u32 jrintstatus; /* JRINTx - JobR interrupt status */ + u32 rconfig_hi; /* JRxCFG - Ring configuration */ + u32 rconfig_lo; + + /* Indices. 
CAAM maintains as "heads" of each queue */ + u32 rsvd9; + u32 inp_rdidx; /* IRRIx - Input ring read index */ + u32 rsvd10; + u32 out_wtidx; /* ORWIx - Output ring write index */ + + /* Command/control */ + u32 rsvd11; + u32 jrcommand; /* JRCRx - JobR command */ + + u32 rsvd12[900]; + + /* Version registers - introduced with era 10 e80-eff */ + struct version_regs vreg; + /* Performance Monitor f00-fff */ + struct caam_perfmon perfmon; +}; + +#define JR_RINGSIZE_MASK 0x03ff +/* + * jrstatus - Job Ring Output Status + * All values in lo word + * Also note, same values written out as status through QI + * in the command/status field of a frame descriptor + */ +#define JRSTA_SSRC_SHIFT 28 +#define JRSTA_SSRC_MASK 0xf0000000 + +#define JRSTA_SSRC_NONE 0x00000000 +#define JRSTA_SSRC_CCB_ERROR 0x20000000 +#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000 +#define JRSTA_SSRC_DECO 0x40000000 +#define JRSTA_SSRC_QI 0x50000000 +#define JRSTA_SSRC_JRERROR 0x60000000 +#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000 + +#define JRSTA_DECOERR_JUMP 0x08000000 +#define JRSTA_DECOERR_INDEX_SHIFT 8 +#define JRSTA_DECOERR_INDEX_MASK 0xff00 +#define JRSTA_DECOERR_ERROR_MASK 0x00ff + +#define JRSTA_DECOERR_NONE 0x00 +#define JRSTA_DECOERR_LINKLEN 0x01 +#define JRSTA_DECOERR_LINKPTR 0x02 +#define JRSTA_DECOERR_JRCTRL 0x03 +#define JRSTA_DECOERR_DESCCMD 0x04 +#define JRSTA_DECOERR_ORDER 0x05 +#define JRSTA_DECOERR_KEYCMD 0x06 +#define JRSTA_DECOERR_LOADCMD 0x07 +#define JRSTA_DECOERR_STORECMD 0x08 +#define JRSTA_DECOERR_OPCMD 0x09 +#define JRSTA_DECOERR_FIFOLDCMD 0x0a +#define JRSTA_DECOERR_FIFOSTCMD 0x0b +#define JRSTA_DECOERR_MOVECMD 0x0c +#define JRSTA_DECOERR_JUMPCMD 0x0d +#define JRSTA_DECOERR_MATHCMD 0x0e +#define JRSTA_DECOERR_SHASHCMD 0x0f +#define JRSTA_DECOERR_SEQCMD 0x10 +#define JRSTA_DECOERR_DECOINTERNAL 0x11 +#define JRSTA_DECOERR_SHDESCHDR 0x12 +#define JRSTA_DECOERR_HDRLEN 0x13 +#define JRSTA_DECOERR_BURSTER 0x14 +#define JRSTA_DECOERR_DESCSIGNATURE 0x15 +#define JRSTA_DECOERR_DMA 0x16 +#define JRSTA_DECOERR_BURSTFIFO 0x17 +#define JRSTA_DECOERR_JRRESET 0x1a +#define JRSTA_DECOERR_JOBFAIL 0x1b +#define JRSTA_DECOERR_DNRERR 0x80 +#define JRSTA_DECOERR_UNDEFPCL 0x81 +#define JRSTA_DECOERR_PDBERR 0x82 +#define JRSTA_DECOERR_ANRPLY_LATE 0x83 +#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84 +#define JRSTA_DECOERR_SEQOVF 0x85 +#define JRSTA_DECOERR_INVSIGN 0x86 +#define JRSTA_DECOERR_DSASIGN 0x87 + +#define JRSTA_QIERR_ERROR_MASK 0x00ff + +#define JRSTA_CCBERR_JUMP 0x08000000 +#define JRSTA_CCBERR_INDEX_MASK 0xff00 +#define JRSTA_CCBERR_INDEX_SHIFT 8 +#define JRSTA_CCBERR_CHAID_MASK 0x00f0 +#define JRSTA_CCBERR_CHAID_SHIFT 4 +#define JRSTA_CCBERR_ERRID_MASK 0x000f + +#define JRSTA_CCBERR_CHAID_AES (0x01 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_DES (0x02 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_ARC4 (0x03 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_MD (0x04 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_RNG (0x05 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_SNOW (0x06 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_KASUMI (0x07 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_PK (0x08 << JRSTA_CCBERR_CHAID_SHIFT) +#define JRSTA_CCBERR_CHAID_CRC (0x09 << JRSTA_CCBERR_CHAID_SHIFT) + +#define JRSTA_CCBERR_ERRID_NONE 0x00 +#define JRSTA_CCBERR_ERRID_MODE 0x01 +#define JRSTA_CCBERR_ERRID_DATASIZ 0x02 +#define JRSTA_CCBERR_ERRID_KEYSIZ 0x03 +#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04 +#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05 +#define 
JRSTA_CCBERR_ERRID_SEQUENCE 0x06 +#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07 +#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08 +#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09 +#define JRSTA_CCBERR_ERRID_ICVCHK 0x0a +#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b +#define JRSTA_CCBERR_ERRID_CCMAAD 0x0c +#define JRSTA_CCBERR_ERRID_INVCHA 0x0f + +#define JRINT_ERR_INDEX_MASK 0x3fff0000 +#define JRINT_ERR_INDEX_SHIFT 16 +#define JRINT_ERR_TYPE_MASK 0xf00 +#define JRINT_ERR_TYPE_SHIFT 8 +#define JRINT_ERR_HALT_MASK 0xc +#define JRINT_ERR_HALT_SHIFT 2 +#define JRINT_ERR_HALT_INPROGRESS 0x4 +#define JRINT_ERR_HALT_COMPLETE 0x8 +#define JRINT_JR_ERROR 0x02 +#define JRINT_JR_INT 0x01 + +#define JRINT_ERR_TYPE_WRITE 1 +#define JRINT_ERR_TYPE_BAD_INPADDR 3 +#define JRINT_ERR_TYPE_BAD_OUTADDR 4 +#define JRINT_ERR_TYPE_INV_INPWRT 5 +#define JRINT_ERR_TYPE_INV_OUTWRT 6 +#define JRINT_ERR_TYPE_RESET 7 +#define JRINT_ERR_TYPE_REMOVE_OFL 8 +#define JRINT_ERR_TYPE_ADD_OFL 9 + +#define JRCFG_SOE 0x04 +#define JRCFG_ICEN 0x02 +#define JRCFG_IMSK 0x01 +#define JRCFG_ICDCT_SHIFT 8 +#define JRCFG_ICTT_SHIFT 16 + +#define JRCR_RESET 0x01 + +/* + * caam_assurance - Assurance Controller View + * base + 0x6000 padded out to 0x1000 + */ + +struct rtic_element { + u64 address; + u32 rsvd; + u32 length; +}; + +struct rtic_block { + struct rtic_element element[2]; +}; + +struct rtic_memhash { + u32 memhash_be[32]; + u32 memhash_le[32]; +}; + +struct caam_assurance { + /* Status/Command/Watchdog */ + u32 rsvd1; + u32 status; /* RSTA - Status */ + u32 rsvd2; + u32 cmd; /* RCMD - Command */ + u32 rsvd3; + u32 ctrl; /* RCTL - Control */ + u32 rsvd4; + u32 throttle; /* RTHR - Throttle */ + u32 rsvd5[2]; + u64 watchdog; /* RWDOG - Watchdog Timer */ + u32 rsvd6; + u32 rend; /* REND - Endian corrections */ + u32 rsvd7[50]; + + /* Block access/configuration @ 100/110/120/130 */ + struct rtic_block memblk[4]; /* Memory Blocks A-D */ + u32 rsvd8[32]; + + /* Block hashes @ 200/300/400/500 */ + struct rtic_memhash hash[4]; /* Block hash values A-D */ + u32 rsvd_3[640]; +}; + +/* + * caam_queue_if - QI configuration and control + * starts base + 0x7000, padded out to 0x1000 long + */ + +struct caam_queue_if { + u32 qi_control_hi; /* QICTL - QI Control */ + u32 qi_control_lo; + u32 rsvd1; + u32 qi_status; /* QISTA - QI Status */ + u32 qi_deq_cfg_hi; /* QIDQC - QI Dequeue Configuration */ + u32 qi_deq_cfg_lo; + u32 qi_enq_cfg_hi; /* QISEQC - QI Enqueue Command */ + u32 qi_enq_cfg_lo; + u32 rsvd2[1016]; +}; + +/* QI control bits - low word */ +#define QICTL_DQEN 0x01 /* Enable frame pop */ +#define QICTL_STOP 0x02 /* Stop dequeue/enqueue */ +#define QICTL_SOE 0x04 /* Stop on error */ + +/* QI control bits - high word */ +#define QICTL_MBSI 0x01 +#define QICTL_MHWSI 0x02 +#define QICTL_MWSI 0x04 +#define QICTL_MDWSI 0x08 +#define QICTL_CBSI 0x10 /* CtrlDataByteSwapInput */ +#define QICTL_CHWSI 0x20 /* CtrlDataHalfSwapInput */ +#define QICTL_CWSI 0x40 /* CtrlDataWordSwapInput */ +#define QICTL_CDWSI 0x80 /* CtrlDataDWordSwapInput */ +#define QICTL_MBSO 0x0100 +#define QICTL_MHWSO 0x0200 +#define QICTL_MWSO 0x0400 +#define QICTL_MDWSO 0x0800 +#define QICTL_CBSO 0x1000 /* CtrlDataByteSwapOutput */ +#define QICTL_CHWSO 0x2000 /* CtrlDataHalfSwapOutput */ +#define QICTL_CWSO 0x4000 /* CtrlDataWordSwapOutput */ +#define QICTL_CDWSO 0x8000 /* CtrlDataDWordSwapOutput */ +#define QICTL_DMBS 0x010000 +#define QICTL_EPO 0x020000 + +/* QI status bits */ +#define QISTA_PHRDERR 0x01 /* PreHeader Read Error */ +#define QISTA_CFRDERR 0x02 /* Compound Frame Read Error */ 
+#define QISTA_OFWRERR 0x04 /* Output Frame Write Error */ +#define QISTA_BPDERR 0x08 /* Buffer Pool Depleted */ +#define QISTA_BTSERR 0x10 /* Buffer Undersize */ +#define QISTA_CFWRERR 0x20 /* Compound Frame Write Err */ +#define QISTA_STOPD 0x80000000 /* QI Stopped (see QICTL) */ + +/* deco_sg_table - DECO view of scatter/gather table */ +struct deco_sg_table { + u64 addr; /* Segment Address */ + u32 elen; /* E, F bits + 30-bit length */ + u32 bpid_offset; /* Buffer Pool ID + 16-bit length */ +}; + +/* + * caam_deco - descriptor controller - CHA cluster block + * + * Only accessible when direct DECO access is turned on + * (done in DECORR, via MID programmed in DECOxMID) + * + * 5 typical, base + 0x8000/9000/a000/b000 + * Padded out to 0x1000 long + */ +struct caam_deco { + u32 rsvd1; + u32 cls1_mode; /* CxC1MR - Class 1 Mode */ + u32 rsvd2; + u32 cls1_keysize; /* CxC1KSR - Class 1 Key Size */ + u32 cls1_datasize_hi; /* CxC1DSR - Class 1 Data Size */ + u32 cls1_datasize_lo; + u32 rsvd3; + u32 cls1_icvsize; /* CxC1ICVSR - Class 1 ICV size */ + u32 rsvd4[5]; + u32 cha_ctrl; /* CCTLR - CHA control */ + u32 rsvd5; + u32 irq_crtl; /* CxCIRQ - CCB interrupt done/error/clear */ + u32 rsvd6; + u32 clr_written; /* CxCWR - Clear-Written */ + u32 ccb_status_hi; /* CxCSTA - CCB Status/Error */ + u32 ccb_status_lo; + u32 rsvd7[3]; + u32 aad_size; /* CxAADSZR - Current AAD Size */ + u32 rsvd8; + u32 cls1_iv_size; /* CxC1IVSZR - Current Class 1 IV Size */ + u32 rsvd9[7]; + u32 pkha_a_size; /* PKASZRx - Size of PKHA A */ + u32 rsvd10; + u32 pkha_b_size; /* PKBSZRx - Size of PKHA B */ + u32 rsvd11; + u32 pkha_n_size; /* PKNSZRx - Size of PKHA N */ + u32 rsvd12; + u32 pkha_e_size; /* PKESZRx - Size of PKHA E */ + u32 rsvd13[24]; + u32 cls1_ctx[16]; /* CxC1CTXR - Class 1 Context @100 */ + u32 rsvd14[48]; + u32 cls1_key[8]; /* CxC1KEYR - Class 1 Key @200 */ + u32 rsvd15[121]; + u32 cls2_mode; /* CxC2MR - Class 2 Mode */ + u32 rsvd16; + u32 cls2_keysize; /* CxC2KSR - Class 2 Key Size */ + u32 cls2_datasize_hi; /* CxC2DSR - Class 2 Data Size */ + u32 cls2_datasize_lo; + u32 rsvd17; + u32 cls2_icvsize; /* CxC2ICVSZR - Class 2 ICV Size */ + u32 rsvd18[56]; + u32 cls2_ctx[18]; /* CxC2CTXR - Class 2 Context @500 */ + u32 rsvd19[46]; + u32 cls2_key[32]; /* CxC2KEYR - Class2 Key @600 */ + u32 rsvd20[84]; + u32 inp_infofifo_hi; /* CxIFIFO - Input Info FIFO @7d0 */ + u32 inp_infofifo_lo; + u32 rsvd21[2]; + u64 inp_datafifo; /* CxDFIFO - Input Data FIFO */ + u32 rsvd22[2]; + u64 out_datafifo; /* CxOFIFO - Output Data FIFO */ + u32 rsvd23[2]; + u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ + u32 jr_ctl_lo; + u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ +#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF + u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ + u32 op_status_lo; + u32 rsvd24[2]; + u32 liodn; /* DxLSR - DECO LIODN Status - non-seq */ + u32 td_liodn; /* DxLSR - DECO LIODN Status - trustdesc */ + u32 rsvd26[6]; + u64 math[4]; /* DxMTH - Math register */ + u32 rsvd27[8]; + struct deco_sg_table gthr_tbl[4]; /* DxGTR - Gather Tables */ + u32 rsvd28[16]; + struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */ + u32 rsvd29[48]; + u32 descbuf[64]; /* DxDESB - Descriptor buffer */ + u32 rscvd30[193]; +#define DESC_DBG_DECO_STAT_VALID 0x80000000 +#define DESC_DBG_DECO_STAT_MASK 0x00F00000 +#define DESC_DBG_DECO_STAT_SHIFT 20 + u32 desc_dbg; /* DxDDR - DECO Debug Register */ + u32 rsvd31[13]; +#define DESC_DER_DECO_STAT_MASK 0x000F0000 +#define DESC_DER_DECO_STAT_SHIFT 16 + u32 dbg_exec; /* 
DxDER - DECO Debug Exec Register */ + u32 rsvd32[112]; +}; + +#define DECO_STAT_HOST_ERR 0xD + +#define DECO_JQCR_WHL 0x20000000 +#define DECO_JQCR_FOUR 0x10000000 + +#define JR_BLOCK_NUMBER 1 +#define ASSURE_BLOCK_NUMBER 6 +#define QI_BLOCK_NUMBER 7 +#define DECO_BLOCK_NUMBER 8 +#define PG_SIZE_4K 0x1000 +#define PG_SIZE_64K 0x10000 +#endif /* REGS_H */ diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h new file mode 100644 index 000000000..d56cc7efb --- /dev/null +++ b/drivers/crypto/caam/sg_sw_qm.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright 2013-2016 Freescale Semiconductor, Inc. + * Copyright 2016-2017 NXP + */ + +#ifndef __SG_SW_QM_H +#define __SG_SW_QM_H + +#include <soc/fsl/qman.h> +#include "regs.h" + +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma, + u16 offset) +{ + qm_sg_entry_set64(qm_sg_ptr, dma); + qm_sg_ptr->__reserved2 = 0; + qm_sg_ptr->bpid = 0; + qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK); +} + +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr, + dma_addr_t dma, u32 len, u16 offset) +{ + __dma_to_qm_sg(qm_sg_ptr, dma, offset); + qm_sg_entry_set_len(qm_sg_ptr, len); +} + +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr, + dma_addr_t dma, u32 len, u16 offset) +{ + __dma_to_qm_sg(qm_sg_ptr, dma, offset); + qm_sg_entry_set_f(qm_sg_ptr, len); +} + +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr, + dma_addr_t dma, u32 len, u16 offset) +{ + __dma_to_qm_sg(qm_sg_ptr, dma, offset); + qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK)); +} + +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr, + dma_addr_t dma, u32 len, + u16 offset) +{ + __dma_to_qm_sg(qm_sg_ptr, dma, offset); + qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN | + (len & QM_SG_LEN_MASK)); +} + +/* + * convert scatterlist to h/w link table format + * but does not have final bit; instead, returns last entry + */ +static inline struct qm_sg_entry * +sg_to_qm_sg(struct scatterlist *sg, int len, + struct qm_sg_entry *qm_sg_ptr, u16 offset) +{ + int ent_len; + + while (len) { + ent_len = min_t(int, sg_dma_len(sg), len); + + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len, + offset); + qm_sg_ptr++; + sg = sg_next(sg); + len -= ent_len; + } + return qm_sg_ptr - 1; +} + +/* + * convert scatterlist to h/w link table format + * scatterlist must have been previously dma mapped + */ +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len, + struct qm_sg_entry *qm_sg_ptr, u16 offset) +{ + qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset); + qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr)); +} + +#endif /* __SG_SW_QM_H */ diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h new file mode 100644 index 000000000..b8b737d2b --- /dev/null +++ b/drivers/crypto/caam/sg_sw_qm2.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright 2015-2016 Freescale Semiconductor, Inc. 
+ * Copyright 2017 NXP + */ + +#ifndef _SG_SW_QM2_H_ +#define _SG_SW_QM2_H_ + +#include <soc/fsl/dpaa2-fd.h> + +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr, + dma_addr_t dma, u32 len, u16 offset) +{ + dpaa2_sg_set_addr(qm_sg_ptr, dma); + dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single); + dpaa2_sg_set_final(qm_sg_ptr, false); + dpaa2_sg_set_len(qm_sg_ptr, len); + dpaa2_sg_set_bpid(qm_sg_ptr, 0); + dpaa2_sg_set_offset(qm_sg_ptr, offset); +} + +/* + * convert scatterlist to h/w link table format + * but does not have final bit; instead, returns last entry + */ +static inline struct dpaa2_sg_entry * +sg_to_qm_sg(struct scatterlist *sg, int len, + struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) +{ + int ent_len; + + while (len) { + ent_len = min_t(int, sg_dma_len(sg), len); + + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len, + offset); + qm_sg_ptr++; + sg = sg_next(sg); + len -= ent_len; + } + return qm_sg_ptr - 1; +} + +/* + * convert scatterlist to h/w link table format + * scatterlist must have been previously dma mapped + */ +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len, + struct dpaa2_sg_entry *qm_sg_ptr, + u16 offset) +{ + qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset); + dpaa2_sg_set_final(qm_sg_ptr, true); +} + +#endif /* _SG_SW_QM2_H_ */ diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h new file mode 100644 index 000000000..07e1ee992 --- /dev/null +++ b/drivers/crypto/caam/sg_sw_sec4.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CAAM/SEC 4.x functions for using scatterlists in caam driver + * + * Copyright 2008-2011 Freescale Semiconductor, Inc. + * + */ + +#ifndef _SG_SW_SEC4_H_ +#define _SG_SW_SEC4_H_ + +#include "ctrl.h" +#include "regs.h" +#include "sg_sw_qm2.h" +#include <soc/fsl/dpaa2-fd.h> + +struct sec4_sg_entry { + u64 ptr; + u32 len; + u32 bpid_offset; +}; + +/* + * convert single dma address to h/w link table format + */ +static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, + dma_addr_t dma, u32 len, u16 offset) +{ + if (caam_dpaa2) { + dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len, + offset); + } else { + sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma); + sec4_sg_ptr->len = cpu_to_caam32(len); + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & + SEC4_SG_OFFSET_MASK); + } + + print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4, + sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1); +} + +/* + * convert scatterlist to h/w link table format + * but does not have final bit; instead, returns last entry + */ +static inline struct sec4_sg_entry * +sg_to_sec4_sg(struct scatterlist *sg, int len, + struct sec4_sg_entry *sec4_sg_ptr, u16 offset) +{ + int ent_len; + + while (len) { + ent_len = min_t(int, sg_dma_len(sg), len); + + dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len, + offset); + sec4_sg_ptr++; + sg = sg_next(sg); + len -= ent_len; + } + return sec4_sg_ptr - 1; +} + +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr) +{ + if (caam_dpaa2) + dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true); + else + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN); +} + +/* + * convert scatterlist to h/w link table format + * scatterlist must have been previously dma mapped + */ +static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len, + struct sec4_sg_entry *sec4_sg_ptr, + u16 offset) +{ + sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset); + 
sg_to_sec4_set_last(sec4_sg_ptr); +} + +#endif /* _SG_SW_SEC4_H_ */
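Editorial note: the three sg_sw_*.h headers in this commit all follow the same pattern - the caller DMA-maps a scatterlist, then one of the sg_to_*_sg_last() helpers walks the mapped segments, writes one hardware link-table entry per segment, and marks the last entry as final. The following is a rough usage sketch only, not part of the commit: the function name example_build_sec4_table() is hypothetical, and table allocation, mapping of the table itself, and unmapping on the error path are omitted for brevity.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include "sg_sw_sec4.h"

/*
 * Illustration only: describe a DMA-mapped scatterlist to CAAM through a
 * SEC4 link table whose last entry carries the final (FIN) bit.  The
 * caller is assumed to have allocated one struct sec4_sg_entry per
 * mapped segment.
 */
static int example_build_sec4_table(struct device *dev,
				    struct scatterlist *sg, int nents,
				    int total_len,
				    struct sec4_sg_entry *table)
{
	if (!dma_map_sg(dev, sg, nents, DMA_TO_DEVICE))
		return -ENOMEM;

	/* Fill one entry per mapped segment; FIN is set on the last one */
	sg_to_sec4_sg_last(sg, total_len, table, 0);

	/*
	 * ... the table itself would then be DMA-mapped and its bus address
	 * placed in a job descriptor ...
	 */
	return 0;
}

On DPAA2 platforms the same caller-side code applies: dma_to_sec4_sg_one() dispatches to the dpaa2_sg_* accessors when caam_dpaa2 is set, so only the entry layout differs.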
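The JRSTA_* masks in regs.h above describe how a completed job reports its status word (jroutstatus in the job ring, or the frame descriptor status field when jobs flow through QI). The sketch below shows how such a word can be split into its source and error fields; it is illustrative only, example_decode_jrstatus() is a hypothetical name, and the driver's real status reporting is implemented elsewhere in the driver, not in this header.

#include <linux/printk.h>
#include <linux/types.h>
#include "regs.h"

/*
 * Illustration only: decode a job ring output status word using the
 * JRSTA_* masks defined in regs.h.
 */
static void example_decode_jrstatus(u32 status)
{
	switch (status & JRSTA_SSRC_MASK) {
	case JRSTA_SSRC_NONE:
		pr_info("job completed without error\n");
		break;
	case JRSTA_SSRC_CCB_ERROR:
		/* CHA id and error id are packed into the low half-word */
		pr_err("CCB error: cha id %u, err id 0x%x\n",
		       (status & JRSTA_CCBERR_CHAID_MASK) >>
		       JRSTA_CCBERR_CHAID_SHIFT,
		       status & JRSTA_CCBERR_ERRID_MASK);
		break;
	case JRSTA_SSRC_DECO:
		pr_err("DECO error 0x%x at descriptor index %u\n",
		       status & JRSTA_DECOERR_ERROR_MASK,
		       (status & JRSTA_DECOERR_INDEX_MASK) >>
		       JRSTA_DECOERR_INDEX_SHIFT);
		break;
	default:
		pr_err("unhandled status source, raw word 0x%08x\n", status);
		break;
	}
}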