Diffstat:
 -rw-r--r--  drivers/crypto/chelsio/Kconfig          23
 -rw-r--r--  drivers/crypto/chelsio/Makefile          5
 -rw-r--r--  drivers/crypto/chelsio/chcr_algo.c    4531
 -rw-r--r--  drivers/crypto/chelsio/chcr_algo.h     405
 -rw-r--r--  drivers/crypto/chelsio/chcr_core.c     311
 -rw-r--r--  drivers/crypto/chelsio/chcr_core.h     139
 -rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h   353
 7 files changed, 5767 insertions, 0 deletions
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
new file mode 100644
index 000000000..f886401af
--- /dev/null
+++ b/drivers/crypto/chelsio/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_CHELSIO
+ tristate "Chelsio Crypto Co-processor Driver"
+ depends on CHELSIO_T4
+ select CRYPTO_LIB_AES
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_AUTHENC
+ select CRYPTO_GF128MUL
+ help
+ The Chelsio Crypto Co-processor driver for T6 adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called chcr.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
new file mode 100644
index 000000000..2e5df484a
--- /dev/null
+++ b/drivers/crypto/chelsio/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
+chcr-objs := chcr_core.o chcr_algo.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
new file mode 100644
index 000000000..6933546f8
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -0,0 +1,4531 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ * Manoj Malviya (manojmalviya@chelsio.com)
+ * Atul Gupta (atul.gupta@chelsio.com)
+ * Jitendra Lulla (jlulla@chelsio.com)
+ * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
+ * Harsh Jain (harsh@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/gcm.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/authenc.h>
+#include <crypto/ctr.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "t4fw_api.h"
+#include "t4_msg.h"
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+#define IV AES_BLOCK_SIZE
+
+static unsigned int sgl_ent_len[] = {
+ 0, 0, 16, 24, 40, 48, 64, 72, 88,
+ 96, 112, 120, 136, 144, 160, 168, 184,
+ 192, 208, 216, 232, 240, 256, 264, 280,
+ 288, 304, 312, 328, 336, 352, 360, 376
+};
+
+static unsigned int dsgl_ent_len[] = {
+ 0, 32, 32, 48, 48, 64, 64, 80, 80,
+ 112, 112, 128, 128, 144, 144, 160, 160,
+ 192, 192, 208, 208, 224, 224, 240, 240,
+ 272, 272, 288, 288, 304, 304, 320, 320
+};
+
+static u32 round_constant[11] = {
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000, 0x6C000000
+};
+
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
+ unsigned char *input, int err);
+
+static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->aeadctx;
+}
+
+static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->ablkctx;
+}
+
+static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->hmacctx;
+}
+
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->authenc;
+}
+
+static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
+{
+ return container_of(ctx->dev, struct uld_ctx, dev);
+}
+
+static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
+{
+ memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
+}
+
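+/* Count the number of hardware SGL entries needed to cover @reqlen bytes of
+ * @sg after skipping the first @skip bytes, with each entry limited to
+ * @entlen bytes.
+ */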
+static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
+ unsigned int entlen,
+ unsigned int skip)
+{
+ int nents = 0;
+ unsigned int less;
+ unsigned int skip_len = 0;
+
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
+ }
+
+ while (sg && reqlen) {
+ less = min(reqlen, sg_dma_len(sg) - skip_len);
+ nents += DIV_ROUND_UP(less, entlen);
+ reqlen -= less;
+ skip_len = 0;
+ sg = sg_next(sg);
+ }
+ return nents;
+}
+
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+ u8 temp[SHA512_DIGEST_SIZE];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int authsize = crypto_aead_authsize(tfm);
+ struct cpl_fw6_pld *fw6_pld;
+ int cmp = 0;
+
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+ (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+ cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+ } else {
+
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+ authsize, req->assoclen +
+ req->cryptlen - authsize);
+ cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
+ }
+ if (cmp)
+ *err = -EBADMSG;
+ else
+ *err = 0;
+}
+
+static int chcr_inc_wrcount(struct chcr_dev *dev)
+{
+ if (dev->state == CHCR_DETACH)
+ return 1;
+ atomic_inc(&dev->inflight);
+ return 0;
+}
+
+static inline void chcr_dec_wrcount(struct chcr_dev *dev)
+{
+ atomic_dec(&dev->inflight);
+}
+
+static inline int chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_dev *dev = a_ctx(tfm)->dev;
+
+ chcr_aead_common_exit(req);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+ }
+ chcr_dec_wrcount(dev);
+ req->base.complete(&req->base, err);
+
+ return err;
+}
+
+static void get_aes_decrypt_key(unsigned char *dec_key,
+ const unsigned char *key,
+ unsigned int keylength)
+{
+ u32 temp;
+ u32 w_ring[MAX_NK];
+ int i, j, k;
+ u8 nr, nk;
+
+ switch (keylength) {
+ case AES_KEYLENGTH_128BIT:
+ nk = KEYLENGTH_4BYTES;
+ nr = NUMBER_OF_ROUNDS_10;
+ break;
+ case AES_KEYLENGTH_192BIT:
+ nk = KEYLENGTH_6BYTES;
+ nr = NUMBER_OF_ROUNDS_12;
+ break;
+ case AES_KEYLENGTH_256BIT:
+ nk = KEYLENGTH_8BYTES;
+ nr = NUMBER_OF_ROUNDS_14;
+ break;
+ default:
+ return;
+ }
+ for (i = 0; i < nk; i++)
+ w_ring[i] = get_unaligned_be32(&key[i * 4]);
+
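+ /* Run the forward AES key expansion, keeping only the last nk words
+ * of the schedule in the w_ring ring buffer.
+ */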
+ i = 0;
+ temp = w_ring[nk - 1];
+ while (i + nk < (nr + 1) * 4) {
+ if (!(i % nk)) {
+ /* RotWord(temp) */
+ temp = (temp << 8) | (temp >> 24);
+ temp = aes_ks_subword(temp);
+ temp ^= round_constant[i / nk];
+ } else if (nk == 8 && (i % 4 == 0)) {
+ temp = aes_ks_subword(temp);
+ }
+ w_ring[i % nk] ^= temp;
+ temp = w_ring[i % nk];
+ i++;
+ }
+ i--;
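+ /* Copy the final nk words of the schedule out in reverse order to
+ * form the decryption (reversed round) key.
+ */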
+ for (k = 0, j = i % nk; k < nk; k++) {
+ put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
+ j--;
+ if (j < 0)
+ j += nk;
+ }
+}
+
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
+{
+ struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
+
+ switch (ds) {
+ case SHA1_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha1", 0, 0);
+ break;
+ case SHA224_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha224", 0, 0);
+ break;
+ case SHA256_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha256", 0, 0);
+ break;
+ case SHA384_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha384", 0, 0);
+ break;
+ case SHA512_DIGEST_SIZE:
+ base_hash = crypto_alloc_shash("sha512", 0, 0);
+ break;
+ }
+
+ return base_hash;
+}
+
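+/* Hash a single block of ipad/opad data with the software shash and export
+ * the intermediate state; used to precompute the HMAC partial hashes.
+ */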
+static int chcr_compute_partial_hash(struct shash_desc *desc,
+ char *iopad, char *result_hash,
+ int digest_size)
+{
+ struct sha1_state sha1_st;
+ struct sha256_state sha256_st;
+ struct sha512_state sha512_st;
+ int error;
+
+ if (digest_size == SHA1_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha1_st);
+ memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
+ } else if (digest_size == SHA224_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha256_st);
+ memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
+
+ } else if (digest_size == SHA256_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha256_st);
+ memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
+
+ } else if (digest_size == SHA384_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha512_st);
+ memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
+
+ } else if (digest_size == SHA512_DIGEST_SIZE) {
+ error = crypto_shash_init(desc) ?:
+ crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
+ crypto_shash_export(desc, (void *)&sha512_st);
+ memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
+ } else {
+ error = -EINVAL;
+ pr_err("Unknown digest size %d\n", digest_size);
+ }
+ return error;
+}
+
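+/* Convert the exported hash state to big-endian word order: 64-bit words
+ * for a SHA-512 sized state, 32-bit words otherwise.
+ */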
+static void chcr_change_order(char *buf, int ds)
+{
+ int i;
+
+ if (ds == SHA512_DIGEST_SIZE) {
+ for (i = 0; i < (ds / sizeof(u64)); i++)
+ *((__be64 *)buf + i) =
+ cpu_to_be64(*((u64 *)buf + i));
+ } else {
+ for (i = 0; i < (ds / sizeof(u32)); i++)
+ *((__be32 *)buf + i) =
+ cpu_to_be32(*((u32 *)buf + i));
+ }
+}
+
+static inline int is_hmac(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
+ alg.hash);
+ if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
+ return 1;
+ return 0;
+}
+
+static inline void dsgl_walk_init(struct dsgl_walk *walk,
+ struct cpl_rx_phys_dsgl *dsgl)
+{
+ walk->dsgl = dsgl;
+ walk->nents = 0;
+ walk->to = (struct phys_sge_pairs *)(dsgl + 1);
+}
+
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+ int pci_chan_id)
+{
+ struct cpl_rx_phys_dsgl *phys_cpl;
+
+ phys_cpl = walk->dsgl;
+
+ phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
+ | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
+ phys_cpl->pcirlxorder_to_noofsgentr =
+ htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
+ CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
+ CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
+ CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
+ CPL_RX_PHYS_DSGL_DCAID_V(0) |
+ CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
+ phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
+ phys_cpl->rss_hdr_int.qid = htons(qid);
+ phys_cpl->rss_hdr_int.hash_val = 0;
+ phys_cpl->rss_hdr_int.channel = pci_chan_id;
+}
+
+static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
+ size_t size,
+ dma_addr_t addr)
+{
+ int j;
+
+ if (!size)
+ return;
+ j = walk->nents;
+ walk->to->len[j % 8] = htons(size);
+ walk->to->addr[j % 8] = cpu_to_be64(addr);
+ j++;
+ if ((j % 8) == 0)
+ walk->to++;
+ walk->nents = j;
+}
+
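+/* Append @slen bytes of @sg (after skipping @skip) to the destination DSGL,
+ * splitting each mapping into entries of at most CHCR_DST_SG_SIZE bytes.
+ */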
+static void dsgl_walk_add_sg(struct dsgl_walk *walk,
+ struct scatterlist *sg,
+ unsigned int slen,
+ unsigned int skip)
+{
+ int skip_len = 0;
+ unsigned int left_size = slen, len = 0;
+ unsigned int j = walk->nents;
+ int offset, ent_len;
+
+ if (!slen)
+ return;
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
+ }
+
+ while (left_size && sg) {
+ len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+ offset = 0;
+ while (len) {
+ ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
+ walk->to->len[j % 8] = htons(ent_len);
+ walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
+ offset + skip_len);
+ offset += ent_len;
+ len -= ent_len;
+ j++;
+ if ((j % 8) == 0)
+ walk->to++;
+ }
+ walk->last_sg = sg;
+ walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
+ skip_len) + skip_len;
+ left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+ skip_len = 0;
+ sg = sg_next(sg);
+ }
+ walk->nents = j;
+}
+
+static inline void ulptx_walk_init(struct ulptx_walk *walk,
+ struct ulptx_sgl *ulp)
+{
+ walk->sgl = ulp;
+ walk->nents = 0;
+ walk->pair_idx = 0;
+ walk->pair = ulp->sge;
+ walk->last_sg = NULL;
+ walk->last_sg_len = 0;
+}
+
+static inline void ulptx_walk_end(struct ulptx_walk *walk)
+{
+ walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(walk->nents));
+}
+
+static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
+ size_t size,
+ dma_addr_t addr)
+{
+ if (!size)
+ return;
+
+ if (walk->nents == 0) {
+ walk->sgl->len0 = cpu_to_be32(size);
+ walk->sgl->addr0 = cpu_to_be64(addr);
+ } else {
+ walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
+ walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
+ walk->pair_idx = !walk->pair_idx;
+ if (!walk->pair_idx)
+ walk->pair++;
+ }
+ walk->nents++;
+}
+
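+/* Append @len bytes of @sg (after skipping @skip) to the ULPTX SGL; the
+ * first entry uses len0/addr0 and the rest go into the address/length
+ * pairs, each capped at CHCR_SRC_SG_SIZE bytes.
+ */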
+static void ulptx_walk_add_sg(struct ulptx_walk *walk,
+ struct scatterlist *sg,
+ unsigned int len,
+ unsigned int skip)
+{
+ int small;
+ int skip_len = 0;
+ unsigned int sgmin;
+
+ if (!len)
+ return;
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
+ }
+ WARN(!sg, "SG should not be null here\n");
+ if (sg && (walk->nents == 0)) {
+ small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
+ sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+ walk->sgl->len0 = cpu_to_be32(sgmin);
+ walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
+ walk->nents++;
+ len -= sgmin;
+ walk->last_sg = sg;
+ walk->last_sg_len = sgmin + skip_len;
+ skip_len += sgmin;
+ if (sg_dma_len(sg) == skip_len) {
+ sg = sg_next(sg);
+ skip_len = 0;
+ }
+ }
+
+ while (sg && len) {
+ small = min(sg_dma_len(sg) - skip_len, len);
+ sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+ walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
+ walk->pair->addr[walk->pair_idx] =
+ cpu_to_be64(sg_dma_address(sg) + skip_len);
+ walk->pair_idx = !walk->pair_idx;
+ walk->nents++;
+ if (!walk->pair_idx)
+ walk->pair++;
+ len -= sgmin;
+ skip_len += sgmin;
+ walk->last_sg = sg;
+ walk->last_sg_len = skip_len;
+ if (sg_dma_len(sg) == skip_len) {
+ sg = sg_next(sg);
+ skip_len = 0;
+ }
+ }
+}
+
+static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
+{
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.skcipher);
+
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
+static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+ struct sge_uld_txq *txq;
+ int ret = 0;
+
+ local_bh_disable();
+ txq = &txq_info->uldtxq[idx];
+ spin_lock(&txq->sendq.lock);
+ if (txq->full)
+ ret = -1;
+ spin_unlock(&txq->sendq.lock);
+ local_bh_enable();
+ return ret;
+}
+
+static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
+ struct _key_ctx *key_ctx)
+{
+ if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
+ memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
+ } else {
+ memcpy(key_ctx->key,
+ ablkctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->enckey_len >> 1);
+ memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->rrkey, ablkctx->enckey_len >> 1);
+ }
+ return 0;
+}
+
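+/* Return how many source bytes (beyond the first @srcskip) fit in a hash
+ * work request that still has @space bytes of SGL room, starting the entry
+ * count at @minsg.
+ */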
+static int chcr_hash_ent_in_wr(struct scatterlist *src,
+ unsigned int minsg,
+ unsigned int space,
+ unsigned int srcskip)
+{
+ int srclen = 0;
+ int srcsg = minsg;
+ int soffset = 0, sless;
+
+ if (sg_dma_len(src) == srcskip) {
+ src = sg_next(src);
+ srcskip = 0;
+ }
+ while (src && space > (sgl_ent_len[srcsg + 1])) {
+ sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
+ CHCR_SRC_SG_SIZE);
+ srclen += sless;
+ soffset += sless;
+ srcsg++;
+ if (sg_dma_len(src) == (soffset + srcskip)) {
+ src = sg_next(src);
+ soffset = 0;
+ srcskip = 0;
+ }
+ }
+ return srclen;
+}
+
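+/* Return how many payload bytes can be carried in one work request given
+ * @space bytes of SGL room for both source and destination; the result is
+ * the smaller of the source and destination byte counts that fit.
+ */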
+static int chcr_sg_ent_in_wr(struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int minsg,
+ unsigned int space,
+ unsigned int srcskip,
+ unsigned int dstskip)
+{
+ int srclen = 0, dstlen = 0;
+ int srcsg = minsg, dstsg = minsg;
+ int offset = 0, soffset = 0, less, sless = 0;
+
+ if (sg_dma_len(src) == srcskip) {
+ src = sg_next(src);
+ srcskip = 0;
+ }
+ if (sg_dma_len(dst) == dstskip) {
+ dst = sg_next(dst);
+ dstskip = 0;
+ }
+
+ while (src && dst &&
+ space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
+ sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
+ CHCR_SRC_SG_SIZE);
+ srclen += sless;
+ srcsg++;
+ offset = 0;
+ while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
+ space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
+ if (srclen <= dstlen)
+ break;
+ less = min_t(unsigned int, sg_dma_len(dst) - offset -
+ dstskip, CHCR_DST_SG_SIZE);
+ dstlen += less;
+ offset += less;
+ if ((offset + dstskip) == sg_dma_len(dst)) {
+ dst = sg_next(dst);
+ offset = 0;
+ }
+ dstsg++;
+ dstskip = 0;
+ }
+ soffset += sless;
+ if ((soffset + srcskip) == sg_dma_len(src)) {
+ src = sg_next(src);
+ srcskip = 0;
+ soffset = 0;
+ }
+
+ }
+ return min(srclen, dstlen);
+}
+
+static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+ struct skcipher_request *req,
+ u8 *iv,
+ unsigned short op_type)
+{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ int err;
+
+ skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
+ skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
+ req->cryptlen, iv);
+
+ err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
+ crypto_skcipher_encrypt(&reqctx->fallback_req);
+
+ return err;
+
+}
+
+static inline int get_qidxs(struct crypto_async_request *req,
+ unsigned int *txqidx, unsigned int *rxqidx)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ int ret = 0;
+
+ switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ {
+ struct aead_request *aead_req =
+ container_of(req, struct aead_request, base);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
+ *txqidx = reqctx->txqidx;
+ *rxqidx = reqctx->rxqidx;
+ break;
+ }
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ {
+ struct skcipher_request *sk_req =
+ container_of(req, struct skcipher_request, base);
+ struct chcr_skcipher_req_ctx *reqctx =
+ skcipher_request_ctx(sk_req);
+ *txqidx = reqctx->txqidx;
+ *rxqidx = reqctx->rxqidx;
+ break;
+ }
+ case CRYPTO_ALG_TYPE_AHASH:
+ {
+ struct ahash_request *ahash_req =
+ container_of(req, struct ahash_request, base);
+ struct chcr_ahash_req_ctx *reqctx =
+ ahash_request_ctx(ahash_req);
+ *txqidx = reqctx->txqidx;
+ *rxqidx = reqctx->rxqidx;
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ /* should never get here */
+ BUG();
+ break;
+ }
+ return ret;
+}
+
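+/* Fill the common FW_CRYPTO_LOOKASIDE_WR header, ULPTX command and
+ * immediate-data sub-command shared by the crypto work-request builders.
+ */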
+static inline void create_wreq(struct chcr_context *ctx,
+ struct chcr_wr *chcr_req,
+ struct crypto_async_request *req,
+ unsigned int imm,
+ int hash_sz,
+ unsigned int len16,
+ unsigned int sc_len,
+ unsigned int lcb)
+{
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ unsigned int tx_channel_id, rx_channel_id;
+ unsigned int txqidx = 0, rxqidx = 0;
+ unsigned int qid, fid, portno;
+
+ get_qidxs(req, &txqidx, &rxqidx);
+ qid = u_ctx->lldi.rxq_ids[rxqidx];
+ fid = u_ctx->lldi.rxq_ids[0];
+ portno = rxqidx / ctx->rxq_perchan;
+ tx_channel_id = txqidx / ctx->txq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
+
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
+ chcr_req->wreq.pld_size_hash_size =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
+ chcr_req->wreq.len16_pkd =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
+ chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+ chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
+ !!lcb, txqidx);
+
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
+ ((sizeof(chcr_req->wreq)) >> 4)));
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
+ chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(chcr_req->key_ctx) + sc_len);
+}
+
+/**
+ * create_cipher_wr - form the WR for cipher operations
+ * @wrparam: Container for create_cipher_wr()'s parameters
+ */
+static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
+ struct chcr_context *ctx = c_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
+ struct chcr_skcipher_req_ctx *reqctx =
+ skcipher_request_ctx(wrparam->req);
+ unsigned int temp = 0, transhdr_len, dst_size;
+ int error;
+ int nents;
+ unsigned int kctx_len;
+ gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+ struct adapter *adap = padap(ctx->dev);
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
+ reqctx->dst_ofst);
+ dst_size = get_space_for_phys_dsgl(nents);
+ kctx_len = roundup(ablkctx->enckey_len, 16);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
+ CHCR_SRC_SG_SIZE, reqctx->src_ofst);
+ temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
+ (sgl_len(nents) * 8);
+ transhdr_len += temp;
+ transhdr_len = roundup(transhdr_len, 16);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ if (!skb) {
+ error = -ENOMEM;
+ goto err;
+ }
+ chcr_req = __skb_put_zero(skb, transhdr_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
+
+ chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
+
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
+ ablkctx->ciph_mode,
+ 0, 0, IV >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+ 0, 1, dst_size);
+
+ chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
+ if ((reqctx->op == CHCR_DECRYPT_OP) &&
+ (!(get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CTR)) &&
+ (!(get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
+ generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
+ } else {
+ if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
+ (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
+ memcpy(chcr_req->key_ctx.key, ablkctx->key,
+ ablkctx->enckey_len);
+ } else {
+ memcpy(chcr_req->key_ctx.key, ablkctx->key +
+ (ablkctx->enckey_len >> 1),
+ ablkctx->enckey_len >> 1);
+ memcpy(chcr_req->key_ctx.key +
+ (ablkctx->enckey_len >> 1),
+ ablkctx->key,
+ ablkctx->enckey_len >> 1);
+ }
+ }
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
+ chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
+
+ atomic_inc(&adap->chcr_stats.cipher_rqst);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
+ + (reqctx->imm ? (wrparam->bytes) : 0);
+ create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
+ transhdr_len, temp,
+ ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
+ reqctx->skb = skb;
+
+ if (reqctx->op && (ablkctx->ciph_mode ==
+ CHCR_SCMD_CIPHER_MODE_AES_CBC))
+ sg_pcopy_to_buffer(wrparam->req->src,
+ sg_nents(wrparam->req->src), wrparam->req->iv, 16,
+ reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
+
+ return skb;
+err:
+ return ERR_PTR(error);
+}
+
+static inline int chcr_keyctx_ck_size(unsigned int keylen)
+{
+ int ck_size = 0;
+
+ if (keylen == AES_KEYSIZE_128)
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ else if (keylen == AES_KEYSIZE_192)
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ else if (keylen == AES_KEYSIZE_256)
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ else
+ ck_size = 0;
+
+ return ck_size;
+}
+
+static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+
+ crypto_skcipher_clear_flags(ablkctx->sw_cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ablkctx->sw_cipher,
+ cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+}
+
+static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+ unsigned int ck_size, context_size;
+ u16 alignment = 0;
+ int err;
+
+ err = chcr_cipher_fallback_setkey(cipher, key, keylen);
+ if (err)
+ goto badkey_err;
+
+ ck_size = chcr_keyctx_ck_size(keylen);
+ alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
+ keylen + alignment) >> 4;
+
+ ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
+ 0, 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
+ return 0;
+badkey_err:
+ ablkctx->enckey_len = 0;
+
+ return err;
+}
+
+static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+ unsigned int ck_size, context_size;
+ u16 alignment = 0;
+ int err;
+
+ err = chcr_cipher_fallback_setkey(cipher, key, keylen);
+ if (err)
+ goto badkey_err;
+ ck_size = chcr_keyctx_ck_size(keylen);
+ alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
+ keylen + alignment) >> 4;
+
+ ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
+ 0, 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+
+ return 0;
+badkey_err:
+ ablkctx->enckey_len = 0;
+
+ return err;
+}
+
+static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+ unsigned int ck_size, context_size;
+ u16 alignment = 0;
+ int err;
+
+ if (keylen < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+ memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
+ CTR_RFC3686_NONCE_SIZE);
+
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ err = chcr_cipher_fallback_setkey(cipher, key, keylen);
+ if (err)
+ goto badkey_err;
+
+ ck_size = chcr_keyctx_ck_size(keylen);
+ alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
+ keylen + alignment) >> 4;
+
+ ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
+ 0, 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+
+ return 0;
+badkey_err:
+ ablkctx->enckey_len = 0;
+
+ return err;
+}
+
+static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
+{
+ unsigned int size = AES_BLOCK_SIZE;
+ __be32 *b = (__be32 *)(dstiv + size);
+ u32 c, prev;
+
+ memcpy(dstiv, srciv, AES_BLOCK_SIZE);
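+ /* Add @add to the 128-bit big-endian counter, propagating the carry
+ * from the least significant 32-bit word upwards.
+ */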
+ for (; size >= 4; size -= 4) {
+ prev = be32_to_cpu(*--b);
+ c = prev + add;
+ *b = cpu_to_be32(c);
+ if (prev < c)
+ break;
+ add = 1;
+ }
+
+}
+
+static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
+{
+ __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
+ u64 c;
+ u32 temp = be32_to_cpu(*--b);
+
+ temp = ~temp;
+ /* Number of blocks that can be processed without overflow */
+ c = (u64)temp + 1;
+ if ((bytes / AES_BLOCK_SIZE) >= c)
+ bytes = c * AES_BLOCK_SIZE;
+ return bytes;
+}
+
+static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
+ u32 isfinal)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ struct crypto_aes_ctx aes;
+ int ret, i;
+ u8 *key;
+ unsigned int keylen;
+ int round = reqctx->last_req_len / AES_BLOCK_SIZE;
+ int round8 = round / 8;
+
+ memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
+
+ keylen = ablkctx->enckey_len / 2;
+ key = ablkctx->key + keylen;
+ /* For a 192-bit key, remove the padded zeroes which were
+ * added in chcr_xts_setkey
+ */
+ if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
+ == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
+ ret = aes_expandkey(&aes, key, keylen - 8);
+ else
+ ret = aes_expandkey(&aes, key, keylen);
+ if (ret)
+ return ret;
+ aes_encrypt(&aes, iv, iv);
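+ /* Advance the XTS tweak by one GF(2^128) multiplication by x per AES
+ * block already processed, eight doublings at a time where possible.
+ */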
+ for (i = 0; i < round8; i++)
+ gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
+
+ for (i = 0; i < (round % 8); i++)
+ gf128mul_x_ble((le128 *)iv, (le128 *)iv);
+
+ if (!isfinal)
+ aes_decrypt(&aes, iv, iv);
+
+ memzero_explicit(&aes, sizeof(aes));
+ return 0;
+}
+
+static int chcr_update_cipher_iv(struct skcipher_request *req,
+ struct cpl_fw6_pld *fw6_pld, u8 *iv)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ int subtype = get_cryptoalg_subtype(tfm);
+ int ret = 0;
+
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
+ ctr_add_iv(iv, req->iv, (reqctx->processed /
+ AES_BLOCK_SIZE));
+ else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
+ *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
+ AES_BLOCK_SIZE) + 1);
+ else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+ ret = chcr_update_tweak(req, iv, 0);
+ else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
+ if (reqctx->op)
+ /* Updated before sending last WR */
+ memcpy(iv, req->iv, AES_BLOCK_SIZE);
+ else
+ memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
+ }
+
+ return ret;
+
+}
+
+/* We need a separate function for the final IV because in RFC3686 the
+ * initial counter starts from 1 and the IV buffer is only 8 bytes, which
+ * remains constant for subsequent update requests.
+ */
+
+static int chcr_final_cipher_iv(struct skcipher_request *req,
+ struct cpl_fw6_pld *fw6_pld, u8 *iv)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ int subtype = get_cryptoalg_subtype(tfm);
+ int ret = 0;
+
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
+ ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
+ AES_BLOCK_SIZE));
+ else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
+ if (!reqctx->partial_req)
+ memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
+ else
+ ret = chcr_update_tweak(req, iv, 1);
+ } else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
+ /* Already updated for Decrypt */
+ if (!reqctx->op)
+ memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
+
+ }
+ return ret;
+
+}
+
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
+ unsigned char *input, int err)
+{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
+ struct chcr_context *ctx = c_ctx(tfm);
+ struct adapter *adap = padap(ctx->dev);
+ struct cipher_wr_param wrparam;
+ struct sk_buff *skb;
+ int bytes;
+
+ if (err)
+ goto unmap;
+ if (req->cryptlen == reqctx->processed) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
+ err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
+ goto complete;
+ }
+
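+ /* More data remains: work out how many bytes fit in the next WR and
+ * reissue, falling back to the software cipher if nothing more fits.
+ */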
+ if (!reqctx->imm) {
+ bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
+ CIP_SPACE_LEFT(ablkctx->enckey_len),
+ reqctx->src_ofst, reqctx->dst_ofst);
+ if ((bytes + reqctx->processed) >= req->cryptlen)
+ bytes = req->cryptlen - reqctx->processed;
+ else
+ bytes = rounddown(bytes, 16);
+ } else {
+ /* CTR mode counter overflow */
+ bytes = req->cryptlen - reqctx->processed;
+ }
+ err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
+ if (err)
+ goto unmap;
+
+ if (unlikely(bytes == 0)) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
+ memcpy(req->iv, reqctx->init_iv, IV);
+ atomic_inc(&adap->chcr_stats.fallback);
+ err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
+ reqctx->op);
+ goto complete;
+ }
+
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CTR)
+ bytes = adjust_ctr_overflow(reqctx->iv, bytes);
+ wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
+ wrparam.req = req;
+ wrparam.bytes = bytes;
+ skb = create_cipher_wr(&wrparam);
+ if (IS_ERR(skb)) {
+ pr_err("%s : Failed to form WR. No memory\n", __func__);
+ err = PTR_ERR(skb);
+ goto unmap;
+ }
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
+ chcr_send_wr(skb);
+ reqctx->last_req_len = bytes;
+ reqctx->processed += bytes;
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+ CRYPTO_TFM_REQ_MAY_SLEEP) {
+ complete(&ctx->cbc_aes_aio_done);
+ }
+ return 0;
+unmap:
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+complete:
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+ CRYPTO_TFM_REQ_MAY_SLEEP) {
+ complete(&ctx->cbc_aes_aio_done);
+ }
+ chcr_dec_wrcount(dev);
+ req->base.complete(&req->base, err);
+ return err;
+}
+
+static int process_cipher(struct skcipher_request *req,
+ unsigned short qid,
+ struct sk_buff **skb,
+ unsigned short op_type)
+{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+ struct adapter *adap = padap(c_ctx(tfm)->dev);
+ struct cipher_wr_param wrparam;
+ int bytes, err = -EINVAL;
+ int subtype;
+
+ reqctx->processed = 0;
+ reqctx->partial_req = 0;
+ if (!req->iv)
+ goto error;
+ subtype = get_cryptoalg_subtype(tfm);
+ if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
+ (req->cryptlen == 0) ||
+ (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
+ if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
+ goto fallback;
+ else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
+ subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+ goto fallback;
+ pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+ ablkctx->enckey_len, req->cryptlen, ivsize);
+ goto error;
+ }
+
+ err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ if (err)
+ goto error;
+ if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+ AES_MIN_KEY_SIZE +
+ sizeof(struct cpl_rx_phys_dsgl) +
+ /*Min dsgl size*/
+ 32))) {
+ /* Can be sent as Imm */
+ unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
+
+ dnents = sg_nents_xlen(req->dst, req->cryptlen,
+ CHCR_DST_SG_SIZE, 0);
+ phys_dsgl = get_space_for_phys_dsgl(dnents);
+ kctx_len = roundup(ablkctx->enckey_len, 16);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
+ reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
+ SGE_MAX_WR_LEN;
+ bytes = IV + req->cryptlen;
+
+ } else {
+ reqctx->imm = 0;
+ }
+
+ if (!reqctx->imm) {
+ bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
+ CIP_SPACE_LEFT(ablkctx->enckey_len),
+ 0, 0);
+ if ((bytes + reqctx->processed) >= req->cryptlen)
+ bytes = req->cryptlen - reqctx->processed;
+ else
+ bytes = rounddown(bytes, 16);
+ } else {
+ bytes = req->cryptlen;
+ }
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
+ bytes = adjust_ctr_overflow(req->iv, bytes);
+ }
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+ memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+
+ /* initialize counter portion of counter block */
+ *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ memcpy(reqctx->init_iv, reqctx->iv, IV);
+
+ } else {
+
+ memcpy(reqctx->iv, req->iv, IV);
+ memcpy(reqctx->init_iv, req->iv, IV);
+ }
+ if (unlikely(bytes == 0)) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
+fallback:
+ atomic_inc(&adap->chcr_stats.fallback);
+ err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
+ subtype ==
+ CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
+ reqctx->iv : req->iv,
+ op_type);
+ goto error;
+ }
+ reqctx->op = op_type;
+ reqctx->srcsg = req->src;
+ reqctx->dstsg = req->dst;
+ reqctx->src_ofst = 0;
+ reqctx->dst_ofst = 0;
+ wrparam.qid = qid;
+ wrparam.req = req;
+ wrparam.bytes = bytes;
+ *skb = create_cipher_wr(&wrparam);
+ if (IS_ERR(*skb)) {
+ err = PTR_ERR(*skb);
+ goto unmap;
+ }
+ reqctx->processed = bytes;
+ reqctx->last_req_len = bytes;
+ reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
+
+ return 0;
+unmap:
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+error:
+ return err;
+}
+
+static int chcr_aes_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
+ struct sk_buff *skb = NULL;
+ int err;
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct chcr_context *ctx = c_ctx(tfm);
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
+
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ reqctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+ err = -ENOSPC;
+ goto error;
+ }
+
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
+ &skb, CHCR_ENCRYPT_OP);
+ if (err || !skb)
+ return err;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
+ chcr_send_wr(skb);
+ if (get_cryptoalg_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
+ CRYPTO_TFM_REQ_MAY_SLEEP) {
+ reqctx->partial_req = 1;
+ wait_for_completion(&ctx->cbc_aes_aio_done);
+ }
+ return -EINPROGRESS;
+error:
+ chcr_dec_wrcount(dev);
+ return err;
+}
+
+static int chcr_aes_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
+ struct sk_buff *skb = NULL;
+ int err;
+ struct chcr_context *ctx = c_ctx(tfm);
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
+
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
+
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ reqctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
+ return -ENOSPC;
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
+ &skb, CHCR_DECRYPT_OP);
+ if (err || !skb)
+ return err;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
+static int chcr_device_init(struct chcr_context *ctx)
+{
+ struct uld_ctx *u_ctx = NULL;
+ int txq_perchan, ntxq;
+ int err = 0, rxq_perchan;
+
+ if (!ctx->dev) {
+ u_ctx = assign_chcr_device();
+ if (!u_ctx) {
+ err = -ENXIO;
+ pr_err("chcr device assignment fails\n");
+ goto out;
+ }
+ ctx->dev = &u_ctx->dev;
+ ntxq = u_ctx->lldi.ntxq;
+ rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
+ txq_perchan = ntxq / u_ctx->lldi.nchan;
+ ctx->ntxq = ntxq;
+ ctx->nrxq = u_ctx->lldi.nrxq;
+ ctx->rxq_perchan = rxq_perchan;
+ ctx->txq_perchan = txq_perchan;
+ }
+out:
+ return err;
+}
+
+static int chcr_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+
+ ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ablkctx->sw_cipher)) {
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
+ return PTR_ERR(ablkctx->sw_cipher);
+ }
+ init_completion(&ctx->cbc_aes_aio_done);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(ablkctx->sw_cipher));
+
+ return chcr_device_init(ctx);
+}
+
+static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
+{
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+
+ /* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
+ * cannot be used as the fallback in chcr_handle_cipher_resp()
+ */
+ ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ablkctx->sw_cipher)) {
+ pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
+ return PTR_ERR(ablkctx->sw_cipher);
+ }
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(ablkctx->sw_cipher));
+ return chcr_device_init(ctx);
+}
+
+static void chcr_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
+ struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+
+ crypto_free_skcipher(ablkctx->sw_cipher);
+}
+
+static int get_alg_config(struct algo_param *params,
+ unsigned int auth_size)
+{
+ switch (auth_size) {
+ case SHA1_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
+ params->result_size = SHA1_DIGEST_SIZE;
+ break;
+ case SHA224_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
+ params->result_size = SHA256_DIGEST_SIZE;
+ break;
+ case SHA256_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
+ params->result_size = SHA256_DIGEST_SIZE;
+ break;
+ case SHA384_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
+ params->result_size = SHA512_DIGEST_SIZE;
+ break;
+ case SHA512_DIGEST_SIZE:
+ params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
+ params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
+ params->result_size = SHA512_DIGEST_SIZE;
+ break;
+ default:
+ pr_err("ERROR, unsupported digest size\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
+{
+ crypto_free_shash(base_hash);
+}
+
+/**
+ * create_hash_wr - Create hash work request
+ * @req: Hash request base
+ * @param: Container for create_hash_wr()'s parameters
+ */
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
+ struct hash_wr_param *param)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct chcr_context *ctx = h_ctx(tfm);
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct sk_buff *skb = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_wr *chcr_req;
+ struct ulptx_sgl *ulptx;
+ unsigned int nents = 0, transhdr_len;
+ unsigned int temp = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ struct adapter *adap = padap(h_ctx(tfm)->dev);
+ int error = 0;
+ unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
+
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
+ req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
+ param->sg_len) <= SGE_MAX_WR_LEN;
+ nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
+ CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
+ nents += param->bfr_len ? 1 : 0;
+ transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
+ param->sg_len, 16) : (sgl_len(nents) * 8);
+ transhdr_len = roundup(transhdr_len, 16);
+
+ skb = alloc_skb(transhdr_len, flags);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
+
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
+
+ chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
+
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
+ param->opad_needed, 0);
+
+ chcr_req->sec_cpl.ivgen_hdrlen =
+ FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
+
+ memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+ param->alg_prm.result_size);
+
+ if (param->opad_needed)
+ memcpy(chcr_req->key_ctx.key +
+ ((param->alg_prm.result_size <= 32) ? 32 :
+ CHCR_HASH_MAX_DIGEST_SIZE),
+ hmacctx->opad, param->alg_prm.result_size);
+
+ chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+ param->alg_prm.mk_size, 0,
+ param->opad_needed,
+ ((param->kctx_len +
+ sizeof(chcr_req->key_ctx)) >> 4));
+ chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
+ ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
+ DUMMY_BYTES);
+ if (param->bfr_len != 0) {
+ req_ctx->hctx_wr.dma_addr =
+ dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
+ param->bfr_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
+ req_ctx->hctx_wr.dma_addr)) {
+ error = -ENOMEM;
+ goto err;
+ }
+ req_ctx->hctx_wr.dma_len = param->bfr_len;
+ } else {
+ req_ctx->hctx_wr.dma_addr = 0;
+ }
+ chcr_add_hash_src_ent(req, ulptx, param);
+ /* Request up to max WR size */
+ temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
+ (param->sg_len + param->bfr_len) : 0);
+ atomic_inc(&adap->chcr_stats.digest_rqst);
+ create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
+ param->hash_size, transhdr_len,
+ temp, 0);
+ req_ctx->hctx_wr.skb = skb;
+ return skb;
+err:
+ kfree_skb(skb);
+ return ERR_PTR(error);
+}
+
+static int chcr_ahash_update(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
+ struct sk_buff *skb;
+ u8 remainder = 0, bs;
+ unsigned int nbytes = req->nbytes;
+ struct hash_wr_param params;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
+
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+
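+ /* Buffer sub-block-size data locally; only whole blocks are sent to
+ * the hardware and the remainder is carried over in reqbfr.
+ */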
+ if (nbytes + req_ctx->reqlen >= bs) {
+ remainder = (nbytes + req_ctx->reqlen) % bs;
+ nbytes = nbytes + req_ctx->reqlen - remainder;
+ } else {
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+ + req_ctx->reqlen, nbytes, 0);
+ req_ctx->reqlen += nbytes;
+ return 0;
+ }
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
+ /* Detach state for CHCR means lldi or padap is freed. Increasing the
+ * inflight count for the dev guarantees that lldi and padap are valid.
+ */
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ req_ctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+ error = -ENOSPC;
+ goto err;
+ }
+
+ chcr_init_hctx_per_wr(req_ctx);
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+ HASH_SPACE_LEFT(params.kctx_len), 0);
+ if (params.sg_len > req->nbytes)
+ params.sg_len = req->nbytes;
+ params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
+ req_ctx->reqlen;
+ params.opad_needed = 0;
+ params.more = 1;
+ params.last = 0;
+ params.bfr_len = req_ctx->reqlen;
+ params.scmd1 = 0;
+ req_ctx->hctx_wr.srcsg = req->src;
+
+ params.hash_size = params.alg_prm.result_size;
+ req_ctx->data_len += params.sg_len + params.bfr_len;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
+
+ req_ctx->hctx_wr.processed += params.sg_len;
+ if (remainder) {
+ /* Swap buffers */
+ swap(req_ctx->reqbfr, req_ctx->skbfr);
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ req_ctx->reqbfr, remainder, req->nbytes -
+ remainder);
+ }
+ req_ctx->reqlen = remainder;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
+ return error;
+}
+
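+/* Build the final hash padding block by hand: a 0x80 marker followed by the
+ * message length in bits in the last eight bytes of the block.
+ */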
+static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
+{
+ memset(bfr_ptr, 0, bs);
+ *bfr_ptr = 0x80;
+ if (bs == 64)
+ *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
+ else
+ *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
+}
+
+static int chcr_ahash_final(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
+ struct hash_wr_param params;
+ struct sk_buff *skb;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
+ u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
+
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
+
+ chcr_init_hctx_per_wr(req_ctx);
+ if (is_hmac(crypto_ahash_tfm(rtfm)))
+ params.opad_needed = 1;
+ else
+ params.opad_needed = 0;
+ params.sg_len = 0;
+ req_ctx->hctx_wr.isfinal = 1;
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.opad_needed = 1;
+ params.kctx_len *= 2;
+ } else {
+ params.opad_needed = 0;
+ }
+
+ req_ctx->hctx_wr.result = 1;
+ params.bfr_len = req_ctx->reqlen;
+ req_ctx->data_len += params.bfr_len + params.sg_len;
+ req_ctx->hctx_wr.srcsg = req->src;
+ if (req_ctx->reqlen == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
+ params.last = 0;
+ params.more = 1;
+ params.scmd1 = 0;
+ params.bfr_len = bs;
+
+ } else {
+ params.scmd1 = req_ctx->data_len;
+ params.last = 1;
+ params.more = 0;
+ }
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto err;
+ }
+ req_ctx->reqlen = 0;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+err:
+ chcr_dec_wrcount(dev);
+ return error;
+}
+
+static int chcr_ahash_finup(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
+ struct sk_buff *skb;
+ struct hash_wr_param params;
+ u8 bs;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
+
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
+
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ req_ctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+ error = -ENOSPC;
+ goto err;
+ }
+ chcr_init_hctx_per_wr(req_ctx);
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len *= 2;
+ params.opad_needed = 1;
+ } else {
+ params.opad_needed = 0;
+ }
+
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+ HASH_SPACE_LEFT(params.kctx_len), 0);
+ if (params.sg_len < req->nbytes) {
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len /= 2;
+ params.opad_needed = 0;
+ }
+ params.last = 0;
+ params.more = 1;
+ params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
+ - req_ctx->reqlen;
+ params.hash_size = params.alg_prm.result_size;
+ params.scmd1 = 0;
+ } else {
+ params.last = 1;
+ params.more = 0;
+ params.sg_len = req->nbytes;
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
+ params.sg_len;
+ }
+ params.bfr_len = req_ctx->reqlen;
+ req_ctx->data_len += params.bfr_len + params.sg_len;
+ req_ctx->hctx_wr.result = 1;
+ req_ctx->hctx_wr.srcsg = req->src;
+ if ((req_ctx->reqlen + req->nbytes) == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
+ params.last = 0;
+ params.more = 1;
+ params.scmd1 = 0;
+ params.bfr_len = bs;
+ }
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
+ req_ctx->reqlen = 0;
+ req_ctx->hctx_wr.processed += params.sg_len;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
+ return error;
+}
+
+static int chcr_ahash_digest(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
+ struct chcr_context *ctx = h_ctx(rtfm);
+ struct sk_buff *skb;
+ struct hash_wr_param params;
+ u8 bs;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ req_ctx->txqidx = cpu % ctx->ntxq;
+ req_ctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
+
+ rtfm->init(req);
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
+
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ req_ctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+ error = -ENOSPC;
+ goto err;
+ }
+
+ chcr_init_hctx_per_wr(req_ctx);
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len *= 2;
+ params.opad_needed = 1;
+ } else {
+ params.opad_needed = 0;
+ }
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+ HASH_SPACE_LEFT(params.kctx_len), 0);
+ if (params.sg_len < req->nbytes) {
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len /= 2;
+ params.opad_needed = 0;
+ }
+ params.last = 0;
+ params.more = 1;
+ params.scmd1 = 0;
+ params.sg_len = rounddown(params.sg_len, bs);
+ params.hash_size = params.alg_prm.result_size;
+ } else {
+ params.sg_len = req->nbytes;
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ params.last = 1;
+ params.more = 0;
+ params.scmd1 = req->nbytes + req_ctx->data_len;
+
+ }
+ params.bfr_len = 0;
+ req_ctx->hctx_wr.result = 1;
+ req_ctx->hctx_wr.srcsg = req->src;
+ req_ctx->data_len += params.bfr_len + params.sg_len;
+
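+	/* Zero-length message: feed a locally built final block through the
+	 * buffer path instead of SG data.
+	 */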
+ if (req->nbytes == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
+ params.more = 1;
+ params.bfr_len = bs;
+ }
+
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
+ req_ctx->hctx_wr.processed += params.sg_len;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
+ return error;
+}
+
+static int chcr_ahash_continue(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+ struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_context *ctx = h_ctx(rtfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct sk_buff *skb;
+ struct hash_wr_param params;
+ u8 bs;
+ int error;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
+
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len *= 2;
+ params.opad_needed = 1;
+ } else {
+ params.opad_needed = 0;
+ }
+ params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
+ HASH_SPACE_LEFT(params.kctx_len),
+ hctx_wr->src_ofst);
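+	/* Do not send more than what remains of the request in this WR. */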
+ if ((params.sg_len + hctx_wr->processed) > req->nbytes)
+ params.sg_len = req->nbytes - hctx_wr->processed;
+ if (!hctx_wr->result ||
+ ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len /= 2;
+ params.opad_needed = 0;
+ }
+ params.last = 0;
+ params.more = 1;
+ params.sg_len = rounddown(params.sg_len, bs);
+ params.hash_size = params.alg_prm.result_size;
+ params.scmd1 = 0;
+ } else {
+ params.last = 1;
+ params.more = 0;
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ params.scmd1 = reqctx->data_len + params.sg_len;
+ }
+ params.bfr_len = 0;
+ reqctx->data_len += params.sg_len;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto err;
+ }
+ hctx_wr->processed += params.sg_len;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
+ chcr_send_wr(skb);
+ return 0;
+err:
+ return error;
+}
+
+static inline void chcr_handle_ahash_resp(struct ahash_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+ struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+ int digestsize, updated_digestsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+ struct chcr_dev *dev = h_ctx(tfm)->dev;
+
+ if (input == NULL)
+ goto out;
+ digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
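+	/* SHA-224 and SHA-384 are truncations of SHA-256 and SHA-512, so the
+	 * intermediate (partial) hash state has the width of the larger digest.
+	 */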
+ updated_digestsize = digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ updated_digestsize = SHA256_DIGEST_SIZE;
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ updated_digestsize = SHA512_DIGEST_SIZE;
+
+ if (hctx_wr->dma_addr) {
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
+ hctx_wr->dma_len, DMA_TO_DEVICE);
+ hctx_wr->dma_addr = 0;
+ }
+ if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
+ req->nbytes)) {
+ if (hctx_wr->result == 1) {
+ hctx_wr->result = 0;
+ memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+ digestsize);
+ } else {
+ memcpy(reqctx->partial_hash,
+ input + sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+
+ }
+ goto unmap;
+ }
+ memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+
+ err = chcr_ahash_continue(req);
+ if (err)
+ goto unmap;
+ return;
+unmap:
+ if (hctx_wr->is_sg_map)
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+
+out:
+ chcr_dec_wrcount(dev);
+ req->base.complete(&req->base, err);
+}
+
+/*
+ * chcr_handle_resp - Unmap the DMA buffers associated with the request and
+ * dispatch the completion to the AEAD, cipher or hash handler.
+ * @req: crypto request
+ * @input: hardware response (CPL) message
+ * @err: completion status
+ */
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+ int err)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct adapter *adap = padap(ctx->dev);
+
+ switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
+ break;
+
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ chcr_handle_cipher_resp(skcipher_request_cast(req),
+ input, err);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
+ }
+ atomic_inc(&adap->chcr_stats.complete);
+ return err;
+}
+
+static int chcr_ahash_export(struct ahash_request *areq, void *out)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct chcr_ahash_req_ctx *state = out;
+
+ state->reqlen = req_ctx->reqlen;
+ state->data_len = req_ctx->data_len;
+ memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
+ memcpy(state->partial_hash, req_ctx->partial_hash,
+ CHCR_HASH_MAX_DIGEST_SIZE);
+ chcr_init_hctx_per_wr(state);
+ return 0;
+}
+
+static int chcr_ahash_import(struct ahash_request *areq, const void *in)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
+
+ req_ctx->reqlen = state->reqlen;
+ req_ctx->data_len = state->data_len;
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
+ memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ memcpy(req_ctx->partial_hash, state->partial_hash,
+ CHCR_HASH_MAX_DIGEST_SIZE);
+ chcr_init_hctx_per_wr(req_ctx);
+ return 0;
+}
+
+static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
+ unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int i, err = 0, updated_digestsize;
+
+ SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+	/* Use the key to calculate the ipad and opad. The ipad is sent with
+	 * the first request's data and the opad with the final hash result.
+	 * They are kept in hmacctx->ipad and hmacctx->opad respectively.
+	 */
+ shash->tfm = hmacctx->base_hash;
+ if (keylen > bs) {
+ err = crypto_shash_digest(shash, key, keylen,
+ hmacctx->ipad);
+ if (err)
+ goto out;
+ keylen = digestsize;
+ } else {
+ memcpy(hmacctx->ipad, key, keylen);
+ }
+ memset(hmacctx->ipad + keylen, 0, bs - keylen);
+ memcpy(hmacctx->opad, hmacctx->ipad, bs);
+
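+	/* XOR the zero-padded key with the standard HMAC ipad/opad constants,
+	 * one word at a time.
+	 */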
+ for (i = 0; i < bs / sizeof(int); i++) {
+ *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
+ *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
+ }
+
+ updated_digestsize = digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ updated_digestsize = SHA256_DIGEST_SIZE;
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ updated_digestsize = SHA512_DIGEST_SIZE;
+ err = chcr_compute_partial_hash(shash, hmacctx->ipad,
+ hmacctx->ipad, digestsize);
+ if (err)
+ goto out;
+ chcr_change_order(hmacctx->ipad, updated_digestsize);
+
+ err = chcr_compute_partial_hash(shash, hmacctx->opad,
+ hmacctx->opad, digestsize);
+ if (err)
+ goto out;
+ chcr_change_order(hmacctx->opad, updated_digestsize);
+out:
+ return err;
+}
+
+static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int key_len)
+{
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
+ unsigned short context_size = 0;
+ int err;
+
+ err = chcr_cipher_fallback_setkey(cipher, key, key_len);
+ if (err)
+ goto badkey_err;
+
+ memcpy(ablkctx->key, key, key_len);
+ ablkctx->enckey_len = key_len;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+	/* Both XTS keys must be padded with zeroes to a 16 byte boundary.
+	 * For a 48 byte (2 x 24 byte) key, each half therefore gets 8 bytes
+	 * of zero padding, giving a 64 byte key context.
+	 */
+ if (key_len == 48) {
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
+ + 16) >> 4;
+ memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
+ memset(ablkctx->key + 24, 0, 8);
+ memset(ablkctx->key + 56, 0, 8);
+ ablkctx->enckey_len = 64;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ } else {
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ }
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+ return 0;
+badkey_err:
+ ablkctx->enckey_len = 0;
+
+ return err;
+}
+
+static int chcr_sha_init(struct ahash_request *areq)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ int digestsize = crypto_ahash_digestsize(tfm);
+
+ req_ctx->data_len = 0;
+ req_ctx->reqlen = 0;
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
+ copy_hash_init_values(req_ctx->partial_hash, digestsize);
+
+ return 0;
+}
+
+static int chcr_sha_cra_init(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct chcr_ahash_req_ctx));
+ return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+static int chcr_hmac_init(struct ahash_request *areq)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
+ unsigned int digestsize = crypto_ahash_digestsize(rtfm);
+ unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+
+ chcr_sha_init(areq);
+ req_ctx->data_len = bs;
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ if (digestsize == SHA224_DIGEST_SIZE)
+ memcpy(req_ctx->partial_hash, hmacctx->ipad,
+ SHA256_DIGEST_SIZE);
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ memcpy(req_ctx->partial_hash, hmacctx->ipad,
+ SHA512_DIGEST_SIZE);
+ else
+ memcpy(req_ctx->partial_hash, hmacctx->ipad,
+ digestsize);
+ }
+ return 0;
+}
+
+static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
+{
+ struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ unsigned int digestsize =
+ crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct chcr_ahash_req_ctx));
+ hmacctx->base_hash = chcr_alloc_shash(digestsize);
+ if (IS_ERR(hmacctx->base_hash))
+ return PTR_ERR(hmacctx->base_hash);
+ return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
+{
+ struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+
+ if (hmacctx->base_hash) {
+ chcr_free_shash(hmacctx->base_hash);
+ hmacctx->base_hash = NULL;
+ }
+}
+
+inline void chcr_aead_common_exit(struct aead_request *req)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+}
+
+static int chcr_aead_common_init(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int error = -EINVAL;
+
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+ if (reqctx->op && req->cryptlen < authsize)
+ goto err;
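+	/* The CCM B0 block, when present, immediately follows the IV in the
+	 * request context scratch area.
+	 */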
+ if (reqctx->b0_len)
+ reqctx->scratch_pad = reqctx->iv + IV;
+ else
+ reqctx->scratch_pad = NULL;
+
+ error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ reqctx->op);
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+err:
+ return error;
+}
+
+static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
+ int aadmax, int wrlen,
+ unsigned short op_type)
+{
+ unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+
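+	/* Fall back to software for zero-length payloads, overly fragmented
+	 * destination lists, oversized AAD, or WRs that exceed a single SGE.
+	 */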
+ if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
+ dst_nents > MAX_DSGL_ENT ||
+ (req->assoclen > aadmax) ||
+ (wrlen > SGE_MAX_WR_LEN))
+ return 1;
+ return 0;
+}
+
+static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, aeadctx->sw_cipher);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+ return op_type ? crypto_aead_decrypt(subreq) :
+ crypto_aead_encrypt(subreq);
+}
+
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ unsigned short qid,
+ int size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+ unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
+ unsigned int kctx_len = 0, dnents, snents;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int error = -EINVAL;
+ u8 *ivptr;
+ int null = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ struct adapter *adap = padap(ctx->dev);
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ if (req->cryptlen == 0)
+ return NULL;
+
+ reqctx->b0_len = 0;
+ error = chcr_aead_common_init(req);
+ if (error)
+ return ERR_PTR(error);
+
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ null = 1;
+ }
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
+ dnents += MIN_AUTH_SG; // For IV
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
+ dst_size = get_space_for_phys_dsgl(dnents);
+ kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
+ - sizeof(chcr_req->key_ctx);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
+ SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
+ : (sgl_len(snents) * 8);
+ transhdr_len += temp;
+ transhdr_len = roundup(transhdr_len, 16);
+
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+ transhdr_len, reqctx->op)) {
+ atomic_inc(&adap->chcr_stats.fallback);
+ chcr_aead_common_exit(req);
+ return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
+ }
+ skb = alloc_skb(transhdr_len, flags);
+ if (!skb) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ chcr_req = __skb_put_zero(skb, transhdr_len);
+
+ temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+	/*
+	 * Input order is AAD, IV and payload, where the IV is counted as
+	 * part of the authdata. All other fields are filled according to
+	 * the hardware spec.
+	 */
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ null ? 0 : 1 + IV,
+ null ? 0 : IV + req->assoclen,
+ req->assoclen + IV + 1,
+ (temp & 0x1F0) >> 4);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+ temp & 0xF,
+ null ? 0 : req->assoclen + IV + 1,
+ temp, temp);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+ else
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
+ (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
+ temp,
+ actx->auth_mode, aeadctx->hmac_ctrl,
+ IV >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 0, dst_size);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ if (reqctx->op == CHCR_ENCRYPT_OP ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
+ memcpy(chcr_req->key_ctx.key, aeadctx->key,
+ aeadctx->enckey_len);
+ else
+ memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+ aeadctx->enckey_len);
+
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+ *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ } else {
+ memcpy(ivptr, req->iv, IV);
+ }
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
+ atomic_inc(&adap->chcr_stats.cipher_rqst);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, 0);
+ reqctx->skb = skb;
+
+ return skb;
+err:
+ chcr_aead_common_exit(req);
+
+ return ERR_PTR(error);
+}
+
+int chcr_aead_dma_map(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
+{
+ int error;
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int src_len, dst_len;
+
+	/* Calculate and handle the src and dst sg lengths separately for
+	 * in-place and out-of-place operations.
+	 */
+ if (req->src == req->dst) {
+ src_len = req->assoclen + req->cryptlen + (op_type ?
+ 0 : authsize);
+ dst_len = src_len;
+ } else {
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ }
+
+ if (!req->cryptlen || !src_len || !dst_len)
+ return 0;
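+	/* The IV and the optional CCM B0 block are mapped as one contiguous
+	 * buffer; B0 follows the IV.
+	 */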
+ reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, reqctx->iv_dma))
+ return -ENOMEM;
+ if (reqctx->b0_len)
+ reqctx->b0_dma = reqctx->iv_dma + IV;
+ else
+ reqctx->b0_dma = 0;
+ if (req->src == req->dst) {
+ error = dma_map_sg(dev, req->src,
+ sg_nents_for_len(req->src, src_len),
+ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+ error = dma_map_sg(dev, req->src,
+ sg_nents_for_len(req->src, src_len),
+ DMA_TO_DEVICE);
+ if (!error)
+ goto err;
+ error = dma_map_sg(dev, req->dst,
+ sg_nents_for_len(req->dst, dst_len),
+ DMA_FROM_DEVICE);
+ if (!error) {
+ dma_unmap_sg(dev, req->src,
+ sg_nents_for_len(req->src, src_len),
+ DMA_TO_DEVICE);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+}
+
+void chcr_aead_dma_unmap(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int src_len, dst_len;
+
+	/* Calculate and handle the src and dst sg lengths separately for
+	 * in-place and out-of-place operations.
+	 */
+ if (req->src == req->dst) {
+ src_len = req->assoclen + req->cryptlen + (op_type ?
+ 0 : authsize);
+ dst_len = src_len;
+ } else {
+ src_len = req->assoclen + req->cryptlen;
+ dst_len = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ }
+
+ if (!req->cryptlen || !src_len || !dst_len)
+ return;
+
+ dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
+ DMA_BIDIRECTIONAL);
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src,
+ sg_nents_for_len(req->src, src_len),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src,
+ sg_nents_for_len(req->src, src_len),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst,
+ sg_nents_for_len(req->dst, dst_len),
+ DMA_FROM_DEVICE);
+ }
+}
+
+void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ if (reqctx->b0_len) {
+ memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
+ buf += reqctx->b0_len;
+ }
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, req->cryptlen + req->assoclen, 0);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (reqctx->b0_len)
+ ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
+ reqctx->b0_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
+ req->assoclen, 0);
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned short qid)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct dsgl_walk dsgl_walk;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ u32 temp;
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+ dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
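+	/* The output carries the tag on encrypt and omits it on decrypt. */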
+ temp = req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
+ dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
+}
+
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
+ void *ulptx,
+ struct cipher_wr_param *wrparam)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ u8 *buf = ulptx;
+
+ memcpy(buf, reqctx->iv, IV);
+ buf += IV;
+ if (reqctx->imm) {
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, wrparam->bytes, reqctx->processed);
+ } else {
+ ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
+ ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
+ reqctx->src_ofst);
+ reqctx->srcsg = ulp_walk.last_sg;
+ reqctx->src_ofst = ulp_walk.last_sg_len;
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid)
+{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
+ struct chcr_context *ctx = c_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct dsgl_walk dsgl_walk;
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+ dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
+ reqctx->dst_ofst);
+ reqctx->dstsg = dsgl_walk.last_sg;
+ reqctx->dst_ofst = dsgl_walk.last_sg_len;
+ dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
+}
+
+void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+
+ if (reqctx->hctx_wr.imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ if (param->bfr_len) {
+ memcpy(buf, reqctx->reqbfr, param->bfr_len);
+ buf += param->bfr_len;
+ }
+
+ sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
+ sg_nents(reqctx->hctx_wr.srcsg), buf,
+ param->sg_len, 0);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (param->bfr_len)
+ ulptx_walk_add_page(&ulp_walk, param->bfr_len,
+ reqctx->hctx_wr.dma_addr);
+ ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
+ param->sg_len, reqctx->hctx_wr.src_ofst);
+ reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
+ reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ int error = 0;
+
+ if (!req->nbytes)
+ return 0;
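+	/* dma_map_sg() returns the number of mapped entries; zero means failure. */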
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ return -ENOMEM;
+ req_ctx->hctx_wr.is_sg_map = 1;
+ return 0;
+}
+
+void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return;
+
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ req_ctx->hctx_wr.is_sg_map = 0;
+}
+
+int chcr_cipher_dma_map(struct device *dev,
+ struct skcipher_request *req)
+{
+ int error;
+
+ if (req->src == req->dst) {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ goto err;
+ error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ if (!error) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ return -ENOMEM;
+}
+
+void chcr_cipher_dma_unmap(struct device *dev,
+ struct skcipher_request *req)
+{
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ }
+}
+
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
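+	/* Encode msglen big-endian into the last csize bytes of the zeroed
+	 * block; at most four bytes are ever needed.
+	 */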
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (unsigned int)(1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
+static int generate_b0(struct aead_request *req, u8 *ivptr,
+ unsigned short op_type)
+{
+ unsigned int l, lp, m;
+ int rc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ u8 *b0 = reqctx->scratch_pad;
+
+ m = crypto_aead_authsize(aead);
+
+ memcpy(b0, ivptr, 16);
+
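+	/* B0 flags byte (RFC 3610): bit 6 = Adata, bits 3..5 = (M - 2) / 2,
+	 * bits 0..2 = L - 1 (already present in iv[0]).
+	 */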
+ lp = b0[0];
+ l = lp + 1;
+
+ /* set m, bits 3-5 */
+ *b0 |= (8 * ((m - 2) / 2));
+
+ /* set adata, bit 6, if associated data is used */
+ if (req->assoclen)
+ *b0 |= 64;
+ rc = set_msg_len(b0 + 16 - l,
+ (op_type == CHCR_DECRYPT_OP) ?
+ req->cryptlen - m : req->cryptlen, l);
+
+ return rc;
+}
+
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ccm_format_packet(struct aead_request *req,
+ u8 *ivptr,
+ unsigned int sub_type,
+ unsigned short op_type,
+ unsigned int assoclen)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ int rc = 0;
+
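+	/* RFC 4309 nonce layout: flags (L' = 3), 3 byte salt from the key,
+	 * 8 byte per-request IV and a zeroed 4 byte counter.
+	 */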
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ ivptr[0] = 3;
+ memcpy(ivptr + 1, &aeadctx->salt[0], 3);
+ memcpy(ivptr + 4, req->iv, 8);
+ memset(ivptr + 12, 0, 4);
+ } else {
+ memcpy(ivptr, req->iv, 16);
+ }
+ if (assoclen)
+ put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
+
+ rc = generate_b0(req, ivptr, op_type);
+ /* zero the ctr value */
+ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
+ return rc;
+}
+
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ unsigned int dst_size,
+ struct aead_request *req,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+ unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ unsigned int ccm_xtra;
+ unsigned int tag_offset = 0, auth_offset = 0;
+ unsigned int assoclen;
+
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen = req->assoclen - 8;
+ else
+ assoclen = req->assoclen;
+ ccm_xtra = CCM_B0_SIZE +
+ ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+ auth_offset = req->cryptlen ?
+ (req->assoclen + IV + 1 + ccm_xtra) : 0;
+ if (op_type == CHCR_DECRYPT_OP) {
+ if (crypto_aead_authsize(tfm) != req->cryptlen)
+ tag_offset = crypto_aead_authsize(tfm);
+ else
+ auth_offset = 0;
+ }
+
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
+ sec_cpl->pldlen =
+ htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
+	/* For CCM there is always a B0 block, so the AAD start is always 1 */
+ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ 1 + IV, IV + assoclen + ccm_xtra,
+ req->assoclen + IV + 1 + ccm_xtra, 0);
+
+ sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+ auth_offset, tag_offset,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 :
+ crypto_aead_authsize(tfm));
+ sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+ cipher_mode, mac_mode,
+ aeadctx->hmac_ctrl, IV >> 1);
+
+ sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+ 0, dst_size);
+}
+
+static int aead_ccm_validate_input(unsigned short op_type,
+ struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type)
+{
+ if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ if (crypto_ccm_check_iv(req->iv)) {
+ pr_err("CCM: IV check fails\n");
+ return -EINVAL;
+ }
+ } else {
+ if (req->assoclen != 16 && req->assoclen != 20) {
+ pr_err("RFC4309: Invalid AAD length %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+ unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
+ unsigned int sub_type, assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int error = -EINVAL;
+ u8 *ivptr;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
+
+ sub_type = get_aead_subtype(tfm);
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen -= 8;
+ reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
+ error = chcr_aead_common_init(req);
+ if (error)
+ return ERR_PTR(error);
+
+ error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
+ if (error)
+ goto err;
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
+ + (reqctx->op ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, 0);
+ dnents += MIN_CCM_SG; // For IV and B0
+ dst_size = get_space_for_phys_dsgl(dnents);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
+	snents += MIN_CCM_SG; // For B0
+ kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
+ reqctx->b0_len) <= SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
+ reqctx->b0_len, 16) :
+ (sgl_len(snents) * 8);
+ transhdr_len += temp;
+ transhdr_len = roundup(transhdr_len, 16);
+
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
+ reqctx->b0_len, transhdr_len, reqctx->op)) {
+ atomic_inc(&adap->chcr_stats.fallback);
+ chcr_aead_common_exit(req);
+ return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
+ }
+ skb = alloc_skb(transhdr_len, flags);
+
+ if (!skb) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ chcr_req = __skb_put_zero(skb, transhdr_len);
+
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ aeadctx->key, aeadctx->enckey_len);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
+ error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
+ if (error)
+ goto dstmap_fail;
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
+
+ atomic_inc(&adap->chcr_stats.aead_rqst);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
+ reqctx->b0_len) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
+ transhdr_len, temp, 0);
+ reqctx->skb = skb;
+
+ return skb;
+dstmap_fail:
+ kfree_skb(skb);
+err:
+ chcr_aead_common_exit(req);
+ return ERR_PTR(error);
+}
+
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len, dnents = 0, snents;
+ unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int error = -EINVAL;
+ u8 *ivptr;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ struct adapter *adap = padap(ctx->dev);
+ unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
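+	/* For RFC 4106 the 8 byte explicit IV is carried at the tail of the
+	 * associated data and is not counted as AAD.
+	 */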
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ assoclen = req->assoclen - 8;
+
+ reqctx->b0_len = 0;
+ error = chcr_aead_common_init(req);
+ if (error)
+ return ERR_PTR(error);
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, 0);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
+ dnents += MIN_GCM_SG; // For IV
+ dst_size = get_space_for_phys_dsgl(dnents);
+ kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
+ SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
+ (sgl_len(snents) * 8);
+ transhdr_len += temp;
+ transhdr_len = roundup(transhdr_len, 16);
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+ transhdr_len, reqctx->op)) {
+
+ atomic_inc(&adap->chcr_stats.fallback);
+ chcr_aead_common_exit(req);
+ return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
+ }
+ skb = alloc_skb(transhdr_len, flags);
+ if (!skb) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ chcr_req = __skb_put_zero(skb, transhdr_len);
+
+	// Offset of the tag from the end
+ temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+ rx_channel_id, 2, 1);
+ chcr_req->sec_cpl.pldlen =
+ htonl(req->assoclen + IV + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ assoclen ? 1 + IV : 0,
+ assoclen ? IV + assoclen : 0,
+ req->assoclen + IV + 1, 0);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
+ temp, temp);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
+ CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH,
+ aeadctx->hmac_ctrl, IV >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 0, dst_size);
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+	/* Prepare a 16 byte IV: SALT | IV | 0x00000001 */
+ if (get_aead_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ memcpy(ivptr, aeadctx->salt, 4);
+ memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
+ } else {
+ memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
+ }
+ put_unaligned_be32(0x01, &ivptr[12]);
+ ulptx = (struct ulptx_sgl *)(ivptr + 16);
+
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
+ atomic_inc(&adap->chcr_stats.aead_rqst);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, reqctx->verify);
+ reqctx->skb = skb;
+ return skb;
+
+err:
+ chcr_aead_common_exit(req);
+ return ERR_PTR(error);
+}
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+
+ aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(aeadctx->sw_cipher))
+ return PTR_ERR(aeadctx->sw_cipher);
+ crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(aeadctx->sw_cipher)));
+ return chcr_device_init(a_ctx(tfm));
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+ crypto_free_aead(aeadctx->sw_cipher);
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+ aeadctx->mayverify = VERIFY_HW;
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+	/* The SHA1 authsize used by IPsec is 12 rather than 10, i.e.
+	 * maxauthsize / 2 does not hold for SHA1, so the authsize == 12
+	 * check must come before the authsize == (maxauth >> 1) check.
+	 */
+ if (authsize == ICV_4) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_6) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_10) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_12) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_14) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == (maxauth >> 1)) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == maxauth) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ } else {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ }
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_13:
+ case ICV_15:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_6:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_10:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
+}
+
+static int chcr_ccm_common_setkey(struct crypto_aead *aead,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
+ unsigned char ck_size, mk_size;
+ int key_ctx_size = 0;
+
+ key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ } else {
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+ key_ctx_size >> 4);
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+
+ return 0;
+}
+
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
+ int error;
+
+ crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+ error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+ if (error)
+ return error;
+ return chcr_ccm_common_setkey(aead, key, keylen);
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
+ int error;
+
+ if (keylen < 3) {
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+ error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+ if (error)
+ return error;
+ keylen -= 3;
+ memcpy(aeadctx->salt, key + keylen, 3);
+ return chcr_ccm_common_setkey(aead, key, keylen);
+}
+
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
+ struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+ unsigned int ck_size;
+ int ret = 0, key_ctx_size = 0;
+ struct crypto_aes_ctx aes;
+
+ aeadctx->enckey_len = 0;
+ crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
+ & CRYPTO_TFM_REQ_MASK);
+ ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+ if (ret)
+ goto out;
+
+ if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+ keylen > 3) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(aeadctx->salt, key + keylen, 4);
+ }
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("GCM: Invalid key length %d\n", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
+ AEAD_H_SIZE;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+	/* Calculate H = CIPH(K, 0^128), i.e. encrypt a block of 16 zero
+	 * bytes with the cipher key. It goes into the key context.
+	 */
+ ret = aes_expandkey(&aes, key, keylen);
+ if (ret) {
+ aeadctx->enckey_len = 0;
+ goto out;
+ }
+ memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+ aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
+ memzero_explicit(&aes, sizeof(aes));
+
+out:
+ return ret;
+}
+
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* contains both the authentication and cipher keys */
+ struct crypto_authenc_keys keys;
+ unsigned int bs, subtype;
+ unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+ int err = 0, i, key_ctx_len = 0;
+ unsigned char ck_size = 0;
+ unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+ struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
+ struct algo_param param;
+ int align;
+ u8 *o_ptr = NULL;
+
+ crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
+ & CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+ if (err)
+ goto out;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto out;
+
+ if (get_alg_config(&param, max_authsize)) {
+ pr_err("Unsupported digest size\n");
+ goto out;
+ }
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("Unsupported cipher key\n");
+ goto out;
+ }
+
+	/* Copy only the encryption key. The authkey is only used to generate
+	 * h(ipad) and h(opad), so it is not needed again. authkeylen equals
+	 * the hash digest size.
+	 */
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
+ base_hash = chcr_alloc_shash(max_authsize);
+ if (IS_ERR(base_hash)) {
+ pr_err("Base driver cannot be loaded\n");
+ goto out;
+ }
+ {
+ SHASH_DESC_ON_STACK(shash, base_hash);
+
+ shash->tfm = base_hash;
+ bs = crypto_shash_blocksize(base_hash);
+ align = KEYCTX_ALIGN_PAD(max_authsize);
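+		/* h_iopad holds h(ipad) followed, after alignment padding,
+		 * by h(opad); o_ptr addresses the opad slot.
+		 */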
+ o_ptr = actx->h_iopad + param.result_size + align;
+
+ if (keys.authkeylen > bs) {
+ err = crypto_shash_digest(shash, keys.authkey,
+ keys.authkeylen,
+ o_ptr);
+ if (err) {
+ pr_err("Base driver cannot be loaded\n");
+ goto out;
+ }
+ keys.authkeylen = max_authsize;
+ } else
+ memcpy(o_ptr, keys.authkey, keys.authkeylen);
+
+		/* Compute the ipad digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= IPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+ max_authsize))
+ goto out;
+		/* Compute the opad digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= OPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+ goto out;
+
+ /* convert the ipad and opad digest to network order */
+ chcr_change_order(actx->h_iopad, param.result_size);
+ chcr_change_order(o_ptr, param.result_size);
+ key_ctx_len = sizeof(struct _key_ctx) +
+ roundup(keys.enckeylen, 16) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+ 0, 1, key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+ chcr_free_shash(base_hash);
+
+ memzero_explicit(&keys, sizeof(keys));
+ return 0;
+ }
+out:
+ aeadctx->enckey_len = 0;
+ memzero_explicit(&keys, sizeof(keys));
+ if (!IS_ERR(base_hash))
+ chcr_free_shash(base_hash);
+ return -EINVAL;
+}
+
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* contains both the authentication and cipher keys */
+	struct crypto_authenc_keys keys;
+	int err;
+	unsigned int subtype;
+ int key_ctx_len = 0;
+ unsigned char ck_size = 0;
+
+ crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
+ & CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+ if (err)
+ goto out;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto out;
+
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("Unsupported cipher key %d\n", keys.enckeylen);
+ goto out;
+ }
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
+ key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
+
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+ 0, key_ctx_len >> 4);
+ actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+ memzero_explicit(&keys, sizeof(keys));
+ return 0;
+out:
+ aeadctx->enckey_len = 0;
+ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+}
+
+static int chcr_aead_op(struct aead_request *req,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct sk_buff *skb;
+ struct chcr_dev *cdev;
+
+ cdev = a_ctx(tfm)->dev;
+ if (!cdev) {
+ pr_err("%s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+
+ if (chcr_inc_wrcount(cdev)) {
+		/* Detach state for CHCR means the lldi or padap has been
+		 * freed. We cannot increment the fallback counter here.
+		 */
+ return chcr_aead_fallback(req, reqctx->op);
+ }
+
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ reqctx->txqidx) &&
+ (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
+ chcr_dec_wrcount(cdev);
+ return -ENOSPC;
+ }
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+ crypto_ipsec_check_assoclen(req->assoclen) != 0) {
+ pr_err("RFC4106: Invalid value of assoclen %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
+
+ if (IS_ERR_OR_NULL(skb)) {
+ chcr_dec_wrcount(cdev);
+ return PTR_ERR_OR_ZERO(skb);
+ }
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct chcr_context *ctx = a_ctx(tfm);
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
+
+ reqctx->verify = VERIFY_HW;
+ reqctx->op = CHCR_ENCRYPT_OP;
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
+ return chcr_aead_op(req, 0, create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, 0, create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, 0, create_gcm_wr);
+ }
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = a_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int size;
+ unsigned int cpu;
+
+ cpu = get_cpu();
+ reqctx->txqidx = cpu % ctx->ntxq;
+ reqctx->rxqidx = cpu % ctx->nrxq;
+ put_cpu();
+
+ if (aeadctx->mayverify == VERIFY_SW) {
+ size = crypto_aead_maxauthsize(tfm);
+ reqctx->verify = VERIFY_SW;
+ } else {
+ size = 0;
+ reqctx->verify = VERIFY_HW;
+ }
+ reqctx->op = CHCR_DECRYPT_OP;
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
+ return chcr_aead_op(req, size, create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, size, create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, size, create_gcm_wr);
+ }
+}
+
+static struct chcr_alg_template driver_algs[] = {
+ /* AES-CBC */
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
+ .is_registered = 0,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-chcr",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_cbc_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
+ .is_registered = 0,
+ .alg.skcipher = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-chcr",
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_xts_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
+ .is_registered = 0,
+ .alg.skcipher = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-chcr",
+ .base.cra_blocksize = 1,
+
+ .init = chcr_init_tfm,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = chcr_aes_ctr_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
+ .is_registered = 0,
+ .alg.skcipher = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
+ .base.cra_blocksize = 1,
+
+ .init = chcr_rfc3686_init,
+ .exit = chcr_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = chcr_aes_rfc3686_setkey,
+ .encrypt = chcr_aes_encrypt,
+ .decrypt = chcr_aes_decrypt,
+ }
+ },
+ /* SHA */
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-chcr",
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-chcr",
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-chcr",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-chcr",
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-chcr",
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ }
+ }
+ },
+ /* HMAC */
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac-sha1-chcr",
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "hmac-sha224-chcr",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-chcr",
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "hmac-sha384-chcr",
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ }
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_HMAC,
+ .is_registered = 0,
+ .alg.hash = {
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "hmac-sha512-chcr",
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ }
+ }
+ },
+ /* Add AEAD Algorithms */
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+ },
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_gcm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY + 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+
+ },
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_ccm_setkey,
+ .setauthsize = chcr_ccm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY + 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_rfc4309_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,cbc(aes))",
+ .cra_driver_name =
+ "authenc-digest_null-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-digest_null-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
+};
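+
+/*
+ * Illustrative sketch (not part of the driver): once registered, these
+ * algorithms are reached through the generic kernel crypto API by name,
+ * and the core selects the chcr implementation whenever its cra_priority
+ * wins over other providers, e.g.:
+ *
+ *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ *
+ *	if (!IS_ERR(tfm))
+ *		crypto_free_aead(tfm);
+ */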
+
+/*
+ * chcr_unregister_alg - Deregister crypto algorithms from the kernel
+ * crypto framework.
+ */
+static int chcr_unregister_alg(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ if (driver_algs[i].is_registered && refcount_read(
+ &driver_algs[i].alg.skcipher.base.cra_refcnt)
+ == 1) {
+ crypto_unregister_skcipher(
+ &driver_algs[i].alg.skcipher);
+ driver_algs[i].is_registered = 0;
+ }
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ if (driver_algs[i].is_registered && refcount_read(
+ &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
+ crypto_unregister_aead(
+ &driver_algs[i].alg.aead);
+ driver_algs[i].is_registered = 0;
+ }
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ if (driver_algs[i].is_registered && refcount_read(
+ &driver_algs[i].alg.hash.halg.base.cra_refcnt)
+ == 1) {
+ crypto_unregister_ahash(
+ &driver_algs[i].alg.hash);
+ driver_algs[i].is_registered = 0;
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
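+/* Transform-context sizes for plain hash and HMAC transforms, and the
+ * per-request context size (also exported as the ahash statesize).
+ */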
+#define SZ_AHASH_CTX sizeof(struct chcr_context)
+#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
+#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
+
+/*
+ * chcr_register_alg - Register crypto algorithms with the kernel crypto
+ * framework.
+ */
+static int chcr_register_alg(void)
+{
+ struct crypto_alg ai;
+ struct ahash_alg *a_hash;
+ int err = 0, i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ if (driver_algs[i].is_registered)
+ continue;
+ switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ driver_algs[i].alg.skcipher.base.cra_priority =
+ CHCR_CRA_PRIORITY;
+ driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
+ driver_algs[i].alg.skcipher.base.cra_flags =
+ CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK;
+ driver_algs[i].alg.skcipher.base.cra_ctxsize =
+ sizeof(struct chcr_context) +
+ sizeof(struct ablk_ctx);
+ driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
+
+ err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
+ name = driver_algs[i].alg.skcipher.base.cra_driver_name;
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ driver_algs[i].alg.aead.base.cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
+ driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+ driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+ driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+ driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+ driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+ err = crypto_register_aead(&driver_algs[i].alg.aead);
+ name = driver_algs[i].alg.aead.base.cra_driver_name;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ a_hash = &driver_algs[i].alg.hash;
+ a_hash->update = chcr_ahash_update;
+ a_hash->final = chcr_ahash_final;
+ a_hash->finup = chcr_ahash_finup;
+ a_hash->digest = chcr_ahash_digest;
+ a_hash->export = chcr_ahash_export;
+ a_hash->import = chcr_ahash_import;
+ a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
+ a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
+ a_hash->halg.base.cra_module = THIS_MODULE;
+ a_hash->halg.base.cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
+ a_hash->halg.base.cra_alignmask = 0;
+ a_hash->halg.base.cra_exit = NULL;
+
+ if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
+ a_hash->halg.base.cra_init = chcr_hmac_cra_init;
+ a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
+ a_hash->init = chcr_hmac_init;
+ a_hash->setkey = chcr_ahash_setkey;
+ a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
+ } else {
+ a_hash->init = chcr_sha_init;
+ a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
+ a_hash->halg.base.cra_init = chcr_sha_cra_init;
+ }
+ err = crypto_register_ahash(&driver_algs[i].alg.hash);
+ ai = driver_algs[i].alg.hash.halg.base;
+ name = ai.cra_driver_name;
+ break;
+ }
+ if (err) {
+ pr_err("%s : Algorithm registration failed\n", name);
+ goto register_err;
+ } else {
+ driver_algs[i].is_registered = 1;
+ }
+ }
+ return 0;
+
+register_err:
+ chcr_unregister_alg();
+ return err;
+}
+
+/*
+ * start_crypto - Register the crypto algorithms.
+ * This should be called once when the first device comes up. After this
+ * the kernel will start calling the driver APIs for crypto operations.
+ */
+int start_crypto(void)
+{
+ return chcr_register_alg();
+}
+
+/*
+ * stop_crypto - Deregister all the crypto algorithms from the kernel.
+ * This should be called once when the last device goes down. After this
+ * the kernel will not call the driver APIs for crypto operations.
+ */
+int stop_crypto(void)
+{
+ chcr_unregister_alg();
+ return 0;
+}
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
new file mode 100644
index 000000000..507aafe93
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -0,0 +1,405 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_ALGO_H__
+#define __CHCR_ALGO_H__
+
+/* Crypto key context */
+#define KEY_CONTEXT_CTX_LEN_S 24
+#define KEY_CONTEXT_CTX_LEN_M 0xff
+#define KEY_CONTEXT_CTX_LEN_V(x) ((x) << KEY_CONTEXT_CTX_LEN_S)
+#define KEY_CONTEXT_CTX_LEN_G(x) \
+ (((x) >> KEY_CONTEXT_CTX_LEN_S) & KEY_CONTEXT_CTX_LEN_M)
+
+#define KEY_CONTEXT_DUAL_CK_S 12
+#define KEY_CONTEXT_DUAL_CK_M 0x1
+#define KEY_CONTEXT_DUAL_CK_V(x) ((x) << KEY_CONTEXT_DUAL_CK_S)
+#define KEY_CONTEXT_DUAL_CK_G(x) \
+(((x) >> KEY_CONTEXT_DUAL_CK_S) & KEY_CONTEXT_DUAL_CK_M)
+#define KEY_CONTEXT_DUAL_CK_F KEY_CONTEXT_DUAL_CK_V(1U)
+
+#define KEY_CONTEXT_SALT_PRESENT_S 10
+#define KEY_CONTEXT_SALT_PRESENT_M 0x1
+#define KEY_CONTEXT_SALT_PRESENT_V(x) ((x) << KEY_CONTEXT_SALT_PRESENT_S)
+#define KEY_CONTEXT_SALT_PRESENT_G(x) \
+ (((x) >> KEY_CONTEXT_SALT_PRESENT_S) & \
+ KEY_CONTEXT_SALT_PRESENT_M)
+#define KEY_CONTEXT_SALT_PRESENT_F KEY_CONTEXT_SALT_PRESENT_V(1U)
+
+#define KEY_CONTEXT_VALID_S 0
+#define KEY_CONTEXT_VALID_M 0x1
+#define KEY_CONTEXT_VALID_V(x) ((x) << KEY_CONTEXT_VALID_S)
+#define KEY_CONTEXT_VALID_G(x) \
+ (((x) >> KEY_CONTEXT_VALID_S) & \
+ KEY_CONTEXT_VALID_M)
+#define KEY_CONTEXT_VALID_F KEY_CONTEXT_VALID_V(1U)
+
+#define KEY_CONTEXT_CK_SIZE_S 6
+#define KEY_CONTEXT_CK_SIZE_M 0xf
+#define KEY_CONTEXT_CK_SIZE_V(x) ((x) << KEY_CONTEXT_CK_SIZE_S)
+#define KEY_CONTEXT_CK_SIZE_G(x) \
+ (((x) >> KEY_CONTEXT_CK_SIZE_S) & KEY_CONTEXT_CK_SIZE_M)
+
+#define KEY_CONTEXT_MK_SIZE_S 2
+#define KEY_CONTEXT_MK_SIZE_M 0xf
+#define KEY_CONTEXT_MK_SIZE_V(x) ((x) << KEY_CONTEXT_MK_SIZE_S)
+#define KEY_CONTEXT_MK_SIZE_G(x) \
+ (((x) >> KEY_CONTEXT_MK_SIZE_S) & KEY_CONTEXT_MK_SIZE_M)
+
+#define KEY_CONTEXT_OPAD_PRESENT_S 11
+#define KEY_CONTEXT_OPAD_PRESENT_M 0x1
+#define KEY_CONTEXT_OPAD_PRESENT_V(x) ((x) << KEY_CONTEXT_OPAD_PRESENT_S)
+#define KEY_CONTEXT_OPAD_PRESENT_G(x) \
+ (((x) >> KEY_CONTEXT_OPAD_PRESENT_S) & \
+ KEY_CONTEXT_OPAD_PRESENT_M)
+#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U)
+
+#define CHCR_HASH_MAX_DIGEST_SIZE 64
+#define CHCR_MAX_SHA_DIGEST_SIZE 64
+
+#define IPSEC_TRUNCATED_ICV_SIZE 12
+#define TLS_TRUNCATED_HMAC_SIZE 10
+#define CBCMAC_DIGEST_SIZE 16
+#define MAX_HASH_NAME 20
+
+#define SHA1_INIT_STATE_5X4B 5
+#define SHA256_INIT_STATE_8X4B 8
+#define SHA512_INIT_STATE_8X8B 8
+#define SHA1_INIT_STATE SHA1_INIT_STATE_5X4B
+#define SHA224_INIT_STATE SHA256_INIT_STATE_8X4B
+#define SHA256_INIT_STATE SHA256_INIT_STATE_8X4B
+#define SHA384_INIT_STATE SHA512_INIT_STATE_8X8B
+#define SHA512_INIT_STATE SHA512_INIT_STATE_8X8B
+
+#define DUMMY_BYTES 16
+
+#define IPAD_DATA 0x36363636
+#define OPAD_DATA 0x5c5c5c5c
+
+#define TRANSHDR_SIZE(kctx_len)\
+ (sizeof(struct chcr_wr) +\
+ kctx_len)
+#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
+ (TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
+ sizeof(struct cpl_rx_phys_dsgl) + AES_BLOCK_SIZE)
+#define HASH_TRANSHDR_SIZE(kctx_len)\
+ (TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
+
+
+#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst) \
+ htonl( \
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
+ CPL_TX_SEC_PDU_RXCHID_V((id)) | \
+ CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
+ CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
+ CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
+ CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
+
+#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
+ htonl( \
+ CPL_TX_SEC_PDU_AADSTART_V((a_start)) | \
+ CPL_TX_SEC_PDU_AADSTOP_V((a_stop)) | \
+ CPL_TX_SEC_PDU_CIPHERSTART_V((c_start)) | \
+ CPL_TX_SEC_PDU_CIPHERSTOP_HI_V((c_stop_hi)))
+
+#define FILL_SEC_CPL_AUTHINSERT(c_stop_lo, a_start, a_stop, a_inst) \
+ htonl( \
+ CPL_TX_SEC_PDU_CIPHERSTOP_LO_V((c_stop_lo)) | \
+ CPL_TX_SEC_PDU_AUTHSTART_V((a_start)) | \
+ CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
+ CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
+
+#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size) \
+ htonl( \
+ SCMD_SEQ_NO_CTRL_V(0) | \
+ SCMD_STATUS_PRESENT_V(0) | \
+ SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) | \
+ SCMD_ENC_DEC_CTRL_V((ctrl)) | \
+ SCMD_CIPH_AUTH_SEQ_CTRL_V((seq)) | \
+ SCMD_CIPH_MODE_V((cmode)) | \
+ SCMD_AUTH_MODE_V((amode)) | \
+ SCMD_HMAC_CTRL_V((opad)) | \
+ SCMD_IV_SIZE_V((size)) | \
+ SCMD_NUM_IVS_V(0))
+
+#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
+ SCMD_ENB_DBGID_V(0) | \
+ SCMD_IV_GEN_CTRL_V(0) | \
+ SCMD_LAST_FRAG_V((last)) | \
+ SCMD_MORE_FRAGS_V((more)) | \
+ SCMD_TLS_COMPPDU_V(0) | \
+ SCMD_KEY_CTX_INLINE_V((ctx_in)) | \
+ SCMD_TLS_FRAG_ENABLE_V(0) | \
+ SCMD_MAC_ONLY_V((mac)) | \
+ SCMD_AADIVDROP_V((ivdrop)) | \
+ SCMD_HDR_LEN_V((len)))
+
+#define FILL_KEY_CTX_HDR(ck_size, mk_size, d_ck, opad, ctx_len) \
+ htonl(KEY_CONTEXT_VALID_V(1) | \
+ KEY_CONTEXT_CK_SIZE_V((ck_size)) | \
+ KEY_CONTEXT_MK_SIZE_V(mk_size) | \
+ KEY_CONTEXT_DUAL_CK_V((d_ck)) | \
+ KEY_CONTEXT_OPAD_PRESENT_V((opad)) | \
+ KEY_CONTEXT_SALT_PRESENT_V(1) | \
+ KEY_CONTEXT_CTX_LEN_V((ctx_len)))
+
+#define FILL_KEY_CRX_HDR(ck_size, mk_size, d_ck, opad, ctx_len) \
+ htonl(TLS_KEYCTX_RXMK_SIZE_V(mk_size) | \
+ TLS_KEYCTX_RXCK_SIZE_V(ck_size) | \
+ TLS_KEYCTX_RX_VALID_V(1) | \
+ TLS_KEYCTX_RX_SEQCTR_V(3) | \
+ TLS_KEYCTX_RXAUTH_MODE_V(4) | \
+ TLS_KEYCTX_RXCIPH_MODE_V(2) | \
+ TLS_KEYCTX_RXFLIT_CNT_V((ctx_len)))
+
+#define FILL_WR_OP_CCTX_SIZE \
+ htonl( \
+ FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
+ FW_CRYPTO_LOOKASIDE_WR) | \
+ FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
+ FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((0)) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(0) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(0))
+
+#define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \
+ htonl( \
+ FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
+ FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
+ FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \
+ FW_CRYPTO_LOOKASIDE_WR_IV_V((IV_NOP)) | \
+ FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid))
+
+#define FILL_ULPTX_CMD_DEST(cid, qid) \
+ htonl(ULPTX_CMD_V(ULP_TX_PKT) | \
+ ULP_TXPKT_DEST_V(0) | \
+ ULP_TXPKT_DATAMODIFY_V(0) | \
+ ULP_TXPKT_CHANNELID_V((cid)) | \
+ ULP_TXPKT_RO_V(1) | \
+ ULP_TXPKT_FID_V(qid))
+
+#define KEYCTX_ALIGN_PAD(bs) ({unsigned int _bs = (bs);\
+ _bs == SHA1_DIGEST_SIZE ? 12 : 0; })
+
+#define FILL_PLD_SIZE_HASH_SIZE(payload_sgl_len, sgl_lengths, total_frags) \
+ htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(payload_sgl_len ? \
+ sgl_lengths[total_frags] : 0) |\
+ FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(0))
+
+#define FILL_LEN_PKD(calc_tx_flits_ofld, skb) \
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP((\
+ calc_tx_flits_ofld(skb) * 8), 16)))
+
+#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
+ ULP_TX_SC_MORE_V((immdatalen)))
+#define MAX_NK 8
+#define MAX_DSGL_ENT 32
+#define MIN_AUTH_SG 1 /* IV */
+#define MIN_GCM_SG 1 /* IV */
+#define MIN_DIGEST_SG 1 /* Partial buffer */
+#define MIN_CCM_SG 1 /* IV + B0 */
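+/* Bytes of immediate data that still fit in a work request of
+ * SGE_MAX_WR_LEN once the minimum WR overhead and @len are accounted for.
+ */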
+#define CIP_SPACE_LEFT(len) \
+ ((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
+#define HASH_SPACE_LEFT(len) \
+ ((SGE_MAX_WR_LEN - HASH_WR_MIN_LEN - (len)))
+
+struct algo_param {
+ unsigned int auth_mode;
+ unsigned int mk_size;
+ unsigned int result_size;
+};
+
+struct hash_wr_param {
+ struct algo_param alg_prm;
+ unsigned int opad_needed;
+ unsigned int more;
+ unsigned int last;
+ unsigned int kctx_len;
+ unsigned int sg_len;
+ unsigned int bfr_len;
+ unsigned int hash_size;
+ u64 scmd1;
+};
+
+struct cipher_wr_param {
+ struct skcipher_request *req;
+ char *iv;
+ int bytes;
+ unsigned short qid;
+};
+enum {
+ AES_KEYLENGTH_128BIT = 128,
+ AES_KEYLENGTH_192BIT = 192,
+ AES_KEYLENGTH_256BIT = 256
+};
+
+enum {
+ KEYLENGTH_3BYTES = 3,
+ KEYLENGTH_4BYTES = 4,
+ KEYLENGTH_6BYTES = 6,
+ KEYLENGTH_8BYTES = 8
+};
+
+enum {
+ NUMBER_OF_ROUNDS_10 = 10,
+ NUMBER_OF_ROUNDS_12 = 12,
+ NUMBER_OF_ROUNDS_14 = 14,
+};
+
+/*
+ * CCM defines ICV (integrity check value) sizes of 4, 6, 8, 10, 12,
+ * 14, and 16 octets.
+ */
+enum {
+ ICV_4 = 4,
+ ICV_6 = 6,
+ ICV_8 = 8,
+ ICV_10 = 10,
+ ICV_12 = 12,
+ ICV_13 = 13,
+ ICV_14 = 14,
+ ICV_15 = 15,
+ ICV_16 = 16
+};
+
+struct phys_sge_pairs {
+ __be16 len[8];
+ __be64 addr[8];
+};
+
+
+static const u32 chcr_sha1_init[SHA1_DIGEST_SIZE / 4] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+};
+
+static const u32 chcr_sha224_init[SHA256_DIGEST_SIZE / 4] = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
+};
+
+static const u32 chcr_sha256_init[SHA256_DIGEST_SIZE / 4] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
+};
+
+static const u64 chcr_sha384_init[SHA512_DIGEST_SIZE / 8] = {
+ SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3,
+ SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7,
+};
+
+static const u64 chcr_sha512_init[SHA512_DIGEST_SIZE / 8] = {
+ SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
+ SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
+};
+
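+/*
+ * Copy the standard initial hash state for the given digest size into
+ * @key, converting each word to the byte order used in the key context.
+ */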
+static inline void copy_hash_init_values(char *key, int digestsize)
+{
+ u8 i;
+ __be32 *dkey = (__be32 *)key;
+ u64 *ldkey = (u64 *)key;
+ __be64 *sha384 = (__be64 *)chcr_sha384_init;
+ __be64 *sha512 = (__be64 *)chcr_sha512_init;
+
+ switch (digestsize) {
+ case SHA1_DIGEST_SIZE:
+ for (i = 0; i < SHA1_INIT_STATE; i++)
+ dkey[i] = cpu_to_be32(chcr_sha1_init[i]);
+ break;
+ case SHA224_DIGEST_SIZE:
+ for (i = 0; i < SHA224_INIT_STATE; i++)
+ dkey[i] = cpu_to_be32(chcr_sha224_init[i]);
+ break;
+ case SHA256_DIGEST_SIZE:
+ for (i = 0; i < SHA256_INIT_STATE; i++)
+ dkey[i] = cpu_to_be32(chcr_sha256_init[i]);
+ break;
+ case SHA384_DIGEST_SIZE:
+ for (i = 0; i < SHA384_INIT_STATE; i++)
+ ldkey[i] = be64_to_cpu(sha384[i]);
+ break;
+ case SHA512_DIGEST_SIZE:
+ for (i = 0; i < SHA512_INIT_STATE; i++)
+ ldkey[i] = be64_to_cpu(sha512[i]);
+ break;
+ }
+}
+
+/* Size of one block of length fields: 8 len fields * 2 bytes each */
+#define PHYSDSGL_MAX_LEN_SIZE 16
+
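+/*
+ * Bytes needed for the physical DSGL describing @sgl_entr entries: one
+ * 16-byte block of length fields per group of up to eight entries, plus
+ * an 8-byte address per entry, padded to an even number of addresses.
+ */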
+static inline u16 get_space_for_phys_dsgl(unsigned int sgl_entr)
+{
+ /* len field size + addr field size */
+ return ((sgl_entr >> 3) + ((sgl_entr % 8) ?
+ 1 : 0)) * PHYSDSGL_MAX_LEN_SIZE +
+ (sgl_entr << 3) + ((sgl_entr % 2 ? 1 : 0) << 3);
+}
+
+/* The AES s-transform matrix (s-box). */
+static const u8 aes_sbox[256] = {
+ 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215,
+ 171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175,
+ 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165,
+ 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 7,
+ 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110, 90,
+ 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237, 32,
+ 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 170,
+ 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 81,
+ 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243,
+ 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100,
+ 93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184,
+ 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92, 194,
+ 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 78,
+ 169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46, 28, 166,
+ 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 181, 102,
+ 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 225, 248,
+ 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
+ 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84,
+ 187, 22
+};
+
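+/* SubWord step of the AES key schedule: apply the S-box to each byte. */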
+static inline u32 aes_ks_subword(const u32 w)
+{
+ u8 bytes[4];
+
+ *(u32 *)(&bytes[0]) = w;
+ bytes[0] = aes_sbox[bytes[0]];
+ bytes[1] = aes_sbox[bytes[1]];
+ bytes[2] = aes_sbox[bytes[2]];
+ bytes[3] = aes_sbox[bytes[3]];
+ return *(u32 *)(&bytes[0]);
+}
+
+#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
new file mode 100644
index 000000000..39c70e625
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -0,0 +1,311 @@
+/*
+ * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
+ *
+ * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written and Maintained by:
+ * Manoj Malviya (manojmalviya@chelsio.com)
+ * Atul Gupta (atul.gupta@chelsio.com)
+ * Jitendra Lulla (jlulla@chelsio.com)
+ * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
+ * Harsh Jain (harsh@chelsio.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+
+#include "t4_msg.h"
+#include "chcr_core.h"
+#include "cxgb4_uld.h"
+
+static struct chcr_driver_data drv_data;
+
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
+static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
+static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
+static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
+
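+/* CPL opcode -> handler dispatch table; only CPL_FW6_PLD is handled. */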
+static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_FW6_PLD] = cpl_fw6_pld_handler,
+};
+
+static struct cxgb4_uld_info chcr_uld_info = {
+ .name = DRV_MODULE_NAME,
+ .nrxq = MAX_ULD_QSETS,
+	/* Max ntxq will be derived from the fw config file */
+ .rxq_size = 1024,
+ .add = chcr_uld_add,
+ .state_change = chcr_uld_state_change,
+ .rx_handler = chcr_uld_rx_handler,
+};
+
+static void detach_work_fn(struct work_struct *work)
+{
+ struct chcr_dev *dev;
+
+ dev = container_of(work, struct chcr_dev, detach_work.work);
+
+ if (atomic_read(&dev->inflight)) {
+ dev->wqretry--;
+ if (dev->wqretry) {
+ pr_debug("Request Inflight Count %d\n",
+ atomic_read(&dev->inflight));
+
+ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+ } else {
+			WARN(1, "CHCR: %d requests still pending\n",
+ atomic_read(&dev->inflight));
+ complete(&dev->detach_comp);
+ }
+ } else {
+ complete(&dev->detach_comp);
+ }
+}
+
+struct uld_ctx *assign_chcr_device(void)
+{
+ struct uld_ctx *u_ctx = NULL;
+
+	/*
+	 * When multiple devices are present in the system, select a
+	 * device in round-robin fashion for crypto operations. A given
+	 * session must keep using the same device to maintain
+	 * request-response ordering.
+	 */
+ mutex_lock(&drv_data.drv_mutex);
+ if (!list_empty(&drv_data.act_dev)) {
+ u_ctx = drv_data.last_dev;
+ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+ struct uld_ctx, entry);
+ else
+ drv_data.last_dev =
+ list_next_entry(drv_data.last_dev, entry);
+ }
+ mutex_unlock(&drv_data.drv_mutex);
+ return u_ctx;
+}
+
+static void chcr_dev_add(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev;
+
+ dev = &u_ctx->dev;
+ dev->state = CHCR_ATTACH;
+ atomic_set(&dev->inflight, 0);
+ mutex_lock(&drv_data.drv_mutex);
+ list_move(&u_ctx->entry, &drv_data.act_dev);
+ if (!drv_data.last_dev)
+ drv_data.last_dev = u_ctx;
+ mutex_unlock(&drv_data.drv_mutex);
+}
+
+static void chcr_dev_init(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev;
+
+ dev = &u_ctx->dev;
+ spin_lock_init(&dev->lock_chcr_dev);
+ INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
+ init_completion(&dev->detach_comp);
+ dev->state = CHCR_INIT;
+ dev->wqretry = WQ_RETRY;
+ atomic_inc(&drv_data.dev_count);
+ atomic_set(&dev->inflight, 0);
+ mutex_lock(&drv_data.drv_mutex);
+ list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
+ mutex_unlock(&drv_data.drv_mutex);
+}
+
+static int chcr_dev_move(struct uld_ctx *u_ctx)
+{
+ mutex_lock(&drv_data.drv_mutex);
+ if (drv_data.last_dev == u_ctx) {
+ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+ struct uld_ctx, entry);
+ else
+ drv_data.last_dev =
+ list_next_entry(drv_data.last_dev, entry);
+ }
+ list_move(&u_ctx->entry, &drv_data.inact_dev);
+ if (list_empty(&drv_data.act_dev))
+ drv_data.last_dev = NULL;
+ atomic_dec(&drv_data.dev_count);
+ mutex_unlock(&drv_data.drv_mutex);
+
+ return 0;
+}
+
+static int cpl_fw6_pld_handler(struct adapter *adap,
+ unsigned char *input)
+{
+ struct crypto_async_request *req;
+ struct cpl_fw6_pld *fw6_pld;
+ u32 ack_err_status = 0;
+ int error_status = 0;
+
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
+ fw6_pld->data[1]);
+
+ ack_err_status =
+ ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
+ if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
+ error_status = -EBADMSG;
+	/* Call the completion callback with the error status */
+ if (req) {
+ error_status = chcr_handle_resp(req, input, error_status);
+ } else {
+ pr_err("Incorrect request address from the firmware\n");
+ return -EFAULT;
+ }
+ if (error_status)
+ atomic_inc(&adap->chcr_stats.error);
+
+ return 0;
+}
+
+int chcr_send_wr(struct sk_buff *skb)
+{
+ return cxgb4_crypto_send(skb->dev, skb);
+}
+
+static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
+{
+ struct uld_ctx *u_ctx;
+
+ pr_info_once("%s\n", DRV_DESC);
+ if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* Create the device and add it in the device list */
+ u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
+ if (!u_ctx) {
+ u_ctx = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ u_ctx->lldi = *lld;
+ chcr_dev_init(u_ctx);
+out:
+ return u_ctx;
+}
+
+int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *pgl)
+{
+ struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
+ struct chcr_dev *dev = &u_ctx->dev;
+ struct adapter *adap = padap(dev);
+ const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
+
+ if (!work_handlers[rpl->opcode]) {
+ pr_err("Unsupported opcode %d received\n", rpl->opcode);
+ return 0;
+ }
+
+ if (!pgl)
+ work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
+ else
+ work_handlers[rpl->opcode](adap, pgl->va);
+ return 0;
+}
+
+static void chcr_detach_device(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev = &u_ctx->dev;
+
+ if (dev->state == CHCR_DETACH) {
+		pr_debug("Detach event received for an already detached device\n");
+ return;
+ }
+ dev->state = CHCR_DETACH;
+ if (atomic_read(&dev->inflight) != 0) {
+ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+ wait_for_completion(&dev->detach_comp);
+ }
+
+ // Move u_ctx to inactive_dev list
+ chcr_dev_move(u_ctx);
+}
+
+static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
+{
+ struct uld_ctx *u_ctx = handle;
+ int ret = 0;
+
+ switch (state) {
+ case CXGB4_STATE_UP:
+ if (u_ctx->dev.state != CHCR_INIT) {
+			// Already initialised.
+ return 0;
+ }
+ chcr_dev_add(u_ctx);
+ ret = start_crypto();
+ break;
+
+ case CXGB4_STATE_DETACH:
+ chcr_detach_device(u_ctx);
+ if (!atomic_read(&drv_data.dev_count))
+ stop_crypto();
+ break;
+
+ case CXGB4_STATE_START_RECOVERY:
+ case CXGB4_STATE_DOWN:
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int __init chcr_crypto_init(void)
+{
+ INIT_LIST_HEAD(&drv_data.act_dev);
+ INIT_LIST_HEAD(&drv_data.inact_dev);
+ atomic_set(&drv_data.dev_count, 0);
+ mutex_init(&drv_data.drv_mutex);
+ drv_data.last_dev = NULL;
+ cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
+
+ return 0;
+}
+
+static void __exit chcr_crypto_exit(void)
+{
+ struct uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
+
+ stop_crypto();
+ cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+ /* Remove all devices from list */
+ mutex_lock(&drv_data.drv_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&drv_data.drv_mutex);
+}
+
+module_init(chcr_crypto_init);
+module_exit(chcr_crypto_exit);
+
+MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
new file mode 100644
index 000000000..f7c8bb95a
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -0,0 +1,139 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_CORE_H__
+#define __CHCR_CORE_H__
+
+#include <crypto/algapi.h>
+#include <net/tls.h>
+#include "t4_hw.h"
+#include "cxgb4.h"
+#include "t4_msg.h"
+#include "cxgb4_uld.h"
+
+#define DRV_MODULE_NAME "chcr"
+#define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"
+
+#define MAX_PENDING_REQ_TO_HW 20
+#define CHCR_TEST_RESPONSE_TIMEOUT 1000
+#define WQ_DETACH_TM (msecs_to_jiffies(50))
+#define PAD_ERROR_BIT 1
+#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
+
+#define MAC_ERROR_BIT 0
+#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
+#define MAX_SALT 4
+#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \
+ sizeof(struct cpl_rx_phys_dsgl) + \
+ sizeof(struct ulptx_sgl) + 16) //IV
+
+#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
+ DUMMY_BYTES + \
+ sizeof(struct ulptx_sgl))
+struct uld_ctx;
+
+struct _key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[MAX_SALT];
+ __be64 iv_to_auth;
+ unsigned char key[];
+};
+
+#define WQ_RETRY 5
+struct chcr_driver_data {
+ struct list_head act_dev;
+ struct list_head inact_dev;
+ atomic_t dev_count;
+ struct mutex drv_mutex;
+ struct uld_ctx *last_dev;
+};
+
+enum chcr_state {
+ CHCR_INIT = 0,
+ CHCR_ATTACH,
+ CHCR_DETACH,
+};
+struct chcr_wr {
+ struct fw_crypto_lookaside_wr wreq;
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_dev {
+ spinlock_t lock_chcr_dev;
+ enum chcr_state state;
+ atomic_t inflight;
+ int wqretry;
+ struct delayed_work detach_work;
+ struct completion detach_comp;
+};
+
+struct uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+ struct chcr_dev dev;
+};
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+static inline void *padap(struct chcr_dev *dev)
+{
+ struct uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev);
+
+ return pci_get_drvdata(u_ctx->lldi.pdev);
+}
+
+struct uld_ctx *assign_chcr_device(void);
+int chcr_send_wr(struct sk_buff *skb);
+int start_crypto(void);
+int stop_crypto(void);
+int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *pgl);
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+ int err);
+#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
new file mode 100644
index 000000000..c7816c83e
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -0,0 +1,353 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_CRYPTO_H__
+#define __CHCR_CRYPTO_H__
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+#define CCM_B0_SIZE 16
+#define CCM_AAD_FIELD_SIZE 2
+// 511 - 16 (for IV)
+#define T6_MAX_AAD_SIZE 495
+
+
+/* Define the following if the h/w does not drop the AAD and IV data
+ * before handing back the processed data.
+ */
+
+#define CHCR_CRA_PRIORITY 500
+#define CHCR_AEAD_PRIORITY 6000
+#define CHCR_AES_MAX_KEY_LEN (2 * (AES_MAX_KEY_SIZE)) /* consider xts */
+#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */
+
+#define CHCR_MAX_AUTHENC_AES_KEY_LEN 32 /* max aes key length*/
+#define CHCR_MAX_AUTHENC_SHA_KEY_LEN 128 /* max sha key length*/
+
+#define CHCR_GIVENCRYPT_OP 2
+/* CPL/SCMD parameters */
+
+#define CHCR_ENCRYPT_OP 0
+#define CHCR_DECRYPT_OP 1
+
+#define CHCR_SCMD_SEQ_NO_CTRL_32BIT 1
+#define CHCR_SCMD_SEQ_NO_CTRL_48BIT 2
+#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3
+
+#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
+
+#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
+#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
+
+#define CHCR_SCMD_CIPHER_MODE_NOP 0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+#define CHCR_SCMD_CIPHER_MODE_AES_CCM 7
+
+#define CHCR_SCMD_AUTH_MODE_NOP 0
+#define CHCR_SCMD_AUTH_MODE_SHA1 1
+#define CHCR_SCMD_AUTH_MODE_SHA224 2
+#define CHCR_SCMD_AUTH_MODE_SHA256 3
+#define CHCR_SCMD_AUTH_MODE_GHASH 4
+#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
+#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
+#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
+#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
+#define CHCR_SCMD_AUTH_MODE_CBCMAC 9
+#define CHCR_SCMD_AUTH_MODE_CMAC 10
+
+#define CHCR_SCMD_HMAC_CTRL_NOP 0
+#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
+#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366 2
+#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT 3
+#define CHCR_SCMD_HMAC_CTRL_PL1 4
+#define CHCR_SCMD_HMAC_CTRL_PL2 5
+#define CHCR_SCMD_HMAC_CTRL_PL3 6
+#define CHCR_SCMD_HMAC_CTRL_DIV2 7
+#define VERIFY_HW 0
+#define VERIFY_SW 1
+
+#define CHCR_SCMD_IVGEN_CTRL_HW 0
+#define CHCR_SCMD_IVGEN_CTRL_SW 1
+/* These are not really MAC key sizes. They are the sizes of the
+ * intermediate state of the SHA engine.
+ */
+#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0
+#define CHCR_KEYCTX_MAC_KEY_SIZE_160 1
+#define CHCR_KEYCTX_MAC_KEY_SIZE_192 2
+#define CHCR_KEYCTX_MAC_KEY_SIZE_256 3
+#define CHCR_KEYCTX_MAC_KEY_SIZE_512 4
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_192 1
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_256 2
+#define CHCR_KEYCTX_NO_KEY 15
+
+#define CHCR_CPL_FW4_PLD_IV_OFFSET (5 * 64) /* bytes. flt #5 and #6 */
+#define CHCR_CPL_FW4_PLD_HASH_RESULT_OFFSET (7 * 64) /* bytes. flt #7 */
+#define CHCR_CPL_FW4_PLD_DATA_SIZE (4 * 64) /* bytes. flt #4 to #7 */
+
+#define KEY_CONTEXT_HDR_SALT_AND_PAD 16
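+/* A hardware flit is 64 bits (8 bytes). */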
+#define flits_to_bytes(x) (x * 8)
+
+#define IV_NOP 0
+#define IV_IMMEDIATE 1
+#define IV_DSGL 2
+
+#define AEAD_H_SIZE 16
+
+#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
+#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_SHA 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000
+#define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000
+#define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_SHA 0x0c000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_NULL 0x0d000000
+#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
+ CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+
+#define MAX_SCRATCH_PAD_SIZE 32
+
+#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
+#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
+#define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int))
+#define CHCR_DST_SG_SIZE 2048
+
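+/* Fetch the chcr_context embedded in the transform context of each tfm type. */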
+static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
+{
+ return crypto_aead_ctx(tfm);
+}
+
+static inline struct chcr_context *c_ctx(struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_ctx(tfm);
+}
+
+static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
+
+struct ablk_ctx {
+ struct crypto_skcipher *sw_cipher;
+ __be32 key_ctx_hdr;
+ unsigned int enckey_len;
+ unsigned char ciph_mode;
+ u8 key[CHCR_AES_MAX_KEY_LEN];
+ u8 nonce[4];
+ u8 rrkey[AES_MAX_KEY_SIZE];
+};
+struct chcr_aead_reqctx {
+ struct sk_buff *skb;
+ dma_addr_t iv_dma;
+ dma_addr_t b0_dma;
+ unsigned int b0_len;
+ unsigned int op;
+ u16 imm;
+ u16 verify;
+ u16 txqidx;
+ u16 rxqidx;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
+ u8 *scratch_pad;
+};
+
+struct ulptx_walk {
+ struct ulptx_sgl *sgl;
+ unsigned int nents;
+ unsigned int pair_idx;
+ unsigned int last_sg_len;
+ struct scatterlist *last_sg;
+ struct ulptx_sge_pair *pair;
+
+};
+
+struct dsgl_walk {
+ unsigned int nents;
+ unsigned int last_sg_len;
+ struct scatterlist *last_sg;
+ struct cpl_rx_phys_dsgl *dsgl;
+ struct phys_sge_pairs *to;
+};
+
+struct chcr_gcm_ctx {
+ u8 ghash_h[AEAD_H_SIZE];
+};
+
+struct chcr_authenc_ctx {
+ u8 dec_rrkey[AES_MAX_KEY_SIZE];
+ u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
+ unsigned char auth_mode;
+};
+
+struct __aead_ctx {
+ union {
+ DECLARE_FLEX_ARRAY(struct chcr_gcm_ctx, gcm);
+ DECLARE_FLEX_ARRAY(struct chcr_authenc_ctx, authenc);
+ };
+};
+
+struct chcr_aead_ctx {
+ __be32 key_ctx_hdr;
+ unsigned int enckey_len;
+ struct crypto_aead *sw_cipher;
+ u8 salt[MAX_SALT];
+ u8 key[CHCR_AES_MAX_KEY_LEN];
+ u8 nonce[4];
+ u16 hmac_ctrl;
+ u16 mayverify;
+ struct __aead_ctx ctx[];
+};
+
+struct hmac_ctx {
+ struct crypto_shash *base_hash;
+ u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
+};
+
+struct __crypto_ctx {
+ union {
+ DECLARE_FLEX_ARRAY(struct hmac_ctx, hmacctx);
+ DECLARE_FLEX_ARRAY(struct ablk_ctx, ablkctx);
+ DECLARE_FLEX_ARRAY(struct chcr_aead_ctx, aeadctx);
+ };
+};
+
+struct chcr_context {
+ struct chcr_dev *dev;
+ unsigned char rxq_perchan;
+ unsigned char txq_perchan;
+ unsigned int ntxq;
+ unsigned int nrxq;
+ struct completion cbc_aes_aio_done;
+ struct __crypto_ctx crypto_ctx[];
+};
+
+struct chcr_hctx_per_wr {
+ struct scatterlist *srcsg;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 dma_len;
+ unsigned int src_ofst;
+ unsigned int processed;
+ u32 result;
+ u8 is_sg_map;
+ u8 imm;
+	/* Final callback called. The driver cannot rely on nbytes to
+	 * decide whether this is the final call.
+	 */
+ u8 isfinal;
+};
+
+struct chcr_ahash_req_ctx {
+ struct chcr_hctx_per_wr hctx_wr;
+ u8 *reqbfr;
+ u8 *skbfr;
+ /* SKB which is being sent to the hardware for processing */
+	u64 data_len;  /* Data length processed so far */
+ u16 txqidx;
+ u16 rxqidx;
+ u8 reqlen;
+ u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
+ u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+};
+
+struct chcr_skcipher_req_ctx {
+ struct sk_buff *skb;
+ struct scatterlist *dstsg;
+ unsigned int processed;
+ unsigned int last_req_len;
+ unsigned int partial_req;
+ struct scatterlist *srcsg;
+ unsigned int src_ofst;
+ unsigned int dst_ofst;
+ unsigned int op;
+ u16 imm;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+ u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
+ u16 txqidx;
+ u16 rxqidx;
+ struct skcipher_request fallback_req; // keep at the end
+};
+
+struct chcr_alg_template {
+ u32 type;
+ u32 is_registered;
+ union {
+ struct skcipher_alg skcipher;
+ struct ahash_alg hash;
+ struct aead_alg aead;
+ } alg;
+};
+
+typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
+ unsigned short qid,
+ int size);
+
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned short qid);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
+ void *ulptx,
+ struct cipher_wr_param *wrparam);
+int chcr_cipher_dma_map(struct device *dev, struct skcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct skcipher_request *req);
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid);
+int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
+void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param);
+int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
+void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
+void chcr_aead_common_exit(struct aead_request *req);
+#endif /* __CHCR_CRYPTO_H__ */