path: root/drivers/crypto/nx

Diffstat:
-rw-r--r--  drivers/crypto/nx/Kconfig              |   50
-rw-r--r--  drivers/crypto/nx/Makefile             |   18
-rw-r--r--  drivers/crypto/nx/nx-842-pseries.c     | 1134
-rw-r--r--  drivers/crypto/nx/nx-842.c             |  521
-rw-r--r--  drivers/crypto/nx/nx-842.h             |  189
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c         |  127
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c         |  566
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c         |  145
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c         |  125
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c         |  505
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c        |  379
-rw-r--r--  drivers/crypto/nx/nx-common-powernv.c  | 1136
-rw-r--r--  drivers/crypto/nx/nx-sha256.c          |  283
-rw-r--r--  drivers/crypto/nx/nx-sha512.c          |  289
-rw-r--r--  drivers/crypto/nx/nx.c                 |  852
-rw-r--r--  drivers/crypto/nx/nx.h                 |  195
-rw-r--r--  drivers/crypto/nx/nx_csbcpb.h          |  206
-rw-r--r--  drivers/crypto/nx/nx_debugfs.c         |   66
18 files changed, 6786 insertions, 0 deletions
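
In-kernel consumers do not call these drivers directly; they reach them through the crypto API via the "842" algorithm name that nx-842-pseries.c registers below. A minimal sketch of that usage, assuming the legacy crypto_comp interface; the helper name and the output-size bound are illustrative, not part of this diff:

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static int example_842_compress(const u8 *src, unsigned int slen)
	{
		struct crypto_comp *tfm;
		unsigned int dlen = slen + slen / 16 + 64; /* illustrative bound */
		u8 *dst;
		int ret;

		tfm = crypto_alloc_comp("842", 0, 0); /* binds to "842-nx" if loaded */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		dst = kmalloc(dlen, GFP_KERNEL);
		if (!dst) {
			crypto_free_comp(tfm);
			return -ENOMEM;
		}

		/* on success, dlen is updated to the compressed length */
		ret = crypto_comp_compress(tfm, src, slen, dst, &dlen);

		kfree(dst);
		crypto_free_comp(tfm);
		return ret;
	}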
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
new file mode 100644
index 000000000..23e3d0160
--- /dev/null
+++ b/drivers/crypto/nx/Kconfig
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_DEV_NX_ENCRYPT
+ tristate "Encryption acceleration support on pSeries platform"
+ depends on PPC_PSERIES && IBMVIO && !CPU_LITTLE_ENDIAN
+ default y
+ select CRYPTO_AES
+ select CRYPTO_CCM
+ help
+ Support for PowerPC Nest (NX) encryption acceleration. This
+ module supports acceleration for AES and SHA2 algorithms on
+ the pSeries platform. If you choose 'M' here, this module
+ will be called nx_crypto.
+
+config CRYPTO_DEV_NX_COMPRESS
+ tristate "Compression acceleration support"
+ default y
+ select CRYPTO_ALGAPI
+ select 842_DECOMPRESS
+ help
+ Support for PowerPC Nest (NX) compression acceleration. This
+ module supports acceleration for compressing memory with the 842
+ algorithm using the cryptographic API. One of the platform
+ drivers must be selected also. If you choose 'M' here, this
+ module will be called nx_compress.
+
+if CRYPTO_DEV_NX_COMPRESS
+
+config CRYPTO_DEV_NX_COMPRESS_PSERIES
+ tristate "Compression acceleration support on pSeries platform"
+ depends on PPC_PSERIES && IBMVIO
+ default y
+ help
+ Support for PowerPC Nest (NX) compression acceleration. This
+ module supports acceleration for compressing memory with the 842
+ algorithm. This supports NX hardware on the pSeries platform.
+ If you choose 'M' here, this module will be called nx_compress_pseries.
+
+config CRYPTO_DEV_NX_COMPRESS_POWERNV
+ tristate "Compression acceleration support on PowerNV platform"
+ depends on PPC_POWERNV
+ depends on PPC_VAS
+ default y
+ help
+ Support for PowerPC Nest (NX) compression acceleration. This
+ module supports acceleration for compressing memory with the 842
+ algorithm. This supports NX hardware on the PowerNV platform.
+ If you choose 'M' here, this module will be called nx_compress_powernv.
+
+endif
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
new file mode 100644
index 000000000..351822a59
--- /dev/null
+++ b/drivers/crypto/nx/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
+nx-crypto-objs := nx.o \
+ nx-aes-cbc.o \
+ nx-aes-ecb.o \
+ nx-aes-gcm.o \
+ nx-aes-ccm.o \
+ nx-aes-ctr.o \
+ nx-aes-xcbc.o \
+ nx-sha256.o \
+ nx-sha512.o
+
+nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
+nx-compress-objs := nx-842.o
+nx-compress-pseries-objs := nx-842-pseries.o
+nx-compress-powernv-objs := nx-common-powernv.o
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
new file mode 100644
index 000000000..c5ec50a28
--- /dev/null
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -0,0 +1,1134 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for IBM Power 842 compression accelerator
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Seth Jennings <sjenning@linux.vnet.ibm.com>
+ */
+
+#include <asm/vio.h>
+
+#include "nx-842.h"
+#include "nx_csbcpb.h" /* struct nx_csbcpb */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
+
+static struct nx842_constraints nx842_pseries_constraints = {
+ .alignment = DDE_BUFFER_ALIGN,
+ .multiple = DDE_BUFFER_LAST_MULT,
+ .minimum = DDE_BUFFER_LAST_MULT,
+ .maximum = PAGE_SIZE, /* dynamic, max_sync_size */
+};
+
+static int check_constraints(unsigned long buf, unsigned int *len, bool in)
+{
+ if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
+ pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
+ in ? "input" : "output", buf,
+ nx842_pseries_constraints.alignment);
+ return -EINVAL;
+ }
+ if (*len % nx842_pseries_constraints.multiple) {
+ pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
+ in ? "input" : "output", *len,
+ nx842_pseries_constraints.multiple);
+ if (in)
+ return -EINVAL;
+ *len = round_down(*len, nx842_pseries_constraints.multiple);
+ }
+ if (*len < nx842_pseries_constraints.minimum) {
+ pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
+ in ? "input" : "output", *len,
+ nx842_pseries_constraints.minimum);
+ return -EINVAL;
+ }
+ if (*len > nx842_pseries_constraints.maximum) {
+ pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
+ in ? "input" : "output", *len,
+ nx842_pseries_constraints.maximum);
+ if (in)
+ return -EINVAL;
+ *len = nx842_pseries_constraints.maximum;
+ }
+ return 0;
+}
+
+/* I assume we need to align the CSB? */
+#define WORKMEM_ALIGN (256)
+
+struct nx842_workmem {
+ /* scatterlist */
+ char slin[4096];
+ char slout[4096];
+ /* coprocessor status/parameter block */
+ struct nx_csbcpb csbcpb;
+
+ char padding[WORKMEM_ALIGN];
+} __aligned(WORKMEM_ALIGN);
+
+/* Macros for fields within nx_csbcpb */
+/* Check the valid bit within the csbcpb valid field */
+#define NX842_CSBCPB_VALID_CHK(x) (x & BIT_MASK(7))
+
+/* CE macros operate on the completion_extension field bits in the csbcpb.
+ * CE0 0=full completion, 1=partial completion
+ * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
+ * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */
+#define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7))
+#define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6))
+#define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5))
+
+/* The NX unit accepts data only on 4K page boundaries */
+#define NX842_HW_PAGE_SIZE (4096)
+#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1))
+
+struct ibm_nx842_counters {
+ atomic64_t comp_complete;
+ atomic64_t comp_failed;
+ atomic64_t decomp_complete;
+ atomic64_t decomp_failed;
+ atomic64_t swdecomp;
+ atomic64_t comp_times[32];
+ atomic64_t decomp_times[32];
+};
+
+static struct nx842_devdata {
+ struct vio_dev *vdev;
+ struct device *dev;
+ struct ibm_nx842_counters *counters;
+ unsigned int max_sg_len;
+ unsigned int max_sync_size;
+ unsigned int max_sync_sg;
+} __rcu *devdata;
+static DEFINE_SPINLOCK(devdata_mutex);
+
+#define NX842_COUNTER_INC(_x) \
+static inline void nx842_inc_##_x( \
+ const struct nx842_devdata *dev) { \
+ if (dev) \
+ atomic64_inc(&dev->counters->_x); \
+}
+NX842_COUNTER_INC(comp_complete);
+NX842_COUNTER_INC(comp_failed);
+NX842_COUNTER_INC(decomp_complete);
+NX842_COUNTER_INC(decomp_failed);
+NX842_COUNTER_INC(swdecomp);
+
+#define NX842_HIST_SLOTS 16
+
+static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
+{
+ int bucket = fls(time);
+
+ if (bucket)
+ bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);
+
+ atomic64_inc(&times[bucket]);
+}
+
+/* NX unit operation flags */
+#define NX842_OP_COMPRESS 0x0
+#define NX842_OP_CRC 0x1
+#define NX842_OP_DECOMPRESS 0x2
+#define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC)
+#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
+#define NX842_OP_ASYNC (1<<23)
+#define NX842_OP_NOTIFY (1<<22)
+#define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8)
+
+static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
+{
+ /* No use of DMA mappings within the driver. */
+ return 0;
+}
+
+struct nx842_slentry {
+ __be64 ptr; /* Real address (use __pa()) */
+ __be64 len;
+};
+
+/* pHyp scatterlist entry */
+struct nx842_scatterlist {
+ int entry_nr; /* number of slentries */
+ struct nx842_slentry *entries; /* ptr to array of slentries */
+};
+
+/* Does not include sizeof(entry_nr) in the size */
+static inline unsigned long nx842_get_scatterlist_size(
+ struct nx842_scatterlist *sl)
+{
+ return sl->entry_nr * sizeof(struct nx842_slentry);
+}
+
+static int nx842_build_scatterlist(unsigned long buf, int len,
+ struct nx842_scatterlist *sl)
+{
+ unsigned long entrylen;
+ struct nx842_slentry *entry;
+
+ sl->entry_nr = 0;
+
+ entry = sl->entries;
+ while (len) {
+ entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
+ entrylen = min_t(int, len,
+ LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
+ entry->len = cpu_to_be64(entrylen);
+
+ len -= entrylen;
+ buf += entrylen;
+
+ sl->entry_nr++;
+ entry++;
+ }
+
+ return 0;
+}
+
+static int nx842_validate_result(struct device *dev,
+ struct cop_status_block *csb)
+{
+ /* The csb must be valid after returning from vio_h_cop_sync */
+ if (!NX842_CSBCPB_VALID_CHK(csb->valid)) {
+ dev_err(dev, "%s: csbcpb not valid upon completion.\n",
+ __func__);
+ dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
+ csb->valid,
+ csb->crb_seq_number,
+ csb->completion_code,
+ csb->completion_extension);
+ dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
+ be32_to_cpu(csb->processed_byte_count),
+ (unsigned long)be64_to_cpu(csb->address));
+ return -EIO;
+ }
+
+ /* Check return values from the hardware in the CSB */
+ switch (csb->completion_code) {
+ case 0: /* Completed without error */
+ break;
+ case 64: /* Compression ok, but output larger than input */
+ dev_dbg(dev, "%s: output size larger than input size\n",
+ __func__);
+ break;
+ case 13: /* Output buffer too small */
+ dev_dbg(dev, "%s: Out of space in output buffer\n",
+ __func__);
+ return -ENOSPC;
+ case 65: /* Calculated CRC doesn't match the passed value */
+ dev_dbg(dev, "%s: CRC mismatch for decompression\n",
+ __func__);
+ return -EINVAL;
+ case 66: /* Input data contains an illegal template field */
+ case 67: /* Template indicates data past the end of the input stream */
+ dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
+ __func__, csb->completion_code);
+ return -EINVAL;
+ default:
+ dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
+ __func__, csb->completion_code);
+ return -EIO;
+ }
+
+ /* Hardware sanity check */
+ if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
+ dev_err(dev, "%s: No error returned by hardware, but "
+ "data returned is unusable, contact support.\n"
+ "(Additional info: csbcbp->processed bytes "
+ "does not specify processed bytes for the "
+ "target buffer.)\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * nx842_pseries_compress - Compress data using the 842 algorithm
+ *
+ * Compression provided by the NX842 coprocessor on IBM Power systems.
+ * The input buffer is compressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function @outlen contains the length of the
+ * compressed data. If there is an error then @outlen will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: Pointer to input buffer
+ * @inlen: Length of input buffer
+ * @out: Pointer to output buffer
+ * @outlen: Length of output buffer
+ * @wmem: ptr to buffer for working memory, size determined by
+ * nx842_pseries_driver.workmem_size
+ *
+ * Returns:
+ * 0 Success, output of length @outlen stored in the buffer at @out
+ * -ENOMEM Unable to allocate internal buffers
+ * -ENOSPC Output buffer is too small
+ * -EIO Internal error
+ * -ENODEV Hardware unavailable
+ */
+static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlen,
+ void *wmem)
+{
+ struct nx842_devdata *local_devdata;
+ struct device *dev = NULL;
+ struct nx842_workmem *workmem;
+ struct nx842_scatterlist slin, slout;
+ struct nx_csbcpb *csbcpb;
+ int ret = 0;
+ unsigned long inbuf, outbuf;
+ struct vio_pfo_op op = {
+ .done = NULL,
+ .handle = 0,
+ .timeout = 0,
+ };
+ unsigned long start = get_tb();
+
+ inbuf = (unsigned long)in;
+ if (check_constraints(inbuf, &inlen, true))
+ return -EINVAL;
+
+ outbuf = (unsigned long)out;
+ if (check_constraints(outbuf, outlen, false))
+ return -EINVAL;
+
+ rcu_read_lock();
+ local_devdata = rcu_dereference(devdata);
+ if (!local_devdata || !local_devdata->dev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ dev = local_devdata->dev;
+
+ /* Init scatterlist */
+ workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
+ slin.entries = (struct nx842_slentry *)workmem->slin;
+ slout.entries = (struct nx842_slentry *)workmem->slout;
+
+ /* Init operation */
+ op.flags = NX842_OP_COMPRESS_CRC;
+ csbcpb = &workmem->csbcpb;
+ memset(csbcpb, 0, sizeof(*csbcpb));
+ op.csbcpb = nx842_get_pa(csbcpb);
+
+ if ((inbuf & NX842_HW_PAGE_MASK) ==
+ ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
+ /* Create direct DDE */
+ op.in = nx842_get_pa((void *)inbuf);
+ op.inlen = inlen;
+ } else {
+ /* Create indirect DDE (scatterlist) */
+ nx842_build_scatterlist(inbuf, inlen, &slin);
+ op.in = nx842_get_pa(slin.entries);
+ op.inlen = -nx842_get_scatterlist_size(&slin);
+ }
+
+ if ((outbuf & NX842_HW_PAGE_MASK) ==
+ ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
+ /* Create direct DDE */
+ op.out = nx842_get_pa((void *)outbuf);
+ op.outlen = *outlen;
+ } else {
+ /* Create indirect DDE (scatterlist) */
+ nx842_build_scatterlist(outbuf, *outlen, &slout);
+ op.out = nx842_get_pa(slout.entries);
+ op.outlen = -nx842_get_scatterlist_size(&slout);
+ }
+
+ dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
+ __func__, (unsigned long)op.in, (long)op.inlen,
+ (unsigned long)op.out, (long)op.outlen);
+
+ /* Send request to pHyp */
+ ret = vio_h_cop_sync(local_devdata->vdev, &op);
+
+ /* Check for pHyp error */
+ if (ret) {
+ dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
+ __func__, ret, op.hcall_err);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* Check for hardware error */
+ ret = nx842_validate_result(dev, &csbcpb->csb);
+ if (ret)
+ goto unlock;
+
+ *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
+ dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);
+
+unlock:
+ if (ret)
+ nx842_inc_comp_failed(local_devdata);
+ else {
+ nx842_inc_comp_complete(local_devdata);
+ ibm_nx842_incr_hist(local_devdata->counters->comp_times,
+ (get_tb() - start) / tb_ticks_per_usec);
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * nx842_pseries_decompress - Decompress data using the 842 algorithm
+ *
+ * Decompression provided by the NX842 coprocessor on IBM Power systems.
+ * The input buffer is decompressed and the result is stored in the
+ * provided output buffer. The size allocated to the output buffer is
+ * provided by the caller of this function in @outlen. Upon return from
+ * this function @outlen contains the length of the decompressed data.
+ * If there is an error then @outlen will be 0 and an error will be
+ * specified by the return code from this function.
+ *
+ * @in: Pointer to input buffer
+ * @inlen: Length of input buffer
+ * @out: Pointer to output buffer
+ * @outlen: Length of output buffer
+ * @wmem: ptr to buffer for working memory, size determined by
+ * nx842_pseries_driver.workmem_size
+ *
+ * Returns:
+ * 0 Success, output of length @outlen stored in the buffer at @out
+ * -ENODEV Hardware decompression device is unavailable
+ * -ENOMEM Unable to allocate internal buffers
+ * -ENOSPC Output buffer is too small
+ * -EINVAL Bad input data encountered when attempting decompress
+ * -EIO Internal error
+ */
+static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlen,
+ void *wmem)
+{
+ struct nx842_devdata *local_devdata;
+ struct device *dev = NULL;
+ struct nx842_workmem *workmem;
+ struct nx842_scatterlist slin, slout;
+ struct nx_csbcpb *csbcpb;
+ int ret = 0;
+ unsigned long inbuf, outbuf;
+ struct vio_pfo_op op = {
+ .done = NULL,
+ .handle = 0,
+ .timeout = 0,
+ };
+ unsigned long start = get_tb();
+
+ /* Ensure page alignment and size */
+ inbuf = (unsigned long)in;
+ if (check_constraints(inbuf, &inlen, true))
+ return -EINVAL;
+
+ outbuf = (unsigned long)out;
+ if (check_constraints(outbuf, outlen, false))
+ return -EINVAL;
+
+ rcu_read_lock();
+ local_devdata = rcu_dereference(devdata);
+ if (!local_devdata || !local_devdata->dev) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ dev = local_devdata->dev;
+
+ workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
+
+ /* Init scatterlist */
+ slin.entries = (struct nx842_slentry *)workmem->slin;
+ slout.entries = (struct nx842_slentry *)workmem->slout;
+
+ /* Init operation */
+ op.flags = NX842_OP_DECOMPRESS_CRC;
+ csbcpb = &workmem->csbcpb;
+ memset(csbcpb, 0, sizeof(*csbcpb));
+ op.csbcpb = nx842_get_pa(csbcpb);
+
+ if ((inbuf & NX842_HW_PAGE_MASK) ==
+ ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
+ /* Create direct DDE */
+ op.in = nx842_get_pa((void *)inbuf);
+ op.inlen = inlen;
+ } else {
+ /* Create indirect DDE (scatterlist) */
+ nx842_build_scatterlist(inbuf, inlen, &slin);
+ op.in = nx842_get_pa(slin.entries);
+ op.inlen = -nx842_get_scatterlist_size(&slin);
+ }
+
+ if ((outbuf & NX842_HW_PAGE_MASK) ==
+ ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
+ /* Create direct DDE */
+ op.out = nx842_get_pa((void *)outbuf);
+ op.outlen = *outlen;
+ } else {
+ /* Create indirect DDE (scatterlist) */
+ nx842_build_scatterlist(outbuf, *outlen, &slout);
+ op.out = nx842_get_pa(slout.entries);
+ op.outlen = -nx842_get_scatterlist_size(&slout);
+ }
+
+ dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
+ __func__, (unsigned long)op.in, (long)op.inlen,
+ (unsigned long)op.out, (long)op.outlen);
+
+ /* Send request to pHyp */
+ ret = vio_h_cop_sync(local_devdata->vdev, &op);
+
+ /* Check for pHyp error */
+ if (ret) {
+ dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
+ __func__, ret, op.hcall_err);
+ goto unlock;
+ }
+
+ /* Check for hardware error */
+ ret = nx842_validate_result(dev, &csbcpb->csb);
+ if (ret)
+ goto unlock;
+
+ *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
+
+unlock:
+ if (ret)
+ /* decompress fail */
+ nx842_inc_decomp_failed(local_devdata);
+ else {
+ nx842_inc_decomp_complete(local_devdata);
+ ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
+ (get_tb() - start) / tb_ticks_per_usec);
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * nx842_OF_set_defaults -- Set default (disabled) values for devdata
+ *
+ * @devdata - struct nx842_devdata to update
+ *
+ * Returns:
+ * 0 on success
+ * -ENOENT if @devdata ptr is NULL
+ */
+static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
+{
+ if (devdata) {
+ devdata->max_sync_size = 0;
+ devdata->max_sync_sg = 0;
+ devdata->max_sg_len = 0;
+ return 0;
+ } else
+ return -ENOENT;
+}
+
+/**
+ * nx842_OF_upd_status -- Check the device info from OF status prop
+ *
+ * The status property indicates whether the accelerator is enabled. The
+ * presence of the device in the OF tree indicates that the hardware is
+ * present; the device is enabled when its status is 'okay'. Otherwise the
+ * device driver will be disabled.
+ *
+ * @devdata: struct nx842_devdata to use for dev_info
+ * @prop: struct property pointer containing the status for the update
+ *
+ * Returns:
+ * 0 - Device is available
+ * -ENODEV - Device is not available
+ * -EINVAL - Device status is unrecognized
+ */
+static int nx842_OF_upd_status(struct nx842_devdata *devdata,
+ struct property *prop)
+{
+ const char *status = (const char *)prop->value;
+
+ if (!strncmp(status, "okay", (size_t)prop->length))
+ return 0;
+ if (!strncmp(status, "disabled", (size_t)prop->length))
+ return -ENODEV;
+ dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
+
+ return -EINVAL;
+}
+
+/**
+ * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
+ *
+ * Definition of the 'ibm,max-sg-len' OF property:
+ * This field indicates the maximum byte length of a scatter list
+ * for the platform facility. It is a single cell encoded as with encode-int.
+ *
+ * Example:
+ * # od -x ibm,max-sg-len
+ * 0000000 0000 0ff0
+ *
+ * In this example, the maximum byte length of a scatter list is
+ * 0x0ff0 (4,080).
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the maxsglen for the update
+ *
+ * Returns:
+ * 0 on success
+ * -EINVAL on failure
+ */
+static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
+ struct property *prop) {
+ int ret = 0;
+ const unsigned int maxsglen = of_read_number(prop->value, 1);
+
+ if (prop->length != sizeof(maxsglen)) {
+ dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
+ dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
+ prop->length, sizeof(maxsglen));
+ ret = -EINVAL;
+ } else {
+ devdata->max_sg_len = min_t(unsigned int,
+ maxsglen, NX842_HW_PAGE_SIZE);
+ }
+
+ return ret;
+}
+
+/**
+ * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
+ *
+ * Definition of the 'ibm,max-sync-cop' OF property:
+ * Two series of cells. The first series of cells represents the maximums
+ * that can be synchronously compressed. The second series of cells
+ * represents the maximums that can be synchronously decompressed.
+ * 1. The first cell in each series contains the count of the number of
+ * data length, scatter list elements pairs that follow – each being
+ * of the form
+ * a. One cell data byte length
+ * b. One cell total number of scatter list elements
+ *
+ * Example:
+ * # od -x ibm,max-sync-cop
+ * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001
+ * 0000020 0000 1000 0000 01fe
+ *
+ * In this example, compression supports 0x1000 (4,096) data byte length
+ * and 0x1fe (510) total scatter list elements. Decompression supports
+ * 0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
+ * elements.
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the maxsyncop for the update
+ *
+ * Returns:
+ * 0 on success
+ * -EINVAL on failure
+ */
+static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
+ struct property *prop) {
+ int ret = 0;
+ unsigned int comp_data_limit, decomp_data_limit;
+ unsigned int comp_sg_limit, decomp_sg_limit;
+ const struct maxsynccop_t {
+ __be32 comp_elements;
+ __be32 comp_data_limit;
+ __be32 comp_sg_limit;
+ __be32 decomp_elements;
+ __be32 decomp_data_limit;
+ __be32 decomp_sg_limit;
+ } *maxsynccop;
+
+ if (prop->length != sizeof(*maxsynccop)) {
+ dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
+ dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
+ sizeof(*maxsynccop));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ maxsynccop = (const struct maxsynccop_t *)prop->value;
+ comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
+ comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
+ decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
+ decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);
+
+ /* Use one limit rather than separate limits for compression and
+ * decompression. Cap this so as not to exceed the size that the
+ * header can support, and reject anything below the hardware
+ * page size (4K), the driver's minimum. */
+ devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);
+
+ devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
+ 65536);
+
+ if (devdata->max_sync_size < 4096) {
+ dev_err(devdata->dev, "%s: hardware max data size (%u) is "
+ "less than the driver minimum, unable to use "
+ "the hardware device\n",
+ __func__, devdata->max_sync_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ nx842_pseries_constraints.maximum = devdata->max_sync_size;
+
+ devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
+ if (devdata->max_sync_sg < 1) {
+ dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
+ "less than the driver minimum, unable to use "
+ "the hardware device\n",
+ __func__, devdata->max_sync_sg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * nx842_OF_upd -- Handle OF properties updates for the device.
+ *
+ * Set all properties from the OF tree. Optionally, a new property
+ * can be provided by the @new_prop pointer to overwrite an existing value.
+ * The device will remain disabled until all values are valid, this function
+ * will return an error for updates unless all values are valid.
+ *
+ * @new_prop: If not NULL, this property is being updated. If NULL, update
+ * all properties from the current values in the OF tree.
+ *
+ * Returns:
+ * 0 - Success
+ * -ENOMEM - Could not allocate memory for new devdata structure
+ * -EINVAL - property value not found, new_prop is not a recognized
+ * property for the device or property value is not valid.
+ * -ENODEV - Device is not available
+ */
+static int nx842_OF_upd(struct property *new_prop)
+{
+ struct nx842_devdata *old_devdata = NULL;
+ struct nx842_devdata *new_devdata = NULL;
+ struct device_node *of_node = NULL;
+ struct property *status = NULL;
+ struct property *maxsglen = NULL;
+ struct property *maxsyncop = NULL;
+ int ret = 0;
+ unsigned long flags;
+
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+ if (!new_devdata)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&devdata_mutex, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_mutex));
+ if (old_devdata)
+ of_node = old_devdata->dev->of_node;
+
+ if (!old_devdata || !of_node) {
+ pr_err("%s: device is not available\n", __func__);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ kfree(new_devdata);
+ return -ENODEV;
+ }
+
+ memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
+ new_devdata->counters = old_devdata->counters;
+
+ /* Set ptrs for existing properties */
+ status = of_find_property(of_node, "status", NULL);
+ maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
+ maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
+ if (!status || !maxsglen || !maxsyncop) {
+ dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ /*
+ * If this is a property update, there are only certain properties that
+ * we care about. Bail if it isn't in the below list
+ */
+ if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) &&
+ strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) &&
+ strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
+ goto out;
+
+ /* Perform property updates */
+ ret = nx842_OF_upd_status(new_devdata, status);
+ if (ret)
+ goto error_out;
+
+ ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
+ if (ret)
+ goto error_out;
+
+ ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
+ if (ret)
+ goto error_out;
+
+out:
+ dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
+ __func__, new_devdata->max_sync_size,
+ old_devdata->max_sync_size);
+ dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
+ __func__, new_devdata->max_sync_sg,
+ old_devdata->max_sync_sg);
+ dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
+ __func__, new_devdata->max_sg_len,
+ old_devdata->max_sg_len);
+
+ rcu_assign_pointer(devdata, new_devdata);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ dev_set_drvdata(new_devdata->dev, new_devdata);
+ kfree(old_devdata);
+ return 0;
+
+error_out:
+ if (new_devdata) {
+ dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
+ nx842_OF_set_defaults(new_devdata);
+ rcu_assign_pointer(devdata, new_devdata);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ dev_set_drvdata(new_devdata->dev, new_devdata);
+ kfree(old_devdata);
+ } else {
+ dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ }
+
+ if (!ret)
+ ret = -EINVAL;
+ return ret;
+}
+
+/**
+ * nx842_OF_notifier - Process updates to OF properties for the device
+ *
+ * @np: notifier block
+ * @action: notifier action
+ * @data: struct of_reconfig_data pointer if action is
+ * OF_RECONFIG_UPDATE_PROPERTY
+ *
+ * Returns:
+ * NOTIFY_OK on success
+ * NOTIFY_BAD encoded with error number on failure, use
+ * notifier_to_errno() to decode this value
+ */
+static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
+ void *data)
+{
+ struct of_reconfig_data *upd = data;
+ struct nx842_devdata *local_devdata;
+ struct device_node *node = NULL;
+
+ rcu_read_lock();
+ local_devdata = rcu_dereference(devdata);
+ if (local_devdata)
+ node = local_devdata->dev->of_node;
+
+ if (local_devdata &&
+ action == OF_RECONFIG_UPDATE_PROPERTY &&
+ !strcmp(upd->dn->name, node->name)) {
+ rcu_read_unlock();
+ nx842_OF_upd(upd->prop);
+ } else
+ rcu_read_unlock();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nx842_of_nb = {
+ .notifier_call = nx842_OF_notifier,
+};
+
+#define nx842_counter_read(_name) \
+static ssize_t nx842_##_name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) { \
+ struct nx842_devdata *local_devdata; \
+ int p = 0; \
+ rcu_read_lock(); \
+ local_devdata = rcu_dereference(devdata); \
+ if (local_devdata) \
+ p = snprintf(buf, PAGE_SIZE, "%lld\n", \
+ atomic64_read(&local_devdata->counters->_name)); \
+ rcu_read_unlock(); \
+ return p; \
+}
+
+#define NX842DEV_COUNTER_ATTR_RO(_name) \
+ nx842_counter_read(_name); \
+ static struct device_attribute dev_attr_##_name = __ATTR(_name, \
+ 0444, \
+ nx842_##_name##_show,\
+ NULL);
+
+NX842DEV_COUNTER_ATTR_RO(comp_complete);
+NX842DEV_COUNTER_ATTR_RO(comp_failed);
+NX842DEV_COUNTER_ATTR_RO(decomp_complete);
+NX842DEV_COUNTER_ATTR_RO(decomp_failed);
+NX842DEV_COUNTER_ATTR_RO(swdecomp);
+
+static ssize_t nx842_timehist_show(struct device *,
+ struct device_attribute *, char *);
+
+static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
+ nx842_timehist_show, NULL);
+static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
+ 0444, nx842_timehist_show, NULL);
+
+static ssize_t nx842_timehist_show(struct device *dev,
+ struct device_attribute *attr, char *buf) {
+ char *p = buf;
+ struct nx842_devdata *local_devdata;
+ atomic64_t *times;
+ int bytes_remain = PAGE_SIZE;
+ int bytes;
+ int i;
+
+ rcu_read_lock();
+ local_devdata = rcu_dereference(devdata);
+ if (!local_devdata) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (attr == &dev_attr_comp_times)
+ times = local_devdata->counters->comp_times;
+ else if (attr == &dev_attr_decomp_times)
+ times = local_devdata->counters->decomp_times;
+ else {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
+ bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
+ i ? (2<<(i-1)) : 0, (2<<i)-1,
+ atomic64_read(&times[i]));
+ bytes_remain -= bytes;
+ p += bytes;
+ }
+ /* The last bucket holds everything over
+ * 2<<(NX842_HIST_SLOTS - 2) us */
+ bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
+ 2<<(NX842_HIST_SLOTS - 2),
+ atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
+ p += bytes;
+
+ rcu_read_unlock();
+ return p - buf;
+}
+
+static struct attribute *nx842_sysfs_entries[] = {
+ &dev_attr_comp_complete.attr,
+ &dev_attr_comp_failed.attr,
+ &dev_attr_decomp_complete.attr,
+ &dev_attr_decomp_failed.attr,
+ &dev_attr_swdecomp.attr,
+ &dev_attr_comp_times.attr,
+ &dev_attr_decomp_times.attr,
+ NULL,
+};
+
+static struct attribute_group nx842_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = nx842_sysfs_entries,
+};
+
+static struct nx842_driver nx842_pseries_driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .workmem_size = sizeof(struct nx842_workmem),
+ .constraints = &nx842_pseries_constraints,
+ .compress = nx842_pseries_compress,
+ .decompress = nx842_pseries_decompress,
+};
+
+static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+{
+ return nx842_crypto_init(tfm, &nx842_pseries_driver);
+}
+
+static struct crypto_alg nx842_pseries_alg = {
+ .cra_name = "842",
+ .cra_driver_name = "842-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = nx842_pseries_crypto_init,
+ .cra_exit = nx842_crypto_exit,
+ .cra_u = { .compress = {
+ .coa_compress = nx842_crypto_compress,
+ .coa_decompress = nx842_crypto_decompress } }
+};
+
+static int nx842_probe(struct vio_dev *viodev,
+ const struct vio_device_id *id)
+{
+ struct nx842_devdata *old_devdata, *new_devdata = NULL;
+ unsigned long flags;
+ int ret = 0;
+
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+ if (!new_devdata)
+ return -ENOMEM;
+
+ new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
+ GFP_NOFS);
+ if (!new_devdata->counters) {
+ kfree(new_devdata);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&devdata_mutex, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_mutex));
+
+ if (old_devdata && old_devdata->vdev != NULL) {
+ dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
+ ret = -1;
+ goto error_unlock;
+ }
+
+ dev_set_drvdata(&viodev->dev, NULL);
+
+ new_devdata->vdev = viodev;
+ new_devdata->dev = &viodev->dev;
+ nx842_OF_set_defaults(new_devdata);
+
+ rcu_assign_pointer(devdata, new_devdata);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ kfree(old_devdata);
+
+ of_reconfig_notifier_register(&nx842_of_nb);
+
+ ret = nx842_OF_upd(NULL);
+ if (ret)
+ goto error;
+
+ ret = crypto_register_alg(&nx842_pseries_alg);
+ if (ret) {
+ dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
+ goto error;
+ }
+
+ rcu_read_lock();
+ dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
+ rcu_read_unlock();
+
+ if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
+ dev_err(&viodev->dev, "could not create sysfs device attributes\n");
+ ret = -1;
+ goto error;
+ }
+
+ return 0;
+
+error_unlock:
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ if (new_devdata)
+ kfree(new_devdata->counters);
+ kfree(new_devdata);
+error:
+ return ret;
+}
+
+static int nx842_remove(struct vio_dev *viodev)
+{
+ struct nx842_devdata *old_devdata;
+ unsigned long flags;
+
+ pr_info("Removing IBM Power 842 compression device\n");
+ sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
+
+ crypto_unregister_alg(&nx842_pseries_alg);
+
+ spin_lock_irqsave(&devdata_mutex, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_mutex));
+ of_reconfig_notifier_unregister(&nx842_of_nb);
+ RCU_INIT_POINTER(devdata, NULL);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ dev_set_drvdata(&viodev->dev, NULL);
+ if (old_devdata)
+ kfree(old_devdata->counters);
+ kfree(old_devdata);
+
+ return 0;
+}
+
+static const struct vio_device_id nx842_vio_driver_ids[] = {
+ {"ibm,compression-v1", "ibm,compression"},
+ {"", ""},
+};
+MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);
+
+static struct vio_driver nx842_vio_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = nx842_probe,
+ .remove = nx842_remove,
+ .get_desired_dma = nx842_get_desired_dma,
+ .id_table = nx842_vio_driver_ids,
+};
+
+static int __init nx842_pseries_init(void)
+{
+ struct nx842_devdata *new_devdata;
+ int ret;
+
+ if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
+ return -ENODEV;
+
+ RCU_INIT_POINTER(devdata, NULL);
+ new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
+ if (!new_devdata)
+ return -ENOMEM;
+
+ RCU_INIT_POINTER(devdata, new_devdata);
+
+ ret = vio_register_driver(&nx842_vio_driver);
+ if (ret) {
+ pr_err("Could not register VIO driver %d\n", ret);
+
+ kfree(new_devdata);
+ return ret;
+ }
+
+ return 0;
+}
+
+module_init(nx842_pseries_init);
+
+static void __exit nx842_pseries_exit(void)
+{
+ struct nx842_devdata *old_devdata;
+ unsigned long flags;
+
+ crypto_unregister_alg(&nx842_pseries_alg);
+
+ spin_lock_irqsave(&devdata_mutex, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_mutex));
+ RCU_INIT_POINTER(devdata, NULL);
+ spin_unlock_irqrestore(&devdata_mutex, flags);
+ synchronize_rcu();
+ if (old_devdata && old_devdata->dev)
+ dev_set_drvdata(old_devdata->dev, NULL);
+ kfree(old_devdata);
+ vio_unregister_driver(&nx842_vio_driver);
+}
+
+module_exit(nx842_pseries_exit);
+
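Two recurring pieces of logic in the file above are worth isolating: both the compress and decompress paths choose a direct DDE only when the buffer does not cross a 4K hardware page, and ibm_nx842_incr_hist() buckets latencies by power of two. A standalone sketch of both, with illustrative example_ helper names that are not part of the driver:

	#include <linux/bitops.h>	/* fls() */
	#include <linux/minmax.h>	/* min() */

	#define EX_HW_PAGE_SIZE	(4096)
	#define EX_HW_PAGE_MASK	(~(EX_HW_PAGE_SIZE - 1))

	/* true if [buf, buf + len) stays within one 4K page, so a direct DDE
	 * works; otherwise an indirect DDE (scatterlist) must be built */
	static bool example_fits_one_page(unsigned long buf, unsigned int len)
	{
		return (buf & EX_HW_PAGE_MASK) ==
		       ((buf + len - 1) & EX_HW_PAGE_MASK);
	}

	/* same bucketing as ibm_nx842_incr_hist(): 0-1us -> bucket 0,
	 * 2-3us -> 1, 4-7us -> 2, ..., >= 2^(slots-1) us -> last bucket */
	static int example_hist_bucket(unsigned int time_us, int slots)
	{
		int bucket = fls(time_us);

		if (bucket)
			bucket = min(slots - 1, bucket - 1);
		return bucket;
	}
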
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
new file mode 100644
index 000000000..2ab90ec10
--- /dev/null
+++ b/drivers/crypto/nx/nx-842.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API for the NX-842 hardware compression.
+ *
+ * Copyright (C) IBM Corporation, 2011-2015
+ *
+ * Designer of the Power data compression engine:
+ * Bulent Abali <abali@us.ibm.com>
+ *
+ * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * Rewrite: Dan Streetman <ddstreet@ieee.org>
+ *
+ * This is an interface to the NX-842 compression hardware in PowerPC
+ * processors. Most of the complexity of this driver is due to the fact that
+ * the NX-842 compression hardware requires the input and output data buffers
+ * to be specifically aligned, to be a specific multiple in length, and within
+ * specific minimum and maximum lengths. Those restrictions, provided by the
+ * nx-842 driver via nx842_constraints, mean this driver must use bounce
+ * buffers and headers to correct misaligned in or out buffers, and to split
+ * input buffers that are too large.
+ *
+ * This driver will fall back to software decompression if the hardware
+ * decompression fails, so this driver's decompression should never fail as
+ * long as the provided compressed buffer is valid. Any compressed buffer
+ * created by this driver will have a header (except ones where the input
+ * perfectly matches the constraints); so users of this driver cannot simply
+ * pass a compressed buffer created by this driver over to the 842 software
+ * decompression library. Instead, users must use this driver to decompress;
+ * if the hardware fails or is unavailable, the compressed buffer will be
+ * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
+ * software decompression library.
+ *
+ * This does not fall back to software compression, however, since the caller
+ * of this function is specifically requesting hardware compression; if the
+ * hardware compression fails, the caller can fall back to software
+ * compression, and the raw 842 compressed buffer that the software compressor
+ * creates can be passed to this driver for hardware decompression; any
+ * buffer without our specific header magic is assumed to be a raw 842 buffer
+ * and passed directly to the hardware. Note that the software compression
+ * library will produce a compressed buffer that is incompatible with the
+ * hardware decompressor if the original input buffer length is not a multiple
+ * of 8; if such a compressed buffer is passed to this driver for
+ * decompression, the hardware will reject it and this driver will then pass
+ * it over to the software library for decompression.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/vmalloc.h>
+#include <linux/sw842.h>
+#include <linux/spinlock.h>
+
+#include "nx-842.h"
+
+/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
+ * template (see lib/842/842.h), so this magic number will never appear at
+ * the start of a raw 842 compressed buffer. That is important, as any buffer
+ * passed to us without this magic is assumed to be a raw 842 compressed
+ * buffer, and passed directly to the hardware to decompress.
+ */
+#define NX842_CRYPTO_MAGIC (0xf842)
+#define NX842_CRYPTO_HEADER_SIZE(g) \
+ (sizeof(struct nx842_crypto_header) + \
+ sizeof(struct nx842_crypto_header_group) * (g))
+#define NX842_CRYPTO_HEADER_MAX_SIZE \
+ NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
+
+/* bounce buffer size */
+#define BOUNCE_BUFFER_ORDER (2)
+#define BOUNCE_BUFFER_SIZE \
+ ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
+
+/* try longer on comp because we can fallback to sw decomp if hw is busy */
+#define COMP_BUSY_TIMEOUT (250) /* ms */
+#define DECOMP_BUSY_TIMEOUT (50) /* ms */
+
+struct nx842_crypto_param {
+ u8 *in;
+ unsigned int iremain;
+ u8 *out;
+ unsigned int oremain;
+ unsigned int ototal;
+};
+
+static int update_param(struct nx842_crypto_param *p,
+ unsigned int slen, unsigned int dlen)
+{
+ if (p->iremain < slen)
+ return -EOVERFLOW;
+ if (p->oremain < dlen)
+ return -ENOSPC;
+
+ p->in += slen;
+ p->iremain -= slen;
+ p->out += dlen;
+ p->oremain -= dlen;
+ p->ototal += dlen;
+
+ return 0;
+}
+
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ spin_lock_init(&ctx->lock);
+ ctx->driver = driver;
+ ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
+ ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+ ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+ if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
+ kfree(ctx->wmem);
+ free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER);
+ free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_init);
+
+void nx842_crypto_exit(struct crypto_tfm *tfm)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ kfree(ctx->wmem);
+ free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER);
+ free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER);
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_exit);
+
+static void check_constraints(struct nx842_constraints *c)
+{
+ /* limit maximum, to always have enough bounce buffer to decompress */
+ if (c->maximum > BOUNCE_BUFFER_SIZE)
+ c->maximum = BOUNCE_BUFFER_SIZE;
+}
+
+static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
+{
+ int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+
+ /* compress should have added space for header */
+ if (s > be16_to_cpu(hdr->group[0].padding)) {
+ pr_err("Internal error: no space for header\n");
+ return -EINVAL;
+ }
+
+ memcpy(buf, hdr, s);
+
+ print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
+
+ return 0;
+}
+
+static int compress(struct nx842_crypto_ctx *ctx,
+ struct nx842_crypto_param *p,
+ struct nx842_crypto_header_group *g,
+ struct nx842_constraints *c,
+ u16 *ignore,
+ unsigned int hdrsize)
+{
+ unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
+ unsigned int adj_slen = slen;
+ u8 *src = p->in, *dst = p->out;
+ int ret, dskip = 0;
+ ktime_t timeout;
+
+ if (p->iremain == 0)
+ return -EOVERFLOW;
+
+ if (p->oremain == 0 || hdrsize + c->minimum > dlen)
+ return -ENOSPC;
+
+ if (slen % c->multiple)
+ adj_slen = round_up(slen, c->multiple);
+ if (slen < c->minimum)
+ adj_slen = c->minimum;
+ if (slen > c->maximum)
+ adj_slen = slen = c->maximum;
+ if (adj_slen > slen || (u64)src % c->alignment) {
+ adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
+ slen = min(slen, BOUNCE_BUFFER_SIZE);
+ if (adj_slen > slen)
+ memset(ctx->sbounce + slen, 0, adj_slen - slen);
+ memcpy(ctx->sbounce, src, slen);
+ src = ctx->sbounce;
+ slen = adj_slen;
+ pr_debug("using comp sbounce buffer, len %x\n", slen);
+ }
+
+ dst += hdrsize;
+ dlen -= hdrsize;
+
+ if ((u64)dst % c->alignment) {
+ dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
+ dst += dskip;
+ dlen -= dskip;
+ }
+ if (dlen % c->multiple)
+ dlen = round_down(dlen, c->multiple);
+ if (dlen < c->minimum) {
+nospc:
+ dst = ctx->dbounce;
+ dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
+ dlen = round_down(dlen, c->multiple);
+ dskip = 0;
+ pr_debug("using comp dbounce buffer, len %x\n", dlen);
+ }
+ if (dlen > c->maximum)
+ dlen = c->maximum;
+
+ tmplen = dlen;
+ timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
+ do {
+ dlen = tmplen; /* reset dlen, if we're retrying */
+ ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
+ /* possibly we should reduce the slen here, instead of
+ * retrying with the dbounce buffer?
+ */
+ if (ret == -ENOSPC && dst != ctx->dbounce)
+ goto nospc;
+ } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+ if (ret)
+ return ret;
+
+ dskip += hdrsize;
+
+ if (dst == ctx->dbounce)
+ memcpy(p->out + dskip, dst, dlen);
+
+ g->padding = cpu_to_be16(dskip);
+ g->compressed_length = cpu_to_be32(dlen);
+ g->uncompressed_length = cpu_to_be32(slen);
+
+ if (p->iremain < slen) {
+ *ignore = slen - p->iremain;
+ slen = p->iremain;
+ }
+
+ pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
+ slen, *ignore, dlen, dskip);
+
+ return update_param(p, slen, dskip + dlen);
+}
+
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_header *hdr = &ctx->header;
+ struct nx842_crypto_param p;
+ struct nx842_constraints c = *ctx->driver->constraints;
+ unsigned int groups, hdrsize, h;
+ int ret, n;
+ bool add_header;
+ u16 ignore = 0;
+
+ check_constraints(&c);
+
+ p.in = (u8 *)src;
+ p.iremain = slen;
+ p.out = dst;
+ p.oremain = *dlen;
+ p.ototal = 0;
+
+ *dlen = 0;
+
+ groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
+ DIV_ROUND_UP(p.iremain, c.maximum));
+ hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
+
+ spin_lock_bh(&ctx->lock);
+
+ /* skip adding header if the buffers meet all constraints */
+ add_header = (p.iremain % c.multiple ||
+ p.iremain < c.minimum ||
+ p.iremain > c.maximum ||
+ (u64)p.in % c.alignment ||
+ p.oremain % c.multiple ||
+ p.oremain < c.minimum ||
+ p.oremain > c.maximum ||
+ (u64)p.out % c.alignment);
+
+ hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
+ hdr->groups = 0;
+ hdr->ignore = 0;
+
+ while (p.iremain > 0) {
+ n = hdr->groups++;
+ ret = -ENOSPC;
+ if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
+ goto unlock;
+
+ /* header goes before first group */
+ h = !n && add_header ? hdrsize : 0;
+
+ if (ignore)
+ pr_warn("internal error, ignore is set %x\n", ignore);
+
+ ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
+ if (ret)
+ goto unlock;
+ }
+
+ if (!add_header && hdr->groups > 1) {
+ pr_err("Internal error: No header but multiple groups\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* ignore indicates the input stream needed to be padded */
+ hdr->ignore = cpu_to_be16(ignore);
+ if (ignore)
+ pr_debug("marked %d bytes as ignore\n", ignore);
+
+ if (add_header)
+ ret = nx842_crypto_add_header(hdr, dst);
+ if (ret)
+ goto unlock;
+
+ *dlen = p.ototal;
+
+ pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
+
+unlock:
+ spin_unlock_bh(&ctx->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_compress);
+
+static int decompress(struct nx842_crypto_ctx *ctx,
+ struct nx842_crypto_param *p,
+ struct nx842_crypto_header_group *g,
+ struct nx842_constraints *c,
+ u16 ignore)
+{
+ unsigned int slen = be32_to_cpu(g->compressed_length);
+ unsigned int required_len = be32_to_cpu(g->uncompressed_length);
+ unsigned int dlen = p->oremain, tmplen;
+ unsigned int adj_slen = slen;
+ u8 *src = p->in, *dst = p->out;
+ u16 padding = be16_to_cpu(g->padding);
+ int ret, spadding = 0;
+ ktime_t timeout;
+
+ if (!slen || !required_len)
+ return -EINVAL;
+
+ if (p->iremain <= 0 || padding + slen > p->iremain)
+ return -EOVERFLOW;
+
+ if (p->oremain <= 0 || required_len - ignore > p->oremain)
+ return -ENOSPC;
+
+ src += padding;
+
+ if (slen % c->multiple)
+ adj_slen = round_up(slen, c->multiple);
+ if (slen < c->minimum)
+ adj_slen = c->minimum;
+ if (slen > c->maximum)
+ goto usesw;
+ if (slen < adj_slen || (u64)src % c->alignment) {
+ /* we can append padding bytes because the 842 format defines
+ * an "end" template (see lib/842/842_decompress.c) and will
+ * ignore any bytes following it.
+ */
+ if (slen < adj_slen)
+ memset(ctx->sbounce + slen, 0, adj_slen - slen);
+ memcpy(ctx->sbounce, src, slen);
+ src = ctx->sbounce;
+ spadding = adj_slen - slen;
+ slen = adj_slen;
+ pr_debug("using decomp sbounce buffer, len %x\n", slen);
+ }
+
+ if (dlen % c->multiple)
+ dlen = round_down(dlen, c->multiple);
+ if (dlen < required_len || (u64)dst % c->alignment) {
+ dst = ctx->dbounce;
+ dlen = min(required_len, BOUNCE_BUFFER_SIZE);
+ pr_debug("using decomp dbounce buffer, len %x\n", dlen);
+ }
+ if (dlen < c->minimum)
+ goto usesw;
+ if (dlen > c->maximum)
+ dlen = c->maximum;
+
+ tmplen = dlen;
+ timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
+ do {
+ dlen = tmplen; /* reset dlen, if we're retrying */
+ ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
+ } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+ if (ret) {
+usesw:
+ /* reset everything, sw doesn't have constraints */
+ src = p->in + padding;
+ slen = be32_to_cpu(g->compressed_length);
+ spadding = 0;
+ dst = p->out;
+ dlen = p->oremain;
+ if (dlen < required_len) { /* have ignore bytes */
+ dst = ctx->dbounce;
+ dlen = BOUNCE_BUFFER_SIZE;
+ }
+ pr_info_ratelimited("using software 842 decompression\n");
+ ret = sw842_decompress(src, slen, dst, &dlen);
+ }
+ if (ret)
+ return ret;
+
+ slen -= spadding;
+
+ dlen -= ignore;
+ if (ignore)
+ pr_debug("ignoring last %x bytes\n", ignore);
+
+ if (dst == ctx->dbounce)
+ memcpy(p->out, dst, dlen);
+
+ pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
+ slen, padding, dlen, ignore);
+
+ return update_param(p, slen + padding, dlen);
+}
+
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_crypto_header *hdr;
+ struct nx842_crypto_param p;
+ struct nx842_constraints c = *ctx->driver->constraints;
+ int n, ret, hdr_len;
+ u16 ignore = 0;
+
+ check_constraints(&c);
+
+ p.in = (u8 *)src;
+ p.iremain = slen;
+ p.out = dst;
+ p.oremain = *dlen;
+ p.ototal = 0;
+
+ *dlen = 0;
+
+ hdr = (struct nx842_crypto_header *)src;
+
+ spin_lock_bh(&ctx->lock);
+
+ /* If it doesn't start with our header magic number, assume it's a raw
+ * 842 compressed buffer and pass it directly to the hardware driver
+ */
+ if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
+ struct nx842_crypto_header_group g = {
+ .padding = 0,
+ .compressed_length = cpu_to_be32(p.iremain),
+ .uncompressed_length = cpu_to_be32(p.oremain),
+ };
+
+ ret = decompress(ctx, &p, &g, &c, 0);
+ if (ret)
+ goto unlock;
+
+ goto success;
+ }
+
+ if (!hdr->groups) {
+ pr_err("header has no groups\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
+ pr_err("header has too many groups %x, max %x\n",
+ hdr->groups, NX842_CRYPTO_GROUP_MAX);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+ if (hdr_len > slen) {
+ ret = -EOVERFLOW;
+ goto unlock;
+ }
+
+ memcpy(&ctx->header, src, hdr_len);
+ hdr = &ctx->header;
+
+ for (n = 0; n < hdr->groups; n++) {
+ /* ignore applies to last group */
+ if (n + 1 == hdr->groups)
+ ignore = be16_to_cpu(hdr->ignore);
+
+ ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
+ if (ret)
+ goto unlock;
+ }
+
+success:
+ *dlen = p.ototal;
+
+ pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
+
+ ret = 0;
+
+unlock:
+ spin_unlock_bh(&ctx->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_decompress);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
new file mode 100644
index 000000000..b66f19ac6
--- /dev/null
+++ b/drivers/crypto/nx/nx-842.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NX_842_H__
+#define __NX_842_H__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/ratelimit.h>
+
+/* Restrictions on Data Descriptor List (DDL) and Entry (DDE) buffers
+ *
+ * From NX P8 workbook, sec 4.9.1 "842 details"
+ * Each DDE buffer is 128 byte aligned
+ * Each DDE buffer size is a multiple of 32 bytes (except the last)
+ * The last DDE buffer size is a multiple of 8 bytes
+ */
+#define DDE_BUFFER_ALIGN (128)
+#define DDE_BUFFER_SIZE_MULT (32)
+#define DDE_BUFFER_LAST_MULT (8)
+
+/* Arbitrary DDL length limit
+ * Allows max buffer size of MAX-1 to MAX pages
+ * (depending on alignment)
+ */
+#define DDL_LEN_MAX (17)
+
+/* CCW 842 CI/FC masks
+ * NX P8 workbook, section 4.3.1, figure 4-6
+ * "CI/FC Boundary by NX CT type"
+ */
+#define CCW_CI_842 (0x00003ff8)
+#define CCW_FC_842 (0x00000007)
+
+/* CCW Function Codes (FC) for 842
+ * NX P8 workbook, section 4.9, table 4-28
+ * "Function Code Definitions for 842 Memory Compression"
+ */
+#define CCW_FC_842_COMP_NOCRC (0)
+#define CCW_FC_842_COMP_CRC (1)
+#define CCW_FC_842_DECOMP_NOCRC (2)
+#define CCW_FC_842_DECOMP_CRC (3)
+#define CCW_FC_842_MOVE (4)
+
+/* CSB CC Error Types for 842
+ * NX P8 workbook, section 4.10.3, table 4-30
+ * "Reported Error Types Summary Table"
+ */
+/* These are all duplicates of existing codes defined in icswx.h. */
+#define CSB_CC_TRANSLATION_DUP1 (80)
+#define CSB_CC_TRANSLATION_DUP2 (82)
+#define CSB_CC_TRANSLATION_DUP3 (84)
+#define CSB_CC_TRANSLATION_DUP4 (86)
+#define CSB_CC_TRANSLATION_DUP5 (92)
+#define CSB_CC_TRANSLATION_DUP6 (94)
+#define CSB_CC_PROTECTION_DUP1 (81)
+#define CSB_CC_PROTECTION_DUP2 (83)
+#define CSB_CC_PROTECTION_DUP3 (85)
+#define CSB_CC_PROTECTION_DUP4 (87)
+#define CSB_CC_PROTECTION_DUP5 (93)
+#define CSB_CC_PROTECTION_DUP6 (95)
+#define CSB_CC_RD_EXTERNAL_DUP1 (89)
+#define CSB_CC_RD_EXTERNAL_DUP2 (90)
+#define CSB_CC_RD_EXTERNAL_DUP3 (91)
+/* These are specific to NX */
+/* 842 codes */
+#define CSB_CC_TPBC_GT_SPBC (64) /* no error, but >1 comp ratio */
+#define CSB_CC_CRC_MISMATCH (65) /* decomp crc mismatch */
+#define CSB_CC_TEMPL_INVALID (66) /* decomp invalid template value */
+#define CSB_CC_TEMPL_OVERFLOW (67) /* decomp template shows data after end */
+/* sym crypt codes */
+#define CSB_CC_DECRYPT_OVERFLOW (64)
+/* asym crypt codes */
+#define CSB_CC_MINV_OVERFLOW (128)
+/*
+ * HW error - Job did not finish in the maximum time allowed.
+ * Job terminated.
+ */
+#define CSB_CC_HW_EXPIRED_TIMER (224)
+/* These are reserved for hypervisor use */
+#define CSB_CC_HYP_RESERVE_START (240)
+#define CSB_CC_HYP_RESERVE_END (253)
+#define CSB_CC_HYP_RESERVE_P9_END (251)
+/* No valid interrupt server (P9 or later). */
+#define CSB_CC_HYP_RESERVE_NO_INTR_SERVER (252)
+#define CSB_CC_HYP_NO_HW (254)
+#define CSB_CC_HYP_HANG_ABORTED (255)
+
+/* CCB Completion Modes (CM) for 842
+ * NX P8 workbook, section 4.3, figure 4-5
+ * "CRB Details - Normal Cop_Req (CL=00, C=1)"
+ */
+#define CCB_CM_EXTRA_WRITE (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_STORE)
+#define CCB_CM_INTERRUPT (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_INTERRUPT)
+
+#define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1)))
+#define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE)
+
+static inline unsigned long nx842_get_pa(void *addr)
+{
+ if (!is_vmalloc_addr(addr))
+ return __pa(addr);
+
+ return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr);
+}
+
+/**
+ * This provides the driver's constraints. Different nx842 implementations
+ * may have varying requirements. The constraints are:
+ * @alignment: All buffers should be aligned to this
+ * @multiple: All buffer lengths should be a multiple of this
+ * @minimum: Buffer lengths must not be less than this amount
+ * @maximum: Buffer lengths must not be more than this amount
+ *
+ * The constraints apply to all buffers and lengths, both input and output,
+ * for both compression and decompression, except for the minimum which
+ * only applies to compression input and decompression output; the
+ * compressed data can be less than the minimum constraint. It can be
+ * assumed that compressed data will always adhere to the multiple
+ * constraint.
+ *
+ * The driver may succeed even if these constraints are violated;
+ * however the driver can return failure or suffer reduced performance
+ * if any constraint is not met.
+ */
+struct nx842_constraints {
+ int alignment;
+ int multiple;
+ int minimum;
+ int maximum;
+};
+
+struct nx842_driver {
+ char *name;
+ struct module *owner;
+ size_t workmem_size;
+
+ struct nx842_constraints *constraints;
+
+ int (*compress)(const unsigned char *in, unsigned int in_len,
+ unsigned char *out, unsigned int *out_len,
+ void *wrkmem);
+ int (*decompress)(const unsigned char *in, unsigned int in_len,
+ unsigned char *out, unsigned int *out_len,
+ void *wrkmem);
+};
+
+struct nx842_crypto_header_group {
+ __be16 padding; /* unused bytes at start of group */
+ __be32 compressed_length; /* compressed bytes in group */
+ __be32 uncompressed_length; /* bytes after decompression */
+} __packed;
+
+struct nx842_crypto_header {
+ __be16 magic; /* NX842_CRYPTO_MAGIC */
+ __be16 ignore; /* decompressed end bytes to ignore */
+ u8 groups; /* total groups in this header */
+ struct nx842_crypto_header_group group[];
+} __packed;
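+
+/*
+ * With the __packed layouts above, the on-the-wire header size for n
+ * groups is sizeof(struct nx842_crypto_header) +
+ * n * sizeof(struct nx842_crypto_header_group), i.e. 5 + 10 * n bytes.
+ */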
+
+#define NX842_CRYPTO_GROUP_MAX (0x20)
+
+struct nx842_crypto_ctx {
+ spinlock_t lock;
+
+ u8 *wmem;
+ u8 *sbounce, *dbounce;
+
+ struct nx842_crypto_header header;
+ struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
+
+ struct nx842_driver *driver;
+};
+
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
+void nx842_crypto_exit(struct crypto_tfm *tfm);
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+
+#endif /* __NX_842_H__ */
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
new file mode 100644
index 000000000..92e921ece
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES CBC routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int cbc_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ case AES_KEYSIZE_192:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+ break;
+ case AES_KEYSIZE_256:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_CBC;
+ memcpy(csbcpb->cpb.aes_cbc.key, in_key, key_len);
+
+ return 0;
+}
+
+static int cbc_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ int rc;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ if (enc)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ do {
+ to_process = req->cryptlen - processed;
+
+ rc = nx_build_sg_lists(nx_ctx, req->iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_cbc.iv);
+ if (rc)
+ goto out;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ memcpy(req->iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < req->cryptlen);
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int cbc_aes_nx_encrypt(struct skcipher_request *req)
+{
+ return cbc_aes_nx_crypt(req, 1);
+}
+
+static int cbc_aes_nx_decrypt(struct skcipher_request *req)
+{
+ return cbc_aes_nx_crypt(req, 0);
+}
+
+struct skcipher_alg nx_cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_alignmask = 0xf,
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_cbc_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_aes_nx_set_key,
+ .encrypt = cbc_aes_nx_encrypt,
+ .decrypt = cbc_aes_nx_decrypt,
+};
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
new file mode 100644
index 000000000..4c9362eeb
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES CCM routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
+ memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);
+
+ csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
+ memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);
+
+ return 0;
+
+}
+
+static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+
+ if (key_len < 3)
+ return -EINVAL;
+
+ key_len -= 3;
+
+ memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);
+
+ return ccm_aes_nx_set_key(tfm, in_key, key_len);
+}
+
+static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* taken from crypto/ccm.c */
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (unsigned int)(1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
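+
+/*
+ * Worked example for set_msg_len(): with csize == 3 (i.e. L == 3) and
+ * msglen == 0x012345, the three-byte l(m) field is written as
+ * 0x01 0x23 0x45 -- the big-endian encoding that CCM's B0 block requires.
+ */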
+
+/* taken from crypto/ccm.c */
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (1 > iv[0] || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* based on code from crypto/ccm.c */
+static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
+ unsigned int cryptlen, u8 *b0)
+{
+ unsigned int l, lp, m = authsize;
+ int rc;
+
+ memcpy(b0, iv, 16);
+
+ lp = b0[0];
+ l = lp + 1;
+
+ /* set m, bits 3-5 */
+ *b0 |= (8 * ((m - 2) / 2));
+
+ /* set adata, bit 6, if associated data is used */
+ if (assoclen)
+ *b0 |= 64;
+
+ rc = set_msg_len(b0 + 16 - l, cryptlen, l);
+
+ return rc;
+}
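+
+/*
+ * Flags-byte arithmetic above, worked through (illustrative values):
+ * for iv[0] == 2 (so L' == 2), an 8-byte tag (m == 8) and non-empty
+ * associated data, b0[0] becomes 2 | (8 * ((8 - 2) / 2)) | 64 == 0x5a,
+ * matching the B0 flags layout in RFC 3610 section 2.2.
+ */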
+
+static int generate_pat(u8 *iv,
+ struct aead_request *req,
+ struct nx_crypto_ctx *nx_ctx,
+ unsigned int authsize,
+ unsigned int nbytes,
+ unsigned int assoclen,
+ u8 *out)
+{
+ struct nx_sg *nx_insg = nx_ctx->in_sg;
+ struct nx_sg *nx_outsg = nx_ctx->out_sg;
+ unsigned int iauth_len = 0;
+ u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
+ int rc;
+ unsigned int max_sg_len;
+
+ /* zero the ctr value */
+ memset(iv + 15 - iv[0], 0, iv[0] + 1);
+
+	/* Page 78 of the NX workbook (nx_wb.pdf) states:
+	 * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
+	 * in length. If a full message is used, the AES CCA implementation
+	 * restricts the maximum AAD length to 2^32 - 1 bytes.
+	 * If partial messages are used, the implementation supports
+	 * 2^64 - 1 bytes maximum AAD length.
+	 *
+	 * However, in the cryptoapi's aead_request structure,
+	 * assoclen is an unsigned int, thus it cannot hold a length
+	 * value greater than 2^32 - 1.
+	 * The AAD is therefore further constrained and never exceeds
+	 * 2^32 - 1 bytes.
+	 */
+
+ if (!assoclen) {
+ b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
+ } else if (assoclen <= 14) {
+		/* if associated data is 14 bytes or less, we do 1 CCM
+		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
+		 * which is fed in through the source buffers here */
+ b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
+ b1 = nx_ctx->priv.ccm.iauth_tag;
+ iauth_len = assoclen;
+ } else if (assoclen <= 65280) {
+		/* if associated data is at most 65280 (2^16 - 2^8) bytes, we
+		 * construct B1 differently and feed the associated data to a
+		 * CCA operation */
+ b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+ b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+ iauth_len = 14;
+ } else {
+ b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+ b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+ iauth_len = 10;
+ }
+
+ /* generate B0 */
+ rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
+ if (rc)
+ return rc;
+
+ /* generate B1:
+ * add control info for associated data
+ * RFC 3610 and NIST Special Publication 800-38C
+ */
+ if (b1) {
+ memset(b1, 0, 16);
+ if (assoclen <= 65280) {
+ *(u16 *)b1 = assoclen;
+ scatterwalk_map_and_copy(b1 + 2, req->src, 0,
+ iauth_len, SCATTERWALK_FROM_SG);
+ } else {
+ *(u16 *)b1 = (u16)(0xfffe);
+ *(u32 *)&b1[2] = assoclen;
+ scatterwalk_map_and_copy(b1 + 6, req->src, 0,
+ iauth_len, SCATTERWALK_FROM_SG);
+ }
+ }
+
+ /* now copy any remaining AAD to scatterlist and call nx... */
+ if (!assoclen) {
+ return rc;
+ } else if (assoclen <= 14) {
+ unsigned int len = 16;
+
+ nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
+
+ if (len != 16)
+ return -EINVAL;
+
+ nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
+ nx_ctx->ap->sglen);
+
+ if (len != 16)
+ return -EINVAL;
+
+		/* inlen should be negative, indicating to phyp that it is a
+		 * pointer to an sg list */
+ nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
+ sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
+ sizeof(struct nx_sg);
+
+ NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;
+
+ result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ return rc;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
+
+ } else {
+ unsigned int processed = 0, to_process;
+
+ processed += iauth_len;
+
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ do {
+ to_process = min_t(u32, assoclen - processed,
+ nx_ctx->ap->databytelen);
+
+ nx_insg = nx_walk_and_build(nx_ctx->in_sg,
+ nx_ctx->ap->sglen,
+ req->src, processed,
+ &to_process);
+
+ if ((to_process + processed) < assoclen) {
+ NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
+ NX_FDM_INTERMEDIATE;
+ } else {
+ NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
+ ~NX_FDM_INTERMEDIATE;
+ }
+
+ nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
+ sizeof(struct nx_sg);
+
+ result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ return rc;
+
+ memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
+ nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
+ AES_BLOCK_SIZE);
+
+ NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
+
+ processed += to_process;
+ } while (processed < assoclen);
+
+ result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
+ }
+
+ memcpy(out, result, AES_BLOCK_SIZE);
+
+ return rc;
+}
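+
+/*
+ * The B1 length encodings above follow NIST SP 800-38C A.2.2: an AAD
+ * length a with 0 < a < 2^16 - 2^8 is encoded in 2 bytes, while larger
+ * (32-bit) lengths are prefixed with the marker 0xfffe and encoded in
+ * 4 bytes; for example, assoclen == 70000 is emitted as
+ * ff fe 00 01 11 70.
+ */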
+
+static int ccm_nx_decrypt(struct aead_request *req,
+ u8 *iv,
+ unsigned int assoclen)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned int nbytes = req->cryptlen;
+ unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+ struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ int rc = -1;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ nbytes -= authsize;
+
+ /* copy out the auth tag to compare with later */
+ scatterwalk_map_and_copy(priv->oauth_tag,
+ req->src, nbytes + req->assoclen, authsize,
+ SCATTERWALK_FROM_SG);
+
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
+ csbcpb->cpb.aes_ccm.in_pat_or_b0);
+ if (rc)
+ goto out;
+
+ do {
+
+		/* to_process: the data chunk to process in this update.
+		 * This value is bounded by the sg list limits.
+		 */
+ to_process = nbytes - processed;
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
+ &to_process, processed + req->assoclen,
+ csbcpb->cpb.aes_ccm.iv_or_ctr);
+ if (rc)
+ goto out;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+		/* on partial completion, carry the following state
+		 * into the next loop iteration
+		 */
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+ csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ccm.in_s0,
+ csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ /* update stats */
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < nbytes);
+
+ rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
+ authsize) ? -EBADMSG : 0;
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int ccm_nx_encrypt(struct aead_request *req,
+ u8 *iv,
+ unsigned int assoclen)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned int nbytes = req->cryptlen;
+ unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ int rc = -1;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
+ csbcpb->cpb.aes_ccm.in_pat_or_b0);
+ if (rc)
+ goto out;
+
+ do {
+		/* to_process: the data chunk to process in this update.
+		 * This value is bounded by the sg list limits.
+		 */
+ to_process = nbytes - processed;
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
+ &to_process, processed + req->assoclen,
+ csbcpb->cpb.aes_ccm.iv_or_ctr);
+ if (rc)
+ goto out;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+		/* on partial completion, carry the following state
+		 * into the next loop iteration
+		 */
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+ csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ccm.in_s0,
+ csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ /* update stats */
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+
+ } while (processed < nbytes);
+
+ /* copy out the auth tag */
+ scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
+ req->dst, nbytes + req->assoclen, authsize,
+ SCATTERWALK_TO_SG);
+
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int ccm4309_aes_nx_encrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ u8 *iv = rctx->iv;
+
+ iv[0] = 3;
+ memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+ memcpy(iv + 4, req->iv, 8);
+
+ return ccm_nx_encrypt(req, iv, req->assoclen - 8);
+}
+
+static int ccm_aes_nx_encrypt(struct aead_request *req)
+{
+ int rc;
+
+ rc = crypto_ccm_check_iv(req->iv);
+ if (rc)
+ return rc;
+
+ return ccm_nx_encrypt(req, req->iv, req->assoclen);
+}
+
+static int ccm4309_aes_nx_decrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ u8 *iv = rctx->iv;
+
+ iv[0] = 3;
+ memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+ memcpy(iv + 4, req->iv, 8);
+
+ return ccm_nx_decrypt(req, iv, req->assoclen - 8);
+}
+
+static int ccm_aes_nx_decrypt(struct aead_request *req)
+{
+ int rc;
+
+ rc = crypto_ccm_check_iv(req->iv);
+ if (rc)
+ return rc;
+
+ return ccm_nx_decrypt(req, req->iv, req->assoclen);
+}
+
+struct aead_alg nx_ccm_aes_alg = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = nx_crypto_ctx_aes_ccm_init,
+ .exit = nx_crypto_ctx_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm_aes_nx_set_key,
+ .setauthsize = ccm_aes_nx_setauthsize,
+ .encrypt = ccm_aes_nx_encrypt,
+ .decrypt = ccm_aes_nx_decrypt,
+};
+
+struct aead_alg nx_ccm4309_aes_alg = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = nx_crypto_ctx_aes_ccm_init,
+ .exit = nx_crypto_ctx_aead_exit,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm4309_aes_nx_set_key,
+ .setauthsize = ccm4309_aes_nx_setauthsize,
+ .encrypt = ccm4309_aes_nx_encrypt,
+ .decrypt = ccm4309_aes_nx_decrypt,
+};
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
new file mode 100644
index 000000000..02ad26012
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES CTR routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int ctr_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ case AES_KEYSIZE_192:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+ break;
+ case AES_KEYSIZE_256:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_CTR;
+ memcpy(csbcpb->cpb.aes_ctr.key, in_key, key_len);
+
+ return 0;
+}
+
+static int ctr3686_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
+
+ if (key_len < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ memcpy(nx_ctx->priv.ctr.nonce,
+ in_key + key_len - CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_NONCE_SIZE);
+
+ key_len -= CTR_RFC3686_NONCE_SIZE;
+
+ return ctr_aes_nx_set_key(tfm, in_key, key_len);
+}
+
+static int ctr_aes_nx_crypt(struct skcipher_request *req, u8 *iv)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ int rc;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ do {
+ to_process = req->cryptlen - processed;
+
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
+ &to_process, processed,
+ csbcpb->cpb.aes_ctr.iv);
+ if (rc)
+ goto out;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ memcpy(iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < req->cryptlen);
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int ctr3686_aes_nx_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
+ u8 iv[16];
+
+ memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
+ iv[12] = iv[13] = iv[14] = 0;
+ iv[15] = 1;
+
+ return ctr_aes_nx_crypt(req, iv);
+}
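+
+/*
+ * The 16-byte counter block assembled above follows RFC 3686 section 4:
+ * bytes 0-3 hold the key-derived nonce, bytes 4-11 the per-request IV,
+ * and bytes 12-15 the big-endian block counter, which starts at 1.
+ */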
+
+struct skcipher_alg nx_ctr3686_aes_alg = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "rfc3686-ctr-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ctr_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = ctr3686_aes_nx_set_key,
+ .encrypt = ctr3686_aes_nx_crypt,
+ .decrypt = ctr3686_aes_nx_crypt,
+ .chunksize = AES_BLOCK_SIZE,
+};
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
new file mode 100644
index 000000000..77e338dc3
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES ECB routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int ecb_aes_nx_set_key(struct crypto_skcipher *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ case AES_KEYSIZE_192:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+ break;
+ case AES_KEYSIZE_256:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+ memcpy(csbcpb->cpb.aes_ecb.key, in_key, key_len);
+
+ return 0;
+}
+
+static int ecb_aes_nx_crypt(struct skcipher_request *req,
+ int enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ int rc;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ if (enc)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ do {
+ to_process = req->cryptlen - processed;
+
+ rc = nx_build_sg_lists(nx_ctx, NULL, req->dst, req->src,
+ &to_process, processed, NULL);
+ if (rc)
+ goto out;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < req->cryptlen);
+
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int ecb_aes_nx_encrypt(struct skcipher_request *req)
+{
+ return ecb_aes_nx_crypt(req, 1);
+}
+
+static int ecb_aes_nx_decrypt(struct skcipher_request *req)
+{
+ return ecb_aes_nx_crypt(req, 0);
+}
+
+struct skcipher_alg nx_ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-nx",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_alignmask = 0xf,
+ .base.cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = nx_crypto_ctx_aes_ecb_init,
+ .exit = nx_crypto_ctx_skcipher_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ecb_aes_nx_set_key,
+ .encrypt = ecb_aes_nx_encrypt,
+ .decrypt = ecb_aes_nx_decrypt,
+};
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
new file mode 100644
index 000000000..19c6ed5ba
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES GCM routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ case AES_KEYSIZE_192:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+ NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+ break;
+ case AES_KEYSIZE_256:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+ NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+ memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);
+
+ csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
+ memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);
+
+ return 0;
+}
+
+static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
+ char *nonce = nx_ctx->priv.gcm.nonce;
+ int rc;
+
+ if (key_len < 4)
+ return -EINVAL;
+
+ key_len -= 4;
+
+ rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
+ if (rc)
+ goto out;
+
+ memcpy(nonce, in_key + key_len, 4);
+out:
+ return rc;
+}
+
+static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nx_gca(struct nx_crypto_ctx *nx_ctx,
+ struct aead_request *req,
+ u8 *out,
+ unsigned int assoclen)
+{
+ int rc;
+ struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
+ struct scatter_walk walk;
+ struct nx_sg *nx_sg = nx_ctx->in_sg;
+ unsigned int nbytes = assoclen;
+ unsigned int processed = 0, to_process;
+ unsigned int max_sg_len;
+
+ if (nbytes <= AES_BLOCK_SIZE) {
+ scatterwalk_start(&walk, req->src);
+ scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
+ scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
+ return 0;
+ }
+
+ NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
+
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ do {
+ /*
+ * to_process: the data chunk to process in this update.
+ * This value is bound by sg list limits.
+ */
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+
+ nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+ req->src, processed, &to_process);
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
+
+ nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
+ * sizeof(struct nx_sg);
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ return rc;
+
+ memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
+ csbcpb_aead->cpb.aes_gca.out_pat,
+ AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < nbytes);
+
+ memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
+
+ return rc;
+}
+
+static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
+{
+ int rc;
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *nx_sg;
+ unsigned int nbytes = assoclen;
+ unsigned int processed = 0, to_process;
+ unsigned int max_sg_len;
+
+ /* Set GMAC mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
+
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ /* Copy IV */
+ memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);
+
+ do {
+ /*
+ * to_process: the data chunk to process in this update.
+ * This value is bound by sg list limits.
+ */
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+
+ nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+ req->src, processed, &to_process);
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
+ * sizeof(struct nx_sg);
+
+ csbcpb->cpb.aes_gcm.bit_length_data = 0;
+ csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
+ csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.in_s0,
+ csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < nbytes);
+
+out:
+ /* Restore GCM mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+ return rc;
+}
+
+static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
+{
+ int rc;
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ char out[AES_BLOCK_SIZE];
+ struct nx_sg *in_sg, *out_sg;
+ int len;
+
+	/* For scenarios where the input message is zero length, AES CTR mode
+	 * may be used. Set the source data to be a single block (16B) of all
+	 * zeros, and set the input IV value to be the same as the GMAC IV
+	 * value. - nx_wb 4.8.1.3
+	 *
+	 * A single ECB encryption of the counter block is equivalent to
+	 * CTR-encrypting one block of zeros, which is what is done below.
+	 */
+
+ /* Change to ECB mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+ memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
+ sizeof(csbcpb->cpb.aes_ecb.key));
+ if (enc)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ len = AES_BLOCK_SIZE;
+
+ /* Encrypt the counter/IV */
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
+ &len, nx_ctx->ap->sglen);
+
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ len = sizeof(out);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
+ nx_ctx->ap->sglen);
+
+ if (len != sizeof(out))
+ return -EINVAL;
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+
+ /* Copy out the auth tag */
+ memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)));
+out:
+	/* Restore GCM mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+
+ /*
+	 * The ECB key occupies the same CPB region as the GCM AAD and
+	 * counter, so it is safe to simply fill it with zeroes.
+ */
+ memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));
+
+ return rc;
+}
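+
+/*
+ * For reference: with zero-length plaintext and AAD, GHASH of the empty
+ * input is zero, so the GCM tag reduces to E(K, J0). The single ECB
+ * encryption of the counter block in gcm_empty() therefore produces the
+ * tag directly.
+ */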
+
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
+ unsigned int assoclen)
+{
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned int nbytes = req->cryptlen;
+ unsigned int processed = 0, to_process;
+ unsigned long irq_flags;
+ int rc = -EINVAL;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ /* initialize the counter */
+ *(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;
+
+ if (nbytes == 0) {
+ if (assoclen == 0)
+ rc = gcm_empty(req, rctx->iv, enc);
+ else
+ rc = gmac(req, rctx->iv, assoclen);
+ if (rc)
+ goto out;
+ else
+ goto mac;
+ }
+
+ /* Process associated data */
+ csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
+ if (assoclen) {
+ rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
+ assoclen);
+ if (rc)
+ goto out;
+ }
+
+ /* Set flags for encryption */
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+ if (enc) {
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ } else {
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+ nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+ }
+
+ do {
+ to_process = nbytes - processed;
+
+ csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
+ rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
+ req->src, &to_process,
+ processed + req->assoclen,
+ csbcpb->cpb.aes_gcm.iv_or_cnt);
+
+ if (rc)
+ goto out;
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
+ csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.in_s0,
+ csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < nbytes);
+
+mac:
+ if (enc) {
+ /* copy out the auth tag */
+ scatterwalk_map_and_copy(
+ csbcpb->cpb.aes_gcm.out_pat_or_mac,
+ req->dst, req->assoclen + nbytes,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)),
+ SCATTERWALK_TO_SG);
+ } else {
+ u8 *itag = nx_ctx->priv.gcm.iauth_tag;
+ u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
+
+ scatterwalk_map_and_copy(
+ itag, req->src, req->assoclen + nbytes,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)),
+ SCATTERWALK_FROM_SG);
+ rc = crypto_memneq(itag, otag,
+ crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
+ -EBADMSG : 0;
+ }
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int gcm_aes_nx_encrypt(struct aead_request *req)
+{
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
+
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
+
+ return gcm_aes_nx_crypt(req, 1, req->assoclen);
+}
+
+static int gcm_aes_nx_decrypt(struct aead_request *req)
+{
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
+
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
+
+ return gcm_aes_nx_crypt(req, 0, req->assoclen);
+}
+
+static int gcm4106_aes_nx_encrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
+ char *nonce = nx_ctx->priv.gcm.nonce;
+
+ memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+ memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
+
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
+}
+
+static int gcm4106_aes_nx_decrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ char *iv = rctx->iv;
+ char *nonce = nx_ctx->priv.gcm.nonce;
+
+ memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+ memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
+
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
+}
+
+struct aead_alg nx_gcm_aes_alg = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-nx",
+ .cra_priority = 300,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = nx_crypto_ctx_aes_gcm_init,
+ .exit = nx_crypto_ctx_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = gcm_aes_nx_set_key,
+ .encrypt = gcm_aes_nx_encrypt,
+ .decrypt = gcm_aes_nx_decrypt,
+};
+
+struct aead_alg nx_gcm4106_aes_alg = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-nx",
+ .cra_priority = 300,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = nx_crypto_ctx_aes_gcm_init,
+ .exit = nx_crypto_ctx_aead_exit,
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = gcm4106_aes_nx_set_key,
+ .setauthsize = gcm4106_aes_nx_setauthsize,
+ .encrypt = gcm4106_aes_nx_encrypt,
+ .decrypt = gcm4106_aes_nx_decrypt,
+};
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
new file mode 100644
index 000000000..48dc1c98c
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+struct xcbc_state {
+ u8 state[AES_BLOCK_SIZE];
+ unsigned int count;
+ u8 buffer[AES_BLOCK_SIZE];
+};
+
+static int nx_xcbc_set_key(struct crypto_shash *desc,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
+
+ return 0;
+}
+
+/*
+ * Based on RFC 3566, for a zero-length message:
+ *
+ * n = 1
+ * K1 = E(K, 0x01010101010101010101010101010101)
+ * K3 = E(K, 0x03030303030303030303030303030303)
+ * E[0] = 0x00000000000000000000000000000000
+ * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
+ * E[1] = E(K1, M[1] ^ E[0] ^ K3)
+ * Tag = E[1]
+ */
+static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
+ u8 keys[2][AES_BLOCK_SIZE];
+ u8 key[32];
+ int rc = 0;
+ int len;
+
+ /* Change to ECB mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+ memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+
+ /* K1 and K3 base patterns */
+ memset(keys[0], 0x01, sizeof(keys[0]));
+ memset(keys[1], 0x03, sizeof(keys[1]));
+
+ len = sizeof(keys);
+ /* Generate K1 and K3 encrypting the patterns */
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
+ nx_ctx->ap->sglen);
+
+ if (len != sizeof(keys))
+ return -EINVAL;
+
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
+ nx_ctx->ap->sglen);
+
+ if (len != sizeof(keys))
+ return -EINVAL;
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
+ if (rc)
+ goto out;
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+
+	/* XOR K3 with the padding for a 0 length message */
+ keys[1][0] ^= 0x80;
+
+ len = sizeof(keys[1]);
+
+ /* Encrypt the final result */
+ memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
+ nx_ctx->ap->sglen);
+
+ if (len != sizeof(keys[1]))
+ return -EINVAL;
+
+ len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+ nx_ctx->ap->sglen);
+
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
+ if (rc)
+ goto out;
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+
+out:
+ /* Restore XCBC mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
+ memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ return rc;
+}
+
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ int err;
+
+ err = nx_crypto_ctx_aes_xcbc_init(tfm);
+ if (err)
+ return err;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
+
+ return 0;
+}
+
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
+
+ memset(sctx, 0, sizeof *sctx);
+
+ return 0;
+}
+
+static int nx_xcbc_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *in_sg;
+ struct nx_sg *out_sg;
+ u32 to_process = 0, leftover, total;
+ unsigned int max_sg_len;
+ unsigned long irq_flags;
+ int rc = 0;
+ int data_len;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ total = sctx->count + len;
+
+ /* 2 cases for total data len:
+ * 1: <= AES_BLOCK_SIZE: copy into state, return 0
+ * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
+ */
+ if (total <= AES_BLOCK_SIZE) {
+ memcpy(sctx->buffer + sctx->count, data, len);
+ sctx->count += len;
+ goto out;
+ }
+
+ in_sg = nx_ctx->in_sg;
+ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	/* the output is always the 16-byte MAC state */
+	data_len = AES_BLOCK_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+				  &data_len, nx_ctx->ap->sglen);
+
+	if (data_len != AES_BLOCK_SIZE) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ do {
+ to_process = total - to_process;
+ to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+
+ leftover = total - to_process;
+
+ /* the hardware will not accept a 0 byte operation for this
+ * algorithm and the operation MUST be finalized to be correct.
+ * So if we happen to get an update that falls on a block sized
+ * boundary, we must save off the last block to finalize with
+ * later. */
+ if (!leftover) {
+ to_process -= AES_BLOCK_SIZE;
+ leftover = AES_BLOCK_SIZE;
+ }
+
+ if (sctx->count) {
+ data_len = sctx->count;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ (u8 *) sctx->buffer,
+ &data_len,
+ max_sg_len);
+ if (data_len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ data_len = to_process - sctx->count;
+ in_sg = nx_build_sg_list(in_sg,
+ (u8 *) data,
+ &data_len,
+ max_sg_len);
+
+ if (data_len != to_process - sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+ sizeof(struct nx_sg);
+
+ /* we've hit the nx chip previously and we're updating again,
+ * so copy over the partial digest */
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac,
+ AES_BLOCK_SIZE);
+ }
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+
+ /* everything after the first update is continuation */
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ total -= to_process;
+ data += to_process - sctx->count;
+ sctx->count = 0;
+ in_sg = nx_ctx->in_sg;
+ } while (leftover > AES_BLOCK_SIZE);
+
+ /* copy the leftover back into the state struct */
+ memcpy(sctx->buffer, data, leftover);
+ sctx->count = leftover;
+
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
+ unsigned long irq_flags;
+ int rc = 0;
+ int len;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* we've hit the nx chip previously, now we're finalizing,
+ * so copy over the partial digest */
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
+ } else if (sctx->count == 0) {
+		/*
+		 * we've never seen an update, so this is a 0 byte op. The
+		 * hardware cannot handle a 0 byte op, so just use ECB to
+		 * generate the hash.
+		 */
+ rc = nx_xcbc_empty(desc, out);
+ goto out;
+ }
+
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ len = sctx->count;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
+ &len, nx_ctx->ap->sglen);
+
+ if (len != sctx->count) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+ nx_ctx->ap->sglen);
+
+ if (len != AES_BLOCK_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (!nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+
+ memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+struct shash_alg nx_shash_aes_xcbc_alg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .init = nx_xcbc_init,
+ .update = nx_xcbc_update,
+ .final = nx_xcbc_final,
+ .setkey = nx_xcbc_set_key,
+ .descsize = sizeof(struct xcbc_state),
+ .statesize = sizeof(struct xcbc_state),
+ .base = {
+ .cra_name = "xcbc(aes)",
+ .cra_driver_name = "xcbc-aes-nx",
+ .cra_priority = 300,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_init = nx_crypto_ctx_aes_xcbc_init2,
+ .cra_exit = nx_crypto_ctx_exit,
+ }
+};
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
new file mode 100644
index 000000000..8a4f10bb3
--- /dev/null
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -0,0 +1,1136 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for IBM PowerNV compression accelerator
+ *
+ * Copyright (C) 2015 Dan Streetman, IBM Corp
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "nx-842.h"
+
+#include <linux/timer.h>
+
+#include <asm/prom.h>
+#include <asm/icswx.h>
+#include <asm/vas.h>
+#include <asm/reg.h>
+#include <asm/opal-api.h>
+#include <asm/opal.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
+MODULE_DESCRIPTION("H/W Compression driver for IBM PowerNV processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
+
+#define WORKMEM_ALIGN (CRB_ALIGN)
+#define CSB_WAIT_MAX (5000) /* ms */
+#define VAS_RETRIES (10)
+
+struct nx842_workmem {
+ /* Below fields must be properly aligned */
+ struct coprocessor_request_block crb; /* CRB_ALIGN align */
+ struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */
+ struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */
+ /* Above fields must be properly aligned */
+
+ ktime_t start;
+
+ char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
+} __packed __aligned(WORKMEM_ALIGN);
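+
+/*
+ * A minimal allocation sketch (assuming a plain kmalloc'd buffer, not
+ * taken from this driver's callers): the padding member above leaves
+ * room to align the start of the structure before use, e.g.
+ *
+ *	wmem = PTR_ALIGN(kmalloc(sizeof(struct nx842_workmem),
+ *				 GFP_KERNEL), WORKMEM_ALIGN);
+ *
+ * so that the CRB lands on a CRB_ALIGN boundary.
+ */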
+
+struct nx_coproc {
+ unsigned int chip_id;
+	unsigned int ct;	/* Can be 842 or GZIP high/normal */
+ unsigned int ci; /* Coprocessor instance, used with icswx */
+ struct {
+ struct vas_window *rxwin;
+ int id;
+ } vas;
+ struct list_head list;
+};
+
+/*
+ * Per-CPU send window: requests are sent to the NX engine on the chip
+ * corresponding to the CPU on which the process is executing. Used by
+ * the VAS path.
+ */
+static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);
+
+/* no cpu hotplug on powernv, so this list never changes after init */
+static LIST_HEAD(nx_coprocs);
+static unsigned int nx842_ct; /* used in icswx function */
+
+/*
+ * Use the same coprocessor type (CT) values as skiboot, as defined in
+ * the NX workbook.
+ */
+#define NX_CT_GZIP (2) /* on P9 and later */
+#define NX_CT_842 (3)
+
+static int (*nx842_powernv_exec)(const unsigned char *in,
+ unsigned int inlen, unsigned char *out,
+ unsigned int *outlenp, void *workmem, int fc);
+
+/**
+ * setup_indirect_dde - Set up an indirect DDE
+ * @dde: indirect DDE to set up
+ * @ddl: list of direct DDEs the indirect DDE points to
+ * @dde_count: number of direct DDEs in @ddl
+ * @byte_count: total number of bytes covered by @ddl
+ *
+ * The DDE is set up with the DDE count, byte count, and address of the
+ * first direct DDE in the list.
+ */
+static void setup_indirect_dde(struct data_descriptor_entry *dde,
+ struct data_descriptor_entry *ddl,
+ unsigned int dde_count, unsigned int byte_count)
+{
+ dde->flags = 0;
+ dde->count = dde_count;
+ dde->index = 0;
+ dde->length = cpu_to_be32(byte_count);
+ dde->address = cpu_to_be64(nx842_get_pa(ddl));
+}
+
+/**
+ * setup_direct_dde - Set up a single DDE from a buffer
+ * @dde: DDE to set up
+ * @pa: physical address of the buffer
+ * @len: requested byte count
+ *
+ * The DDE is set up with the buffer and length, clamped so that it does
+ * not cross a page boundary. The buffer must be properly aligned.
+ *
+ * Returns:
+ * N Successfully set up DDE with N bytes
+ */
+static unsigned int setup_direct_dde(struct data_descriptor_entry *dde,
+ unsigned long pa, unsigned int len)
+{
+ unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa));
+
+ dde->flags = 0;
+ dde->count = 0;
+ dde->index = 0;
+ dde->length = cpu_to_be32(l);
+ dde->address = cpu_to_be64(pa);
+
+ return l;
+}
+
+/**
+ * setup_ddl - Set up a DDL from a buffer
+ *
+ * Returns:
+ * 0 Successfully set up DDL
+ * -EINVAL Buffer is misaligned, or input length is not a valid multiple
+ * -EMSGSIZE Input buffer is too large for the DDL
+ */
+static int setup_ddl(struct data_descriptor_entry *dde,
+ struct data_descriptor_entry *ddl,
+ unsigned char *buf, unsigned int len,
+ bool in)
+{
+ unsigned long pa = nx842_get_pa(buf);
+ int i, ret, total_len = len;
+
+ if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) {
+ pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n",
+ in ? "input" : "output", pa, DDE_BUFFER_ALIGN);
+ return -EINVAL;
+ }
+
+	/* Only the last buffer's length needs checking: the buffer must
+	 * be DDE_BUFFER_ALIGN aligned, which is a multiple of
+	 * DDE_BUFFER_SIZE_MULT, so every per-page DDE buffer before the
+	 * last is guaranteed to be a multiple of DDE_BUFFER_SIZE_MULT.
+	 */
+ if (len % DDE_BUFFER_LAST_MULT) {
+ pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n",
+ in ? "input" : "output", len, DDE_BUFFER_LAST_MULT);
+ if (in)
+ return -EINVAL;
+ len = round_down(len, DDE_BUFFER_LAST_MULT);
+ }
+
+ /* use a single direct DDE */
+ if (len <= LEN_ON_PAGE(pa)) {
+ ret = setup_direct_dde(dde, pa, len);
+ WARN_ON(ret < len);
+ return 0;
+ }
+
+ /* use the DDL */
+ for (i = 0; i < DDL_LEN_MAX && len > 0; i++) {
+ ret = setup_direct_dde(&ddl[i], pa, len);
+ buf += ret;
+ len -= ret;
+ pa = nx842_get_pa(buf);
+ }
+
+ if (len > 0) {
+ pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n",
+ total_len, in ? "input" : "output", len);
+ if (in)
+ return -EMSGSIZE;
+ total_len -= len;
+ }
+ setup_indirect_dde(dde, ddl, i, total_len);
+
+ return 0;
+}
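+
+/*
+ * Worked example for setup_ddl() (hypothetical sizes, 64K pages): a
+ * 0x11000-byte buffer starting 0x800 bytes into a page is split into
+ * direct DDEs of 0xf800 and then 0x1800 bytes, after which the indirect
+ * DDE records dde_count == 2 and the total byte count 0x11000.
+ */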
+
+#define CSB_ERR(csb, msg, ...) \
+ pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n", \
+ ##__VA_ARGS__, (csb)->flags, \
+ (csb)->cs, (csb)->cc, (csb)->ce, \
+ be32_to_cpu((csb)->count))
+
+#define CSB_ERR_ADDR(csb, msg, ...) \
+ CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \
+ (unsigned long)be64_to_cpu((csb)->address))
+
+/**
+ * wait_for_csb - Wait for CSB completion and decode the result
+ *
+ * Busy-wait, for at most CSB_WAIT_MAX ms measured from wmem->start, for
+ * the coprocessor to set the CSB valid bit, then translate the CSB
+ * completion code into an errno.
+ */
+static int wait_for_csb(struct nx842_workmem *wmem,
+ struct coprocessor_status_block *csb)
+{
+ ktime_t start = wmem->start, now = ktime_get();
+ ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
+
+ while (!(READ_ONCE(csb->flags) & CSB_V)) {
+ cpu_relax();
+ now = ktime_get();
+ if (ktime_after(now, timeout))
+ break;
+ }
+
+ /* hw has updated csb and output buffer */
+ barrier();
+
+ /* check CSB flags */
+ if (!(csb->flags & CSB_V)) {
+ CSB_ERR(csb, "CSB still not valid after %ld us, giving up",
+ (long)ktime_us_delta(now, start));
+ return -ETIMEDOUT;
+ }
+ if (csb->flags & CSB_F) {
+ CSB_ERR(csb, "Invalid CSB format");
+ return -EPROTO;
+ }
+ if (csb->flags & CSB_CH) {
+ CSB_ERR(csb, "Invalid CSB chaining state");
+ return -EPROTO;
+ }
+
+ /* verify CSB completion sequence is 0 */
+ if (csb->cs) {
+ CSB_ERR(csb, "Invalid CSB completion sequence");
+ return -EPROTO;
+ }
+
+ /* check CSB Completion Code */
+ switch (csb->cc) {
+ /* no error */
+ case CSB_CC_SUCCESS:
+ break;
+ case CSB_CC_TPBC_GT_SPBC:
+ /* not an error, but the compressed data is
+ * larger than the uncompressed data :(
+ */
+ break;
+
+ /* input data errors */
+ case CSB_CC_OPERAND_OVERLAP:
+ /* input and output buffers overlap */
+ CSB_ERR(csb, "Operand Overlap error");
+ return -EINVAL;
+ case CSB_CC_INVALID_OPERAND:
+ CSB_ERR(csb, "Invalid operand");
+ return -EINVAL;
+ case CSB_CC_NOSPC:
+ /* output buffer too small */
+ return -ENOSPC;
+ case CSB_CC_ABORT:
+ CSB_ERR(csb, "Function aborted");
+ return -EINTR;
+ case CSB_CC_CRC_MISMATCH:
+ CSB_ERR(csb, "CRC mismatch");
+ return -EINVAL;
+ case CSB_CC_TEMPL_INVALID:
+ CSB_ERR(csb, "Compressed data template invalid");
+ return -EINVAL;
+ case CSB_CC_TEMPL_OVERFLOW:
+ CSB_ERR(csb, "Compressed data template shows data past end");
+ return -EINVAL;
+ case CSB_CC_EXCEED_BYTE_COUNT: /* P9 or later */
+ /*
+ * DDE byte count exceeds the limit specified in Maximum
+ * byte count register.
+ */
+ CSB_ERR(csb, "DDE byte count exceeds the limit");
+ return -EINVAL;
+
+ /* these should not happen */
+ case CSB_CC_INVALID_ALIGN:
+ /* setup_ddl should have detected this */
+ CSB_ERR_ADDR(csb, "Invalid alignment");
+ return -EINVAL;
+ case CSB_CC_DATA_LENGTH:
+ /* setup_ddl should have detected this */
+ CSB_ERR(csb, "Invalid data length");
+ return -EINVAL;
+ case CSB_CC_WR_TRANSLATION:
+ case CSB_CC_TRANSLATION:
+ case CSB_CC_TRANSLATION_DUP1:
+ case CSB_CC_TRANSLATION_DUP2:
+ case CSB_CC_TRANSLATION_DUP3:
+ case CSB_CC_TRANSLATION_DUP4:
+ case CSB_CC_TRANSLATION_DUP5:
+ case CSB_CC_TRANSLATION_DUP6:
+ /* should not happen, we use physical addrs */
+ CSB_ERR_ADDR(csb, "Translation error");
+ return -EPROTO;
+ case CSB_CC_WR_PROTECTION:
+ case CSB_CC_PROTECTION:
+ case CSB_CC_PROTECTION_DUP1:
+ case CSB_CC_PROTECTION_DUP2:
+ case CSB_CC_PROTECTION_DUP3:
+ case CSB_CC_PROTECTION_DUP4:
+ case CSB_CC_PROTECTION_DUP5:
+ case CSB_CC_PROTECTION_DUP6:
+ /* should not happen, we use physical addrs */
+ CSB_ERR_ADDR(csb, "Protection error");
+ return -EPROTO;
+ case CSB_CC_PRIVILEGE:
+ /* shouldn't happen, we're in HYP mode */
+ CSB_ERR(csb, "Insufficient Privilege error");
+ return -EPROTO;
+ case CSB_CC_EXCESSIVE_DDE:
+ /* shouldn't happen, setup_ddl doesn't use many dde's */
+ CSB_ERR(csb, "Too many DDEs in DDL");
+ return -EINVAL;
+ case CSB_CC_TRANSPORT:
+ case CSB_CC_INVALID_CRB: /* P9 or later */
+ /* shouldn't happen, we setup CRB correctly */
+ CSB_ERR(csb, "Invalid CRB");
+ return -EINVAL;
+ case CSB_CC_INVALID_DDE: /* P9 or later */
+ /*
+ * shouldn't happen, setup_direct/indirect_dde creates
+ * DDE right
+ */
+ CSB_ERR(csb, "Invalid DDE");
+ return -EINVAL;
+ case CSB_CC_SEGMENTED_DDL:
+ /* shouldn't happen, setup_ddl creates DDL right */
+ CSB_ERR(csb, "Segmented DDL error");
+ return -EINVAL;
+ case CSB_CC_DDE_OVERFLOW:
+ /* shouldn't happen, setup_ddl creates DDL right */
+ CSB_ERR(csb, "DDE overflow error");
+ return -EINVAL;
+ case CSB_CC_SESSION:
+ /* should not happen with ICSWX */
+ CSB_ERR(csb, "Session violation error");
+ return -EPROTO;
+ case CSB_CC_CHAIN:
+ /* should not happen, we don't use chained CRBs */
+ CSB_ERR(csb, "Chained CRB error");
+ return -EPROTO;
+ case CSB_CC_SEQUENCE:
+ /* should not happen, we don't use chained CRBs */
+ CSB_ERR(csb, "CRB sequence number error");
+ return -EPROTO;
+ case CSB_CC_UNKNOWN_CODE:
+ CSB_ERR(csb, "Unknown subfunction code");
+ return -EPROTO;
+
+ /* hardware errors */
+ case CSB_CC_RD_EXTERNAL:
+ case CSB_CC_RD_EXTERNAL_DUP1:
+ case CSB_CC_RD_EXTERNAL_DUP2:
+ case CSB_CC_RD_EXTERNAL_DUP3:
+ CSB_ERR_ADDR(csb, "Read error outside coprocessor");
+ return -EPROTO;
+ case CSB_CC_WR_EXTERNAL:
+ CSB_ERR_ADDR(csb, "Write error outside coprocessor");
+ return -EPROTO;
+ case CSB_CC_INTERNAL:
+ CSB_ERR(csb, "Internal error in coprocessor");
+ return -EPROTO;
+ case CSB_CC_PROVISION:
+ CSB_ERR(csb, "Storage provision error");
+ return -EPROTO;
+ case CSB_CC_HW:
+ CSB_ERR(csb, "Correctable hardware error");
+ return -EPROTO;
+ case CSB_CC_HW_EXPIRED_TIMER: /* P9 or later */
+ CSB_ERR(csb, "Job did not finish within allowed time");
+ return -EPROTO;
+
+ default:
+ CSB_ERR(csb, "Invalid CC %d", csb->cc);
+ return -EPROTO;
+ }
+
+ /* check Completion Extension state */
+ if (csb->ce & CSB_CE_TERMINATION) {
+ CSB_ERR(csb, "CSB request was terminated");
+ return -EPROTO;
+ }
+ if (csb->ce & CSB_CE_INCOMPLETE) {
+ CSB_ERR(csb, "CSB request not complete");
+ return -EPROTO;
+ }
+ if (!(csb->ce & CSB_CE_TPBC)) {
+ CSB_ERR(csb, "TPBC not provided, unknown target length");
+ return -EPROTO;
+ }
+
+ /* successful completion */
+ pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+ be32_to_cpu(csb->count),
+ (unsigned long)ktime_us_delta(now, start));
+
+ return 0;
+}
+
+static int nx842_config_crb(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int outlen,
+ struct nx842_workmem *wmem)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ u64 csb_addr;
+ int ret;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ /* Clear any previous values */
+ memset(crb, 0, sizeof(*crb));
+
+ /* set up DDLs */
+ ret = setup_ddl(&crb->source, wmem->ddl_in,
+ (unsigned char *)in, inlen, true);
+ if (ret)
+ return ret;
+
+ ret = setup_ddl(&crb->target, wmem->ddl_out,
+ out, outlen, false);
+ if (ret)
+ return ret;
+
+ /* set up CRB's CSB addr */
+ csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
+ csb_addr |= CRB_CSB_AT; /* Addrs are phys */
+ crb->csb_addr = cpu_to_be64(csb_addr);
+
+ return 0;
+}
+
+/**
+ * nx842_exec_icswx - compress/decompress data using the 842 algorithm
+ *
+ * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * This compresses or decompresses the provided input buffer into the provided
+ * output buffer.
+ *
+ * Upon return from this function *@outlenp contains the length of the
+ * output data. If there is an error then *@outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * The @workmem buffer should only be used by one function call at a time.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ * @fc: function code, see CCW Function Codes in nx-842.h
+ *
+ * Returns:
+ * 0 Success, output of length @outlenp stored in the buffer at @out
+ * -ENODEV Hardware unavailable
+ * -ENOSPC Output buffer is too small
+ * -EMSGSIZE Input buffer too large
+ * -EINVAL buffer constraints do not fit nx842_constraints
+ * -EPROTO hardware error during operation
+ * -ETIMEDOUT hardware did not complete operation in reasonable time
+ * -EINTR operation was aborted
+ */
+static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *workmem, int fc)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ struct nx842_workmem *wmem;
+ int ret;
+ u32 ccw;
+ unsigned int outlen = *outlenp;
+
+ wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
+
+ *outlenp = 0;
+
+	/* shouldn't happen, we don't load without a coproc */
+ if (!nx842_ct) {
+ pr_err_ratelimited("coprocessor CT is 0");
+ return -ENODEV;
+ }
+
+ ret = nx842_config_crb(in, inlen, out, outlen, wmem);
+ if (ret)
+ return ret;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ /* set up CCW */
+ ccw = 0;
+ ccw = SET_FIELD(CCW_CT, ccw, nx842_ct);
+ ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */
+ ccw = SET_FIELD(CCW_FC_842, ccw, fc);
+
+ wmem->start = ktime_get();
+
+ /* do ICSWX */
+ ret = icswx(cpu_to_be32(ccw), crb);
+
+ pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret,
+ (unsigned int)ccw,
+ (unsigned int)be32_to_cpu(crb->ccw));
+
+ /*
+	 * The NX842 coprocessor sets the 3rd bit in the CR register
+	 * from XER[S0]. XER[S0] is the integer summary overflow bit,
+	 * which has nothing to do with NX. Since this bit can be set
+	 * alongside other return values, mask it off.
+ */
+ ret &= ~ICSWX_XERS0;
+
+ switch (ret) {
+ case ICSWX_INITIATED:
+ ret = wait_for_csb(wmem, csb);
+ break;
+ case ICSWX_BUSY:
+ pr_debug_ratelimited("842 Coprocessor busy\n");
+ ret = -EBUSY;
+ break;
+ case ICSWX_REJECTED:
+ pr_err_ratelimited("ICSWX rejected\n");
+ ret = -EPROTO;
+ break;
+ }
+
+ if (!ret)
+ *outlenp = be32_to_cpu(csb->count);
+
+ return ret;
+}
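+
+/*
+ * Usage sketch (illustrative): how the compress path drives this
+ * function; @workmem must be nx842_powernv_driver.workmem_size bytes:
+ *
+ *	unsigned int outlen = sizeof(outbuf);
+ *	ret = nx842_exec_icswx(inbuf, inlen, outbuf, &outlen, workmem,
+ *			       CCW_FC_842_COMP_CRC);
+ *	// on success, outlen holds the compressed length
+ */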
+
+/**
+ * nx842_exec_vas - compress/decompress data using the 842 algorithm
+ *
+ * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * This compresses or decompresses the provided input buffer into the provided
+ * output buffer.
+ *
+ * Upon return from this function *@outlenp contains the length of the
+ * output data. If there is an error then *@outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * The @workmem buffer should only be used by one function call at a time.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ * @fc: function code, see CCW Function Codes in nx-842.h
+ *
+ * Returns:
+ * 0 Success, output of length @outlenp stored in the buffer
+ * at @out
+ * -ENODEV Hardware unavailable
+ * -ENOSPC Output buffer is too small
+ * -EMSGSIZE Input buffer too large
+ * -EINVAL buffer constraints do not fit nx842_constraints
+ * -EPROTO hardware error during operation
+ * -ETIMEDOUT hardware did not complete operation in reasonable time
+ * -EINTR operation was aborted
+ */
+static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *workmem, int fc)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ struct nx842_workmem *wmem;
+ struct vas_window *txwin;
+ int ret, i = 0;
+ u32 ccw;
+ unsigned int outlen = *outlenp;
+
+ wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
+
+ *outlenp = 0;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ ret = nx842_config_crb(in, inlen, out, outlen, wmem);
+ if (ret)
+ return ret;
+
+ ccw = 0;
+ ccw = SET_FIELD(CCW_FC_842, ccw, fc);
+ crb->ccw = cpu_to_be32(ccw);
+
+ do {
+ wmem->start = ktime_get();
+ preempt_disable();
+ txwin = this_cpu_read(cpu_txwin);
+
+ /*
+		 * VAS copies the CRB into the L2 cache; see <asm/vas.h>
+		 * for the @crb and @offset arguments.
+ */
+ vas_copy_crb(crb, 0);
+
+ /*
+		 * VAS pastes the previously copied CRB to the NX engine:
+		 * @txwin, @offset and @last (must be true).
+ */
+ ret = vas_paste_crb(txwin, 0, 1);
+ preempt_enable();
+ /*
+		 * Retry the copy/paste on transient VAS failures.
+ */
+ } while (ret && (i++ < VAS_RETRIES));
+
+ if (ret) {
+ pr_err_ratelimited("VAS copy/paste failed\n");
+ return ret;
+ }
+
+ ret = wait_for_csb(wmem, csb);
+ if (!ret)
+ *outlenp = be32_to_cpu(csb->count);
+
+ return ret;
+}
+
+/**
+ * nx842_powernv_compress - Compress data using the 842 algorithm
+ *
+ * Compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * The input buffer is compressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function *@outlenp contains the length of the
+ * compressed data. If there is an error then *@outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @wmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ *
+ * Returns: see nx842_powernv_exec()
+ */
+static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *wmem)
+{
+ return nx842_powernv_exec(in, inlen, out, outlenp,
+ wmem, CCW_FC_842_COMP_CRC);
+}
+
+/**
+ * nx842_powernv_decompress - Decompress data using the 842 algorithm
+ *
+ * Decompression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * The input buffer is decompressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function *@outlenp contains the length of the
+ * decompressed data. If there is an error then *@outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @wmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ *
+ * Returns: see nx842_powernv_exec()
+ */
+static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *wmem)
+{
+ return nx842_powernv_exec(in, inlen, out, outlenp,
+ wmem, CCW_FC_842_DECOMP_CRC);
+}
+
+static inline void nx_add_coprocs_list(struct nx_coproc *coproc,
+ int chipid)
+{
+ coproc->chip_id = chipid;
+ INIT_LIST_HEAD(&coproc->list);
+ list_add(&coproc->list, &nx_coprocs);
+}
+
+static struct vas_window *nx_alloc_txwin(struct nx_coproc *coproc)
+{
+ struct vas_window *txwin = NULL;
+ struct vas_tx_win_attr txattr;
+
+ /*
+ * Kernel requests will be high priority. So open send
+ * windows only for high priority RxFIFO entries.
+ */
+ vas_init_tx_win_attr(&txattr, coproc->ct);
+ txattr.lpid = 0; /* lpid is 0 for kernel requests */
+
+ /*
+	 * Open a VAS send window, which is used to send requests to NX.
+ */
+ txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
+ if (IS_ERR(txwin))
+ pr_err("ibm,nx-842: Can not open TX window: %ld\n",
+ PTR_ERR(txwin));
+
+ return txwin;
+}
+
+/*
+ * Identify the chip ID for each CPU, open a send window for the
+ * corresponding NX engine and save the txwin in the percpu cpu_txwin.
+ * cpu_txwin is used in the copy/paste operation for each compression /
+ * decompression request.
+ */
+static int nx_open_percpu_txwins(void)
+{
+ struct nx_coproc *coproc, *n;
+ unsigned int i, chip_id;
+
+ for_each_possible_cpu(i) {
+ struct vas_window *txwin = NULL;
+
+ chip_id = cpu_to_chip_id(i);
+
+ list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
+ /*
+			 * Kernel requests use only high-priority FIFOs, so
+			 * open send windows only for those.
+			 * GZIP is not supported in the kernel right now.
+ */
+
+ if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
+ continue;
+
+ if (coproc->chip_id == chip_id) {
+ txwin = nx_alloc_txwin(coproc);
+ if (IS_ERR(txwin))
+ return PTR_ERR(txwin);
+
+ per_cpu(cpu_txwin, i) = txwin;
+ break;
+ }
+ }
+
+ if (!per_cpu(cpu_txwin, i)) {
+			/* shouldn't happen, each chip should have an NX engine */
+ pr_err("NX engine is not available for CPU %d\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int __init nx_set_ct(struct nx_coproc *coproc, const char *priority,
+ int high, int normal)
+{
+ if (!strcmp(priority, "High"))
+ coproc->ct = high;
+ else if (!strcmp(priority, "Normal"))
+ coproc->ct = normal;
+ else {
+ pr_err("Invalid RxFIFO priority value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
+ int vasid, int type, int *ct)
+{
+ struct vas_window *rxwin = NULL;
+ struct vas_rx_win_attr rxattr;
+ u32 lpid, pid, tid, fifo_size;
+ struct nx_coproc *coproc;
+ u64 rx_fifo;
+ const char *priority;
+ int ret;
+
+ ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo);
+ if (ret) {
+ pr_err("Missing rx-fifo-address property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size);
+ if (ret) {
+ pr_err("Missing rx-fifo-size property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "lpid", &lpid);
+ if (ret) {
+ pr_err("Missing lpid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "pid", &pid);
+ if (ret) {
+ pr_err("Missing pid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "tid", &tid);
+ if (ret) {
+ pr_err("Missing tid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_string(dn, "priority", &priority);
+ if (ret) {
+ pr_err("Missing priority property\n");
+ return ret;
+ }
+
+ coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
+ if (!coproc)
+ return -ENOMEM;
+
+ if (type == NX_CT_842)
+ ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_842_HIPRI,
+ VAS_COP_TYPE_842);
+ else if (type == NX_CT_GZIP)
+ ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_GZIP_HIPRI,
+ VAS_COP_TYPE_GZIP);
+
+ if (ret)
+ goto err_out;
+
+ vas_init_rx_win_attr(&rxattr, coproc->ct);
+ rxattr.rx_fifo = rx_fifo;
+ rxattr.rx_fifo_size = fifo_size;
+ rxattr.lnotify_lpid = lpid;
+ rxattr.lnotify_pid = pid;
+ rxattr.lnotify_tid = tid;
+ /*
+	 * The maximum number of RX window credits cannot exceed the
+	 * number of CRBs in the RxFIFO; otherwise the RxFIFO can
+	 * overrun, which can cause a checkstop.
+ */
+ rxattr.wcreds_max = fifo_size / CRB_SIZE;
+
+ /*
+	 * Open a VAS receive window, which is used to configure the
+	 * RxFIFO for NX.
+ */
+ rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr);
+ if (IS_ERR(rxwin)) {
+ ret = PTR_ERR(rxwin);
+		pr_err("setting RxFIFO with VAS failed: %d\n", ret);
+ goto err_out;
+ }
+
+ coproc->vas.rxwin = rxwin;
+ coproc->vas.id = vasid;
+ nx_add_coprocs_list(coproc, chip_id);
+
+ /*
+	 * The (lpid, pid, tid) combination has to be unique for each
+	 * coprocessor instance in the system, so skiboot uses the
+	 * coprocessor type (such as 842 or GZIP) as the pid and
+	 * provides this value to the kernel in the "pid" device-tree
+	 * property.
+ */
+ *ct = pid;
+
+ return 0;
+
+err_out:
+ kfree(coproc);
+ return ret;
+}
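+
+/*
+ * Illustrative NX FIFO device-tree node (values made up) with the
+ * properties read above:
+ *
+ *	rx-fifo-address = <0x0 0x20000000>;
+ *	rx-fifo-size = <0x8000>;
+ *	lpid = <0xfff>;
+ *	pid = <0x2f5>;		// coprocessor type, see comment above
+ *	tid = <0>;
+ *	priority = "High";	// or "Normal"
+ */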
+
+static int __init nx_coproc_init(int chip_id, int ct_842, int ct_gzip)
+{
+ int ret = 0;
+
+ if (opal_check_token(OPAL_NX_COPROC_INIT)) {
+ ret = opal_nx_coproc_init(chip_id, ct_842);
+
+ if (!ret)
+ ret = opal_nx_coproc_init(chip_id, ct_gzip);
+
+ if (ret) {
+ ret = opal_error_code(ret);
+ pr_err("Failed to initialize NX for chip(%d): %d\n",
+ chip_id, ret);
+ }
+	} else {
+		pr_warn("Firmware doesn't support NX initialization\n");
+	}
+
+ return ret;
+}
+
+static int __init find_nx_device_tree(struct device_node *dn, int chip_id,
+ int vasid, int type, char *devname,
+ int *ct)
+{
+ int ret = 0;
+
+ if (of_device_is_compatible(dn, devname)) {
+ ret = vas_cfg_coproc_info(dn, chip_id, vasid, type, ct);
+ if (ret)
+ of_node_put(dn);
+ }
+
+ return ret;
+}
+
+static int __init nx_powernv_probe_vas(struct device_node *pn)
+{
+ int chip_id, vasid, ret = 0;
+ int ct_842 = 0, ct_gzip = 0;
+ struct device_node *dn;
+
+ chip_id = of_get_ibm_chip_id(pn);
+ if (chip_id < 0) {
+ pr_err("ibm,chip-id missing\n");
+ return -EINVAL;
+ }
+
+ vasid = chip_to_vas_id(chip_id);
+ if (vasid < 0) {
+ pr_err("Unable to map chip_id %d to vasid\n", chip_id);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(pn, dn) {
+ ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842,
+ "ibm,p9-nx-842", &ct_842);
+
+ if (!ret)
+ ret = find_nx_device_tree(dn, chip_id, vasid,
+ NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!ct_842 || !ct_gzip) {
+ pr_err("NX FIFO nodes are missing\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize NX instance for both high and normal priority FIFOs.
+ */
+ ret = nx_coproc_init(chip_id, ct_842, ct_gzip);
+
+ return ret;
+}
+
+static int __init nx842_powernv_probe(struct device_node *dn)
+{
+ struct nx_coproc *coproc;
+ unsigned int ct, ci;
+ int chip_id;
+
+ chip_id = of_get_ibm_chip_id(dn);
+ if (chip_id < 0) {
+ pr_err("ibm,chip-id missing\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) {
+ pr_err("ibm,842-coprocessor-type missing\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) {
+ pr_err("ibm,842-coprocessor-instance missing\n");
+ return -EINVAL;
+ }
+
+ coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
+ if (!coproc)
+ return -ENOMEM;
+
+ coproc->ct = ct;
+ coproc->ci = ci;
+ nx_add_coprocs_list(coproc, chip_id);
+
+ pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);
+
+ if (!nx842_ct)
+ nx842_ct = ct;
+ else if (nx842_ct != ct)
+ pr_err("NX842 chip %d, CT %d != first found CT %d\n",
+ chip_id, ct, nx842_ct);
+
+ return 0;
+}
+
+static void nx_delete_coprocs(void)
+{
+ struct nx_coproc *coproc, *n;
+ struct vas_window *txwin;
+ int i;
+
+ /*
+	 * Close the percpu txwins opened for the corresponding coprocs.
+ */
+ for_each_possible_cpu(i) {
+ txwin = per_cpu(cpu_txwin, i);
+ if (txwin)
+ vas_win_close(txwin);
+
+ per_cpu(cpu_txwin, i) = NULL;
+ }
+
+ list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
+ if (coproc->vas.rxwin)
+ vas_win_close(coproc->vas.rxwin);
+
+ list_del(&coproc->list);
+ kfree(coproc);
+ }
+}
+
+static struct nx842_constraints nx842_powernv_constraints = {
+ .alignment = DDE_BUFFER_ALIGN,
+ .multiple = DDE_BUFFER_LAST_MULT,
+ .minimum = DDE_BUFFER_LAST_MULT,
+ .maximum = (DDL_LEN_MAX - 1) * PAGE_SIZE,
+};
+
+static struct nx842_driver nx842_powernv_driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .workmem_size = sizeof(struct nx842_workmem),
+ .constraints = &nx842_powernv_constraints,
+ .compress = nx842_powernv_compress,
+ .decompress = nx842_powernv_decompress,
+};
+
+static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+{
+ return nx842_crypto_init(tfm, &nx842_powernv_driver);
+}
+
+static struct crypto_alg nx842_powernv_alg = {
+ .cra_name = "842",
+ .cra_driver_name = "842-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = nx842_powernv_crypto_init,
+ .cra_exit = nx842_crypto_exit,
+ .cra_u = { .compress = {
+ .coa_compress = nx842_crypto_compress,
+ .coa_decompress = nx842_crypto_decompress } }
+};
+
+static __init int nx_compress_powernv_init(void)
+{
+ struct device_node *dn;
+ int ret;
+
+ /* verify workmem size/align restrictions */
+ BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
+ BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN);
+ BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN);
+ /* verify buffer size/align restrictions */
+ BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN);
+ BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
+ BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
+
+ for_each_compatible_node(dn, NULL, "ibm,power9-nx") {
+ ret = nx_powernv_probe_vas(dn);
+ if (ret) {
+ nx_delete_coprocs();
+ of_node_put(dn);
+ return ret;
+ }
+ }
+
+ if (list_empty(&nx_coprocs)) {
+ for_each_compatible_node(dn, NULL, "ibm,power-nx")
+ nx842_powernv_probe(dn);
+
+ if (!nx842_ct)
+ return -ENODEV;
+
+ nx842_powernv_exec = nx842_exec_icswx;
+ } else {
+ /*
+		 * Register the VAS user space API for NX GZIP so that
+		 * user space can use the GZIP engine. The high FIFO
+		 * priority is used for kernel requests and the normal
+		 * FIFO priority is assigned to user space.
+		 * 842 compression is supported only in the kernel.
+ */
+ ret = vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
+ "nx-gzip");
+
+ /*
+		 * GZIP is not supported in the kernel right now,
+		 * so open TX windows only for 842.
+ */
+ if (!ret)
+ ret = nx_open_percpu_txwins();
+
+ if (ret) {
+ nx_delete_coprocs();
+ return ret;
+ }
+
+ nx842_powernv_exec = nx842_exec_vas;
+ }
+
+ ret = crypto_register_alg(&nx842_powernv_alg);
+ if (ret) {
+ nx_delete_coprocs();
+ return ret;
+ }
+
+ return 0;
+}
+module_init(nx_compress_powernv_init);
+
+static void __exit nx_compress_powernv_exit(void)
+{
+ /*
+	 * The GZIP engine is supported only on POWER9 or later, and
+	 * nx842_ct is only set on POWER8 (icswx).
+	 * The VAS API for NX GZIP was registered during init for user
+	 * space use, so unregister it here.
+ */
+ if (!nx842_ct)
+ vas_unregister_coproc_api();
+
+ crypto_unregister_alg(&nx842_powernv_alg);
+
+ nx_delete_coprocs();
+}
+module_exit(nx_compress_powernv_exit);
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
new file mode 100644
index 000000000..02fb53453
--- /dev/null
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/module.h>
+#include <asm/vio.h>
+#include <asm/byteorder.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
+
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
+
+ NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
+
+ return 0;
+}
+
+static int nx_sha256_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memset(sctx, 0, sizeof *sctx);
+
+ sctx->state[0] = __cpu_to_be32(SHA256_H0);
+ sctx->state[1] = __cpu_to_be32(SHA256_H1);
+ sctx->state[2] = __cpu_to_be32(SHA256_H2);
+ sctx->state[3] = __cpu_to_be32(SHA256_H3);
+ sctx->state[4] = __cpu_to_be32(SHA256_H4);
+ sctx->state[5] = __cpu_to_be32(SHA256_H5);
+ sctx->state[6] = __cpu_to_be32(SHA256_H6);
+ sctx->state[7] = __cpu_to_be32(SHA256_H7);
+ sctx->count = 0;
+
+ return 0;
+}
+
+static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *out_sg;
+ u64 to_process = 0, leftover, total;
+ unsigned long irq_flags;
+ int rc = 0;
+ int data_len;
+ u32 max_sg_len;
+ u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ /* 2 cases for total data len:
+ * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
+ * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
+ */
+ total = (sctx->count % SHA256_BLOCK_SIZE) + len;
+ if (total < SHA256_BLOCK_SIZE) {
+ memcpy(sctx->buf + buf_len, data, len);
+ sctx->count += len;
+ goto out;
+ }
+
+ memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ data_len = SHA256_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA256_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ do {
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
+
+ if (buf_len) {
+ data_len = buf_len;
+ in_sg = nx_build_sg_list(in_sg,
+ (u8 *) sctx->buf,
+ &data_len,
+ max_sg_len);
+
+ if (data_len != buf_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+ used_sgs = in_sg - nx_ctx->in_sg;
+ }
+
+		/* to_process: the SHA256_BLOCK_SIZE-aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by the sg list limits and by the number of sgs we
+		 * already used for leftover data (see above).
+		 * Ideally we could allow NX_PAGE_SIZE * max_sg_len, but
+		 * because the data may not be page aligned, we need to
+		 * account for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
+ data_len = to_process - buf_len;
+ in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+ &data_len, max_sg_len);
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+
+ to_process = data_len + buf_len;
+ leftover = total - to_process;
+
+ /*
+ * we've hit the nx chip previously and we're updating
+ * again, so copy over the partial digest.
+ */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest,
+ csbcpb->cpb.sha256.message_digest,
+ SHA256_DIGEST_SIZE);
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->sha256_ops));
+
+ total -= to_process;
+ data += to_process - buf_len;
+ buf_len = 0;
+
+ } while (leftover >= SHA256_BLOCK_SIZE);
+
+ /* copy the leftover back into the state struct */
+ if (leftover)
+ memcpy(sctx->buf, data, leftover);
+
+ sctx->count += len;
+ memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
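+
+/*
+ * Worked example (illustrative) of the buffering above, with
+ * SHA256_BLOCK_SIZE == 64: if buf_len == 36 and len == 200, then
+ * total == 236; to_process is rounded down to a block multiple, so
+ * up to 192 bytes go to the NX engine in one pass and the remaining
+ * 44 bytes are copied back into sctx->buf as leftover.
+ */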
+
+static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
+ unsigned long irq_flags;
+ u32 max_sg_len;
+ int rc = 0;
+ int len;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ if (sctx->count >= SHA256_BLOCK_SIZE) {
+ /* we've hit the nx chip previously, now we're finalizing,
+ * so copy over the partial digest */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ } else {
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+ }
+
+ csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
+
+ len = sctx->count & (SHA256_BLOCK_SIZE - 1);
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
+ &len, max_sg_len);
+
+ if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ len = SHA256_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
+
+ if (len != SHA256_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ if (!nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->sha256_ops));
+
+ atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
+ memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int nx_sha256_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+
+ return 0;
+}
+
+static int nx_sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+
+ return 0;
+}
+
+struct shash_alg nx_shash_sha256_alg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = nx_sha256_init,
+ .update = nx_sha256_update,
+ .final = nx_sha256_final,
+ .export = nx_sha256_export,
+ .import = nx_sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-nx",
+ .cra_priority = 300,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_init = nx_crypto_ctx_sha256_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ }
+};
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
new file mode 100644
index 000000000..4c7a3e3ee
--- /dev/null
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/module.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ int err;
+
+ err = nx_crypto_ctx_sha_init(tfm);
+ if (err)
+ return err;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
+
+ NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
+
+ return 0;
+}
+
+static int nx_sha512_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ memset(sctx, 0, sizeof *sctx);
+
+ sctx->state[0] = __cpu_to_be64(SHA512_H0);
+ sctx->state[1] = __cpu_to_be64(SHA512_H1);
+ sctx->state[2] = __cpu_to_be64(SHA512_H2);
+ sctx->state[3] = __cpu_to_be64(SHA512_H3);
+ sctx->state[4] = __cpu_to_be64(SHA512_H4);
+ sctx->state[5] = __cpu_to_be64(SHA512_H5);
+ sctx->state[6] = __cpu_to_be64(SHA512_H6);
+ sctx->state[7] = __cpu_to_be64(SHA512_H7);
+ sctx->count[0] = 0;
+
+ return 0;
+}
+
+static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *out_sg;
+ u64 to_process, leftover = 0, total;
+ unsigned long irq_flags;
+ int rc = 0;
+ int data_len;
+ u32 max_sg_len;
+ u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ /* 2 cases for total data len:
+ * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
+ * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
+ */
+ total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
+ if (total < SHA512_BLOCK_SIZE) {
+ memcpy(sctx->buf + buf_len, data, len);
+ sctx->count[0] += len;
+ goto out;
+ }
+
+ memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ data_len = SHA512_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ &data_len, max_sg_len);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (data_len != SHA512_DIGEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ do {
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
+
+ if (buf_len) {
+ data_len = buf_len;
+ in_sg = nx_build_sg_list(in_sg,
+ (u8 *) sctx->buf,
+ &data_len, max_sg_len);
+
+ if (data_len != buf_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+ used_sgs = in_sg - nx_ctx->in_sg;
+ }
+
+		/* to_process: the SHA512_BLOCK_SIZE-aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by the sg list limits and by the number of sgs we
+		 * already used for leftover data (see above).
+		 * Ideally we could allow NX_PAGE_SIZE * max_sg_len, but
+		 * because the data may not be page aligned, we need to
+		 * account for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
+ data_len = to_process - buf_len;
+ in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+ &data_len, max_sg_len);
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+
+ if (data_len != (to_process - buf_len)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ to_process = data_len + buf_len;
+ leftover = total - to_process;
+
+ /*
+ * we've hit the nx chip previously and we're updating
+ * again, so copy over the partial digest.
+ */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest,
+ csbcpb->cpb.sha512.message_digest,
+ SHA512_DIGEST_SIZE);
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->sha512_ops));
+
+ total -= to_process;
+ data += to_process - buf_len;
+ buf_len = 0;
+
+ } while (leftover >= SHA512_BLOCK_SIZE);
+
+ /* copy the leftover back into the state struct */
+ if (leftover)
+ memcpy(sctx->buf, data, leftover);
+ sctx->count[0] += len;
+ memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
+ u32 max_sg_len;
+ u64 count0;
+ unsigned long irq_flags;
+ int rc = 0;
+ int len;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
+ /* we've hit the nx chip previously, now we're finalizing,
+ * so copy over the partial digest */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
+ SHA512_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ } else {
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+ }
+
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ count0 = sctx->count[0] * 8;
+
+ csbcpb->cpb.sha512.message_bit_length_lo = count0;
+
+ len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
+ max_sg_len);
+
+ if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ len = SHA512_DIGEST_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+ max_sg_len);
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (!nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->sha512_ops));
+ atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
+
+ memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+ return rc;
+}
+
+static int nx_sha512_export(struct shash_desc *desc, void *out)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+
+ return 0;
+}
+
+static int nx_sha512_import(struct shash_desc *desc, const void *in)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+
+ return 0;
+}
+
+struct shash_alg nx_shash_sha512_alg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .init = nx_sha512_init,
+ .update = nx_sha512_update,
+ .final = nx_sha512_final,
+ .export = nx_sha512_export,
+ .import = nx_sha512_import,
+ .descsize = sizeof(struct sha512_state),
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-nx",
+ .cra_priority = 300,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_init = nx_crypto_ctx_sha512_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ }
+};
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
new file mode 100644
index 000000000..40882d6d5
--- /dev/null
+++ b/drivers/crypto/nx/nx.c
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <asm/hvcall.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+/**
+ * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
+ *
+ * @nx_ctx: the crypto context handle
+ * @op: PFO operation struct to pass in
+ * @may_sleep: flag indicating the request can sleep
+ *
+ * Make the hcall, retrying while the hardware is busy. If we cannot yield
+ * the thread, limit the number of retries to 10 here.
+ */
+int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
+ struct vio_pfo_op *op,
+ u32 may_sleep)
+{
+ int rc, retries = 10;
+ struct vio_dev *viodev = nx_driver.viodev;
+
+ atomic_inc(&(nx_ctx->stats->sync_ops));
+
+ do {
+ rc = vio_h_cop_sync(viodev, op);
+ } while (rc == -EBUSY && !may_sleep && retries--);
+
+ if (rc) {
+ dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
+ "hcall rc: %ld\n", rc, op->hcall_err);
+ atomic_inc(&(nx_ctx->stats->errors));
+ atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
+ atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
+ }
+
+ return rc;
+}
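+
+/*
+ * Usage sketch (illustrative): the pattern the algorithm files in this
+ * driver follow -- validate the op lengths, then make the synchronous
+ * hcall under the context lock:
+ *
+ *	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen)
+ *		return -EINVAL;
+ *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
+ */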
+
+/**
+ * nx_build_sg_list - build an NX scatter list describing a single buffer
+ *
+ * @sg_head: pointer to the first scatter list element to build
+ * @start_addr: pointer to the linear buffer
+ * @len: length of the data at @start_addr
+ * @sgmax: the largest number of scatter list elements we're allowed to create
+ *
+ * This function will start writing nx_sg elements at @sg_head and keep
+ * writing them until all of the data from @start_addr is described or
+ * until sgmax elements have been written. Scatter list elements will be
+ * created such that none of the elements describes a buffer that crosses a 4K
+ * boundary.
+ */
+struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
+ u8 *start_addr,
+ unsigned int *len,
+ u32 sgmax)
+{
+ unsigned int sg_len = 0;
+ struct nx_sg *sg;
+ u64 sg_addr = (u64)start_addr;
+ u64 end_addr;
+
+ /* determine the start and end for this address range - slightly
+ * different if this is in VMALLOC_REGION */
+ if (is_vmalloc_addr(start_addr))
+ sg_addr = page_to_phys(vmalloc_to_page(start_addr))
+ + offset_in_page(sg_addr);
+ else
+ sg_addr = __pa(sg_addr);
+
+ end_addr = sg_addr + *len;
+
+ /* each iteration will write one struct nx_sg element and add the
+ * length of data described by that element to sg_len. Once @len bytes
+ * have been described (or @sgmax elements have been written), the
+ * loop ends. min_t is used to ensure @end_addr falls on the same page
+	 * as sg_addr; if not, we need to create another nx_sg element for the
+ * data on the next page.
+ *
+ * Also when using vmalloc'ed data, every time that a system page
+ * boundary is crossed the physical address needs to be re-calculated.
+ */
+ for (sg = sg_head; sg_len < *len; sg++) {
+ u64 next_page;
+
+ sg->addr = sg_addr;
+ sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
+ end_addr);
+
+ next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
+ sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
+ sg_len += sg->len;
+
+ if (sg_addr >= next_page &&
+ is_vmalloc_addr(start_addr + sg_len)) {
+ sg_addr = page_to_phys(vmalloc_to_page(
+ start_addr + sg_len));
+ end_addr = sg_addr + *len - sg_len;
+ }
+
+ if ((sg - sg_head) == sgmax) {
+ pr_err("nx: scatter/gather list overflow, pid: %d\n",
+ current->pid);
+ sg++;
+ break;
+ }
+ }
+ *len = sg_len;
+
+ /* return the moved sg_head pointer */
+ return sg;
+}
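+
+/*
+ * Usage sketch (illustrative): build a list for a linear buffer and
+ * verify that the whole buffer was described:
+ *
+ *	unsigned int n = len;
+ *	sg = nx_build_sg_list(nx_ctx->in_sg, buf, &n, max_sg_len);
+ *	if (n != len)
+ *		return -EINVAL;	// ran out of sg elements
+ *	nx_ctx->op.inlen = (nx_ctx->in_sg - sg) * sizeof(struct nx_sg);
+ */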
+
+/**
+ * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
+ *
+ * @nx_dst: pointer to the first nx_sg element to write
+ * @sglen: max number of nx_sg entries we're allowed to write
+ * @sg_src: pointer to the source linux scatterlist to walk
+ * @start: number of bytes to fast-forward past at the beginning of @sg_src
+ * @src_len: number of bytes to walk in @sg_src
+ */
+struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
+ unsigned int sglen,
+ struct scatterlist *sg_src,
+ unsigned int start,
+ unsigned int *src_len)
+{
+ struct scatter_walk walk;
+ struct nx_sg *nx_sg = nx_dst;
+ unsigned int n, offset = 0, len = *src_len;
+ char *dst;
+
+ /* we need to fast forward through @start bytes first */
+ for (;;) {
+ scatterwalk_start(&walk, sg_src);
+
+ if (start < offset + sg_src->length)
+ break;
+
+ offset += sg_src->length;
+ sg_src = sg_next(sg_src);
+ }
+
+ /* start - offset is the number of bytes to advance in the scatterlist
+ * element we're currently looking at */
+ scatterwalk_advance(&walk, start - offset);
+
+ while (len && (nx_sg - nx_dst) < sglen) {
+ n = scatterwalk_clamp(&walk, len);
+ if (!n) {
+			/* In cases where we have a scatterlist chain,
+			 * sg_next handles it properly */
+ scatterwalk_start(&walk, sg_next(walk.sg));
+ n = scatterwalk_clamp(&walk, len);
+ }
+ dst = scatterwalk_map(&walk);
+
+ nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
+ len -= n;
+
+ scatterwalk_unmap(dst);
+ scatterwalk_advance(&walk, n);
+ scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
+ }
+	/* update *src_len to the number of bytes actually described */
+ *src_len -= len;
+
+ /* return the moved destination pointer */
+ return nx_sg;
+}
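+
+/*
+ * Usage sketch (illustrative): skip the first "hdr" bytes of @sg_src
+ * and describe up to "want" bytes after that:
+ *
+ *	unsigned int n = want;
+ *	nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, src, hdr, &n);
+ *	// n now holds the number of bytes actually described
+ */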
+
+/**
+ * trim_sg_list - ensures the bound in sg list.
+ * @sg: sg list head
+ * @end: sg lisg end
+ * @delta: is the amount we need to crop in order to bound the list.
+ *
+ */
+static long int trim_sg_list(struct nx_sg *sg,
+ struct nx_sg *end,
+ unsigned int delta,
+ unsigned int *nbytes)
+{
+ long int oplen;
+ long int data_back;
+ unsigned int is_delta = delta;
+
+ while (delta && end > sg) {
+ struct nx_sg *last = end - 1;
+
+ if (last->len > delta) {
+ last->len -= delta;
+ delta = 0;
+ } else {
+ end--;
+ delta -= last->len;
+ }
+ }
+
+	/* There are cases where we need to crop the list in order to make it
+	 * a block size multiple, but we also need to align the data. In order
+	 * to do that we need to calculate how much data needs to be put back
+	 * to be processed later.
+ */
+ oplen = (sg - end) * sizeof(struct nx_sg);
+ if (is_delta) {
+ data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
+ data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
+ *nbytes -= data_back;
+ }
+
+ return oplen;
+}
+
+/**
+ * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
+ * scatterlists based on them.
+ *
+ * @nx_ctx: NX crypto context for the lists we're building
+ * @iv: iv data, if the algorithm requires it
+ * @dst: destination scatterlist
+ * @src: source scatterlist
+ * @nbytes: length of data described in the scatterlists
+ * @offset: number of bytes to fast-forward past at the beginning of
+ * scatterlists.
+ * @oiv: destination for the iv data, if the algorithm requires it
+ *
+ * This is common code shared by all the AES algorithms. It uses the crypto
+ * scatterlist walk routines to traverse input and output scatterlists, building
+ * corresponding NX scatterlists
+ */
+int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
+ const u8 *iv,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int *nbytes,
+ unsigned int offset,
+ u8 *oiv)
+{
+ unsigned int delta = 0;
+ unsigned int total = *nbytes;
+ struct nx_sg *nx_insg = nx_ctx->in_sg;
+ struct nx_sg *nx_outsg = nx_ctx->out_sg;
+ unsigned int max_sg_len;
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ if (oiv)
+ memcpy(oiv, iv, AES_BLOCK_SIZE);
+
+ *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+
+ nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
+ offset, nbytes);
+ nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
+ offset, nbytes);
+
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));
+
+ /* these lengths should be negative, which will indicate to phyp that
+ * the input and output parameters are scatterlists, not linear
+ * buffers */
+ nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
+ nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);
+
+ return 0;
+}
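+
+/*
+ * Usage sketch (illustrative): the AES paths call this with the
+ * request's scatterlists; op.inlen/op.outlen come back negative to
+ * flag scatterlist (not linear) parameters to phyp. The cpb field
+ * name here (aes_cbc.iv) is one assumed example of an iv destination:
+ *
+ *	rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
+ *			       &to_process, processed,
+ *			       csbcpb->cpb.aes_cbc.iv);
+ */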
+
+/**
+ * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
+ *
+ * @nx_ctx: the nx context to initialize
+ * @function: the function code for the op
+ */
+void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
+{
+ spin_lock_init(&nx_ctx->lock);
+ memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
+ nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
+
+ nx_ctx->op.flags = function;
+ nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
+ nx_ctx->op.in = __pa(nx_ctx->in_sg);
+ nx_ctx->op.out = __pa(nx_ctx->out_sg);
+
+ if (nx_ctx->csbcpb_aead) {
+ nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;
+
+ nx_ctx->op_aead.flags = function;
+ nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
+ nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
+ nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
+ }
+}
+
+static void nx_of_update_status(struct device *dev,
+ struct property *p,
+ struct nx_of *props)
+{
+ if (!strncmp(p->value, "okay", p->length)) {
+ props->status = NX_WAITING;
+ props->flags |= NX_OF_FLAG_STATUS_SET;
+ } else {
+ dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
+ (char *)p->value);
+ }
+}
+
+static void nx_of_update_sglen(struct device *dev,
+ struct property *p,
+ struct nx_of *props)
+{
+ if (p->length != sizeof(props->max_sg_len)) {
+ dev_err(dev, "%s: unexpected format for "
+ "ibm,max-sg-len property\n", __func__);
+ dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
+ "long, expected %zd bytes\n", __func__,
+ p->length, sizeof(props->max_sg_len));
+ return;
+ }
+
+ props->max_sg_len = *(u32 *)p->value;
+ props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
+}
+
+static void nx_of_update_msc(struct device *dev,
+ struct property *p,
+ struct nx_of *props)
+{
+ struct msc_triplet *trip;
+ struct max_sync_cop *msc;
+ unsigned int bytes_so_far, i, lenp;
+
+ msc = (struct max_sync_cop *)p->value;
+ lenp = p->length;
+
+ /* You can't tell if the data read in for this property is sane by its
+ * size alone. This is because there are sizes embedded in the data
+ * structure. The best we can do is check lengths as we parse and bail
+ * as soon as a length error is detected. */
+ bytes_so_far = 0;
+
+ while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
+ bytes_so_far += sizeof(struct max_sync_cop);
+
+ trip = msc->trip;
+
+ for (i = 0;
+ ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
+ i < msc->triplets;
+ i++) {
+ if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
+ dev_err(dev, "unknown function code/mode "
+ "combo: %d/%d (ignored)\n", msc->fc,
+ msc->mode);
+ goto next_loop;
+ }
+
+ if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
+ dev_warn(dev, "bogus sglen/databytelen: "
+ "%u/%u (ignored)\n", trip->sglen,
+ trip->databytelen);
+ goto next_loop;
+ }
+
+ switch (trip->keybitlen) {
+ case 128:
+ case 160:
+ props->ap[msc->fc][msc->mode][0].databytelen =
+ trip->databytelen;
+ props->ap[msc->fc][msc->mode][0].sglen =
+ trip->sglen;
+ break;
+ case 192:
+ props->ap[msc->fc][msc->mode][1].databytelen =
+ trip->databytelen;
+ props->ap[msc->fc][msc->mode][1].sglen =
+ trip->sglen;
+ break;
+ case 256:
+ if (msc->fc == NX_FC_AES) {
+ props->ap[msc->fc][msc->mode][2].
+ databytelen = trip->databytelen;
+ props->ap[msc->fc][msc->mode][2].sglen =
+ trip->sglen;
+ } else if (msc->fc == NX_FC_AES_HMAC ||
+ msc->fc == NX_FC_SHA) {
+ props->ap[msc->fc][msc->mode][1].
+ databytelen = trip->databytelen;
+ props->ap[msc->fc][msc->mode][1].sglen =
+ trip->sglen;
+ } else {
+ dev_warn(dev, "unknown function "
+ "code/key bit len combo"
+ ": (%u/256)\n", msc->fc);
+ }
+ break;
+ case 512:
+ props->ap[msc->fc][msc->mode][2].databytelen =
+ trip->databytelen;
+ props->ap[msc->fc][msc->mode][2].sglen =
+ trip->sglen;
+ break;
+ default:
+ dev_warn(dev, "unknown function code/key bit "
+ "len combo: (%u/%u)\n", msc->fc,
+ trip->keybitlen);
+ break;
+ }
+next_loop:
+ bytes_so_far += sizeof(struct msc_triplet);
+ trip++;
+ }
+
+ msc = (struct max_sync_cop *)trip;
+ }
+
+ props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
+}
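+
+/*
+ * Illustrative layout of the ibm,max-sync-cop property as parsed above:
+ * a packed sequence of headers, each immediately followed by its
+ * triplet array:
+ *
+ *	struct max_sync_cop	(fc, mode, triplets = N, ...)
+ *	struct msc_triplet[N]	(keybitlen, databytelen, sglen each)
+ *	struct max_sync_cop	...
+ */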
+
+/**
+ * nx_of_init - read Open Firmware values from the device tree
+ *
+ * @dev: device handle
+ * @props: pointer to struct to hold the properties values
+ *
+ * Called once at driver probe time, this function will read out the
+ * Open Firmware properties we use at runtime. If all the OF properties are
+ * acceptable, when we exit this function props->flags will indicate that
+ * we're ready to register our crypto algorithms.
+ */
+static void nx_of_init(struct device *dev, struct nx_of *props)
+{
+ struct device_node *base_node = dev->of_node;
+ struct property *p;
+
+ p = of_find_property(base_node, "status", NULL);
+ if (!p)
+ dev_info(dev, "%s: property 'status' not found\n", __func__);
+ else
+ nx_of_update_status(dev, p, props);
+
+ p = of_find_property(base_node, "ibm,max-sg-len", NULL);
+ if (!p)
+ dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
+ __func__);
+ else
+ nx_of_update_sglen(dev, p, props);
+
+ p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
+ if (!p)
+ dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
+ __func__);
+ else
+ nx_of_update_msc(dev, p, props);
+}
+
+static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
+{
+ struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];
+
+ if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
+ if (dev)
+ dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
+ "%u/%u (ignored)\n", fc, mode, slot,
+ props->sglen, props->databytelen);
+ return false;
+ }
+
+ return true;
+}
+
+static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
+{
+ int i;
+
+ for (i = 0; i < 3; i++)
+ if (!nx_check_prop(dev, fc, mode, i))
+ return false;
+
+ return true;
+}
+
+static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
+{
+ return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
+ crypto_register_skcipher(alg) : 0;
+}
+
+static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
+{
+ return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
+ crypto_register_aead(alg) : 0;
+}
+
+static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
+{
+ return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
+ fc, mode, slot) :
+ nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
+ crypto_register_shash(alg) : 0;
+}
+
+static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
+{
+ if (nx_check_props(NULL, fc, mode))
+ crypto_unregister_skcipher(alg);
+}
+
+static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
+{
+ if (nx_check_props(NULL, fc, mode))
+ crypto_unregister_aead(alg);
+}
+
+static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
+ int slot)
+{
+ if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
+ nx_check_props(NULL, fc, mode))
+ crypto_unregister_shash(alg);
+}
+
+/**
+ * nx_register_algs - register algorithms with the crypto API
+ *
+ * Called from nx_probe()
+ *
+ * If all OF properties are in an acceptable state, the driver flags will
+ * indicate that we're ready and we'll create our debugfs files and register
+ * our crypto algorithms.
+ */
+static int nx_register_algs(void)
+{
+ int rc = -1;
+
+ if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
+ goto out;
+
+ memset(&nx_driver.stats, 0, sizeof(struct nx_stats));
+
+ NX_DEBUGFS_INIT(&nx_driver);
+
+ nx_driver.of.status = NX_OKAY;
+
+ rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+ if (rc)
+ goto out;
+
+ rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+ if (rc)
+ goto out_unreg_ecb;
+
+ rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CTR);
+ if (rc)
+ goto out_unreg_cbc;
+
+ rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
+ if (rc)
+ goto out_unreg_ctr3686;
+
+ rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
+ if (rc)
+ goto out_unreg_gcm;
+
+ rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ if (rc)
+ goto out_unreg_gcm4106;
+
+ rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ if (rc)
+ goto out_unreg_ccm;
+
+ rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
+ NX_PROPS_SHA256);
+ if (rc)
+ goto out_unreg_ccm4309;
+
+ rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
+ NX_PROPS_SHA512);
+ if (rc)
+ goto out_unreg_s256;
+
+ rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
+ NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
+ if (rc)
+ goto out_unreg_s512;
+
+ goto out;
+
+out_unreg_s512:
+ nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
+ NX_PROPS_SHA512);
+out_unreg_s256:
+ nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
+ NX_PROPS_SHA256);
+out_unreg_ccm4309:
+ nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+out_unreg_ccm:
+ nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+out_unreg_gcm4106:
+ nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
+out_unreg_gcm:
+ nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
+out_unreg_ctr3686:
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
+out_unreg_cbc:
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
+out_unreg_ecb:
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
+out:
+ return rc;
+}
+
+/**
+ * nx_crypto_ctx_init - create and initialize a crypto API context
+ *
+ * @nx_ctx: the crypto API context
+ * @fc: function code for the context
+ * @mode: the function code specific mode for this context
+ */
+static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
+{
+ if (nx_driver.of.status != NX_OKAY) {
+ pr_err("Attempt to initialize NX crypto context while device is not available!\n");
+ return -ENODEV;
+ }
+
+ /* we need an extra page for csbcpb_aead for these modes */
+ if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
+ nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
+ sizeof(struct nx_csbcpb);
+ else
+ nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
+ sizeof(struct nx_csbcpb);
+
+ nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
+ if (!nx_ctx->kmem)
+ return -ENOMEM;
+
+ /* the csbcpb and scatterlists must be 4K aligned pages */
+ nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
+ (u64)NX_PAGE_SIZE));
+ nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
+ nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);
+
+ if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
+ nx_ctx->csbcpb_aead =
+ (struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
+ NX_PAGE_SIZE);
+
+ /* give each context a pointer to global stats and their OF
+ * properties */
+ nx_ctx->stats = &nx_driver.stats;
+ memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
+ sizeof(struct alg_props) * 3);
+
+ return 0;
+}
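+
+/*
+ * A sketch of the kmem layout set up above (AEAD case):
+ *
+ *   [ pad to 4K | csbcpb page | in_sg page | out_sg page | csbcpb_aead ]
+ *
+ * The pad absorbs kmalloc() misalignment (up to one page), each aligned
+ * region spans NX_PAGE_SIZE, and csbcpb_aead needs only
+ * sizeof(struct nx_csbcpb); hence the 5 * NX_PAGE_SIZE +
+ * sizeof(struct nx_csbcpb) allocation for GCM/CCM, and one page less
+ * for all other modes.
+ */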
+
+/* entry points from the crypto tfm initializers */
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
+{
+ crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
+ return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_CCM);
+}
+
+int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
+{
+ crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
+ return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_GCM);
+}
+
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
+{
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_CTR);
+}
+
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
+{
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_CBC);
+}
+
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
+{
+ return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_ECB);
+}
+
+int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
+{
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
+}
+
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
+{
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_XCBC_MAC);
+}
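+
+/*
+ * The two AEAD initializers above also call crypto_aead_set_reqsize()
+ * so that each request carries its own nx_ccm_rctx/nx_gcm_rctx, which
+ * (see nx.h) is just a 16-byte per-request IV buffer.
+ */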
+
+/**
+ * nx_crypto_ctx_exit - destroy a crypto API context
+ *
+ * @tfm: the crypto transform pointer for the context
+ *
+ * As crypto API contexts are destroyed, this exit hook is called to free the
+ * memory associated with it. kfree_sensitive() zeroes the buffer before
+ * freeing it, since it may contain key material.
+ */
+void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+
+ kfree_sensitive(nx_ctx->kmem);
+ nx_ctx->csbcpb = NULL;
+ nx_ctx->csbcpb_aead = NULL;
+ nx_ctx->in_sg = NULL;
+ nx_ctx->out_sg = NULL;
+}
+
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ nx_crypto_ctx_exit(crypto_skcipher_ctx(tfm));
+}
+
+void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
+
+ kfree_sensitive(nx_ctx->kmem);
+}
+
+static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
+{
+ dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
+ viodev->name, viodev->resource_id);
+
+ if (nx_driver.viodev) {
+ dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nx_driver.viodev = viodev;
+
+ nx_of_init(&viodev->dev, &nx_driver.of);
+
+ return nx_register_algs();
+}
+
+static int nx_remove(struct vio_dev *viodev)
+{
+ dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
+ viodev->unit_address);
+
+ if (nx_driver.of.status == NX_OKAY) {
+ NX_DEBUGFS_FINI(&nx_driver);
+
+ nx_unregister_shash(&nx_shash_aes_xcbc_alg,
+ NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
+ nx_unregister_shash(&nx_shash_sha512_alg,
+ NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
+ nx_unregister_shash(&nx_shash_sha256_alg,
+ NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
+ nx_unregister_aead(&nx_ccm4309_aes_alg,
+ NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+ nx_unregister_aead(&nx_gcm4106_aes_alg,
+ NX_FC_AES, NX_MODE_AES_GCM);
+ nx_unregister_aead(&nx_gcm_aes_alg,
+ NX_FC_AES, NX_MODE_AES_GCM);
+ nx_unregister_skcipher(&nx_ctr3686_aes_alg,
+ NX_FC_AES, NX_MODE_AES_CTR);
+ nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
+ NX_MODE_AES_CBC);
+ nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
+ NX_MODE_AES_ECB);
+ }
+
+ return 0;
+}
+
+/* module wide initialization/cleanup */
+static int __init nx_init(void)
+{
+ return vio_register_driver(&nx_driver.viodriver);
+}
+
+static void __exit nx_fini(void)
+{
+ vio_unregister_driver(&nx_driver.viodriver);
+}
+
+static const struct vio_device_id nx_crypto_driver_ids[] = {
+ { "ibm,sym-encryption-v1", "ibm,sym-encryption" },
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);
+
+/* driver state structure */
+struct nx_crypto_driver nx_driver = {
+ .viodriver = {
+ .id_table = nx_crypto_driver_ids,
+ .probe = nx_probe,
+ .remove = nx_remove,
+ .name = NX_NAME,
+ },
+};
+
+module_init(nx_init);
+module_exit(nx_fini);
+
+MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
+MODULE_DESCRIPTION(NX_STRING);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NX_VERSION);
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
new file mode 100644
index 000000000..2697baebb
--- /dev/null
+++ b/drivers/crypto/nx/nx.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NX_H__
+#define __NX_H__
+
+#include <crypto/ctr.h>
+
+#define NX_NAME "nx-crypto"
+#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
+#define NX_VERSION "1.0"
+
+/* a scatterlist in the format PHYP is expecting */
+struct nx_sg {
+ u64 addr;
+ u32 rsvd;
+ u32 len;
+} __packed;
+
+#define NX_PAGE_SIZE (4096)
+#define NX_MAX_SG_ENTRIES (NX_PAGE_SIZE/(sizeof(struct nx_sg)))
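+/* a packed struct nx_sg is 16 bytes, so one 4K page holds 4096 / 16 = 256 entries */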
+
+enum nx_status {
+ NX_DISABLED,
+ NX_WAITING,
+ NX_OKAY
+};
+
+/* msc_triplet and max_sync_cop are used only to assist in parsing the
+ * Open Firmware property */
+struct msc_triplet {
+ u32 keybitlen;
+ u32 databytelen;
+ u32 sglen;
+} __packed;
+
+struct max_sync_cop {
+ u32 fc;
+ u32 mode;
+ u32 triplets;
+ struct msc_triplet trip[];
+} __packed;
+
+struct alg_props {
+ u32 databytelen;
+ u32 sglen;
+};
+
+#define NX_OF_FLAG_MAXSGLEN_SET (1)
+#define NX_OF_FLAG_STATUS_SET (2)
+#define NX_OF_FLAG_MAXSYNCCOP_SET (4)
+#define NX_OF_FLAG_MASK_READY (NX_OF_FLAG_MAXSGLEN_SET | \
+ NX_OF_FLAG_STATUS_SET | \
+ NX_OF_FLAG_MAXSYNCCOP_SET)
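+/*
+ * All three flags above must be set (flags == 0x7) before
+ * nx_register_algs() will register anything.
+ */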
+struct nx_of {
+ u32 flags;
+ u32 max_sg_len;
+ enum nx_status status;
+ struct alg_props ap[NX_MAX_FC][NX_MAX_MODE][3];
+};
+
+struct nx_stats {
+ atomic_t aes_ops;
+ atomic64_t aes_bytes;
+ atomic_t sha256_ops;
+ atomic64_t sha256_bytes;
+ atomic_t sha512_ops;
+ atomic64_t sha512_bytes;
+
+ atomic_t sync_ops;
+
+ atomic_t errors;
+ atomic_t last_error;
+ atomic_t last_error_pid;
+};
+
+struct nx_crypto_driver {
+ struct nx_stats stats;
+ struct nx_of of;
+ struct vio_dev *viodev;
+ struct vio_driver viodriver;
+ struct dentry *dfs_root;
+};
+
+#define NX_GCM4106_NONCE_LEN (4)
+#define NX_GCM_CTR_OFFSET (12)
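+/*
+ * For the usual 12-byte-IV GCM construction (and RFC 4106, where the
+ * 4-byte nonce plus the 8-byte explicit IV form those 12 bytes), the
+ * 32-bit block counter occupies bytes 12..15 of the 16-byte counter
+ * block, which is what NX_GCM_CTR_OFFSET refers to.
+ */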
+struct nx_gcm_rctx {
+ u8 iv[16];
+};
+
+struct nx_gcm_priv {
+ u8 iauth_tag[16];
+ u8 nonce[NX_GCM4106_NONCE_LEN];
+};
+
+#define NX_CCM_AES_KEY_LEN (16)
+#define NX_CCM4309_AES_KEY_LEN (19)
+#define NX_CCM4309_NONCE_LEN (3)
+struct nx_ccm_rctx {
+ u8 iv[16];
+};
+
+struct nx_ccm_priv {
+ u8 b0[16];
+ u8 iauth_tag[16];
+ u8 oauth_tag[16];
+ u8 nonce[NX_CCM4309_NONCE_LEN];
+};
+
+struct nx_xcbc_priv {
+ u8 key[16];
+};
+
+struct nx_ctr_priv {
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
+};
+
+struct nx_crypto_ctx {
+ spinlock_t lock; /* synchronize access to the context */
+ void *kmem; /* unaligned, kmalloc'd buffer */
+ size_t kmem_len; /* length of kmem */
+ struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */
+ struct vio_pfo_op op; /* operation struct with hcall parameters */
+ struct nx_csbcpb *csbcpb_aead; /* secondary csbcpb used by AEAD algs */
+ struct vio_pfo_op op_aead; /* operation struct for csbcpb_aead */
+
+ struct nx_sg *in_sg; /* aligned pointer into kmem to an sg list */
+ struct nx_sg *out_sg; /* aligned pointer into kmem to an sg list */
+
+ struct alg_props *ap; /* pointer into props based on our key size */
+ struct alg_props props[3]; /* Open Firmware properties for requests */
+ struct nx_stats *stats; /* pointer into an nx_crypto_driver for stats
+ reporting */
+
+ union {
+ struct nx_gcm_priv gcm;
+ struct nx_ccm_priv ccm;
+ struct nx_xcbc_priv xcbc;
+ struct nx_ctr_priv ctr;
+ } priv;
+};
+
+struct crypto_aead;
+
+/* prototypes */
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
+int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
+int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
+void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
+void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
+void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
+void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
+int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
+ u32 may_sleep);
+struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
+int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, const u8 *iv,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int *nbytes, unsigned int offset, u8 *oiv);
+struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
+ struct scatterlist *, unsigned int,
+ unsigned int *);
+
+#ifdef CONFIG_DEBUG_FS
+#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
+#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv)
+
+void nx_debugfs_init(struct nx_crypto_driver *);
+void nx_debugfs_fini(struct nx_crypto_driver *);
+#else
+#define NX_DEBUGFS_INIT(drv) do {} while (0)
+#define NX_DEBUGFS_FINI(drv) do {} while (0)
+#endif
+
+#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
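+/* e.g. NX_PAGE_NUM(0x1003f42) == 0x1003000: masks an address down to its 4K page */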
+
+extern struct skcipher_alg nx_cbc_aes_alg;
+extern struct skcipher_alg nx_ecb_aes_alg;
+extern struct aead_alg nx_gcm_aes_alg;
+extern struct aead_alg nx_gcm4106_aes_alg;
+extern struct skcipher_alg nx_ctr3686_aes_alg;
+extern struct aead_alg nx_ccm_aes_alg;
+extern struct aead_alg nx_ccm4309_aes_alg;
+extern struct shash_alg nx_shash_aes_xcbc_alg;
+extern struct shash_alg nx_shash_sha512_alg;
+extern struct shash_alg nx_shash_sha256_alg;
+
+extern struct nx_crypto_driver nx_driver;
+
+#define SCATTERWALK_TO_SG 1
+#define SCATTERWALK_FROM_SG 0
+
+#endif
diff --git a/drivers/crypto/nx/nx_csbcpb.h b/drivers/crypto/nx/nx_csbcpb.h
new file mode 100644
index 000000000..493f8490f
--- /dev/null
+++ b/drivers/crypto/nx/nx_csbcpb.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NX_CSBCPB_H__
+#define __NX_CSBCPB_H__
+
+struct cop_symcpb_aes_ecb {
+ u8 key[32];
+ u8 __rsvd[80];
+} __packed;
+
+struct cop_symcpb_aes_cbc {
+ u8 iv[16];
+ u8 key[32];
+ u8 cv[16];
+ u32 spbc;
+ u8 __rsvd[44];
+} __packed;
+
+struct cop_symcpb_aes_gca {
+ u8 in_pat[16];
+ u8 key[32];
+ u8 out_pat[16];
+ u32 spbc;
+ u8 __rsvd[44];
+} __packed;
+
+struct cop_symcpb_aes_gcm {
+ u8 in_pat_or_aad[16];
+ u8 iv_or_cnt[16];
+ u64 bit_length_aad;
+ u64 bit_length_data;
+ u8 in_s0[16];
+ u8 key[32];
+ u8 __rsvd1[16];
+ u8 out_pat_or_mac[16];
+ u8 out_s0[16];
+ u8 out_cnt[16];
+ u32 spbc;
+ u8 __rsvd2[12];
+} __packed;
+
+struct cop_symcpb_aes_ctr {
+ u8 iv[16];
+ u8 key[32];
+ u8 cv[16];
+ u32 spbc;
+ u8 __rsvd2[44];
+} __packed;
+
+struct cop_symcpb_aes_cca {
+ u8 b0[16];
+ u8 b1[16];
+ u8 key[16];
+ u8 out_pat_or_b0[16];
+ u32 spbc;
+ u8 __rsvd[44];
+} __packed;
+
+struct cop_symcpb_aes_ccm {
+ u8 in_pat_or_b0[16];
+ u8 iv_or_ctr[16];
+ u8 in_s0[16];
+ u8 key[16];
+ u8 __rsvd1[48];
+ u8 out_pat_or_mac[16];
+ u8 out_s0[16];
+ u8 out_ctr[16];
+ u32 spbc;
+ u8 __rsvd2[12];
+} __packed;
+
+struct cop_symcpb_aes_xcbc {
+ u8 cv[16];
+ u8 key[16];
+ u8 __rsvd1[16];
+ u8 out_cv_mac[16];
+ u32 spbc;
+ u8 __rsvd2[44];
+} __packed;
+
+struct cop_symcpb_sha256 {
+ u64 message_bit_length;
+ u64 __rsvd1;
+ u8 input_partial_digest[32];
+ u8 message_digest[32];
+ u32 spbc;
+ u8 __rsvd2[44];
+} __packed;
+
+struct cop_symcpb_sha512 {
+ u64 message_bit_length_hi;
+ u64 message_bit_length_lo;
+ u8 input_partial_digest[64];
+ u8 __rsvd1[32];
+ u8 message_digest[64];
+ u32 spbc;
+ u8 __rsvd2[76];
+} __packed;
+
+#define NX_FDM_INTERMEDIATE 0x01
+#define NX_FDM_CONTINUATION 0x02
+#define NX_FDM_ENDE_ENCRYPT 0x80
+
+#define NX_CPB_FDM(c) ((c)->cpb.hdr.fdm)
+#define NX_CPB_KS_DS(c) ((c)->cpb.hdr.ks_ds)
+
+#define NX_CPB_KEY_SIZE(c) (NX_CPB_KS_DS(c) >> 4)
+#define NX_CPB_SET_KEY_SIZE(c, x) NX_CPB_KS_DS(c) |= ((x) << 4)
+#define NX_CPB_SET_DIGEST_SIZE(c, x) NX_CPB_KS_DS(c) |= (x)
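+/*
+ * ks_ds packs the key size into the high nibble and the digest size
+ * into the low nibble: NX_CPB_SET_KEY_SIZE(c, NX_KS_AES_256) ORs in
+ * 0x30, NX_CPB_SET_DIGEST_SIZE(c, NX_DS_SHA512) ORs in 0x03.  Both
+ * macros only OR bits in, so ks_ds must be zeroed before a new size
+ * is set.
+ */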
+
+struct cop_symcpb_header {
+ u8 mode;
+ u8 fdm;
+ u8 ks_ds;
+ u8 pad_byte;
+ u8 __rsvd[12];
+} __packed;
+
+struct cop_parameter_block {
+ struct cop_symcpb_header hdr;
+ union {
+ struct cop_symcpb_aes_ecb aes_ecb;
+ struct cop_symcpb_aes_cbc aes_cbc;
+ struct cop_symcpb_aes_gca aes_gca;
+ struct cop_symcpb_aes_gcm aes_gcm;
+ struct cop_symcpb_aes_cca aes_cca;
+ struct cop_symcpb_aes_ccm aes_ccm;
+ struct cop_symcpb_aes_ctr aes_ctr;
+ struct cop_symcpb_aes_xcbc aes_xcbc;
+ struct cop_symcpb_sha256 sha256;
+ struct cop_symcpb_sha512 sha512;
+ };
+} __packed;
+
+#define NX_CSB_VALID_BIT 0x80
+
+/* co-processor status block */
+struct cop_status_block {
+ u8 valid;
+ u8 crb_seq_number;
+ u8 completion_code;
+ u8 completion_extension;
+ u32 processed_byte_count;
+ u64 address;
+} __packed;
+
+/* Nest accelerator workbook section 4.4 */
+struct nx_csbcpb {
+ unsigned char __rsvd[112];
+ struct cop_status_block csb;
+ struct cop_parameter_block cpb;
+} __packed;
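+/*
+ * Offset note: with 112 reserved bytes and a 16-byte status block,
+ * the CSB lives at byte offset 112 and the CPB at byte offset 128 of
+ * every nx_csbcpb.
+ */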
+
+/* nx_csbcpb related definitions */
+#define NX_MODE_AES_ECB 0
+#define NX_MODE_AES_CBC 1
+#define NX_MODE_AES_GMAC 2
+#define NX_MODE_AES_GCA 3
+#define NX_MODE_AES_GCM 4
+#define NX_MODE_AES_CCA 5
+#define NX_MODE_AES_CCM 6
+#define NX_MODE_AES_CTR 7
+#define NX_MODE_AES_XCBC_MAC 20
+#define NX_MODE_SHA 0
+#define NX_MODE_SHA_HMAC 1
+#define NX_MODE_AES_CBC_HMAC_ETA 8
+#define NX_MODE_AES_CBC_HMAC_ATE 9
+#define NX_MODE_AES_CBC_HMAC_EAA 10
+#define NX_MODE_AES_CTR_HMAC_ETA 12
+#define NX_MODE_AES_CTR_HMAC_ATE 13
+#define NX_MODE_AES_CTR_HMAC_EAA 14
+
+#define NX_FDM_CI_FULL 0
+#define NX_FDM_CI_FIRST 1
+#define NX_FDM_CI_LAST 2
+#define NX_FDM_CI_MIDDLE 3
+
+#define NX_FDM_PR_NONE 0
+#define NX_FDM_PR_PAD 1
+
+#define NX_KS_AES_128 1
+#define NX_KS_AES_192 2
+#define NX_KS_AES_256 3
+
+#define NX_DS_SHA256 2
+#define NX_DS_SHA512 3
+
+#define NX_FC_AES 0
+#define NX_FC_SHA 2
+#define NX_FC_AES_HMAC 6
+
+#define NX_MAX_FC (NX_FC_AES_HMAC + 1)
+#define NX_MAX_MODE (NX_MODE_AES_XCBC_MAC + 1)
+
+#define HCOP_FC_AES NX_FC_AES
+#define HCOP_FC_SHA NX_FC_SHA
+#define HCOP_FC_AES_HMAC NX_FC_AES_HMAC
+
+/* indices into the array of algorithm properties */
+#define NX_PROPS_AES_128 0
+#define NX_PROPS_AES_192 1
+#define NX_PROPS_AES_256 2
+#define NX_PROPS_SHA256 1
+#define NX_PROPS_SHA512 2
+
+#endif
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
new file mode 100644
index 000000000..1975bcbee
--- /dev/null
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * debugfs routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * debugfs
+ *
+ * For documentation on these attributes, please see:
+ *
+ * Documentation/ABI/testing/debugfs-pfo-nx-crypto
+ */
+
+void nx_debugfs_init(struct nx_crypto_driver *drv)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir(NX_NAME, NULL);
+ drv->dfs_root = root;
+
+ debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.aes_ops.counter);
+ debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.sha256_ops.counter);
+ debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.sha512_ops.counter);
+ debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u64 *)&drv->stats.aes_bytes.counter);
+ debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u64 *)&drv->stats.sha256_bytes.counter);
+ debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u64 *)&drv->stats.sha512_bytes.counter);
+ debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.errors.counter);
+ debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.last_error.counter);
+ debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
+ root, (u32 *)&drv->stats.last_error_pid.counter);
+}
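+
+/*
+ * With debugfs mounted in its usual location, the counters created
+ * above appear as read-only files under /sys/kernel/debug/nx-crypto/,
+ * e.g.:
+ *
+ *   $ cat /sys/kernel/debug/nx-crypto/aes_ops
+ */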
+
+void nx_debugfs_fini(struct nx_crypto_driver *drv)
+{
+ debugfs_remove_recursive(drv->dfs_root);
+}
+
+#endif