Diffstat (limited to 'drivers/crypto/virtio')
-rw-r--r--  drivers/crypto/virtio/Kconfig                       |  13
-rw-r--r--  drivers/crypto/virtio/Makefile                      |   7
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 591
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c          | 656
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h        | 151
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c          | 590
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_mgr.c           | 332
7 files changed, 2340 insertions(+), 0 deletions(-)
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
new file mode 100644
index 000000000..5f8915f4a
--- /dev/null
+++ b/drivers/crypto/virtio/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_VIRTIO
+ tristate "VirtIO crypto driver"
+ depends on VIRTIO
+ select CRYPTO_AEAD
+ select CRYPTO_AKCIPHER2
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
+ select CRYPTO_RSA
+ select MPILIB
+ help
+ This driver provides support for the virtio crypto device. If you
+ choose 'M' here, the module will be called virtio_crypto.
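
The select lines above pull in the crypto-engine queueing core plus the
software RSA parser and MPI helpers that the akcipher glue later in this
patch relies on. As a minimal sketch (assuming a guest kernel that already
has virtio configured), enabling the driver as a module is a two-line
config fragment:

    CONFIG_VIRTIO=y
    CONFIG_CRYPTO_DEV_VIRTIO=m
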
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 000000000..f2b839473
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
+virtio_crypto-objs := \
+ virtio_crypto_algs.o \
+ virtio_crypto_akcipher_algs.o \
+ virtio_crypto_mgr.o \
+ virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
new file mode 100644
index 000000000..2cfc36d14
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+ /* Asymmetric algorithms supported by virtio crypto device
+ *
+ * Authors: zhenwei pi <pizhenwei@bytedance.com>
+ * lei he <helei.sig11@bytedance.com>
+ *
+ * Copyright 2022 Bytedance CO., LTD.
+ */
+
+#include <linux/mpi.h>
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+struct virtio_crypto_rsa_ctx {
+ MPI n;
+};
+
+struct virtio_crypto_akcipher_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct virtio_crypto *vcrypto;
+ struct crypto_akcipher *tfm;
+ bool session_valid;
+ __u64 session_id;
+ union {
+ struct virtio_crypto_rsa_ctx rsa_ctx;
+ };
+};
+
+struct virtio_crypto_akcipher_request {
+ struct virtio_crypto_request base;
+ struct virtio_crypto_akcipher_ctx *akcipher_ctx;
+ struct akcipher_request *akcipher_req;
+ void *src_buf;
+ void *dst_buf;
+ uint32_t opcode;
+};
+
+struct virtio_crypto_akcipher_algo {
+ uint32_t algonum;
+ uint32_t service;
+ unsigned int active_devs;
+ struct akcipher_alg algo;
+};
+
+static DEFINE_MUTEX(algs_lock);
+
+static void virtio_crypto_akcipher_finalize_req(
+ struct virtio_crypto_akcipher_request *vc_akcipher_req,
+ struct akcipher_request *req, int err)
+{
+ kfree(vc_akcipher_req->src_buf);
+ kfree(vc_akcipher_req->dst_buf);
+ vc_akcipher_req->src_buf = NULL;
+ vc_akcipher_req->dst_buf = NULL;
+ virtcrypto_clear_request(&vc_akcipher_req->base);
+
+ crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
+}
+
+static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
+{
+ struct virtio_crypto_akcipher_request *vc_akcipher_req =
+ container_of(vc_req, struct virtio_crypto_akcipher_request, base);
+ struct akcipher_request *akcipher_req;
+ int error;
+
+ switch (vc_req->status) {
+ case VIRTIO_CRYPTO_OK:
+ error = 0;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ case VIRTIO_CRYPTO_ERR:
+ error = -EINVAL;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ error = -EBADMSG;
+ break;
+
+ case VIRTIO_CRYPTO_KEY_REJECTED:
+ error = -EKEYREJECTED;
+ break;
+
+ default:
+ error = -EIO;
+ break;
+ }
+
+ akcipher_req = vc_akcipher_req->akcipher_req;
+ if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
+ sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
+ vc_akcipher_req->dst_buf, akcipher_req->dst_len);
+ virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
+}
+
+static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
+ struct virtio_crypto_ctrl_header *header, void *para,
+ const uint8_t *key, unsigned int keylen)
+{
+ struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ uint8_t *pkey;
+ int err;
+ unsigned int num_out = 0, num_in = 0;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_session_input *input;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
+
+ pkey = kmemdup(key, keylen, GFP_ATOMIC);
+ if (!pkey)
+ return -ENOMEM;
+
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ctrl = &vc_ctrl_req->ctrl;
+ memcpy(&ctrl->header, header, sizeof(ctrl->header));
+ memcpy(&ctrl->u, para, sizeof(ctrl->u));
+ input = &vc_ctrl_req->input;
+ input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
+ sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
+ sgs[num_out++] = &outhdr_sg;
+
+ sg_init_one(&key_sg, pkey, keylen);
+ sgs[num_out++] = &key_sg;
+
+ sg_init_one(&inhdr_sg, input, sizeof(*input));
+ sgs[num_out + num_in++] = &inhdr_sg;
+
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+ if (err < 0)
+ goto out;
+
+ if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
+ pr_err("virtio_crypto: Create session failed status: %u\n",
+ le32_to_cpu(input->status));
+ err = -EINVAL;
+ goto out;
+ }
+
+ ctx->session_id = le64_to_cpu(input->session_id);
+ ctx->session_valid = true;
+ err = 0;
+
+out:
+ kfree(vc_ctrl_req);
+ kfree_sensitive(pkey);
+
+ return err;
+}
+
+static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
+{
+ struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
+ struct virtio_crypto_destroy_session_req *destroy_session;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ unsigned int num_out = 0, num_in = 0;
+ int err;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_inhdr *ctrl_status;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
+
+ if (!ctx->session_valid)
+ return 0;
+
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req)
+ return -ENOMEM;
+
+ ctrl_status = &vc_ctrl_req->ctrl_status;
+ ctrl_status->status = VIRTIO_CRYPTO_ERR;
+ ctrl = &vc_ctrl_req->ctrl;
+ ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
+ ctrl->header.queue_id = 0;
+
+ destroy_session = &ctrl->u.destroy_session;
+ destroy_session->session_id = cpu_to_le64(ctx->session_id);
+
+ sg_init_one(&outhdr_sg, ctrl, sizeof(*ctrl));
+ sgs[num_out++] = &outhdr_sg;
+
+ sg_init_one(&inhdr_sg, &ctrl_status->status, sizeof(ctrl_status->status));
+ sgs[num_out + num_in++] = &inhdr_sg;
+
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+ if (err < 0)
+ goto out;
+
+ if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
+ pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+ ctrl_status->status, destroy_session->session_id);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = 0;
+ ctx->session_valid = false;
+
+out:
+ kfree(vc_ctrl_req);
+
+ return err;
+}
+
+static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
+ struct akcipher_request *req, struct data_queue *data_vq)
+{
+ struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+ struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
+ struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
+ void *src_buf = NULL, *dst_buf = NULL;
+ unsigned int num_out = 0, num_in = 0;
+ int node = dev_to_node(&vcrypto->vdev->dev);
+ unsigned long flags;
+ int ret = -ENOMEM;
+ bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
+ unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
+
+ /* out header */
+ sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
+ sgs[num_out++] = &outhdr_sg;
+
+ /* src data */
+ src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
+ if (!src_buf)
+ goto err;
+
+ if (verify) {
+ /* for a verify operation, both src and dst data are in the OUT direction */
+ sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
+ sg_init_one(&srcdata_sg, src_buf, src_len);
+ sgs[num_out++] = &srcdata_sg;
+ } else {
+ sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
+ sg_init_one(&srcdata_sg, src_buf, src_len);
+ sgs[num_out++] = &srcdata_sg;
+
+ /* dst data */
+ dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
+ if (!dst_buf)
+ goto err;
+
+ sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
+ sgs[num_out + num_in++] = &dstdata_sg;
+ }
+
+ vc_akcipher_req->src_buf = src_buf;
+ vc_akcipher_req->dst_buf = dst_buf;
+
+ /* in header */
+ sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
+ sgs[num_out + num_in++] = &inhdr_sg;
+
+ spin_lock_irqsave(&data_vq->lock, flags);
+ ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
+ virtqueue_kick(data_vq->vq);
+ spin_unlock_irqrestore(&data_vq->lock, flags);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(src_buf);
+ kfree(dst_buf);
+
+ return ret;
+}
+
+static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
+{
+ struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
+ struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+ struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ struct data_queue *data_vq = vc_req->dataq;
+ struct virtio_crypto_op_header *header;
+ struct virtio_crypto_akcipher_data_req *akcipher_req;
+ int ret;
+
+ vc_req->sgs = NULL;
+ vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
+ GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
+ if (!vc_req->req_data)
+ return -ENOMEM;
+
+ /* build request header */
+ header = &vc_req->req_data->header;
+ header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
+ header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+ header->session_id = cpu_to_le64(ctx->session_id);
+
+ /* build request akcipher data */
+ akcipher_req = &vc_req->req_data->u.akcipher_req;
+ akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
+ akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);
+
+ ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
+ if (ret < 0) {
+ kfree_sensitive(vc_req->req_data);
+ vc_req->req_data = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
+{
+ struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
+ struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
+ struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+ vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
+ vc_akcipher_req->akcipher_ctx = ctx;
+ vc_akcipher_req->akcipher_req = req;
+ vc_akcipher_req->opcode = opcode;
+
+ return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
+{
+ return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
+}
+
+static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
+{
+ return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
+}
+
+static int virtio_crypto_rsa_sign(struct akcipher_request *req)
+{
+ return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
+}
+
+static int virtio_crypto_rsa_verify(struct akcipher_request *req)
+{
+ return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
+}
+
+static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
+ const void *key,
+ unsigned int keylen,
+ bool private,
+ int padding_algo,
+ int hash_algo)
+{
+ struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+ struct virtio_crypto *vcrypto;
+ struct virtio_crypto_ctrl_header header;
+ struct virtio_crypto_akcipher_session_para para;
+ struct rsa_key rsa_key = {0};
+ int node = virtio_crypto_get_current_node();
+ uint32_t keytype;
+ int ret;
+
+ /* mpi_free() tolerates NULL, so free unconditionally. */
+ mpi_free(rsa_ctx->n);
+ rsa_ctx->n = NULL;
+
+ if (private) {
+ keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
+ ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+ } else {
+ keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
+ ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+ }
+
+ if (ret)
+ return ret;
+
+ rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
+ if (!rsa_ctx->n)
+ return -ENOMEM;
+
+ if (!ctx->vcrypto) {
+ vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+ VIRTIO_CRYPTO_AKCIPHER_RSA);
+ if (!vcrypto) {
+ pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
+ return -ENODEV;
+ }
+
+ ctx->vcrypto = vcrypto;
+ } else {
+ virtio_crypto_alg_akcipher_close_session(ctx);
+ }
+
+ /* set ctrl header */
+ header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
+ header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+ header.queue_id = 0;
+
+ /* set RSA para */
+ para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
+ para.keytype = cpu_to_le32(keytype);
+ para.keylen = cpu_to_le32(keylen);
+ para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
+ para.u.rsa.hash_algo = cpu_to_le32(hash_algo);
+
+ return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
+}
+
+static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
+ const void *key,
+ unsigned int keylen)
+{
+ return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
+ VIRTIO_CRYPTO_RSA_RAW_PADDING,
+ VIRTIO_CRYPTO_RSA_NO_HASH);
+}
+
+
+static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
+ const void *key,
+ unsigned int keylen)
+{
+ return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
+ VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
+ VIRTIO_CRYPTO_RSA_SHA1);
+}
+
+static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
+ const void *key,
+ unsigned int keylen)
+{
+ return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
+ VIRTIO_CRYPTO_RSA_RAW_PADDING,
+ VIRTIO_CRYPTO_RSA_NO_HASH);
+}
+
+static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
+ const void *key,
+ unsigned int keylen)
+{
+ return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
+ VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
+ VIRTIO_CRYPTO_RSA_SHA1);
+}
+
+static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+
+ return mpi_get_size(rsa_ctx->n);
+}
+
+static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ ctx->tfm = tfm;
+ ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
+
+ virtio_crypto_alg_akcipher_close_session(ctx);
+ virtcrypto_dev_put(ctx->vcrypto);
+ mpi_free(rsa_ctx->n);
+ rsa_ctx->n = NULL;
+}
+
+static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
+ {
+ .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
+ .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+ .algo = {
+ .encrypt = virtio_crypto_rsa_encrypt,
+ .decrypt = virtio_crypto_rsa_decrypt,
+ .set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
+ .set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
+ .max_size = virtio_crypto_rsa_max_size,
+ .init = virtio_crypto_rsa_init_tfm,
+ .exit = virtio_crypto_rsa_exit_tfm,
+ .reqsize = sizeof(struct virtio_crypto_akcipher_request),
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "virtio-crypto-rsa",
+ .cra_priority = 150,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
+ },
+ },
+ },
+ {
+ .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
+ .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
+ .algo = {
+ .encrypt = virtio_crypto_rsa_encrypt,
+ .decrypt = virtio_crypto_rsa_decrypt,
+ .sign = virtio_crypto_rsa_sign,
+ .verify = virtio_crypto_rsa_verify,
+ .set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
+ .set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
+ .max_size = virtio_crypto_rsa_max_size,
+ .init = virtio_crypto_rsa_init_tfm,
+ .exit = virtio_crypto_rsa_exit_tfm,
+ .reqsize = sizeof(struct virtio_crypto_akcipher_request),
+ .base = {
+ .cra_name = "pkcs1pad(rsa,sha1)",
+ .cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
+ .cra_priority = 150,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
+ },
+ },
+ },
+};
+
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
+{
+ int ret = 0;
+ int i = 0;
+
+ mutex_lock(&algs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
+ uint32_t service = virtio_crypto_akcipher_algs[i].service;
+ uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
+
+ if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
+ ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+ if (ret)
+ goto unlock;
+ }
+
+ virtio_crypto_akcipher_algs[i].active_devs++;
+ dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
+ virtio_crypto_akcipher_algs[i].algo.base.cra_name);
+ }
+
+unlock:
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
+{
+ int i = 0;
+
+ mutex_lock(&algs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
+ uint32_t service = virtio_crypto_akcipher_algs[i].service;
+ uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;
+
+ if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
+ !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_akcipher_algs[i].active_devs == 1)
+ crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+
+ virtio_crypto_akcipher_algs[i].active_devs--;
+ }
+
+ mutex_unlock(&algs_lock);
+}
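
Both akcipher_alg instances above plug into the generic kernel crypto API,
so in-kernel consumers reach them through crypto_alloc_akcipher() rather
than any driver-specific entry point. Below is a minimal, hypothetical
sketch of that consumer side; the example_* names and the synchronous
crypto_wait_req() pattern are illustrative, not part of this patch:

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>
    #include <crypto/akcipher.h>

    /* Hypothetical helper; all example_* names are illustrative only. */
    static int example_rsa_encrypt(const void *pub_key, unsigned int keylen,
                                   struct scatterlist *src, unsigned int src_len,
                                   struct scatterlist *dst, unsigned int dst_len)
    {
            DECLARE_CRYPTO_WAIT(wait);
            struct crypto_akcipher *tfm;
            struct akcipher_request *req = NULL;
            int err;

            /* Resolves to "virtio-crypto-rsa" when it is the best "rsa" provider. */
            tfm = crypto_alloc_akcipher("rsa", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_akcipher_set_pub_key(tfm, pub_key, keylen);
            if (err)
                    goto out;

            req = akcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out;
            }

            akcipher_request_set_crypt(req, src, dst, src_len, dst_len);
            akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);
            /* The engine completes asynchronously; block until the callback fires. */
            err = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);
    out:
            akcipher_request_free(req);
            crypto_free_akcipher(tfm);
            return err;
    }
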
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
new file mode 100644
index 000000000..ae645c9d4
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+ /* Algorithms supported by virtio crypto device
+ *
+ * Authors: Gonglei <arei.gonglei@huawei.com>
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+
+struct virtio_crypto_skcipher_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct virtio_crypto *vcrypto;
+ struct crypto_skcipher *tfm;
+
+ struct virtio_crypto_sym_session_info enc_sess_info;
+ struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_sym_request {
+ struct virtio_crypto_request base;
+
+ /* Cipher or aead */
+ uint32_t type;
+ struct virtio_crypto_skcipher_ctx *skcipher_ctx;
+ struct skcipher_request *skcipher_req;
+ uint8_t *iv;
+ /* Encryption? */
+ bool encrypt;
+};
+
+struct virtio_crypto_algo {
+ uint32_t algonum;
+ uint32_t service;
+ unsigned int active_devs;
+ struct skcipher_alg algo;
+};
+
+/*
+ * The algs_lock protects the per-algorithm active_devs counts below
+ * and crypto algorithm registration.
+ */
+static DEFINE_MUTEX(algs_lock);
+static void virtio_crypto_skcipher_finalize_req(
+ struct virtio_crypto_sym_request *vc_sym_req,
+ struct skcipher_request *req,
+ int err);
+
+static void virtio_crypto_dataq_sym_callback
+ (struct virtio_crypto_request *vc_req, int len)
+{
+ struct virtio_crypto_sym_request *vc_sym_req =
+ container_of(vc_req, struct virtio_crypto_sym_request, base);
+ struct skcipher_request *ablk_req;
+ int error;
+
+ /* Finish the encrypt or decrypt process */
+ if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ switch (vc_req->status) {
+ case VIRTIO_CRYPTO_OK:
+ error = 0;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ case VIRTIO_CRYPTO_ERR:
+ error = -EINVAL;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ error = -EBADMSG;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+ ablk_req = vc_sym_req->skcipher_req;
+ virtio_crypto_skcipher_finalize_req(vc_sym_req,
+ ablk_req, error);
+ }
+}
+
+static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
+{
+ u64 total = 0;
+
+ for (total = 0; sg; sg = sg_next(sg))
+ total += sg->length;
+
+ return total;
+}
+
+static int
+virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
+{
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int virtio_crypto_alg_skcipher_init_session(
+ struct virtio_crypto_skcipher_ctx *ctx,
+ uint32_t alg, const uint8_t *key,
+ unsigned int keylen,
+ int encrypt)
+{
+ struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
+ int err;
+ unsigned int num_out = 0, num_in = 0;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_session_input *input;
+ struct virtio_crypto_sym_create_session_req *sym_create_session;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
+
+ /*
+ * Avoid DMA from the stack: use a dynamically allocated
+ * buffer for the key instead.
+ */
+ uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);
+
+ if (!cipher_key)
+ return -ENOMEM;
+
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Pad ctrl header */
+ ctrl = &vc_ctrl_req->ctrl;
+ ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+ ctrl->header.algo = cpu_to_le32(alg);
+ /* Set the default dataqueue id to 0 */
+ ctrl->header.queue_id = 0;
+
+ input = &vc_ctrl_req->input;
+ input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+ /* Pad cipher's parameters */
+ sym_create_session = &ctrl->u.sym_create_session;
+ sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ sym_create_session->u.cipher.para.algo = ctrl->header.algo;
+ sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
+ sym_create_session->u.cipher.para.op = cpu_to_le32(op);
+
+ sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
+ sgs[num_out++] = &outhdr;
+
+ /* Set key */
+ sg_init_one(&key_sg, cipher_key, keylen);
+ sgs[num_out++] = &key_sg;
+
+ /* Return status and session id back */
+ sg_init_one(&inhdr, input, sizeof(*input));
+ sgs[num_out + num_in++] = &inhdr;
+
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+ if (err < 0)
+ goto out;
+
+ if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
+ pr_err("virtio_crypto: Create session failed status: %u\n",
+ le32_to_cpu(input->status));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (encrypt)
+ ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
+ else
+ ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);
+
+ err = 0;
+out:
+ kfree(vc_ctrl_req);
+ kfree_sensitive(cipher_key);
+ return err;
+}
+
+static int virtio_crypto_alg_skcipher_close_session(
+ struct virtio_crypto_skcipher_ctx *ctx,
+ int encrypt)
+{
+ struct scatterlist outhdr, status_sg, *sgs[2];
+ struct virtio_crypto_destroy_session_req *destroy_session;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ int err;
+ unsigned int num_out = 0, num_in = 0;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct virtio_crypto_inhdr *ctrl_status;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
+
+ vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
+ if (!vc_ctrl_req)
+ return -ENOMEM;
+
+ ctrl_status = &vc_ctrl_req->ctrl_status;
+ ctrl_status->status = VIRTIO_CRYPTO_ERR;
+ /* Pad ctrl header */
+ ctrl = &vc_ctrl_req->ctrl;
+ ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+ /* Set the default virtqueue id to 0 */
+ ctrl->header.queue_id = 0;
+
+ destroy_session = &ctrl->u.destroy_session;
+
+ if (encrypt)
+ destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
+ else
+ destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);
+
+ sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
+ sgs[num_out++] = &outhdr;
+
+ /* Return status and session id back */
+ sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
+ sgs[num_out + num_in++] = &status_sg;
+
+ err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
+ if (err < 0)
+ goto out;
+
+ if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
+ pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+ ctrl_status->status, destroy_session->session_id);
+
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = 0;
+out:
+ kfree(vc_ctrl_req);
+ return err;
+}
+
+static int virtio_crypto_alg_skcipher_init_sessions(
+ struct virtio_crypto_skcipher_ctx *ctx,
+ const uint8_t *key, unsigned int keylen)
+{
+ uint32_t alg;
+ int ret;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+
+ if (keylen > vcrypto->max_cipher_key_len) {
+ pr_err("virtio_crypto: the key is too long\n");
+ return -EINVAL;
+ }
+
+ if (virtio_crypto_alg_validate_key(keylen, &alg))
+ return -EINVAL;
+
+ /* Create encryption session */
+ ret = virtio_crypto_alg_skcipher_init_session(ctx,
+ alg, key, keylen, 1);
+ if (ret)
+ return ret;
+ /* Create decryption session */
+ ret = virtio_crypto_alg_skcipher_init_session(ctx,
+ alg, key, keylen, 0);
+ if (ret) {
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
+ return ret;
+ }
+ return 0;
+}
+
+/* Note: kernel crypto API implementation */
+static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+ const uint8_t *key,
+ unsigned int keylen)
+{
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ uint32_t alg;
+ int ret;
+
+ ret = virtio_crypto_alg_validate_key(keylen, &alg);
+ if (ret)
+ return ret;
+
+ if (!ctx->vcrypto) {
+ /* New key */
+ int node = virtio_crypto_get_current_node();
+ struct virtio_crypto *vcrypto =
+ virtcrypto_get_dev_node(node,
+ VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
+ if (!vcrypto) {
+ pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
+ return -ENODEV;
+ }
+
+ ctx->vcrypto = vcrypto;
+ } else {
+ /* Rekeying, we should close the created sessions previously */
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 0);
+ }
+
+ ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
+ if (ret) {
+ virtcrypto_dev_put(ctx->vcrypto);
+ ctx->vcrypto = NULL;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+ struct skcipher_request *req,
+ struct data_queue *data_vq)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ struct virtio_crypto_op_data_req *req_data;
+ int src_nents, dst_nents;
+ int err;
+ unsigned long flags;
+ struct scatterlist outhdr, iv_sg, status_sg, **sgs;
+ u64 dst_len;
+ unsigned int num_out = 0, num_in = 0;
+ int sg_total;
+ uint8_t *iv;
+ struct scatterlist *sg;
+
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (src_nents < 0) {
+ pr_err("Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ dst_nents = sg_nents(req->dst);
+
+ pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
+ src_nents, dst_nents);
+
+ /* Why 3? outhdr + iv + inhdr */
+ sg_total = src_nents + dst_nents + 3;
+ sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!sgs)
+ return -ENOMEM;
+
+ req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!req_data) {
+ kfree(sgs);
+ return -ENOMEM;
+ }
+
+ vc_req->req_data = req_data;
+ vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ /* Head of operation */
+ if (vc_sym_req->encrypt) {
+ req_data->header.session_id =
+ cpu_to_le64(ctx->enc_sess_info.session_id);
+ req_data->header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
+ } else {
+ req_data->header.session_id =
+ cpu_to_le64(ctx->dec_sess_info.session_id);
+ req_data->header.opcode =
+ cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
+ }
+ req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+ req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ cpu_to_le32(req->cryptlen);
+
+ dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
+ if (unlikely(dst_len > U32_MAX)) {
+ pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
+ err = -EINVAL;
+ goto free;
+ }
+
+ dst_len = min_t(unsigned int, req->cryptlen, dst_len);
+ pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
+ req->cryptlen, dst_len);
+
+ if (unlikely(req->cryptlen + dst_len + ivsize +
+ sizeof(vc_req->status) > vcrypto->max_size)) {
+ pr_err("virtio_crypto: The length is too big\n");
+ err = -EINVAL;
+ goto free;
+ }
+
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ cpu_to_le32((uint32_t)dst_len);
+
+ /* Outhdr */
+ sg_init_one(&outhdr, req_data, sizeof(*req_data));
+ sgs[num_out++] = &outhdr;
+
+ /* IV */
+
+ /*
+ * Avoid DMA from the stack: use a dynamically allocated
+ * buffer for the IV instead.
+ */
+ iv = kzalloc_node(ivsize, GFP_ATOMIC,
+ dev_to_node(&vcrypto->vdev->dev));
+ if (!iv) {
+ err = -ENOMEM;
+ goto free;
+ }
+ memcpy(iv, req->iv, ivsize);
+ if (!vc_sym_req->encrypt)
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+
+ sg_init_one(&iv_sg, iv, ivsize);
+ sgs[num_out++] = &iv_sg;
+ vc_sym_req->iv = iv;
+
+ /* Source data */
+ for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
+ sgs[num_out++] = sg;
+
+ /* Destination data */
+ for (sg = req->dst; sg; sg = sg_next(sg))
+ sgs[num_out + num_in++] = sg;
+
+ /* Status */
+ sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
+ sgs[num_out + num_in++] = &status_sg;
+
+ vc_req->sgs = sgs;
+
+ spin_lock_irqsave(&data_vq->lock, flags);
+ err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
+ num_in, vc_req, GFP_ATOMIC);
+ virtqueue_kick(data_vq->vq);
+ spin_unlock_irqrestore(&data_vq->lock, flags);
+ if (unlikely(err < 0))
+ goto free_iv;
+
+ return 0;
+
+free_iv:
+ kfree_sensitive(iv);
+free:
+ kfree_sensitive(req_data);
+ kfree(sgs);
+ return err;
+}
+
+static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ skcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+ if (!req->cryptlen)
+ return 0;
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->skcipher_ctx = ctx;
+ vc_sym_req->skcipher_req = req;
+ vc_sym_req->encrypt = true;
+
+ return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ skcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+ struct virtio_crypto *vcrypto = ctx->vcrypto;
+ /* Use the first data virtqueue as default */
+ struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+ if (!req->cryptlen)
+ return 0;
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->skcipher_ctx = ctx;
+ vc_sym_req->skcipher_req = req;
+ vc_sym_req->encrypt = false;
+
+ return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
+}
+
+static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
+ ctx->tfm = tfm;
+
+ ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+ return 0;
+}
+
+static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (!ctx->vcrypto)
+ return;
+
+ virtio_crypto_alg_skcipher_close_session(ctx, 1);
+ virtio_crypto_alg_skcipher_close_session(ctx, 0);
+ virtcrypto_dev_put(ctx->vcrypto);
+ ctx->vcrypto = NULL;
+}
+
+int virtio_crypto_skcipher_crypt_req(
+ struct crypto_engine *engine, void *vreq)
+{
+ struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ skcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
+ struct data_queue *data_vq = vc_req->dataq;
+ int ret;
+
+ ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
+ if (ret < 0)
+ return ret;
+
+ virtqueue_kick(data_vq->vq);
+
+ return 0;
+}
+
+static void virtio_crypto_skcipher_finalize_req(
+ struct virtio_crypto_sym_request *vc_sym_req,
+ struct skcipher_request *req,
+ int err)
+{
+ if (vc_sym_req->encrypt)
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ kfree_sensitive(vc_sym_req->iv);
+ virtcrypto_clear_request(&vc_sym_req->base);
+
+ crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
+}
+
+static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+ .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
+ .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
+ .algo = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "virtio_crypto_aes_cbc",
+ .base.cra_priority = 150,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = virtio_crypto_skcipher_init,
+ .exit = virtio_crypto_skcipher_exit,
+ .setkey = virtio_crypto_skcipher_setkey,
+ .decrypt = virtio_crypto_skcipher_decrypt,
+ .encrypt = virtio_crypto_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+} };
+
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
+{
+ int ret = 0;
+ int i = 0;
+
+ mutex_lock(&algs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 0) {
+ ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
+ if (ret)
+ goto unlock;
+ }
+
+ virtio_crypto_algs[i].active_devs++;
+ dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
+ virtio_crypto_algs[i].algo.base.cra_name);
+ }
+
+unlock:
+ mutex_unlock(&algs_lock);
+ return ret;
+}
+
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
+{
+ int i = 0;
+
+ mutex_lock(&algs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (virtio_crypto_algs[i].active_devs == 0 ||
+ !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 1)
+ crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
+
+ virtio_crypto_algs[i].active_devs--;
+ }
+
+ mutex_unlock(&algs_lock);
+}
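
On the symmetric side the flow mirrors the akcipher path: setkey creates a
paired encrypt/decrypt session over the control virtqueue, and every
request is bounced through the crypto engine onto data virtqueue 0. A
hypothetical one-shot consumer might look like the sketch below (the
example_* names are illustrative; len must be a multiple of
AES_BLOCK_SIZE, as the encrypt/decrypt entry points above enforce):

    #include <linux/crypto.h>
    #include <linux/scatterlist.h>
    #include <crypto/aes.h>
    #include <crypto/skcipher.h>

    /* Hypothetical one-shot helper; all example_* names are illustrative. */
    static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
                                       u8 iv[AES_BLOCK_SIZE],
                                       struct scatterlist *src,
                                       struct scatterlist *dst,
                                       unsigned int len)
    {
            DECLARE_CRYPTO_WAIT(wait);
            struct crypto_skcipher *tfm;
            struct skcipher_request *req = NULL;
            int err;

            /* Resolves to "virtio_crypto_aes_cbc" when it has top priority. */
            tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* Triggers the encrypt/decrypt session setup on the control vq. */
            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (err)
                    goto out;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out;
            }

            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, src, dst, len, iv);
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    out:
            skcipher_request_free(req);
            crypto_free_skcipher(tfm);
            return err;
    }
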
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
new file mode 100644
index 000000000..d61c991d7
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Common header for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_COMMON_H
+#define _VIRTIO_CRYPTO_COMMON_H
+
+#include <linux/virtio.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <uapi/linux/virtio_crypto.h>
+
+
+/* Internal representation of a data virtqueue */
+struct data_queue {
+ /* Virtqueue associated with this data queue */
+ struct virtqueue *vq;
+
+ /* To protect the vq operations for the dataq */
+ spinlock_t lock;
+
+ /* Name of the data queue: dataq.$index */
+ char name[32];
+
+ struct crypto_engine *engine;
+ struct tasklet_struct done_task;
+};
+
+struct virtio_crypto {
+ struct virtio_device *vdev;
+ struct virtqueue *ctrl_vq;
+ struct data_queue *data_vq;
+
+ /* To protect the vq operations for the controlq */
+ spinlock_t ctrl_lock;
+
+ /* Maximum of data queues supported by the device */
+ u32 max_data_queues;
+
+ /* Number of queues currently used by the driver */
+ u32 curr_queue;
+
+ /*
+ * Specifies the services mask which the device supports,
+ * see VIRTIO_CRYPTO_SERVICE_*
+ */
+ u32 crypto_services;
+
+ /* Detailed algorithms mask */
+ u32 cipher_algo_l;
+ u32 cipher_algo_h;
+ u32 hash_algo;
+ u32 mac_algo_l;
+ u32 mac_algo_h;
+ u32 aead_algo;
+ u32 akcipher_algo;
+
+ /* Maximum length of cipher key */
+ u32 max_cipher_key_len;
+ /* Maximum length of authentication key */
+ u32 max_auth_key_len;
+ /* Maximum size of a single request */
+ u64 max_size;
+
+ unsigned long status;
+ atomic_t ref_count;
+ struct list_head list;
+ struct module *owner;
+ uint8_t dev_id;
+
+ /* Is the affinity hint set for the virtqueues? */
+ bool affinity_hint_set;
+};
+
+struct virtio_crypto_sym_session_info {
+ /* Backend session id, which comes from the host side */
+ __u64 session_id;
+};
+
+/*
+ * Note: there are padding fields in the request; clear them to zero before
+ * sending to the host to avoid divulging any information.
+ * E.g., virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
+ */
+struct virtio_crypto_ctrl_request {
+ struct virtio_crypto_op_ctrl_req ctrl;
+ struct virtio_crypto_session_input input;
+ struct virtio_crypto_inhdr ctrl_status;
+ struct completion compl;
+};
+
+struct virtio_crypto_request;
+typedef void (*virtio_crypto_data_callback)
+ (struct virtio_crypto_request *vc_req, int len);
+
+struct virtio_crypto_request {
+ uint8_t status;
+ struct virtio_crypto_op_data_req *req_data;
+ struct scatterlist **sgs;
+ struct data_queue *dataq;
+ virtio_crypto_data_callback alg_cb;
+};
+
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
+struct list_head *virtcrypto_devmgr_get_head(void);
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_devmgr_get_first(void);
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
+ uint32_t service,
+ uint32_t algo);
+struct virtio_crypto *virtcrypto_get_dev_node(int node,
+ uint32_t service,
+ uint32_t algo);
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
+int virtio_crypto_skcipher_crypt_req(
+ struct crypto_engine *engine, void *vreq);
+
+void
+virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
+
+static inline int virtio_crypto_get_current_node(void)
+{
+ int cpu, node;
+
+ cpu = get_cpu();
+ node = topology_physical_package_id(cpu);
+ put_cpu();
+
+ return node;
+}
+
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+ unsigned int out_sgs, unsigned int in_sgs,
+ struct virtio_crypto_ctrl_request *vc_ctrl_req);
+
+#endif /* _VIRTIO_CRYPTO_COMMON_H */
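
The alg_cb hook in struct virtio_crypto_request is the per-request
completion contract: the data-queue completion path hands back each
finished buffer together with the length the device wrote, and the
algorithm glue translates vc_req->status into an -errno and finalizes the
owning crypto-engine request. A skeletal, hypothetical illustration of
that wiring (the example_* names are illustrative only):

    #include "virtio_crypto_common.h"

    /* Hypothetical completion handler, invoked from the dataq done path. */
    static void example_dataq_callback(struct virtio_crypto_request *vc_req,
                                       int len)
    {
            /*
             * Translate vc_req->status (VIRTIO_CRYPTO_OK, _BADMSG, ...) into
             * an -errno and finalize the owning crypto-engine request, as
             * virtio_crypto_dataq_sym_callback() does for skciphers.
             */
    }

    /* Hypothetical submit path: wire up the request before adding the sgs. */
    static void example_prepare(struct virtio_crypto *vcrypto,
                                struct virtio_crypto_request *vc_req)
    {
            vc_req->dataq = &vcrypto->data_vq[0];   /* first data vq by default */
            vc_req->alg_cb = example_dataq_callback;
            /* ...then build the sgs and virtqueue_add_sgs() under dataq->lock. */
    }
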
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
new file mode 100644
index 000000000..3da956145
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+ /* Driver for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/virtio_config.h>
+#include <linux/cpu.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+
+void
+virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
+{
+ if (vc_req) {
+ kfree_sensitive(vc_req->req_data);
+ kfree(vc_req->sgs);
+ }
+}
+
+static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+ complete(&vc_ctrl_req->compl);
+}
+
+static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct virtio_crypto_ctrl_request *vc_ctrl_req;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+ virtio_crypto_ctrlq_callback(vc_ctrl_req);
+ spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+ }
+ if (unlikely(virtqueue_is_broken(vq)))
+ break;
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+}
+
+int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
+ unsigned int out_sgs, unsigned int in_sgs,
+ struct virtio_crypto_ctrl_request *vc_ctrl_req)
+{
+ int err;
+ unsigned long flags;
+
+ init_completion(&vc_ctrl_req->compl);
+
+ spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
+ err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
+ if (err < 0) {
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+ return err;
+ }
+
+ virtqueue_kick(vcrypto->ctrl_vq);
+ spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
+
+ wait_for_completion(&vc_ctrl_req->compl);
+
+ return 0;
+}
+
+static void virtcrypto_done_task(unsigned long data)
+{
+ struct data_queue *data_vq = (struct data_queue *)data;
+ struct virtqueue *vq = data_vq->vq;
+ struct virtio_crypto_request *vc_req;
+ unsigned int len;
+
+ do {
+ virtqueue_disable_cb(vq);
+ while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+ if (vc_req->alg_cb)
+ vc_req->alg_cb(vc_req, len);
+ }
+ } while (!virtqueue_enable_cb(vq));
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+ struct virtio_crypto *vcrypto = vq->vdev->priv;
+ struct data_queue *dq = &vcrypto->data_vq[vq->index];
+
+ tasklet_schedule(&dq->done_task);
+}
+
+static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+{
+ vq_callback_t **callbacks;
+ struct virtqueue **vqs;
+ int ret = -ENOMEM;
+ int i, total_vqs;
+ const char **names;
+ struct device *dev = &vi->vdev->dev;
+
+ /*
+ * We expect 1 data virtqueue, followed by
+ * up to N-1 further data queues used in multiqueue mode,
+ * followed by the control vq.
+ */
+ total_vqs = vi->max_data_queues + 1;
+
+ /* Allocate space for find_vqs parameters */
+ vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs)
+ goto err_vq;
+ callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
+ if (!callbacks)
+ goto err_callback;
+ names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
+ if (!names)
+ goto err_names;
+
+ /* Parameters for control virtqueue */
+ callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
+ names[total_vqs - 1] = "controlq";
+
+ /* Allocate/initialize parameters for data virtqueues */
+ for (i = 0; i < vi->max_data_queues; i++) {
+ callbacks[i] = virtcrypto_dataq_callback;
+ snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
+ "dataq.%d", i);
+ names[i] = vi->data_vq[i].name;
+ }
+
+ ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
+ if (ret)
+ goto err_find;
+
+ vi->ctrl_vq = vqs[total_vqs - 1];
+
+ for (i = 0; i < vi->max_data_queues; i++) {
+ spin_lock_init(&vi->data_vq[i].lock);
+ vi->data_vq[i].vq = vqs[i];
+ /* Initialize crypto engine */
+ vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
+ if (!vi->data_vq[i].engine) {
+ ret = -ENOMEM;
+ goto err_engine;
+ }
+ tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
+ (unsigned long)&vi->data_vq[i]);
+ }
+
+ kfree(names);
+ kfree(callbacks);
+ kfree(vqs);
+
+ return 0;
+
+err_engine:
+err_find:
+ kfree(names);
+err_names:
+ kfree(callbacks);
+err_callback:
+ kfree(vqs);
+err_vq:
+ return ret;
+}
+
+static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
+{
+ vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
+ GFP_KERNEL);
+ if (!vi->data_vq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
+{
+ int i;
+
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_data_queues; i++)
+ virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
+
+ vi->affinity_hint_set = false;
+ }
+}
+
+static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
+{
+ int i = 0;
+ int cpu;
+
+ /*
+ * In single queue mode, we don't set the cpu affinity.
+ */
+ if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
+ virtcrypto_clean_affinity(vcrypto, -1);
+ return;
+ }
+
+ /*
+ * In multiqueue mode, we let each queue be private to one CPU
+ * by setting the affinity hint to eliminate contention.
+ *
+ * TODO: add CPU hotplug support by registering a CPU notifier.
+ */
+ for_each_online_cpu(cpu) {
+ virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
+ if (++i >= vcrypto->max_data_queues)
+ break;
+ }
+
+ vcrypto->affinity_hint_set = true;
+}
+
+static void virtcrypto_free_queues(struct virtio_crypto *vi)
+{
+ kfree(vi->data_vq);
+}
+
+static int virtcrypto_init_vqs(struct virtio_crypto *vi)
+{
+ int ret;
+
+ /* Allocate send & receive queues */
+ ret = virtcrypto_alloc_queues(vi);
+ if (ret)
+ goto err;
+
+ ret = virtcrypto_find_vqs(vi);
+ if (ret)
+ goto err_free;
+
+ get_online_cpus();
+ virtcrypto_set_affinity(vi);
+ put_online_cpus();
+
+ return 0;
+
+err_free:
+ virtcrypto_free_queues(vi);
+err:
+ return ret;
+}
+
+static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
+{
+ u32 status;
+ int err;
+
+ virtio_cread_le(vcrypto->vdev,
+ struct virtio_crypto_config, status, &status);
+
+ /*
+ * Unknown status bits would be a host error and the driver
+ * should consider the device to be broken.
+ */
+ if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
+ dev_warn(&vcrypto->vdev->dev,
+ "Unknown status bits: 0x%x\n", status);
+
+ virtio_break_device(vcrypto->vdev);
+ return -EPERM;
+ }
+
+ if (vcrypto->status == status)
+ return 0;
+
+ vcrypto->status = status;
+
+ if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
+ err = virtcrypto_dev_start(vcrypto);
+ if (err) {
+ dev_err(&vcrypto->vdev->dev,
+ "Failed to start virtio crypto device.\n");
+
+ return -EPERM;
+ }
+ dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
+ } else {
+ virtcrypto_dev_stop(vcrypto);
+ dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
+ }
+
+ return 0;
+}
+
+static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
+{
+ int32_t i;
+ int ret;
+
+ for (i = 0; i < vcrypto->max_data_queues; i++) {
+ if (vcrypto->data_vq[i].engine) {
+ ret = crypto_engine_start(vcrypto->data_vq[i].engine);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (--i >= 0)
+ if (vcrypto->data_vq[i].engine)
+ crypto_engine_exit(vcrypto->data_vq[i].engine);
+
+ return ret;
+}
+
+static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
+{
+ u32 i;
+
+ for (i = 0; i < vcrypto->max_data_queues; i++)
+ if (vcrypto->data_vq[i].engine)
+ crypto_engine_exit(vcrypto->data_vq[i].engine);
+}
+
+static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
+{
+ struct virtio_device *vdev = vcrypto->vdev;
+
+ virtcrypto_clean_affinity(vcrypto, -1);
+
+ vdev->config->del_vqs(vdev);
+
+ virtcrypto_free_queues(vcrypto);
+}
+
+static int virtcrypto_probe(struct virtio_device *vdev)
+{
+ int err = -EFAULT;
+ struct virtio_crypto *vcrypto;
+ u32 max_data_queues = 0, max_cipher_key_len = 0;
+ u32 max_auth_key_len = 0;
+ u64 max_size = 0;
+ u32 cipher_algo_l = 0;
+ u32 cipher_algo_h = 0;
+ u32 hash_algo = 0;
+ u32 mac_algo_l = 0;
+ u32 mac_algo_h = 0;
+ u32 aead_algo = 0;
+ u32 akcipher_algo = 0;
+ u32 crypto_services = 0;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ return -ENODEV;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transactions will be very slow.
+ */
+ dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
+ dev_to_node(&vdev->dev));
+ if (!vcrypto)
+ return -ENOMEM;
+
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_dataqueues, &max_data_queues);
+ if (max_data_queues < 1)
+ max_data_queues = 1;
+
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_cipher_key_len, &max_cipher_key_len);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_auth_key_len, &max_auth_key_len);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ max_size, &max_size);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ crypto_services, &crypto_services);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ cipher_algo_l, &cipher_algo_l);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ cipher_algo_h, &cipher_algo_h);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ hash_algo, &hash_algo);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ mac_algo_l, &mac_algo_l);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ mac_algo_h, &mac_algo_h);
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ aead_algo, &aead_algo);
+ if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
+ virtio_cread_le(vdev, struct virtio_crypto_config,
+ akcipher_algo, &akcipher_algo);
+
+ /* Add virtio crypto device to global table */
+ err = virtcrypto_devmgr_add_dev(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
+ goto free;
+ }
+ vcrypto->owner = THIS_MODULE;
+ vdev->priv = vcrypto;
+ vcrypto->vdev = vdev;
+
+ spin_lock_init(&vcrypto->ctrl_lock);
+
+ /* Use single data queue as default */
+ vcrypto->curr_queue = 1;
+ vcrypto->max_data_queues = max_data_queues;
+ vcrypto->max_cipher_key_len = max_cipher_key_len;
+ vcrypto->max_auth_key_len = max_auth_key_len;
+ vcrypto->max_size = max_size;
+ vcrypto->crypto_services = crypto_services;
+ vcrypto->cipher_algo_l = cipher_algo_l;
+ vcrypto->cipher_algo_h = cipher_algo_h;
+ vcrypto->mac_algo_l = mac_algo_l;
+ vcrypto->mac_algo_h = mac_algo_h;
+ vcrypto->hash_algo = hash_algo;
+ vcrypto->aead_algo = aead_algo;
+ vcrypto->akcipher_algo = akcipher_algo;
+
+ dev_info(&vdev->dev,
+ "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
+ vcrypto->max_data_queues,
+ vcrypto->max_cipher_key_len,
+ vcrypto->max_auth_key_len,
+ vcrypto->max_size);
+
+ err = virtcrypto_init_vqs(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to initialize vqs.\n");
+ goto free_dev;
+ }
+
+ err = virtcrypto_start_crypto_engines(vcrypto);
+ if (err)
+ goto free_vqs;
+
+ virtio_device_ready(vdev);
+
+ err = virtcrypto_update_status(vcrypto);
+ if (err)
+ goto free_engines;
+
+ return 0;
+
+free_engines:
+ virtcrypto_clear_crypto_engines(vcrypto);
+free_vqs:
+ vcrypto->vdev->config->reset(vdev);
+ virtcrypto_del_vqs(vcrypto);
+free_dev:
+ virtcrypto_devmgr_rm_dev(vcrypto);
+free:
+ kfree(vcrypto);
+ return err;
+}
+
+static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
+{
+ struct virtio_crypto_request *vc_req;
+ int i;
+ struct virtqueue *vq;
+
+ for (i = 0; i < vcrypto->max_data_queues; i++) {
+ vq = vcrypto->data_vq[i].vq;
+ while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
+ kfree(vc_req->req_data);
+ kfree(vc_req->sgs);
+ }
+ }
+}
+
+static void virtcrypto_remove(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+ int i;
+
+ dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
+ for (i = 0; i < vcrypto->max_data_queues; i++)
+ tasklet_kill(&vcrypto->data_vq[i].done_task);
+ vdev->config->reset(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ virtcrypto_clear_crypto_engines(vcrypto);
+ virtcrypto_del_vqs(vcrypto);
+ virtcrypto_devmgr_rm_dev(vcrypto);
+ kfree(vcrypto);
+}
+
+static void virtcrypto_config_changed(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ virtcrypto_update_status(vcrypto);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtcrypto_freeze(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+ vdev->config->reset(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
+
+ virtcrypto_clear_crypto_engines(vcrypto);
+ virtcrypto_del_vqs(vcrypto);
+ return 0;
+}
+
+static int virtcrypto_restore(struct virtio_device *vdev)
+{
+ struct virtio_crypto *vcrypto = vdev->priv;
+ int err;
+
+ err = virtcrypto_init_vqs(vcrypto);
+ if (err)
+ return err;
+
+ err = virtcrypto_start_crypto_engines(vcrypto);
+ if (err)
+ goto free_vqs;
+
+ virtio_device_ready(vdev);
+
+ err = virtcrypto_dev_start(vcrypto);
+ if (err) {
+ dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
+ goto free_engines;
+ }
+
+ return 0;
+
+free_engines:
+ virtcrypto_clear_crypto_engines(vcrypto);
+free_vqs:
+ vcrypto->vdev->config->reset(vdev);
+ virtcrypto_del_vqs(vcrypto);
+ return err;
+}
+#endif
+
+static const unsigned int features[] = {
+ /* none */
+};
+
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_crypto_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = virtcrypto_probe,
+ .remove = virtcrypto_remove,
+ .config_changed = virtcrypto_config_changed,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtcrypto_freeze,
+ .restore = virtcrypto_restore,
+#endif
+};
+
+module_virtio_driver(virtio_crypto_driver);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("virtio crypto device driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
new file mode 100644
index 000000000..1cb92418b
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Management for virtio crypto devices (refer to adf_dev_mgr.c)
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+static LIST_HEAD(virtio_crypto_table);
+static uint32_t num_devices;
+
+/* The table_lock protects the above global list and num_devices */
+static DEFINE_MUTEX(table_lock);
+
+#define VIRTIO_CRYPTO_MAX_DEVICES 32
+
+/*
+ * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
+ * framework.
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Function adds virtio crypto device to the global list.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
+{
+ struct list_head *itr;
+
+ mutex_lock(&table_lock);
+ if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
+ pr_info("virtio_crypto: only support up to %d devices\n",
+ VIRTIO_CRYPTO_MAX_DEVICES);
+ mutex_unlock(&table_lock);
+ return -EFAULT;
+ }
+
+ list_for_each(itr, &virtio_crypto_table) {
+ struct virtio_crypto *ptr =
+ list_entry(itr, struct virtio_crypto, list);
+
+ if (ptr == vcrypto_dev) {
+ mutex_unlock(&table_lock);
+ return -EEXIST;
+ }
+ }
+ atomic_set(&vcrypto_dev->ref_count, 0);
+ list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
+ vcrypto_dev->dev_id = num_devices++;
+ mutex_unlock(&table_lock);
+ return 0;
+}
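+
+/*
+ * Illustrative sketch only (not part of this file): callers are expected
+ * to pair virtcrypto_devmgr_add_dev() with virtcrypto_devmgr_rm_dev() on
+ * their error and teardown paths, as virtcrypto_probe() does:
+ *
+ *	err = virtcrypto_devmgr_add_dev(vcrypto);
+ *	if (err)
+ *		goto free;			// kfree(vcrypto)
+ *	...
+ *	virtcrypto_devmgr_rm_dev(vcrypto);	// on failure or remove
+ */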
+
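+/*
+ * virtcrypto_devmgr_get_head() - Get the head of the registered device list
+ *
+ * Hold table_lock when walking the list to guard against concurrent
+ * registration and removal.
+ *
+ * Return: pointer to the global virtio crypto device list.
+ */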
+struct list_head *virtcrypto_devmgr_get_head(void)
+{
+ return &virtio_crypto_table;
+}
+
+/*
+ * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
+ * framework.
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Function removes virtio crypto device from the acceleration framework.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
+{
+ mutex_lock(&table_lock);
+ list_del(&vcrypto_dev->list);
+ num_devices--;
+ mutex_unlock(&table_lock);
+}
+
+/*
+ * virtcrypto_devmgr_get_first() - Get the first registered device
+ *
+ * Function returns the first virtio crypto device from the acceleration
+ * framework.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_devmgr_get_first(void)
+{
+ struct virtio_crypto *dev = NULL;
+
+ mutex_lock(&table_lock);
+ if (!list_empty(&virtio_crypto_table))
+ dev = list_first_entry(&virtio_crypto_table,
+ struct virtio_crypto,
+ list);
+ mutex_unlock(&table_lock);
+ return dev;
+}
+
+/*
+ * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
+{
+ return atomic_read(&vcrypto_dev->ref_count) != 0;
+}
+
+/*
+ * virtcrypto_dev_get() - Increment vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Increment the vcrypto_dev refcount; on the transition from zero
+ * (i.e. the first reference in this period of use), take a reference
+ * on the owning module as well.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when the module refcount cannot be
+ * bumped.
+ */
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
+{
+ if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
+ if (!try_module_get(vcrypto_dev->owner))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Decrement the vcrypto_dev refcount; on the transition back to zero
+ * (i.e. the last reference in this period of use), drop the module
+ * reference as well.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
+{
+ if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
+ module_put(vcrypto_dev->owner);
+}
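+
+/*
+ * Illustrative sketch only: each period of use brackets the device with a
+ * get/put pair; only the 0 -> 1 and 1 -> 0 refcount transitions touch the
+ * module refcount:
+ *
+ *	if (virtcrypto_dev_get(vcrypto))	// may take a module ref
+ *		return -EFAULT;
+ *	... submit requests ...
+ *	virtcrypto_dev_put(vcrypto);		// may drop the module ref
+ */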
+
+/*
+ * virtcrypto_dev_started() - Check whether device has started
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
+{
+ return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
+}
+
+/*
+ * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
+ * @node: Node id on which the caller runs.
+ * @service: Crypto service that must be supported by the device.
+ * @algo: The algorithm number that must be supported by the device.
+ *
+ * Function returns the least-used virtio crypto device on the given node
+ * that supports the given crypto service and algorithm.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
+ uint32_t algo)
+{
+ struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
+ unsigned long best = ~0;
+ unsigned long ctr;
+
+ mutex_lock(&table_lock);
+ list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
+ if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
+ dev_to_node(&tmp_dev->vdev->dev) < 0) &&
+ virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
+ ctr = atomic_read(&tmp_dev->ref_count);
+ if (best > ctr) {
+ vcrypto_dev = tmp_dev;
+ best = ctr;
+ }
+ }
+ }
+
+ if (!vcrypto_dev) {
+ pr_info("virtio_crypto: Could not find a device on node %d\n",
+ node);
+ /* Get any started device */
+ list_for_each_entry(tmp_dev,
+ virtcrypto_devmgr_get_head(), list) {
+ if (virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev,
+ service, algo)) {
+ vcrypto_dev = tmp_dev;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&table_lock);
+ if (!vcrypto_dev)
+ return NULL;
+
+ virtcrypto_dev_get(vcrypto_dev);
+ return vcrypto_dev;
+}
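+
+/*
+ * Illustrative sketch only: a tfm init path would typically pick a device
+ * near the caller and drop the reference again when the tfm is freed,
+ * roughly as the algs code in this driver does:
+ *
+ *	vcrypto = virtcrypto_get_dev_node(virtio_crypto_get_current_node(),
+ *			VIRTIO_CRYPTO_SERVICE_CIPHER,
+ *			VIRTIO_CRYPTO_CIPHER_AES_CBC);
+ *	if (!vcrypto)
+ *		return -ENODEV;
+ *	...
+ *	virtcrypto_dev_put(vcrypto);
+ *
+ * Note that virtcrypto_get_dev_node() already takes the reference via
+ * virtcrypto_dev_get(), so only the put is needed afterwards.
+ */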
+
+/*
+ * virtcrypto_dev_start() - Start virtio crypto device
+ * @vcrypto: Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is ready to be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, -EFAULT when algorithm registration fails
+ */
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
+{
+ if (virtio_crypto_algs_register(vcrypto)) {
+ pr_err("virtio_crypto: Failed to register crypto algs\n");
+ return -EFAULT;
+ }
+
+ if (virtio_crypto_akcipher_algs_register(vcrypto)) {
+ pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
+ virtio_crypto_algs_unregister(vcrypto);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * virtcrypto_dev_stop() - Stop virtio crypto device
+ * @vcrypto: Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto
+ * device is being stopped and can no longer be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
+{
+ virtio_crypto_algs_unregister(vcrypto);
+ virtio_crypto_akcipher_algs_unregister(vcrypto);
+}
+
+/*
+ * virtcrypto_algo_is_supported() - Validate whether the device supports a
+ * service and algorithm
+ * @vcrypto: Pointer to virtio crypto device.
+ * @service: The bit number of the service to validate.
+ *           See VIRTIO_CRYPTO_SERVICE_*.
+ * @algo: The bit number of the algorithm to validate.
+ *
+ * Return: true if the device supports the given service and algorithm.
+ */
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
+ uint32_t service,
+ uint32_t algo)
+{
+ uint32_t service_mask = 1u << service;
+ uint32_t algo_mask = 0;
+ bool low = true;
+
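+	/* Algo numbers 0-31 map to the _l masks, 32-63 to the _h masks */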
+ if (algo > 31) {
+ algo -= 32;
+ low = false;
+ }
+
+ if (!(vcrypto->crypto_services & service_mask))
+ return false;
+
+ switch (service) {
+ case VIRTIO_CRYPTO_SERVICE_CIPHER:
+ if (low)
+ algo_mask = vcrypto->cipher_algo_l;
+ else
+ algo_mask = vcrypto->cipher_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_HASH:
+ algo_mask = vcrypto->hash_algo;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_MAC:
+ if (low)
+ algo_mask = vcrypto->mac_algo_l;
+ else
+ algo_mask = vcrypto->mac_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_AEAD:
+ algo_mask = vcrypto->aead_algo;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
+ algo_mask = vcrypto->akcipher_algo;
+ break;
+ }
+
+ if (!(algo_mask & (1u << algo)))
+ return false;
+
+ return true;
+}
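+
+/*
+ * Worked example (values from the virtio-crypto spec): for service
+ * VIRTIO_CRYPTO_SERVICE_CIPHER (bit 0) and algo
+ * VIRTIO_CRYPTO_CIPHER_AES_CBC (bit 3), the device must advertise bit 0
+ * in crypto_services and bit 3 in cipher_algo_l; a cipher algo numbered
+ * above 31 would instead be checked against bit (algo - 32) of
+ * cipher_algo_h.
+ */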