path: root/drivers/s390/crypto
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/s390/crypto
parent    Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/s390/crypto')
-rw-r--r--  drivers/s390/crypto/Makefile             |   22
-rw-r--r--  drivers/s390/crypto/ap_bus.c             | 1736
-rw-r--r--  drivers/s390/crypto/ap_bus.h             |  347
-rw-r--r--  drivers/s390/crypto/ap_card.c            |  225
-rw-r--r--  drivers/s390/crypto/ap_debug.h           |   34
-rw-r--r--  drivers/s390/crypto/ap_queue.c           |  900
-rw-r--r--  drivers/s390/crypto/pkey_api.c           | 2100
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c        |  215
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c        | 1328
-rw-r--r--  drivers/s390/crypto/vfio_ap_private.h    |  104
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c         | 2143
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h         |  187
-rw-r--r--  drivers/s390/crypto/zcrypt_card.c        |  197
-rw-r--r--  drivers/s390/crypto/zcrypt_cca_key.h     |  248
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.c     | 1970
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.h     |  270
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c       |  233
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.h       |  134
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.c       |  429
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.h       |   18
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c        |  712
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.h        |   13
-rw-r--r--  drivers/s390/crypto/zcrypt_debug.h       |   38
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.c    | 1470
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.h    |  148
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h       |  139
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c   |  567
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.h   |   29
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c    | 1374
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.h    |  164
-rw-r--r--  drivers/s390/crypto/zcrypt_queue.c       |  228
31 files changed, 17722 insertions(+), 0 deletions(-)
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
new file mode 100644
index 000000000..22d2db690
--- /dev/null
+++ b/drivers/s390/crypto/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# S/390 crypto devices
+#
+
+ap-objs := ap_bus.o ap_card.o ap_queue.o
+obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
+# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
+zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
+obj-$(CONFIG_ZCRYPT) += zcrypt.o
+# adapter drivers depend on ap.o and zcrypt.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
+
+# pkey kernel module
+pkey-objs := pkey_api.o
+obj-$(CONFIG_PKEY) += pkey.o
+
+# adjunct processor matrix
+vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
+obj-$(CONFIG_VFIO_AP) += vfio_ap.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
new file mode 100644
index 000000000..c00a288a4
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.c
@@ -0,0 +1,1736 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2006, 2012
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * Felix Beck <felix.beck@de.ibm.com>
+ * Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * Adjunct processor bus.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel_stat.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <asm/airq.h>
+#include <linux/atomic.h>
+#include <asm/isc.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <asm/facility.h>
+#include <linux/crypto.h>
+#include <linux/mod_devicetable.h>
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+
+#include "ap_bus.h"
+#include "ap_debug.h"
+
+/*
+ * Module parameters; note though this file itself isn't modular.
+ */
+int ap_domain_index = -1; /* Adjunct Processor Domain Index */
+static DEFINE_SPINLOCK(ap_domain_lock);
+module_param_named(domain, ap_domain_index, int, 0440);
+MODULE_PARM_DESC(domain, "domain index for ap devices");
+EXPORT_SYMBOL(ap_domain_index);
+
+static int ap_thread_flag;
+module_param_named(poll_thread, ap_thread_flag, int, 0440);
+MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
+
+static char *apm_str;
+module_param_named(apmask, apm_str, charp, 0440);
+MODULE_PARM_DESC(apmask, "AP bus adapter mask.");
+
+static char *aqm_str;
+module_param_named(aqmask, aqm_str, charp, 0440);
+MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
+
+static struct device *ap_root_device;
+
+/* Hashtable of all queue devices on the AP bus */
+DEFINE_HASHTABLE(ap_queues, 8);
+/* lock used for the ap_queues hashtable */
+DEFINE_SPINLOCK(ap_queues_lock);
+
+/* Default permissions (ioctl, card and domain masking) */
+struct ap_perms ap_perms;
+EXPORT_SYMBOL(ap_perms);
+DEFINE_MUTEX(ap_perms_mutex);
+EXPORT_SYMBOL(ap_perms_mutex);
+
+static struct ap_config_info *ap_qci_info;
+
+/*
+ * AP bus related debug feature things.
+ */
+debug_info_t *ap_dbf_info;
+
+/*
+ * Workqueue timer for bus rescan.
+ */
+static struct timer_list ap_config_timer;
+static int ap_config_time = AP_CONFIG_TIME;
+static void ap_scan_bus(struct work_struct *);
+static DECLARE_WORK(ap_scan_work, ap_scan_bus);
+
+/*
+ * Tasklet & timer for AP request polling and interrupts
+ */
+static void ap_tasklet_fn(unsigned long);
+static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
+static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
+static struct task_struct *ap_poll_kthread;
+static DEFINE_MUTEX(ap_poll_thread_mutex);
+static DEFINE_SPINLOCK(ap_poll_timer_lock);
+static struct hrtimer ap_poll_timer;
+/*
+ * In LPAR, poll with 4kHz frequency, i.e. every 250000 nanoseconds.
+ * Under z/VM, change to 1500000 nanoseconds to adjust to z/VM's polling.
+ */
+static unsigned long long poll_timeout = 250000;
+
+/* Maximum domain id, if not given via qci */
+static int ap_max_domain_id = 15;
+/* Maximum adapter id, if not given via qci */
+static int ap_max_adapter_id = 63;
+
+static struct bus_type ap_bus_type;
+
+/* Adapter interrupt definitions */
+static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
+
+static bool ap_irq_flag;
+
+static struct airq_struct ap_airq = {
+ .handler = ap_interrupt_handler,
+ .isc = AP_ISC,
+};
+
+/**
+ * ap_airq_ptr() - Get the address of the adapter interrupt indicator
+ *
+ * Returns the address of the local-summary-indicator of the adapter
+ * interrupt handler for AP, or NULL if adapter interrupts are not
+ * available.
+ */
+void *ap_airq_ptr(void)
+{
+ if (ap_irq_flag)
+ return ap_airq.lsi_ptr;
+ return NULL;
+}
+
+/**
+ * ap_interrupts_available(): Test if AP interrupts are available.
+ *
+ * Returns 1 if AP interrupts are available.
+ */
+static int ap_interrupts_available(void)
+{
+ return test_facility(65);
+}
+
+/**
+ * ap_qci_available(): Test if AP configuration
+ * information can be queried via QCI subfunction.
+ *
+ * Returns 1 if subfunction PQAP(QCI) is available.
+ */
+static int ap_qci_available(void)
+{
+ return test_facility(12);
+}
+
+/**
+ * ap_apft_available(): Test if AP facilities test (APFT)
+ * facility is available.
+ *
+ * Returns 1 if APFT is available.
+ */
+static int ap_apft_available(void)
+{
+ return test_facility(15);
+}
+
+/*
+ * ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
+ *
+ * Returns 1 if the QACT subfunction is available.
+ */
+static inline int ap_qact_available(void)
+{
+ if (ap_qci_info)
+ return ap_qci_info->qact;
+ return 0;
+}
+
+/*
+ * ap_fetch_qci_info(): Fetch cryptographic config info
+ *
+ * Returns the ap configuration info fetched via PQAP(QCI).
+ * On success 0 is returned, on failure a negative errno
+ * is returned, e.g. if the PQAP(QCI) instruction is not
+ * available, the return value will be -EOPNOTSUPP.
+ */
+static inline int ap_fetch_qci_info(struct ap_config_info *info)
+{
+ if (!ap_qci_available())
+ return -EOPNOTSUPP;
+ if (!info)
+ return -EINVAL;
+ return ap_qci(info);
+}
+
+/**
+ * ap_init_qci_info(): Allocate and query qci config info.
+ * Also updates the static variables ap_max_domain_id
+ * and ap_max_adapter_id if this info is available.
+ */
+static void __init ap_init_qci_info(void)
+{
+ if (!ap_qci_available()) {
+ AP_DBF_INFO("%s QCI not supported\n", __func__);
+ return;
+ }
+
+ ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
+ if (!ap_qci_info)
+ return;
+ if (ap_fetch_qci_info(ap_qci_info) != 0) {
+ kfree(ap_qci_info);
+ ap_qci_info = NULL;
+ return;
+ }
+	AP_DBF_INFO("%s successfully fetched initial qci info\n", __func__);
+
+ if (ap_qci_info->apxa) {
+ if (ap_qci_info->Na) {
+ ap_max_adapter_id = ap_qci_info->Na;
+ AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
+ __func__, ap_max_adapter_id);
+ }
+ if (ap_qci_info->Nd) {
+ ap_max_domain_id = ap_qci_info->Nd;
+ AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
+ __func__, ap_max_domain_id);
+ }
+ }
+}
+
+/*
+ * ap_test_config(): helper function to extract the nr-th bit
+ * within the unsigned int array field.
+ */
+static inline int ap_test_config(unsigned int *field, unsigned int nr)
+{
+ return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
+}
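For illustration, a minimal stand-alone sketch of the MSB-first (big endian) bit numbering this helper implements; test_config_bit() below is a hypothetical user-space re-implementation, not part of the driver:

#include <stdio.h>

/* bit 0 is the most significant bit of the first 32-bit word */
static int test_config_bit(const unsigned int *field, unsigned int nr)
{
	return (field[nr >> 5] & (0x80000000u >> (nr & 0x1f))) != 0;
}

int main(void)
{
	unsigned int apm[8] = { 0xc0000000u };	/* adapters 0 and 1 configured */

	printf("%d %d %d\n",
	       test_config_bit(apm, 0),		/* 1 */
	       test_config_bit(apm, 1),		/* 1 */
	       test_config_bit(apm, 2));	/* 0 */
	return 0;
}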
+
+/*
+ * ap_test_config_card_id(): Test, whether an AP card ID is configured.
+ *
+ * Returns 0 if the card is not configured
+ * 1 if the card is configured or
+ * if the configuration information is not available
+ */
+static inline int ap_test_config_card_id(unsigned int id)
+{
+ if (id > ap_max_adapter_id)
+ return 0;
+ if (ap_qci_info)
+ return ap_test_config(ap_qci_info->apm, id);
+ return 1;
+}
+
+/*
+ * ap_test_config_usage_domain(): Test, whether an AP usage domain
+ * is configured.
+ *
+ * Returns 0 if the usage domain is not configured
+ * 1 if the usage domain is configured or
+ * if the configuration information is not available
+ */
+int ap_test_config_usage_domain(unsigned int domain)
+{
+ if (domain > ap_max_domain_id)
+ return 0;
+ if (ap_qci_info)
+ return ap_test_config(ap_qci_info->aqm, domain);
+ return 1;
+}
+EXPORT_SYMBOL(ap_test_config_usage_domain);
+
+/*
+ * ap_test_config_ctrl_domain(): Test, whether an AP control domain
+ * is configured.
+ * @domain AP control domain ID
+ *
+ * Returns 1 if the control domain is configured
+ * 0 in all other cases
+ */
+int ap_test_config_ctrl_domain(unsigned int domain)
+{
+ if (!ap_qci_info || domain > ap_max_domain_id)
+ return 0;
+ return ap_test_config(ap_qci_info->adm, domain);
+}
+EXPORT_SYMBOL(ap_test_config_ctrl_domain);
+
+/*
+ * ap_queue_info(): Check and get AP queue info.
+ * Returns true if TAPQ succeeded and the info is filled or
+ * false otherwise.
+ */
+static bool ap_queue_info(ap_qid_t qid, int *q_type,
+ unsigned int *q_fac, int *q_depth, bool *q_decfg)
+{
+ struct ap_queue_status status;
+ unsigned long info = 0;
+
+	/* make sure we don't run into a specification exception */
+ if (AP_QID_CARD(qid) > ap_max_adapter_id ||
+ AP_QID_QUEUE(qid) > ap_max_domain_id)
+ return false;
+
+ /* call TAPQ on this APQN */
+ status = ap_test_queue(qid, ap_apft_available(), &info);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_BUSY:
+ /*
+		 * According to the architecture, the info is filled in all
+		 * these cases. A value of all bits zero is not possible,
+		 * as at least one of the mode bits is always set.
+ */
+ if (WARN_ON_ONCE(!info))
+ return false;
+ *q_type = (int)((info >> 24) & 0xff);
+ *q_fac = (unsigned int)(info >> 32);
+ *q_depth = (int)(info & 0xff);
+ *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
+ switch (*q_type) {
+ /* For CEX2 and CEX3 the available functions
+ * are not reflected by the facilities bits.
+ * Instead it is coded into the type. So here
+ * modify the function bits based on the type.
+ */
+ case AP_DEVICE_TYPE_CEX2A:
+ case AP_DEVICE_TYPE_CEX3A:
+ *q_fac |= 0x08000000;
+ break;
+ case AP_DEVICE_TYPE_CEX2C:
+ case AP_DEVICE_TYPE_CEX3C:
+ *q_fac |= 0x10000000;
+ break;
+ default:
+ break;
+ }
+ return true;
+ default:
+ /*
+		 * A response code which indicates that there is no info available.
+ */
+ return false;
+ }
+}
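As a worked illustration of the GR2 info-word decoding above; the value is invented, not taken from real hardware:

/* made-up TAPQ GR2 value: facilities 0x00004000, type 0x0d, depth 8 */
unsigned long info = 0x000040000d000008UL;
int type = (int)((info >> 24) & 0xff);		/* 0x0d = AP_DEVICE_TYPE_CEX7 */
unsigned int fac = (unsigned int)(info >> 32);	/* 0x00004000, facility bits */
int depth = (int)(info & 0xff);			/* queue depth of 8 */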
+
+void ap_wait(enum ap_sm_wait wait)
+{
+ ktime_t hr_time;
+
+ switch (wait) {
+ case AP_SM_WAIT_AGAIN:
+ case AP_SM_WAIT_INTERRUPT:
+ if (ap_irq_flag)
+ break;
+ if (ap_poll_kthread) {
+ wake_up(&ap_poll_wait);
+ break;
+ }
+ fallthrough;
+ case AP_SM_WAIT_TIMEOUT:
+ spin_lock_bh(&ap_poll_timer_lock);
+ if (!hrtimer_is_queued(&ap_poll_timer)) {
+ hr_time = poll_timeout;
+ hrtimer_forward_now(&ap_poll_timer, hr_time);
+ hrtimer_restart(&ap_poll_timer);
+ }
+ spin_unlock_bh(&ap_poll_timer_lock);
+ break;
+ case AP_SM_WAIT_NONE:
+ default:
+ break;
+ }
+}
+
+/**
+ * ap_request_timeout(): Handling of request timeouts
+ * @t: timer making this callback
+ *
+ * Handles request timeouts.
+ */
+void ap_request_timeout(struct timer_list *t)
+{
+ struct ap_queue *aq = from_timer(aq, t, timeout);
+
+ spin_lock_bh(&aq->lock);
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
+ spin_unlock_bh(&aq->lock);
+}
+
+/**
+ * ap_poll_timeout(): AP receive polling for finished AP requests.
+ * @unused: Unused pointer.
+ *
+ * Schedules the AP tasklet using a high resolution timer.
+ */
+static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
+{
+ tasklet_schedule(&ap_tasklet);
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
+ * @airq: pointer to adapter interrupt descriptor
+ */
+static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
+{
+ inc_irq_stat(IRQIO_APB);
+ tasklet_schedule(&ap_tasklet);
+}
+
+/**
+ * ap_tasklet_fn(): Tasklet to poll all AP devices.
+ * @dummy: Unused variable
+ *
+ * Poll all AP devices on the bus.
+ */
+static void ap_tasklet_fn(unsigned long dummy)
+{
+ int bkt;
+ struct ap_queue *aq;
+ enum ap_sm_wait wait = AP_SM_WAIT_NONE;
+
+	/* Reset the indicator if interrupts are used. Thus new interrupts can
+	 * be received. Doing this at the beginning of the tasklet ensures
+	 * that no requests on any AP get lost.
+	 */
+ if (ap_irq_flag)
+ xchg(ap_airq.lsi_ptr, 0);
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ spin_lock_bh(&aq->lock);
+ wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+ }
+ spin_unlock_bh(&ap_queues_lock);
+
+ ap_wait(wait);
+}
+
+static int ap_pending_requests(void)
+{
+ int bkt;
+ struct ap_queue *aq;
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ if (aq->queue_count == 0)
+ continue;
+ spin_unlock_bh(&ap_queues_lock);
+ return 1;
+ }
+ spin_unlock_bh(&ap_queues_lock);
+ return 0;
+}
+
+/**
+ * ap_poll_thread(): Thread that polls for finished requests.
+ * @data: Unused pointer
+ *
+ * AP bus poll thread. The purpose of this thread is to poll for
+ * finished requests in a loop if there is a "free" cpu - that is
+ * a cpu that doesn't have anything better to do. The polling stops
+ * as soon as there is another task or if all messages have been
+ * delivered.
+ */
+static int ap_poll_thread(void *data)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_user_nice(current, MAX_NICE);
+ set_freezable();
+ while (!kthread_should_stop()) {
+ add_wait_queue(&ap_poll_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!ap_pending_requests()) {
+ schedule();
+ try_to_freeze();
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ap_poll_wait, &wait);
+ if (need_resched()) {
+ schedule();
+ try_to_freeze();
+ continue;
+ }
+ ap_tasklet_fn(0);
+ }
+
+ return 0;
+}
+
+static int ap_poll_thread_start(void)
+{
+ int rc;
+
+ if (ap_irq_flag || ap_poll_kthread)
+ return 0;
+ mutex_lock(&ap_poll_thread_mutex);
+ ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
+ rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
+ if (rc)
+ ap_poll_kthread = NULL;
+ mutex_unlock(&ap_poll_thread_mutex);
+ return rc;
+}
+
+static void ap_poll_thread_stop(void)
+{
+ if (!ap_poll_kthread)
+ return;
+ mutex_lock(&ap_poll_thread_mutex);
+ kthread_stop(ap_poll_kthread);
+ ap_poll_kthread = NULL;
+ mutex_unlock(&ap_poll_thread_mutex);
+}
+
+#define is_card_dev(x) ((x)->parent == ap_root_device)
+#define is_queue_dev(x) ((x)->parent != ap_root_device)
+
+/**
+ * ap_bus_match()
+ * @dev: Pointer to device
+ * @drv: Pointer to device_driver
+ *
+ * AP bus device/driver matching: tests whether the driver supports the device type.
+ */
+static int ap_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ap_driver *ap_drv = to_ap_drv(drv);
+ struct ap_device_id *id;
+
+ /*
+ * Compare device type of the device with the list of
+ * supported types of the device_driver.
+ */
+ for (id = ap_drv->ids; id->match_flags; id++) {
+ if (is_card_dev(dev) &&
+ id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
+ id->dev_type == to_ap_dev(dev)->device_type)
+ return 1;
+ if (is_queue_dev(dev) &&
+ id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
+ id->dev_type == to_ap_dev(dev)->device_type)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * ap_uevent(): Uevent function for AP devices.
+ * @dev: Pointer to device
+ * @env: Pointer to kobj_uevent_env
+ *
+ * It sets up a single environment variable DEV_TYPE which contains the
+ * hardware device type.
+ */
+static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ int retval = 0;
+
+ if (!ap_dev)
+ return -ENODEV;
+
+ /* Set up DEV_TYPE environment variable. */
+ retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
+ if (retval)
+ return retval;
+
+ /* Add MODALIAS= */
+ retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
+
+ return retval;
+}
+
+static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
+{
+ if (is_queue_dev(dev) &&
+ AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
+ device_unregister(dev);
+ return 0;
+}
+
+static struct bus_type ap_bus_type = {
+ .name = "ap",
+ .match = &ap_bus_match,
+ .uevent = &ap_uevent,
+};
+
+static int __ap_revise_reserved(struct device *dev, void *dummy)
+{
+ int rc, card, queue, devres, drvres;
+
+ if (is_queue_dev(dev)) {
+ card = AP_QID_CARD(to_ap_queue(dev)->qid);
+ queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
+ mutex_lock(&ap_perms_mutex);
+ devres = test_bit_inv(card, ap_perms.apm)
+ && test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_perms_mutex);
+ drvres = to_ap_drv(dev->driver)->flags
+ & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres) {
+ AP_DBF_DBG("reprobing queue=%02x.%04x\n",
+ card, queue);
+ rc = device_reprobe(dev);
+ }
+ }
+
+ return 0;
+}
+
+static void ap_bus_revise_bindings(void)
+{
+ bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
+}
+
+int ap_owned_by_def_drv(int card, int queue)
+{
+ int rc = 0;
+
+ if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
+ return -EINVAL;
+
+ mutex_lock(&ap_perms_mutex);
+
+ if (test_bit_inv(card, ap_perms.apm)
+ && test_bit_inv(queue, ap_perms.aqm))
+ rc = 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(ap_owned_by_def_drv);
+
+int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
+ unsigned long *aqm)
+{
+ int card, queue, rc = 0;
+
+ mutex_lock(&ap_perms_mutex);
+
+ for (card = 0; !rc && card < AP_DEVICES; card++)
+ if (test_bit_inv(card, apm) &&
+ test_bit_inv(card, ap_perms.apm))
+ for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
+ if (test_bit_inv(queue, aqm) &&
+ test_bit_inv(queue, ap_perms.aqm))
+ rc = 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
+
+static int ap_device_probe(struct device *dev)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ struct ap_driver *ap_drv = to_ap_drv(dev->driver);
+ int card, queue, devres, drvres, rc = -ENODEV;
+
+ if (!get_device(dev))
+ return rc;
+
+ if (is_queue_dev(dev)) {
+ /*
+ * If the apqn is marked as reserved/used by ap bus and
+ * default drivers, only probe with drivers with the default
+ * flag set. If it is not marked, only probe with drivers
+ * with the default flag not set.
+ */
+ card = AP_QID_CARD(to_ap_queue(dev)->qid);
+ queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
+ mutex_lock(&ap_perms_mutex);
+ devres = test_bit_inv(card, ap_perms.apm)
+ && test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_perms_mutex);
+ drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres)
+ goto out;
+ }
+
+ /* Add queue/card to list of active queues/cards */
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_add(ap_queues, &to_ap_queue(dev)->hnode,
+ to_ap_queue(dev)->qid);
+ spin_unlock_bh(&ap_queues_lock);
+
+ ap_dev->drv = ap_drv;
+ rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
+
+ if (rc) {
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_del(&to_ap_queue(dev)->hnode);
+ spin_unlock_bh(&ap_queues_lock);
+ ap_dev->drv = NULL;
+ }
+
+out:
+ if (rc)
+ put_device(dev);
+ return rc;
+}
+
+static int ap_device_remove(struct device *dev)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ struct ap_driver *ap_drv = ap_dev->drv;
+
+ /* prepare ap queue device removal */
+ if (is_queue_dev(dev))
+ ap_queue_prepare_remove(to_ap_queue(dev));
+
+ /* driver's chance to clean up gracefully */
+ if (ap_drv->remove)
+ ap_drv->remove(ap_dev);
+
+ /* now do the ap queue device remove */
+ if (is_queue_dev(dev))
+ ap_queue_remove(to_ap_queue(dev));
+
+ /* Remove queue/card from list of active queues/cards */
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_del(&to_ap_queue(dev)->hnode);
+ spin_unlock_bh(&ap_queues_lock);
+
+ put_device(dev);
+
+ return 0;
+}
+
+struct ap_queue *ap_get_qdev(ap_qid_t qid)
+{
+ int bkt;
+ struct ap_queue *aq;
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ if (aq->qid == qid) {
+ get_device(&aq->ap_dev.device);
+ spin_unlock_bh(&ap_queues_lock);
+ return aq;
+ }
+ }
+ spin_unlock_bh(&ap_queues_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ap_get_qdev);
+
+int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
+ char *name)
+{
+ struct device_driver *drv = &ap_drv->driver;
+
+ drv->bus = &ap_bus_type;
+ drv->probe = ap_device_probe;
+ drv->remove = ap_device_remove;
+ drv->owner = owner;
+ drv->name = name;
+ return driver_register(drv);
+}
+EXPORT_SYMBOL(ap_driver_register);
+
+void ap_driver_unregister(struct ap_driver *ap_drv)
+{
+ driver_unregister(&ap_drv->driver);
+}
+EXPORT_SYMBOL(ap_driver_unregister);
+
+void ap_bus_force_rescan(void)
+{
+	/* process an asynchronous bus rescan */
+ del_timer(&ap_config_timer);
+ queue_work(system_long_wq, &ap_scan_work);
+ flush_work(&ap_scan_work);
+}
+EXPORT_SYMBOL(ap_bus_force_rescan);
+
+/*
+* A config change has happened, force an ap bus rescan.
+*/
+void ap_bus_cfg_chg(void)
+{
+ AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
+
+ ap_bus_force_rescan();
+}
+
+/*
+ * hex2bitmap() - parse hex mask string and set bitmap.
+ * Valid strings are "0x012345678" with at least one valid hex number.
+ * The rest of the bitmap to the right is padded with zeros. No spaces
+ * are allowed within the string; the leading 0x may be omitted.
+ * Returns the bitmask with exactly the bits set as given by the hex
+ * string (both in big endian order).
+ */
+static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
+{
+ int i, n, b;
+
+ /* bits needs to be a multiple of 8 */
+ if (bits & 0x07)
+ return -EINVAL;
+
+ if (str[0] == '0' && str[1] == 'x')
+ str++;
+ if (*str == 'x')
+ str++;
+
+ for (i = 0; isxdigit(*str) && i < bits; str++) {
+ b = hex_to_bin(*str);
+ for (n = 0; n < 4; n++)
+ if (b & (0x08 >> n))
+ set_bit_inv(i + n, bitmap);
+ i += 4;
+ }
+
+ if (*str == '\n')
+ str++;
+ if (*str)
+ return -EINVAL;
+ return 0;
+}
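A usage sketch for the parser above (hypothetical caller; within this file hex2bitmap() is only reached via ap_parse_mask_str()). Note that the hex string is interpreted MSB first:

DECLARE_BITMAP(map, AP_DEVICES);

memset(map, 0, sizeof(map));
if (hex2bitmap("0x80", map, AP_DEVICES) == 0)
	WARN_ON(!test_bit_inv(0, map));	/* "0x80" sets bit 0 only */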
+
+/*
+ * modify_bitmap() - parse bitmask argument and modify an existing
+ * bit mask accordingly. A concatenation (done with ',') of these
+ * terms is recognized:
+ * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
+ * <bitnr> may be any valid number (hex, decimal or octal) in the range
+ * 0...bits-1; the leading + or - is required. Here are some examples:
+ * +0-15,+32,-128,-0xFF
+ * -0-255,+1-16,+0x128
+ * +1,+2,+3,+4,-5,-7-10
+ * Returns the new bitmap after all changes have been applied. Every
+ * positive value in the string will set a bit and every negative value
+ * in the string will clear a bit. As a bit may be touched more than once,
+ * the last 'operation' wins:
+ * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
+ * cleared again. All other bits are unmodified.
+ */
+static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
+{
+ int a, i, z;
+ char *np, sign;
+
+ /* bits needs to be a multiple of 8 */
+ if (bits & 0x07)
+ return -EINVAL;
+
+ while (*str) {
+ sign = *str++;
+ if (sign != '+' && sign != '-')
+ return -EINVAL;
+ a = z = simple_strtoul(str, &np, 0);
+ if (str == np || a >= bits)
+ return -EINVAL;
+ str = np;
+ if (*str == '-') {
+ z = simple_strtoul(++str, &np, 0);
+ if (str == np || a > z || z >= bits)
+ return -EINVAL;
+ str = np;
+ }
+ for (i = a; i <= z; i++)
+ if (sign == '+')
+ set_bit_inv(i, bitmap);
+ else
+ clear_bit_inv(i, bitmap);
+ while (*str == ',' || *str == '\n')
+ str++;
+ }
+
+ return 0;
+}
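And a worked example for the relative syntax (again a hypothetical caller), showing that the last operation on a bit wins:

DECLARE_BITMAP(map, AP_DOMAINS);

memset(map, 0, sizeof(map));
if (modify_bitmap("+0-15,-7", map, AP_DOMAINS) == 0) {
	WARN_ON(!test_bit_inv(0, map));	/* set by "+0-15" */
	WARN_ON(test_bit_inv(7, map));	/* cleared again by "-7" */
}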
+
+int ap_parse_mask_str(const char *str,
+ unsigned long *bitmap, int bits,
+ struct mutex *lock)
+{
+ unsigned long *newmap, size;
+ int rc;
+
+ /* bits needs to be a multiple of 8 */
+ if (bits & 0x07)
+ return -EINVAL;
+
+ size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
+ newmap = kmalloc(size, GFP_KERNEL);
+ if (!newmap)
+ return -ENOMEM;
+ if (mutex_lock_interruptible(lock)) {
+ kfree(newmap);
+ return -ERESTARTSYS;
+ }
+
+ if (*str == '+' || *str == '-') {
+ memcpy(newmap, bitmap, size);
+ rc = modify_bitmap(str, newmap, bits);
+ } else {
+ memset(newmap, 0, size);
+ rc = hex2bitmap(str, newmap, bits);
+ }
+ if (rc == 0)
+ memcpy(bitmap, newmap, size);
+ mutex_unlock(lock);
+ kfree(newmap);
+ return rc;
+}
+EXPORT_SYMBOL(ap_parse_mask_str);
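A usage sketch mirroring apmask_store() further below; the mask value is just an example:

int rc;

/* allow adapters 0-31 only; hex2bitmap() pads the rest with zeros */
rc = ap_parse_mask_str("0xffffffff", ap_perms.apm,
		       AP_DEVICES, &ap_perms_mutex);
if (rc == 0)
	ap_bus_revise_bindings();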
+
+/*
+ * AP bus attributes.
+ */
+
+static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+}
+
+static ssize_t ap_domain_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int domain;
+
+ if (sscanf(buf, "%i\n", &domain) != 1 ||
+ domain < 0 || domain > ap_max_domain_id ||
+ !test_bit_inv(domain, ap_perms.aqm))
+ return -EINVAL;
+
+ spin_lock_bh(&ap_domain_lock);
+ ap_domain_index = domain;
+ spin_unlock_bh(&ap_domain_lock);
+
+ AP_DBF_INFO("stored new default domain=%d\n", domain);
+
+ return count;
+}
+
+static BUS_ATTR_RW(ap_domain);
+
+static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
+{
+ if (!ap_qci_info) /* QCI not supported */
+ return scnprintf(buf, PAGE_SIZE, "not supported\n");
+
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_qci_info->adm[0], ap_qci_info->adm[1],
+ ap_qci_info->adm[2], ap_qci_info->adm[3],
+ ap_qci_info->adm[4], ap_qci_info->adm[5],
+ ap_qci_info->adm[6], ap_qci_info->adm[7]);
+}
+
+static BUS_ATTR_RO(ap_control_domain_mask);
+
+static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
+{
+ if (!ap_qci_info) /* QCI not supported */
+ return scnprintf(buf, PAGE_SIZE, "not supported\n");
+
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_qci_info->aqm[0], ap_qci_info->aqm[1],
+ ap_qci_info->aqm[2], ap_qci_info->aqm[3],
+ ap_qci_info->aqm[4], ap_qci_info->aqm[5],
+ ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
+}
+
+static BUS_ATTR_RO(ap_usage_domain_mask);
+
+static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
+{
+ if (!ap_qci_info) /* QCI not supported */
+ return scnprintf(buf, PAGE_SIZE, "not supported\n");
+
+ return scnprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_qci_info->apm[0], ap_qci_info->apm[1],
+ ap_qci_info->apm[2], ap_qci_info->apm[3],
+ ap_qci_info->apm[4], ap_qci_info->apm[5],
+ ap_qci_info->apm[6], ap_qci_info->apm[7]);
+}
+
+static BUS_ATTR_RO(ap_adapter_mask);
+
+static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ ap_irq_flag ? 1 : 0);
+}
+
+static BUS_ATTR_RO(ap_interrupts);
+
+static ssize_t config_time_show(struct bus_type *bus, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+}
+
+static ssize_t config_time_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int time;
+
+ if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
+ return -EINVAL;
+ ap_config_time = time;
+ mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ return count;
+}
+
+static BUS_ATTR_RW(config_time);
+
+static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+}
+
+static ssize_t poll_thread_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int flag, rc;
+
+ if (sscanf(buf, "%d\n", &flag) != 1)
+ return -EINVAL;
+ if (flag) {
+ rc = ap_poll_thread_start();
+ if (rc)
+ count = rc;
+ } else
+ ap_poll_thread_stop();
+ return count;
+}
+
+static BUS_ATTR_RW(poll_thread);
+
+static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+}
+
+static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ unsigned long long time;
+ ktime_t hr_time;
+
+ /* 120 seconds = maximum poll interval */
+ if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
+ time > 120000000000ULL)
+ return -EINVAL;
+ poll_timeout = time;
+ hr_time = poll_timeout;
+
+ spin_lock_bh(&ap_poll_timer_lock);
+ hrtimer_cancel(&ap_poll_timer);
+ hrtimer_set_expires(&ap_poll_timer, hr_time);
+ hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
+ spin_unlock_bh(&ap_poll_timer_lock);
+
+ return count;
+}
+
+static BUS_ATTR_RW(poll_timeout);
+
+static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
+}
+
+static BUS_ATTR_RO(ap_max_domain_id);
+
+static ssize_t ap_max_adapter_id_show(struct bus_type *bus, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
+}
+
+static BUS_ATTR_RO(ap_max_adapter_id);
+
+static ssize_t apmask_show(struct bus_type *bus, char *buf)
+{
+ int rc;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+ rc = scnprintf(buf, PAGE_SIZE,
+ "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.apm[0], ap_perms.apm[1],
+ ap_perms.apm[2], ap_perms.apm[3]);
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t apmask_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ int rc;
+
+ rc = ap_parse_mask_str(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ ap_bus_revise_bindings();
+
+ return count;
+}
+
+static BUS_ATTR_RW(apmask);
+
+static ssize_t aqmask_show(struct bus_type *bus, char *buf)
+{
+ int rc;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+ rc = scnprintf(buf, PAGE_SIZE,
+ "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.aqm[0], ap_perms.aqm[1],
+ ap_perms.aqm[2], ap_perms.aqm[3]);
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ int rc;
+
+ rc = ap_parse_mask_str(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ ap_bus_revise_bindings();
+
+ return count;
+}
+
+static BUS_ATTR_RW(aqmask);
+
+static struct bus_attribute *const ap_bus_attrs[] = {
+ &bus_attr_ap_domain,
+ &bus_attr_ap_control_domain_mask,
+ &bus_attr_ap_usage_domain_mask,
+ &bus_attr_ap_adapter_mask,
+ &bus_attr_config_time,
+ &bus_attr_poll_thread,
+ &bus_attr_ap_interrupts,
+ &bus_attr_poll_timeout,
+ &bus_attr_ap_max_domain_id,
+ &bus_attr_ap_max_adapter_id,
+ &bus_attr_apmask,
+ &bus_attr_aqmask,
+ NULL,
+};
+
+/**
+ * ap_select_domain(): Select an AP domain if possible and we haven't
+ * already done so before.
+ */
+static void ap_select_domain(void)
+{
+ struct ap_queue_status status;
+ int card, dom;
+
+ /*
+ * Choose the default domain. Either the one specified with
+ * the "domain=" parameter or the first domain with at least
+ * one valid APQN.
+ */
+ spin_lock_bh(&ap_domain_lock);
+ if (ap_domain_index >= 0) {
+ /* Domain has already been selected. */
+ goto out;
+ }
+ for (dom = 0; dom <= ap_max_domain_id; dom++) {
+ if (!ap_test_config_usage_domain(dom) ||
+ !test_bit_inv(dom, ap_perms.aqm))
+ continue;
+ for (card = 0; card <= ap_max_adapter_id; card++) {
+ if (!ap_test_config_card_id(card) ||
+ !test_bit_inv(card, ap_perms.apm))
+ continue;
+ status = ap_test_queue(AP_MKQID(card, dom),
+ ap_apft_available(),
+ NULL);
+ if (status.response_code == AP_RESPONSE_NORMAL)
+ break;
+ }
+ if (card <= ap_max_adapter_id)
+ break;
+ }
+ if (dom <= ap_max_domain_id) {
+ ap_domain_index = dom;
+ AP_DBF_INFO("%s new default domain is %d\n",
+ __func__, ap_domain_index);
+ }
+out:
+ spin_unlock_bh(&ap_domain_lock);
+}
+
+/*
+ * This function checks the type and returns either 0 for not
+ * supported or the highest compatible type value (which may
+ * include the input type value).
+ */
+static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
+{
+ int comp_type = 0;
+
+ /* < CEX2A is not supported */
+ if (rawtype < AP_DEVICE_TYPE_CEX2A) {
+ AP_DBF_WARN("get_comp_type queue=%02x.%04x unsupported type %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+ return 0;
+ }
+ /* up to CEX7 known and fully supported */
+ if (rawtype <= AP_DEVICE_TYPE_CEX7)
+ return rawtype;
+ /*
+ * unknown new type > CEX7, check for compatibility
+ * to the highest known and supported type which is
+ * currently CEX7 with the help of the QACT function.
+ */
+ if (ap_qact_available()) {
+ struct ap_queue_status status;
+ union ap_qact_ap_info apinfo = {0};
+
+ apinfo.mode = (func >> 26) & 0x07;
+ apinfo.cat = AP_DEVICE_TYPE_CEX7;
+ status = ap_qact(qid, 0, &apinfo);
+ if (status.response_code == AP_RESPONSE_NORMAL
+ && apinfo.cat >= AP_DEVICE_TYPE_CEX2A
+ && apinfo.cat <= AP_DEVICE_TYPE_CEX7)
+ comp_type = apinfo.cat;
+ }
+ if (!comp_type)
+ AP_DBF_WARN("get_comp_type queue=%02x.%04x unable to map type %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+ else if (comp_type != rawtype)
+ AP_DBF_INFO("get_comp_type queue=%02x.%04x map type %d to %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+ rawtype, comp_type);
+ return comp_type;
+}
+
+/*
+ * Helper function to be used with bus_find_dev
+ * matches for the card device with the given id
+ */
+static int __match_card_device_with_id(struct device *dev, const void *data)
+{
+ return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *) data;
+}
+
+/*
+ * Helper function to be used with bus_find_dev
+ * matches for the queue device with a given qid
+ */
+static int __match_queue_device_with_qid(struct device *dev, const void *data)
+{
+ return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
+}
+
+/*
+ * Helper function to be used with bus_find_dev
+ * matches any queue device with given queue id
+ */
+static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
+{
+ return is_queue_dev(dev)
+ && AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data;
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Remove card device and associated queue devices.
+ */
+static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
+{
+ bus_for_each_dev(&ap_bus_type, NULL,
+ (void *)(long) ac->id,
+ __ap_queue_devices_with_id_unregister);
+ device_unregister(&ac->ap_dev.device);
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for all the domains within
+ * a valid adapter given by an ap_card ptr.
+ */
+static inline void ap_scan_domains(struct ap_card *ac)
+{
+ bool decfg;
+ ap_qid_t qid;
+ unsigned int func;
+ struct device *dev;
+ struct ap_queue *aq;
+ int rc, dom, depth, type;
+
+ /*
+ * Go through the configuration for the domains and compare them
+ * to the existing queue devices. Also take care of the config
+ * and error state for the queue devices.
+ */
+
+ for (dom = 0; dom <= ap_max_domain_id; dom++) {
+ qid = AP_MKQID(ac->id, dom);
+ dev = bus_find_device(&ap_bus_type, NULL,
+ (void *)(long) qid,
+ __match_queue_device_with_qid);
+ aq = dev ? to_ap_queue(dev) : NULL;
+ if (!ap_test_config_usage_domain(dom)) {
+ if (dev) {
+ AP_DBF_INFO("%s(%d,%d) not in config any more, rm queue device\n",
+ __func__, ac->id, dom);
+ device_unregister(dev);
+ put_device(dev);
+ }
+ continue;
+ }
+ /* domain is valid, get info from this APQN */
+ if (!ap_queue_info(qid, &type, &func, &depth, &decfg)) {
+ if (aq) {
+ AP_DBF_INFO(
+ "%s(%d,%d) ap_queue_info() not successful, rm queue device\n",
+ __func__, ac->id, dom);
+ device_unregister(dev);
+ put_device(dev);
+ }
+ continue;
+ }
+ /* if no queue device exists, create a new one */
+ if (!aq) {
+ aq = ap_queue_create(qid, ac->ap_dev.device_type);
+ if (!aq) {
+ AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
+ __func__, ac->id, dom);
+ continue;
+ }
+ aq->card = ac;
+ aq->config = !decfg;
+ dev = &aq->ap_dev.device;
+ dev->bus = &ap_bus_type;
+ dev->parent = &ac->ap_dev.device;
+ dev_set_name(dev, "%02x.%04x", ac->id, dom);
+ /* register queue device */
+ rc = device_register(dev);
+ if (rc) {
+ AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ /* get it and thus adjust reference counter */
+ get_device(dev);
+ if (decfg)
+ AP_DBF_INFO("%s(%d,%d) new (decfg) queue device created\n",
+ __func__, ac->id, dom);
+ else
+ AP_DBF_INFO("%s(%d,%d) new queue device created\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ /* Check config state on the already existing queue device */
+ spin_lock_bh(&aq->lock);
+ if (decfg && aq->config) {
+ /* config off this queue device */
+ aq->config = false;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
+ }
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_INFO("%s(%d,%d) queue device config off\n",
+ __func__, ac->id, dom);
+ /* 'receive' pending messages with -EAGAIN */
+ ap_flush_queue(aq);
+ goto put_dev_and_continue;
+ }
+ if (!decfg && !aq->config) {
+ /* config on this queue device */
+ aq->config = true;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+ aq->dev_state = AP_DEV_STATE_OPERATING;
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ }
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_INFO("%s(%d,%d) queue device config on\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ /* handle other error states */
+ if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
+ spin_unlock_bh(&aq->lock);
+ /* 'receive' pending messages with -EAGAIN */
+ ap_flush_queue(aq);
+ /* re-init (with reset) the queue device */
+ ap_queue_init_state(aq);
+ AP_DBF_INFO("%s(%d,%d) queue device reinit enforced\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ spin_unlock_bh(&aq->lock);
+put_dev_and_continue:
+ put_device(dev);
+ }
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for the given adapter id.
+ */
+static inline void ap_scan_adapter(int ap)
+{
+ bool decfg;
+ ap_qid_t qid;
+ unsigned int func;
+ struct device *dev;
+ struct ap_card *ac;
+ int rc, dom, depth, type, comp_type;
+
+ /* Is there currently a card device for this adapter ? */
+ dev = bus_find_device(&ap_bus_type, NULL,
+ (void *)(long) ap,
+ __match_card_device_with_id);
+ ac = dev ? to_ap_card(dev) : NULL;
+
+ /* Adapter not in configuration ? */
+ if (!ap_test_config_card_id(ap)) {
+ if (ac) {
+ AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devices\n",
+ __func__, ap);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ }
+ return;
+ }
+
+ /*
+ * Adapter ap is valid in the current configuration. So do some checks:
+ * If no card device exists, build one. If a card device exists, check
+ * for type and functions changed. For all this we need to find a valid
+ * APQN first.
+ */
+
+ for (dom = 0; dom <= ap_max_domain_id; dom++)
+ if (ap_test_config_usage_domain(dom)) {
+ qid = AP_MKQID(ap, dom);
+ if (ap_queue_info(qid, &type, &func, &depth, &decfg))
+ break;
+ }
+ if (dom > ap_max_domain_id) {
+ /* Could not find a valid APQN for this adapter */
+ if (ac) {
+ AP_DBF_INFO(
+ "%s(%d) no type info (no APQN found), rm card and queue devices\n",
+ __func__, ap);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ } else {
+ AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
+ __func__, ap);
+ }
+ return;
+ }
+ if (!type) {
+		/* No adapter type info available, an unusable adapter */
+ if (ac) {
+ AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devices\n",
+ __func__, ap);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ } else {
+ AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
+ __func__, ap);
+ }
+ return;
+ }
+
+ if (ac) {
+ /* Check APQN against existing card device for changes */
+ if (ac->raw_hwtype != type) {
+ AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devices\n",
+ __func__, ap, type);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ ac = NULL;
+ } else if (ac->functions != func) {
+ AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devices\n",
+				    __func__, ap, func);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ ac = NULL;
+ } else {
+ if (decfg && ac->config) {
+ ac->config = false;
+ AP_DBF_INFO("%s(%d) card device config off\n",
+ __func__, ap);
+
+ }
+ if (!decfg && !ac->config) {
+ ac->config = true;
+ AP_DBF_INFO("%s(%d) card device config on\n",
+ __func__, ap);
+ }
+ }
+ }
+
+ if (!ac) {
+ /* Build a new card device */
+ comp_type = ap_get_compatible_type(qid, type, func);
+ if (!comp_type) {
+ AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
+ __func__, ap, type);
+ return;
+ }
+ ac = ap_card_create(ap, depth, type, comp_type, func);
+ if (!ac) {
+ AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
+ __func__, ap);
+ return;
+ }
+ ac->config = !decfg;
+ dev = &ac->ap_dev.device;
+ dev->bus = &ap_bus_type;
+ dev->parent = ap_root_device;
+ dev_set_name(dev, "card%02x", ap);
+ /* Register the new card device with AP bus */
+ rc = device_register(dev);
+ if (rc) {
+ AP_DBF_WARN("%s(%d) device_register() failed\n",
+ __func__, ap);
+ put_device(dev);
+ return;
+ }
+ /* get it and thus adjust reference counter */
+ get_device(dev);
+ if (decfg)
+ AP_DBF_INFO("%s(%d) new (decfg) card device type=%d func=0x%08x created\n",
+ __func__, ap, type, func);
+ else
+ AP_DBF_INFO("%s(%d) new card device type=%d func=0x%08x created\n",
+ __func__, ap, type, func);
+ }
+
+ /* Verify the domains and the queue devices for this card */
+ ap_scan_domains(ac);
+
+ /* release the card device */
+ put_device(&ac->ap_dev.device);
+}
+
+/**
+ * ap_scan_bus(): Scan the AP bus for new devices
+ * Runs periodically, workqueue timer (ap_config_time)
+ */
+static void ap_scan_bus(struct work_struct *unused)
+{
+ int ap;
+
+ ap_fetch_qci_info(ap_qci_info);
+ ap_select_domain();
+
+ AP_DBF_DBG("%s running\n", __func__);
+
+ /* loop over all possible adapters */
+ for (ap = 0; ap <= ap_max_adapter_id; ap++)
+ ap_scan_adapter(ap);
+
+ /* check if there is at least one queue available with default domain */
+ if (ap_domain_index >= 0) {
+ struct device *dev =
+ bus_find_device(&ap_bus_type, NULL,
+ (void *)(long) ap_domain_index,
+ __match_queue_device_with_queue_id);
+ if (dev)
+ put_device(dev);
+ else
+ AP_DBF_INFO("no queue device with default domain %d available\n",
+ ap_domain_index);
+ }
+
+ mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+}
+
+static void ap_config_timeout(struct timer_list *unused)
+{
+ queue_work(system_long_wq, &ap_scan_work);
+}
+
+static int __init ap_debug_init(void)
+{
+ ap_dbf_info = debug_register("ap", 1, 1,
+ DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ debug_register_view(ap_dbf_info, &debug_sprintf_view);
+ debug_set_level(ap_dbf_info, DBF_ERR);
+
+ return 0;
+}
+
+static void __init ap_perms_init(void)
+{
+	/* all resources usable if no kernel parameter string given */
+ memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
+ memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
+ memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
+
+ /* apm kernel parameter string */
+ if (apm_str) {
+ memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
+ ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
+ &ap_perms_mutex);
+ }
+
+ /* aqm kernel parameter string */
+ if (aqm_str) {
+ memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
+ ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
+ &ap_perms_mutex);
+ }
+}
+
+/**
+ * ap_module_init(): The module initialization code.
+ *
+ * Initializes the module.
+ */
+static int __init ap_module_init(void)
+{
+ int rc, i;
+
+ rc = ap_debug_init();
+ if (rc)
+ return rc;
+
+ if (!ap_instructions_available()) {
+ pr_warn("The hardware system does not support AP instructions\n");
+ return -ENODEV;
+ }
+
+ /* init ap_queue hashtable */
+ hash_init(ap_queues);
+
+ /* set up the AP permissions (ioctls, ap and aq masks) */
+ ap_perms_init();
+
+ /* Get AP configuration data if available */
+ ap_init_qci_info();
+
+ /* check default domain setting */
+ if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
+ (ap_domain_index >= 0 &&
+ !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
+ pr_warn("%d is not a valid cryptographic domain\n",
+ ap_domain_index);
+ ap_domain_index = -1;
+ }
+
+ /* enable interrupts if available */
+ if (ap_interrupts_available()) {
+ rc = register_adapter_interrupt(&ap_airq);
+ ap_irq_flag = (rc == 0);
+ }
+
+ /* Create /sys/bus/ap. */
+ rc = bus_register(&ap_bus_type);
+ if (rc)
+ goto out;
+ for (i = 0; ap_bus_attrs[i]; i++) {
+ rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
+ if (rc)
+ goto out_bus;
+ }
+
+ /* Create /sys/devices/ap. */
+ ap_root_device = root_device_register("ap");
+ rc = PTR_ERR_OR_ZERO(ap_root_device);
+ if (rc)
+ goto out_bus;
+
+ /* Setup the AP bus rescan timer. */
+ timer_setup(&ap_config_timer, ap_config_timeout, 0);
+
+ /*
+	 * Set up the high resolution poll timer.
+ * If we are running under z/VM adjust polling to z/VM polling rate.
+ */
+ if (MACHINE_IS_VM)
+ poll_timeout = 1500000;
+ hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ap_poll_timer.function = ap_poll_timeout;
+
+ /* Start the low priority AP bus poll thread. */
+ if (ap_thread_flag) {
+ rc = ap_poll_thread_start();
+ if (rc)
+ goto out_work;
+ }
+
+ queue_work(system_long_wq, &ap_scan_work);
+
+ return 0;
+
+out_work:
+ hrtimer_cancel(&ap_poll_timer);
+ root_device_unregister(ap_root_device);
+out_bus:
+ while (i--)
+ bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
+ bus_unregister(&ap_bus_type);
+out:
+ if (ap_irq_flag)
+ unregister_adapter_interrupt(&ap_airq);
+ kfree(ap_qci_info);
+ return rc;
+}
+device_initcall(ap_module_init);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
new file mode 100644
index 000000000..ccdbd95ca
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2006, 2019
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * Felix Beck <felix.beck@de.ibm.com>
+ * Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * Adjunct processor bus header file.
+ */
+
+#ifndef _AP_BUS_H_
+#define _AP_BUS_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/hashtable.h>
+#include <asm/isc.h>
+#include <asm/ap.h>
+
+#define AP_DEVICES 256 /* Number of AP devices. */
+#define AP_DOMAINS 256 /* Number of AP domains. */
+#define AP_IOCTLS 256 /* Number of ioctls. */
+#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
+#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
+#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
+
+extern int ap_domain_index;
+
+extern DECLARE_HASHTABLE(ap_queues, 8);
+extern spinlock_t ap_queues_lock;
+
+static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
+{
+ return (*ptr & (0x80000000u >> nr)) != 0;
+}
+
+#define AP_RESPONSE_NORMAL 0x00
+#define AP_RESPONSE_Q_NOT_AVAIL 0x01
+#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
+#define AP_RESPONSE_DECONFIGURED 0x03
+#define AP_RESPONSE_CHECKSTOPPED 0x04
+#define AP_RESPONSE_BUSY 0x05
+#define AP_RESPONSE_INVALID_ADDRESS 0x06
+#define AP_RESPONSE_OTHERWISE_CHANGED 0x07
+#define AP_RESPONSE_Q_FULL 0x10
+#define AP_RESPONSE_NO_PENDING_REPLY 0x10
+#define AP_RESPONSE_INDEX_TOO_BIG 0x11
+#define AP_RESPONSE_NO_FIRST_PART 0x13
+#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
+#define AP_RESPONSE_INVALID_DOMAIN 0x42
+
+/*
+ * Known device types
+ */
+#define AP_DEVICE_TYPE_PCICC 3
+#define AP_DEVICE_TYPE_PCICA 4
+#define AP_DEVICE_TYPE_PCIXCC 5
+#define AP_DEVICE_TYPE_CEX2A 6
+#define AP_DEVICE_TYPE_CEX2C 7
+#define AP_DEVICE_TYPE_CEX3A 8
+#define AP_DEVICE_TYPE_CEX3C 9
+#define AP_DEVICE_TYPE_CEX4 10
+#define AP_DEVICE_TYPE_CEX5 11
+#define AP_DEVICE_TYPE_CEX6 12
+#define AP_DEVICE_TYPE_CEX7 13
+
+/*
+ * Known function facilities
+ */
+#define AP_FUNC_MEX4K 1
+#define AP_FUNC_CRT4K 2
+#define AP_FUNC_COPRO 3
+#define AP_FUNC_ACCEL 4
+#define AP_FUNC_EP11 5
+#define AP_FUNC_APXA 6
+
+/*
+ * AP queue state machine states
+ */
+enum ap_sm_state {
+ AP_SM_STATE_RESET_START = 0,
+ AP_SM_STATE_RESET_WAIT,
+ AP_SM_STATE_SETIRQ_WAIT,
+ AP_SM_STATE_IDLE,
+ AP_SM_STATE_WORKING,
+ AP_SM_STATE_QUEUE_FULL,
+ NR_AP_SM_STATES
+};
+
+/*
+ * AP queue state machine events
+ */
+enum ap_sm_event {
+ AP_SM_EVENT_POLL,
+ AP_SM_EVENT_TIMEOUT,
+ NR_AP_SM_EVENTS
+};
+
+/*
+ * AP queue state wait behaviour
+ */
+enum ap_sm_wait {
+ AP_SM_WAIT_AGAIN = 0, /* retry immediately */
+ AP_SM_WAIT_TIMEOUT, /* wait for timeout */
+ AP_SM_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
+ AP_SM_WAIT_NONE, /* no wait */
+ NR_AP_SM_WAIT
+};
+
+/*
+ * AP queue device states
+ */
+enum ap_dev_state {
+ AP_DEV_STATE_UNINITIATED = 0, /* fresh and virgin, not touched */
+ AP_DEV_STATE_OPERATING, /* queue dev is working normal */
+ AP_DEV_STATE_SHUTDOWN, /* remove/unbind/shutdown in progress */
+ AP_DEV_STATE_ERROR, /* device is in error state */
+ NR_AP_DEV_STATES
+};
+
+struct ap_device;
+struct ap_message;
+
+/*
+ * The ap driver struct includes a flags field which holds some info for
+ * the ap bus about the driver. Currently only one flag is supported and
+ * used: The DEFAULT flag marks an ap driver as a default driver which is
+ * used together with the apmask and aqmask whitelisting of the ap bus.
+ */
+#define AP_DRIVER_FLAG_DEFAULT 0x0001
+
+struct ap_driver {
+ struct device_driver driver;
+ struct ap_device_id *ids;
+ unsigned int flags;
+
+ int (*probe)(struct ap_device *);
+ void (*remove)(struct ap_device *);
+};
+
+#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
+
+int ap_driver_register(struct ap_driver *, struct module *, char *);
+void ap_driver_unregister(struct ap_driver *);
+
+struct ap_device {
+ struct device device;
+ struct ap_driver *drv; /* Pointer to AP device driver. */
+ int device_type; /* AP device type. */
+};
+
+#define to_ap_dev(x) container_of((x), struct ap_device, device)
+
+struct ap_card {
+ struct ap_device ap_dev;
+ void *private; /* ap driver private pointer. */
+ int raw_hwtype; /* AP raw hardware type. */
+ unsigned int functions; /* AP device function bitfield. */
+ int queue_depth; /* AP queue depth.*/
+ int id; /* AP card number. */
+ bool config; /* configured state */
+ atomic64_t total_request_count; /* # requests ever for this AP device.*/
+};
+
+#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
+
+struct ap_queue {
+ struct ap_device ap_dev;
+ struct hlist_node hnode; /* Node for the ap_queues hashtable */
+ struct ap_card *card; /* Ptr to assoc. AP card. */
+ spinlock_t lock; /* Per device lock. */
+ void *private; /* ap driver private pointer. */
+ enum ap_dev_state dev_state; /* queue device state */
+ bool config; /* configured state */
+ ap_qid_t qid; /* AP queue id. */
+ bool interrupt; /* indicate if interrupts are enabled */
+ int queue_count; /* # messages currently on AP queue. */
+ int pendingq_count; /* # requests on pendingq list. */
+ int requestq_count; /* # requests on requestq list. */
+ u64 total_request_count; /* # requests ever for this AP device.*/
+ int request_timeout; /* Request timeout in jiffies. */
+ struct timer_list timeout; /* Timer for request timeouts. */
+ struct list_head pendingq; /* List of message sent to AP queue. */
+ struct list_head requestq; /* List of message yet to be sent. */
+ struct ap_message *reply; /* Per device reply message. */
+ enum ap_sm_state sm_state; /* ap queue state machine state */
+ int last_err_rc; /* last error state response code */
+};
+
+#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
+
+typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
+
+/* failure injection cmd struct */
+struct ap_fi {
+ union {
+ u16 cmd; /* fi flags + action */
+ struct {
+ u8 flags; /* fi flags only */
+ u8 action; /* fi action only */
+ };
+ };
+};
+
+/* all currently known fi actions */
+enum ap_fi_actions {
+ AP_FI_ACTION_CCA_AGENT_FF = 0x01,
+ AP_FI_ACTION_CCA_DOM_INVAL = 0x02,
+ AP_FI_ACTION_NQAP_QID_INVAL = 0x03,
+};
+
+/* all currently known fi flags */
+enum ap_fi_flags {
+ AP_FI_FLAG_NO_RETRY = 0x01,
+ AP_FI_FLAG_TOGGLE_SPECIAL = 0x02,
+};
+
+struct ap_message {
+ struct list_head list; /* Request queueing. */
+ unsigned long long psmid; /* Message id. */
+ void *msg; /* Pointer to message buffer. */
+ unsigned int len; /* Message length. */
+ u16 flags; /* Flags, see AP_MSG_FLAG_xxx */
+ struct ap_fi fi; /* Failure Injection cmd */
+ int rc; /* Return code for this message */
+ void *private; /* ap driver private pointer. */
+ /* receive is called from tasklet context */
+ void (*receive)(struct ap_queue *, struct ap_message *,
+ struct ap_message *);
+};
+
+#define AP_MSG_FLAG_SPECIAL 1 /* flag msg as 'special' with NQAP */
+
+/**
+ * ap_init_message() - Initialize ap_message.
+ * Initialize a message before first use; otherwise its use might
+ * result in unexpected behaviour.
+ */
+static inline void ap_init_message(struct ap_message *ap_msg)
+{
+ memset(ap_msg, 0, sizeof(*ap_msg));
+}
+
+/**
+ * ap_release_message() - Release ap_message.
+ * Releases all memory used internally within the ap_message struct,
+ * currently the msg and private fields.
+ */
+static inline void ap_release_message(struct ap_message *ap_msg)
+{
+ kfree_sensitive(ap_msg->msg);
+ kfree_sensitive(ap_msg->private);
+}
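A simplified life-cycle sketch as the zcrypt message type modules use these helpers; aq and my_receive_cb are hypothetical and error handling is omitted:

struct ap_message ap_msg;
int rc;

ap_init_message(&ap_msg);
ap_msg.msg = kmalloc(4096, GFP_KERNEL);	/* request/reply buffer */
ap_msg.len = 4096;
ap_msg.psmid = 0x0102030405060708ULL;	/* program supplied message id */
ap_msg.receive = my_receive_cb;		/* called from tasklet context */
rc = ap_queue_message(aq, &ap_msg);	/* aq: target struct ap_queue */
/* ... wait until my_receive_cb() has delivered the reply ... */
ap_release_message(&ap_msg);		/* kfree_sensitive on msg and private */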
+
+/*
+ * Note: don't use ap_send/ap_recv after using ap_queue_message
+ * for the first time. Otherwise the ap message queue will get
+ * confused.
+ */
+int ap_send(ap_qid_t, unsigned long long, void *, size_t);
+int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
+
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
+
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_flush_queue(struct ap_queue *aq);
+
+void *ap_airq_ptr(void);
+void ap_wait(enum ap_sm_wait wait);
+void ap_request_timeout(struct timer_list *t);
+void ap_bus_force_rescan(void);
+
+int ap_test_config_usage_domain(unsigned int domain);
+int ap_test_config_ctrl_domain(unsigned int domain);
+
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
+void ap_queue_remove(struct ap_queue *aq);
+void ap_queue_init_state(struct ap_queue *aq);
+
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
+ int comp_device_type, unsigned int functions);
+
+struct ap_perms {
+ unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
+ unsigned long apm[BITS_TO_LONGS(AP_DEVICES)];
+ unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)];
+};
+extern struct ap_perms ap_perms;
+extern struct mutex ap_perms_mutex;
+
+/*
+ * Get ap_queue device for this qid.
+ * Returns ptr to the struct ap_queue device or NULL if there
+ * was no ap_queue device with this qid found. When something is
+ * found, the reference count of the embedded device is increased.
+ * So the caller has to decrease the reference count after use
+ * with a call to put_device(&aq->ap_dev.device).
+ */
+struct ap_queue *ap_get_qdev(ap_qid_t qid);
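+
+/*
+ * Editorial usage sketch for ap_get_qdev() (illustrative only):
+ *
+ *	struct ap_queue *aq = ap_get_qdev(AP_MKQID(card, dom));
+ *
+ *	if (aq) {
+ *		... use the queue ...
+ *		put_device(&aq->ap_dev.device);
+ *	}
+ */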
+
+/*
+ * check APQN for owned/reserved by ap bus and default driver(s).
+ * Checks if this APQN is or will be in use by the ap bus
+ * and the default set of drivers.
+ * If yes, returns 1, if not returns 0. On error a negative
+ * errno value is returned.
+ */
+int ap_owned_by_def_drv(int card, int queue);
+
+/*
+ * check 'matrix' of APQNs for owned/reserved by ap bus and
+ * default driver(s).
+ * Checks if there is at least one APQN in the given 'matrix'
+ * marked as owned/reserved by the ap bus and default driver(s).
+ * If such an APQN is found the return value is 1, otherwise
+ * 0 is returned. On error a negative errno value is returned.
+ * The parameter apm is a bitmask which should be declared
+ * as DECLARE_BITMAP(apm, AP_DEVICES), the aqm parameter is
+ * similar, should be declared as DECLARE_BITMAP(aqm, AP_DOMAINS).
+ */
+int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
+ unsigned long *aqm);
+
+/*
+ * ap_parse_mask_str() - helper function to parse a bitmap string
+ * and clear/set the bits in the bitmap accordingly. The string may be
+ * given as an absolute value, a hex string like "0x1F2E3D4C5B6A",
+ * simply overwriting the current content of the bitmap. Or as a
+ * relative string like "+1-16,-32,-0x40,+128" where only single bits
+ * or ranges of bits are cleared or set. The distinction is made based
+ * on the very first character, which may be '+' or '-' for a relative
+ * string and is otherwise assumed to start an absolute value string.
+ * If parsing fails a negative errno value is returned. All arguments
+ * and bitmaps are in big endian order.
+ */
+int ap_parse_mask_str(const char *str,
+ unsigned long *bitmap, int bits,
+ struct mutex *lock);
+
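+/*
+ * Editorial examples for ap_parse_mask_str() (illustrative only):
+ *
+ *	DECLARE_BITMAP(apm, AP_DEVICES);
+ *	int rc;
+ *
+ *	rc = ap_parse_mask_str("0xFFFF", apm, AP_DEVICES, &ap_perms_mutex);
+ *	rc = ap_parse_mask_str("+1-16,-32", apm, AP_DEVICES, &ap_perms_mutex);
+ *
+ * The first call overwrites the bitmap with the absolute hex value,
+ * the second sets bits 1-16 and clears bit 32 relative to the current
+ * content.
+ */
+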
+#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
new file mode 100644
index 000000000..d98bdd28d
--- /dev/null
+++ b/drivers/s390/crypto/ap_card.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, card related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+#include <asm/sclp.h>
+
+#include "ap_bus.h"
+
+/*
+ * AP card related attributes.
+ */
+static ssize_t hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+}
+
+static DEVICE_ATTR_RO(hwtype);
+
+static ssize_t raw_hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+}
+
+static DEVICE_ATTR_RO(raw_hwtype);
+
+static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+}
+
+static DEVICE_ATTR_RO(depth);
+
+static ssize_t ap_functions_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+}
+
+static DEVICE_ATTR_RO(ap_functions);
+
+static ssize_t request_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ u64 req_cnt;
+
+ req_cnt = 0;
+ spin_lock_bh(&ap_queues_lock);
+ req_cnt = atomic64_read(&ac->total_request_count);
+ spin_unlock_bh(&ap_queues_lock);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+}
+
+static ssize_t request_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int bkt;
+ struct ap_queue *aq;
+ struct ap_card *ac = to_ap_card(dev);
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ aq->total_request_count = 0;
+ spin_unlock_bh(&ap_queues_lock);
+ atomic64_set(&ac->total_request_count, 0);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(request_count);
+
+static ssize_t requestq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int bkt;
+ struct ap_queue *aq;
+ unsigned int reqq_cnt;
+ struct ap_card *ac = to_ap_card(dev);
+
+ reqq_cnt = 0;
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ reqq_cnt += aq->requestq_count;
+ spin_unlock_bh(&ap_queues_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR_RO(requestq_count);
+
+static ssize_t pendingq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int bkt;
+ struct ap_queue *aq;
+ unsigned int penq_cnt;
+ struct ap_card *ac = to_ap_card(dev);
+
+ penq_cnt = 0;
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ penq_cnt += aq->pendingq_count;
+ spin_unlock_bh(&ap_queues_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR_RO(pendingq_count);
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "ap:t%02X\n",
+ to_ap_dev(dev)->device_type);
+}
+
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->config ? 1 : 0);
+}
+
+static ssize_t config_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc = 0, cfg;
+ struct ap_card *ac = to_ap_card(dev);
+
+ if (sscanf(buf, "%d\n", &cfg) != 1 || cfg < 0 || cfg > 1)
+ return -EINVAL;
+
+ if (cfg && !ac->config)
+ rc = sclp_ap_configure(ac->id);
+ else if (!cfg && ac->config)
+ rc = sclp_ap_deconfigure(ac->id);
+ if (rc)
+ return rc;
+
+ ac->config = cfg ? true : false;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(config);
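+
+/*
+ * Editorial note: these attributes are expected to surface under the
+ * card device on the ap bus, e.g.
+ * /sys/bus/ap/devices/cardxx/request_count (path layout assumed from
+ * the ap bus device registration, illustrative only). Writing any
+ * value to request_count resets the counters of the card and all its
+ * queues; writing 0 or 1 to config deconfigures or configures the
+ * adapter via SCLP.
+ */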
+
+static struct attribute *ap_card_dev_attrs[] = {
+ &dev_attr_hwtype.attr,
+ &dev_attr_raw_hwtype.attr,
+ &dev_attr_depth.attr,
+ &dev_attr_ap_functions.attr,
+ &dev_attr_request_count.attr,
+ &dev_attr_requestq_count.attr,
+ &dev_attr_pendingq_count.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_config.attr,
+ NULL
+};
+
+static struct attribute_group ap_card_dev_attr_group = {
+ .attrs = ap_card_dev_attrs
+};
+
+static const struct attribute_group *ap_card_dev_attr_groups[] = {
+ &ap_card_dev_attr_group,
+ NULL
+};
+
+static struct device_type ap_card_type = {
+ .name = "ap_card",
+ .groups = ap_card_dev_attr_groups,
+};
+
+static void ap_card_device_release(struct device *dev)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ kfree(ac);
+}
+
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
+ int comp_type, unsigned int functions)
+{
+ struct ap_card *ac;
+
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return NULL;
+ ac->ap_dev.device.release = ap_card_device_release;
+ ac->ap_dev.device.type = &ap_card_type;
+ ac->ap_dev.device_type = comp_type;
+ ac->raw_hwtype = raw_type;
+ ac->queue_depth = queue_depth;
+ ac->functions = functions;
+ ac->id = id;
+ return ac;
+}
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
new file mode 100644
index 000000000..34b0350d0
--- /dev/null
+++ b/drivers/s390/crypto/ap_debug.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Harald Freudenberger <freude@de.ibm.com>
+ */
+#ifndef AP_DEBUG_H
+#define AP_DEBUG_H
+
+#include <asm/debug.h>
+
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 5 /* informational */
+#define DBF_DEBUG 6 /* for debugging only */
+
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
+#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
+
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define AP_DBF(...) \
+ debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
+#define AP_DBF_ERR(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define AP_DBF_WARN(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define AP_DBF_INFO(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
+#define AP_DBF_DBG(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
+
+extern debug_info_t *ap_dbf_info;
+
+#endif /* AP_DEBUG_H */
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
new file mode 100644
index 000000000..ff0018f5b
--- /dev/null
+++ b/drivers/s390/crypto/ap_queue.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, queue related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+#include "ap_debug.h"
+
+static void __ap_flush_queue(struct ap_queue *aq);
+
+/**
+ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
+ * @aq: pointer to the AP queue
+ * @ind: the notification indicator byte
+ *
+ * Enables interruption on the AP queue via ap_aqic() and maps the
+ * returned AP queue status response code to an errno value: 0 on
+ * success, -EOPNOTSUPP if the queue can not deliver interrupts and
+ * -EBUSY if the caller should retry later.
+ */
+static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
+{
+ struct ap_queue_status status;
+ struct ap_qirq_ctrl qirqctrl = { 0 };
+
+ qirqctrl.ir = 1;
+ qirqctrl.isc = AP_ISC;
+ status = ap_aqic(aq->qid, qirqctrl, ind);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ return 0;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
+ AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid));
+ return -EOPNOTSUPP;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ default:
+ return -EBUSY;
+ }
+}
+
+/**
+ * __ap_send(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ * @special: Special Bit
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+ int special)
+{
+ if (special)
+ qid |= 0x400000UL;
+ return ap_nqap(qid, psmid, msg, length);
+}
+
+int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+ struct ap_queue_status status;
+
+ status = __ap_send(qid, psmid, msg, length, 0);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_Q_FULL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return -EBUSY;
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ return -EINVAL;
+ default: /* Device is gone. */
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(ap_send);
+
+int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+{
+ struct ap_queue_status status;
+
+ if (msg == NULL)
+ return -EINVAL;
+ status = ap_dqap(qid, psmid, msg, length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (status.queue_empty)
+ return -ENOENT;
+ return -EBUSY;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return -EBUSY;
+ default:
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(ap_recv);
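+
+/*
+ * Editorial sketch of the raw synchronous send/receive pair
+ * (illustrative only and, per the note in ap_bus.h, only valid as
+ * long as ap_queue_message() has never been used on this queue;
+ * the psmid 0x1234 and the buffers are placeholders):
+ *
+ *	unsigned long long psmid;
+ *	int rc;
+ *
+ *	rc = ap_send(qid, 0x1234ULL, msgbuf, msglen);
+ *	do {
+ *		rc = ap_recv(qid, &psmid, replybuf, replylen);
+ *	} while (rc == -EBUSY);
+ */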
+
+/* State machine definitions and helpers */
+
+static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
+{
+ return AP_SM_WAIT_NONE;
+}
+
+/**
+ * ap_sm_recv(): Receive pending reply messages from an AP queue but do
+ * not change the state of the device.
+ * @aq: pointer to the AP queue
+ *
+ * Returns the ap queue status returned by the ap_dqap() invocation.
+ */
+static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ struct ap_message *ap_msg;
+ bool found = false;
+
+ status = ap_dqap(aq->qid, &aq->reply->psmid,
+ aq->reply->msg, aq->reply->len);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ aq->queue_count = max_t(int, 0, aq->queue_count - 1);
+ if (!status.queue_empty && !aq->queue_count)
+ aq->queue_count++;
+ if (aq->queue_count > 0)
+ mod_timer(&aq->timeout,
+ jiffies + aq->request_timeout);
+ list_for_each_entry(ap_msg, &aq->pendingq, list) {
+ if (ap_msg->psmid != aq->reply->psmid)
+ continue;
+ list_del_init(&ap_msg->list);
+ aq->pendingq_count--;
+ ap_msg->receive(aq, ap_msg, aq->reply);
+ found = true;
+ break;
+ }
+ if (!found) {
+ AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
+ __func__, aq->reply->psmid,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ }
+ fallthrough;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (!status.queue_empty || aq->queue_count <= 0)
+ break;
+ /* The card shouldn't forget requests but who knows. */
+ aq->queue_count = 0;
+ list_splice_init(&aq->pendingq, &aq->requestq);
+ aq->requestq_count += aq->pendingq_count;
+ aq->pendingq_count = 0;
+ break;
+ default:
+ break;
+ }
+ return status;
+}
+
+/**
+ * ap_sm_read(): Receive pending reply messages from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT
+ * or AP_SM_WAIT_INTERRUPT
+ */
+static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ if (!aq->reply)
+ return AP_SM_WAIT_NONE;
+ status = ap_sm_recv(aq);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (aq->queue_count > 0) {
+ aq->sm_state = AP_SM_STATE_WORKING;
+ return AP_SM_WAIT_AGAIN;
+ }
+ aq->sm_state = AP_SM_STATE_IDLE;
+ return AP_SM_WAIT_NONE;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (aq->queue_count > 0)
+ return aq->interrupt ?
+ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
+ aq->sm_state = AP_SM_STATE_IDLE;
+ return AP_SM_WAIT_NONE;
+ default:
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_write(): Send messages from the request queue to an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT
+ * or AP_SM_WAIT_INTERRUPT
+ */
+static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ struct ap_message *ap_msg;
+ ap_qid_t qid = aq->qid;
+
+ if (aq->requestq_count <= 0)
+ return AP_SM_WAIT_NONE;
+ /* Start the next request on the queue. */
+ ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
+ AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
+ __func__, ap_msg->fi.cmd);
+ qid = 0xFF00;
+ }
+#endif
+ status = __ap_send(qid, ap_msg->psmid,
+ ap_msg->msg, ap_msg->len,
+ ap_msg->flags & AP_MSG_FLAG_SPECIAL);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ aq->queue_count = max_t(int, 1, aq->queue_count + 1);
+ if (aq->queue_count == 1)
+ mod_timer(&aq->timeout, jiffies + aq->request_timeout);
+ list_move_tail(&ap_msg->list, &aq->pendingq);
+ aq->requestq_count--;
+ aq->pendingq_count++;
+ if (aq->queue_count < aq->card->queue_depth) {
+ aq->sm_state = AP_SM_STATE_WORKING;
+ return AP_SM_WAIT_AGAIN;
+ }
+ fallthrough;
+ case AP_RESPONSE_Q_FULL:
+ aq->sm_state = AP_SM_STATE_QUEUE_FULL;
+ return aq->interrupt ?
+ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ aq->sm_state = AP_SM_STATE_RESET_WAIT;
+ return AP_SM_WAIT_TIMEOUT;
+ case AP_RESPONSE_INVALID_DOMAIN:
+ AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
+ fallthrough;
+ case AP_RESPONSE_MESSAGE_TOO_BIG:
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ list_del_init(&ap_msg->list);
+ aq->requestq_count--;
+ ap_msg->rc = -EINVAL;
+ ap_msg->receive(aq, ap_msg, NULL);
+ return AP_SM_WAIT_AGAIN;
+ default:
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_read_write(): Send and receive messages to/from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT
+ * or AP_SM_WAIT_INTERRUPT
+ */
+static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
+{
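+	/*
+	 * Editorial note: the ap_sm_wait enum values are ordered from
+	 * most to least urgent, so min() picks the more immediate of
+	 * the read and write wait hints.
+	 */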
+ return min(ap_sm_read(aq), ap_sm_write(aq));
+}
+
+/**
+ * ap_sm_reset(): Reset an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Submit the Reset command to an AP queue.
+ */
+static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ status = ap_rapq(aq->qid);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ aq->sm_state = AP_SM_STATE_RESET_WAIT;
+ aq->interrupt = false;
+ return AP_SM_WAIT_TIMEOUT;
+ default:
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_reset_wait(): Test queue for completion of the reset operation
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_NONE.
+ */
+static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ void *lsi_ptr;
+
+ if (aq->queue_count > 0 && aq->reply)
+ /* Try to read a completed message and get the status */
+ status = ap_sm_recv(aq);
+ else
+ /* Get the status with TAPQ */
+ status = ap_tapq(aq->qid, NULL);
+
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ lsi_ptr = ap_airq_ptr();
+ if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
+ aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
+ else
+ aq->sm_state = (aq->queue_count > 0) ?
+ AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
+ return AP_SM_WAIT_AGAIN;
+ case AP_RESPONSE_BUSY:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return AP_SM_WAIT_TIMEOUT;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_NONE.
+ */
+static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+
+ if (aq->queue_count > 0 && aq->reply)
+ /* Try to read a completed message and get the status */
+ status = ap_sm_recv(aq);
+ else
+ /* Get the status with TAPQ */
+ status = ap_tapq(aq->qid, NULL);
+
+ if (status.irq_enabled == 1) {
+ /* Irqs are now enabled */
+ aq->interrupt = true;
+ aq->sm_state = (aq->queue_count > 0) ?
+ AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
+ }
+
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (aq->queue_count > 0)
+ return AP_SM_WAIT_AGAIN;
+ fallthrough;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ return AP_SM_WAIT_TIMEOUT;
+ default:
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+}
+
+/*
+ * AP state machine jump table
+ */
+static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
+ [AP_SM_STATE_RESET_START] = {
+ [AP_SM_EVENT_POLL] = ap_sm_reset,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_SM_STATE_RESET_WAIT] = {
+ [AP_SM_EVENT_POLL] = ap_sm_reset_wait,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_SM_STATE_SETIRQ_WAIT] = {
+ [AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_SM_STATE_IDLE] = {
+ [AP_SM_EVENT_POLL] = ap_sm_write,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
+ },
+ [AP_SM_STATE_WORKING] = {
+ [AP_SM_EVENT_POLL] = ap_sm_read_write,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
+ },
+ [AP_SM_STATE_QUEUE_FULL] = {
+ [AP_SM_EVENT_POLL] = ap_sm_read,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
+ },
+};
+
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
+{
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ return ap_jumptable[aq->sm_state][event](aq);
+ else
+ return AP_SM_WAIT_NONE;
+}
+
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
+{
+ enum ap_sm_wait wait;
+
+ while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
+ ;
+ return wait;
+}
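+
+/*
+ * Editorial sketch: callers drive the state machine while holding the
+ * queue lock and hand the resulting wait hint to ap_wait(), as e.g.
+ * ap_queue_message() further down does (illustrative only):
+ *
+ *	spin_lock_bh(&aq->lock);
+ *	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+ *	spin_unlock_bh(&aq->lock);
+ */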
+
+/*
+ * AP queue related attributes.
+ */
+static ssize_t request_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ bool valid = false;
+ u64 req_cnt;
+
+ spin_lock_bh(&aq->lock);
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+ req_cnt = aq->total_request_count;
+ valid = true;
+ }
+ spin_unlock_bh(&aq->lock);
+
+ if (valid)
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ else
+ return scnprintf(buf, PAGE_SIZE, "-\n");
+}
+
+static ssize_t request_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+
+ spin_lock_bh(&aq->lock);
+ aq->total_request_count = 0;
+ spin_unlock_bh(&aq->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(request_count);
+
+static ssize_t requestq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ unsigned int reqq_cnt = 0;
+
+ spin_lock_bh(&aq->lock);
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ reqq_cnt = aq->requestq_count;
+ spin_unlock_bh(&aq->lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR_RO(requestq_count);
+
+static ssize_t pendingq_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ unsigned int penq_cnt = 0;
+
+ spin_lock_bh(&aq->lock);
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ penq_cnt = aq->pendingq_count;
+ spin_unlock_bh(&aq->lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR_RO(pendingq_count);
+
+static ssize_t reset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc = 0;
+
+ spin_lock_bh(&aq->lock);
+ switch (aq->sm_state) {
+ case AP_SM_STATE_RESET_START:
+ case AP_SM_STATE_RESET_WAIT:
+ rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+ break;
+ case AP_SM_STATE_WORKING:
+ case AP_SM_STATE_QUEUE_FULL:
+ rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+ break;
+ default:
+ rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+ }
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+
+ spin_lock_bh(&aq->lock);
+ __ap_flush_queue(aq);
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+
+ AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(reset);
+
+static ssize_t interrupt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc = 0;
+
+ spin_lock_bh(&aq->lock);
+ if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
+ rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+ else if (aq->interrupt)
+ rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+ else
+ rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR_RO(interrupt);
+
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc;
+
+ spin_lock_bh(&aq->lock);
+ rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR_RO(config);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+static ssize_t states_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc = 0;
+
+ spin_lock_bh(&aq->lock);
+ /* queue device state */
+ switch (aq->dev_state) {
+ case AP_DEV_STATE_UNINITIATED:
+ rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
+ break;
+ case AP_DEV_STATE_OPERATING:
+ rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
+ break;
+ case AP_DEV_STATE_SHUTDOWN:
+ rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
+ break;
+ case AP_DEV_STATE_ERROR:
+ rc = scnprintf(buf, PAGE_SIZE, "ERROR");
+ break;
+ default:
+ rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
+ }
+ /* state machine state */
+ if (aq->dev_state) {
+ switch (aq->sm_state) {
+ case AP_SM_STATE_RESET_START:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [RESET_START]\n");
+ break;
+ case AP_SM_STATE_RESET_WAIT:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [RESET_WAIT]\n");
+ break;
+ case AP_SM_STATE_SETIRQ_WAIT:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [SETIRQ_WAIT]\n");
+ break;
+ case AP_SM_STATE_IDLE:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [IDLE]\n");
+ break;
+ case AP_SM_STATE_WORKING:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [WORKING]\n");
+ break;
+ case AP_SM_STATE_QUEUE_FULL:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [FULL]\n");
+ break;
+ default:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [UNKNOWN]\n");
+ }
+ }
+ spin_unlock_bh(&aq->lock);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(states);
+
+static ssize_t last_err_rc_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc;
+
+ spin_lock_bh(&aq->lock);
+ rc = aq->last_err_rc;
+ spin_unlock_bh(&aq->lock);
+
+ switch (rc) {
+ case AP_RESPONSE_NORMAL:
+ return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
+ case AP_RESPONSE_DECONFIGURED:
+ return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
+ case AP_RESPONSE_CHECKSTOPPED:
+ return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
+ case AP_RESPONSE_BUSY:
+ return scnprintf(buf, PAGE_SIZE, "BUSY\n");
+ case AP_RESPONSE_INVALID_ADDRESS:
+ return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
+ case AP_RESPONSE_Q_FULL:
+ return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
+ case AP_RESPONSE_INDEX_TOO_BIG:
+ return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
+ case AP_RESPONSE_NO_FIRST_PART:
+ return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
+ case AP_RESPONSE_MESSAGE_TOO_BIG:
+ return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
+ default:
+ return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
+ }
+}
+static DEVICE_ATTR_RO(last_err_rc);
+#endif
+
+static struct attribute *ap_queue_dev_attrs[] = {
+ &dev_attr_request_count.attr,
+ &dev_attr_requestq_count.attr,
+ &dev_attr_pendingq_count.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_interrupt.attr,
+ &dev_attr_config.attr,
+#ifdef CONFIG_ZCRYPT_DEBUG
+ &dev_attr_states.attr,
+ &dev_attr_last_err_rc.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group ap_queue_dev_attr_group = {
+ .attrs = ap_queue_dev_attrs
+};
+
+static const struct attribute_group *ap_queue_dev_attr_groups[] = {
+ &ap_queue_dev_attr_group,
+ NULL
+};
+
+static struct device_type ap_queue_type = {
+ .name = "ap_queue",
+ .groups = ap_queue_dev_attr_groups,
+};
+
+static void ap_queue_device_release(struct device *dev)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_del(&aq->hnode);
+ spin_unlock_bh(&ap_queues_lock);
+
+ kfree(aq);
+}
+
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
+{
+ struct ap_queue *aq;
+
+ aq = kzalloc(sizeof(*aq), GFP_KERNEL);
+ if (!aq)
+ return NULL;
+ aq->ap_dev.device.release = ap_queue_device_release;
+ aq->ap_dev.device.type = &ap_queue_type;
+ aq->ap_dev.device_type = device_type;
+ aq->qid = qid;
+ aq->interrupt = false;
+ spin_lock_init(&aq->lock);
+ INIT_LIST_HEAD(&aq->pendingq);
+ INIT_LIST_HEAD(&aq->requestq);
+ timer_setup(&aq->timeout, ap_request_timeout, 0);
+
+ return aq;
+}
+
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
+{
+ aq->reply = reply;
+
+ spin_lock_bh(&aq->lock);
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_init_reply);
+
+/**
+ * ap_queue_message(): Queue a request to an AP device.
+ * @aq: The AP device to queue the message to
+ * @ap_msg: The message that is to be added
+ */
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+ int rc = 0;
+
+ /* msg needs to have a valid receive-callback */
+ BUG_ON(!ap_msg->receive);
+
+ spin_lock_bh(&aq->lock);
+
+ /* only allow to queue new messages if device state is ok */
+ if (aq->dev_state == AP_DEV_STATE_OPERATING) {
+ list_add_tail(&ap_msg->list, &aq->requestq);
+ aq->requestq_count++;
+ aq->total_request_count++;
+ atomic64_inc(&aq->card->total_request_count);
+ } else
+ rc = -ENODEV;
+
+ /* Send/receive as many request from the queue as possible. */
+ ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+
+ spin_unlock_bh(&aq->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(ap_queue_message);
+
+/**
+ * ap_cancel_message(): Cancel a crypto request.
+ * @aq: The AP device that has the message queued
+ * @ap_msg: The message that is to be removed
+ *
+ * Cancel a crypto request. This is done by removing the request
+ * from the device pending or request queue. Note that the
+ * request stays on the AP queue. When it finishes the message
+ * reply will be discarded because the psmid can't be found.
+ */
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+ struct ap_message *tmp;
+
+ spin_lock_bh(&aq->lock);
+ if (!list_empty(&ap_msg->list)) {
+ list_for_each_entry(tmp, &aq->pendingq, list)
+ if (tmp->psmid == ap_msg->psmid) {
+ aq->pendingq_count--;
+ goto found;
+ }
+ aq->requestq_count--;
+found:
+ list_del_init(&ap_msg->list);
+ }
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_cancel_message);
+
+/**
+ * __ap_flush_queue(): Flush requests.
+ * @aq: Pointer to the AP queue
+ *
+ * Flush all requests from the request/pending queue of an AP device.
+ */
+static void __ap_flush_queue(struct ap_queue *aq)
+{
+ struct ap_message *ap_msg, *next;
+
+ list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
+ list_del_init(&ap_msg->list);
+ aq->pendingq_count--;
+ ap_msg->rc = -EAGAIN;
+ ap_msg->receive(aq, ap_msg, NULL);
+ }
+ list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
+ list_del_init(&ap_msg->list);
+ aq->requestq_count--;
+ ap_msg->rc = -EAGAIN;
+ ap_msg->receive(aq, ap_msg, NULL);
+ }
+ aq->queue_count = 0;
+}
+
+void ap_flush_queue(struct ap_queue *aq)
+{
+ spin_lock_bh(&aq->lock);
+ __ap_flush_queue(aq);
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_flush_queue);
+
+void ap_queue_prepare_remove(struct ap_queue *aq)
+{
+ spin_lock_bh(&aq->lock);
+ /* flush queue */
+ __ap_flush_queue(aq);
+ /* move queue device state to SHUTDOWN in progress */
+ aq->dev_state = AP_DEV_STATE_SHUTDOWN;
+ spin_unlock_bh(&aq->lock);
+ del_timer_sync(&aq->timeout);
+}
+
+void ap_queue_remove(struct ap_queue *aq)
+{
+ /*
+ * all messages have been flushed and the device state
+ * is SHUTDOWN. Now reset with zero which also clears
+ * the irq registration and move the device state
+ * to the initial value AP_DEV_STATE_UNINITIATED.
+ */
+ spin_lock_bh(&aq->lock);
+ ap_zapq(aq->qid);
+ aq->dev_state = AP_DEV_STATE_UNINITIATED;
+ spin_unlock_bh(&aq->lock);
+}
+
+void ap_queue_init_state(struct ap_queue *aq)
+{
+ spin_lock_bh(&aq->lock);
+ aq->dev_state = AP_DEV_STATE_OPERATING;
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_init_state);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
new file mode 100644
index 000000000..69882ff4d
--- /dev/null
+++ b/drivers/s390/crypto/pkey_api.c
@@ -0,0 +1,2100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey device driver
+ *
+ * Copyright IBM Corp. 2017,2019
+ * Author(s): Harald Freudenberger
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kallsyms.h>
+#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/cpufeature.h>
+#include <asm/zcrypt.h>
+#include <asm/cpacf.h>
+#include <asm/pkey.h>
+#include <crypto/aes.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key interface");
+
+#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
+#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */
+#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
+
+/*
+ * debug feature data and functions
+ */
+
+static debug_info_t *debug_info;
+
+#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
+#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
+#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
+#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
+
+static void __init pkey_debug_init(void)
+{
+ /* 5 arguments per dbf entry (including the format string ptr) */
+ debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
+ debug_register_view(debug_info, &debug_sprintf_view);
+ debug_set_level(debug_info, 3);
+}
+
+static void __exit pkey_debug_exit(void)
+{
+ debug_unregister(debug_info);
+}
+
+/* inside view of a protected key token (only type 0x00 version 0x01) */
+struct protaeskeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* should be 0x01 for protected AES key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in protkey[] */
+ u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
+} __packed;
+
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearaeskeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* 0x02 for clear AES key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in clearkey[] */
+ u8 clearkey[]; /* clear key value */
+} __packed;
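+
+/*
+ * Editorial sketch of how a clear key token as consumed by
+ * pkey_nonccatok2pkey() could be assembled (illustrative only;
+ * rawkey is a placeholder for 32 bytes of clear AES-256 key
+ * material):
+ *
+ *	struct clearaeskeytoken *t;
+ *
+ *	t = kzalloc(sizeof(*t) + 32, GFP_KERNEL);
+ *	t->type = TOKTYPE_NON_CCA;
+ *	t->version = TOKVER_CLEAR_KEY;
+ *	t->keytype = PKEY_KEYTYPE_AES_256;
+ *	t->len = 32;
+ *	memcpy(t->clearkey, rawkey, 32);
+ */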
+
+/*
+ * Create a protected key from a clear key value.
+ */
+static int pkey_clr2protkey(u32 keytype,
+ const struct pkey_clrkey *clrkey,
+ struct pkey_protkey *protkey)
+{
+ /* mask of available pckmo subfunctions */
+ static cpacf_mask_t pckmo_functions;
+
+ long fc;
+ int keysize;
+ u8 paramblock[64];
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ keysize = 16;
+ fc = CPACF_PCKMO_ENC_AES_128_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ keysize = 24;
+ fc = CPACF_PCKMO_ENC_AES_192_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ keysize = 32;
+ fc = CPACF_PCKMO_ENC_AES_256_KEY;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ /* Did we already check for PCKMO ? */
+ if (!pckmo_functions.bytes[0]) {
+ /* no, so check now */
+ if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
+ return -ENODEV;
+ }
+ /* check for the pckmo subfunction we need now */
+ if (!cpacf_test_func(&pckmo_functions, fc)) {
+ DEBUG_ERR("%s pckmo functions not available\n", __func__);
+ return -ENODEV;
+ }
+
+ /* prepare param block */
+ memset(paramblock, 0, sizeof(paramblock));
+ memcpy(paramblock, clrkey->clrkey, keysize);
+
+ /* call the pckmo instruction */
+ cpacf_pckmo(fc, paramblock);
+
+ /* copy created protected key */
+ protkey->type = keytype;
+ protkey->len = keysize + 32;
+ memcpy(protkey->protkey, paramblock, keysize + 32);
+
+ return 0;
+}
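+
+/*
+ * Editorial usage sketch for pkey_clr2protkey() (illustrative only;
+ * rawkey is a placeholder for 16 bytes of clear key material):
+ *
+ *	struct pkey_clrkey clrkey;
+ *	struct pkey_protkey protkey;
+ *	int rc;
+ *
+ *	memcpy(clrkey.clrkey, rawkey, 16);
+ *	rc = pkey_clr2protkey(PKEY_KEYTYPE_AES_128, &clrkey, &protkey);
+ *
+ * On success protkey.protkey holds the wrapped 16 byte key followed
+ * by the 32 byte wrapping key verification pattern (protkey.len is
+ * then 48).
+ */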
+
+/*
+ * Find card and transform secure key into protected key.
+ */
+static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
+{
+ int rc, verify;
+ u16 cardnr, domain;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ /*
+	 * The cca_xxx2protkey call may fail when a card has been
+	 * addressed where the master key was changed after the last
+	 * fetch of the mkvp into the cache. Try 3 times: first without
+	 * verify, then with verify, and in the last round with verify
+	 * and an old master key verification pattern match no longer
+	 * ignored.
+ */
+ for (verify = 0; verify < 3; verify++) {
+ rc = cca_findcard(key, &cardnr, &domain, verify);
+ if (rc < 0)
+ continue;
+ if (rc > 0 && verify < 2)
+ continue;
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ rc = cca_sec2protkey(cardnr, domain,
+ key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ break;
+ case TOKVER_CCA_VLSC:
+ rc = cca_cipher2protkey(cardnr, domain,
+ key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (rc == 0)
+ break;
+ }
+
+ if (rc)
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+
+ return rc;
+}
+
+/*
+ * Construct EP11 key with given clear key value.
+ */
+static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+ u8 *keybuf, size_t *keybuflen)
+{
+ int i, rc;
+ u16 card, dom;
+ u32 nr_apqns, *apqns = NULL;
+
+ /* build a list of apqns suitable for ep11 keys with cpacf support */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, NULL);
+ if (rc)
+ goto out;
+
+	/* go through the list of apqns and try to build an ep11 key */
+ for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+ 0, clrkey, keybuf, keybuflen);
+ if (rc == 0)
+ break;
+ }
+
+out:
+ kfree(apqns);
+ if (rc)
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/*
+ * Find card and transform EP11 secure key into protected key.
+ */
+static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
+{
+ int i, rc;
+ u16 card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ /* build a list of apqns suitable for this key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ if (rc)
+ goto out;
+
+	/* go through the list of apqns and try to derive a pkey */
+ for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ pkey->len = sizeof(pkey->protkey);
+ rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len, &pkey->type);
+ if (rc == 0)
+ break;
+ }
+
+out:
+ kfree(apqns);
+ if (rc)
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/*
+ * Verify key and give back some info about the key.
+ */
+static int pkey_verifykey(const struct pkey_seckey *seckey,
+ u16 *pcardnr, u16 *pdomain,
+ u16 *pkeysize, u32 *pattributes)
+{
+ struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
+ u16 cardnr, domain;
+ int rc;
+
+ /* check the secure key for valid AES secure key */
+ rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *) seckey, 0);
+ if (rc)
+ goto out;
+ if (pattributes)
+ *pattributes = PKEY_VERIFY_ATTR_AES;
+ if (pkeysize)
+ *pkeysize = t->bitsize;
+
+ /* try to find a card which can handle this key */
+ rc = cca_findcard(seckey->seckey, &cardnr, &domain, 1);
+ if (rc < 0)
+ goto out;
+
+ if (rc > 0) {
+ /* key mkvp matches to old master key mkvp */
+ DEBUG_DBG("%s secure key has old mkvp\n", __func__);
+ if (pattributes)
+ *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
+ rc = 0;
+ }
+
+ if (pcardnr)
+ *pcardnr = cardnr;
+ if (pdomain)
+ *pdomain = domain;
+
+out:
+ DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/*
+ * Generate a random protected key
+ */
+static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey)
+{
+ struct pkey_clrkey clrkey;
+ int keysize;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ keysize = 16;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ keysize = 24;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ keysize = 32;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
+ keytype);
+ return -EINVAL;
+ }
+
+ /* generate a dummy random clear key */
+ get_random_bytes(clrkey.clrkey, keysize);
+
+ /* convert it to a dummy protected key */
+ rc = pkey_clr2protkey(keytype, &clrkey, protkey);
+ if (rc)
+ return rc;
+
+ /* replace the key part of the protected key with random bytes */
+ get_random_bytes(protkey->protkey, keysize);
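+	/*
+	 * Note: the random bytes just written replace only the
+	 * encrypted key part; the wrapping key verification pattern
+	 * appended by PCKMO depends only on the wrapping key and thus
+	 * stays valid, leaving a usable protected key with an unknown
+	 * clear key value.
+	 */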
+
+ return 0;
+}
+
+/*
+ * Verify if a protected key is still valid
+ */
+static int pkey_verifyprotkey(const struct pkey_protkey *protkey)
+{
+ unsigned long fc;
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[MAXPROTKEYSIZE];
+ } param;
+ u8 null_msg[AES_BLOCK_SIZE];
+ u8 dest_buf[AES_BLOCK_SIZE];
+ unsigned int k;
+
+ switch (protkey->type) {
+ case PKEY_KEYTYPE_AES_128:
+ fc = CPACF_KMC_PAES_128;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ fc = CPACF_KMC_PAES_192;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ fc = CPACF_KMC_PAES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
+ protkey->type);
+ return -EINVAL;
+ }
+
+ memset(null_msg, 0, sizeof(null_msg));
+
+ memset(param.iv, 0, sizeof(param.iv));
+ memcpy(param.key, protkey->protkey, sizeof(param.key));
+
+ k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
+ sizeof(null_msg));
+ if (k != sizeof(null_msg)) {
+ DEBUG_ERR("%s protected key is not valid\n", __func__);
+ return -EKEYREJECTED;
+ }
+
+ return 0;
+}
+
+/*
+ * Transform a non-CCA key token into a protected key
+ */
+static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
+ struct pkey_protkey *protkey)
+{
+ int rc = -EINVAL;
+ u8 *tmpbuf = NULL;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ switch (hdr->version) {
+ case TOKVER_PROTECTED_KEY: {
+ struct protaeskeytoken *t;
+
+ if (keylen != sizeof(struct protaeskeytoken))
+ goto out;
+ t = (struct protaeskeytoken *)key;
+ protkey->len = t->len;
+ protkey->type = t->keytype;
+ memcpy(protkey->protkey, t->protkey,
+ sizeof(protkey->protkey));
+ rc = pkey_verifyprotkey(protkey);
+ break;
+ }
+ case TOKVER_CLEAR_KEY: {
+ struct clearaeskeytoken *t;
+ struct pkey_clrkey ckey;
+ union u_tmpbuf {
+ u8 skey[SECKEYBLOBSIZE];
+ u8 ep11key[MAXEP11AESKEYBLOBSIZE];
+ };
+ size_t tmpbuflen = sizeof(union u_tmpbuf);
+
+ if (keylen < sizeof(struct clearaeskeytoken))
+ goto out;
+ t = (struct clearaeskeytoken *)key;
+ if (keylen != sizeof(*t) + t->len)
+ goto out;
+ if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16)
+ || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24)
+ || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
+ memcpy(ckey.clrkey, t->clearkey, t->len);
+ else
+ goto out;
+ /* alloc temp key buffer space */
+ tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
+ if (!tmpbuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* try direct way with the PCKMO instruction */
+ rc = pkey_clr2protkey(t->keytype, &ckey, protkey);
+ if (rc == 0)
+ break;
+ /* PCKMO failed, so try the CCA secure key way */
+ rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype,
+ ckey.clrkey, tmpbuf);
+ if (rc == 0)
+ rc = pkey_skey2pkey(tmpbuf, protkey);
+ if (rc == 0)
+ break;
+ /* if the CCA way also failed, let's try via EP11 */
+ rc = pkey_clr2ep11key(ckey.clrkey, t->len,
+ tmpbuf, &tmpbuflen);
+		if (rc == 0)
+			rc = pkey_ep11key2pkey(tmpbuf, protkey);
+		if (rc == 0)
+			break;
+		/* by now we should really have a protected key, give up */
+		DEBUG_ERR("%s unable to build protected key from clear",
+			  __func__);
+		break;
+ }
+ case TOKVER_EP11_AES: {
+ /* check ep11 key for exportable as protected key */
+ rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ rc = pkey_ep11key2pkey(key, protkey);
+ break;
+ }
+ case TOKVER_EP11_AES_WITH_HEADER:
+ /* check ep11 key with header for exportable as protected key */
+ rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header),
+ protkey);
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
+ __func__, hdr->version);
+ rc = -EINVAL;
+ }
+
+out:
+ kfree(tmpbuf);
+ return rc;
+}
+
+/*
+ * Transform a CCA internal key token into a protected key
+ */
+static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
+ struct pkey_protkey *protkey)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ break;
+ case TOKVER_CCA_VLSC:
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
+ __func__, hdr->version);
+ return -EINVAL;
+ }
+
+ return pkey_skey2pkey(key, protkey);
+}
+
+/*
+ * Transform a key blob (of any type) into a protected key
+ */
+int pkey_keyblob2pkey(const u8 *key, u32 keylen,
+ struct pkey_protkey *protkey)
+{
+ int rc;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(struct keytoken_header)) {
+ DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen);
+ return -EINVAL;
+ }
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+ rc = pkey_nonccatok2pkey(key, keylen, protkey);
+ break;
+ case TOKTYPE_CCA_INTERNAL:
+ rc = pkey_ccainttok2pkey(key, keylen, protkey);
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
+ return -EINVAL;
+ }
+
+ DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ return rc;
+
+}
+EXPORT_SYMBOL(pkey_keyblob2pkey);
+
+static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ enum pkey_key_type ktype, enum pkey_key_size ksize,
+ u32 kflags, u8 *keybuf, size_t *keybufsize)
+{
+ int i, card, dom, rc;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ /* check key type and size */
+ switch (ktype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ if (*keybufsize < SECKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ case PKEY_TYPE_EP11:
+ if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (ksize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_SIZE_AES_192:
+ case PKEY_SIZE_AES_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* simple try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (ktype == PKEY_TYPE_EP11) {
+ rc = ep11_genaeskey(card, dom, ksize, kflags,
+ keybuf, keybufsize);
+ } else if (ktype == PKEY_TYPE_CCA_DATA) {
+ rc = cca_genseckey(card, dom, ksize, keybuf);
+ *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
+ } else /* TOKVER_CCA_VLSC */
+ rc = cca_gencipherkey(card, dom, ksize, kflags,
+ keybuf, keybufsize);
+ if (rc == 0)
+ break;
+ }
+
+ return rc;
+}
+
+static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ enum pkey_key_type ktype, enum pkey_key_size ksize,
+ u32 kflags, const u8 *clrkey,
+ u8 *keybuf, size_t *keybufsize)
+{
+ int i, card, dom, rc;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ /* check key type and size */
+ switch (ktype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ if (*keybufsize < SECKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ case PKEY_TYPE_EP11:
+ if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (ksize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_SIZE_AES_192:
+ case PKEY_SIZE_AES_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* simple try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (ktype == PKEY_TYPE_EP11) {
+ rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+ clrkey, keybuf, keybufsize);
+ } else if (ktype == PKEY_TYPE_CCA_DATA) {
+ rc = cca_clr2seckey(card, dom, ksize,
+ clrkey, keybuf);
+ *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
+ } else /* TOKVER_CCA_VLSC */
+ rc = cca_clr2cipherkey(card, dom, ksize, kflags,
+ clrkey, keybuf, keybufsize);
+ if (rc == 0)
+ break;
+ }
+
+ return rc;
+}
+
+static int pkey_verifykey2(const u8 *key, size_t keylen,
+ u16 *cardnr, u16 *domain,
+ enum pkey_key_type *ktype,
+ enum pkey_key_size *ksize, u32 *flags)
+{
+ int rc;
+ u32 _nr_apqns, *_apqns = NULL;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_CCA_DATA;
+ if (ksize)
+ *ksize = (enum pkey_key_size) t->bitsize;
+
+ rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&_apqns, &_nr_apqns,
+ *cardnr, *domain,
+ ZCRYPT_CEX3C, AES_MK_SET,
+ 0, t->mkvp, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_CCA_CIPHER;
+ if (ksize) {
+ *ksize = PKEY_SIZE_UNKNOWN;
+ if (!t->plfver && t->wpllen == 512)
+ *ksize = PKEY_SIZE_AES_128;
+ else if (!t->plfver && t->wpllen == 576)
+ *ksize = PKEY_SIZE_AES_192;
+ else if (!t->plfver && t->wpllen == 640)
+ *ksize = PKEY_SIZE_AES_256;
+ }
+
+ rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ rc = cca_findcard2(&_apqns, &_nr_apqns,
+ *cardnr, *domain,
+ ZCRYPT_CEX6, AES_MK_SET,
+ 0, t->mkvp0, 1);
+ if (rc == 0 && flags)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+ rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_EP11;
+ if (ksize)
+ *ksize = kb->head.bitlen;
+
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ if (rc)
+ goto out;
+
+ if (flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
+ } else
+ rc = -EINVAL;
+
+out:
+ kfree(_apqns);
+ return rc;
+}
+
+static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, size_t keylen,
+ struct pkey_protkey *pkey)
+{
+ int i, card, dom, rc;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ if (hdr->version == TOKVER_CCA_AES) {
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ return -EINVAL;
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ return -EINVAL;
+ } else {
+ DEBUG_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
+ return -EINVAL;
+ }
+ } else if (hdr->type == TOKTYPE_NON_CCA) {
+ if (hdr->version == TOKVER_EP11_AES) {
+ if (keylen < sizeof(struct ep11keyblob))
+ return -EINVAL;
+ if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else {
+ return pkey_nonccatok2pkey(key, keylen, pkey);
+ }
+ } else {
+ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
+ return -EINVAL;
+ }
+
+ /* simple try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_AES)
+ rc = cca_sec2protkey(card, dom, key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_VLSC)
+ rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
+ &pkey->len, &pkey->type);
+ else { /* EP11 AES secure key blob */
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ pkey->len = sizeof(pkey->protkey);
+ rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len,
+ &pkey->type);
+ }
+ if (rc == 0)
+ break;
+ }
+
+ return rc;
+}
+
+static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ int rc;
+ u32 _nr_apqns, *_apqns = NULL;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(struct keytoken_header) || flags == 0)
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA
+ && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+ || hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+ && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ int minhwtype = 0, api = 0;
+ struct ep11keyblob *kb = (struct ep11keyblob *)
+ (key + sizeof(struct ep11kblob_header));
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = EP11_API_V;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES
+ && is_ep11_keyblob(key)) {
+ int minhwtype = 0, api = 0;
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = EP11_API_V;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ int minhwtype = ZCRYPT_CEX3C;
+ u64 cur_mkvp = 0, old_mkvp = 0;
+
+ if (hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ minhwtype = ZCRYPT_CEX6;
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp0;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp0;
+ } else {
+ /* unknown cca internal token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, AES_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ u64 cur_mkvp = 0, old_mkvp = 0;
+ struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
+
+ if (t->secid == 0x20) {
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else {
+			/* unknown cca internal pka token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, APKA_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+ } else
+ return -EINVAL;
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ return rc;
+}
+
+static int pkey_apqns4keytype(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns)
+{
+ int rc;
+ u32 _nr_apqns, *_apqns = NULL;
+
+	if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
+		/* 8 byte mkvp values derived from the 32 byte parameters */
+		u64 cur_mkvp64 = 0, old_mkvp64 = 0;
+		int minhwtype = ZCRYPT_CEX3C;
+
+		if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+			cur_mkvp64 = *((u64 *) cur_mkvp);
+		if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+			old_mkvp64 = *((u64 *) alt_mkvp);
+		if (ktype == PKEY_TYPE_CCA_CIPHER)
+			minhwtype = ZCRYPT_CEX6;
+		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				   minhwtype, AES_MK_SET,
+				   cur_mkvp64, old_mkvp64, 1);
+ if (rc)
+ goto out;
+	} else if (ktype == PKEY_TYPE_CCA_ECC) {
+		/* 8 byte mkvp values derived from the 32 byte parameters */
+		u64 cur_mkvp64 = 0, old_mkvp64 = 0;
+
+		if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+			cur_mkvp64 = *((u64 *) cur_mkvp);
+		if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+			old_mkvp64 = *((u64 *) alt_mkvp);
+		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+				   ZCRYPT_CEX7, APKA_MK_SET,
+				   cur_mkvp64, old_mkvp64, 1);
+ if (rc)
+ goto out;
+
+ } else if (ktype == PKEY_TYPE_EP11 ||
+ ktype == PKEY_TYPE_EP11_AES ||
+ ktype == PKEY_TYPE_EP11_ECC) {
+ u8 *wkvp = NULL;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ wkvp = cur_mkvp;
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, wkvp);
+ if (rc)
+ goto out;
+
+ } else
+ return -EINVAL;
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ kfree(_apqns);
+ return rc;
+}
+
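+/*
+ * Illustrative userspace sketch (not part of this driver): the two APQN
+ * list functions above support a two-call convention through the
+ * PKEY_APQNS4K and PKEY_APQNS4KT ioctls - a first call with no list
+ * buffer yields the number of matching APQNs, a second call with a
+ * suitably sized buffer fetches them. The keyblob/keybloblen variables
+ * are made-up placeholders and error handling is omitted for brevity.
+ *
+ *	struct pkey_apqns4key kak = { 0 };
+ *	int fd = open("/dev/pkey", O_RDWR);
+ *
+ *	kak.key = keyblob;
+ *	kak.keylen = keybloblen;
+ *	kak.flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ *	ioctl(fd, PKEY_APQNS4K, &kak);
+ *
+ * after which kak.apqn_entries holds the match count; then
+ *
+ *	kak.apqns = calloc(kak.apqn_entries, sizeof(struct pkey_apqn));
+ *	ioctl(fd, PKEY_APQNS4K, &kak);
+ *
+ * fills the list with the matching card/domain pairs.
+ */
+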
+static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, size_t keylen, u32 *protkeytype,
+ u8 *protkey, u32 *protkeylen)
+{
+ int i, card, dom, rc;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES_WITH_HEADER
+ && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 AES key blob with header */
+ if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_ECC_WITH_HEADER
+ && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 ECC key blob with header */
+ if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES
+ && is_ep11_keyblob(key)) {
+ /* EP11 AES key blob with header in session field */
+ if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ if (hdr->version == TOKVER_CCA_AES) {
+ /* CCA AES data key */
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ return -EINVAL;
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ /* CCA AES cipher key */
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ return -EINVAL;
+ } else {
+ DEBUG_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
+ return -EINVAL;
+ }
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ /* CCA ECC (private) key */
+ if (keylen < sizeof(struct eccprivkeytoken))
+ return -EINVAL;
+ if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA) {
+ struct pkey_protkey pkey;
+
+ rc = pkey_nonccatok2pkey(key, keylen, &pkey);
+ if (rc)
+ return rc;
+ memcpy(protkey, pkey.protkey, pkey.len);
+ *protkeylen = pkey.len;
+ *protkeytype = pkey.type;
+ return 0;
+ } else {
+ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
+ return -EINVAL;
+ }
+
+	/* simply try all apqns from the list */
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (hdr->type == TOKTYPE_NON_CCA
+ && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+ || hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+ && is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
+ rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+ protkey, protkeylen, protkeytype);
+ else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES
+ && is_ep11_keyblob(key))
+ rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+ protkey, protkeylen, protkeytype);
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES)
+ rc = cca_sec2protkey(card, dom, key, protkey,
+ protkeylen, protkeytype);
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC)
+ rc = cca_cipher2protkey(card, dom, key, protkey,
+ protkeylen, protkeytype);
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA)
+ rc = cca_ecc2protkey(card, dom, key, protkey,
+ protkeylen, protkeytype);
+ else
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * File I/O functions
+ */
+
+static void *_copy_key_from_user(void __user *ukey, size_t keylen)
+{
+ if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE)
+ return ERR_PTR(-EINVAL);
+
+ return memdup_user(ukey, keylen);
+}
+
+static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
+{
+ if (!uapqns || nr_apqns == 0)
+ return NULL;
+
+ return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
+}
+
+static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+
+ switch (cmd) {
+ case PKEY_GENSECK: {
+ struct pkey_genseck __user *ugs = (void __user *) arg;
+ struct pkey_genseck kgs;
+
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
+ rc = cca_genseckey(kgs.cardnr, kgs.domain,
+ kgs.keytype, kgs.seckey.seckey);
+ DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+ if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_CLR2SECK: {
+ struct pkey_clr2seck __user *ucs = (void __user *) arg;
+ struct pkey_clr2seck kcs;
+
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+ rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
+ kcs.clrkey.clrkey, kcs.seckey.seckey);
+ DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ return -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+ break;
+ }
+ case PKEY_SEC2PROTK: {
+ struct pkey_sec2protk __user *usp = (void __user *) arg;
+ struct pkey_sec2protk ksp;
+
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+ rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
+ ksp.seckey.seckey, ksp.protkey.protkey,
+ &ksp.protkey.len, &ksp.protkey.type);
+ DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+ if (copy_to_user(usp, &ksp, sizeof(ksp)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_CLR2PROTK: {
+ struct pkey_clr2protk __user *ucp = (void __user *) arg;
+ struct pkey_clr2protk kcp;
+
+ if (copy_from_user(&kcp, ucp, sizeof(kcp)))
+ return -EFAULT;
+ rc = pkey_clr2protkey(kcp.keytype,
+ &kcp.clrkey, &kcp.protkey);
+ DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+ if (copy_to_user(ucp, &kcp, sizeof(kcp)))
+ return -EFAULT;
+ memzero_explicit(&kcp, sizeof(kcp));
+ break;
+ }
+ case PKEY_FINDCARD: {
+ struct pkey_findcard __user *ufc = (void __user *) arg;
+ struct pkey_findcard kfc;
+
+ if (copy_from_user(&kfc, ufc, sizeof(kfc)))
+ return -EFAULT;
+ rc = cca_findcard(kfc.seckey.seckey,
+ &kfc.cardnr, &kfc.domain, 1);
+ DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc);
+ if (rc < 0)
+ break;
+ if (copy_to_user(ufc, &kfc, sizeof(kfc)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_SKEY2PKEY: {
+ struct pkey_skey2pkey __user *usp = (void __user *) arg;
+ struct pkey_skey2pkey ksp;
+
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+ rc = pkey_skey2pkey(ksp.seckey.seckey, &ksp.protkey);
+ DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+ if (copy_to_user(usp, &ksp, sizeof(ksp)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_VERIFYKEY: {
+ struct pkey_verifykey __user *uvk = (void __user *) arg;
+ struct pkey_verifykey kvk;
+
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+ return -EFAULT;
+ rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
+ &kvk.keysize, &kvk.attributes);
+ DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+ if (copy_to_user(uvk, &kvk, sizeof(kvk)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_GENPROTK: {
+ struct pkey_genprotk __user *ugp = (void __user *) arg;
+ struct pkey_genprotk kgp;
+
+ if (copy_from_user(&kgp, ugp, sizeof(kgp)))
+ return -EFAULT;
+ rc = pkey_genprotkey(kgp.keytype, &kgp.protkey);
+ DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+ if (copy_to_user(ugp, &kgp, sizeof(kgp)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_VERIFYPROTK: {
+ struct pkey_verifyprotk __user *uvp = (void __user *) arg;
+ struct pkey_verifyprotk kvp;
+
+ if (copy_from_user(&kvp, uvp, sizeof(kvp)))
+ return -EFAULT;
+ rc = pkey_verifyprotkey(&kvp.protkey);
+ DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc);
+ break;
+ }
+ case PKEY_KBLOB2PROTK: {
+ struct pkey_kblob2pkey __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey ktp;
+ u8 *kkey;
+
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+ rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
+ DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ memzero_explicit(kkey, ktp.keylen);
+ kfree(kkey);
+ if (rc)
+ break;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_GENSECK2: {
+ struct pkey_genseck2 __user *ugs = (void __user *) arg;
+ struct pkey_genseck2 kgs;
+ struct pkey_apqn *apqns;
+ size_t klen = KEYBLOBBUFSIZE;
+ u8 *kkey;
+
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = kmalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ return -ENOMEM;
+ }
+ rc = pkey_genseckey2(apqns, kgs.apqn_entries,
+ kgs.type, kgs.size, kgs.keygenflags,
+ kkey, &klen);
+ DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc);
+ kfree(apqns);
+ if (rc) {
+ kfree(kkey);
+ break;
+ }
+ if (kgs.key) {
+ if (kgs.keylen < klen) {
+ kfree(kkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(kgs.key, kkey, klen)) {
+ kfree(kkey);
+ return -EFAULT;
+ }
+ }
+ kgs.keylen = klen;
+ if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ rc = -EFAULT;
+ kfree(kkey);
+ break;
+ }
+ case PKEY_CLR2SECK2: {
+ struct pkey_clr2seck2 __user *ucs = (void __user *) arg;
+ struct pkey_clr2seck2 kcs;
+ struct pkey_apqn *apqns;
+ size_t klen = KEYBLOBBUFSIZE;
+ u8 *kkey;
+
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = kmalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ return -ENOMEM;
+ }
+ rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
+ kcs.type, kcs.size, kcs.keygenflags,
+ kcs.clrkey.clrkey, kkey, &klen);
+ DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc);
+ kfree(apqns);
+ if (rc) {
+ kfree(kkey);
+ break;
+ }
+ if (kcs.key) {
+ if (kcs.keylen < klen) {
+ kfree(kkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(kcs.key, kkey, klen)) {
+ kfree(kkey);
+ return -EFAULT;
+ }
+ }
+ kcs.keylen = klen;
+ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+ kfree(kkey);
+ break;
+ }
+ case PKEY_VERIFYKEY2: {
+ struct pkey_verifykey2 __user *uvk = (void __user *) arg;
+ struct pkey_verifykey2 kvk;
+ u8 *kkey;
+
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+ return -EFAULT;
+ kkey = _copy_key_from_user(kvk.key, kvk.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+ rc = pkey_verifykey2(kkey, kvk.keylen,
+ &kvk.cardnr, &kvk.domain,
+ &kvk.type, &kvk.size, &kvk.flags);
+ DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc);
+ kfree(kkey);
+ if (rc)
+ break;
+ if (copy_to_user(uvk, &kvk, sizeof(kvk)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_KBLOB2PROTK2: {
+ struct pkey_kblob2pkey2 __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey2 ktp;
+ struct pkey_apqn *apqns = NULL;
+ u8 *kkey;
+
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries,
+ kkey, ktp.keylen, &ktp.protkey);
+ DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
+ kfree(apqns);
+ memzero_explicit(kkey, ktp.keylen);
+ kfree(kkey);
+ if (rc)
+ break;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_APQNS4K: {
+ struct pkey_apqns4key __user *uak = (void __user *) arg;
+ struct pkey_apqns4key kak;
+ struct pkey_apqn *apqns = NULL;
+ size_t nr_apqns, len;
+ u8 *kkey;
+
+ if (copy_from_user(&kak, uak, sizeof(kak)))
+ return -EFAULT;
+ nr_apqns = kak.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ kkey = _copy_key_from_user(kak.key, kak.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
+ apqns, &nr_apqns);
+ DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc);
+ kfree(kkey);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ break;
+ }
+ if (!rc && kak.apqns) {
+ if (nr_apqns > kak.apqn_entries) {
+ kfree(apqns);
+ return -EINVAL;
+ }
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kak.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
+ }
+ }
+ kak.apqn_entries = nr_apqns;
+ if (copy_to_user(uak, &kak, sizeof(kak)))
+ rc = -EFAULT;
+ kfree(apqns);
+ break;
+ }
+ case PKEY_APQNS4KT: {
+ struct pkey_apqns4keytype __user *uat = (void __user *) arg;
+ struct pkey_apqns4keytype kat;
+ struct pkey_apqn *apqns = NULL;
+ size_t nr_apqns, len;
+
+ if (copy_from_user(&kat, uat, sizeof(kat)))
+ return -EFAULT;
+ nr_apqns = kat.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
+ kat.flags, apqns, &nr_apqns);
+ DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ break;
+ }
+ if (!rc && kat.apqns) {
+ if (nr_apqns > kat.apqn_entries) {
+ kfree(apqns);
+ return -EINVAL;
+ }
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kat.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
+ }
+ }
+ kat.apqn_entries = nr_apqns;
+ if (copy_to_user(uat, &kat, sizeof(kat)))
+ rc = -EFAULT;
+ kfree(apqns);
+ break;
+ }
+ case PKEY_KBLOB2PROTK3: {
+ struct pkey_kblob2pkey3 __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey3 ktp;
+ struct pkey_apqn *apqns = NULL;
+ u32 protkeylen = PROTKEYBLOBBUFSIZE;
+ u8 *kkey, *protkey;
+
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ protkey = kmalloc(protkeylen, GFP_KERNEL);
+ if (!protkey) {
+ kfree(apqns);
+ kfree(kkey);
+ return -ENOMEM;
+ }
+ rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, kkey,
+ ktp.keylen, &ktp.pkeytype,
+ protkey, &protkeylen);
+ DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+ kfree(apqns);
+ memzero_explicit(kkey, ktp.keylen);
+ kfree(kkey);
+ if (rc) {
+ kfree(protkey);
+ break;
+ }
+ if (ktp.pkey && ktp.pkeylen) {
+ if (protkeylen > ktp.pkeylen) {
+ kfree(protkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
+ kfree(protkey);
+ return -EFAULT;
+ }
+ }
+ kfree(protkey);
+ ktp.pkeylen = protkeylen;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
+ break;
+ }
+ default:
+ /* unknown/unsupported ioctl cmd */
+ return -ENOTTY;
+ }
+
+ return rc;
+}
+
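+/*
+ * Illustrative userspace sketch (not part of this driver): generating a
+ * CCA AES secure key through the PKEY_GENSECK ioctl handled above. The
+ * 0xffff card/domain values are an assumption that the CCA helpers map
+ * them to autoselection, and the header path follows the s390 UAPI;
+ * error handling is omitted for brevity.
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <asm/pkey.h>
+ *
+ *	struct pkey_genseck kgs = {
+ *		.cardnr = 0xffff,
+ *		.domain = 0xffff,
+ *		.keytype = PKEY_KEYTYPE_AES_256,
+ *	};
+ *	int fd = open("/dev/pkey", O_RDWR);
+ *	int rc = ioctl(fd, PKEY_GENSECK, &kgs);
+ *
+ * On success kgs.seckey.seckey holds the CCA AES secure key blob.
+ */
+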
+/*
+ * Sysfs and file I/O operations
+ */
+
+/*
+ * Sysfs attribute read function for all protected key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ struct protaeskeytoken protkeytoken;
+ struct pkey_protkey protkey;
+ int rc;
+
+ if (off != 0 || count < sizeof(protkeytoken))
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * sizeof(protkeytoken))
+ return -EINVAL;
+
+ memset(&protkeytoken, 0, sizeof(protkeytoken));
+ protkeytoken.type = TOKTYPE_NON_CCA;
+ protkeytoken.version = TOKVER_PROTECTED_KEY;
+ protkeytoken.keytype = keytype;
+
+ rc = pkey_genprotkey(protkeytoken.keytype, &protkey);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf, &protkeytoken, sizeof(protkeytoken));
+
+ if (is_xts) {
+ rc = pkey_genprotkey(protkeytoken.keytype, &protkey);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf + sizeof(protkeytoken), &protkeytoken,
+ sizeof(protkeytoken));
+
+ return 2 * sizeof(protkeytoken);
+ }
+
+ return sizeof(protkeytoken);
+}
+
+static ssize_t protkey_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
+
+static struct bin_attribute *protkey_attrs[] = {
+ &bin_attr_protkey_aes_128,
+ &bin_attr_protkey_aes_192,
+ &bin_attr_protkey_aes_256,
+ &bin_attr_protkey_aes_128_xts,
+ &bin_attr_protkey_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group protkey_attr_group = {
+ .name = "protkey",
+ .bin_attrs = protkey_attrs,
+};
+
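+/*
+ * Illustrative userspace sketch (not part of this driver): since the
+ * read function above rejects partial reads, a consumer must fetch the
+ * whole blob with a single read at offset 0. The sysfs path is an
+ * assumption based on the usual misc device layout.
+ *
+ *	unsigned char blob[1024];
+ *	int fd = open("/sys/devices/virtual/misc/pkey/protkey/"
+ *		      "protkey_aes_256", O_RDONLY);
+ *	ssize_t n = read(fd, blob, sizeof(blob));
+ */
+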
+/*
+ * Sysfs attribute read function for all secure key ccadata binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ int rc;
+ struct pkey_seckey *seckey = (struct pkey_seckey *) buf;
+
+ if (off != 0 || count < sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * sizeof(struct secaeskeytoken))
+ return -EINVAL;
+
+ rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ seckey++;
+ rc = cca_genseckey(-1, -1, keytype, seckey->seckey);
+ if (rc)
+ return rc;
+
+ return 2 * sizeof(struct secaeskeytoken);
+ }
+
+ return sizeof(struct secaeskeytoken);
+}
+
+static ssize_t ccadata_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
+
+static struct bin_attribute *ccadata_attrs[] = {
+ &bin_attr_ccadata_aes_128,
+ &bin_attr_ccadata_aes_192,
+ &bin_attr_ccadata_aes_256,
+ &bin_attr_ccadata_aes_128_xts,
+ &bin_attr_ccadata_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ccadata_attr_group = {
+ .name = "ccadata",
+ .bin_attrs = ccadata_attrs,
+};
+
+#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
+
+/*
+ * Sysfs attribute read function for all secure key ccacipher binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ int i, rc, card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ size_t keysize = CCACIPHERTOKENSIZE;
+
+ if (off != 0 || count < CCACIPHERTOKENSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * CCACIPHERTOKENSIZE)
+ return -EINVAL;
+
+	/* build a list of apqns able to generate a cipher key */
+	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			   ZCRYPT_CEX6, 0, 0, 0, 0);
+	if (rc)
+		return rc;
+
+	memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			break;
+	}
+	/* the apqn list allocated by cca_findcard2() must be freed */
+	kfree(apqns);
+	if (rc)
+		return rc;
+
+	if (is_xts) {
+		keysize = CCACIPHERTOKENSIZE;
+		buf += CCACIPHERTOKENSIZE;
+		rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			return 2 * CCACIPHERTOKENSIZE;
+		/* do not report a partial xts key pair as success */
+		return rc;
+	}
+
+	return CCACIPHERTOKENSIZE;
+}
+
+static ssize_t ccacipher_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+
+static struct bin_attribute *ccacipher_attrs[] = {
+ &bin_attr_ccacipher_aes_128,
+ &bin_attr_ccacipher_aes_192,
+ &bin_attr_ccacipher_aes_256,
+ &bin_attr_ccacipher_aes_128_xts,
+ &bin_attr_ccacipher_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ccacipher_attr_group = {
+ .name = "ccacipher",
+ .bin_attrs = ccacipher_attrs,
+};
+
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+ * 320 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ int i, rc, card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ size_t keysize = MAXEP11AESKEYBLOBSIZE;
+
+ if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+
+	/* build a list of apqns able to generate an EP11 AES key */
+	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+			    ZCRYPT_CEX7, EP11_API_V, NULL);
+	if (rc)
+		return rc;
+
+	memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+	/* simply try all apqns from the list */
+	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+		card = apqns[i] >> 16;
+		dom = apqns[i] & 0xFFFF;
+		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			break;
+	}
+	/* the apqn list allocated by ep11_findcard2() must be freed */
+	kfree(apqns);
+	if (rc)
+		return rc;
+
+	if (is_xts) {
+		keysize = MAXEP11AESKEYBLOBSIZE;
+		buf += MAXEP11AESKEYBLOBSIZE;
+		rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+		if (rc == 0)
+			return 2 * MAXEP11AESKEYBLOBSIZE;
+		/* do not report a partial xts key pair as success */
+		return rc;
+	}
+
+	return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+ &bin_attr_ep11_aes_128,
+ &bin_attr_ep11_aes_192,
+ &bin_attr_ep11_aes_256,
+ &bin_attr_ep11_aes_128_xts,
+ &bin_attr_ep11_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+ .name = "ep11",
+ .bin_attrs = ep11_attrs,
+};
+
+static const struct attribute_group *pkey_attr_groups[] = {
+ &protkey_attr_group,
+ &ccadata_attr_group,
+ &ccacipher_attr_group,
+ &ep11_attr_group,
+ NULL,
+};
+
+static const struct file_operations pkey_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .llseek = no_llseek,
+ .unlocked_ioctl = pkey_unlocked_ioctl,
+};
+
+static struct miscdevice pkey_dev = {
+ .name = "pkey",
+ .minor = MISC_DYNAMIC_MINOR,
+ .mode = 0666,
+ .fops = &pkey_fops,
+ .groups = pkey_attr_groups,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_init(void)
+{
+ cpacf_mask_t func_mask;
+
+ /*
+ * The pckmo instruction should be available - even if we don't
+ * actually invoke it. This instruction comes with MSA 3 which
+ * is also the minimum level for the kmc instructions which
+ * are able to work with protected keys.
+ */
+ if (!cpacf_query(CPACF_PCKMO, &func_mask))
+ return -ENODEV;
+
+ /* check for kmc instructions available */
+ if (!cpacf_query(CPACF_KMC, &func_mask))
+ return -ENODEV;
+ if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) ||
+ !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) ||
+ !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256))
+ return -ENODEV;
+
+ pkey_debug_init();
+
+ return misc_register(&pkey_dev);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_exit(void)
+{
+ misc_deregister(&pkey_dev);
+ pkey_debug_exit();
+}
+
+module_cpu_feature_match(MSA, pkey_init);
+module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
new file mode 100644
index 000000000..22128eb44
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VFIO based AP device driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/facility.h>
+#include "vfio_ap_private.h"
+
+#define VFIO_AP_ROOT_NAME "vfio_ap"
+#define VFIO_AP_DEV_NAME "matrix"
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
+MODULE_LICENSE("GPL v2");
+
+static struct ap_driver vfio_ap_drv;
+
+struct ap_matrix_dev *matrix_dev;
+
+/* Only type 10 adapters (CEX4 and later) are supported
+ * by the AP matrix device driver
+ */
+static struct ap_device_id ap_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+
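+/*
+ * Illustrative host-side sketch (not part of this driver): before one of
+ * the queue types above can be passed through, its APQN must be removed
+ * from the host's apmask/aqmask and the queue device bound to vfio_ap.
+ * The sysfs paths follow the AP bus ABI; APQN 04.0047 is an example.
+ *
+ *	int fd = open("/sys/bus/ap/apmask", O_WRONLY);
+ *	write(fd, "-0x04", 5);
+ *	close(fd);
+ *	fd = open("/sys/bus/ap/aqmask", O_WRONLY);
+ *	write(fd, "-0x47", 5);
+ *	close(fd);
+ *	fd = open("/sys/bus/ap/drivers/vfio_ap/bind", O_WRONLY);
+ *	write(fd, "04.0047", 7);
+ *	close(fd);
+ */
+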
+/**
+ * vfio_ap_queue_dev_probe:
+ * @apdev: the AP device being probed
+ *
+ * Allocate a vfio_ap_queue structure and associate it
+ * with the device as driver_data.
+ */
+static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
+{
+ struct vfio_ap_queue *q;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+ dev_set_drvdata(&apdev->device, q);
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ return 0;
+}
+
+/**
+ * vfio_ap_queue_dev_remove:
+ * @apdev: the AP device being removed
+ *
+ * Takes the matrix lock to avoid actions on this device while it is
+ * being removed, then frees the associated vfio_ap_queue structure.
+ */
+static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
+{
+ struct vfio_ap_queue *q;
+
+ mutex_lock(&matrix_dev->lock);
+ q = dev_get_drvdata(&apdev->device);
+ vfio_ap_mdev_reset_queue(q, 1);
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ mutex_unlock(&matrix_dev->lock);
+}
+
+static void vfio_ap_matrix_dev_release(struct device *dev)
+{
+ struct ap_matrix_dev *matrix_dev;
+
+ matrix_dev = container_of(dev, struct ap_matrix_dev, device);
+ kfree(matrix_dev);
+}
+
+static int matrix_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return 1;
+}
+
+static struct bus_type matrix_bus = {
+ .name = "matrix",
+ .match = &matrix_bus_match,
+};
+
+static struct device_driver matrix_driver = {
+ .name = "vfio_ap",
+ .bus = &matrix_bus,
+ .suppress_bind_attrs = true,
+};
+
+static int vfio_ap_matrix_dev_create(void)
+{
+ int ret;
+ struct device *root_device;
+
+ root_device = root_device_register(VFIO_AP_ROOT_NAME);
+ if (IS_ERR(root_device))
+ return PTR_ERR(root_device);
+
+ ret = bus_register(&matrix_bus);
+ if (ret)
+ goto bus_register_err;
+
+ matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
+ if (!matrix_dev) {
+ ret = -ENOMEM;
+ goto matrix_alloc_err;
+ }
+
+ /* Fill in config info via PQAP(QCI), if available */
+ if (test_facility(12)) {
+ ret = ap_qci(&matrix_dev->info);
+ if (ret)
+ goto matrix_alloc_err;
+ }
+
+ mutex_init(&matrix_dev->lock);
+ INIT_LIST_HEAD(&matrix_dev->mdev_list);
+
+ dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
+ matrix_dev->device.parent = root_device;
+ matrix_dev->device.bus = &matrix_bus;
+ matrix_dev->device.release = vfio_ap_matrix_dev_release;
+ matrix_dev->vfio_ap_drv = &vfio_ap_drv;
+
+ ret = device_register(&matrix_dev->device);
+ if (ret)
+ goto matrix_reg_err;
+
+ ret = driver_register(&matrix_driver);
+ if (ret)
+ goto matrix_drv_err;
+
+ return 0;
+
+matrix_drv_err:
+ device_unregister(&matrix_dev->device);
+matrix_reg_err:
+ put_device(&matrix_dev->device);
+matrix_alloc_err:
+ bus_unregister(&matrix_bus);
+bus_register_err:
+ root_device_unregister(root_device);
+ return ret;
+}
+
+static void vfio_ap_matrix_dev_destroy(void)
+{
+ struct device *root_device = matrix_dev->device.parent;
+
+ driver_unregister(&matrix_driver);
+ device_unregister(&matrix_dev->device);
+ bus_unregister(&matrix_bus);
+ root_device_unregister(root_device);
+}
+
+static int __init vfio_ap_init(void)
+{
+ int ret;
+
+ /* If there are no AP instructions, there is nothing to pass through. */
+ if (!ap_instructions_available())
+ return -ENODEV;
+
+ ret = vfio_ap_matrix_dev_create();
+ if (ret)
+ return ret;
+
+ memset(&vfio_ap_drv, 0, sizeof(vfio_ap_drv));
+ vfio_ap_drv.probe = vfio_ap_queue_dev_probe;
+ vfio_ap_drv.remove = vfio_ap_queue_dev_remove;
+ vfio_ap_drv.ids = ap_queue_ids;
+
+ ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME);
+ if (ret) {
+ vfio_ap_matrix_dev_destroy();
+ return ret;
+ }
+
+ ret = vfio_ap_mdev_register();
+ if (ret) {
+ ap_driver_unregister(&vfio_ap_drv);
+ vfio_ap_matrix_dev_destroy();
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit vfio_ap_exit(void)
+{
+ vfio_ap_mdev_unregister();
+ ap_driver_unregister(&vfio_ap_drv);
+ vfio_ap_matrix_dev_destroy();
+}
+
+module_init(vfio_ap_init);
+module_exit(vfio_ap_exit);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
new file mode 100644
index 000000000..72eb8f984
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -0,0 +1,1328 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Adjunct processor matrix VFIO device driver callbacks.
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
+ */
+#include <linux/string.h>
+#include <linux/vfio.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <asm/kvm.h>
+#include <asm/zcrypt.h>
+
+#include "vfio_ap_private.h"
+
+#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
+#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
+
+static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
+
+static int match_apqn(struct device *dev, const void *data)
+{
+ struct vfio_ap_queue *q = dev_get_drvdata(dev);
+
+ return (q->apqn == *(int *)(data)) ? 1 : 0;
+}
+
+/**
+ * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
+ * @matrix_mdev: the associated mediated matrix device
+ * @apqn: The queue APQN
+ *
+ * Retrieve a queue with a specific APQN from the list of the
+ * devices of the vfio_ap_drv.
+ * Verify that the APID and the APQI are set in the matrix.
+ *
+ * Returns the pointer to the associated vfio_ap_queue
+ */
+static struct vfio_ap_queue *vfio_ap_get_queue(
+ struct ap_matrix_mdev *matrix_mdev,
+ int apqn)
+{
+ struct vfio_ap_queue *q;
+
+ if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
+ return NULL;
+ if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
+ return NULL;
+
+ q = vfio_ap_find_queue(apqn);
+ if (q)
+ q->matrix_mdev = matrix_mdev;
+
+ return q;
+}
+
+/**
+ * vfio_ap_wait_for_irqclear
+ * @apqn: The AP Queue number
+ *
+ * Checks the IRQ bit for the status of this APQN using ap_tapq.
+ * Returns if ap_tapq succeeds and the IRQ bit is clear, or if ap_tapq
+ * fails because the AP is invalid, deconfigured or checkstopped.
+ * Otherwise, retries up to 5 times, waiting 20ms between attempts.
+ *
+ */
+static void vfio_ap_wait_for_irqclear(int apqn)
+{
+ struct ap_queue_status status;
+ int retry = 5;
+
+ do {
+ status = ap_tapq(apqn, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (!status.irq_enabled)
+ return;
+ fallthrough;
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
+ status.response_code, apqn);
+ return;
+ }
+ } while (--retry);
+
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
+ __func__, status.response_code, apqn);
+}
+
+/**
+ * vfio_ap_free_aqic_resources
+ * @q: The vfio_ap_queue
+ *
+ * Unregisters the ISC in the GIB when the saved ISC is valid.
+ * Unpins the guest's page holding the NIB when it exists.
+ * Resets the saved_pfn and saved_isc to invalid values.
+ *
+ */
+static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
+{
+ if (!q)
+ return;
+ if (q->saved_isc != VFIO_AP_ISC_INVALID &&
+ !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
+ kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ }
+ if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
+ &q->saved_pfn, 1);
+ q->saved_pfn = 0;
+ }
+}
+
+/**
+ * vfio_ap_irq_disable
+ * @q: The vfio_ap_queue
+ *
+ * Uses ap_aqic to disable the interruption. In case of success, of a
+ * reset in progress, or of an already processed IRQ disable command,
+ * calls vfio_ap_wait_for_irqclear() to check that the IRQ bit is clear
+ * and calls vfio_ap_free_aqic_resources() to free the resources
+ * associated with the AP interrupt handling.
+ *
+ * In case the AP is busy, or a reset is in progress,
+ * retries after 20ms, up to 5 times.
+ *
+ * Returns if ap_aqic fails because the AP is invalid, deconfigured or
+ * checkstopped.
+ */
+static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status;
+ int retries = 5;
+
+ do {
+ status = ap_aqic(q->apqn, aqic_gisa, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ case AP_RESPONSE_NORMAL:
+ vfio_ap_wait_for_irqclear(q->apqn);
+ goto end_free;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ default:
+ /* All cases in default means AP not operational */
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+ goto end_free;
+ }
+ } while (retries--);
+
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+end_free:
+ vfio_ap_free_aqic_resources(q);
+ q->matrix_mdev = NULL;
+ return status;
+}
+
+/**
+ * vfio_ap_irq_enable: Enable interruption for an APQN
+ *
+ * @q: the vfio_ap_queue holding AQIC parameters
+ * @isc: the guest interruption subclass
+ * @nib: the guest address of the notification indicator byte
+ *
+ * Pin the guest page holding the NIB.
+ * Register the guest ISC with the GIB interface and retrieve the
+ * host ISC to issue the host side PQAP/AQIC.
+ *
+ * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case
+ * vfio_pin_pages() fails.
+ *
+ * Otherwise returns the ap_queue_status returned by ap_aqic();
+ * all retry handling will be done by the guest.
+ */
+static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+ int isc,
+ unsigned long nib)
+{
+ struct ap_qirq_ctrl aqic_gisa = {};
+ struct ap_queue_status status = {};
+ struct kvm_s390_gisa *gisa;
+ struct kvm *kvm;
+ unsigned long h_nib, g_pfn, h_pfn;
+ int ret;
+
+ g_pfn = nib >> PAGE_SHIFT;
+ ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_pfn);
+ switch (ret) {
+ case 1:
+ break;
+ default:
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
+ }
+
+ kvm = q->matrix_mdev->kvm;
+ gisa = kvm->arch.gisa_int.origin;
+
+ h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+ aqic_gisa.gisc = isc;
+ aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
+ aqic_gisa.ir = 1;
+ aqic_gisa.gisa = (uint64_t)gisa >> 4;
+
+ status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+		/* See if we need to clear an older IRQ configuration */
+ vfio_ap_free_aqic_resources(q);
+ q->saved_pfn = g_pfn;
+ q->saved_isc = isc;
+ break;
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+		/* We could not modify IRQ settings: clear new configuration */
+ vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
+ kvm_s390_gisc_unregister(kvm, isc);
+ break;
+ default:
+ pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
+ status.response_code);
+ vfio_ap_irq_disable(q);
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * handle_pqap: PQAP instruction callback
+ *
+ * @vcpu: The vcpu on which we received the PQAP instruction
+ *
+ * Get the general register contents to initialize internal variables.
+ * REG[0]: APQN
+ * REG[1]: IR and ISC
+ * REG[2]: NIB
+ *
+ * Response.status may be set to one of the following response codes:
+ * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
+ * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
+ * - AP_RESPONSE_NORMAL (0) : in case of success
+ * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other
+ * possible response codes.
+ * We take the matrix_dev lock to ensure serialization on queues and
+ * mediated device access.
+ *
+ * Returns 0 if we could handle the request inside KVM;
+ * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ */
+static int handle_pqap(struct kvm_vcpu *vcpu)
+{
+ uint64_t status;
+ uint16_t apqn;
+ struct vfio_ap_queue *q;
+ struct ap_queue_status qstatus = {
+ .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
+ struct ap_matrix_mdev *matrix_mdev;
+
+	/* If we do not use the AIV facility, just go to userland */
+ if (!(vcpu->arch.sie_block->eca & ECA_AIV))
+ return -EOPNOTSUPP;
+
+ apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
+ mutex_lock(&matrix_dev->lock);
+
+ if (!vcpu->kvm->arch.crypto.pqap_hook)
+ goto out_unlock;
+ matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
+ struct ap_matrix_mdev, pqap_hook);
+
+ q = vfio_ap_get_queue(matrix_mdev, apqn);
+ if (!q)
+ goto out_unlock;
+
+ status = vcpu->run->s.regs.gprs[1];
+
+	/* If IR bit (16) is set, we enable the interrupt */
+ if ((status >> (63 - 16)) & 0x01)
+ qstatus = vfio_ap_irq_enable(q, status & 0x07,
+ vcpu->run->s.regs.gprs[2]);
+ else
+ qstatus = vfio_ap_irq_disable(q);
+
+out_unlock:
+ memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
+ vcpu->run->s.regs.gprs[1] >>= 32;
+ mutex_unlock(&matrix_dev->lock);
+ return 0;
+}
+
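+/*
+ * For reference, the general register layout decoded by handle_pqap()
+ * above (bit 0 being the leftmost bit of the 64-bit register):
+ *
+ *	GR1 bit 16     : IR - 1 enables, 0 disables the interruption
+ *	GR1 bits 61-63 : ISC - the guest interruption subclass
+ *
+ * On exit the 32-bit ap_queue_status is returned in bits 32-63 of GR1,
+ * which is what the memcpy() into the big-endian leftmost bytes followed
+ * by the 32-bit right shift achieves.
+ */
+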
+static void vfio_ap_matrix_init(struct ap_config_info *info,
+ struct ap_matrix *matrix)
+{
+ matrix->apm_max = info->apxa ? info->Na : 63;
+ matrix->aqm_max = info->apxa ? info->Nd : 15;
+ matrix->adm_max = info->apxa ? info->Nd : 15;
+}
+
+static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
+ return -EPERM;
+
+ matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
+ if (!matrix_mdev) {
+ atomic_inc(&matrix_dev->available_instances);
+ return -ENOMEM;
+ }
+
+ matrix_mdev->mdev = mdev;
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
+ mdev_set_drvdata(mdev, matrix_mdev);
+ matrix_mdev->pqap_hook.hook = handle_pqap;
+ matrix_mdev->pqap_hook.owner = THIS_MODULE;
+ mutex_lock(&matrix_dev->lock);
+ list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
+ mutex_unlock(&matrix_dev->lock);
+
+ return 0;
+}
+
+static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ mutex_lock(&matrix_dev->lock);
+ vfio_ap_mdev_reset_queues(mdev);
+ list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->lock);
+
+ kfree(matrix_mdev);
+ mdev_set_drvdata(mdev, NULL);
+ atomic_inc(&matrix_dev->available_instances);
+
+ return 0;
+}
+
+static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
+}
+
+static MDEV_TYPE_ATTR_RO(name);
+
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
+{
+ return sprintf(buf, "%d\n",
+ atomic_read(&matrix_dev->available_instances));
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
+}
+
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static struct attribute *vfio_ap_mdev_type_attrs[] = {
+ &mdev_type_attr_name.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_available_instances.attr,
+ NULL,
+};
+
+static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
+ .name = VFIO_AP_MDEV_TYPE_HWVIRT,
+ .attrs = vfio_ap_mdev_type_attrs,
+};
+
+static struct attribute_group *vfio_ap_mdev_type_groups[] = {
+ &vfio_ap_mdev_hwvirt_type_group,
+ NULL,
+};
+
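+/*
+ * Illustrative host-side sketch (not part of this driver): a mediated
+ * matrix device is created by writing a UUID to the create attribute of
+ * the supported type registered above. The path follows the mdev sysfs
+ * ABI; the UUID is an example value.
+ *
+ *	int fd = open("/sys/devices/vfio_ap/matrix/mdev_supported_types/"
+ *		      "vfio_ap-passthrough/create", O_WRONLY);
+ *	write(fd, "669d9b23-fe1b-4ecb-be08-a2fabca99b71", 36);
+ *	close(fd);
+ */
+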
+struct vfio_ap_queue_reserved {
+ unsigned long *apid;
+ unsigned long *apqi;
+ bool reserved;
+};
+
+/**
+ * vfio_ap_has_queue
+ *
+ * @dev: an AP queue device
+ * @data: a struct vfio_ap_queue_reserved reference
+ *
+ * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
+ * apid or apqi specified in @data:
+ *
+ * - If @data contains both an apid and apqi value, then @data will be flagged
+ * as reserved if the APID and APQI fields for the AP queue device matches
+ *
+ * - If @data contains only an apid value, @data will be flagged as
+ * reserved if the APID field in the AP queue device matches
+ *
+ * - If @data contains only an apqi value, @data will be flagged as
+ * reserved if the APQI field in the AP queue device matches
+ *
+ * Returns 0 to indicate the input to the function succeeded. Returns
+ * -EINVAL if @data does not contain either an apid or apqi.
+ */
+static int vfio_ap_has_queue(struct device *dev, void *data)
+{
+ struct vfio_ap_queue_reserved *qres = data;
+ struct ap_queue *ap_queue = to_ap_queue(dev);
+ ap_qid_t qid;
+ unsigned long id;
+
+ if (qres->apid && qres->apqi) {
+ qid = AP_MKQID(*qres->apid, *qres->apqi);
+ if (qid == ap_queue->qid)
+ qres->reserved = true;
+ } else if (qres->apid && !qres->apqi) {
+ id = AP_QID_CARD(ap_queue->qid);
+ if (id == *qres->apid)
+ qres->reserved = true;
+ } else if (!qres->apid && qres->apqi) {
+ id = AP_QID_QUEUE(ap_queue->qid);
+ if (id == *qres->apqi)
+ qres->reserved = true;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vfio_ap_verify_queue_reserved
+ *
+ * @apid: an AP adapter ID
+ * @apqi: an AP queue index
+ *
+ * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
+ * driver according to the following rules:
+ *
+ * - If both @apid and @apqi are not NULL, then there must be an AP queue
+ * device bound to the vfio_ap driver with the APQN identified by @apid and
+ * @apqi
+ *
+ * - If only @apid is not NULL, then there must be an AP queue device bound
+ * to the vfio_ap driver with an APQN containing @apid
+ *
+ * - If only @apqi is not NULL, then there must be an AP queue device bound
+ * to the vfio_ap driver with an APQN containing @apqi
+ *
+ * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
+ */
+static int vfio_ap_verify_queue_reserved(unsigned long *apid,
+ unsigned long *apqi)
+{
+ int ret;
+ struct vfio_ap_queue_reserved qres;
+
+ qres.apid = apid;
+ qres.apqi = apqi;
+ qres.reserved = false;
+
+ ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &qres, vfio_ap_has_queue);
+ if (ret)
+ return ret;
+
+ if (qres.reserved)
+ return 0;
+
+ return -EADDRNOTAVAIL;
+}
+
+static int
+vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ int ret;
+ unsigned long apqi;
+ unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
+
+ if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
+ return vfio_ap_verify_queue_reserved(&apid, NULL);
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
+ ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vfio_ap_mdev_verify_no_sharing
+ *
+ * Verifies that the APQNs derived from the cross product of the AP adapter IDs
+ * and AP queue indexes comprising the AP matrix are not configured for another
+ * mediated device. AP queue sharing is not allowed.
+ *
+ * @matrix_mdev: the mediated matrix device
+ *
+ * Returns 0 if the APQNs are not shared; otherwise, returns -EADDRINUSE.
+ */
+static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct ap_matrix_mdev *lstdev;
+ DECLARE_BITMAP(apm, AP_DEVICES);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
+
+ list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
+ if (matrix_mdev == lstdev)
+ continue;
+
+ memset(apm, 0, sizeof(apm));
+ memset(aqm, 0, sizeof(aqm));
+
+ /*
+ * We work on full longs, as we can only exclude the leftover
+ * bits in non-inverse order. The leftover is all zeros.
+ */
+ if (!bitmap_and(apm, matrix_mdev->matrix.apm,
+ lstdev->matrix.apm, AP_DEVICES))
+ continue;
+
+ if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
+ lstdev->matrix.aqm, AP_DOMAINS))
+ continue;
+
+ return -EADDRINUSE;
+ }
+
+ return 0;
+}
+
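+/*
+ * Example of the check above: if one mdev has APM = {0x04} and
+ * AQM = {0x47, 0x48} while another has APM = {0x04} and AQM = {0x48},
+ * both bitmap_and() calls yield a nonzero result, so the shared APQN
+ * 04.0048 is detected and -EADDRINUSE is returned.
+ */
+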
+/**
+ * assign_adapter_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_adapter attribute
+ * @buf: a buffer containing the AP adapter number (APID) to
+ * be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APID from @buf and sets the corresponding bit in the mediated
+ * matrix device's APM.
+ *
+ * Returns the number of bytes processed if the APID is valid; otherwise,
+ * returns one of the following errors:
+ *
+ * 1. -EINVAL
+ * The APID is not a valid number
+ *
+ * 2. -ENODEV
+ * The APID exceeds the maximum value configured for the system
+ *
+ * 3. -EADDRNOTAVAIL
+ * An APQN derived from the cross product of the APID being assigned
+ * and the APQIs previously assigned is not bound to the vfio_ap device
+ * driver; or, if no APQIs have yet been assigned, the APID is not
+ * contained in an APQN bound to the vfio_ap device driver.
+ *
+ * 4. -EADDRINUSE
+ * An APQN derived from the cross product of the APID being assigned
+ * and the APQIs previously assigned is being used by another mediated
+ * matrix device
+ */
+static ssize_t assign_adapter_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow assignment of adapter */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apid);
+ if (ret)
+ return ret;
+
+ if (apid > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+
+ /*
+ * Set the bit in the AP mask (APM) corresponding to the AP adapter
+ * number (APID). The bits in the mask, from most significant to least
+ * significant bit, correspond to APIDs 0-255.
+ */
+ mutex_lock(&matrix_dev->lock);
+
+ ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
+ if (ret)
+ goto done;
+
+ set_bit_inv(apid, matrix_mdev->matrix.apm);
+
+ ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
+ if (ret)
+ goto share_err;
+
+ ret = count;
+ goto done;
+
+share_err:
+ clear_bit_inv(apid, matrix_mdev->matrix.apm);
+done:
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(assign_adapter);
+
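+/*
+ * Illustrative host-side sketch (not part of this driver): assigning
+ * adapter 0x04 to a mediated device. The path is an assumption based on
+ * the vfio_ap sysfs layout, with <uuid> standing for the mdev UUID.
+ *
+ *	int fd = open("/sys/devices/vfio_ap/matrix/<uuid>/assign_adapter",
+ *		      O_WRONLY);
+ *	write(fd, "0x04", 4);
+ *	close(fd);
+ */
+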
+/**
+ * unassign_adapter_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_adapter attribute
+ * @buf: a buffer containing the adapter number (APID) to be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APID from @buf and clears the corresponding bit in the mediated
+ * matrix device's APM.
+ *
+ * Returns the number of bytes processed if the APID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the APID is not a number
+ * -ENODEV if the APID it exceeds the maximum value configured for the
+ * system
+ */
+static ssize_t unassign_adapter_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow un-assignment of adapter */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apid);
+ if (ret)
+ return ret;
+
+ if (apid > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_adapter);
+
+static int
+vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+ int ret;
+ unsigned long apid;
+ unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
+
+ if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
+ return vfio_ap_verify_queue_reserved(NULL, &apqi);
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
+ ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * assign_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_domain attribute
+ * @buf: a buffer containing the AP queue index (APQI) of the domain to
+ * be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APQI from @buf and sets the corresponding bit in the mediated
+ * matrix device's AQM.
+ *
+ * Returns the number of bytes processed if the APQI is valid; otherwise returns
+ * one of the following errors:
+ *
+ * 1. -EINVAL
+ * The APQI is not a valid number
+ *
+ * 2. -ENODEV
+ * The APQI exceeds the maximum value configured for the system
+ *
+ * 3. -EADDRNOTAVAIL
+ * An APQN derived from the cross product of the APQI being assigned
+ * and the APIDs previously assigned is not bound to the vfio_ap device
+ * driver; or, if no APIDs have yet been assigned, the APQI is not
+ * contained in an APQN bound to the vfio_ap device driver.
+ *
+ * 4. -EADDRINUSE
+ * An APQN derived from the cross product of the APQI being assigned
+ * and the APIDs previously assigned is being used by another mediated
+ * matrix device
+ */
+static ssize_t assign_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apqi;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
+
+ /* If the guest is running, disallow assignment of domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apqi);
+ if (ret)
+ return ret;
+ if (apqi > max_apqi)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+
+ ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
+ if (ret)
+ goto done;
+
+ set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+
+ ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
+ if (ret)
+ goto share_err;
+
+ ret = count;
+ goto done;
+
+share_err:
+ clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
+done:
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(assign_domain);
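+
+/*
+ * Worked example (illustrative): if adapters 0x05 and 0x06 are assigned
+ * and domain 0x0004 is then assigned, the cross product yields the APQNs
+ * 05.0004 and 06.0004. Each of these APQNs must be bound to the vfio_ap
+ * driver and not be in use by another mediated device; otherwise the
+ * store fails with -EADDRNOTAVAIL or -EADDRINUSE respectively.
+ */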
+
+/**
+ * unassign_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_domain attribute
+ * @buf: a buffer containing the AP queue index (APQI) of the domain to
+ * be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APQI from @buf and clears the corresponding bit in the
+ * mediated matrix device's AQM.
+ *
+ * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the APQI is not a number
+ * -ENODEV if the APQI exceeds the maximum value configured for the system
+ */
+static ssize_t unassign_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apqi;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow un-assignment of domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apqi);
+ if (ret)
+ return ret;
+
+ if (apqi > matrix_mdev->matrix.aqm_max)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_domain);
+
+/**
+ * assign_control_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_control_domain attribute
+ * @buf: a buffer containing the domain ID to be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the domain ID from @buf and sets the corresponding bit in the mediated
+ * matrix device's ADM.
+ *
+ * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the ID is not a number
+ * -ENODEV if the ID exceeds the maximum value configured for the system
+ */
+static ssize_t assign_control_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long id;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow assignment of control domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &id);
+ if (ret)
+ return ret;
+
+ if (id > matrix_mdev->matrix.adm_max)
+ return -ENODEV;
+
+ /*
+ * Set the bit in the ADM (bitmask) corresponding to the AP control
+ * domain number (id). The bits in the mask, from most significant to
+ * least significant, correspond to IDs 0 up to one less than the
+ * number of control domains that can be assigned.
+ */
+ mutex_lock(&matrix_dev->lock);
+ set_bit_inv(id, matrix_mdev->matrix.adm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(assign_control_domain);
+
+/**
+ * unassign_control_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_control_domain attribute
+ * @buf: a buffer containing the domain ID to be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the domain ID from @buf and clears the corresponding bit in the
+ * mediated matrix device's ADM.
+ *
+ * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the ID is not a number
+ * -ENODEV if the ID exceeds the maximum value configured for the system
+ */
+static ssize_t unassign_control_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long domid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_domid = matrix_mdev->matrix.adm_max;
+
+ /* If the guest is running, disallow un-assignment of control domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &domid);
+ if (ret)
+ return ret;
+ if (domid > max_domid)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv(domid, matrix_mdev->matrix.adm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_control_domain);
+
+static ssize_t control_domains_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ unsigned long id;
+ int nchars = 0;
+ int n;
+ char *bufpos = buf;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_domid = matrix_mdev->matrix.adm_max;
+
+ mutex_lock(&matrix_dev->lock);
+ for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
+ n = sprintf(bufpos, "%04lx\n", id);
+ bufpos += n;
+ nchars += n;
+ }
+ mutex_unlock(&matrix_dev->lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(control_domains);
+
+static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ char *bufpos = buf;
+ unsigned long apid;
+ unsigned long apqi;
+ unsigned long apid1;
+ unsigned long apqi1;
+ unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
+ unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
+ int nchars = 0;
+ int n;
+
+ apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
+ apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
+
+ mutex_lock(&matrix_dev->lock);
+
+ if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ naqm_bits) {
+ n = sprintf(bufpos, "%02lx.%04lx\n", apid,
+ apqi);
+ bufpos += n;
+ nchars += n;
+ }
+ }
+ } else if (apid1 < napm_bits) {
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ n = sprintf(bufpos, "%02lx.\n", apid);
+ bufpos += n;
+ nchars += n;
+ }
+ } else if (apqi1 < naqm_bits) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
+ n = sprintf(bufpos, ".%04lx\n", apqi);
+ bufpos += n;
+ nchars += n;
+ }
+ }
+
+ mutex_unlock(&matrix_dev->lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(matrix);
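+
+/*
+ * Illustrative output (assumed example values): with APIDs 0x05 and 0x06
+ * and APQI 0x0004 assigned, reading the matrix attribute yields:
+ *
+ *   05.0004
+ *   06.0004
+ *
+ * With only adapters assigned the lines take the form "05.", and with
+ * only domains assigned the form ".0004", matching the three branches
+ * in matrix_show() above.
+ */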
+
+static struct attribute *vfio_ap_mdev_attrs[] = {
+ &dev_attr_assign_adapter.attr,
+ &dev_attr_unassign_adapter.attr,
+ &dev_attr_assign_domain.attr,
+ &dev_attr_unassign_domain.attr,
+ &dev_attr_assign_control_domain.attr,
+ &dev_attr_unassign_control_domain.attr,
+ &dev_attr_control_domains.attr,
+ &dev_attr_matrix.attr,
+ NULL,
+};
+
+static struct attribute_group vfio_ap_mdev_attr_group = {
+ .attrs = vfio_ap_mdev_attrs
+};
+
+static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
+ &vfio_ap_mdev_attr_group,
+ NULL
+};
+
+/**
+ * vfio_ap_mdev_set_kvm
+ *
+ * @matrix_mdev: a mediated matrix device
+ * @kvm: reference to KVM instance
+ *
+ * Verifies no other mediated matrix device has @kvm and sets a reference to
+ * it in @matrix_mdev->kvm.
+ *
+ * Returns 0 if no other mediated matrix device has a reference to @kvm;
+ * otherwise, returns -EPERM.
+ */
+static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
+ struct kvm *kvm)
+{
+ struct ap_matrix_mdev *m;
+
+ mutex_lock(&matrix_dev->lock);
+
+ list_for_each_entry(m, &matrix_dev->mdev_list, node) {
+ if ((m != matrix_mdev) && (m->kvm == kvm)) {
+ mutex_unlock(&matrix_dev->lock);
+ return -EPERM;
+ }
+ }
+
+ matrix_mdev->kvm = kvm;
+ kvm_get_kvm(kvm);
+ kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
+ mutex_unlock(&matrix_dev->lock);
+
+ return 0;
+}
+
+/*
+ * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
+ *
+ * @nb: The notifier block
+ * @action: Action to be taken
+ * @data: data associated with the request
+ *
+ * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
+ * pinned before). Other requests are ignored.
+ *
+ */
+static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);
+
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+ unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
+
+ vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
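+
+/*
+ * Worked example (illustrative): with 4K pages (PAGE_SHIFT == 12) an
+ * unmap request for iova 0x12345000 yields g_pfn 0x12345, the page frame
+ * number that was pinned earlier for the notification indicator byte.
+ */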
+
+static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (action != VFIO_GROUP_NOTIFY_SET_KVM)
+ return NOTIFY_OK;
+
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+
+ if (!data) {
+ matrix_mdev->kvm = NULL;
+ return NOTIFY_OK;
+ }
+
+ ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
+ if (ret)
+ return NOTIFY_DONE;
+
+ /* If there is no CRYCB pointer, then we can't copy the masks */
+ if (!matrix_mdev->kvm->arch.crypto.crycbd)
+ return NOTIFY_DONE;
+
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.adm);
+
+ return NOTIFY_OK;
+}
+
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
+{
+ struct device *dev;
+ struct vfio_ap_queue *q = NULL;
+
+ dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
+ &apqn, match_apqn);
+ if (dev) {
+ q = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
+ return q;
+}
+
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry)
+{
+ struct ap_queue_status status;
+ int ret;
+ int retry2 = 2;
+
+ if (!q)
+ return 0;
+
+retry_zapq:
+ status = ap_zapq(q->apqn);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ ret = 0;
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (retry--) {
+ msleep(20);
+ goto retry_zapq;
+ }
+ ret = -EBUSY;
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ WARN_ON_ONCE(status.irq_enabled);
+ ret = -EBUSY;
+ goto free_resources;
+ default:
+ /* things are really broken, give up */
+ WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+ status.response_code);
+ return -EIO;
+ }
+
+ /* wait for the reset to take effect */
+ while (retry2--) {
+ if (status.queue_empty && !status.irq_enabled)
+ break;
+ msleep(20);
+ status = ap_tapq(q->apqn, NULL);
+ }
+ WARN_ON_ONCE(retry2 <= 0);
+
+free_resources:
+ vfio_ap_free_aqic_resources(q);
+
+ return ret;
+}
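+
+/*
+ * Illustrative timing (derived from the constants above): a caller passing
+ * retry == 1 allows at most two PQAP(ZAPQ) attempts 20ms apart while the
+ * reset is in progress, and the follow-up loop polls PQAP(TAPQ) at most
+ * twice, again 20ms apart, waiting for the queue to report empty.
+ */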
+
+static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
+{
+ int ret;
+ int rc = 0;
+ unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.apm_max + 1) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.aqm_max + 1) {
+ q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
+ ret = vfio_ap_mdev_reset_queue(q, 1);
+ /*
+ * Regardless of whether a queue turns out to be busy, or
+ * is not operational, we need to continue resetting
+ * the remaining queues.
+ */
+ if (ret)
+ rc = ret;
+ }
+ }
+
+ return rc;
+}
+
+static int vfio_ap_mdev_open(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long events;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
+ events = VFIO_GROUP_NOTIFY_SET_KVM;
+
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &events, &matrix_mdev->group_notifier);
+ if (ret) {
+ module_put(THIS_MODULE);
+ return ret;
+ }
+
+ matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
+ events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &events, &matrix_mdev->iommu_notifier);
+ if (!ret)
+ return ret;
+
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &matrix_mdev->group_notifier);
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void vfio_ap_mdev_release(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ mutex_lock(&matrix_dev->lock);
+ if (matrix_mdev->kvm) {
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+ vfio_ap_mdev_reset_queues(mdev);
+ kvm_put_kvm(matrix_mdev->kvm);
+ matrix_mdev->kvm = NULL;
+ }
+ mutex_unlock(&matrix_dev->lock);
+
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &matrix_mdev->iommu_notifier);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &matrix_mdev->group_notifier);
+ module_put(THIS_MODULE);
+}
+
+static int vfio_ap_mdev_get_device_info(unsigned long arg)
+{
+ unsigned long minsz;
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
+ info.num_regions = 0;
+ info.num_irqs = 0;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+}
+
+static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&matrix_dev->lock);
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ ret = vfio_ap_mdev_get_device_info(arg);
+ break;
+ case VFIO_DEVICE_RESET:
+ ret = vfio_ap_mdev_reset_queues(mdev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
+}
+
+static const struct mdev_parent_ops vfio_ap_matrix_ops = {
+ .owner = THIS_MODULE,
+ .supported_type_groups = vfio_ap_mdev_type_groups,
+ .mdev_attr_groups = vfio_ap_mdev_attr_groups,
+ .create = vfio_ap_mdev_create,
+ .remove = vfio_ap_mdev_remove,
+ .open = vfio_ap_mdev_open,
+ .release = vfio_ap_mdev_release,
+ .ioctl = vfio_ap_mdev_ioctl,
+};
+
+int vfio_ap_mdev_register(void)
+{
+ atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);
+
+ return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+}
+
+void vfio_ap_mdev_unregister(void)
+{
+ mdev_unregister_device(&matrix_dev->device);
+}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
new file mode 100644
index 000000000..28e9d9989
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Private data and functions for adjunct processor VFIO matrix driver.
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef _VFIO_AP_PRIVATE_H_
+#define _VFIO_AP_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mdev.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/kvm_host.h>
+
+#include "ap_bus.h"
+
+#define VFIO_AP_MODULE_NAME "vfio_ap"
+#define VFIO_AP_DRV_NAME "vfio_ap"
+
+/**
+ * struct ap_matrix_dev - the AP matrix device structure
+ * @device: generic device structure associated with the AP matrix device
+ * @available_instances: number of mediated matrix devices that can be created
+ * @info: the struct containing the output from the PQAP(QCI) instruction
+ * @mdev_list: the list of mediated matrix devices created
+ * @lock: mutex for locking the AP matrix device. This lock will be
+ * taken every time we fiddle with state managed by the vfio_ap
+ * driver, be it using @mdev_list or writing the state of a
+ * single ap_matrix_mdev device. It's quite coarse but we don't
+ * expect much contention.
+ * @vfio_ap_drv: reference to the vfio_ap device driver
+ */
+struct ap_matrix_dev {
+ struct device device;
+ atomic_t available_instances;
+ struct ap_config_info info;
+ struct list_head mdev_list;
+ struct mutex lock;
+ struct ap_driver *vfio_ap_drv;
+};
+
+extern struct ap_matrix_dev *matrix_dev;
+
+/**
+ * struct ap_matrix - matrix of adapters, domains and control domains
+ *
+ * The AP matrix is comprised of three bit masks identifying the adapters,
+ * queues (domains) and control domains that belong to an AP matrix. The bits
+ * in each mask, from most significant to least significant bit, correspond to
+ * IDs 0 to 255. When a bit is set, the corresponding ID belongs to the matrix.
+ *
+ * @apm_max: max adapter number in @apm
+ * @apm: identifies the AP adapters in the matrix
+ * @aqm_max: max domain number in @aqm
+ * @aqm: identifies the AP queues (domains) in the matrix
+ * @adm_max: max domain number in @adm
+ * @adm: identifies the AP control domains in the matrix
+ */
+struct ap_matrix {
+ unsigned long apm_max;
+ DECLARE_BITMAP(apm, 256);
+ unsigned long aqm_max;
+ DECLARE_BITMAP(aqm, 256);
+ unsigned long adm_max;
+ DECLARE_BITMAP(adm, 256);
+};
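+
+/*
+ * Illustrative sketch (an assumption spelled out from the inverted bitops
+ * used by this driver): set_bit_inv(n, map) sets bit (n ^ 63) of the
+ * containing word, so ID 0 maps to the most significant bit of the first
+ * word, e.g.:
+ *
+ *   DECLARE_BITMAP(apm, 256);
+ *   set_bit_inv(0, apm);   apm[0] == 0x8000000000000000UL
+ *   set_bit_inv(3, apm);   apm[0] gains bit value 0x1000000000000000UL
+ */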
+
+/**
+ * struct ap_matrix_mdev - the mediated matrix device structure
+ * @node: allows the ap_matrix_mdev struct to be added to a list
+ * @matrix: the adapters, usage domains and control domains assigned to the
+ * mediated matrix device.
+ * @group_notifier: notifier block used for specifying callback function for
+ * handling the VFIO_GROUP_NOTIFY_SET_KVM event
+ * @iommu_notifier: notifier block used for receiving IOMMU DMA unmap events
+ * @kvm: the struct holding guest's state
+ * @pqap_hook: the function pointer to the interception handler for the
+ * PQAP(AQIC) instruction
+ * @mdev: the mediated device that maps to this structure
+ */
+struct ap_matrix_mdev {
+ struct list_head node;
+ struct ap_matrix matrix;
+ struct notifier_block group_notifier;
+ struct notifier_block iommu_notifier;
+ struct kvm *kvm;
+ struct kvm_s390_module_hook pqap_hook;
+ struct mdev_device *mdev;
+};
+
+struct vfio_ap_queue {
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long saved_pfn;
+ int apqn;
+#define VFIO_AP_ISC_INVALID 0xff
+ unsigned char saved_isc;
+};
+
+int vfio_ap_mdev_register(void);
+void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+ unsigned int retry);
+
+#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
new file mode 100644
index 000000000..b51800971
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -0,0 +1,2143 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2001, 2018
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/ctype.h>
+#include <linux/capability.h>
+#include <asm/debug.h>
+
+#define CREATE_TRACE_POINTS
+#include <asm/trace/zcrypt.h>
+
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
+
+/*
+ * Module description.
+ */
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
+ "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+/*
+ * zcrypt tracepoint functions
+ */
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
+
+static int zcrypt_hwrng_seed = 1;
+module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
+MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
+
+DEFINE_SPINLOCK(zcrypt_list_lock);
+LIST_HEAD(zcrypt_card_list);
+int zcrypt_device_count;
+
+static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
+static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
+
+atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
+EXPORT_SYMBOL(zcrypt_rescan_req);
+
+static LIST_HEAD(zcrypt_ops_list);
+
+/* Zcrypt related debug feature stuff. */
+debug_info_t *zcrypt_dbf_info;
+
+/**
+ * Process a rescan of the transport layer.
+ *
+ * Returns 1 if the rescan has been processed, otherwise 0.
+ */
+static inline int zcrypt_process_rescan(void)
+{
+ if (atomic_read(&zcrypt_rescan_req)) {
+ atomic_set(&zcrypt_rescan_req, 0);
+ atomic_inc(&zcrypt_rescan_count);
+ ap_bus_force_rescan();
+ ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
+ atomic_read(&zcrypt_rescan_count));
+ return 1;
+ }
+ return 0;
+}
+
+void zcrypt_msgtype_register(struct zcrypt_ops *zops)
+{
+ list_add_tail(&zops->list, &zcrypt_ops_list);
+}
+
+void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
+{
+ list_del_init(&zops->list);
+}
+
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
+{
+ struct zcrypt_ops *zops;
+
+ list_for_each_entry(zops, &zcrypt_ops_list, list)
+ if ((zops->variant == variant) &&
+ (!strncmp(zops->name, name, sizeof(zops->name))))
+ return zops;
+ return NULL;
+}
+EXPORT_SYMBOL(zcrypt_msgtype);
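+
+/*
+ * Usage sketch (illustrative; the names are taken from zcrypt_msgtype6.h):
+ * an adapter driver resolves its message type ops at probe time, e.g.:
+ *
+ *   struct zcrypt_ops *zops;
+ *
+ *   zops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_DEFAULT);
+ *   if (!zops)
+ *           return -ENODEV;
+ */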
+
+/*
+ * Multi device nodes extension functions.
+ */
+
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+
+struct zcdn_device;
+
+static struct class *zcrypt_class;
+static dev_t zcrypt_devt;
+static struct cdev zcrypt_cdev;
+
+struct zcdn_device {
+ struct device device;
+ struct ap_perms perms;
+};
+
+#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)
+
+#define ZCDN_MAX_NAME 32
+
+static int zcdn_create(const char *name);
+static int zcdn_destroy(const char *name);
+
+/*
+ * Find zcdn device by name.
+ * Returns reference to the zcdn device which needs to be released
+ * with put_device() after use.
+ */
+static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
+{
+ struct device *dev = class_find_device_by_name(zcrypt_class, name);
+
+ return dev ? to_zcdn_dev(dev) : NULL;
+}
+
+/*
+ * Find zcdn device by devt value.
+ * Returns reference to the zcdn device which needs to be released
+ * with put_device() after use.
+ */
+static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
+{
+ struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
+
+ return dev ? to_zcdn_dev(dev) : NULL;
+}
+
+static ssize_t ioctlmask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ buf[0] = '0';
+ buf[1] = 'x';
+ for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
+ snprintf(buf + 2 + 2 * i * sizeof(long),
+ PAGE_SIZE - 2 - 2 * i * sizeof(long),
+ "%016lx", zcdndev->perms.ioctlm[i]);
+ buf[2 + 2 * i * sizeof(long)] = '\n';
+ buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+ rc = 2 + 2 * i * sizeof(long) + 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t ioctlmask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
+ AP_IOCTLS, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(ioctlmask);
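+
+/*
+ * Format note (illustrative): with AP_IOCTLS == 256 the ioctlm bitmap
+ * spans four 64-bit words, so reading this attribute returns
+ *
+ *   "0x" + 64 hex digits + "\n"
+ *
+ * which is 67 characters in total, as computed by the show function above.
+ */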
+
+static ssize_t apmask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ buf[0] = '0';
+ buf[1] = 'x';
+ for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
+ snprintf(buf + 2 + 2 * i * sizeof(long),
+ PAGE_SIZE - 2 - 2 * i * sizeof(long),
+ "%016lx", zcdndev->perms.apm[i]);
+ buf[2 + 2 * i * sizeof(long)] = '\n';
+ buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+ rc = 2 + 2 * i * sizeof(long) + 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t apmask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
+ AP_DEVICES, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(apmask);
+
+static ssize_t aqmask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ buf[0] = '0';
+ buf[1] = 'x';
+ for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
+ snprintf(buf + 2 + 2 * i * sizeof(long),
+ PAGE_SIZE - 2 - 2 * i * sizeof(long),
+ "%016lx", zcdndev->perms.aqm[i]);
+ buf[2 + 2 * i * sizeof(long)] = '\n';
+ buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+ rc = 2 + 2 * i * sizeof(long) + 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t aqmask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
+ AP_DOMAINS, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(aqmask);
+
+static struct attribute *zcdn_dev_attrs[] = {
+ &dev_attr_ioctlmask.attr,
+ &dev_attr_apmask.attr,
+ &dev_attr_aqmask.attr,
+ NULL
+};
+
+static struct attribute_group zcdn_dev_attr_group = {
+ .attrs = zcdn_dev_attrs
+};
+
+static const struct attribute_group *zcdn_dev_attr_groups[] = {
+ &zcdn_dev_attr_group,
+ NULL
+};
+
+static ssize_t zcdn_create_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char name[ZCDN_MAX_NAME];
+
+ strncpy(name, skip_spaces(buf), sizeof(name));
+ name[sizeof(name) - 1] = '\0';
+
+ rc = zcdn_create(strim(name));
+
+ return rc ? rc : count;
+}
+
+static const struct class_attribute class_attr_zcdn_create =
+ __ATTR(create, 0600, NULL, zcdn_create_store);
+
+static ssize_t zcdn_destroy_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char name[ZCDN_MAX_NAME];
+
+ strncpy(name, skip_spaces(buf), sizeof(name));
+ name[sizeof(name) - 1] = '\0';
+
+ rc = zcdn_destroy(strim(name));
+
+ return rc ? rc : count;
+}
+
+static const struct class_attribute class_attr_zcdn_destroy =
+ __ATTR(destroy, 0600, NULL, zcdn_destroy_store);
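+
+/*
+ * Usage sketch (illustrative; the class name follows from ZCRYPT_NAME):
+ * additional zcrypt device nodes are created and destroyed through these
+ * class attributes, e.g.:
+ *
+ *   echo "my_zcrypt_node" > /sys/class/zcrypt/create
+ *   echo "my_zcrypt_node" > /sys/class/zcrypt/destroy
+ *
+ * The per-node ioctlmask/apmask/aqmask attributes defined above then
+ * restrict which ioctls, cards and queues are reachable through the node.
+ */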
+
+static void zcdn_device_release(struct device *dev)
+{
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n",
+ MAJOR(dev->devt), MINOR(dev->devt));
+
+ kfree(zcdndev);
+}
+
+static int zcdn_create(const char *name)
+{
+ dev_t devt;
+ int i, rc = 0;
+ char nodename[ZCDN_MAX_NAME];
+ struct zcdn_device *zcdndev;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ /* check if device node with this name already exists */
+ if (name[0]) {
+ zcdndev = find_zcdndev_by_name(name);
+ if (zcdndev) {
+ put_device(&zcdndev->device);
+ rc = -EEXIST;
+ goto unlockout;
+ }
+ }
+
+ /* find an unused minor number */
+ for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
+ devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
+ zcdndev = find_zcdndev_by_devt(devt);
+ if (zcdndev)
+ put_device(&zcdndev->device);
+ else
+ break;
+ }
+ if (i == ZCRYPT_MAX_MINOR_NODES) {
+ rc = -ENOSPC;
+ goto unlockout;
+ }
+
+ /* alloc and prepare a new zcdn device */
+ zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
+ if (!zcdndev) {
+ rc = -ENOMEM;
+ goto unlockout;
+ }
+ zcdndev->device.release = zcdn_device_release;
+ zcdndev->device.class = zcrypt_class;
+ zcdndev->device.devt = devt;
+ zcdndev->device.groups = zcdn_dev_attr_groups;
+ if (name[0])
+ strncpy(nodename, name, sizeof(nodename));
+ else
+ snprintf(nodename, sizeof(nodename),
+ ZCRYPT_NAME "_%d", (int) MINOR(devt));
+ nodename[sizeof(nodename)-1] = '\0';
+ if (dev_set_name(&zcdndev->device, nodename)) {
+ kfree(zcdndev);
+ rc = -EINVAL;
+ goto unlockout;
+ }
+ rc = device_register(&zcdndev->device);
+ if (rc) {
+ put_device(&zcdndev->device);
+ goto unlockout;
+ }
+
+ ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n",
+ MAJOR(devt), MINOR(devt));
+
+unlockout:
+ mutex_unlock(&ap_perms_mutex);
+ return rc;
+}
+
+static int zcdn_destroy(const char *name)
+{
+ int rc = 0;
+ struct zcdn_device *zcdndev;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ /* try to find this zcdn device */
+ zcdndev = find_zcdndev_by_name(name);
+ if (!zcdndev) {
+ rc = -ENOENT;
+ goto unlockout;
+ }
+
+ /*
+ * The zcdn device is not hard destroyed. It is subject to
+ * reference counting and thus just needs to be unregistered.
+ */
+ put_device(&zcdndev->device);
+ device_unregister(&zcdndev->device);
+
+unlockout:
+ mutex_unlock(&ap_perms_mutex);
+ return rc;
+}
+
+static void zcdn_destroy_all(void)
+{
+ int i;
+ dev_t devt;
+ struct zcdn_device *zcdndev;
+
+ mutex_lock(&ap_perms_mutex);
+ for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
+ devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
+ zcdndev = find_zcdndev_by_devt(devt);
+ if (zcdndev) {
+ put_device(&zcdndev->device);
+ device_unregister(&zcdndev->device);
+ }
+ }
+ mutex_unlock(&ap_perms_mutex);
+}
+
+#endif
+
+/**
+ * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
+ *
+ * Reading from the zcrypt device nodes is not supported beyond
+ * zcrypt 1.3.1; the handler unconditionally returns -EPERM.
+ */
+static ssize_t zcrypt_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ return -EPERM;
+}
+
+/**
+ * zcrypt_write(): Not allowed.
+ *
+ * Writing to the zcrypt device nodes is not allowed.
+ */
+static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ return -EPERM;
+}
+
+/**
+ * zcrypt_open(): Count number of users.
+ *
+ * Device open function to count number of users.
+ */
+static int zcrypt_open(struct inode *inode, struct file *filp)
+{
+ struct ap_perms *perms = &ap_perms;
+
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ if (filp->f_inode->i_cdev == &zcrypt_cdev) {
+ struct zcdn_device *zcdndev;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+ zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
+ /* find returns a reference, no get_device() needed */
+ mutex_unlock(&ap_perms_mutex);
+ if (zcdndev)
+ perms = &zcdndev->perms;
+ }
+#endif
+ filp->private_data = (void *) perms;
+
+ atomic_inc(&zcrypt_open_count);
+ return stream_open(inode, filp);
+}
+
+/**
+ * zcrypt_release(): Count number of users.
+ *
+ * Device close function to count number of users.
+ */
+static int zcrypt_release(struct inode *inode, struct file *filp)
+{
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ if (filp->f_inode->i_cdev == &zcrypt_cdev) {
+ struct zcdn_device *zcdndev;
+
+ mutex_lock(&ap_perms_mutex);
+ zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
+ mutex_unlock(&ap_perms_mutex);
+ if (zcdndev) {
+ /* 2 puts here: one for find, one for open */
+ put_device(&zcdndev->device);
+ put_device(&zcdndev->device);
+ }
+ }
+#endif
+
+ atomic_dec(&zcrypt_open_count);
+ return 0;
+}
+
+static inline int zcrypt_check_ioctl(struct ap_perms *perms,
+ unsigned int cmd)
+{
+ int rc = -EPERM;
+ int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
+
+ if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
+ if (test_bit_inv(ioctlnr, perms->ioctlm))
+ rc = 0;
+ }
+
+ if (rc)
+ ZCRYPT_DBF(DBF_WARN,
+ "ioctl check failed: ioctlnr=0x%04x rc=%d\n",
+ ioctlnr, rc);
+
+ return rc;
+}
+
+static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
+{
+ return test_bit_inv(card, perms->apm) ? true : false;
+}
+
+static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
+{
+ return test_bit_inv(queue, perms->aqm) ? true : false;
+}
+
+static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
+ struct zcrypt_queue *zq,
+ struct module **pmod,
+ unsigned int weight)
+{
+ if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+ return NULL;
+ zcrypt_queue_get(zq);
+ get_device(&zq->queue->ap_dev.device);
+ atomic_add(weight, &zc->load);
+ atomic_add(weight, &zq->load);
+ zq->request_count++;
+ *pmod = zq->queue->ap_dev.drv->driver.owner;
+ return zq;
+}
+
+static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
+ struct zcrypt_queue *zq,
+ struct module *mod,
+ unsigned int weight)
+{
+ zq->request_count--;
+ atomic_sub(weight, &zc->load);
+ atomic_sub(weight, &zq->load);
+ put_device(&zq->queue->ap_dev.device);
+ zcrypt_queue_put(zq);
+ module_put(mod);
+}
+
+static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
+ struct zcrypt_card *pref_zc,
+ unsigned int weight,
+ unsigned int pref_weight)
+{
+ if (!pref_zc)
+ return true;
+ weight += atomic_read(&zc->load);
+ pref_weight += atomic_read(&pref_zc->load);
+ if (weight == pref_weight)
+ return atomic64_read(&zc->card->total_request_count) <
+ atomic64_read(&pref_zc->card->total_request_count);
+ return weight < pref_weight;
+}
+
+static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
+ struct zcrypt_queue *pref_zq,
+ unsigned int weight,
+ unsigned int pref_weight)
+{
+ if (!pref_zq)
+ return true;
+ weight += atomic_read(&zq->load);
+ pref_weight += atomic_read(&pref_zq->load);
+ if (weight == pref_weight)
+ return zq->queue->total_request_count <
+ pref_zq->queue->total_request_count;
+ return weight < pref_weight;
+}
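+
+/*
+ * Worked example (illustrative numbers): a candidate queue with static
+ * weight 5 and current load 10 compares as 15 against a preferred queue
+ * at 8 + 6 = 14, so the preferred queue is kept; on a tie the queue with
+ * the lower total_request_count wins, spreading requests evenly.
+ */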
+
+/*
+ * zcrypt ioctls.
+ */
+static long zcrypt_rsa_modexpo(struct ap_perms *perms,
+ struct zcrypt_track *tr,
+ struct ica_rsa_modexpo *mex)
+{
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ struct ap_message ap_msg;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code;
+ int cpen, qpen, qid = 0, rc = -ENODEV;
+ struct module *mod;
+
+ trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
+
+ ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.cmd)
+ ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
+ if (mex->outputdatalength < mex->inputdatalength) {
+ func_code = 0;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * As long as outputdatalength is big enough, we can set the
+ * outputdatalength equal to the inputdatalength, since that is the
+ * number of bytes we will copy in any case
+ */
+ mex->outputdatalength = mex->inputdatalength;
+
+ rc = get_rsa_modex_fc(mex, &func_code);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for useable accelerator or CCA card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x18000000))
+ continue;
+ /* Check for size limits */
+ if (zc->min_mod_size > mex->inputdatalength ||
+ zc->max_mod_size < mex->inputdatalength)
+ continue;
+ /* check if device node has admission for this card */
+ if (!zcrypt_check_card(perms, zc->card->id))
+ continue;
+ /* get weight index of the card device */
+ wgt = zc->speed_rating[func_code];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is useable and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo ||
+ !zq->queue->config)
+ continue;
+ /* check if device node has admission for this queue */
+ if (!zcrypt_check_queue(perms,
+ AP_QID_QUEUE(zq->queue->qid)))
+ continue;
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_wgt = wgt + cpen + qpen;
+ }
+ }
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ ap_release_message(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
+ trace_s390_zcrypt_rep(mex, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
+}
+
+static long zcrypt_rsa_crt(struct ap_perms *perms,
+ struct zcrypt_track *tr,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ struct ap_message ap_msg;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code;
+ int cpen, qpen, qid = 0, rc = -ENODEV;
+ struct module *mod;
+
+ trace_s390_zcrypt_req(crt, TP_ICARSACRT);
+
+ ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.cmd)
+ ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
+ if (crt->outputdatalength < crt->inputdatalength) {
+ func_code = 0;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * As long as outputdatalength is big enough, we can set the
+ * outputdatalength equal to the inputdatalength, since that is the
+ * number of bytes we will copy in any case
+ */
+ crt->outputdatalength = crt->inputdatalength;
+
+ rc = get_rsa_crt_fc(crt, &func_code);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for useable accelerator or CCA card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x18000000))
+ continue;
+ /* Check for size limits */
+ if (zc->min_mod_size > crt->inputdatalength ||
+ zc->max_mod_size < crt->inputdatalength)
+ continue;
+ /* check if device node has admission for this card */
+ if (!zcrypt_check_card(perms, zc->card->id))
+ continue;
+ /* get weight index of the card device */
+ wgt = zc->speed_rating[func_code];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is useable and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo_crt ||
+ !zq->queue->config)
+ continue;
+ /* check if device node has admission for this queue */
+ if (!zcrypt_check_queue(perms,
+ AP_QID_QUEUE(zq->queue->qid)))
+ continue;
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_wgt = wgt + cpen + qpen;
+ }
+ }
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ ap_release_message(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
+ trace_s390_zcrypt_rep(crt, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
+}
+
+static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
+ struct zcrypt_track *tr,
+ struct ica_xcRB *xcRB)
+{
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ struct ap_message ap_msg;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code;
+ unsigned short *domain, tdom;
+ int cpen, qpen, qid = 0, rc = -ENODEV;
+ struct module *mod;
+
+ trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
+
+ xcRB->status = 0;
+ ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.cmd)
+ ap_msg.fi.cmd = tr->fi.cmd;
+ if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
+ ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
+ __func__, tr->fi.cmd);
+ xcRB->agent_ID = 0x4646;
+ }
+#endif
+
+ rc = get_cprb_fc(userspace, xcRB, &ap_msg, &func_code, &domain);
+ if (rc)
+ goto out;
+
+ /*
+ * If a valid target domain is set and this domain is NOT a usage
+ * domain but a control-only domain, use the default domain as target.
+ */
+ tdom = *domain;
+ if (tdom < AP_DOMAINS &&
+ !ap_test_config_usage_domain(tdom) &&
+ ap_test_config_ctrl_domain(tdom) &&
+ ap_domain_index >= 0)
+ tdom = ap_domain_index;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for useable CCA card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x10000000))
+ continue;
+ /* Check for user selected CCA card */
+ if (xcRB->user_defined != AUTOSELECT &&
+ xcRB->user_defined != zc->card->id)
+ continue;
+ /* check if device node has admission for this card */
+ if (!zcrypt_check_card(perms, zc->card->id))
+ continue;
+ /* get weight index of the card device */
+ wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is useable and eligible */
+ if (!zq->online ||
+ !zq->ops->send_cprb ||
+ !zq->queue->config ||
+ (tdom != AUTOSEL_DOM &&
+ tdom != AP_QID_QUEUE(zq->queue->qid)))
+ continue;
+ /* check if device node has admission for this queue */
+ if (!zcrypt_check_queue(perms,
+ AP_QID_QUEUE(zq->queue->qid)))
+ continue;
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_wgt = wgt + cpen + qpen;
+ }
+ }
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ /* in case of auto select, provide the correct domain */
+ qid = pref_zq->queue->qid;
+ if (*domain == AUTOSEL_DOM)
+ *domain = AP_QID_QUEUE(qid);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) {
+ ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n",
+ __func__, tr->fi.cmd);
+ *domain = 99;
+ }
+#endif
+
+ rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ ap_release_message(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
+ trace_s390_zcrypt_rep(xcRB, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
+}
+
+long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+{
+ return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB);
+}
+EXPORT_SYMBOL(zcrypt_send_cprb);
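+
+/*
+ * Usage sketch (illustrative): in-kernel callers such as the pkey/ccamisc
+ * code build an ica_xcRB with kernel-space request and reply buffers and
+ * submit it directly:
+ *
+ *   struct ica_xcRB xcrb;
+ *
+ *   (fill in CPRB request and reply pointers and lengths)
+ *   rc = zcrypt_send_cprb(&xcrb);
+ *
+ * The userspace == false flag is threaded through get_cprb_fc() and the
+ * msgtype ops so the embedded pointers are treated as kernel addresses.
+ */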
+
+static bool is_desired_ep11_card(unsigned int dev_id,
+ unsigned short target_num,
+ struct ep11_target_dev *targets)
+{
+ while (target_num-- > 0) {
+ if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
+ return true;
+ targets++;
+ }
+ return false;
+}
+
+static bool is_desired_ep11_queue(unsigned int dev_qid,
+ unsigned short target_num,
+ struct ep11_target_dev *targets)
+{
+ int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
+
+ while (target_num-- > 0) {
+ if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
+ (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
+ return true;
+ targets++;
+ }
+ return false;
+}
+
+static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
+ struct zcrypt_track *tr,
+ struct ep11_urb *xcrb)
+{
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ struct ep11_target_dev *targets;
+ unsigned short target_num;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code;
+ struct ap_message ap_msg;
+ int cpen, qpen, qid = 0, rc = -ENODEV;
+ struct module *mod;
+
+ trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
+
+ ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.cmd)
+ ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
+ target_num = (unsigned short) xcrb->targets_num;
+
+ /* empty list indicates autoselect (all available targets) */
+ targets = NULL;
+ if (target_num != 0) {
+ struct ep11_target_dev __user *uptr;
+
+ targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
+ if (!targets) {
+ func_code = 0;
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
+ if (z_copy_from_user(userspace, targets, uptr,
+ target_num * sizeof(*targets))) {
+ func_code = 0;
+ rc = -EFAULT;
+ goto out_free;
+ }
+ }
+
+ rc = get_ep11cprb_fc(userspace, xcrb, &ap_msg, &func_code);
+ if (rc)
+ goto out_free;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for useable EP11 card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x04000000))
+ continue;
+ /* Check for user selected EP11 card */
+ if (targets &&
+ !is_desired_ep11_card(zc->card->id, target_num, targets))
+ continue;
+ /* check if device node has admission for this card */
+ if (!zcrypt_check_card(perms, zc->card->id))
+ continue;
+ /* get weight index of the card device */
+ wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is useable and eligible */
+ if (!zq->online ||
+ !zq->ops->send_ep11_cprb ||
+ !zq->queue->config ||
+ (targets &&
+ !is_desired_ep11_queue(zq->queue->qid,
+ target_num, targets)))
+ continue;
+ /* check if device node has admission for this queue */
+ if (!zcrypt_check_queue(perms,
+ AP_QID_QUEUE(zq->queue->qid)))
+ continue;
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_wgt = wgt + cpen + qpen;
+ }
+ }
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out_free;
+ }
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+out_free:
+ kfree(targets);
+out:
+ ap_release_message(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
+ trace_s390_zcrypt_rep(xcrb, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
+}
+
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+ return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
+}
+EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
+
+static long zcrypt_rng(char *buffer)
+{
+ struct zcrypt_card *zc, *pref_zc;
+ struct zcrypt_queue *zq, *pref_zq;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code;
+ struct ap_message ap_msg;
+ unsigned int domain;
+ int qid = 0, rc = -ENODEV;
+ struct module *mod;
+
+ trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
+
+ ap_init_message(&ap_msg);
+ rc = get_rng_fc(&ap_msg, &func_code, &domain);
+ if (rc)
+ goto out;
+
+ pref_zc = NULL;
+ pref_zq = NULL;
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ /* Check for useable CCA card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x10000000))
+ continue;
+ /* get weight index of the card device */
+ wgt = zc->speed_rating[func_code];
+ if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
+ continue;
+ for_each_zcrypt_queue(zq, zc) {
+ /* check if device is useable and eligible */
+ if (!zq->online || !zq->ops->rng ||
+ !zq->queue->config)
+ continue;
+ if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
+ continue;
+ pref_zc = zc;
+ pref_zq = zq;
+ pref_wgt = wgt;
+ }
+ }
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+ if (!pref_zq) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ qid = pref_zq->queue->qid;
+ rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
+
+ spin_lock(&zcrypt_list_lock);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
+ spin_unlock(&zcrypt_list_lock);
+
+out:
+ ap_release_message(&ap_msg);
+ trace_s390_zcrypt_rep(buffer, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ return rc;
+}
+
+static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ struct zcrypt_device_status *stat;
+ int card, queue;
+
+ memset(devstatus, 0, MAX_ZDEV_ENTRIES
+ * sizeof(struct zcrypt_device_status));
+
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ card = AP_QID_CARD(zq->queue->qid);
+ if (card >= MAX_ZDEV_CARDIDS)
+ continue;
+ queue = AP_QID_QUEUE(zq->queue->qid);
+ stat = &devstatus[card * AP_DOMAINS + queue];
+ stat->hwtype = zc->card->ap_dev.device_type;
+ stat->functions = zc->card->functions >> 26;
+ stat->qid = zq->queue->qid;
+ stat->online = zq->online ? 0x01 : 0x00;
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+}
+
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ struct zcrypt_device_status_ext *stat;
+ int card, queue;
+
+ memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext));
+
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ card = AP_QID_CARD(zq->queue->qid);
+ queue = AP_QID_QUEUE(zq->queue->qid);
+ stat = &devstatus[card * AP_DOMAINS + queue];
+ stat->hwtype = zc->card->ap_dev.device_type;
+ stat->functions = zc->card->functions >> 26;
+ stat->qid = zq->queue->qid;
+ stat->online = zq->online ? 0x01 : 0x00;
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+}
+EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+
+int zcrypt_device_status_ext(int card, int queue,
+ struct zcrypt_device_status_ext *devstat)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+
+ memset(devstat, 0, sizeof(*devstat));
+
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (card == AP_QID_CARD(zq->queue->qid) &&
+ queue == AP_QID_QUEUE(zq->queue->qid)) {
+ devstat->hwtype = zc->card->ap_dev.device_type;
+ devstat->functions = zc->card->functions >> 26;
+ devstat->qid = zq->queue->qid;
+ devstat->online = zq->online ? 0x01 : 0x00;
+ spin_unlock(&zcrypt_list_lock);
+ return 0;
+ }
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(zcrypt_device_status_ext);
+
+static void zcrypt_status_mask(char status[], size_t max_adapters)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int card;
+
+ memset(status, 0, max_adapters);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
+ continue;
+ status[card] = zc->online ? zc->user_space_type : 0x0d;
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+}
+
+static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int card;
+
+ memset(qdepth, 0, max_adapters);
+ spin_lock(&zcrypt_list_lock);
+ local_bh_disable();
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
+ continue;
+ spin_lock(&zq->queue->lock);
+ qdepth[card] =
+ zq->queue->pendingq_count +
+ zq->queue->requestq_count;
+ spin_unlock(&zq->queue->lock);
+ }
+ }
+ local_bh_enable();
+ spin_unlock(&zcrypt_list_lock);
+}
+
+static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int card;
+ u64 cnt;
+
+ memset(reqcnt, 0, sizeof(u32) * max_adapters);
+ spin_lock(&zcrypt_list_lock);
+ local_bh_disable();
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
+ continue;
+ spin_lock(&zq->queue->lock);
+ cnt = zq->queue->total_request_count;
+ spin_unlock(&zq->queue->lock);
+ reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
+ }
+ }
+ local_bh_enable();
+ spin_unlock(&zcrypt_list_lock);
+}
+
+static int zcrypt_pendingq_count(void)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int pendingq_count;
+
+ pendingq_count = 0;
+ spin_lock(&zcrypt_list_lock);
+ local_bh_disable();
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ pendingq_count += zq->queue->pendingq_count;
+ spin_unlock(&zq->queue->lock);
+ }
+ }
+ local_bh_enable();
+ spin_unlock(&zcrypt_list_lock);
+ return pendingq_count;
+}
+
+static int zcrypt_requestq_count(void)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int requestq_count;
+
+ requestq_count = 0;
+ spin_lock(&zcrypt_list_lock);
+ local_bh_disable();
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ continue;
+ spin_lock(&zq->queue->lock);
+ requestq_count += zq->queue->requestq_count;
+ spin_unlock(&zq->queue->lock);
+ }
+ }
+ local_bh_enable();
+ spin_unlock(&zcrypt_list_lock);
+ return requestq_count;
+}
+
+static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct zcrypt_track tr;
+ struct ica_rsa_modexpo mex;
+ struct ica_rsa_modexpo __user *umex = (void __user *) arg;
+
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&mex, umex, sizeof(mex)))
+ return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (mex.inputdatalength & (1U << 31)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tr.fi.cmd = (u16)(mex.inputdatalength >> 16);
+ }
+ mex.inputdatalength &= 0x0000FFFF;
+#endif
+
+ do {
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+ break;
+#endif
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc) {
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
+ return rc;
+ }
+ return put_user(mex.outputdatalength, &umex->outputdatalength);
+}
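+
+/*
+ * Illustrative userspace sketch (not part of this file; assumes the
+ * uapi definitions from <asm/zcrypt.h>): the rescan/retry logic above
+ * is invisible to callers, who simply issue the ioctl on the misc
+ * device and check the result, e.g.:
+ *
+ *	struct ica_rsa_modexpo mex = { ... };	// filled in by the caller
+ *	int fd = open("/dev/z90crypt", O_RDWR);
+ *	if (fd >= 0 && ioctl(fd, ICARSAMODEXPO, &mex) != 0)
+ *		perror("ICARSAMODEXPO");	// EIO once retries are exhausted
+ */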
+
+static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct zcrypt_track tr;
+ struct ica_rsa_modexpo_crt crt;
+ struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
+
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&crt, ucrt, sizeof(crt)))
+ return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (crt.inputdatalength & (1U << 31)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tr.fi.cmd = (u16)(crt.inputdatalength >> 16);
+ }
+ crt.inputdatalength &= 0x0000FFFF;
+#endif
+
+ do {
+ rc = zcrypt_rsa_crt(perms, &tr, &crt);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+ break;
+#endif
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_rsa_crt(perms, &tr, &crt);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc) {
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
+ return rc;
+ }
+ return put_user(crt.outputdatalength, &ucrt->outputdatalength);
+}
+
+static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct ica_xcRB xcRB;
+ struct zcrypt_track tr;
+ struct ica_xcRB __user *uxcRB = (void __user *) arg;
+
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
+ return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (xcRB.status & (1U << 31)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tr.fi.cmd = (u16)(xcRB.status >> 16);
+ }
+ xcRB.status &= 0x0000FFFF;
+#endif
+
+ do {
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+ break;
+#endif
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
+ rc, xcRB.status);
+ if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
+ return -EFAULT;
+ return rc;
+}
+
+static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct ep11_urb xcrb;
+ struct zcrypt_track tr;
+ struct ep11_urb __user *uxcrb = (void __user *)arg;
+
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
+ return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (xcrb.req_len & (1ULL << 63)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tr.fi.cmd = (u16)(xcrb.req_len >> 48);
+ }
+ xcrb.req_len &= 0x0000FFFFFFFFFFFFULL;
+#endif
+
+ do {
+ rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+ break;
+#endif
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
+ if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+ return -EFAULT;
+ return rc;
+}
+
+static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+ struct ap_perms *perms =
+ (struct ap_perms *) filp->private_data;
+
+ rc = zcrypt_check_ioctl(perms, cmd);
+ if (rc)
+ return rc;
+
+ switch (cmd) {
+ case ICARSAMODEXPO:
+ return icarsamodexpo_ioctl(perms, arg);
+ case ICARSACRT:
+ return icarsacrt_ioctl(perms, arg);
+ case ZSECSENDCPRB:
+ return zsecsendcprb_ioctl(perms, arg);
+ case ZSENDEP11CPRB:
+ return zsendep11cprb_ioctl(perms, arg);
+ case ZCRYPT_DEVICE_STATUS: {
+ struct zcrypt_device_status_ext *device_status;
+ size_t total_size = MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext);
+
+ device_status = kzalloc(total_size, GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+ if (copy_to_user((char __user *) arg, device_status,
+ total_size))
+ rc = -EFAULT;
+ kfree(device_status);
+ return rc;
+ }
+ case ZCRYPT_STATUS_MASK: {
+ char status[AP_DEVICES];
+
+ zcrypt_status_mask(status, AP_DEVICES);
+ if (copy_to_user((char __user *) arg, status, sizeof(status)))
+ return -EFAULT;
+ return 0;
+ }
+ case ZCRYPT_QDEPTH_MASK: {
+ char qdepth[AP_DEVICES];
+
+ zcrypt_qdepth_mask(qdepth, AP_DEVICES);
+ if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+ return -EFAULT;
+ return 0;
+ }
+ case ZCRYPT_PERDEV_REQCNT: {
+ u32 *reqcnt;
+
+ reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
+ if (!reqcnt)
+ return -ENOMEM;
+ zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
+ if (copy_to_user((int __user *) arg, reqcnt,
+ sizeof(u32) * AP_DEVICES))
+ rc = -EFAULT;
+ kfree(reqcnt);
+ return rc;
+ }
+ case Z90STAT_REQUESTQ_COUNT:
+ return put_user(zcrypt_requestq_count(), (int __user *) arg);
+ case Z90STAT_PENDINGQ_COUNT:
+ return put_user(zcrypt_pendingq_count(), (int __user *) arg);
+ case Z90STAT_TOTALOPEN_COUNT:
+ return put_user(atomic_read(&zcrypt_open_count),
+ (int __user *) arg);
+ case Z90STAT_DOMAIN_INDEX:
+ return put_user(ap_domain_index, (int __user *) arg);
+ /*
+ * Deprecated ioctls
+ */
+ case ZDEVICESTATUS: {
+ /* the old ioctl supports only 64 adapters */
+ struct zcrypt_device_status *device_status;
+ size_t total_size = MAX_ZDEV_ENTRIES
+ * sizeof(struct zcrypt_device_status);
+
+ device_status = kzalloc(total_size, GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask(device_status);
+ if (copy_to_user((char __user *) arg, device_status,
+ total_size))
+ rc = -EFAULT;
+ kfree(device_status);
+ return rc;
+ }
+ case Z90STAT_STATUS_MASK: {
+ /* the old ioctl supports only 64 adapters */
+ char status[MAX_ZDEV_CARDIDS];
+
+ zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((char __user *) arg, status, sizeof(status)))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_QDEPTH_MASK: {
+ /* the old ioctl supports only 64 adapters */
+ char qdepth[MAX_ZDEV_CARDIDS];
+
+ zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_PERDEV_REQCNT: {
+ /* the old ioctl supports only 64 adapters */
+ u32 reqcnt[MAX_ZDEV_CARDIDS];
+
+ zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+ return -EFAULT;
+ return 0;
+ }
+ /* unknown ioctl number */
+ default:
+ ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+}
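+
+/*
+ * Illustrative userspace sketch (not part of this file): reading the
+ * per-adapter request counters through ZCRYPT_PERDEV_REQCNT, which
+ * fills an array of AP_DEVICES 32-bit counters:
+ *
+ *	__u32 reqcnt[256];	// AP_DEVICES entries (assumption: 256)
+ *	if (ioctl(fd, ZCRYPT_PERDEV_REQCNT, reqcnt) == 0)
+ *		printf("card 0: %u requests\n", reqcnt[0]);
+ */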
+
+#ifdef CONFIG_COMPAT
+/*
+ * ioctl32 conversion routines
+ */
+struct compat_ica_rsa_modexpo {
+ compat_uptr_t inputdata;
+ unsigned int inputdatalength;
+ compat_uptr_t outputdata;
+ unsigned int outputdatalength;
+ compat_uptr_t b_key;
+ compat_uptr_t n_modulus;
+};
+
+static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
+ struct compat_ica_rsa_modexpo mex32;
+ struct ica_rsa_modexpo mex64;
+ struct zcrypt_track tr;
+ long rc;
+
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&mex32, umex32, sizeof(mex32)))
+ return -EFAULT;
+ mex64.inputdata = compat_ptr(mex32.inputdata);
+ mex64.inputdatalength = mex32.inputdatalength;
+ mex64.outputdata = compat_ptr(mex32.outputdata);
+ mex64.outputdatalength = mex32.outputdatalength;
+ mex64.b_key = compat_ptr(mex32.b_key);
+ mex64.n_modulus = compat_ptr(mex32.n_modulus);
+ do {
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ return rc;
+ return put_user(mex64.outputdatalength,
+ &umex32->outputdatalength);
+}
+
+struct compat_ica_rsa_modexpo_crt {
+ compat_uptr_t inputdata;
+ unsigned int inputdatalength;
+ compat_uptr_t outputdata;
+ unsigned int outputdatalength;
+ compat_uptr_t bp_key;
+ compat_uptr_t bq_key;
+ compat_uptr_t np_prime;
+ compat_uptr_t nq_prime;
+ compat_uptr_t u_mult_inv;
+};
+
+static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
+ struct compat_ica_rsa_modexpo_crt crt32;
+ struct ica_rsa_modexpo_crt crt64;
+ struct zcrypt_track tr;
+ long rc;
+
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
+ return -EFAULT;
+ crt64.inputdata = compat_ptr(crt32.inputdata);
+ crt64.inputdatalength = crt32.inputdatalength;
+ crt64.outputdata = compat_ptr(crt32.outputdata);
+ crt64.outputdatalength = crt32.outputdatalength;
+ crt64.bp_key = compat_ptr(crt32.bp_key);
+ crt64.bq_key = compat_ptr(crt32.bq_key);
+ crt64.np_prime = compat_ptr(crt32.np_prime);
+ crt64.nq_prime = compat_ptr(crt32.nq_prime);
+ crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
+ do {
+ rc = zcrypt_rsa_crt(perms, &tr, &crt64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = zcrypt_rsa_crt(perms, &tr, &crt64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ return rc;
+ return put_user(crt64.outputdatalength,
+ &ucrt32->outputdatalength);
+}
+
+struct compat_ica_xcRB {
+ unsigned short agent_ID;
+ unsigned int user_defined;
+ unsigned short request_ID;
+ unsigned int request_control_blk_length;
+ unsigned char padding1[16 - sizeof(compat_uptr_t)];
+ compat_uptr_t request_control_blk_addr;
+ unsigned int request_data_length;
+ char padding2[16 - sizeof(compat_uptr_t)];
+ compat_uptr_t request_data_address;
+ unsigned int reply_control_blk_length;
+ char padding3[16 - sizeof(compat_uptr_t)];
+ compat_uptr_t reply_control_blk_addr;
+ unsigned int reply_data_length;
+ char padding4[16 - sizeof(compat_uptr_t)];
+ compat_uptr_t reply_data_addr;
+ unsigned short priority_window;
+ unsigned int status;
+} __packed;
+
+static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
+ struct compat_ica_xcRB xcRB32;
+ struct zcrypt_track tr;
+ struct ica_xcRB xcRB64;
+ long rc;
+
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
+ return -EFAULT;
+ xcRB64.agent_ID = xcRB32.agent_ID;
+ xcRB64.user_defined = xcRB32.user_defined;
+ xcRB64.request_ID = xcRB32.request_ID;
+ xcRB64.request_control_blk_length =
+ xcRB32.request_control_blk_length;
+ xcRB64.request_control_blk_addr =
+ compat_ptr(xcRB32.request_control_blk_addr);
+ xcRB64.request_data_length =
+ xcRB32.request_data_length;
+ xcRB64.request_data_address =
+ compat_ptr(xcRB32.request_data_address);
+ xcRB64.reply_control_blk_length =
+ xcRB32.reply_control_blk_length;
+ xcRB64.reply_control_blk_addr =
+ compat_ptr(xcRB32.reply_control_blk_addr);
+ xcRB64.reply_data_length = xcRB32.reply_data_length;
+ xcRB64.reply_data_addr =
+ compat_ptr(xcRB32.reply_data_addr);
+ xcRB64.priority_window = xcRB32.priority_window;
+ xcRB64.status = xcRB32.status;
+ do {
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ do {
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
+ xcRB32.reply_data_length = xcRB64.reply_data_length;
+ xcRB32.status = xcRB64.status;
+ if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
+ return -EFAULT;
+ return rc;
+}
+
+static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+ struct ap_perms *perms =
+ (struct ap_perms *) filp->private_data;
+
+ rc = zcrypt_check_ioctl(perms, cmd);
+ if (rc)
+ return rc;
+
+ if (cmd == ICARSAMODEXPO)
+ return trans_modexpo32(perms, filp, cmd, arg);
+ if (cmd == ICARSACRT)
+ return trans_modexpo_crt32(perms, filp, cmd, arg);
+ if (cmd == ZSECSENDCPRB)
+ return trans_xcRB32(perms, filp, cmd, arg);
+ return zcrypt_unlocked_ioctl(filp, cmd, arg);
+}
+#endif
+
+/*
+ * Misc device file operations.
+ */
+static const struct file_operations zcrypt_fops = {
+ .owner = THIS_MODULE,
+ .read = zcrypt_read,
+ .write = zcrypt_write,
+ .unlocked_ioctl = zcrypt_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = zcrypt_compat_ioctl,
+#endif
+ .open = zcrypt_open,
+ .release = zcrypt_release,
+ .llseek = no_llseek,
+};
+
+/*
+ * Misc device.
+ */
+static struct miscdevice zcrypt_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "z90crypt",
+ .fops = &zcrypt_fops,
+};
+
+static int zcrypt_rng_device_count;
+static u32 *zcrypt_rng_buffer;
+static int zcrypt_rng_buffer_index;
+static DEFINE_MUTEX(zcrypt_rng_mutex);
+
+static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ int rc;
+
+ /*
+ * We don't need locking here because the RNG API guarantees serialized
+ * read method calls.
+ */
+ if (zcrypt_rng_buffer_index == 0) {
+ rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+ /* on failure: retry once again after a requested rescan */
+ if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+ if (rc < 0)
+ return -EIO;
+ zcrypt_rng_buffer_index = rc / sizeof(*data);
+ }
+ *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
+ return sizeof(*data);
+}
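+
+/*
+ * Note on the buffering above: zcrypt_rng() refills the 4096 byte page
+ * and returns the number of random bytes obtained, so the index starts
+ * at rc / 4 (at most 1024 words) and counts down to zero; a new card
+ * request is only issued once the buffer is fully drained.
+ */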
+
+static struct hwrng zcrypt_rng_dev = {
+ .name = "zcrypt",
+ .data_read = zcrypt_rng_data_read,
+ .quality = 990,
+};
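+
+/*
+ * Per include/linux/hw_random.h the quality value above is the driver's
+ * estimate of true entropy, in bits per 1024 bits of input data.
+ * zcrypt_rng_device_add() below zeroes it when hwrng seeding is
+ * disabled, which keeps the hwrng core from crediting entropy from
+ * this device.
+ */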
+
+int zcrypt_rng_device_add(void)
+{
+ int rc = 0;
+
+ mutex_lock(&zcrypt_rng_mutex);
+ if (zcrypt_rng_device_count == 0) {
+ zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
+ if (!zcrypt_rng_buffer) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ zcrypt_rng_buffer_index = 0;
+ if (!zcrypt_hwrng_seed)
+ zcrypt_rng_dev.quality = 0;
+ rc = hwrng_register(&zcrypt_rng_dev);
+ if (rc)
+ goto out_free;
+ zcrypt_rng_device_count = 1;
+ } else {
+ zcrypt_rng_device_count++;
+ }
+ mutex_unlock(&zcrypt_rng_mutex);
+ return 0;
+
+out_free:
+ free_page((unsigned long) zcrypt_rng_buffer);
+out:
+ mutex_unlock(&zcrypt_rng_mutex);
+ return rc;
+}
+
+void zcrypt_rng_device_remove(void)
+{
+ mutex_lock(&zcrypt_rng_mutex);
+ zcrypt_rng_device_count--;
+ if (zcrypt_rng_device_count == 0) {
+ hwrng_unregister(&zcrypt_rng_dev);
+ free_page((unsigned long) zcrypt_rng_buffer);
+ }
+ mutex_unlock(&zcrypt_rng_mutex);
+}
+
+int __init zcrypt_debug_init(void)
+{
+ zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
+ DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
+ debug_set_level(zcrypt_dbf_info, DBF_ERR);
+
+ return 0;
+}
+
+void zcrypt_debug_exit(void)
+{
+ debug_unregister(zcrypt_dbf_info);
+}
+
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+
+static int __init zcdn_init(void)
+{
+ int rc;
+
+ /* create a new class 'zcrypt' */
+ zcrypt_class = class_create(THIS_MODULE, ZCRYPT_NAME);
+ if (IS_ERR(zcrypt_class)) {
+ rc = PTR_ERR(zcrypt_class);
+ goto out_class_create_failed;
+ }
+ zcrypt_class->dev_release = zcdn_device_release;
+
+ /* alloc device minor range */
+ rc = alloc_chrdev_region(&zcrypt_devt,
+ 0, ZCRYPT_MAX_MINOR_NODES,
+ ZCRYPT_NAME);
+ if (rc)
+ goto out_alloc_chrdev_failed;
+
+ cdev_init(&zcrypt_cdev, &zcrypt_fops);
+ zcrypt_cdev.owner = THIS_MODULE;
+ rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
+ if (rc)
+ goto out_cdev_add_failed;
+
+ /* need some class specific sysfs attributes */
+ rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
+ if (rc)
+ goto out_class_create_file_1_failed;
+ rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
+ if (rc)
+ goto out_class_create_file_2_failed;
+
+ return 0;
+
+out_class_create_file_2_failed:
+ class_remove_file(zcrypt_class, &class_attr_zcdn_create);
+out_class_create_file_1_failed:
+ cdev_del(&zcrypt_cdev);
+out_cdev_add_failed:
+ unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
+out_alloc_chrdev_failed:
+ class_destroy(zcrypt_class);
+out_class_create_failed:
+ return rc;
+}
+
+static void zcdn_exit(void)
+{
+ class_remove_file(zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
+ zcdn_destroy_all();
+ cdev_del(&zcrypt_cdev);
+ unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
+ class_destroy(zcrypt_class);
+}
+
+#endif
+
+/**
+ * zcrypt_api_init(): Module initialization.
+ *
+ * The module initialization code.
+ */
+int __init zcrypt_api_init(void)
+{
+ int rc;
+
+ rc = zcrypt_debug_init();
+ if (rc)
+ goto out;
+
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ rc = zcdn_init();
+ if (rc)
+ goto out;
+#endif
+
+ /* Register the request sprayer. */
+ rc = misc_register(&zcrypt_misc_device);
+ if (rc < 0)
+ goto out_misc_register_failed;
+
+ zcrypt_msgtype6_init();
+ zcrypt_msgtype50_init();
+
+ return 0;
+
+out_misc_register_failed:
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ zcdn_exit();
+#endif
+ zcrypt_debug_exit();
+out:
+ return rc;
+}
+
+/**
+ * zcrypt_api_exit(): Module termination.
+ *
+ * The module termination code.
+ */
+void __exit zcrypt_api_exit(void)
+{
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ zcdn_exit();
+#endif
+ misc_deregister(&zcrypt_misc_device);
+ zcrypt_msgtype6_exit();
+ zcrypt_msgtype50_exit();
+ zcrypt_ccamisc_exit();
+ zcrypt_ep11misc_exit();
+ zcrypt_debug_exit();
+}
+
+module_init(zcrypt_api_init);
+module_exit(zcrypt_api_exit);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
new file mode 100644
index 000000000..51c0b8bde
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2001, 2019
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_API_H_
+#define _ZCRYPT_API_H_
+
+#include <linux/atomic.h>
+#include <asm/debug.h>
+#include <asm/zcrypt.h>
+#include "ap_bus.h"
+
+/**
+ * Supported device types
+ */
+#define ZCRYPT_CEX2C 5
+#define ZCRYPT_CEX2A 6
+#define ZCRYPT_CEX3C 7
+#define ZCRYPT_CEX3A 8
+#define ZCRYPT_CEX4 10
+#define ZCRYPT_CEX5 11
+#define ZCRYPT_CEX6 12
+#define ZCRYPT_CEX7 13
+
+/**
+ * Large random numbers are pulled in 4096 byte chunks from the crypto cards
+ * and stored in a page. Be careful when increasing this buffer due to size
+ * limitations for AP requests.
+ */
+#define ZCRYPT_RNG_BUFFER_SIZE 4096
+
+/*
+ * Identifier for Crypto Request Performance Index
+ */
+enum crypto_ops {
+ MEX_1K,
+ MEX_2K,
+ MEX_4K,
+ CRT_1K,
+ CRT_2K,
+ CRT_4K,
+ HWRNG,
+ SECKEY,
+ NUM_OPS
+};
+
+struct zcrypt_queue;
+
+/* struct to hold tracking information for a userspace request/response */
+struct zcrypt_track {
+ int again_counter; /* retry attempts counter */
+ int last_qid; /* last qid used */
+ int last_rc; /* last return code */
+#ifdef CONFIG_ZCRYPT_DEBUG
+ struct ap_fi fi; /* failure injection cmd */
+#endif
+};
+
+/* defines related to message tracking */
+#define TRACK_AGAIN_MAX 10
+#define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000
+#define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000
+
+struct zcrypt_ops {
+ long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *,
+ struct ap_message *);
+ long (*rsa_modexpo_crt)(struct zcrypt_queue *,
+ struct ica_rsa_modexpo_crt *,
+ struct ap_message *);
+ long (*send_cprb)(bool userspace, struct zcrypt_queue *, struct ica_xcRB *,
+ struct ap_message *);
+ long (*send_ep11_cprb)(bool userspace, struct zcrypt_queue *, struct ep11_urb *,
+ struct ap_message *);
+ long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
+ struct list_head list; /* zcrypt ops list. */
+ struct module *owner;
+ int variant;
+ char name[128];
+};
+
+struct zcrypt_card {
+ struct list_head list; /* Device list. */
+ struct list_head zqueues; /* List of zcrypt queues */
+ struct kref refcount; /* device refcounting */
+ struct ap_card *card; /* The "real" ap card device. */
+ int online; /* User online/offline */
+
+ int user_space_type; /* User space device id. */
+ char *type_string; /* User space device name. */
+ int min_mod_size; /* Min number of bits. */
+ int max_mod_size; /* Max number of bits. */
+ int max_exp_bit_length;
+ const int *speed_rating; /* Speed idx of crypto ops. */
+ atomic_t load; /* Utilization of the crypto device */
+
+ int request_count; /* # current requests. */
+};
+
+struct zcrypt_queue {
+ struct list_head list; /* Device list. */
+ struct kref refcount; /* device refcounting */
+ struct zcrypt_card *zcard;
+ struct zcrypt_ops *ops; /* Crypto operations. */
+ struct ap_queue *queue; /* The "real" ap queue device. */
+ int online; /* User online/offline */
+
+ atomic_t load; /* Utilization of the crypto device */
+
+ int request_count; /* # current requests. */
+
+ struct ap_message reply; /* Per-device reply structure. */
+};
+
+/* transport layer rescanning */
+extern atomic_t zcrypt_rescan_req;
+
+extern spinlock_t zcrypt_list_lock;
+extern int zcrypt_device_count;
+extern struct list_head zcrypt_card_list;
+
+#define for_each_zcrypt_card(_zc) \
+ list_for_each_entry(_zc, &zcrypt_card_list, list)
+
+#define for_each_zcrypt_queue(_zq, _zc) \
+ list_for_each_entry(_zq, &(_zc)->zqueues, list)
+
+struct zcrypt_card *zcrypt_card_alloc(void);
+void zcrypt_card_free(struct zcrypt_card *);
+void zcrypt_card_get(struct zcrypt_card *);
+int zcrypt_card_put(struct zcrypt_card *);
+int zcrypt_card_register(struct zcrypt_card *);
+void zcrypt_card_unregister(struct zcrypt_card *);
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t);
+void zcrypt_queue_free(struct zcrypt_queue *);
+void zcrypt_queue_get(struct zcrypt_queue *);
+int zcrypt_queue_put(struct zcrypt_queue *);
+int zcrypt_queue_register(struct zcrypt_queue *);
+void zcrypt_queue_unregister(struct zcrypt_queue *);
+void zcrypt_queue_force_online(struct zcrypt_queue *, int);
+
+int zcrypt_rng_device_add(void);
+void zcrypt_rng_device_remove(void);
+
+void zcrypt_msgtype_register(struct zcrypt_ops *);
+void zcrypt_msgtype_unregister(struct zcrypt_ops *);
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
+int zcrypt_api_init(void);
+void zcrypt_api_exit(void);
+long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
+int zcrypt_device_status_ext(int card, int queue,
+ struct zcrypt_device_status_ext *devstatus);
+
+static inline unsigned long z_copy_from_user(bool userspace,
+ void *to,
+ const void __user *from,
+ unsigned long n)
+{
+ if (likely(userspace))
+ return copy_from_user(to, from, n);
+ memcpy(to, (void __force *) from, n);
+ return 0;
+}
+
+static inline unsigned long z_copy_to_user(bool userspace,
+ void __user *to,
+ const void *from,
+ unsigned long n)
+{
+ if (likely(userspace))
+ return copy_to_user(to, from, n);
+ memcpy((void __force *) to, from, n);
+ return 0;
+}
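+
+/*
+ * The z_copy_* helpers let the same request path serve both userspace
+ * ioctl callers and in-kernel users such as zcrypt_send_cprb(). A
+ * minimal sketch of the in-kernel case (userspace == false, kptr being
+ * a hypothetical kernel buffer cast to a __user pointer by the caller):
+ *
+ *	char buf[32];
+ *	if (z_copy_from_user(false, buf, (const void __user *) kptr, sizeof(buf)))
+ *		return -EFAULT;	// never taken, the memcpy() path returns 0
+ */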
+
+#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
new file mode 100644
index 000000000..09fe6bb88
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto card devices.
+ */
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static ssize_t online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+ int online = ac->config && zc->online ? 1 : 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+}
+
+static ssize_t online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+ struct zcrypt_queue *zq;
+ int online, id;
+
+ if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+ return -EINVAL;
+
+ if (online && !ac->config)
+ return -ENODEV;
+
+ zc->online = online;
+ id = zc->card->id;
+
+ ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
+
+ spin_lock(&zcrypt_list_lock);
+ list_for_each_entry(zq, &zc->zqueues, list)
+ zcrypt_queue_force_online(zq, online);
+ spin_unlock(&zcrypt_list_lock);
+ return count;
+}
+
+static DEVICE_ATTR_RW(online);
+
+static ssize_t load_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+}
+
+static DEVICE_ATTR_RO(load);
+
+static struct attribute *zcrypt_card_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_online.attr,
+ &dev_attr_load.attr,
+ NULL,
+};
+
+static const struct attribute_group zcrypt_card_attr_group = {
+ .attrs = zcrypt_card_attrs,
+};
+
+struct zcrypt_card *zcrypt_card_alloc(void)
+{
+ struct zcrypt_card *zc;
+
+ zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL);
+ if (!zc)
+ return NULL;
+ INIT_LIST_HEAD(&zc->list);
+ INIT_LIST_HEAD(&zc->zqueues);
+ kref_init(&zc->refcount);
+ return zc;
+}
+EXPORT_SYMBOL(zcrypt_card_alloc);
+
+void zcrypt_card_free(struct zcrypt_card *zc)
+{
+ kfree(zc);
+}
+EXPORT_SYMBOL(zcrypt_card_free);
+
+static void zcrypt_card_release(struct kref *kref)
+{
+ struct zcrypt_card *zdev =
+ container_of(kref, struct zcrypt_card, refcount);
+ zcrypt_card_free(zdev);
+}
+
+void zcrypt_card_get(struct zcrypt_card *zc)
+{
+ kref_get(&zc->refcount);
+}
+EXPORT_SYMBOL(zcrypt_card_get);
+
+int zcrypt_card_put(struct zcrypt_card *zc)
+{
+ return kref_put(&zc->refcount, zcrypt_card_release);
+}
+EXPORT_SYMBOL(zcrypt_card_put);
+
+/**
+ * zcrypt_card_register() - Register a crypto card device.
+ * @zc: Pointer to a crypto card device
+ *
+ * Register a crypto card device. Returns 0 if successful.
+ */
+int zcrypt_card_register(struct zcrypt_card *zc)
+{
+ int rc;
+
+ spin_lock(&zcrypt_list_lock);
+ list_add_tail(&zc->list, &zcrypt_card_list);
+ spin_unlock(&zcrypt_list_lock);
+
+ zc->online = 1;
+
+ ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
+
+ rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+ &zcrypt_card_attr_group);
+ if (rc) {
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zc->list);
+ spin_unlock(&zcrypt_list_lock);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(zcrypt_card_register);
+
+/**
+ * zcrypt_card_unregister(): Unregister a crypto card device.
+ * @zc: Pointer to crypto card device
+ *
+ * Unregister a crypto card device.
+ */
+void zcrypt_card_unregister(struct zcrypt_card *zc)
+{
+ ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id);
+
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zc->list);
+ spin_unlock(&zcrypt_list_lock);
+ sysfs_remove_group(&zc->card->ap_dev.device.kobj,
+ &zcrypt_card_attr_group);
+ zcrypt_card_put(zc);
+}
+EXPORT_SYMBOL(zcrypt_card_unregister);
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
new file mode 100644
index 000000000..f09bb8507
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2001, 2006
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _ZCRYPT_CCA_KEY_H_
+#define _ZCRYPT_CCA_KEY_H_
+
+struct T6_keyBlock_hdr {
+ unsigned short blen;
+ unsigned short ulen;
+ unsigned short flags;
+};
+
+/**
+ * mapping for the cca private ME key token.
+ * Three parts of interest here: the header, the private section and
+ * the public section.
+ *
+ * mapping for the cca key token header
+ */
+struct cca_token_hdr {
+ unsigned char token_identifier;
+ unsigned char version;
+ unsigned short token_length;
+ unsigned char reserved[4];
+} __packed;
+
+#define CCA_TKN_HDR_ID_EXT 0x1E
+
+#define CCA_PVT_USAGE_ALL 0x80
+
+/**
+ * mapping for the cca public section
+ * In a private key, the modulus doesn't appear in the public
+ * section. So an arbitrary public exponent of 0x010001 is used,
+ * and the section length is always 0x0F.
+ */
+struct cca_public_sec {
+ unsigned char section_identifier;
+ unsigned char version;
+ unsigned short section_length;
+ unsigned char reserved[2];
+ unsigned short exponent_len;
+ unsigned short modulus_bit_len;
+ unsigned short modulus_byte_len; /* In a private key, this is 0 */
+} __packed;
+
+/**
+ * mapping for the cca private CRT key 'token'
+ * The first three parts (the only parts considered in this release)
+ * are: the header, the private section and the public section.
+ * The header and public section use the same struct cca_token_hdr
+ * and struct cca_public_sec mappings as the private ME key token
+ * described above.
+ *
+ * Following the structure are the quantities p, q, dp, dq, u, pad,
+ * and modulus, in that order, where pad_len is the modulo 8
+ * complement of the residue modulo 8 of the sum of
+ * (p_len + q_len + dp_len + dq_len + u_len).
+ */
+struct cca_pvt_ext_CRT_sec {
+ unsigned char section_identifier;
+ unsigned char version;
+ unsigned short section_length;
+ unsigned char private_key_hash[20];
+ unsigned char reserved1[4];
+ unsigned char key_format;
+ unsigned char reserved2;
+ unsigned char key_name_hash[20];
+ unsigned char key_use_flags[4];
+ unsigned short p_len;
+ unsigned short q_len;
+ unsigned short dp_len;
+ unsigned short dq_len;
+ unsigned short u_len;
+ unsigned short mod_len;
+ unsigned char reserved3[4];
+ unsigned short pad_len;
+ unsigned char reserved4[52];
+ unsigned char confounder[8];
+} __packed;
+
+#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
+#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
+
+/**
+ * Set up public key fields of a type6 MEX message. Leading
+ * zeroes are stripped from the b_key.
+ * Note that all numerics in the key token are big-endian,
+ * while the entries in the key block header are little-endian.
+ *
+ * @mex: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or negative errno value.
+ */
+static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
+{
+ static struct cca_token_hdr static_pub_hdr = {
+ .token_identifier = 0x1E,
+ };
+ static struct cca_public_sec static_pub_sec = {
+ .section_identifier = 0x04,
+ };
+ struct {
+ struct T6_keyBlock_hdr t6_hdr;
+ struct cca_token_hdr pubHdr;
+ struct cca_public_sec pubSec;
+ char exponent[0];
+ } __packed *key = p;
+ unsigned char *temp;
+ int i;
+
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_modexpo(). However, do a plausibility check
+ * here to make sure the following copy_from_user() can't be utilized
+ * to compromise the system.
+ */
+ if (WARN_ON_ONCE(mex->inputdatalength > 512))
+ return -EINVAL;
+
+ memset(key, 0, sizeof(*key));
+
+ key->pubHdr = static_pub_hdr;
+ key->pubSec = static_pub_sec;
+
+ /* key parameter block */
+ temp = key->exponent;
+ if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
+ return -EFAULT;
+ /* Strip leading zeroes from b_key. */
+ for (i = 0; i < mex->inputdatalength; i++)
+ if (temp[i])
+ break;
+ if (i >= mex->inputdatalength)
+ return -EINVAL;
+ memmove(temp, temp + i, mex->inputdatalength - i);
+ temp += mex->inputdatalength - i;
+ /* modulus */
+ if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
+ return -EFAULT;
+
+ key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
+ key->pubSec.modulus_byte_len = mex->inputdatalength;
+ key->pubSec.exponent_len = mex->inputdatalength - i;
+ key->pubSec.section_length = sizeof(key->pubSec) +
+ 2*mex->inputdatalength - i;
+ key->pubHdr.token_length =
+ key->pubSec.section_length + sizeof(key->pubHdr);
+ key->t6_hdr.ulen = key->pubHdr.token_length + 4;
+ key->t6_hdr.blen = key->pubHdr.token_length + 6;
+ return sizeof(*key) + 2*mex->inputdatalength - i;
+}
+
+/**
+ * Set up private key fields of a type6 CRT message.
+ * Note that all numerics in the key token are big-endian,
+ * while the entries in the key block header are little-endian.
+ *
+ * @crt: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or negative errno value.
+ */
+static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
+{
+ static struct cca_public_sec static_cca_pub_sec = {
+ .section_identifier = 4,
+ .section_length = 0x000f,
+ .exponent_len = 0x0003,
+ };
+ static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
+ struct {
+ struct T6_keyBlock_hdr t6_hdr;
+ struct cca_token_hdr token;
+ struct cca_pvt_ext_CRT_sec pvt;
+ char key_parts[0];
+ } __packed *key = p;
+ struct cca_public_sec *pub;
+ int short_len, long_len, pad_len, key_len, size;
+
+ /*
+ * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_crt(). However, do a plausibility check
+ * here to make sure the following copy_from_user() can't be utilized
+ * to compromise the system.
+ */
+ if (WARN_ON_ONCE(crt->inputdatalength > 512))
+ return -EINVAL;
+
+ memset(key, 0, sizeof(*key));
+
+ short_len = (crt->inputdatalength + 1) / 2;
+ long_len = short_len + 8;
+ pad_len = -(3*long_len + 2*short_len) & 7;
+ key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
+ size = sizeof(*key) + key_len + sizeof(*pub) + 3;
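+ /*
+ * Worked example: a 2048 bit key has inputdatalength == 256, so
+ * short_len = 128 and long_len = 136; 3*136 + 2*128 = 664 is
+ * already a multiple of 8 and pad_len = 0. For a 250 byte
+ * modulus the sum is 649, leaving pad_len = 7.
+ */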
+
+ /* parameter block.key block */
+ key->t6_hdr.blen = size;
+ key->t6_hdr.ulen = size - 2;
+
+ /* key token header */
+ key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
+ key->token.token_length = size - 6;
+
+ /* private section */
+ key->pvt.section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
+ key->pvt.section_length = sizeof(key->pvt) + key_len;
+ key->pvt.key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
+ key->pvt.key_use_flags[0] = CCA_PVT_USAGE_ALL;
+ key->pvt.p_len = key->pvt.dp_len = key->pvt.u_len = long_len;
+ key->pvt.q_len = key->pvt.dq_len = short_len;
+ key->pvt.mod_len = crt->inputdatalength;
+ key->pvt.pad_len = pad_len;
+
+ /* key parts */
+ if (copy_from_user(key->key_parts, crt->np_prime, long_len) ||
+ copy_from_user(key->key_parts + long_len,
+ crt->nq_prime, short_len) ||
+ copy_from_user(key->key_parts + long_len + short_len,
+ crt->bp_key, long_len) ||
+ copy_from_user(key->key_parts + 2*long_len + short_len,
+ crt->bq_key, short_len) ||
+ copy_from_user(key->key_parts + 2*long_len + 2*short_len,
+ crt->u_mult_inv, long_len))
+ return -EFAULT;
+ memset(key->key_parts + 3*long_len + 2*short_len + pad_len,
+ 0xff, crt->inputdatalength);
+ pub = (struct cca_public_sec *)(key->key_parts + key_len);
+ *pub = static_cca_pub_sec;
+ pub->modulus_bit_len = 8 * crt->inputdatalength;
+ /*
+ * In a private key, the modulus doesn't appear in the public
+ * section. So, an arbitrary public exponent of 0x010001 will be
+ * used.
+ */
+ memcpy((char *) (pub + 1), pk_exponent, 3);
+ return size;
+}
+
+#endif /* _ZCRYPT_CCA_KEY_H_ */
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
new file mode 100644
index 000000000..ffab935dd
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -0,0 +1,1970 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ * Ingo Franzki <ifranzki@linux.ibm.com>
+ *
+ * Collection of CCA misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* Size of parameter block used for all cca requests/replies */
+#define PARMBSIZE 512
+
+/* Size of vardata block used for some of the cca requests/replies */
+#define VARDATASIZE 4096
+
+struct cca_info_list_entry {
+ struct list_head list;
+ u16 cardnr;
+ u16 domain;
+ struct cca_info info;
+};
+
+/* a list with cca_info_list_entry entries */
+static LIST_HEAD(cca_info_list);
+static DEFINE_SPINLOCK(cca_info_list_lock);
+
+/*
+ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize)
+{
+ struct secaeskeytoken *t = (struct secaeskeytoken *) token;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
+ return -EINVAL;
+ }
+ if (t->version != TOKVER_CCA_AES) {
+ if (dbg)
+ DBF("%s token check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) t->version, TOKVER_CCA_AES);
+ return -EINVAL;
+ }
+ if (keybitsize > 0 && t->bitsize != keybitsize) {
+ if (dbg)
+ DBF("%s token check failed, bitsize %d != %d\n",
+ __func__, (int) t->bitsize, keybitsize);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_secaeskeytoken);
+
+/*
+ * Simple check if the token is a valid CCA secure AES cipher key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. If checkcpacfexport is enabled, the key is also
+ * checked for the export flag to allow CPACF export.
+ * Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize,
+ int checkcpacfexport)
+{
+ struct cipherkeytoken *t = (struct cipherkeytoken *) token;
+ bool keybitsizeok = true;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
+ return -EINVAL;
+ }
+ if (t->version != TOKVER_CCA_VLSC) {
+ if (dbg)
+ DBF("%s token check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) t->version, TOKVER_CCA_VLSC);
+ return -EINVAL;
+ }
+ if (t->algtype != 0x02) {
+ if (dbg)
+ DBF("%s token check failed, algtype 0x%02x != 0x02\n",
+ __func__, (int) t->algtype);
+ return -EINVAL;
+ }
+ if (t->keytype != 0x0001) {
+ if (dbg)
+ DBF("%s token check failed, keytype 0x%04x != 0x0001\n",
+ __func__, (int) t->keytype);
+ return -EINVAL;
+ }
+ if (t->plfver != 0x00 && t->plfver != 0x01) {
+ if (dbg)
+ DBF("%s token check failed, unknown plfver 0x%02x\n",
+ __func__, (int) t->plfver);
+ return -EINVAL;
+ }
+ if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) {
+ if (dbg)
+ DBF("%s token check failed, unknown wpllen %d\n",
+ __func__, (int) t->wpllen);
+ return -EINVAL;
+ }
+ if (keybitsize > 0) {
+ switch (keybitsize) {
+ case 128:
+ if (t->wpllen != (t->plfver ? 640 : 512))
+ keybitsizeok = false;
+ break;
+ case 192:
+ if (t->wpllen != (t->plfver ? 640 : 576))
+ keybitsizeok = false;
+ break;
+ case 256:
+ if (t->wpllen != 640)
+ keybitsizeok = false;
+ break;
+ default:
+ keybitsizeok = false;
+ break;
+ }
+ if (!keybitsizeok) {
+ if (dbg)
+ DBF("%s token check failed, bitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ }
+ if (checkcpacfexport && !(t->kmf1 & KMF1_XPRT_CPAC)) {
+ if (dbg)
+ DBF("%s token check failed, XPRT_CPAC bit is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_secaescipherkey);
+
+/*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, size_t keysize,
+ int checkcpacfexport)
+{
+ struct eccprivkeytoken *t = (struct eccprivkeytoken *) token;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL_PKA) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL_PKA);
+ return -EINVAL;
+ }
+ if (t->len > keysize) {
+ if (dbg)
+ DBF("%s token check failed, len %d > keysize %zu\n",
+ __func__, (int) t->len, keysize);
+ return -EINVAL;
+ }
+ if (t->secid != 0x20) {
+ if (dbg)
+ DBF("%s token check failed, secid 0x%02x != 0x20\n",
+ __func__, (int) t->secid);
+ return -EINVAL;
+ }
+ if (checkcpacfexport && !(t->kutc & 0x01)) {
+ if (dbg)
+ DBF("%s token check failed, XPRTCPAC bit is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_sececckeytoken);
+
+/*
+ * Allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block and fill in values
+ * for the common fields. Returns 0 on success or errno value
+ * on failure.
+ */
+static int alloc_and_prep_cprbmem(size_t paramblen,
+ u8 **pcprbmem,
+ struct CPRBX **preqCPRB,
+ struct CPRBX **prepCPRB)
+{
+ u8 *cprbmem;
+ size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
+ struct CPRBX *preqcblk, *prepcblk;
+
+ /*
+ * allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block
+ */
+ cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
+ if (!cprbmem)
+ return -ENOMEM;
+
+ preqcblk = (struct CPRBX *) cprbmem;
+ prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
+
+ /* fill request cprb struct */
+ preqcblk->cprb_len = sizeof(struct CPRBX);
+ preqcblk->cprb_ver_id = 0x02;
+ memcpy(preqcblk->func_id, "T2", 2);
+ preqcblk->rpl_msgbl = cprbplusparamblen;
+ if (paramblen) {
+ preqcblk->req_parmb =
+ ((u8 __user *) preqcblk) + sizeof(struct CPRBX);
+ preqcblk->rpl_parmb =
+ ((u8 __user *) prepcblk) + sizeof(struct CPRBX);
+ }
+
+ *pcprbmem = cprbmem;
+ *preqCPRB = preqcblk;
+ *prepCPRB = prepcblk;
+
+ return 0;
+}
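+
+/*
+ * Resulting layout of the single kcalloc() above
+ * (2 * (sizeof(struct CPRBX) + paramblen) bytes):
+ *
+ *	+---------------+-----------------+---------------+-----------------+
+ *	| request CPRBX | request parmblk | reply CPRBX   | reply parmblk   |
+ *	+---------------+-----------------+---------------+-----------------+
+ */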
+
+/*
+ * Free the cprb memory allocated with the function above.
+ * If the scrub value is not zero, the memory is filled
+ * with zeros before freeing (useful if there was some
+ * clear key material in there).
+ */
+static void free_cprbmem(void *mem, size_t paramblen, int scrub)
+{
+ if (scrub)
+ memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
+ kfree(mem);
+}
+
+/*
+ * Helper function to prepare the xcrb struct
+ */
+static inline void prep_xcrb(struct ica_xcRB *pxcrb,
+ u16 cardnr,
+ struct CPRBX *preqcblk,
+ struct CPRBX *prepcblk)
+{
+ memset(pxcrb, 0, sizeof(*pxcrb));
+ pxcrb->agent_ID = 0x4341; /* 'CA' */
+ pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
+ pxcrb->request_control_blk_length =
+ preqcblk->cprb_len + preqcblk->req_parml;
+ pxcrb->request_control_blk_addr = (void __user *) preqcblk;
+ pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
+ pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
+}
+
+/*
+ * Generate a (random) CCA AES DATA secure key.
+ */
+int cca_genseckey(u16 cardnr, u16 domain,
+ u32 keybitsize, u8 seckey[SECKEYBLOBSIZE])
+{
+ int i, rc, keysize;
+ int seckeysize;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct kgreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ char key_form[8];
+ char key_length[8];
+ char key_type1[8];
+ char key_type2[8];
+ } lv1;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid[6];
+ } lv2;
+ } __packed * preqparm;
+ struct kgrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[0];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with KG request */
+ preqparm = (struct kgreqparm __force *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "KG", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ memcpy(preqparm->lv1.key_form, "OP ", 8);
+ switch (keybitsize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
+ break;
+ case PKEY_SIZE_AES_192:
+ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
+ break;
+ case PKEY_SIZE_AES_256:
+ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
+ preqparm->lv2.len = sizeof(struct lv2);
+ for (i = 0; i < 6; i++) {
+ preqparm->lv2.keyid[i].len = sizeof(struct keyid);
+ preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
+ }
+ preqcblk->req_parml = sizeof(struct kgreqparm);
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s secure key generate failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct kgrepparm *) ptr;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
+ prepparm->lv3.keyblock.tok, 8*keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_genseckey);
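+
+/*
+ * Minimal in-kernel usage sketch (illustrative; cardnr/domain must name
+ * a queue with a valid AES master key, 0xFFFF as cardnr autoselects):
+ *
+ *	u8 seckey[SECKEYBLOBSIZE];
+ *	int rc = cca_genseckey(cardnr, domain, PKEY_SIZE_AES_256, seckey);
+ *	if (rc)
+ *		return rc;	// otherwise seckey holds the secure key token
+ */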
+
+/*
+ * Generate a CCA AES DATA secure key from a given clear key value.
+ */
+int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE])
+{
+ int rc, keysize, seckeysize;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct cmreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 clrkey[0];
+ } lv1;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid;
+ } lv2;
+ } __packed * preqparm;
+ struct lv2 *plv2;
+ struct cmrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[0];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with CM request */
+ preqparm = (struct cmreqparm __force *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "CM", 2);
+ memcpy(preqparm->rule_array, "AES ", 8);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ switch (keybitsize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ break;
+ case PKEY_SIZE_AES_192:
+ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ break;
+ case PKEY_SIZE_AES_256:
+ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ preqparm->lv1.len = sizeof(struct lv1) + keysize;
+ memcpy(preqparm->lv1.clrkey, clrkey, keysize);
+ plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
+ plv2->len = sizeof(struct lv2);
+ plv2->keyid.len = sizeof(struct keyid);
+ plv2->keyid.attr = 0x30;
+ preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s clear key import failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct cmrepparm *) ptr;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
+ prepparm->lv3.keyblock.tok, 8*keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ if (seckey)
+ memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 1);
+ return rc;
+}
+EXPORT_SYMBOL(cca_clr2seckey);
+
+/*
+ * Derive a protected key from a CCA AES DATA secure key.
+ */
+int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 seckey[SECKEYBLOBSIZE],
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct uskreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ } lv1;
+ struct lv2 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ u8 token[0]; /* cca secure key token */
+ } lv2;
+ } __packed * preqparm;
+ struct uskrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 len;
+ u8 key[64]; /* the key (len bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } ckb;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with USK request */
+ preqparm = (struct uskreqparm __force *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "US", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
+ preqparm->lv1.attr_flags = 0x0001;
+ preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_len = sizeof(struct lv2)
+ - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_flags = 0x0000;
+ memcpy(preqparm->lv2.token, seckey, SECKEYBLOBSIZE);
+ preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct uskrepparm *) ptr;
+
+ /* check the returned keyblock */
+ if (prepparm->lv3.ckb.version != 0x01 &&
+ prepparm->lv3.ckb.version != 0x02) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int) prepparm->lv3.ckb.version);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ switch (prepparm->lv3.ckb.len) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->lv3.ckb.len);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->lv3.ckb.key, prepparm->lv3.ckb.len);
+ if (protkeylen)
+ *protkeylen = prepparm->lv3.ckb.len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_sec2protkey);
+
+/*
+ * AES cipher key skeleton created with CSNBKTB2 with these flags:
+ * INTERNAL, NO-KEY, AES, CIPHER, ANY-MODE, NOEX-SYM, NOEXAASY,
+ * NOEXUASY, XPRTCPAC, NOEX-RAW, NOEX-DES, NOEX-AES, NOEX-RSA
+ * used by cca_gencipherkey() and cca_clr2cipherkey().
+ */
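+/*
+ * The leading skeleton bytes form a keytoken_header: type 0x01
+ * (TOKTYPE_CCA_INTERNAL), token length 0x0038 (56 bytes, which is
+ * SIZEOF_SKELETON) and version 0x05 (TOKVER_CCA_VLSC).
+ */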
+static const u8 aes_cipher_key_skeleton[] = {
+ 0x01, 0x00, 0x00, 0x38, 0x05, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x01, 0x02, 0xc0, 0x00, 0xff,
+ 0x00, 0x03, 0x08, 0xc8, 0x00, 0x00, 0x00, 0x00 };
+#define SIZEOF_SKELETON (sizeof(aes_cipher_key_skeleton))
+
+/*
+ * Generate (random) CCA AES CIPHER secure key.
+ */
+int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct gkreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[2*8];
+ struct {
+ u16 len;
+ u8 key_type_1[8];
+ u8 key_type_2[8];
+ u16 clear_key_bit_len;
+ u16 key_name_1_len;
+ u16 key_name_2_len;
+ u16 user_data_1_len;
+ u16 user_data_2_len;
+ u8 key_name_1[0];
+ u8 key_name_2[0];
+ u8 user_data_1[0];
+ u8 user_data_2[0];
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 kek_id_1[0];
+ } tlv1;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 kek_id_2[0];
+ } tlv2;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_1[SIZEOF_SKELETON];
+ } tlv3;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_1_label[0];
+ } tlv4;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_2[0];
+ } tlv5;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_2_label[0];
+ } tlv6;
+ } kb;
+ } __packed * preqparm;
+ struct gkrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key[0]; /* 120-136 bytes */
+ } tlv1;
+ } kb;
+ } __packed * prepparm;
+ struct cipherkeytoken *t;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+ preqcblk->req_parml = sizeof(struct gkreqparm);
+
+ /* prepare request param block with GK request */
+ preqparm = (struct gkreqparm __force *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "GK", 2);
+ preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8;
+ memcpy(preqparm->rule_array, "AES OP ", 2*8);
+
+ /* prepare vud block */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ switch (keybitsize) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ preqparm->vud.clear_key_bit_len = keybitsize;
+ memcpy(preqparm->vud.key_type_1, "TOKEN ", 8);
+ memset(preqparm->vud.key_type_2, ' ', sizeof(preqparm->vud.key_type_2));
+
+ /* prepare kb block */
+ preqparm->kb.len = sizeof(preqparm->kb);
+ preqparm->kb.tlv1.len = sizeof(preqparm->kb.tlv1);
+ preqparm->kb.tlv1.flag = 0x0030;
+ preqparm->kb.tlv2.len = sizeof(preqparm->kb.tlv2);
+ preqparm->kb.tlv2.flag = 0x0030;
+ preqparm->kb.tlv3.len = sizeof(preqparm->kb.tlv3);
+ preqparm->kb.tlv3.flag = 0x0030;
+ memcpy(preqparm->kb.tlv3.gen_key_id_1,
+ aes_cipher_key_skeleton, SIZEOF_SKELETON);
+ preqparm->kb.tlv4.len = sizeof(preqparm->kb.tlv4);
+ preqparm->kb.tlv4.flag = 0x0030;
+ preqparm->kb.tlv5.len = sizeof(preqparm->kb.tlv5);
+ preqparm->kb.tlv5.flag = 0x0030;
+ preqparm->kb.tlv6.len = sizeof(preqparm->kb.tlv6);
+ preqparm->kb.tlv6.flag = 0x0030;
+
+ /* patch the skeleton key token export flags inside the kb block */
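+ /*
+ * Bits set in the upper byte of keygenflags are or-ed into the
+ * kmf1 key management field, bits set in the lower byte are
+ * cleared from it, see the KMF1_XPRT_* defines in
+ * zcrypt_ccamisc.h.
+ */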
+ if (keygenflags) {
+ t = (struct cipherkeytoken *) preqparm->kb.tlv3.gen_key_id_1;
+ t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
+ }
+
+ /* prepare xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s cipher key generate failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct gkrepparm *) ptr;
+
+ /* do some plausibility checks on the key block */
+ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+ DEBUG_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* and some checks on the generated key */
+ rc = cca_check_secaescipherkey(zcrypt_dbf_info, DBF_ERR,
+ prepparm->kb.tlv1.gen_key,
+ keybitsize, 1);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated vlsc key token */
+ t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key;
+ if (keybuf) {
+ if (*keybufsize >= t->len)
+ memcpy(keybuf, t, t->len);
+ else
+ rc = -EINVAL;
+ }
+ *keybufsize = t->len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_gencipherkey);
+
+/*
+ * Helper function which builds and sends the CSNBKPI2 CPRB.
+ */
+static int _ip_cprb_helper(u16 cardnr, u16 domain,
+ const char *rule_array_1,
+ const char *rule_array_2,
+ const char *rule_array_3,
+ const u8 *clr_key_value,
+ int clr_key_bit_size,
+ u8 *key_token,
+ int *key_token_size)
+{
+ int rc, n;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct rule_array_block {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[0];
+ } __packed * preq_ra_block;
+ struct vud_block {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0064 */
+ u16 clr_key_bit_len;
+ } tlv1;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0063 */
+ u8 clr_key[0]; /* clear key value bytes */
+ } tlv2;
+ } __packed * preq_vud_block;
+ struct key_block {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0030 */
+ u8 key_token[0]; /* key skeleton */
+ } tlv1;
+ } __packed * preq_key_block;
+ struct iprepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0030 */
+ u8 key_token[0]; /* key token */
+ } tlv1;
+ } kb;
+ } __packed * prepparm;
+ struct cipherkeytoken *t;
+ int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+ preqcblk->req_parml = 0;
+
+ /* prepare request param block with IP request */
+ preq_ra_block = (struct rule_array_block __force *) preqcblk->req_parmb;
+ memcpy(preq_ra_block->subfunc_code, "IP", 2);
+ preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8;
+ memcpy(preq_ra_block->rule_array, rule_array_1, 8);
+ memcpy(preq_ra_block->rule_array + 8, rule_array_2, 8);
+ preqcblk->req_parml = sizeof(struct rule_array_block) + 2 * 8;
+ if (rule_array_3) {
+ preq_ra_block->rule_array_len += 8;
+ memcpy(preq_ra_block->rule_array + 16, rule_array_3, 8);
+ preqcblk->req_parml += 8;
+ }
+
+ /* prepare vud block */
+ preq_vud_block = (struct vud_block __force *)
+ (preqcblk->req_parmb + preqcblk->req_parml);
+ n = complete ? 0 : (clr_key_bit_size + 7) / 8;
+ preq_vud_block->len = sizeof(struct vud_block) + n;
+ preq_vud_block->tlv1.len = sizeof(preq_vud_block->tlv1);
+ preq_vud_block->tlv1.flag = 0x0064;
+ preq_vud_block->tlv1.clr_key_bit_len = complete ? 0 : clr_key_bit_size;
+ preq_vud_block->tlv2.len = sizeof(preq_vud_block->tlv2) + n;
+ preq_vud_block->tlv2.flag = 0x0063;
+ if (!complete)
+ memcpy(preq_vud_block->tlv2.clr_key, clr_key_value, n);
+ preqcblk->req_parml += preq_vud_block->len;
+
+ /* prepare key block */
+ preq_key_block = (struct key_block __force *)
+ (preqcblk->req_parmb + preqcblk->req_parml);
+ n = *key_token_size;
+ preq_key_block->len = sizeof(struct key_block) + n;
+ preq_key_block->tlv1.len = sizeof(preq_key_block->tlv1) + n;
+ preq_key_block->tlv1.flag = 0x0030;
+ memcpy(preq_key_block->tlv1.key_token, key_token, *key_token_size);
+ preqcblk->req_parml += preq_key_block->len;
+
+ /* prepare xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s CSNBKPI2 failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct iprepparm *) ptr;
+
+ /* do some plausibility checks on the key block */
+ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
+ DEBUG_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* do not check the key here, it may be incomplete */
+
+ /* copy the vlsc key token back */
+ t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token;
+ memcpy(key_token, t, t->len);
+ *key_token_size = t->len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+
+/*
+ * Build CCA AES CIPHER secure key with a given clear key value.
+ */
+int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ u8 *token;
+ int tokensize;
+ u8 exorbuf[32];
+ struct cipherkeytoken *t;
+
+ /* fill exorbuf with random data */
+ get_random_bytes(exorbuf, sizeof(exorbuf));
+
+ /* allocate space for the key token to build */
+ token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL);
+ if (!token)
+ return -ENOMEM;
+
+ /* prepare the token with the key skeleton */
+ tokensize = SIZEOF_SKELETON;
+ memcpy(token, aes_cipher_key_skeleton, tokensize);
+
+ /* patch the skeleton key token export flags */
+ if (keygenflags) {
+ t = (struct cipherkeytoken *) token;
+ t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
+ }
+
+ /*
+ * Do the key import with the clear key value in 4 steps:
+ * 1/4 FIRST import with only random data
+ * 2/4 EXOR the clear key
+ * 3/4 EXOR the very same random data again
+ * 4/4 COMPLETE the secure cipher key import
+ */
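+ /*
+ * Since each ADD-PART is exor-ed into the accumulated key
+ * material, importing random data r, then the clear key x, then
+ * r again leaves exactly x (r ^ x ^ r == x) without ever sending
+ * the final key value within one single key part.
+ */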
+ rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
+ exorbuf, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
+ clrkey, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
+ exorbuf, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
+ NULL, keybitsize, token, &tokensize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* copy the generated key token */
+ if (keybuf) {
+ if (tokensize > *keybufsize)
+ rc = -EINVAL;
+ else
+ memcpy(keybuf, token, tokensize);
+ }
+ *keybufsize = tokensize;
+
+out:
+ kfree(token);
+ return rc;
+}
+EXPORT_SYMBOL(cca_clr2cipherkey);
+
+/*
+ * Derive protected key from a CCA AES cipher secure key.
+ */
+int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct aureqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ u8 rule_array[8];
+ struct {
+ u16 len;
+ u16 tk_blob_len;
+ u16 tk_blob_tag;
+ u8 tk_blob[66];
+ } vud;
+ struct {
+ u16 len;
+ u16 cca_key_token_len;
+ u16 cca_key_token_flags;
+ u8 cca_key_token[0]; // 64 or more
+ } kb;
+ } __packed * preqparm;
+ struct aurepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ u16 sublen;
+ u16 tag;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 keylen;
+ u8 key[64]; /* the key (keylen bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } ckb;
+ } vud;
+ struct {
+ u16 len;
+ } kb;
+ } __packed * prepparm;
+ int keytoklen = ((struct cipherkeytoken *)ckey)->len;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with AU request */
+ preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "AU", 2);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len)
+ + sizeof(preqparm->rule_array);
+ memcpy(preqparm->rule_array, "EXPT-SK ", 8);
+ /* vud, tk blob */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+ + 2 * sizeof(uint16_t);
+ preqparm->vud.tk_blob_tag = 0x00C2;
+ /* kb, cca token */
+ preqparm->kb.len = keytoklen + 3 * sizeof(uint16_t);
+ preqparm->kb.cca_key_token_len = keytoklen + 2 * sizeof(uint16_t);
+ memcpy(preqparm->kb.cca_key_token, ckey, keytoklen);
+ /* now fill length of param block into cprb */
+ preqcblk->req_parml = sizeof(struct aureqparm) + keytoklen;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN(
+ "%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct aurepparm *) ptr;
+
+ /* check the returned keyblock */
+ if (prepparm->vud.ckb.version != 0x01 &&
+ prepparm->vud.ckb.version != 0x02) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int) prepparm->vud.ckb.version);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepparm->vud.ckb.algo != 0x02) {
+ DEBUG_ERR(
+ "%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
+ __func__, (int) prepparm->vud.ckb.algo);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
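+ /* again keylen is the AES key size plus the 32 byte verification pattern */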
+ switch (prepparm->vud.ckb.keylen) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->vud.ckb.keylen);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
+ if (protkeylen)
+ *protkeylen = prepparm->vud.ckb.keylen;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_cipher2protkey);
+
+/*
+ * Derive protected key from a CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct aureqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ u8 rule_array[8];
+ struct {
+ u16 len;
+ u16 tk_blob_len;
+ u16 tk_blob_tag;
+ u8 tk_blob[66];
+ } vud;
+ struct {
+ u16 len;
+ u16 cca_key_token_len;
+ u16 cca_key_token_flags;
+ u8 cca_key_token[0];
+ } kb;
+ } __packed * preqparm;
+ struct aurepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ u16 sublen;
+ u16 tag;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 keylen;
+ u8 key[0]; /* the key (keylen bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } ckb;
+ } vud;
+ struct {
+ u16 len;
+ } kb;
+ } __packed * prepparm;
+ int keylen = ((struct eccprivkeytoken *)key)->len;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with AU request */
+ preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "AU", 2);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len)
+ + sizeof(preqparm->rule_array);
+ memcpy(preqparm->rule_array, "EXPT-SK ", 8);
+ /* vud, tk blob */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+ + 2 * sizeof(uint16_t);
+ preqparm->vud.tk_blob_tag = 0x00C2;
+ /* kb, cca token */
+ preqparm->kb.len = keylen + 3 * sizeof(uint16_t);
+ preqparm->kb.cca_key_token_len = keylen + 2 * sizeof(uint16_t);
+ memcpy(preqparm->kb.cca_key_token, key, keylen);
+ /* now fill length of param block into cprb */
+ preqcblk->req_parml = sizeof(struct aureqparm) + keylen;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN(
+ "%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct aurepparm *) ptr;
+
+ /* check the returned keyblock */
+ if (prepparm->vud.ckb.version != 0x02) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
+ __func__, (int) prepparm->vud.ckb.version);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepparm->vud.ckb.algo != 0x81) {
+ DEBUG_ERR(
+ "%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
+ __func__, (int) prepparm->vud.ckb.algo);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ if (prepparm->vud.ckb.keylen > *protkeylen) {
+ DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n",
+ __func__, prepparm->vud.ckb.keylen, *protkeylen);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
+ *protkeylen = prepparm->vud.ckb.keylen;
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_ECC;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_ecc2protkey);
+
+/*
+ * query cryptographic facility from CCA adapter
+ */
+int cca_query_crypto_facility(u16 cardnr, u16 domain,
+ const char *keyword,
+ u8 *rarray, size_t *rarraylen,
+ u8 *varray, size_t *varraylen)
+{
+ int rc;
+ u16 len;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct fqreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 data[VARDATASIZE];
+ } lv1;
+ u16 dummylen;
+ } __packed * preqparm;
+ size_t parmbsize = sizeof(struct fqreqparm);
+ struct fqrepparm {
+ u8 subfunc_code[2];
+ u8 lvdata[0];
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with FQ request */
+ preqparm = (struct fqreqparm __force *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "FQ", 2);
+ memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ preqparm->lv1.len = sizeof(preqparm->lv1);
+ preqparm->dummylen = sizeof(preqparm->dummylen);
+ preqcblk->req_parml = parmbsize;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct fqrepparm *) ptr;
+ ptr = prepparm->lvdata;
+
+ /* check and possibly copy reply rule array */
+ len = *((u16 *) ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (rarray && rarraylen && *rarraylen > 0) {
+ *rarraylen = (len > *rarraylen ? *rarraylen : len);
+ memcpy(rarray, ptr, *rarraylen);
+ }
+ ptr += len;
+ }
+ /* check and possibly copy reply var array */
+ len = *((u16 *) ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (varray && varraylen && *varraylen > 0) {
+ *varraylen = (len > *varraylen ? *varraylen : len);
+ memcpy(varray, ptr, *varraylen);
+ }
+ ptr += len;
+ }
+
+out:
+ free_cprbmem(mem, parmbsize, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_query_crypto_facility);
+
+static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci)
+{
+ int rc = -ENOENT;
+ struct cca_info_list_entry *ptr;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry(ptr, &cca_info_list, list) {
+ if (ptr->cardnr == cardnr && ptr->domain == domain) {
+ memcpy(ci, &ptr->info, sizeof(*ci));
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+
+ return rc;
+}
+
+static void cca_info_cache_update(u16 cardnr, u16 domain,
+ const struct cca_info *ci)
+{
+ int found = 0;
+ struct cca_info_list_entry *ptr;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry(ptr, &cca_info_list, list) {
+ if (ptr->cardnr == cardnr &&
+ ptr->domain == domain) {
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+ if (!ptr) {
+ spin_unlock_bh(&cca_info_list_lock);
+ return;
+ }
+ ptr->cardnr = cardnr;
+ ptr->domain = domain;
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ list_add(&ptr->list, &cca_info_list);
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+}
+
+static void cca_info_cache_scrub(u16 cardnr, u16 domain)
+{
+ struct cca_info_list_entry *ptr;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry(ptr, &cca_info_list, list) {
+ if (ptr->cardnr == cardnr &&
+ ptr->domain == domain) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ break;
+ }
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+}
+
+static void __exit mkvp_cache_free(void)
+{
+ struct cca_info_list_entry *ptr, *pnext;
+
+ spin_lock_bh(&cca_info_list_lock);
+ list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+ spin_unlock_bh(&cca_info_list_lock);
+}
+
+/*
+ * Fetch cca_info values via cca_query_crypto_facility from the adapter.
+ */
+static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
+{
+ int rc, found = 0;
+ size_t rlen, vlen;
+ u8 *rarray, *varray, *pg;
+ struct zcrypt_device_status_ext devstat;
+
+ memset(ci, 0, sizeof(*ci));
+
+ /* get first info from zcrypt device driver about this apqn */
+ rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
+ if (rc)
+ return rc;
+ ci->hwtype = devstat.hwtype;
+
+ /* prep page for rule array and var array use */
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+ rarray = pg;
+ varray = pg + PAGE_SIZE/2;
+ rlen = vlen = PAGE_SIZE/2;
+
+ /* QF for this card/domain */
+ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA",
+ rarray, &rlen, varray, &vlen);
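+ /*
+ * The reply rule array is a list of 8 byte entries; entries
+ * 8, 9 and 10 carry the new/current/old AES MK state chars,
+ * hence the rlen >= 10*8 check below.
+ */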
+ if (rc == 0 && rlen >= 10*8 && vlen >= 204) {
+ memcpy(ci->serial, rarray, 8);
+ ci->new_aes_mk_state = (char) rarray[7*8];
+ ci->cur_aes_mk_state = (char) rarray[8*8];
+ ci->old_aes_mk_state = (char) rarray[9*8];
+ if (ci->old_aes_mk_state == '2')
+ memcpy(&ci->old_aes_mkvp, varray + 172, 8);
+ if (ci->cur_aes_mk_state == '2')
+ memcpy(&ci->cur_aes_mkvp, varray + 184, 8);
+ if (ci->new_aes_mk_state == '3')
+ memcpy(&ci->new_aes_mkvp, varray + 196, 8);
+ found++;
+ }
+ if (!found)
+ goto out;
+ rlen = vlen = PAGE_SIZE/2;
+ rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
+ rarray, &rlen, varray, &vlen);
+ if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
+ ci->new_apka_mk_state = (char) rarray[10*8];
+ ci->cur_apka_mk_state = (char) rarray[11*8];
+ ci->old_apka_mk_state = (char) rarray[12*8];
+ if (ci->old_apka_mk_state == '2')
+ memcpy(&ci->old_apka_mkvp, varray + 208, 8);
+ if (ci->cur_apka_mk_state == '2')
+ memcpy(&ci->cur_apka_mkvp, varray + 220, 8);
+ if (ci->new_apka_mk_state == '3')
+ memcpy(&ci->new_apka_mkvp, varray + 232, 8);
+ found++;
+ }
+
+out:
+ free_page((unsigned long) pg);
+ return found == 2 ? 0 : -ENOENT;
+}
+
+/*
+ * Fetch CCA information about a CCA queue.
+ */
+int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify)
+{
+ int rc;
+
+ rc = cca_info_cache_fetch(card, dom, ci);
+ if (rc || verify) {
+ rc = fetch_cca_info(card, dom, ci);
+ if (rc == 0)
+ cca_info_cache_update(card, dom, ci);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(cca_get_info);
+
+/*
+ * Search for a matching crypto card based on the
+ * Master Key Verification Pattern given.
+ */
+static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
+ int verify, int minhwtype)
+{
+ struct zcrypt_device_status_ext *device_status;
+ u16 card, dom;
+ struct cca_info ci;
+ int i, rc, oi = -1;
+
+ /* mkvp must not be zero, minhwtype needs to be >= 0 */
+ if (mkvp == 0 || minhwtype < 0)
+ return -EINVAL;
+
+ /* fetch status of all crypto cards */
+ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* walk through all crypto cards */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ if (device_status[i].online &&
+ device_status[i].functions & 0x04) {
+ /* enabled CCA card, check current mkvp from cache */
+ if (cca_info_cache_fetch(card, dom, &ci) == 0 &&
+ ci.hwtype >= minhwtype &&
+ ci.cur_aes_mk_state == '2' &&
+ ci.cur_aes_mkvp == mkvp) {
+ if (!verify)
+ break;
+ /* verify: refresh card info */
+ if (fetch_cca_info(card, dom, &ci) == 0) {
+ cca_info_cache_update(card, dom, &ci);
+ if (ci.hwtype >= minhwtype &&
+ ci.cur_aes_mk_state == '2' &&
+ ci.cur_aes_mkvp == mkvp)
+ break;
+ }
+ }
+ } else {
+ /* Card is offline and/or not a CCA card. */
+ /* del mkvp entry from cache if it exists */
+ cca_info_cache_scrub(card, dom);
+ }
+ }
+ if (i >= MAX_ZDEV_ENTRIES_EXT) {
+ /* nothing found, so this time without cache */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ if (!(device_status[i].online &&
+ device_status[i].functions & 0x04))
+ continue;
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* fresh fetch mkvp from adapter */
+ if (fetch_cca_info(card, dom, &ci) == 0) {
+ cca_info_cache_update(card, dom, &ci);
+ if (ci.hwtype >= minhwtype &&
+ ci.cur_aes_mk_state == '2' &&
+ ci.cur_aes_mkvp == mkvp)
+ break;
+ if (ci.hwtype >= minhwtype &&
+ ci.old_aes_mk_state == '2' &&
+ ci.old_aes_mkvp == mkvp &&
+ oi < 0)
+ oi = i;
+ }
+ }
+ if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
+ /* old mkvp matched, use this card then */
+ card = AP_QID_CARD(device_status[oi].qid);
+ dom = AP_QID_QUEUE(device_status[oi].qid);
+ }
+ }
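+ /* rc 0: the current MKVP matched, rc 1: only the old MKVP matched */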
+ if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
+ if (pcardnr)
+ *pcardnr = card;
+ if (pdomain)
+ *pdomain = dom;
+ rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1);
+ } else
+ rc = -ENODEV;
+
+ kvfree(device_status);
+ return rc;
+}
+
+/*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key token.
+ */
+int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify)
+{
+ u64 mkvp;
+ int minhwtype = 0;
+ const struct keytoken_header *hdr = (struct keytoken_header *) key;
+
+ if (hdr->type != TOKTYPE_CCA_INTERNAL)
+ return -EINVAL;
+
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ mkvp = ((struct secaeskeytoken *)key)->mkvp;
+ break;
+ case TOKVER_CCA_VLSC:
+ mkvp = ((struct cipherkeytoken *)key)->mkvp0;
+ minhwtype = AP_DEVICE_TYPE_CEX6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return findcard(mkvp, pcardnr, pdomain, verify, minhwtype);
+}
+EXPORT_SYMBOL(cca_findcard);
+
+int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+ int verify)
+{
+ struct zcrypt_device_status_ext *device_status;
+ u32 *_apqns = NULL, _nr_apqns = 0;
+ int i, card, dom, curmatch, oldmatch, rc = 0;
+ struct cca_info ci;
+
+ /* fetch status of all crypto cards */
+ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* allocate 1k space for up to 256 apqns */
+ _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!_apqns) {
+ kvfree(device_status);
+ return -ENOMEM;
+ }
+
+ /* walk through all the crypto apqns */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for cca functions */
+ if (!(device_status[i].functions & 0x04))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* get cca info on this apqn */
+ if (cca_get_info(card, dom, &ci, verify))
+ continue;
+ /* current master key needs to be valid */
+ if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2')
+ continue;
+ if (mktype == APKA_MK_SET && ci.cur_apka_mk_state != '2')
+ continue;
+ /* check min hardware type */
+ if (minhwtype > 0 && minhwtype > ci.hwtype)
+ continue;
+ if (cur_mkvp || old_mkvp) {
+ /* check mkvps */
+ curmatch = oldmatch = 0;
+ if (mktype == AES_MK_SET) {
+ if (cur_mkvp && cur_mkvp == ci.cur_aes_mkvp)
+ curmatch = 1;
+ if (old_mkvp && ci.old_aes_mk_state == '2' &&
+ old_mkvp == ci.old_aes_mkvp)
+ oldmatch = 1;
+ } else {
+ if (cur_mkvp && cur_mkvp == ci.cur_apka_mkvp)
+ curmatch = 1;
+ if (old_mkvp && ci.old_apka_mk_state == '2' &&
+ old_mkvp == ci.old_apka_mkvp)
+ oldmatch = 1;
+ }
+ if (curmatch + oldmatch < 1)
+ continue;
+ }
+ /* apqn passed all filtering criteria, add to the array */
+ if (_nr_apqns < 256)
+ _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+ }
+
+ /* nothing found? */
+ if (!_nr_apqns) {
+ kfree(_apqns);
+ rc = -ENODEV;
+ } else {
+ /* no re-allocation, simply return the _apqns array */
+ *apqns = _apqns;
+ *nr_apqns = _nr_apqns;
+ rc = 0;
+ }
+
+ kvfree(device_status);
+ return rc;
+}
+EXPORT_SYMBOL(cca_findcard2);
+
+void __exit zcrypt_ccamisc_exit(void)
+{
+ mkvp_cache_free();
+}
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
new file mode 100644
index 000000000..e7105443d
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ * Ingo Franzki <ifranzki@linux.ibm.com>
+ *
+ * Collection of CCA misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_CCAMISC_H_
+#define _ZCRYPT_CCAMISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+/* Key token types */
+#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
+#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal sym key token */
+#define TOKTYPE_CCA_INTERNAL_PKA 0x1f /* CCA internal asym key token */
+
+/* For TOKTYPE_NON_CCA: */
+#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
+#define TOKVER_CLEAR_KEY 0x02 /* Clear key token */
+
+/* For TOKTYPE_CCA_INTERNAL: */
+#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
+#define TOKVER_CCA_VLSC 0x05 /* var length sym cipher key token */
+
+/* Max size of a cca variable length cipher key token */
+#define MAXCCAVLSCTOKENSIZE 725
+
+/* header part of a CCA key token */
+struct keytoken_header {
+ u8 type; /* one of the TOKTYPE values */
+ u8 res0[1];
+ u16 len; /* vlsc token: total length in bytes */
+ u8 version; /* one of the TOKVER values */
+ u8 res1[3];
+} __packed;
+
+/* inside view of a CCA secure key token (only type 0x01 version 0x04) */
+struct secaeskeytoken {
+ u8 type; /* 0x01 for internal key token */
+ u8 res0[3];
+ u8 version; /* should be 0x04 */
+ u8 res1[1];
+ u8 flag; /* key flags */
+ u8 res2[1];
+ u64 mkvp; /* master key verification pattern */
+ u8 key[32]; /* key value (encrypted) */
+ u8 cv[8]; /* control vector */
+ u16 bitsize; /* key bit size */
+ u16 keysize; /* key byte size */
+ u8 tvv[4]; /* token validation value */
+} __packed;
+
+/* inside view of a variable length symmetric cipher AES key token */
+struct cipherkeytoken {
+ u8 type; /* 0x01 for internal key token */
+ u8 res0[1];
+ u16 len; /* total key token length in bytes */
+ u8 version; /* should be 0x05 */
+ u8 res1[3];
+ u8 kms; /* key material state, 0x03 means wrapped with MK */
+ u8 kvpt; /* key verification pattern type, should be 0x01 */
+ u64 mkvp0; /* master key verification pattern, lo part */
+ u64 mkvp1; /* master key verification pattern, hi part (unused) */
+ u8 eskwm; /* encrypted section key wrapping method */
+ u8 hashalg; /* hash algorithm used for the wrapping key */
+ u8 plfver; /* payload format version */
+ u8 res2[1];
+ u8 adsver; /* associated data section version */
+ u8 res3[1];
+ u16 adslen; /* associated data section length */
+ u8 kllen; /* optional key label length */
+ u8 ieaslen; /* optional extended associated data length */
+ u8 uadlen; /* optional user definable associated data length */
+ u8 res4[1];
+ u16 wpllen; /* wrapped payload length in bits: */
+ /* plfver 0x00 0x01 */
+ /* AES-128 512 640 */
+ /* AES-192 576 640 */
+ /* AES-256 640 640 */
+ u8 res5[1];
+ u8 algtype; /* 0x02 for AES cipher */
+ u16 keytype; /* 0x0001 for 'cipher' */
+ u8 kufc; /* key usage field count */
+ u16 kuf1; /* key usage field 1 */
+ u16 kuf2; /* key usage field 2 */
+ u8 kmfc; /* key management field count */
+ u16 kmf1; /* key management field 1 */
+ u16 kmf2; /* key management field 2 */
+ u16 kmf3; /* key management field 3 */
+ u8 vdata[]; /* variable part data follows */
+} __packed;
+
+/* inside view of a CCA secure ECC private key */
+struct eccprivkeytoken {
+ u8 type; /* 0x1f for internal asym key token */
+ u8 version; /* should be 0x00 */
+ u16 len; /* total key token length in bytes */
+ u8 res1[4];
+ u8 secid; /* 0x20 for ECC priv key section marker */
+ u8 secver; /* section version */
+ u16 seclen; /* section length */
+ u8 wtype; /* wrapping method, 0x00 clear, 0x01 AES */
+ u8 htype; /* hash method, 0x02 for SHA-256 */
+ u8 res2[2];
+ u8 kutc; /* key usage and translation control */
+ u8 ctype; /* curve type */
+ u8 kfs; /* key format and security */
+ u8 ksrc; /* key source */
+ u16 pbitlen; /* length of prime p in bits */
+ u16 ibmadlen; /* IBM associated data length in bytes */
+ u64 mkvp; /* master key verification pattern */
+ u8 opk[48]; /* encrypted object protection key data */
+ u16 adatalen; /* associated data length in bytes */
+ u16 fseclen; /* formatted section length in bytes */
+ u8 more_data[]; /* more data follows */
+} __packed;
+
+/* Some defines for the CCA AES cipherkeytoken kmf1 field */
+#define KMF1_XPRT_SYM 0x8000
+#define KMF1_XPRT_UASY 0x4000
+#define KMF1_XPRT_AASY 0x2000
+#define KMF1_XPRT_RAW 0x1000
+#define KMF1_XPRT_CPAC 0x0800
+#define KMF1_XPRT_DES 0x0080
+#define KMF1_XPRT_AES 0x0040
+#define KMF1_XPRT_RSA 0x0008
+
+/*
+ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize);
+
+/*
+ * Simple check if the token is a valid CCA secure AES cipher key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. If checkcpacfexport is enabled, the key is also
+ * checked for the export flag to allow CPACF export.
+ * Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize,
+ int checkcpacfexport);
+
+/*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, size_t keysize,
+ int checkcpacfexport);
+
+/*
+ * Generate (random) CCA AES DATA secure key.
+ */
+int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey);
+
+/*
+ * Generate CCA AES DATA secure key with given clear key value.
+ */
+int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 *seckey);
+
+/*
+ * Derive protected key from a CCA AES DATA secure key.
+ */
+int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 seckey[SECKEYBLOBSIZE],
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
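+
+/*
+ * A rough usage sketch (hypothetical caller, error handling omitted;
+ * 64 bytes is the largest protected key this function returns):
+ *
+ *	u8 protkey[64];
+ *	u32 protkeylen, protkeytype;
+ *	int rc;
+ *
+ *	rc = cca_sec2protkey(cardnr, domain, seckey,
+ *			     protkey, &protkeylen, &protkeytype);
+ */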
+
+/*
+ * Generate (random) CCA AES CIPHER secure key.
+ */
+int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize);
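+
+/*
+ * A rough usage sketch (hypothetical caller, error handling omitted);
+ * *keybufsize is in/out and holds the actual token length on return:
+ *
+ *	u8 keybuf[MAXCCAVLSCTOKENSIZE];
+ *	size_t keybufsize = sizeof(keybuf);
+ *	int rc;
+ *
+ *	rc = cca_gencipherkey(cardnr, domain, 256, 0, keybuf, &keybufsize);
+ */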
+
+/*
+ * Derive protected key from a CCA AES cipher secure key.
+ */
+int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Build CCA AES CIPHER secure key with a given clear key value.
+ */
+int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Derive protected key from a CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Query cryptographic facility from CCA adapter
+ */
+int cca_query_crypto_facility(u16 cardnr, u16 domain,
+ const char *keyword,
+ u8 *rarray, size_t *rarraylen,
+ u8 *varray, size_t *varraylen);
+
+/*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key.
+ * Works with CCA AES data and cipher keys.
+ * Returns < 0 on failure, 0 if CURRENT MKVP matches and
+ * 1 if OLD MKVP matches.
+ */
+int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
+
+/*
+ * Build a list of cca apqns meeting the following constraints:
+ * - apqn is online and is in fact a CCA apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp
+ * - if old_mkvp != 0 only apqns where old_mkvp == mkvp
+ * - if verify is enabled and a cur_mkvp and/or old_mkvp
+ * value is given, then refetch the cca_info and make sure the current
+ * cur_mkvp or old_mkvp values of the apqn are used.
+ * The mktype determines which set of master keys to use:
+ * 0 = AES_MK_SET - AES MK set, 1 = APKA_MK_SET - APKA MK set
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+ int verify);
+
+#define AES_MK_SET 0
+#define APKA_MK_SET 1
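+
+/*
+ * A rough usage sketch for cca_findcard2() (hypothetical caller with
+ * mkvp the u64 pattern to match, error handling omitted); each entry
+ * packs the card number into the upper and the domain number into the
+ * lower 16 bits:
+ *
+ *	u32 *apqns, nr_apqns, i;
+ *
+ *	if (!cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ *			   0, AES_MK_SET, mkvp, 0, 0)) {
+ *		for (i = 0; i < nr_apqns; i++)
+ *			pr_debug("apqn %04x.%04x\n",
+ *				 apqns[i] >> 16, apqns[i] & 0xFFFF);
+ *		kfree(apqns);
+ *	}
+ */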
+
+/* struct to hold info for each CCA queue */
+struct cca_info {
+ int hwtype; /* one of the defined AP_DEVICE_TYPE_* */
+ char new_aes_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_aes_mk_state; /* '1' invalid, '2' valid */
+ char old_aes_mk_state; /* '1' invalid, '2' valid */
+ char new_apka_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_apka_mk_state; /* '1' invalid, '2' valid */
+ char old_apka_mk_state; /* '1' invalid, '2' valid */
+ u64 new_aes_mkvp; /* truncated sha256 of new aes master key */
+ u64 cur_aes_mkvp; /* truncated sha256 of current aes master key */
+ u64 old_aes_mkvp; /* truncated sha256 of old aes master key */
+ u64 new_apka_mkvp; /* truncated sha256 of new apka master key */
+ u64 cur_apka_mkvp; /* truncated sha256 of current apka mk */
+ u64 old_apka_mkvp; /* truncated sha256 of old apka mk */
+ char serial[9]; /* serial number (8 ascii characters + 0x00) */
+};
+
+/*
+ * Fetch CCA information about a CCA queue.
+ */
+int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify);
+
+void zcrypt_ccamisc_exit(void);
+
+#endif /* _ZCRYPT_CCAMISC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
new file mode 100644
index 000000000..226a5612e
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_cex2a.h"
+#include "zcrypt_msgtype50.h"
+
+#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
+#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
+#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
+
+#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
+#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
+
+#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 byte modulus
+ * (max outputdatalength) +
+ * type80_hdr */
+#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg)
+
+#define CEX2A_CLEANUP_TIME (15*HZ)
+#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("CEX2A/CEX3A Cryptographic Coprocessor device driver, " \
+ "Copyright IBM Corp. 2001, 2018");
+MODULE_LICENSE("GPL");
+
+static struct ap_device_id zcrypt_cex2a_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX2A,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3A,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
+
+static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX2A,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3A,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
+
+/**
+ * Probe function for CEX2A card devices. It always accepts the AP device
+ * since the bus_match already checked the card type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
+{
+ /*
+ * Normalized speed ratings per crypto adapter
+ * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+ */
+ static const int CEX2A_SPEED_IDX[] = {
+ 800, 1000, 2000, 900, 1200, 2400, 0, 0};
+ static const int CEX3A_SPEED_IDX[] = {
+ 400, 500, 1000, 450, 550, 1200, 0, 0};
+
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc;
+ int rc = 0;
+
+ zc = zcrypt_card_alloc();
+ if (!zc)
+ return -ENOMEM;
+ zc->card = ac;
+ ac->private = zc;
+
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
+ zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+ zc->speed_rating = CEX2A_SPEED_IDX;
+ zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+ zc->type_string = "CEX2A";
+ zc->user_space_type = ZCRYPT_CEX2A;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
+ zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+ if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+ ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+ zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
+ }
+ zc->speed_rating = CEX3A_SPEED_IDX;
+ zc->type_string = "CEX3A";
+ zc->user_space_type = ZCRYPT_CEX3A;
+ } else {
+ zcrypt_card_free(zc);
+ return -ENODEV;
+ }
+ zc->online = 1;
+
+ rc = zcrypt_card_register(zc);
+ if (rc) {
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX2A card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+ if (zc)
+ zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex2a_card_driver = {
+ .probe = zcrypt_cex2a_card_probe,
+ .remove = zcrypt_cex2a_card_remove,
+ .ids = zcrypt_cex2a_card_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+/**
+ * Probe function for CEX2A queue devices. It always accepts the AP device
+ * since the bus_match already checked the queue type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = NULL;
+ int rc;
+
+ switch (ap_dev->device_type) {
+ case AP_DEVICE_TYPE_CEX2A:
+ zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ break;
+ case AP_DEVICE_TYPE_CEX3A:
+ zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ break;
+ }
+ if (!zq)
+ return -ENODEV;
+ zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
+ zq->queue = aq;
+ zq->online = 1;
+ atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
+ ap_queue_init_reply(aq, &zq->reply);
+ aq->request_timeout = CEX2A_CLEANUP_TIME;
+ aq->private = zq;
+ rc = zcrypt_queue_register(zq);
+ if (rc) {
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX2A queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = aq->private;
+
+ if (zq)
+ zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_cex2a_queue_driver = {
+ .probe = zcrypt_cex2a_queue_probe,
+ .remove = zcrypt_cex2a_queue_remove,
+ .ids = zcrypt_cex2a_queue_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+int __init zcrypt_cex2a_init(void)
+{
+ int rc;
+
+ rc = ap_driver_register(&zcrypt_cex2a_card_driver,
+ THIS_MODULE, "cex2acard");
+ if (rc)
+ return rc;
+
+ rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
+ THIS_MODULE, "cex2aqueue");
+ if (rc)
+ ap_driver_unregister(&zcrypt_cex2a_card_driver);
+
+ return rc;
+}
+
+void __exit zcrypt_cex2a_exit(void)
+{
+ ap_driver_unregister(&zcrypt_cex2a_queue_driver);
+ ap_driver_unregister(&zcrypt_cex2a_card_driver);
+}
+
+module_init(zcrypt_cex2a_init);
+module_exit(zcrypt_cex2a_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
new file mode 100644
index 000000000..7842214d9
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2001, 2006
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _ZCRYPT_CEX2A_H_
+#define _ZCRYPT_CEX2A_H_
+
+/**
+ * The type 50 message family is associated with CEXxA cards.
+ *
+ * The four members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type50_hdr {
+ unsigned char reserved1;
+ unsigned char msg_type_code; /* 0x50 */
+ unsigned short msg_len;
+ unsigned char reserved2;
+ unsigned char ignored;
+ unsigned short reserved3;
+} __packed;
+
+#define TYPE50_TYPE_CODE 0x50
+
+#define TYPE50_MEB1_FMT 0x0001
+#define TYPE50_MEB2_FMT 0x0002
+#define TYPE50_MEB3_FMT 0x0003
+#define TYPE50_CRB1_FMT 0x0011
+#define TYPE50_CRB2_FMT 0x0012
+#define TYPE50_CRB3_FMT 0x0013
+
+/* Mod-Exp, with a small modulus */
+struct type50_meb1_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0001 */
+ unsigned char reserved[6];
+ unsigned char exponent[128];
+ unsigned char modulus[128];
+ unsigned char message[128];
+} __packed;
+
+/* Mod-Exp, with a large modulus */
+struct type50_meb2_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0002 */
+ unsigned char reserved[6];
+ unsigned char exponent[256];
+ unsigned char modulus[256];
+ unsigned char message[256];
+} __packed;
+
+/* Mod-Exp, with a larger modulus */
+struct type50_meb3_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0003 */
+ unsigned char reserved[6];
+ unsigned char exponent[512];
+ unsigned char modulus[512];
+ unsigned char message[512];
+} __packed;
+
+/* CRT, with a small modulus */
+struct type50_crb1_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0011 */
+ unsigned char reserved[6];
+ unsigned char p[64];
+ unsigned char q[64];
+ unsigned char dp[64];
+ unsigned char dq[64];
+ unsigned char u[64];
+ unsigned char message[128];
+} __packed;
+
+/* CRT, with a large modulus */
+struct type50_crb2_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0012 */
+ unsigned char reserved[6];
+ unsigned char p[128];
+ unsigned char q[128];
+ unsigned char dp[128];
+ unsigned char dq[128];
+ unsigned char u[128];
+ unsigned char message[256];
+} __packed;
+
+/* CRT, with a larger modulus */
+struct type50_crb3_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0013 */
+ unsigned char reserved[6];
+ unsigned char p[256];
+ unsigned char q[256];
+ unsigned char dp[256];
+ unsigned char dq[256];
+ unsigned char u[256];
+ unsigned char message[512];
+} __packed;
+
+/**
+ * The type 80 response family is associated with CEXxA cards.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+
+#define TYPE80_RSP_CODE 0x80
+
+struct type80_hdr {
+ unsigned char reserved1;
+ unsigned char type; /* 0x80 */
+ unsigned short len;
+ unsigned char code; /* 0x00 */
+ unsigned char reserved2[3];
+ unsigned char reserved3[8];
+} __packed;
+
+int zcrypt_cex2a_init(void);
+void zcrypt_cex2a_exit(void);
+
+#endif /* _ZCRYPT_CEX2A_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
new file mode 100644
index 000000000..7a8cbdbe4
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2001, 2018
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_cex2c.h"
+#include "zcrypt_cca_key.h"
+#include "zcrypt_ccamisc.h"
+
+#define CEX2C_MIN_MOD_SIZE 16 /* 128 bits */
+#define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */
+#define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */
+#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
+#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12*1024)
+#define CEX2C_CLEANUP_TIME (15*HZ)
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \
+ "Copyright IBM Corp. 2001, 2018");
+MODULE_LICENSE("GPL");
+
+static struct ap_device_id zcrypt_cex2c_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX2C,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3C,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_card_ids);
+
+static struct ap_device_id zcrypt_cex2c_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX2C,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX3C,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids);
+
+/*
+ * CCA card additional device attributes
+ */
+static ssize_t cca_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cca_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ if (ap_domain_index >= 0)
+ cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+}
+
+static struct device_attribute dev_attr_cca_serialnr =
+ __ATTR(serialnr, 0444, cca_serialnr_show, NULL);
+
+static struct attribute *cca_card_attrs[] = {
+ &dev_attr_cca_serialnr.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_card_attr_grp = {
+ .attrs = cca_card_attrs,
+};
+
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n = 0;
+ struct cca_info ci;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ static const char * const cao_state[] = { "invalid", "valid" };
+ static const char * const new_state[] = { "empty", "partial", "full" };
+
+ memset(&ci, 0, sizeof(ci));
+
+ cca_get_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &ci, zq->online);
+
+ if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_aes_mk_state - '1'],
+ ci.new_aes_mkvp);
+ else
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+
+ if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_aes_mk_state - '1'],
+ ci.cur_aes_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+
+ if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_aes_mk_state - '1'],
+ ci.old_aes_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+
+ if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA NEW: %s 0x%016llx\n",
+ new_state[ci.new_apka_mk_state - '1'],
+ ci.new_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+
+ if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_apka_mk_state - '1'],
+ ci.cur_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+
+ if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA OLD: %s 0x%016llx\n",
+ cao_state[ci.old_apka_mk_state - '1'],
+ ci.old_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+
+ return n;
+}
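+
+/*
+ * Illustrative output of the mkvps attribute (one line per AES/APKA
+ * master key register; the verification pattern values below are
+ * made up):
+ *
+ *   AES NEW: empty 0x0000000000000000
+ *   AES CUR: valid 0xb072bc5c245aab8a
+ *   AES OLD: invalid 0x0000000000000000
+ *   APKA NEW: - -
+ *   APKA CUR: - -
+ *   APKA OLD: - -
+ */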
+
+static struct device_attribute dev_attr_cca_mkvps =
+ __ATTR(mkvps, 0444, cca_mkvps_show, NULL);
+
+static struct attribute *cca_queue_attrs[] = {
+ &dev_attr_cca_mkvps.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_queue_attr_grp = {
+ .attrs = cca_queue_attrs,
+};
+
+/**
+ * Large random number detection function. It sends a message to a CEX2C/CEX3C
+ * card to find out if large random numbers are supported.
+ * @aq: pointer to the AP queue.
+ *
+ * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
+ */
+static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
+{
+ struct ap_message ap_msg;
+ unsigned long long psmid;
+ unsigned int domain;
+ struct {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct CPRBX cprbx;
+ } __packed *reply;
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ char function_code[2];
+ short int rule_length;
+ char rule[8];
+ short int verb_length;
+ short int key_length;
+ } __packed *msg;
+ int rc, i;
+
+ ap_init_message(&ap_msg);
+ ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.msg)
+ return -ENOMEM;
+
+ rng_type6CPRB_msgX(&ap_msg, 4, &domain);
+
+ msg = ap_msg.msg;
+ msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
+
+ rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.msg, ap_msg.len);
+ if (rc)
+ goto out_free;
+
+ /* Wait for the test message to complete. */
+ for (i = 0; i < 2 * HZ; i++) {
+ msleep(1000 / HZ);
+ rc = ap_recv(aq->qid, &psmid, ap_msg.msg, 4096);
+ if (rc == 0 && psmid == 0x0102030405060708ULL)
+ break;
+ }
+
+ if (i >= 2 * HZ) {
+ /* Got no answer. */
+ rc = -ENODEV;
+ goto out_free;
+ }
+
+ reply = ap_msg.msg;
+ if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
+ rc = 1;
+ else
+ rc = 0;
+out_free:
+ free_page((unsigned long) ap_msg.msg);
+ return rc;
+}
+
+/**
+ * Probe function for CEX2C/CEX3C card devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP card device.
+ */
+static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
+{
+ /*
+ * Normalized speed ratings per crypto adapter
+ * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+ */
+ static const int CEX2C_SPEED_IDX[] = {
+ 1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
+ static const int CEX3C_SPEED_IDX[] = {
+ 500, 700, 1400, 550, 800, 1500, 80, 10};
+
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc;
+ int rc = 0;
+
+ zc = zcrypt_card_alloc();
+ if (!zc)
+ return -ENOMEM;
+ zc->card = ac;
+ ac->private = zc;
+ switch (ac->ap_dev.device_type) {
+ case AP_DEVICE_TYPE_CEX2C:
+ zc->user_space_type = ZCRYPT_CEX2C;
+ zc->type_string = "CEX2C";
+ zc->speed_rating = CEX2C_SPEED_IDX;
+ zc->min_mod_size = CEX2C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX2C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE;
+ break;
+ case AP_DEVICE_TYPE_CEX3C:
+ zc->user_space_type = ZCRYPT_CEX3C;
+ zc->type_string = "CEX3C";
+ zc->speed_rating = CEX3C_SPEED_IDX;
+ zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
+ break;
+ default:
+ zcrypt_card_free(zc);
+ return -ENODEV;
+ }
+ zc->online = 1;
+
+ rc = zcrypt_card_register(zc);
+ if (rc) {
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ return rc;
+ }
+
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_card_attr_grp);
+ if (rc) {
+ zcrypt_card_unregister(zc);
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX2C/CEX3C card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
+{
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+	struct zcrypt_card *zc = ac->private;
+
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+ if (zc)
+ zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex2c_card_driver = {
+ .probe = zcrypt_cex2c_card_probe,
+ .remove = zcrypt_cex2c_card_remove,
+ .ids = zcrypt_cex2c_card_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+/**
+ * Probe function for CEX2C/CEX3C queue devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP queue device.
+ */
+static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq;
+ int rc;
+
+ zq = zcrypt_queue_alloc(CEX2C_MAX_XCRB_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->queue = aq;
+ zq->online = 1;
+ atomic_set(&zq->load, 0);
+ ap_rapq(aq->qid);
+ rc = zcrypt_cex2c_rng_supported(aq);
+ if (rc < 0) {
+ zcrypt_queue_free(zq);
+ return rc;
+ }
+ if (rc)
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_DEFAULT);
+ else
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_NORNG);
+ ap_queue_init_state(aq);
+ ap_queue_init_reply(aq, &zq->reply);
+ aq->request_timeout = CEX2C_CLEANUP_TIME;
+ aq->private = zq;
+ rc = zcrypt_queue_register(zq);
+ if (rc) {
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ return rc;
+ }
+
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_queue_attr_grp);
+ if (rc) {
+ zcrypt_queue_unregister(zq);
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX2C/CEX3C queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = aq->private;
+
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+ if (zq)
+ zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_cex2c_queue_driver = {
+ .probe = zcrypt_cex2c_queue_probe,
+ .remove = zcrypt_cex2c_queue_remove,
+ .ids = zcrypt_cex2c_queue_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+int __init zcrypt_cex2c_init(void)
+{
+ int rc;
+
+ rc = ap_driver_register(&zcrypt_cex2c_card_driver,
+ THIS_MODULE, "cex2card");
+ if (rc)
+ return rc;
+
+ rc = ap_driver_register(&zcrypt_cex2c_queue_driver,
+ THIS_MODULE, "cex2cqueue");
+ if (rc)
+ ap_driver_unregister(&zcrypt_cex2c_card_driver);
+
+ return rc;
+}
+
+void zcrypt_cex2c_exit(void)
+{
+ ap_driver_unregister(&zcrypt_cex2c_queue_driver);
+ ap_driver_unregister(&zcrypt_cex2c_card_driver);
+}
+
+module_init(zcrypt_cex2c_init);
+module_exit(zcrypt_cex2c_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex2c.h b/drivers/s390/crypto/zcrypt_cex2c.h
new file mode 100644
index 000000000..6ec405c2b
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2c.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2001, 2018
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_CEX2C_H_
+#define _ZCRYPT_CEX2C_H_
+
+int zcrypt_cex2c_init(void);
+void zcrypt_cex2c_exit(void);
+
+#endif /* _ZCRYPT_CEX2C_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
new file mode 100644
index 000000000..f5195bca1
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -0,0 +1,712 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2012, 2019
+ * Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+#include "zcrypt_error.h"
+#include "zcrypt_cex4.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
+
+#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
+#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
+#define CEX4A_MAX_MOD_SIZE_4K 512 /* 4096 bits */
+
+#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
+#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
+
+#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
+#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
+
+/* Waiting time for requests to be processed.
+ * Currently there are some types of requests which are not deterministic.
+ * But the maximum time limit managed by the stomper code is set to 60 sec.
+ * Hence we have to wait at least that time period.
+ */
+#define CEX4_CLEANUP_TIME (900*HZ)
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("CEX4/CEX5/CEX6/CEX7 Cryptographic Card device driver, " \
+ "Copyright IBM Corp. 2019");
+MODULE_LICENSE("GPL");
+
+static struct ap_device_id zcrypt_cex4_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids);
+
+static struct ap_device_id zcrypt_cex4_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
+
+/*
+ * CCA card additional device attributes
+ */
+static ssize_t cca_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cca_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ if (ap_domain_index >= 0)
+ cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
+}
+
+static struct device_attribute dev_attr_cca_serialnr =
+ __ATTR(serialnr, 0444, cca_serialnr_show, NULL);
+
+static struct attribute *cca_card_attrs[] = {
+ &dev_attr_cca_serialnr.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_card_attr_grp = {
+ .attrs = cca_card_attrs,
+};
+
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n = 0;
+ struct cca_info ci;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ static const char * const cao_state[] = { "invalid", "valid" };
+ static const char * const new_state[] = { "empty", "partial", "full" };
+
+ memset(&ci, 0, sizeof(ci));
+
+ cca_get_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &ci, zq->online);
+
+ if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_aes_mk_state - '1'],
+ ci.new_aes_mkvp);
+ else
+ n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
+
+ if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_aes_mk_state - '1'],
+ ci.cur_aes_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
+
+ if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_aes_mk_state - '1'],
+ ci.old_aes_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+
+ if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA NEW: %s 0x%016llx\n",
+ new_state[ci.new_apka_mk_state - '1'],
+ ci.new_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+
+ if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_apka_mk_state - '1'],
+ ci.cur_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+
+ if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA OLD: %s 0x%016llx\n",
+ cao_state[ci.old_apka_mk_state - '1'],
+ ci.old_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_cca_mkvps =
+ __ATTR(mkvps, 0444, cca_mkvps_show, NULL);
+
+static struct attribute *cca_queue_attrs[] = {
+ &dev_attr_cca_mkvps.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_queue_attr_grp = {
+ .attrs = cca_queue_attrs,
+};
+
+/*
+ * EP11 card additional device attributes
+ */
+static ssize_t ep11_api_ordinalnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.API_ord_nr > 0)
+ return scnprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+ else
+ return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_api_ordinalnr =
+ __ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
+
+static ssize_t ep11_fw_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.FW_version > 0)
+ return scnprintf(buf, PAGE_SIZE, "%d.%d\n",
+ (int)(ci.FW_version >> 8),
+ (int)(ci.FW_version & 0xFF));
+ else
+ return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_fw_version =
+ __ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
+
+static ssize_t ep11_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.serial[0])
+ return scnprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+ else
+ return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_serialnr =
+ __ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
+
+static const struct {
+ int mode_bit;
+ const char *mode_txt;
+} ep11_op_modes[] = {
+ { 0, "FIPS2009" },
+ { 1, "BSI2009" },
+ { 2, "FIPS2011" },
+ { 3, "BSI2011" },
+ { 6, "BSICC2017" },
+ { 0, NULL }
+};
+
+static ssize_t ep11_card_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, n = 0;
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+
+ return n;
+}
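+
+/*
+ * Illustrative example: with op_mode bits 0 and 6 set, the attribute
+ * reads "FIPS2009 BSICC2017\n"; an op_mode of 0 yields just "\n".
+ */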
+
+static struct device_attribute dev_attr_ep11_card_op_modes =
+ __ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
+
+static struct attribute *ep11_card_attrs[] = {
+ &dev_attr_ep11_api_ordinalnr.attr,
+ &dev_attr_ep11_fw_version.attr,
+ &dev_attr_ep11_serialnr.attr,
+ &dev_attr_ep11_card_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_card_attr_grp = {
+ .attrs = ep11_card_attrs,
+};
+
+/*
+ * EP11 queue additional device attributes
+ */
+
+static ssize_t ep11_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n = 0;
+ struct ep11_domain_info di;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ static const char * const cwk_state[] = { "invalid", "valid" };
+ static const char * const nwk_state[] = { "empty", "uncommitted",
+ "committed" };
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di);
+
+ if (di.cur_wk_state == '0') {
+ n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+ cwk_state[di.cur_wk_state - '0']);
+ } else if (di.cur_wk_state == '1') {
+ n = scnprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+ cwk_state[di.cur_wk_state - '0']);
+ bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
+ n += 2 * sizeof(di.cur_wkvp);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+ } else
+ n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+
+ if (di.new_wk_state == '0') {
+ n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+ nwk_state[di.new_wk_state - '0']);
+ } else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
+ n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+ nwk_state[di.new_wk_state - '0']);
+ bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
+ n += 2 * sizeof(di.new_wkvp);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+ } else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+
+ return n;
+}
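+
+/*
+ * Illustrative output of the EP11 mkvps attribute (the wrapping key
+ * verification patterns are 32 bytes, i.e. 64 hex digits; values
+ * are made up):
+ *
+ *   WK CUR: valid 0x<64 hex digits>
+ *   WK NEW: uncommitted 0x<64 hex digits>
+ */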
+
+static struct device_attribute dev_attr_ep11_mkvps =
+ __ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
+
+static ssize_t ep11_queue_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, n = 0;
+ struct ep11_domain_info di;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_queue_op_modes =
+ __ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
+
+static struct attribute *ep11_queue_attrs[] = {
+ &dev_attr_ep11_mkvps.attr,
+ &dev_attr_ep11_queue_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_queue_attr_grp = {
+ .attrs = ep11_queue_attrs,
+};
+
+/**
+ * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
+ * accepts the AP device since the bus_match already checked
+ * the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
+{
+ /*
+ * Normalized speed ratings per crypto adapter
+ * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+ */
+ static const int CEX4A_SPEED_IDX[NUM_OPS] = {
+ 14, 19, 249, 42, 228, 1458, 0, 0};
+ static const int CEX5A_SPEED_IDX[NUM_OPS] = {
+ 8, 9, 20, 18, 66, 458, 0, 0};
+ static const int CEX6A_SPEED_IDX[NUM_OPS] = {
+ 6, 9, 20, 17, 65, 438, 0, 0};
+ static const int CEX7A_SPEED_IDX[NUM_OPS] = {
+ 6, 8, 17, 15, 54, 362, 0, 0};
+
+ static const int CEX4C_SPEED_IDX[NUM_OPS] = {
+ 59, 69, 308, 83, 278, 2204, 209, 40};
+	static const int CEX5C_SPEED_IDX[NUM_OPS] = {
+ 24, 31, 50, 37, 90, 479, 27, 10};
+ static const int CEX6C_SPEED_IDX[NUM_OPS] = {
+ 16, 20, 32, 27, 77, 455, 24, 9};
+ static const int CEX7C_SPEED_IDX[NUM_OPS] = {
+ 14, 16, 26, 23, 64, 376, 23, 8};
+
+ static const int CEX4P_SPEED_IDX[NUM_OPS] = {
+ 0, 0, 0, 0, 0, 0, 0, 50};
+ static const int CEX5P_SPEED_IDX[NUM_OPS] = {
+ 0, 0, 0, 0, 0, 0, 0, 10};
+ static const int CEX6P_SPEED_IDX[NUM_OPS] = {
+ 0, 0, 0, 0, 0, 0, 0, 9};
+ static const int CEX7P_SPEED_IDX[NUM_OPS] = {
+ 0, 0, 0, 0, 0, 0, 0, 8};
+
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc;
+ int rc = 0;
+
+ zc = zcrypt_card_alloc();
+ if (!zc)
+ return -ENOMEM;
+ zc->card = ac;
+ ac->private = zc;
+ if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+ zc->type_string = "CEX4A";
+ zc->user_space_type = ZCRYPT_CEX4;
+ zc->speed_rating = CEX4A_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
+ zc->type_string = "CEX5A";
+ zc->user_space_type = ZCRYPT_CEX5;
+ zc->speed_rating = CEX5A_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
+ zc->type_string = "CEX6A";
+ zc->user_space_type = ZCRYPT_CEX6;
+ zc->speed_rating = CEX6A_SPEED_IDX;
+ } else {
+ zc->type_string = "CEX7A";
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
+ zc->speed_rating = CEX7A_SPEED_IDX;
+ }
+ zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
+ if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+ ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+ zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
+ zc->max_exp_bit_length =
+ CEX4A_MAX_MOD_SIZE_4K;
+ } else {
+ zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K;
+ zc->max_exp_bit_length =
+ CEX4A_MAX_MOD_SIZE_2K;
+ }
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+ zc->type_string = "CEX4C";
+ /* wrong user space type, must be CEX4
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ zc->speed_rating = CEX4C_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
+ zc->type_string = "CEX5C";
+ /* wrong user space type, must be CEX5
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ zc->speed_rating = CEX5C_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
+ zc->type_string = "CEX6C";
+ /* wrong user space type, must be CEX6
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ zc->speed_rating = CEX6C_SPEED_IDX;
+ } else {
+ zc->type_string = "CEX7C";
+ /* wrong user space type, must be CEX7
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ zc->speed_rating = CEX7C_SPEED_IDX;
+ }
+ zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+ zc->type_string = "CEX4P";
+ zc->user_space_type = ZCRYPT_CEX4;
+ zc->speed_rating = CEX4P_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
+ zc->type_string = "CEX5P";
+ zc->user_space_type = ZCRYPT_CEX5;
+ zc->speed_rating = CEX5P_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
+ zc->type_string = "CEX6P";
+ zc->user_space_type = ZCRYPT_CEX6;
+ zc->speed_rating = CEX6P_SPEED_IDX;
+ } else {
+ zc->type_string = "CEX7P";
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
+ zc->speed_rating = CEX7P_SPEED_IDX;
+ }
+ zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+ } else {
+ zcrypt_card_free(zc);
+ return -ENODEV;
+ }
+ zc->online = 1;
+
+ rc = zcrypt_card_register(zc);
+ if (rc) {
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ return rc;
+ }
+
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_card_attr_grp);
+ if (rc) {
+ zcrypt_card_unregister(zc);
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_card_attr_grp);
+ if (rc) {
+ zcrypt_card_unregister(zc);
+ ac->private = NULL;
+ zcrypt_card_free(zc);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver
+ * information if an AP card device is removed.
+ */
+static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
+{
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+ struct zcrypt_card *zc = ac->private;
+
+ if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+ else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
+ if (zc)
+ zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex4_card_driver = {
+ .probe = zcrypt_cex4_card_probe,
+ .remove = zcrypt_cex4_card_remove,
+ .ids = zcrypt_cex4_card_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+/**
+ * Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always
+ * accepts the AP device since the bus_match already checked
+ * the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq;
+ int rc;
+
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
+ zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
+ MSGTYPE50_VARIANT_DEFAULT);
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_DEFAULT);
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+ zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ if (!zq)
+ return -ENOMEM;
+ zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+ MSGTYPE06_VARIANT_EP11);
+ } else {
+ return -ENODEV;
+ }
+
+ zq->queue = aq;
+ zq->online = 1;
+ atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
+ ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = CEX4_CLEANUP_TIME;
+ aq->private = zq;
+ rc = zcrypt_queue_register(zq);
+ if (rc) {
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ return rc;
+ }
+
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_queue_attr_grp);
+ if (rc) {
+ zcrypt_queue_unregister(zq);
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_queue_attr_grp);
+ if (rc) {
+ zcrypt_queue_unregister(zq);
+ aq->private = NULL;
+ zcrypt_queue_free(zq);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver
+ * information if an AP queue device is removed.
+ */
+static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
+{
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+ struct zcrypt_queue *zq = aq->private;
+
+ if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+ else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
+ if (zq)
+ zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_cex4_queue_driver = {
+ .probe = zcrypt_cex4_queue_probe,
+ .remove = zcrypt_cex4_queue_remove,
+ .ids = zcrypt_cex4_queue_ids,
+ .flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+int __init zcrypt_cex4_init(void)
+{
+ int rc;
+
+ rc = ap_driver_register(&zcrypt_cex4_card_driver,
+ THIS_MODULE, "cex4card");
+ if (rc)
+ return rc;
+
+ rc = ap_driver_register(&zcrypt_cex4_queue_driver,
+ THIS_MODULE, "cex4queue");
+ if (rc)
+ ap_driver_unregister(&zcrypt_cex4_card_driver);
+
+ return rc;
+}
+
+void __exit zcrypt_cex4_exit(void)
+{
+ ap_driver_unregister(&zcrypt_cex4_queue_driver);
+ ap_driver_unregister(&zcrypt_cex4_card_driver);
+}
+
+module_init(zcrypt_cex4_init);
+module_exit(zcrypt_cex4_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex4.h b/drivers/s390/crypto/zcrypt_cex4.h
new file mode 100644
index 000000000..748390a37
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex4.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2012
+ * Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_CEX4_H_
+#define _ZCRYPT_CEX4_H_
+
+int zcrypt_cex4_init(void);
+void zcrypt_cex4_exit(void);
+
+#endif /* _ZCRYPT_CEX4_H_ */
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
new file mode 100644
index 000000000..3225489a1
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
+ * Harald Freudenberger <freude@de.ibm.com>
+ */
+#ifndef ZCRYPT_DEBUG_H
+#define ZCRYPT_DEBUG_H
+
+#include <asm/debug.h>
+
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 5 /* informational */
+#define DBF_DEBUG 6 /* for debugging only */
+
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
+#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
+
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define ZCRYPT_DBF(...) \
+ debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
+#define ZCRYPT_DBF_ERR(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define ZCRYPT_DBF_WARN(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define ZCRYPT_DBF_INFO(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
+#define ZCRYPT_DBF_DBG(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
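+
+/*
+ * Typical usage (illustrative; card, dom and rc are hypothetical
+ * local variables of the caller):
+ *
+ *   ZCRYPT_DBF_WARN("%s no such queue %d.%d\n", __func__, card, dom);
+ *   ZCRYPT_DBF_ERR("%s rc=%d\n", __func__, rc);
+ */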
+
+extern debug_info_t *zcrypt_dbf_info;
+
+int zcrypt_debug_init(void);
+void zcrypt_debug_exit(void);
+
+#endif /* ZCRYPT_DEBUG_H */
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
new file mode 100644
index 000000000..3daf259ba
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -0,0 +1,1470 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+#include <crypto/aes.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ep11misc.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* default iv used here */
+static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+
+/* ep11 card info cache */
+struct card_list_entry {
+ struct list_head list;
+ u16 cardnr;
+ struct ep11_card_info info;
+};
+static LIST_HEAD(card_list);
+static DEFINE_SPINLOCK(card_list_lock);
+
+static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
+{
+ int rc = -ENOENT;
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ memcpy(ci, &ptr->info, sizeof(*ci));
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&card_list_lock);
+
+ return rc;
+}
+
+static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
+{
+ int found = 0;
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+ if (!ptr) {
+ spin_unlock_bh(&card_list_lock);
+ return;
+ }
+ ptr->cardnr = cardnr;
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ list_add(&ptr->list, &card_list);
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+static void card_cache_scrub(u16 cardnr)
+{
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ break;
+ }
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+static void __exit card_cache_free(void)
+{
+ struct card_list_entry *ptr, *pnext;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry_safe(ptr, pnext, &card_list, list) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+ spin_unlock_bh(&card_list_lock);
+}
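+
+/*
+ * Usage pattern for this cache (see ep11_get_card_info() below):
+ * try card_cache_fetch() first and query the card only on a cache
+ * miss or when a verified result is requested; a query that fails
+ * with -ENODEV scrubs the stale entry.
+ */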
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
+ */
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp)
+{
+ struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
+ struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+ DBF("%s key check failed, keylen %zu < %zu\n",
+ __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (hdr->type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) hdr->type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (hdr->hver != 0x00) {
+ if (dbg)
+ DBF("%s key check failed, header version 0x%02x != 0x00\n",
+ __func__, (int) hdr->hver);
+ return -EINVAL;
+ }
+ if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) hdr->version, TOKVER_EP11_AES_WITH_HEADER);
+ return -EINVAL;
+ }
+ if (hdr->len > keylen) {
+ if (dbg)
+ DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ __func__, (int) hdr->len, keylen);
+ return -EINVAL;
+ }
+ if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+ if (dbg)
+ DBF("%s key check failed, header len %d < %zu\n",
+ __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+ __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
+
+/*
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
+ */
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp)
+{
+ struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
+ struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+ DBF("%s key check failed, keylen %zu < %zu\n",
+ __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (hdr->type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) hdr->type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (hdr->hver != 0x00) {
+ if (dbg)
+ DBF("%s key check failed, header version 0x%02x != 0x00\n",
+ __func__, (int) hdr->hver);
+ return -EINVAL;
+ }
+ if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) hdr->version, TOKVER_EP11_ECC_WITH_HEADER);
+ return -EINVAL;
+ }
+ if (hdr->len > keylen) {
+ if (dbg)
+ DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ __func__, (int) hdr->len, keylen);
+ return -EINVAL;
+ }
+ if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+ if (dbg)
+ DBF("%s key check failed, header len %d < %zu\n",
+ __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+ __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
+ */
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp)
+{
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (keylen < sizeof(*kb)) {
+ DBF("%s key check failed, keylen %zu < %zu\n",
+ __func__, keylen, sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->head.type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (kb->head.version != TOKVER_EP11_AES) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.version, TOKVER_EP11_AES);
+ return -EINVAL;
+ }
+ if (kb->head.len > keylen) {
+ if (dbg)
+ DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ __func__, (int) kb->head.len, keylen);
+ return -EINVAL;
+ }
+ if (kb->head.len < sizeof(*kb)) {
+ if (dbg)
+ DBF("%s key check failed, header len %d < %zu\n",
+ __func__, (int) kb->head.len, sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+ __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_aes_key);
+
+/*
+ * Allocate and prepare ep11 cprb plus additional payload.
+ */
+static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
+{
+ size_t len = sizeof(struct ep11_cprb) + payload_len;
+ struct ep11_cprb *cprb;
+
+ cprb = kzalloc(len, GFP_KERNEL);
+ if (!cprb)
+ return NULL;
+
+ cprb->cprb_len = sizeof(struct ep11_cprb);
+ cprb->cprb_ver_id = 0x04;
+ memcpy(cprb->func_id, "T4", 2);
+ cprb->ret_code = 0xFFFFFFFF;
+ cprb->payload_len = payload_len;
+
+ return cprb;
+}
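+
+/*
+ * Callers place their request payload directly behind the cprb,
+ * e.g. (pattern used by all request builders in this file):
+ *
+ *   req = alloc_cprb(sizeof(struct keygen_req_pl));
+ *   req_pl = (struct keygen_req_pl *)((u8 *)req + sizeof(*req));
+ */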
+
+/*
+ * Some helper functions related to ASN1 encoding.
+ * Limited to length info <= 2 byte.
+ */
+
+#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
+
+static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
+{
+ ptr[0] = tag;
+ if (valuelen > 255) {
+ ptr[1] = 0x82;
+ *((u16 *)(ptr + 2)) = valuelen;
+ memcpy(ptr + 4, pvalue, valuelen);
+ return 4 + valuelen;
+ }
+ if (valuelen > 127) {
+ ptr[1] = 0x81;
+ ptr[2] = (u8) valuelen;
+ memcpy(ptr + 3, pvalue, valuelen);
+ return 3 + valuelen;
+ }
+ ptr[1] = (u8) valuelen;
+ memcpy(ptr + 2, pvalue, valuelen);
+ return 2 + valuelen;
+}
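+
+/*
+ * Worked examples of the three DER length forms written above
+ * (illustrative; tag 0x04 = OCTET STRING, u16 length stored in
+ * host byte order, which is big-endian on s390):
+ *
+ *   valuelen   5: 04 05 <value>         2 + 5 bytes total
+ *   valuelen 200: 04 81 C8 <value>      3 + 200 bytes total
+ *   valuelen 300: 04 82 01 2C <value>   4 + 300 bytes total
+ *
+ * ASN1TAGLEN() yields exactly these totals for a given value length.
+ */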
+
+/* EP11 payload > 127 bytes starts with this struct */
+struct pl_head {
+ u8 tag;
+ u8 lenfmt;
+ u16 len;
+ u8 func_tag;
+ u8 func_len;
+ u32 func;
+ u8 dom_tag;
+ u8 dom_len;
+ u32 dom;
+} __packed;
+
+/* prep ep11 payload head helper function */
+static inline void prep_head(struct pl_head *h,
+ size_t pl_size, int api, int func)
+{
+ h->tag = 0x30;
+ h->lenfmt = 0x82;
+ h->len = pl_size - 4;
+ h->func_tag = 0x04;
+ h->func_len = sizeof(u32);
+ h->func = (api << 16) + func;
+ h->dom_tag = 0x04;
+ h->dom_len = sizeof(u32);
+}
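+
+/*
+ * Example (illustrative): for api = 1 and func = 38 ("get xcp info")
+ * h->func becomes 0x00010026. Note that h->dom is not written here;
+ * the cprb buffer comes from kzalloc() via alloc_cprb(), so the
+ * domain value stays zero unless a caller fills it in.
+ */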
+
+/* prep urb helper function */
+static inline void prep_urb(struct ep11_urb *u,
+ struct ep11_target_dev *t, int nt,
+ struct ep11_cprb *req, size_t req_len,
+ struct ep11_cprb *rep, size_t rep_len)
+{
+ u->targets = (u8 __user *) t;
+ u->targets_num = nt;
+ u->req = (u8 __user *) req;
+ u->req_len = req_len;
+ u->resp = (u8 __user *) rep;
+ u->resp_len = rep_len;
+}
+
+/* Check ep11 reply payload, return 0 or suggested errno value. */
+static int check_reply_pl(const u8 *pl, const char *func)
+{
+ int len;
+ u32 ret;
+
+ /* start tag */
+ if (*pl++ != 0x30) {
+ DEBUG_ERR("%s reply start tag mismatch\n", func);
+ return -EIO;
+ }
+
+ /* payload length format */
+ if (*pl < 127) {
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x81) {
+ pl++;
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x82) {
+ pl++;
+ len = *((u16 *)pl);
+ pl += 2;
+ } else {
+ DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+ func, *pl);
+ return -EIO;
+ }
+
+ /* len should cover at least 3 fields with 32 bit value each */
+ if (len < 3 * 6) {
+ DEBUG_ERR("%s reply length %d too small\n", func, len);
+ return -EIO;
+ }
+
+ /* function tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s function tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* dom tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s dom tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* return value tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s return value tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 2;
+ ret = *((u32 *)pl);
+ if (ret != 0) {
+ DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
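+
+/*
+ * Shape of a typical reply payload as parsed above (illustrative,
+ * shown with the two byte length format):
+ *
+ *   30 82 <len:u16>       sequence tag + length
+ *   04 04 <func:u32>      function field
+ *   04 04 <dom:u32>       domain field
+ *   04 04 00 00 00 00     return value, must be 0
+ *   ...                   payload specific fields (e.g. the data tag)
+ */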
+
+
+/*
+ * Helper function which does an ep11 query with given query type.
+ */
+static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
+ size_t buflen, u8 *buf)
+{
+ struct ep11_info_req_pl {
+ struct pl_head head;
+ u8 query_type_tag;
+ u8 query_type_len;
+ u32 query_type;
+ u8 query_subtype_tag;
+ u8 query_subtype_len;
+ u32 query_subtype;
+ } __packed * req_pl;
+ struct ep11_info_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ int api = 1, rc = -ENOMEM;
+
+ /* request cprb and payload */
+ req = alloc_cprb(sizeof(struct ep11_info_req_pl));
+ if (!req)
+ goto out;
+ req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req));
+ prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
+ req_pl->query_type_tag = 0x04;
+ req_pl->query_type_len = sizeof(u32);
+ req_pl->query_type = query_type;
+ req_pl->query_subtype_tag = 0x04;
+ req_pl->query_subtype_len = sizeof(u32);
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
+ if (!rep)
+ goto out;
+ rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = cardnr;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + sizeof(*req_pl),
+ rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
+
+ rc = zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > buflen) {
+ DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
+{
+ int rc;
+ struct ep11_module_query_info {
+ u32 API_ord_nr;
+ u32 firmware_id;
+ u8 FW_major_vers;
+ u8 FW_minor_vers;
+ u8 CSP_major_vers;
+ u8 CSP_minor_vers;
+ u8 fwid[32];
+ u8 xcp_config_hash[32];
+ u8 CSP_config_hash[32];
+ u8 serial[16];
+ u8 module_date_time[16];
+ u64 op_mode;
+ u32 PKCS11_flags;
+ u32 ext_flags;
+ u32 domains;
+ u32 sym_state_bytes;
+ u32 digest_state_bytes;
+ u32 pin_blob_bytes;
+ u32 SPKI_bytes;
+ u32 priv_key_blob_bytes;
+ u32 sym_blob_bytes;
+ u32 max_payload_bytes;
+ u32 CP_profile_bytes;
+ u32 max_CP_index;
+ } __packed * pmqi = NULL;
+
+ rc = card_cache_fetch(card, info);
+ if (rc || verify) {
+ pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
+ if (!pmqi)
+ return -ENOMEM;
+ rc = ep11_query_info(card, AUTOSEL_DOM,
+ 0x01 /* module info query */,
+ sizeof(*pmqi), (u8 *) pmqi);
+ if (rc) {
+ if (rc == -ENODEV)
+ card_cache_scrub(card);
+ goto out;
+ }
+ memset(info, 0, sizeof(*info));
+ info->API_ord_nr = pmqi->API_ord_nr;
+ info->FW_version =
+ (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+ memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+ info->op_mode = pmqi->op_mode;
+ card_cache_update(card, info);
+ }
+
+out:
+ kfree(pmqi);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_card_info);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
+{
+ int rc;
+ struct ep11_domain_query_info {
+ u32 dom_index;
+ u8 cur_WK_VP[32];
+ u8 new_WK_VP[32];
+ u32 dom_flags;
+ u64 op_mode;
+ } __packed * p_dom_info;
+
+ p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
+ if (!p_dom_info)
+ return -ENOMEM;
+
+ rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
+ sizeof(*p_dom_info), (u8 *) p_dom_info);
+ if (rc)
+ goto out;
+
+ memset(info, 0, sizeof(*info));
+ info->cur_wk_state = '0';
+ info->new_wk_state = '0';
+ if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
+ if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
+ info->cur_wk_state = '1';
+ memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
+ }
+ if (p_dom_info->dom_flags & 0x04 /* new wk present */
+ || p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+ info->new_wk_state =
+ p_dom_info->dom_flags & 0x08 ? '2' : '1';
+ memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
+ }
+ }
+ info->op_mode = p_dom_info->op_mode;
+
+out:
+ kfree(p_dom_info);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_domain_info);
+
+/*
+ * Default EP11 AES key generate attributes, used when no keygenflags given:
+ * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
+ */
+#define KEY_ATTR_DEFAULTS 0x00200c00
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ struct keygen_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 keybytes_tag;
+ u8 keybytes_len;
+ u32 keybytes;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_val_len_type;
+ u32 attr_val_len_value;
+ u8 pin_tag;
+ u8 pin_len;
+ } __packed * req_pl;
+ struct keygen_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ int api, rc = -ENOMEM;
+
+ switch (keybitsize) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* request cprb and payload */
+ req = alloc_cprb(sizeof(struct keygen_req_pl));
+ if (!req)
+ goto out;
+ req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+ prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ req_pl->keybytes_tag = 0x04;
+ req_pl->keybytes_len = sizeof(u32);
+ req_pl->keybytes = keybitsize / 8;
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32);
+ req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 5 * sizeof(u32);
+ req_pl->attr_header = 0x10010000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ req_pl->pin_tag = 0x04;
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct keygen_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + sizeof(*req_pl),
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob and set header values */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+ kb = (struct ep11keyblob *) keybuf;
+ kb->head.type = TOKTYPE_NON_CCA;
+ kb->head.len = rep_pl->data_len;
+ kb->head.version = TOKVER_EP11_AES;
+ kb->head.bitlen = keybitsize;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_genaeskey);
+
+static int ep11_cryptsingle(u16 card, u16 domain,
+ u16 mode, u32 mech, const u8 *iv,
+ const u8 *key, size_t keysize,
+ const u8 *inbuf, size_t inbufsize,
+ u8 *outbuf, size_t *outbufsize)
+{
+ struct crypt_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * maybe followed by iv data
+ * followed by key tag + key blob
+ * followed by plaintext tag + plaintext
+ */
+ } __packed * req_pl;
+ struct crypt_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ /* data follows */
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ size_t req_pl_size, rep_pl_size;
+ int n, api = 1, rc = -ENOMEM;
+ u8 *p;
+
+ /* the simple asn1 coding used has length limits */
+ if (keysize > 0xFFFF || inbufsize > 0xFFFF)
+ return -EINVAL;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req));
+ prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key and input data */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ p += asn1tag_write(p, 0x04, inbuf, inbufsize);
+
+ /* reply cprb and payload, assume out data size <= in data size + 32 */
+ rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
+ rep = alloc_cprb(rep_pl_size);
+ if (!rep)
+ goto out;
+ rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + rep_pl_size);
+
+ rc = zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ p = ((u8 *) rep_pl) + sizeof(*rep_pl);
+ if (rep_pl->data_lenfmt <= 127)
+ n = rep_pl->data_lenfmt;
+ else if (rep_pl->data_lenfmt == 0x81)
+ n = *p++;
+ else if (rep_pl->data_lenfmt == 0x82) {
+ n = *((u16 *) p);
+ p += 2;
+ } else {
+ DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
+ __func__, rep_pl->data_lenfmt);
+ rc = -EIO;
+ goto out;
+ }
+ if (n > *outbufsize) {
+ DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+ __func__, n, *outbufsize);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(outbuf, p, n);
+ *outbufsize = n;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+static int ep11_unwrapkey(u16 card, u16 domain,
+ const u8 *kek, size_t keksize,
+ const u8 *enckey, size_t enckeysize,
+ u32 mech, const u8 *iv,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ struct uw_req_pl {
+ struct pl_head head;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_key_type;
+ u32 attr_key_type_value;
+ u32 attr_val_len;
+ u32 attr_val_len_value;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * maybe followed by iv data
+ * followed by kek tag + kek blob
+ * followed by empty mac tag
+ * followed by empty pin tag
+		 * followed by encrypted key tag + bytes
+ */
+ } __packed * req_pl;
+ struct uw_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ size_t req_pl_size;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+ prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 7 * sizeof(u32);
+ req_pl->attr_header = 0x10020000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
+ req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
+ req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* kek */
+ p += asn1tag_write(p, 0x04, kek, keksize);
+ /* empty mac key tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* empty pin tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* encrypted key value tag and bytes */
+ p += asn1tag_write(p, 0x04, enckey, enckeysize);
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct uw_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob and set header values */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+ kb = (struct ep11keyblob *) keybuf;
+ kb->head.type = TOKTYPE_NON_CCA;
+ kb->head.len = rep_pl->data_len;
+ kb->head.version = TOKVER_EP11_AES;
+ kb->head.bitlen = keybitsize;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+static int ep11_wrapkey(u16 card, u16 domain,
+ const u8 *key, size_t keysize,
+ u32 mech, const u8 *iv,
+ u8 *databuf, size_t *datasize)
+{
+ struct wk_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * followed by iv data
+ * followed by key tag + key blob
+ * followed by dummy kek param
+ * followed by dummy mac param
+ */
+ } __packed * req_pl;
+ struct wk_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[1024];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ size_t req_pl_size;
+ int api, rc = -ENOMEM;
+ bool has_header = false;
+ u8 *p;
+
+ /* maybe the session field holds a header with key info */
+ kb = (struct ep11keyblob *) key;
+ if (kb->head.type == TOKTYPE_NON_CCA &&
+ kb->head.version == TOKVER_EP11_AES) {
+ has_header = true;
+ keysize = kb->head.len < keysize ? kb->head.len : keysize;
+ }
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + 4;
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ if (!mech || mech == 0x80060001)
+ req->flags |= 0x20; /* CPACF_WRAP needs special bit */
+ req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
+ prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key blob */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ /* maybe the key argument needs the head data cleaned out */
+ if (has_header) {
+ kb = (struct ep11keyblob *)(p - keysize);
+ memset(&kb->head, 0, sizeof(kb->head));
+ }
+ /* empty kek tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* empty mac tag */
+ *p++ = 0x04;
+ *p++ = 0;
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct wk_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *datasize) {
+ DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy the data from the cprb to the data buffer */
+ memcpy(databuf, rep_pl->data, rep_pl->data_len);
+ *datasize = rep_pl->data_len;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ struct ep11keyblob *kb;
+ u8 encbuf[64], *kek = NULL;
+ size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+
+ if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256)
+ clrkeylen = keybitsize / 8;
+ else {
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+
+ /* allocate memory for the temp kek */
+ keklen = MAXEP11AESKEYBLOBSIZE;
+ kek = kmalloc(keklen, GFP_ATOMIC);
+ if (!kek) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Step 1: generate AES 256 bit random kek key */
+ rc = ep11_genaeskey(card, domain, 256,
+ 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+ kek, &keklen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s generate kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
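+	/*
+	 * Clear the header in the kek blob's session field - it is a
+	 * linux internal add-on (see struct ep11keyblob) and apparently
+	 * must not reach the card as part of the raw kek blob.
+	 */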
+ kb = (struct ep11keyblob *) kek;
+ memset(&kb->head, 0, sizeof(kb->head));
+
+ /* Step 2: encrypt clear key value with the kek key */
+ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+ clrkey, clrkeylen, encbuf, &encbuflen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s encrypting key value with kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Step 3: import the encrypted key value as a new key */
+ rc = ep11_unwrapkey(card, domain, kek, keklen,
+ encbuf, encbuflen, 0, def_iv,
+ keybitsize, 0, keybuf, keybufsize);
+ if (rc) {
+ DEBUG_ERR(
+			"%s importing key value as new key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+out:
+ kfree(kek);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_clr2keyblob);
+
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc = -EIO;
+ u8 *wkbuf = NULL;
+ size_t wkbuflen, keylen;
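+	/* layout of the key info block delivered by the
+	 * CKM_IBM_CPACF_WRAP rewrap performed below
+	 */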
+ struct wk_info {
+ u16 version;
+ u8 res1[16];
+ u32 pkeytype;
+ u32 pkeybitsize;
+ u64 pkeysize;
+ u8 res2[8];
+ u8 pkey[0];
+ } __packed * wki;
+ const u8 *key;
+ struct ep11kblob_header *hdr;
+
+ /* key with or without header ? */
+ hdr = (struct ep11kblob_header *) keyblob;
+ if (hdr->type == TOKTYPE_NON_CCA
+ && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+ || hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+ && is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) {
+ /* EP11 AES or ECC key with header */
+ key = keyblob + sizeof(struct ep11kblob_header);
+ keylen = hdr->len - sizeof(struct ep11kblob_header);
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES
+ && is_ep11_keyblob(keyblob)) {
+ /* EP11 AES key (old style) */
+ key = keyblob;
+ keylen = hdr->len;
+ } else if (is_ep11_keyblob(keyblob)) {
+ /* raw EP11 key blob */
+ key = keyblob;
+ keylen = keybloblen;
+ } else
+ return -EINVAL;
+
+ /* alloc temp working buffer */
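+	/* next multiple of AES_BLOCK_SIZE above keylen (a full extra
+	 * block when keylen is already aligned), presumably to leave
+	 * room for the block cipher padding of the wrapped key
+	 */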
+ wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
+ wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
+ if (!wkbuf)
+ return -ENOMEM;
+
+ /* ep11 secure key -> protected key + info */
+ rc = ep11_wrapkey(card, dom, key, keylen,
+ 0, def_iv, wkbuf, &wkbuflen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s rewrapping ep11 key to pkey failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ wki = (struct wk_info *) wkbuf;
+
+ /* check struct version and pkey type */
+ if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
+ DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+ __func__, (int) wki->version, (int) wki->pkeytype);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check protected key type field */
+ switch (wki->pkeytype) {
+ case 1: /* AES */
+ switch (wki->pkeysize) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n",
+ __func__, (int) wki->pkeysize);
+ rc = -EIO;
+ goto out;
+ }
+ break;
+ case 3: /* EC-P */
+ case 4: /* EC-ED */
+ case 5: /* EC-BP */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_ECC;
+ break;
+ case 2: /* TDES */
+ default:
+ DEBUG_ERR("%s unknown/unsupported key type %d\n",
+ __func__, (int) wki->pkeytype);
+ rc = -EIO;
+ goto out;
+ }
+
+	/* copy the translated protected key */
+ if (wki->pkeysize > *protkeylen) {
+ DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
+ __func__, wki->pkeysize, *protkeylen);
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(protkey, wki->pkey, wki->pkeysize);
+ *protkeylen = wki->pkeysize;
+
+out:
+ kfree(wkbuf);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_kblob2protkey);
+
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp)
+{
+ struct zcrypt_device_status_ext *device_status;
+ u32 *_apqns = NULL, _nr_apqns = 0;
+ int i, card, dom, rc = -ENOMEM;
+ struct ep11_domain_info edi;
+ struct ep11_card_info eci;
+
+ /* fetch status of all crypto cards */
+ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* allocate 1k space for up to 256 apqns */
+ _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!_apqns) {
+ kvfree(device_status);
+ return -ENOMEM;
+ }
+
+	/* walk through all the crypto apqns */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for ep11 functions */
+ if (!(device_status[i].functions & 0x01))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* check min hardware type */
+ if (minhwtype && device_status[i].hwtype < minhwtype)
+ continue;
+ /* check min api version if given */
+ if (minapi > 0) {
+ if (ep11_get_card_info(card, &eci, 0))
+ continue;
+ if (minapi > eci.API_ord_nr)
+ continue;
+ }
+ /* check wkvp if given */
+ if (wkvp) {
+ if (ep11_get_domain_info(card, dom, &edi))
+ continue;
+ if (edi.cur_wk_state != '1')
+ continue;
+ if (memcmp(wkvp, edi.cur_wkvp, 16))
+ continue;
+ }
+		/* apqn passed all filter criteria, add to the array */
+ if (_nr_apqns < 256)
+ _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+ }
+
+ /* nothing found ? */
+ if (!_nr_apqns) {
+ kfree(_apqns);
+ rc = -ENODEV;
+ } else {
+		/* no re-allocation, simply return the _apqns array */
+ *apqns = _apqns;
+ *nr_apqns = _nr_apqns;
+ rc = 0;
+ }
+
+ kvfree(device_status);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_findcard2);
+
+void __exit zcrypt_ep11misc_exit(void)
+{
+ card_cache_free();
+}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
new file mode 100644
index 000000000..d424fa901
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_EP11MISC_H_
+#define _ZCRYPT_EP11MISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#define EP11_API_V 4 /* highest known and supported EP11 API version */
+#define EP11_STRUCT_MAGIC 0x1234
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x00200000
+
+/*
+ * Internally used values for the version field of the key header.
+ * Should match to the enum pkey_key_type in pkey.h.
+ */
+#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob (old style) */
+#define TOKVER_EP11_AES_WITH_HEADER 0x06 /* EP11 AES key blob with header */
+#define TOKVER_EP11_ECC_WITH_HEADER 0x07 /* EP11 ECC key blob with header */
+
+/* inside view of an EP11 secure key blob */
+struct ep11keyblob {
+ union {
+ u8 session[32];
+ /* only used for PKEY_TYPE_EP11: */
+ struct ep11kblob_header head;
+ };
+ u8 wkvp[16]; /* wrapping key verification pattern */
+ u64 attr; /* boolean key attributes */
+ u64 mode; /* mode bits */
+ u16 version; /* 0x1234, EP11_STRUCT_MAGIC */
+ u8 iv[14];
+ u8 encrypted_key_data[144];
+ u8 mac[32];
+} __packed;
+
+/* check ep11 key magic to find out if this is an ep11 key blob */
+static inline bool is_ep11_keyblob(const u8 *key)
+{
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ return (kb->version == EP11_STRUCT_MAGIC);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp);
+
+/*
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp);
+
+/* EP11 card info struct */
+struct ep11_card_info {
+ u32 API_ord_nr; /* API ordinal number */
+ u16 FW_version; /* Firmware major and minor version */
+ char serial[16]; /* serial number string (16 ascii, no 0x00 !) */
+ u64 op_mode; /* card operational mode(s) */
+};
+
+/* EP11 domain info struct */
+struct ep11_domain_info {
+ char cur_wk_state; /* '0' invalid, '1' valid */
+ char new_wk_state; /* '0' empty, '1' uncommitted, '2' committed */
+ u8 cur_wkvp[32]; /* current wrapping key verification pattern */
+ u8 new_wkvp[32]; /* new wrapping key verification pattern */
+ u64 op_mode; /* domain operational mode(s) */
+};
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+
+/*
+ * Generate (random) EP11 AES secure key.
+ */
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Generate EP11 AES secure key with given clear key value.
+ */
+int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Build a list of ep11 apqns meeting the following constraints:
+ * - apqn is online and is in fact an EP11 apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if minapi > 0 only apqns with API_ord_nr >= minapi
+ * - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches
+ * to the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
+ *   key for this domain. When a wkvp is given there will always be a re-fetch
+ *   of the domain info for the potential apqn - so this triggers a request
+ *   and reply round trip to each eligible apqn.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp);
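+/*
+ * A minimal usage sketch, assuming any online EP11 apqn will do:
+ *
+ *	u32 *apqns, nr_apqns;
+ *	int rc;
+ *
+ *	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF, 0, 0, NULL);
+ *	if (!rc) {
+ *		... use apqns[0] .. apqns[nr_apqns - 1] ...
+ *		kfree(apqns);
+ *	}
+ */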
+
+/*
+ * Derive a protected key from an EP11 key blob (AES and ECC keys).
+ */
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+void zcrypt_ep11misc_exit(void);
+
+#endif /* _ZCRYPT_EP11MISC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
new file mode 100644
index 000000000..39e626e3a
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2001, 2006
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _ZCRYPT_ERROR_H_
+#define _ZCRYPT_ERROR_H_
+
+#include <linux/atomic.h>
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+#include "zcrypt_msgtype6.h"
+
+/**
+ * Reply Messages
+ *
+ * Error reply messages are of two types:
+ * 82: Error (see below)
+ * 88: Error (see below)
+ * Both type 82 and type 88 have the same structure in the header.
+ *
+ * Request reply messages are of three known types:
+ * 80: Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
+ * 84: Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
+ * 86: Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
+ *
+ */
+struct error_hdr {
+ unsigned char reserved1; /* 0x00 */
+ unsigned char type; /* 0x82 or 0x88 */
+ unsigned char reserved2[2]; /* 0x0000 */
+ unsigned char reply_code; /* reply code */
+ unsigned char reserved3[3]; /* 0x000000 */
+};
+
+#define TYPE82_RSP_CODE 0x82
+#define TYPE88_RSP_CODE 0x88
+
+#define REP82_ERROR_MACHINE_FAILURE 0x10
+#define REP82_ERROR_PREEMPT_FAILURE 0x12
+#define REP82_ERROR_CHECKPT_FAILURE 0x14
+#define REP82_ERROR_MESSAGE_TYPE 0x20
+#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
+#define REP82_ERROR_INVALID_MSG_LEN 0x23
+#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
+#define REP82_ERROR_FORMAT_FIELD 0x29
+#define REP82_ERROR_INVALID_COMMAND 0x30
+#define REP82_ERROR_MALFORMED_MSG 0x40
+#define REP82_ERROR_INVALID_SPECIAL_CMD 0x41
+#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
+#define REP82_ERROR_WORD_ALIGNMENT 0x60
+#define REP82_ERROR_MESSAGE_LENGTH 0x80
+#define REP82_ERROR_OPERAND_INVALID 0x82
+#define REP82_ERROR_OPERAND_SIZE 0x84
+#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
+#define REP82_ERROR_RESERVED_FIELD 0x88
+#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
+#define REP82_ERROR_TRANSPORT_FAIL 0x90
+#define REP82_ERROR_PACKET_TRUNCATED 0xA0
+#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
+
+#define REP88_ERROR_MODULE_FAILURE 0x10
+#define REP88_ERROR_MESSAGE_TYPE 0x20
+#define REP88_ERROR_MESSAGE_MALFORMD 0x22
+#define REP88_ERROR_MESSAGE_LENGTH 0x23
+#define REP88_ERROR_RESERVED_FIELD 0x24
+#define REP88_ERROR_KEY_TYPE 0x34
+#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
+#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
+#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
+
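+/*
+ * Map a type 82/88 error reply onto an errno: malformed requests
+ * yield -EINVAL, card or transport failures trigger an AP bus rescan
+ * and yield -EAGAIN, anything else is assumed retryable (-EAGAIN).
+ */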
+static inline int convert_error(struct zcrypt_queue *zq,
+ struct ap_message *reply)
+{
+ struct error_hdr *ehdr = reply->msg;
+ int card = AP_QID_CARD(zq->queue->qid);
+ int queue = AP_QID_QUEUE(zq->queue->qid);
+
+ switch (ehdr->reply_code) {
+ case REP82_ERROR_INVALID_MSG_LEN: /* 0x23 */
+ case REP82_ERROR_RESERVD_FIELD: /* 0x24 */
+ case REP82_ERROR_FORMAT_FIELD: /* 0x29 */
+ case REP82_ERROR_MALFORMED_MSG: /* 0x40 */
+ case REP82_ERROR_INVALID_SPECIAL_CMD: /* 0x41 */
+ case REP82_ERROR_MESSAGE_LENGTH: /* 0x80 */
+ case REP82_ERROR_OPERAND_INVALID: /* 0x82 */
+ case REP82_ERROR_OPERAND_SIZE: /* 0x84 */
+ case REP82_ERROR_EVEN_MOD_IN_OPND: /* 0x85 */
+ case REP82_ERROR_INVALID_DOMAIN_PENDING: /* 0x8A */
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR: /* 0x8B */
+ case REP82_ERROR_PACKET_TRUNCATED: /* 0xA0 */
+ case REP88_ERROR_MESSAGE_MALFORMD: /* 0x22 */
+ case REP88_ERROR_KEY_TYPE: /* 0x34 */
+ /* RY indicates malformed request */
+ ZCRYPT_DBF(DBF_WARN,
+ "dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
+ card, queue, ehdr->reply_code);
+ return -EINVAL;
+ case REP82_ERROR_MACHINE_FAILURE: /* 0x10 */
+ case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */
+ case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */
+ /*
+ * Msg to wrong type or card/infrastructure failure.
+ * Trigger rescan of the ap bus, trigger retry request.
+ */
+ atomic_set(&zcrypt_rescan_req, 1);
+ /* For type 86 response show the apfs value (failure reason) */
+ if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
+ ehdr->type == TYPE86_RSP_CODE) {
+ struct {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ } __packed * head = reply->msg;
+ unsigned int apfs = *((u32 *)head->fmt2.apfs);
+
+ ZCRYPT_DBF(DBF_WARN,
+ "dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
+ card, queue, ehdr->reply_code, apfs);
+ } else
+ ZCRYPT_DBF(DBF_WARN,
+ "dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
+ return -EAGAIN;
+ default:
+ /* Assume request is valid and a retry will be worth it */
+ ZCRYPT_DBF(DBF_WARN,
+ "dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
+ return -EAGAIN;
+ }
+}
+
+#endif /* _ZCRYPT_ERROR_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
new file mode 100644
index 000000000..bf14ee445
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_msgtype50.h"
+
+/* >= CEX3A: 4096 bits */
+#define CEX3A_MAX_MOD_SIZE 512
+
+/* CEX2A: max outputdatalength + type80_hdr */
+#define CEX2A_MAX_RESPONSE_SIZE 0x110
+
+/* >= CEX3A: 512 bit modulus, (max outputdatalength) + type80_hdr */
+#define CEX3A_MAX_RESPONSE_SIZE 0x210
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
+ "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+/**
+ * The type 50 message family is associated with CEXxA cards.
+ *
+ * The six members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type50_hdr {
+ unsigned char reserved1;
+ unsigned char msg_type_code; /* 0x50 */
+ unsigned short msg_len;
+ unsigned char reserved2;
+ unsigned char ignored;
+ unsigned short reserved3;
+} __packed;
+
+#define TYPE50_TYPE_CODE 0x50
+
+#define TYPE50_MEB1_FMT 0x0001
+#define TYPE50_MEB2_FMT 0x0002
+#define TYPE50_MEB3_FMT 0x0003
+#define TYPE50_CRB1_FMT 0x0011
+#define TYPE50_CRB2_FMT 0x0012
+#define TYPE50_CRB3_FMT 0x0013
+
+/* Mod-Exp, with a small modulus */
+struct type50_meb1_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0001 */
+ unsigned char reserved[6];
+ unsigned char exponent[128];
+ unsigned char modulus[128];
+ unsigned char message[128];
+} __packed;
+
+/* Mod-Exp, with a large modulus */
+struct type50_meb2_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0002 */
+ unsigned char reserved[6];
+ unsigned char exponent[256];
+ unsigned char modulus[256];
+ unsigned char message[256];
+} __packed;
+
+/* Mod-Exp, with a larger modulus */
+struct type50_meb3_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0003 */
+ unsigned char reserved[6];
+ unsigned char exponent[512];
+ unsigned char modulus[512];
+ unsigned char message[512];
+} __packed;
+
+/* CRT, with a small modulus */
+struct type50_crb1_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0011 */
+ unsigned char reserved[6];
+ unsigned char p[64];
+ unsigned char q[64];
+ unsigned char dp[64];
+ unsigned char dq[64];
+ unsigned char u[64];
+ unsigned char message[128];
+} __packed;
+
+/* CRT, with a large modulus */
+struct type50_crb2_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0012 */
+ unsigned char reserved[6];
+ unsigned char p[128];
+ unsigned char q[128];
+ unsigned char dp[128];
+ unsigned char dq[128];
+ unsigned char u[128];
+ unsigned char message[256];
+} __packed;
+
+/* CRT, with a larger modulus */
+struct type50_crb3_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0013 */
+ unsigned char reserved[6];
+ unsigned char p[256];
+ unsigned char q[256];
+ unsigned char dp[256];
+ unsigned char dq[256];
+ unsigned char u[256];
+ unsigned char message[512];
+} __packed;
+
+/**
+ * The type 80 response family is associated with CEXxA cards.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+
+#define TYPE80_RSP_CODE 0x80
+
+struct type80_hdr {
+ unsigned char reserved1;
+ unsigned char type; /* 0x80 */
+ unsigned short len;
+ unsigned char code; /* 0x00 */
+ unsigned char reserved2[3];
+ unsigned char reserved3[8];
+} __packed;
+
+unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
+{
+
+ if (!mex->inputdatalength)
+ return -EINVAL;
+
+ if (mex->inputdatalength <= 128) /* 1024 bit */
+ *fcode = MEX_1K;
+ else if (mex->inputdatalength <= 256) /* 2048 bit */
+ *fcode = MEX_2K;
+ else /* 4096 bit */
+ *fcode = MEX_4K;
+
+ return 0;
+}
+
+unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
+{
+
+ if (!crt->inputdatalength)
+ return -EINVAL;
+
+ if (crt->inputdatalength <= 128) /* 1024 bit */
+ *fcode = CRT_1K;
+ else if (crt->inputdatalength <= 256) /* 2048 bit */
+ *fcode = CRT_2K;
+ else /* 4096 bit */
+ *fcode = CRT_4K;
+
+ return 0;
+}
+
+/**
+ * Convert an ICAMEX message to a type50 MEX message.
+ *
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo *mex)
+{
+ unsigned char *mod, *exp, *inp;
+ int mod_len;
+
+ mod_len = mex->inputdatalength;
+
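+	/*
+	 * The operand fields below are fixed size; values are copied in
+	 * right-justified, so each pointer addresses the last mod_len
+	 * bytes of its field and the left part stays zero filled.
+	 */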
+ if (mod_len <= 128) {
+ struct type50_meb1_msg *meb1 = ap_msg->msg;
+
+ memset(meb1, 0, sizeof(*meb1));
+ ap_msg->len = sizeof(*meb1);
+ meb1->header.msg_type_code = TYPE50_TYPE_CODE;
+ meb1->header.msg_len = sizeof(*meb1);
+ meb1->keyblock_type = TYPE50_MEB1_FMT;
+ mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
+ exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
+ inp = meb1->message + sizeof(meb1->message) - mod_len;
+ } else if (mod_len <= 256) {
+ struct type50_meb2_msg *meb2 = ap_msg->msg;
+
+ memset(meb2, 0, sizeof(*meb2));
+ ap_msg->len = sizeof(*meb2);
+ meb2->header.msg_type_code = TYPE50_TYPE_CODE;
+ meb2->header.msg_len = sizeof(*meb2);
+ meb2->keyblock_type = TYPE50_MEB2_FMT;
+ mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
+ exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
+ inp = meb2->message + sizeof(meb2->message) - mod_len;
+ } else if (mod_len <= 512) {
+ struct type50_meb3_msg *meb3 = ap_msg->msg;
+
+ memset(meb3, 0, sizeof(*meb3));
+ ap_msg->len = sizeof(*meb3);
+ meb3->header.msg_type_code = TYPE50_TYPE_CODE;
+ meb3->header.msg_len = sizeof(*meb3);
+ meb3->keyblock_type = TYPE50_MEB3_FMT;
+ mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
+ exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
+ inp = meb3->message + sizeof(meb3->message) - mod_len;
+ } else
+ return -EINVAL;
+
+ if (copy_from_user(mod, mex->n_modulus, mod_len) ||
+ copy_from_user(exp, mex->b_key, mod_len) ||
+ copy_from_user(inp, mex->inputdata, mod_len))
+ return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+ ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
+ return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type50 CRT message.
+ *
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ int mod_len, short_len;
+ unsigned char *p, *q, *dp, *dq, *u, *inp;
+
+ mod_len = crt->inputdatalength;
+ short_len = (mod_len + 1) / 2;
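+	/* p, q, dp, dq and u are each half the modulus size, rounded up */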
+
+ /*
+ * CEX2A and CEX3A w/o FW update can handle requests up to
+ * 256 byte modulus (2k keys).
+ * CEX3A with FW update and newer CEXxA cards are able to handle
+ * 512 byte modulus (4k keys).
+ */
+ if (mod_len <= 128) { /* up to 1024 bit key size */
+ struct type50_crb1_msg *crb1 = ap_msg->msg;
+
+ memset(crb1, 0, sizeof(*crb1));
+ ap_msg->len = sizeof(*crb1);
+ crb1->header.msg_type_code = TYPE50_TYPE_CODE;
+ crb1->header.msg_len = sizeof(*crb1);
+ crb1->keyblock_type = TYPE50_CRB1_FMT;
+ p = crb1->p + sizeof(crb1->p) - short_len;
+ q = crb1->q + sizeof(crb1->q) - short_len;
+ dp = crb1->dp + sizeof(crb1->dp) - short_len;
+ dq = crb1->dq + sizeof(crb1->dq) - short_len;
+ u = crb1->u + sizeof(crb1->u) - short_len;
+ inp = crb1->message + sizeof(crb1->message) - mod_len;
+ } else if (mod_len <= 256) { /* up to 2048 bit key size */
+ struct type50_crb2_msg *crb2 = ap_msg->msg;
+
+ memset(crb2, 0, sizeof(*crb2));
+ ap_msg->len = sizeof(*crb2);
+ crb2->header.msg_type_code = TYPE50_TYPE_CODE;
+ crb2->header.msg_len = sizeof(*crb2);
+ crb2->keyblock_type = TYPE50_CRB2_FMT;
+ p = crb2->p + sizeof(crb2->p) - short_len;
+ q = crb2->q + sizeof(crb2->q) - short_len;
+ dp = crb2->dp + sizeof(crb2->dp) - short_len;
+ dq = crb2->dq + sizeof(crb2->dq) - short_len;
+ u = crb2->u + sizeof(crb2->u) - short_len;
+ inp = crb2->message + sizeof(crb2->message) - mod_len;
+ } else if ((mod_len <= 512) && /* up to 4096 bit key size */
+ (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
+ struct type50_crb3_msg *crb3 = ap_msg->msg;
+
+ memset(crb3, 0, sizeof(*crb3));
+ ap_msg->len = sizeof(*crb3);
+ crb3->header.msg_type_code = TYPE50_TYPE_CODE;
+ crb3->header.msg_len = sizeof(*crb3);
+ crb3->keyblock_type = TYPE50_CRB3_FMT;
+ p = crb3->p + sizeof(crb3->p) - short_len;
+ q = crb3->q + sizeof(crb3->q) - short_len;
+ dp = crb3->dp + sizeof(crb3->dp) - short_len;
+ dq = crb3->dq + sizeof(crb3->dq) - short_len;
+ u = crb3->u + sizeof(crb3->u) - short_len;
+ inp = crb3->message + sizeof(crb3->message) - mod_len;
+ } else
+ return -EINVAL;
+
+ /*
+	 * correct the offsets of p, bp and mult_inv according to zcrypt.h:
+	 * the values are block size right aligned, so skip the first byte
+ */
+ if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
+ copy_from_user(q, crt->nq_prime, short_len) ||
+ copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) ||
+ copy_from_user(dq, crt->bq_key, short_len) ||
+ copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) ||
+ copy_from_user(inp, crt->inputdata, mod_len))
+ return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+ ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
+ return 0;
+}
+
+/**
+ * Copy results from a type 80 reply message back to user space.
+ *
+ * @zq: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int convert_type80(struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ struct type80_hdr *t80h = reply->msg;
+ unsigned char *data;
+
+ if (t80h->len < sizeof(*t80h) + outputdatalength) {
+		/* The result is too short, the CEXxA card may not do that. */
+ zq->online = 0;
+ pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ t80h->code);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ t80h->code);
+ return -EAGAIN;
+ }
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
+ BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+ else
+ BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
+ data = reply->msg + t80h->len - outputdatalength;
+ if (copy_to_user(outputdata, data, outputdatalength))
+ return -EFAULT;
+ return 0;
+}
+
+static int convert_response_cex2a(struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ /* Response type byte is the second byte in the response. */
+ unsigned char rtype = ((unsigned char *) reply->msg)[1];
+
+ switch (rtype) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return convert_error(zq, reply);
+ case TYPE80_RSP_CODE:
+ return convert_type80(zq, reply,
+ outputdata, outputdatalength);
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zq->online = 0;
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) rtype);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) rtype);
+ return -EAGAIN;
+ }
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @aq: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_cex2a_receive(struct ap_queue *aq,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct type80_hdr *t80h;
+ int len;
+
+ /* Copy the reply message to the request message buffer. */
+ if (!reply)
+ goto out; /* ap_msg->rc indicates the error */
+ t80h = reply->msg;
+ if (t80h->type == TYPE80_RSP_CODE) {
+ if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
+ len = min_t(int, CEX2A_MAX_RESPONSE_SIZE, t80h->len);
+ else
+ len = min_t(int, CEX3A_MAX_RESPONSE_SIZE, t80h->len);
+ memcpy(msg->msg, reply->msg, len);
+ } else
+ memcpy(msg->msg, reply->msg, sizeof(error_reply));
+out:
+ complete((struct completion *) msg->private);
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the CEXxA
+ * device to handle a modexpo request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ * CEXxA device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
+ struct ica_rsa_modexpo *mex,
+ struct ap_message *ap_msg)
+{
+ struct completion work;
+ int rc;
+
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
+ ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
+ else
+ ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_cex2a_receive;
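+	/* unique program supplied message id: caller pid in the upper
+	 * 32 bits, a global increasing counter in the lower 32 bits
+	 */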
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = &work;
+ rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
+ if (rc)
+ goto out;
+ init_completion(&work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
+ rc = wait_for_completion_interruptible(&work);
+ if (rc == 0) {
+ rc = ap_msg->rc;
+ if (rc == 0)
+ rc = convert_response_cex2a(zq, ap_msg,
+ mex->outputdata,
+ mex->outputdatalength);
+ } else
+ /* Signal pending. */
+ ap_cancel_message(zq->queue, ap_msg);
+out:
+ ap_msg->private = NULL;
+ return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the CEXxA
+ * device to handle a modexpo_crt request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ * CEXxA device to the request distributor
+ * @crt: pointer to the modexpoc_crt request buffer
+ */
+static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
+ struct ica_rsa_modexpo_crt *crt,
+ struct ap_message *ap_msg)
+{
+ struct completion work;
+ int rc;
+
+ if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
+ ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
+ else
+ ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_cex2a_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = &work;
+ rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
+ if (rc)
+ goto out;
+ init_completion(&work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
+ rc = wait_for_completion_interruptible(&work);
+ if (rc == 0) {
+ rc = ap_msg->rc;
+ if (rc == 0)
+ rc = convert_response_cex2a(zq, ap_msg,
+ crt->outputdata,
+ crt->outputdatalength);
+ } else
+ /* Signal pending. */
+ ap_cancel_message(zq->queue, ap_msg);
+out:
+ ap_msg->private = NULL;
+ return rc;
+}
+
+/**
+ * The crypto operations for message type 50.
+ */
+static struct zcrypt_ops zcrypt_msgtype50_ops = {
+ .rsa_modexpo = zcrypt_cex2a_modexpo,
+ .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
+ .owner = THIS_MODULE,
+ .name = MSGTYPE50_NAME,
+ .variant = MSGTYPE50_VARIANT_DEFAULT,
+};
+
+void __init zcrypt_msgtype50_init(void)
+{
+ zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
+}
+
+void __exit zcrypt_msgtype50_exit(void)
+{
+ zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
+}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
new file mode 100644
index 000000000..66bec4f45
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_MSGTYPE50_H_
+#define _ZCRYPT_MSGTYPE50_H_
+
+#define MSGTYPE50_NAME "zcrypt_msgtype50"
+#define MSGTYPE50_VARIANT_DEFAULT 0
+
+#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
+#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /* sizeof(struct type50_crb3_msg) */
+
+#define MSGTYPE_ADJUSTMENT 0x08 /* type04 extension (not needed in type50) */
+
+unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
+unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
+
+void zcrypt_msgtype50_init(void);
+void zcrypt_msgtype50_exit(void);
+
+#endif /* _ZCRYPT_MSGTYPE50_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
new file mode 100644
index 000000000..307f90657
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -0,0 +1,1374 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_cca_key.h"
+
+#define CEXXC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
+
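+/* round up to the next multiple of 4 */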
+#define CEIL4(x) ((((x)+3)/4)*4)
+
+struct response_type {
+ struct completion work;
+ int type;
+};
+#define CEXXC_RESPONSE_TYPE_ICA 0
+#define CEXXC_RESPONSE_TYPE_XCRB 1
+#define CEXXC_RESPONSE_TYPE_EP11 2
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
+ "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+/**
+ * CPRB
+ * Note that all shorts, ints and longs are little-endian.
+ * All pointer fields are 32 bits long, and mean nothing
+ *
+ * A request CPRB is followed by a request_parameter_block.
+ *
+ * The request (or reply) parameter block is organized thus:
+ * function code
+ * VUD block
+ * key block
+ */
+struct CPRB {
+ unsigned short cprb_len; /* CPRB length */
+ unsigned char cprb_ver_id; /* CPRB version id. */
+ unsigned char pad_000; /* Alignment pad byte. */
+ unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
+ unsigned char srpi_verb; /* SRPI verb type */
+ unsigned char flags; /* flags */
+ unsigned char func_id[2]; /* function id */
+ unsigned char checkpoint_flag; /* */
+ unsigned char resv2; /* reserved */
+ unsigned short req_parml; /* request parameter buffer */
+ /* length 16-bit little endian */
+ unsigned char req_parmp[4]; /* request parameter buffer *
+ * pointer (means nothing: the *
+ * parameter buffer follows *
+ * the CPRB). */
+ unsigned char req_datal[4]; /* request data buffer */
+ /* length ULELONG */
+ unsigned char req_datap[4]; /* request data buffer */
+ /* pointer */
+ unsigned short rpl_parml; /* reply parameter buffer */
+ /* length 16-bit little endian */
+ unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
+ unsigned char rpl_parmp[4]; /* reply parameter buffer *
+ * pointer (means nothing: the *
+ * parameter buffer follows *
+ * the CPRB). */
+ unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
+ unsigned char rpl_datap[4]; /* reply data buffer */
+ /* pointer */
+ unsigned short ccp_rscode; /* server reason code ULESHORT */
+ unsigned short ccp_rtcode; /* server return code ULESHORT */
+ unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
+ unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
+ unsigned char repd_datal[4]; /* replied data length ULELONG */
+ unsigned char req_pc[2]; /* PC identifier */
+ unsigned char res_origin[8]; /* resource origin */
+ unsigned char mac_value[8]; /* Mac Value */
+ unsigned char logon_id[8]; /* Logon Identifier */
+ unsigned char usage_domain[2]; /* cdx */
+ unsigned char resv3[18]; /* reserved for requestor */
+ unsigned short svr_namel; /* server name length ULESHORT */
+ unsigned char svr_name[8]; /* server name */
+} __packed;
+
+struct function_and_rules_block {
+ unsigned char function_code[2];
+ unsigned short ulen;
+ unsigned char only_rule[8];
+} __packed;
+
+/**
+ * The following is used to initialize the CPRBX passed to the CEXxC/CEXxP
+ * card in a type6 message. The 3 fields that must be filled in at execution
+ * time are req_parml, rpl_parml and usage_domain.
+ * Everything about this interface is ascii/big-endian, since the
+ * device does *not* have 'Intel inside'.
+ *
+ * The CPRBX is followed immediately by the parm block.
+ * The parm block contains:
+ * - function code ('PD' 0x5044 or 'PK' 0x504B)
+ * - rule block (one of:)
+ * + 0x000A 'PKCS-1.2' (MCL2 'PD')
+ * + 0x000A 'ZERO-PAD' (MCL2 'PK')
+ * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
+ * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
+ * - VUD block
+ */
+static const struct CPRBX static_cprbx = {
+ .cprb_len = 0x00DC,
+ .cprb_ver_id = 0x02,
+ .func_id = {0x54, 0x32},
+};
+
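+/*
+ * Classify a CCA request by its two byte ascii function code (for
+ * example 0x5044 is 'PD', 0x504B is 'PK') into a rough speed class,
+ * presumably used by the request distributor for load balancing.
+ */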
+int speed_idx_cca(int req_type)
+{
+ switch (req_type) {
+ case 0x4142:
+ case 0x4149:
+ case 0x414D:
+ case 0x4341:
+ case 0x4344:
+ case 0x4354:
+ case 0x4358:
+ case 0x444B:
+ case 0x4558:
+ case 0x4643:
+ case 0x4651:
+ case 0x4C47:
+ case 0x4C4B:
+ case 0x4C51:
+ case 0x4F48:
+ case 0x504F:
+ case 0x5053:
+ case 0x5058:
+ case 0x5343:
+ case 0x5344:
+ case 0x5345:
+ case 0x5350:
+ return LOW;
+ case 0x414B:
+ case 0x4345:
+ case 0x4349:
+ case 0x434D:
+ case 0x4847:
+ case 0x4849:
+ case 0x484D:
+ case 0x4850:
+ case 0x4851:
+ case 0x4954:
+ case 0x4958:
+ case 0x4B43:
+ case 0x4B44:
+ case 0x4B45:
+ case 0x4B47:
+ case 0x4B48:
+ case 0x4B49:
+ case 0x4B4E:
+ case 0x4B50:
+ case 0x4B52:
+ case 0x4B54:
+ case 0x4B58:
+ case 0x4D50:
+ case 0x4D53:
+ case 0x4D56:
+ case 0x4D58:
+ case 0x5044:
+ case 0x5045:
+ case 0x5046:
+ case 0x5047:
+ case 0x5049:
+ case 0x504B:
+ case 0x504D:
+ case 0x5254:
+ case 0x5347:
+ case 0x5349:
+ case 0x534B:
+ case 0x534D:
+ case 0x5356:
+ case 0x5358:
+ case 0x5443:
+ case 0x544B:
+ case 0x5647:
+ return HIGH;
+ default:
+ return MEDIUM;
+ }
+}
+
+int speed_idx_ep11(int req_type)
+{
+ switch (req_type) {
+ case 1:
+ case 2:
+ case 36:
+ case 37:
+ case 38:
+ case 39:
+ case 40:
+ return LOW;
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 21:
+ case 22:
+ case 26:
+ case 30:
+ case 31:
+ case 32:
+ case 33:
+ case 34:
+ case 35:
+ return HIGH;
+ default:
+ return MEDIUM;
+ }
+}
+
+/**
+ * Convert an ICAMEX message to a type6 MEX message.
+ *
+ * @zq: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or negative errno value.
+ */
+static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo *mex)
+{
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {'C', 'A',},
+ .function_code = {'P', 'K'},
+ };
+ static struct function_and_rules_block static_pke_fnr = {
+ .function_code = {'P', 'K'},
+ .ulen = 10,
+ .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ struct function_and_rules_block fr;
+ unsigned short length;
+ char text[0];
+ } __packed * msg = ap_msg->msg;
+ int size;
+
+ /*
+	 * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_modexpo(). However, make sure the following
+ * copy_from_user() never exceeds the allocated buffer space.
+ */
+ if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE))
+ return -EINVAL;
+
+ /* VUD.ciphertext */
+ msg->length = mex->inputdatalength + 2;
+ if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
+ return -EFAULT;
+
+ /* Set up key which is located after the variable length text. */
+ size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength);
+ if (size < 0)
+ return size;
+ size += sizeof(*msg) + mex->inputdatalength;
+
+ /* message header, cprbx and f&r */
+ msg->hdr = static_type6_hdrX;
+ msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
+ msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+ msg->cprbx = static_cprbx;
+ msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+ msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
+
+ msg->fr = static_pke_fnr;
+
+ msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+ ap_msg->len = size;
+ return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type6 CRT message.
+ *
+ * @zq: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or negative errno value.
+ */
+static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {'C', 'A',},
+ .function_code = {'P', 'D'},
+ };
+ static struct function_and_rules_block static_pkd_fnr = {
+ .function_code = {'P', 'D'},
+ .ulen = 10,
+ .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
+ };
+
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ struct function_and_rules_block fr;
+ unsigned short length;
+ char text[0];
+ } __packed * msg = ap_msg->msg;
+ int size;
+
+ /*
+	 * The inputdatalength was a selection criterion in the dispatching
+ * function zcrypt_rsa_crt(). However, make sure the following
+ * copy_from_user() never exceeds the allocated buffer space.
+ */
+ if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE))
+ return -EINVAL;
+
+ /* VUD.ciphertext */
+ msg->length = crt->inputdatalength + 2;
+ if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
+ return -EFAULT;
+
+ /* Set up key which is located after the variable length text. */
+ size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength);
+ if (size < 0)
+ return size;
+ size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
+
+ /* message header, cprbx and f&r */
+ msg->hdr = static_type6_hdrX;
+ msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
+ msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+ msg->cprbx = static_cprbx;
+ msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+ msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
+ size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+ msg->fr = static_pkd_fnr;
+
+ ap_msg->len = size;
+ return 0;
+}
+
+/**
+ * Convert an XCRB message to a type6 CPRB message.
+ *
+ * @zq: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @xcRB: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT, -EINVAL.
+ */
+struct type86_fmt2_msg {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+} __packed;
+
+static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
+ struct ica_xcRB *xcRB,
+ unsigned int *fcode,
+ unsigned short **dom)
+{
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ } __packed * msg = ap_msg->msg;
+
+ int rcblen = CEIL4(xcRB->request_control_blk_length);
+ int replylen, req_sumlen, resp_sumlen;
+ char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen;
+ char *function_code;
+
+ if (CEIL4(xcRB->request_control_blk_length) <
+ xcRB->request_control_blk_length)
+		return -EINVAL; /* overflow after alignment */
+
+ /* length checks */
+ ap_msg->len = sizeof(struct type6_hdr) +
+ CEIL4(xcRB->request_control_blk_length) +
+ xcRB->request_data_length;
+ if (ap_msg->len > MSGTYPE06_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ /*
+ * Overflow check
+	 * the sum must be greater than or equal to the largest operand
+ */
+ req_sumlen = CEIL4(xcRB->request_control_blk_length) +
+ xcRB->request_data_length;
+ if ((CEIL4(xcRB->request_control_blk_length) <=
+ xcRB->request_data_length) ?
+ (req_sumlen < xcRB->request_data_length) :
+ (req_sumlen < CEIL4(xcRB->request_control_blk_length))) {
+ return -EINVAL;
+ }
+
+ if (CEIL4(xcRB->reply_control_blk_length) <
+ xcRB->reply_control_blk_length)
+		return -EINVAL; /* overflow after alignment */
+
+ replylen = sizeof(struct type86_fmt2_msg) +
+ CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if (replylen > MSGTYPE06_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ /*
+ * Overflow check
+	 * the sum must be greater than or equal to the largest operand
+ */
+ resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
+ (resp_sumlen < xcRB->reply_data_length) :
+ (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) {
+ return -EINVAL;
+ }
+
+ /* prepare type6 header */
+ msg->hdr = static_type6_hdrX;
+ memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
+ msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
+ if (xcRB->request_data_length) {
+ msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
+ msg->hdr.ToCardLen2 = xcRB->request_data_length;
+ }
+ msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
+ msg->hdr.FromCardLen2 = xcRB->reply_data_length;
+
+ /* prepare CPRB */
+ if (z_copy_from_user(userspace, &(msg->cprbx), xcRB->request_control_blk_addr,
+ xcRB->request_control_blk_length))
+ return -EFAULT;
+ if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
+ xcRB->request_control_blk_length)
+ return -EINVAL;
+ function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
+ memcpy(msg->hdr.function_code, function_code,
+ sizeof(msg->hdr.function_code));
+
+ *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
+ *dom = (unsigned short *)&msg->cprbx.domain;
+
+ if (memcmp(function_code, "US", 2) == 0
+ || memcmp(function_code, "AU", 2) == 0)
+ ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+ ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
+ /* copy data block */
+ if (xcRB->request_data_length &&
+ z_copy_from_user(userspace, req_data, xcRB->request_data_address,
+ xcRB->request_data_length))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg,
+ struct ep11_urb *xcRB,
+ unsigned int *fcode)
+{
+ unsigned int lfmt;
+ static struct type6_hdr static_type6_ep11_hdr = {
+ .type = 0x06,
+ .rqid = {0x00, 0x01},
+ .function_code = {0x00, 0x00},
+ .agent_id[0] = 0x58, /* {'X'} */
+ .agent_id[1] = 0x43, /* {'C'} */
+ .offset1 = 0x00000058,
+ };
+
+ struct {
+ struct type6_hdr hdr;
+ struct ep11_cprb cprbx;
+ unsigned char pld_tag; /* fixed value 0x30 */
+ unsigned char pld_lenfmt; /* payload length format */
+ } __packed * msg = ap_msg->msg;
+
+ struct pld_hdr {
+ unsigned char func_tag; /* fixed value 0x4 */
+ unsigned char func_len; /* fixed value 0x4 */
+ unsigned int func_val; /* function ID */
+ unsigned char dom_tag; /* fixed value 0x4 */
+ unsigned char dom_len; /* fixed value 0x4 */
+ unsigned int dom_val; /* domain id */
+ } __packed * payload_hdr = NULL;
+
+ if (CEIL4(xcRB->req_len) < xcRB->req_len)
+		return -EINVAL; /* overflow after alignment */
+
+ /* length checks */
+ ap_msg->len = sizeof(struct type6_hdr) + xcRB->req_len;
+ if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
+ (sizeof(struct type6_hdr)))
+ return -EINVAL;
+
+ if (CEIL4(xcRB->resp_len) < xcRB->resp_len)
+		return -EINVAL; /* overflow after alignment */
+
+ if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE -
+ (sizeof(struct type86_fmt2_msg)))
+ return -EINVAL;
+
+ /* prepare type6 header */
+ msg->hdr = static_type6_ep11_hdr;
+ msg->hdr.ToCardLen1 = xcRB->req_len;
+ msg->hdr.FromCardLen1 = xcRB->resp_len;
+
+ /* Import CPRB data from the ioctl input parameter */
+ if (z_copy_from_user(userspace, &(msg->cprbx.cprb_len),
+ (char __force __user *)xcRB->req, xcRB->req_len)) {
+ return -EFAULT;
+ }
+
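+	/*
+	 * The payload length is BER encoded: bit 7 of pld_lenfmt set
+	 * means an extended length whose low bits give the number of
+	 * length bytes that follow; lfmt is the resulting offset from
+	 * pld_lenfmt to the payload header.
+	 */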
+	if ((msg->pld_lenfmt & 0x80) == 0x80) { /* ext.len.fmt 2 or 3 */
+ switch (msg->pld_lenfmt & 0x03) {
+ case 1:
+ lfmt = 2;
+ break;
+ case 2:
+ lfmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ lfmt = 1; /* length format #1 */
+ }
+ payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ *fcode = payload_hdr->func_val & 0xFFFF;
+
+ /* enable special processing based on the cprbs flags special bit */
+	/* enable special processing based on the cprb's special flag bit */
+ ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+ ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
+ return 0;
+}
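+
+/*
+ * Worked example (illustration only) for the payload length format
+ * decoded above, which follows ASN.1 BER-style length octets: a
+ * pld_lenfmt byte with the high bit clear (e.g. 0x2a) is a short-form
+ * length, so the payload header starts 1 byte behind it (lfmt 1);
+ * 0x81 announces one additional length octet (lfmt 2) and 0x82 two
+ * of them (lfmt 3). For pld_lenfmt == 0x82 the function ID tag is
+ * therefore found at &pld_lenfmt + 3.
+ */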
+
+/**
+ * Copy results from a type 86 ICA reply message back to user space.
+ *
+ * @zq: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+struct type86x_reply {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct CPRBX cprbx;
+ unsigned char pad[4]; /* 4 byte function code/rules block ? */
+ unsigned short length;
+ char text[];
+} __packed;
+
+struct type86_ep11_reply {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct ep11_cprb cprbx;
+} __packed;
+
+static int convert_type86_ica(struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ static unsigned char static_pad[] = {
+ 0x00, 0x02,
+ 0x1B, 0x7B, 0x5D, 0xB5, 0x75, 0x01, 0x3D, 0xFD,
+ 0x8D, 0xD1, 0xC7, 0x03, 0x2D, 0x09, 0x23, 0x57,
+ 0x89, 0x49, 0xB9, 0x3F, 0xBB, 0x99, 0x41, 0x5B,
+ 0x75, 0x21, 0x7B, 0x9D, 0x3B, 0x6B, 0x51, 0x39,
+ 0xBB, 0x0D, 0x35, 0xB9, 0x89, 0x0F, 0x93, 0xA5,
+ 0x0B, 0x47, 0xF1, 0xD3, 0xBB, 0xCB, 0xF1, 0x9D,
+ 0x23, 0x73, 0x71, 0xFF, 0xF3, 0xF5, 0x45, 0xFB,
+ 0x61, 0x29, 0x23, 0xFD, 0xF1, 0x29, 0x3F, 0x7F,
+ 0x17, 0xB7, 0x1B, 0xA9, 0x19, 0xBD, 0x57, 0xA9,
+ 0xD7, 0x95, 0xA3, 0xCB, 0xED, 0x1D, 0xDB, 0x45,
+ 0x7D, 0x11, 0xD1, 0x51, 0x1B, 0xED, 0x71, 0xE9,
+ 0xB1, 0xD1, 0xAB, 0xAB, 0x21, 0x2B, 0x1B, 0x9F,
+ 0x3B, 0x9F, 0xF7, 0xF7, 0xBD, 0x63, 0xEB, 0xAD,
+ 0xDF, 0xB3, 0x6F, 0x5B, 0xDB, 0x8D, 0xA9, 0x5D,
+ 0xE3, 0x7D, 0x77, 0x49, 0x47, 0xF5, 0xA7, 0xFD,
+ 0xAB, 0x2F, 0x27, 0x35, 0x77, 0xD3, 0x49, 0xC9,
+ 0x09, 0xEB, 0xB1, 0xF9, 0xBF, 0x4B, 0xCB, 0x2B,
+ 0xEB, 0xEB, 0x05, 0xFF, 0x7D, 0xC7, 0x91, 0x8B,
+ 0x09, 0x83, 0xB9, 0xB9, 0x69, 0x33, 0x39, 0x6B,
+ 0x79, 0x75, 0x19, 0xBF, 0xBB, 0x07, 0x1D, 0xBD,
+ 0x29, 0xBF, 0x39, 0x95, 0x93, 0x1D, 0x35, 0xC7,
+ 0xC9, 0x4D, 0xE5, 0x97, 0x0B, 0x43, 0x9B, 0xF1,
+ 0x16, 0x93, 0x03, 0x1F, 0xA5, 0xFB, 0xDB, 0xF3,
+ 0x27, 0x4F, 0x27, 0x61, 0x05, 0x1F, 0xB9, 0x23,
+ 0x2F, 0xC3, 0x81, 0xA9, 0x23, 0x71, 0x55, 0x55,
+ 0xEB, 0xED, 0x41, 0xE5, 0xF3, 0x11, 0xF1, 0x43,
+ 0x69, 0x03, 0xBD, 0x0B, 0x37, 0x0F, 0x51, 0x8F,
+ 0x0B, 0xB5, 0x89, 0x5B, 0x67, 0xA9, 0xD9, 0x4F,
+ 0x01, 0xF9, 0x21, 0x77, 0x37, 0x73, 0x79, 0xC5,
+ 0x7F, 0x51, 0xC1, 0xCF, 0x97, 0xA1, 0x75, 0xAD,
+ 0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
+ 0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
+ };
+ struct type86x_reply *msg = reply->msg;
+ unsigned short service_rc, service_rs;
+ unsigned int reply_len, pad_len;
+ char *data;
+
+ service_rc = msg->cprbx.ccp_rtcode;
+ if (unlikely(service_rc != 0)) {
+ service_rs = msg->cprbx.ccp_rscode;
+ if ((service_rc == 8 && service_rs == 66) ||
+ (service_rc == 8 && service_rs == 65) ||
+ (service_rc == 8 && service_rs == 72) ||
+ (service_rc == 8 && service_rs == 770) ||
+ (service_rc == 12 && service_rs == 769)) {
+ ZCRYPT_DBF_WARN("dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
+ return -EINVAL;
+ }
+ zq->online = 0;
+ pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
+ return -EAGAIN;
+ }
+ data = msg->text;
+ reply_len = msg->length - 2;
+ if (reply_len > outputdatalength)
+ return -EINVAL;
+	/*
+	 * For all encipher requests, the length of the ciphertext (reply_len)
+	 * always equals the modulus length. For MEX decipher requests
+	 * the output needs to be padded; the minimum pad size is 10.
+	 *
+	 * Currently, the cases where padding is added are:
+	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
+	 *   ZERO-PAD and CRT is only supported for PKD requests)
+	 * - PCICC, always
+	 * A worked example follows this function.
+	 */
+ pad_len = outputdatalength - reply_len;
+ if (pad_len > 0) {
+ if (pad_len < 10)
+ return -EINVAL;
+		/* restore the padding that was left in the CEXxC card */
+ if (copy_to_user(outputdata, static_pad, pad_len - 1))
+ return -EFAULT;
+ if (put_user(0, outputdata + pad_len - 1))
+ return -EFAULT;
+ }
+ /* Copy the crypto response to user space. */
+ if (copy_to_user(outputdata + pad_len, data, reply_len))
+ return -EFAULT;
+ return 0;
+}
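+
+/*
+ * Worked example (illustration, not driver code): for a 2048-bit key,
+ * outputdatalength is 256. If the card returns reply_len == 240, then
+ * pad_len == 16 and the user buffer is assembled as 15 bytes of
+ * static_pad, one 0x00 separator byte and the 240 reply bytes, i.e.
+ * the PKCS#1 block type 2 layout (0x00 0x02 <nonzero PS> 0x00 <data>)
+ * that the card had stripped.
+ */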
+
+/**
+ * Copy results from a type 86 XCRB reply message back to user space.
+ *
+ * @zq: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to XCRB
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ struct ica_xcRB *xcRB)
+{
+ struct type86_fmt2_msg *msg = reply->msg;
+ char *data = reply->msg;
+
+ /* Copy CPRB to user */
+ if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
+ return -EFAULT;
+ xcRB->reply_control_blk_length = msg->fmt2.count1;
+
+ /* Copy data buffer to user */
+ if (msg->fmt2.count2)
+ if (z_copy_to_user(userspace, xcRB->reply_data_addr,
+ data + msg->fmt2.offset2, msg->fmt2.count2))
+ return -EFAULT;
+ xcRB->reply_data_length = msg->fmt2.count2;
+ return 0;
+}
+
+/**
+ * Copy results from a type 86 EP11 XCRB reply message back to user space.
+ *
+ * @zq: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to EP11 user request block
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ struct ep11_urb *xcRB)
+{
+ struct type86_fmt2_msg *msg = reply->msg;
+ char *data = reply->msg;
+
+ if (xcRB->resp_len < msg->fmt2.count1)
+ return -EINVAL;
+
+ /* Copy response CPRB to user */
+ if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
+ return -EFAULT;
+ xcRB->resp_len = msg->fmt2.count1;
+ return 0;
+}
+
+static int convert_type86_rng(struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ char *buffer)
+{
+ struct {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct CPRBX cprbx;
+ } __packed * msg = reply->msg;
+ char *data = reply->msg;
+
+ if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
+ return -EINVAL;
+ memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
+ return msg->fmt2.count2;
+}
+
+static int convert_response_ica(struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ struct type86x_reply *msg = reply->msg;
+
+ switch (msg->hdr.type) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return convert_error(zq, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->cprbx.ccp_rtcode &&
+ (msg->cprbx.ccp_rscode == 0x14f) &&
+ (outputdatalength > 256)) {
+ if (zq->zcard->max_exp_bit_length <= 17) {
+ zq->zcard->max_exp_bit_length = 17;
+ return -EAGAIN;
+ } else
+ return -EINVAL;
+ }
+ if (msg->hdr.reply_code)
+ return convert_error(zq, reply);
+ if (msg->cprbx.cprb_ver_id == 0x02)
+ return convert_type86_ica(zq, reply,
+ outputdata, outputdatalength);
+ fallthrough; /* wrong cprb version is an unknown response */
+ default:
+ /* Unknown response type, this should NEVER EVER happen */
+ zq->online = 0;
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ return -EAGAIN;
+ }
+}
+
+static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ struct ica_xcRB *xcRB)
+{
+ struct type86x_reply *msg = reply->msg;
+
+ switch (msg->hdr.type) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ return convert_error(zq, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code) {
+ memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
+ return convert_error(zq, reply);
+ }
+ if (msg->cprbx.cprb_ver_id == 0x02)
+ return convert_type86_xcrb(userspace, zq, reply, xcRB);
+ fallthrough; /* wrong cprb version is an unknown response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ zq->online = 0;
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ return -EAGAIN;
+ }
+}
+
+static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
+ struct ap_message *reply, struct ep11_urb *xcRB)
+{
+ struct type86_ep11_reply *msg = reply->msg;
+
+ switch (msg->hdr.type) {
+ case TYPE82_RSP_CODE:
+ case TYPE87_RSP_CODE:
+ return convert_error(zq, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code)
+ return convert_error(zq, reply);
+ if (msg->cprbx.cprb_ver_id == 0x04)
+ return convert_type86_ep11_xcrb(userspace, zq, reply, xcRB);
+		fallthrough; /* wrong cprb version is an unknown response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zq->online = 0;
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ return -EAGAIN;
+ }
+}
+
+static int convert_response_rng(struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ char *data)
+{
+ struct type86x_reply *msg = reply->msg;
+
+ switch (msg->hdr.type) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return -EINVAL;
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code)
+ return -EINVAL;
+ if (msg->cprbx.cprb_ver_id == 0x02)
+ return convert_type86_rng(zq, reply, data);
+ fallthrough; /* wrong cprb version is an unknown response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ zq->online = 0;
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ return -EAGAIN;
+ }
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished; "reply" then holds the reply message.
+ * It is called from tasklet context.
+ * @aq: pointer to the AP queue
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_msgtype6_receive(struct ap_queue *aq,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct response_type *resp_type =
+ (struct response_type *) msg->private;
+ struct type86x_reply *t86r;
+ int len;
+
+ /* Copy the reply message to the request message buffer. */
+ if (!reply)
+ goto out; /* ap_msg->rc indicates the error */
+ t86r = reply->msg;
+ if (t86r->hdr.type == TYPE86_RSP_CODE &&
+ t86r->cprbx.cprb_ver_id == 0x02) {
+ switch (resp_type->type) {
+ case CEXXC_RESPONSE_TYPE_ICA:
+ len = sizeof(struct type86x_reply) + t86r->length - 2;
+ len = min_t(int, CEXXC_MAX_ICA_RESPONSE_SIZE, len);
+ memcpy(msg->msg, reply->msg, len);
+ break;
+ case CEXXC_RESPONSE_TYPE_XCRB:
+ len = t86r->fmt2.offset2 + t86r->fmt2.count2;
+ len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
+ memcpy(msg->msg, reply->msg, len);
+ break;
+ default:
+ memcpy(msg->msg, &error_reply, sizeof(error_reply));
+ }
+	} else {
+		memcpy(msg->msg, reply->msg, sizeof(error_reply));
+	}
+out:
+ complete(&(resp_type->work));
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished; "reply" then holds the reply message.
+ * It is called from tasklet context.
+ * @aq: pointer to the AP queue
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct response_type *resp_type =
+ (struct response_type *)msg->private;
+ struct type86_ep11_reply *t86r;
+ int len;
+
+ /* Copy the reply message to the request message buffer. */
+ if (!reply)
+ goto out; /* ap_msg->rc indicates the error */
+ t86r = reply->msg;
+ if (t86r->hdr.type == TYPE86_RSP_CODE &&
+ t86r->cprbx.cprb_ver_id == 0x04) {
+ switch (resp_type->type) {
+ case CEXXC_RESPONSE_TYPE_EP11:
+ len = t86r->fmt2.offset1 + t86r->fmt2.count1;
+ len = min_t(int, MSGTYPE06_MAX_MSG_SIZE, len);
+ memcpy(msg->msg, reply->msg, len);
+ break;
+ default:
+ memcpy(msg->msg, &error_reply, sizeof(error_reply));
+ }
+ } else {
+ memcpy(msg->msg, reply->msg, sizeof(error_reply));
+ }
+out:
+ complete(&(resp_type->work));
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the CEXxC
+ * device to handle a modexpo request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ * CEXxC device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
+ struct ica_rsa_modexpo *mex,
+ struct ap_message *ap_msg)
+{
+ struct response_type resp_type = {
+ .type = CEXXC_RESPONSE_TYPE_ICA,
+ };
+ int rc;
+
+ ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = &resp_type;
+ rc = ICAMEX_msg_to_type6MEX_msgX(zq, ap_msg, mex);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out_free;
+ rc = wait_for_completion_interruptible(&resp_type.work);
+ if (rc == 0) {
+ rc = ap_msg->rc;
+ if (rc == 0)
+ rc = convert_response_ica(zq, ap_msg,
+ mex->outputdata,
+ mex->outputdatalength);
+	} else {
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+	}
+out_free:
+ free_page((unsigned long) ap_msg->msg);
+ ap_msg->private = NULL;
+ ap_msg->msg = NULL;
+ return rc;
+}
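+
+/*
+ * Illustration only: the psmid built above carries the caller's pid
+ * in the upper 32 bits and a global sequence counter in the lower
+ * 32 bits, e.g. pid 1234 with counter value 7 yields
+ * 0x000004d200000007, which lets the AP bus match replies to
+ * outstanding requests.
+ */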
+
+/**
+ * The request distributor calls this function if it picked the CEXxC
+ * device to handle a modexpo_crt request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ * CEXxC device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
+ struct ica_rsa_modexpo_crt *crt,
+ struct ap_message *ap_msg)
+{
+ struct response_type resp_type = {
+ .type = CEXXC_RESPONSE_TYPE_ICA,
+ };
+ int rc;
+
+ ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = &resp_type;
+ rc = ICACRT_msg_to_type6CRT_msgX(zq, ap_msg, crt);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out_free;
+ rc = wait_for_completion_interruptible(&resp_type.work);
+ if (rc == 0) {
+ rc = ap_msg->rc;
+ if (rc == 0)
+ rc = convert_response_ica(zq, ap_msg,
+ crt->outputdata,
+ crt->outputdatalength);
+ } else {
+ /* Signal pending. */
+ ap_cancel_message(zq->queue, ap_msg);
+ }
+out_free:
+ free_page((unsigned long) ap_msg->msg);
+ ap_msg->private = NULL;
+ ap_msg->msg = NULL;
+ return rc;
+}
+
+/**
+ * Fetch the function code from a CPRB.
+ * Extracting the fc requires copying the cprb from userspace, so this
+ * function allocates memory and needs an ap_msg prepared by the caller
+ * with ap_init_message(). The caller must also make sure that
+ * ap_release_message() is always called, even on failure (a usage
+ * sketch follows the function).
+ */
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
+ struct ap_message *ap_msg,
+ unsigned int *func_code, unsigned short **dom)
+{
+ struct response_type resp_type = {
+ .type = CEXXC_RESPONSE_TYPE_XCRB,
+ };
+
+ ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
+ if (!ap_msg->private)
+ return -ENOMEM;
+ return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
+}
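+
+/*
+ * Minimal usage sketch for the contract described above (illustration
+ * only, error handling trimmed; "xcrb" is assumed to be a prepared
+ * struct ica_xcRB):
+ *
+ *	struct ap_message ap_msg;
+ *	unsigned int fc;
+ *	unsigned short *dom;
+ *	int rc;
+ *
+ *	ap_init_message(&ap_msg);
+ *	rc = get_cprb_fc(true, &xcrb, &ap_msg, &fc, &dom);
+ *	...
+ *	ap_release_message(&ap_msg);	(always, even if rc != 0)
+ */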
+
+/**
+ * The request distributor calls this function if it picked the CEXxC
+ * device to handle a send_cprb request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ * CEXxC device to the request distributor
+ * @xcRB: pointer to the send_cprb request buffer
+ */
+static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
+ struct ica_xcRB *xcRB,
+ struct ap_message *ap_msg)
+{
+ int rc;
+ struct response_type *rtype = (struct response_type *)(ap_msg->private);
+
+ init_completion(&rtype->work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
+ rc = wait_for_completion_interruptible(&rtype->work);
+ if (rc == 0) {
+ rc = ap_msg->rc;
+ if (rc == 0)
+ rc = convert_response_xcrb(userspace, zq, ap_msg, xcRB);
+	} else {
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+	}
+out:
+ return rc;
+}
+
+/**
+ * Fetch the function code from an EP11 CPRB.
+ * Extracting the fc requires copying the ep11 cprb from userspace, so
+ * this function allocates memory and needs an ap_msg prepared by the
+ * caller with ap_init_message(). The caller must also make sure that
+ * ap_release_message() is always called, even on failure.
+ */
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
+ struct ap_message *ap_msg,
+ unsigned int *func_code)
+{
+ struct response_type resp_type = {
+ .type = CEXXC_RESPONSE_TYPE_EP11,
+ };
+
+ ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive_ep11;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
+ if (!ap_msg->private)
+ return -ENOMEM;
+ return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code);
+}
+
+/**
+ * The request distributor calls this function if it picked the CEX4P
+ * device to handle a send_ep11_cprb request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ * CEX4P device to the request distributor
+ * @xcrb: pointer to the ep11 user request block
+ */
+static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq,
+ struct ep11_urb *xcrb,
+ struct ap_message *ap_msg)
+{
+ int rc;
+ unsigned int lfmt;
+ struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ struct {
+ struct type6_hdr hdr;
+ struct ep11_cprb cprbx;
+ unsigned char pld_tag; /* fixed value 0x30 */
+ unsigned char pld_lenfmt; /* payload length format */
+ } __packed * msg = ap_msg->msg;
+ struct pld_hdr {
+ unsigned char func_tag; /* fixed value 0x4 */
+ unsigned char func_len; /* fixed value 0x4 */
+ unsigned int func_val; /* function ID */
+ unsigned char dom_tag; /* fixed value 0x4 */
+ unsigned char dom_len; /* fixed value 0x4 */
+ unsigned int dom_val; /* domain id */
+ } __packed * payload_hdr = NULL;
+
+	/*
+	 * The target domain field within the cprb body/payload block will be
+	 * replaced by the usage domain for non-management commands only.
+	 * Therefore we check the first bit of the 'flags' parameter for
+	 * management command indication:
+	 *   0 - non-management command
+	 *   1 - management command
+	 * (The flag bits are summarized after this function.)
+	 */
+	if (!(msg->cprbx.flags & 0x80)) {
+ msg->cprbx.target_id = (unsigned int)
+ AP_QID_QUEUE(zq->queue->qid);
+
+		if ((msg->pld_lenfmt & 0x80) == 0x80) { /* ext.len.fmt 2 or 3 */
+ switch (msg->pld_lenfmt & 0x03) {
+ case 1:
+ lfmt = 2;
+ break;
+ case 2:
+ lfmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ lfmt = 1; /* length format #1 */
+ }
+		payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt);
+ payload_hdr->dom_val = (unsigned int)
+ AP_QID_QUEUE(zq->queue->qid);
+ }
+
+ init_completion(&rtype->work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
+ rc = wait_for_completion_interruptible(&rtype->work);
+ if (rc == 0) {
+ rc = ap_msg->rc;
+ if (rc == 0)
+ rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb);
+	} else {
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+	}
+out:
+ return rc;
+}
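+
+/*
+ * Summary (illustration only) of the EP11 CPRB flag bits used by this
+ * message type: flags & 0x80 marks a management command, whose target
+ * domain is left untouched above; flags & 0x20 requests special
+ * processing and is checked while the request is prepared. A CPRB
+ * with neither bit set gets its domain fields rewritten to the
+ * queue's usage domain and is processed normally.
+ */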
+
+unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
+ unsigned int *domain)
+{
+ struct response_type resp_type = {
+ .type = CEXXC_RESPONSE_TYPE_XCRB,
+ };
+
+ ap_msg->msg = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
+ if (!ap_msg->private)
+ return -ENOMEM;
+
+ rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
+
+ *func_code = HWRNG;
+ return 0;
+}
+
+/**
+ * The request distributor calls this function if it picked the CEXxC
+ * device to generate random data.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ * CEXxC device to the request distributor
+ * @buffer: pointer to a memory page to return random data
+ */
+static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
+ char *buffer, struct ap_message *ap_msg)
+{
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ char function_code[2];
+ short int rule_length;
+ char rule[8];
+ short int verb_length;
+ short int key_length;
+ } __packed * msg = ap_msg->msg;
+ struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ int rc;
+
+ msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+
+ init_completion(&rtype->work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
+ rc = wait_for_completion_interruptible(&rtype->work);
+ if (rc == 0) {
+ rc = ap_msg->rc;
+ if (rc == 0)
+ rc = convert_response_rng(zq, ap_msg, buffer);
+	} else {
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+	}
+out:
+ return rc;
+}
+
+/*
+ * The crypto operations for a CEXxC card.
+ */
+static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
+ .owner = THIS_MODULE,
+ .name = MSGTYPE06_NAME,
+ .variant = MSGTYPE06_VARIANT_NORNG,
+ .rsa_modexpo = zcrypt_msgtype6_modexpo,
+ .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
+ .send_cprb = zcrypt_msgtype6_send_cprb,
+};
+
+static struct zcrypt_ops zcrypt_msgtype6_ops = {
+ .owner = THIS_MODULE,
+ .name = MSGTYPE06_NAME,
+ .variant = MSGTYPE06_VARIANT_DEFAULT,
+ .rsa_modexpo = zcrypt_msgtype6_modexpo,
+ .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
+ .send_cprb = zcrypt_msgtype6_send_cprb,
+ .rng = zcrypt_msgtype6_rng,
+};
+
+static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
+ .owner = THIS_MODULE,
+ .name = MSGTYPE06_NAME,
+ .variant = MSGTYPE06_VARIANT_EP11,
+ .rsa_modexpo = NULL,
+ .rsa_modexpo_crt = NULL,
+ .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
+};
+
+void __init zcrypt_msgtype6_init(void)
+{
+ zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
+ zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
+ zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
+}
+
+void __exit zcrypt_msgtype6_exit(void)
+{
+ zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
+ zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
+ zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
+}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
new file mode 100644
index 000000000..0a0bf0742
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_MSGTYPE6_H_
+#define _ZCRYPT_MSGTYPE6_H_
+
+#include <asm/zcrypt.h>
+
+#define MSGTYPE06_NAME "zcrypt_msgtype6"
+#define MSGTYPE06_VARIANT_DEFAULT 0
+#define MSGTYPE06_VARIANT_NORNG 1
+#define MSGTYPE06_VARIANT_EP11 2
+
+#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
+
+/**
+ * The type 6 message family is associated with CEXxC/CEXxP cards.
+ *
+ * It contains a message header followed by a CPRB, both of which
+ * are described below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type6_hdr {
+ unsigned char reserved1; /* 0x00 */
+ unsigned char type; /* 0x06 */
+ unsigned char reserved2[2]; /* 0x0000 */
+ unsigned char right[4]; /* 0x00000000 */
+ unsigned char reserved3[2]; /* 0x0000 */
+ unsigned char reserved4[2]; /* 0x0000 */
+ unsigned char apfs[4]; /* 0x00000000 */
+ unsigned int offset1; /* 0x00000058 (offset to CPRB) */
+ unsigned int offset2; /* 0x00000000 */
+ unsigned int offset3; /* 0x00000000 */
+ unsigned int offset4; /* 0x00000000 */
+ unsigned char agent_id[16]; /* 0x4341000000000000 */
+ /* 0x0000000000000000 */
+ unsigned char rqid[2]; /* rqid. internal to 603 */
+ unsigned char reserved5[2]; /* 0x0000 */
+ unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
+ unsigned char reserved6[2]; /* 0x0000 */
+ unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
+ unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
+ unsigned int ToCardLen3; /* 0x00000000 */
+ unsigned int ToCardLen4; /* 0x00000000 */
+ unsigned int FromCardLen1; /* response buffer length */
+ unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
+ unsigned int FromCardLen3; /* 0x00000000 */
+ unsigned int FromCardLen4; /* 0x00000000 */
+} __packed;
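+
+/*
+ * Worked example (illustration): "(request CPRB len + 3) & -4" in the
+ * ToCardLen1 comment above is the usual round-up to a multiple of 4,
+ * e.g. a 221 byte CPRB gives (221 + 3) & -4 == 224, while a length
+ * that is already a multiple of 4 is left unchanged.
+ */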
+
+/**
+ * The type 86 message family is associated with CEXxC/CEXxP cards.
+ *
+ * It contains a message header followed by a CPRB. The CPRB is
+ * the same as the request CPRB, which is described above.
+ *
+ * If format is 1, an error condition exists and no data beyond
+ * the 8-byte message header is of interest.
+ *
+ * The non-error message is shown below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type86_hdr {
+ unsigned char reserved1; /* 0x00 */
+ unsigned char type; /* 0x86 */
+ unsigned char format; /* 0x01 (error) or 0x02 (ok) */
+ unsigned char reserved2; /* 0x00 */
+ unsigned char reply_code; /* reply code (see above) */
+ unsigned char reserved3[3]; /* 0x000000 */
+} __packed;
+
+#define TYPE86_RSP_CODE 0x86
+#define TYPE87_RSP_CODE 0x87
+#define TYPE86_FMT2 0x02
+
+struct type86_fmt2_ext {
+ unsigned char reserved[4]; /* 0x00000000 */
+ unsigned char apfs[4]; /* final status */
+ unsigned int count1; /* length of CPRB + parameters */
+ unsigned int offset1; /* offset to CPRB */
+ unsigned int count2; /* 0x00000000 */
+ unsigned int offset2; /* db offset 0x00000000 for PKD */
+ unsigned int count3; /* 0x00000000 */
+ unsigned int offset3; /* 0x00000000 */
+ unsigned int count4; /* 0x00000000 */
+ unsigned int offset4; /* 0x00000000 */
+} __packed;
+
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *, struct ap_message *,
+ unsigned int *, unsigned short **);
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *, struct ap_message *,
+ unsigned int *);
+unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
+
+#define LOW 10
+#define MEDIUM 100
+#define HIGH 500
+
+int speed_idx_cca(int);
+int speed_idx_ep11(int);
+
+/**
+ * Prepare a type6 CPRB message for random number generation
+ *
+ * @ap_msg: pointer to AP message
+ * @random_number_length: number of random bytes to request
+ * @domain: out: domain field of the prepared CPRB
+ */
+static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
+ unsigned int random_number_length,
+ unsigned int *domain)
+{
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ char function_code[2];
+ short int rule_length;
+ char rule[8];
+ short int verb_length;
+ short int key_length;
+ } __packed * msg = ap_msg->msg;
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {'C', 'A'},
+ .function_code = {'R', 'L'},
+ .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
+ .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
+ };
+ static struct CPRBX local_cprbx = {
+ .cprb_len = 0x00dc,
+ .cprb_ver_id = 0x02,
+ .func_id = {0x54, 0x32},
+ .req_parml = sizeof(*msg) - sizeof(msg->hdr) -
+ sizeof(msg->cprbx),
+ .rpl_msgbl = sizeof(*msg) - sizeof(msg->hdr),
+ };
+
+ msg->hdr = static_type6_hdrX;
+	msg->hdr.FromCardLen2 = random_number_length;
+	msg->cprbx = local_cprbx;
+	msg->cprbx.rpl_datal = random_number_length;
+ memcpy(msg->function_code, msg->hdr.function_code, 0x02);
+ msg->rule_length = 0x0a;
+ memcpy(msg->rule, "RANDOM ", 8);
+ msg->verb_length = 0x02;
+ msg->key_length = 0x02;
+ ap_msg->len = sizeof(*msg);
+ *domain = (unsigned short)msg->cprbx.domain;
+}
+
+void zcrypt_msgtype6_init(void);
+void zcrypt_msgtype6_exit(void);
+
+#endif /* _ZCRYPT_MSGTYPE6_H_ */
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
new file mode 100644
index 000000000..c3ffbd26b
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto queue devices.
+ */
+
+static ssize_t online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct zcrypt_queue *zq = aq->private;
+ int online = aq->config && zq->online ? 1 : 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+}
+
+static ssize_t online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct zcrypt_queue *zq = aq->private;
+ struct zcrypt_card *zc = zq->zcard;
+ int online;
+
+ if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+ return -EINVAL;
+
+ if (online && (!aq->config || !aq->card->config))
+ return -ENODEV;
+ if (online && !zc->online)
+ return -EINVAL;
+ zq->online = online;
+
+ ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ online);
+
+ if (!online)
+ ap_flush_queue(zq->queue);
+ return count;
+}
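+
+/*
+ * Usage sketch (assuming the usual AP bus sysfs layout, where e.g.
+ * queue 5 on card 1 appears as 01.0005 below /sys/bus/ap/devices):
+ *
+ *	echo 0 > /sys/bus/ap/devices/01.0005/online
+ *	echo 1 > /sys/bus/ap/devices/01.0005/online
+ *
+ * Writing 0 also flushes all requests still queued on the device.
+ */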
+
+static DEVICE_ATTR_RW(online);
+
+static ssize_t load_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+}
+
+static DEVICE_ATTR_RO(load);
+
+static struct attribute *zcrypt_queue_attrs[] = {
+ &dev_attr_online.attr,
+ &dev_attr_load.attr,
+ NULL,
+};
+
+static const struct attribute_group zcrypt_queue_attr_group = {
+ .attrs = zcrypt_queue_attrs,
+};
+
+void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
+{
+ zq->online = online;
+ if (!online)
+ ap_flush_queue(zq->queue);
+}
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
+{
+ struct zcrypt_queue *zq;
+
+ zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
+ if (!zq)
+ return NULL;
+ zq->reply.msg = kmalloc(max_response_size, GFP_KERNEL);
+ if (!zq->reply.msg)
+ goto out_free;
+ zq->reply.len = max_response_size;
+ INIT_LIST_HEAD(&zq->list);
+ kref_init(&zq->refcount);
+ return zq;
+
+out_free:
+ kfree(zq);
+ return NULL;
+}
+EXPORT_SYMBOL(zcrypt_queue_alloc);
+
+void zcrypt_queue_free(struct zcrypt_queue *zq)
+{
+ kfree(zq->reply.msg);
+ kfree(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_free);
+
+static void zcrypt_queue_release(struct kref *kref)
+{
+ struct zcrypt_queue *zq =
+ container_of(kref, struct zcrypt_queue, refcount);
+ zcrypt_queue_free(zq);
+}
+
+void zcrypt_queue_get(struct zcrypt_queue *zq)
+{
+ kref_get(&zq->refcount);
+}
+EXPORT_SYMBOL(zcrypt_queue_get);
+
+int zcrypt_queue_put(struct zcrypt_queue *zq)
+{
+ return kref_put(&zq->refcount, zcrypt_queue_release);
+}
+EXPORT_SYMBOL(zcrypt_queue_put);
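+
+/*
+ * Note on the lifetime rules above (illustration only): a queue is
+ * created with one reference by zcrypt_queue_alloc() (kref_init);
+ * zcrypt_queue_get()/zcrypt_queue_put() adjust that count, and the
+ * final put releases the queue through zcrypt_queue_release() and
+ * zcrypt_queue_free().
+ */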
+
+/**
+ * zcrypt_queue_register() - Register a crypto queue device.
+ * @zq: Pointer to a crypto queue device
+ *
+ * Register a crypto queue device. Returns 0 if successful.
+ */
+int zcrypt_queue_register(struct zcrypt_queue *zq)
+{
+ struct zcrypt_card *zc;
+ int rc;
+
+ spin_lock(&zcrypt_list_lock);
+ zc = zq->queue->card->private;
+ zcrypt_card_get(zc);
+ zq->zcard = zc;
+ zq->online = 1; /* New devices are online by default. */
+
+ ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
+ AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+ list_add_tail(&zq->list, &zc->zqueues);
+ zcrypt_device_count++;
+ spin_unlock(&zcrypt_list_lock);
+
+ rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
+ &zcrypt_queue_attr_group);
+ if (rc)
+ goto out;
+
+ if (zq->ops->rng) {
+ rc = zcrypt_rng_device_add();
+ if (rc)
+ goto out_unregister;
+ }
+ return 0;
+
+out_unregister:
+ sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+ &zcrypt_queue_attr_group);
+out:
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zq->list);
+ spin_unlock(&zcrypt_list_lock);
+ zcrypt_card_put(zc);
+ return rc;
+}
+EXPORT_SYMBOL(zcrypt_queue_register);
+
+/**
+ * zcrypt_queue_unregister() - Unregister a crypto queue device.
+ * @zq: Pointer to crypto queue device
+ *
+ * Unregister a crypto queue device.
+ */
+void zcrypt_queue_unregister(struct zcrypt_queue *zq)
+{
+ struct zcrypt_card *zc;
+
+ ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
+ AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+ zc = zq->zcard;
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zq->list);
+ zcrypt_device_count--;
+ spin_unlock(&zcrypt_list_lock);
+ if (zq->ops->rng)
+ zcrypt_rng_device_remove();
+ sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+ &zcrypt_queue_attr_group);
+ zcrypt_card_put(zc);
+ zcrypt_queue_put(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_unregister);