author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
commit     76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree       f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /drivers/s390/crypto
parent     Initial commit. (diff)
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/s390/crypto')
24 files changed, 9604 insertions, 0 deletions
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile new file mode 100644 index 000000000..b59af548e --- /dev/null +++ b/drivers/s390/crypto/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# S/390 crypto devices +# + +ap-objs := ap_bus.o ap_card.o ap_queue.o +obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o +# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o +zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o +zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o +obj-$(CONFIG_ZCRYPT) += zcrypt.o +# adapter drivers depend on ap.o and zcrypt.o +obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o + +# pkey kernel module +pkey-objs := pkey_api.o +obj-$(CONFIG_PKEY) += pkey.o diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c new file mode 100644 index 000000000..027a53eec --- /dev/null +++ b/drivers/s390/crypto/ap_bus.c @@ -0,0 +1,1620 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright IBM Corp. 2006, 2012 + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * Felix Beck <felix.beck@de.ibm.com> + * Holger Dengler <hd@linux.vnet.ibm.com> + * + * Adjunct processor bus. + */ + +#define KMSG_COMPONENT "ap" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel_stat.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/slab.h> +#include <linux/notifier.h> +#include <linux/kthread.h> +#include <linux/mutex.h> +#include <linux/suspend.h> +#include <asm/airq.h> +#include <linux/atomic.h> +#include <asm/isc.h> +#include <linux/hrtimer.h> +#include <linux/ktime.h> +#include <asm/facility.h> +#include <linux/crypto.h> +#include <linux/mod_devicetable.h> +#include <linux/debugfs.h> +#include <linux/ctype.h> + +#include "ap_bus.h" +#include "ap_debug.h" + +/* + * Module parameters; note though this file itself isn't modular. + */ +int ap_domain_index = -1; /* Adjunct Processor Domain Index */ +static DEFINE_SPINLOCK(ap_domain_lock); +module_param_named(domain, ap_domain_index, int, 0440); +MODULE_PARM_DESC(domain, "domain index for ap devices"); +EXPORT_SYMBOL(ap_domain_index); + +static int ap_thread_flag; +module_param_named(poll_thread, ap_thread_flag, int, 0440); +MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); + +static char *apm_str; +module_param_named(apmask, apm_str, charp, 0440); +MODULE_PARM_DESC(apmask, "AP bus adapter mask."); + +static char *aqm_str; +module_param_named(aqmask, aqm_str, charp, 0440); +MODULE_PARM_DESC(aqmask, "AP bus domain mask."); + +static struct device *ap_root_device; + +DEFINE_SPINLOCK(ap_list_lock); +LIST_HEAD(ap_card_list); + +/* Default permissions (card and domain masking) */ +static struct ap_perms { + DECLARE_BITMAP(apm, AP_DEVICES); + DECLARE_BITMAP(aqm, AP_DOMAINS); +} ap_perms; +static DEFINE_MUTEX(ap_perms_mutex); + +static struct ap_config_info *ap_configuration; +static bool initialised; + +/* + * AP bus related debug feature things. + */ +debug_info_t *ap_dbf_info; + +/* + * Workqueue timer for bus rescan. 
+ */ +static struct timer_list ap_config_timer; +static int ap_config_time = AP_CONFIG_TIME; +static void ap_scan_bus(struct work_struct *); +static DECLARE_WORK(ap_scan_work, ap_scan_bus); + +/* + * Tasklet & timer for AP request polling and interrupts + */ +static void ap_tasklet_fn(unsigned long); +static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0); +static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); +static struct task_struct *ap_poll_kthread; +static DEFINE_MUTEX(ap_poll_thread_mutex); +static DEFINE_SPINLOCK(ap_poll_timer_lock); +static struct hrtimer ap_poll_timer; +/* + * In LPAR, poll with a 4 kHz frequency, i.e. every 250000 nanoseconds. + * Under z/VM, change to 1500000 nanoseconds to adjust to the z/VM polling rate. + */ +static unsigned long long poll_timeout = 250000; + +/* Suspend flag */ +static int ap_suspend_flag; +/* Maximum domain id */ +static int ap_max_domain_id; +/* + * Flag to check if the domain was set through the module parameter domain=. This is + * important when suspend and resume are done in a z/VM environment where the + * domain might change. + */ +static int user_set_domain; +static struct bus_type ap_bus_type; + +/* Adapter interrupt definitions */ +static void ap_interrupt_handler(struct airq_struct *airq); + +static int ap_airq_flag; + +static struct airq_struct ap_airq = { + .handler = ap_interrupt_handler, + .isc = AP_ISC, +}; + +/** + * ap_using_interrupts() - Returns non-zero if interrupt support is + * available. + */ +static inline int ap_using_interrupts(void) +{ + return ap_airq_flag; +} + +/** + * ap_airq_ptr() - Get the address of the adapter interrupt indicator + * + * Returns the address of the local-summary-indicator of the adapter + * interrupt handler for AP, or NULL if adapter interrupts are not + * available. + */ +void *ap_airq_ptr(void) +{ + if (ap_using_interrupts()) + return ap_airq.lsi_ptr; + return NULL; +} + +/** + * ap_interrupts_available(): Test if AP interrupts are available. + * + * Returns 1 if AP interrupts are available. + */ +static int ap_interrupts_available(void) +{ + return test_facility(65); +} + +/** + * ap_configuration_available(): Test if AP configuration + * information is available. + * + * Returns 1 if AP configuration information is available. + */ +static int ap_configuration_available(void) +{ + return test_facility(12); +} + +/** + * ap_apft_available(): Test if the AP facilities test (APFT) + * facility is available. + * + * Returns 1 if APFT is available. + */ +static int ap_apft_available(void) +{ + return test_facility(15); +} + +/* + * ap_qact_available(): Test if the PQAP(QACT) subfunction is available. + * + * Returns 1 if the QACT subfunction is available. + */ +static inline int ap_qact_available(void) +{ + if (ap_configuration) + return ap_configuration->qact; + return 0; +} + +/* + * ap_query_configuration(): Fetch cryptographic config info + * + * Returns the ap configuration info fetched via PQAP(QCI). + * On success 0 is returned, on failure a negative errno + * is returned, e.g. if the PQAP(QCI) instruction is not + * available, the return value will be -EOPNOTSUPP. + */ +static inline int ap_query_configuration(struct ap_config_info *info) +{ + if (!ap_configuration_available()) + return -EOPNOTSUPP; + if (!info) + return -EINVAL; + return ap_qci(info); +} +EXPORT_SYMBOL(ap_query_configuration); + +/** + * ap_init_configuration(): Allocate and query configuration array.
+ */ +static void ap_init_configuration(void) +{ + if (!ap_configuration_available()) + return; + + ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL); + if (!ap_configuration) + return; + if (ap_query_configuration(ap_configuration) != 0) { + kfree(ap_configuration); + ap_configuration = NULL; + return; + } +} + +/* + * ap_test_config(): helper function to extract the nr'th bit + * within the unsigned int array field. + */ +static inline int ap_test_config(unsigned int *field, unsigned int nr) +{ + return ap_test_bit((field + (nr >> 5)), (nr & 0x1f)); +} + +/* + * ap_test_config_card_id(): Test whether an AP card ID is configured. + * @id AP card ID + * + * Returns 0 if the card is not configured + * 1 if the card is configured or + * if the configuration information is not available + */ +static inline int ap_test_config_card_id(unsigned int id) +{ + if (!ap_configuration) /* QCI not supported */ + /* only ids 0...3F may be probed */ + return id < 0x40 ? 1 : 0; + return ap_test_config(ap_configuration->apm, id); +} + +/* + * ap_test_config_domain(): Test whether an AP usage domain is configured. + * @domain AP usage domain ID + * + * Returns 0 if the usage domain is not configured + * 1 if the usage domain is configured or + * if the configuration information is not available + */ +static inline int ap_test_config_domain(unsigned int domain) +{ + if (!ap_configuration) /* QCI not supported */ + return domain < 16; + return ap_test_config(ap_configuration->aqm, domain); +} + +/** + * ap_query_queue(): Check if an AP queue is available. + * @qid: The AP queue number + * @queue_depth: Pointer to queue depth value + * @device_type: Pointer to device type value + * @facilities: Pointer to facility indicator + */ +static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type, + unsigned int *facilities) +{ + struct ap_queue_status status; + unsigned long info; + int nd; + + if (!ap_test_config_card_id(AP_QID_CARD(qid))) + return -ENODEV; + + status = ap_test_queue(qid, ap_apft_available(), &info); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + *queue_depth = (int)(info & 0xff); + *device_type = (int)((info >> 24) & 0xff); + *facilities = (unsigned int)(info >> 32); + /* Update maximum domain id */ + nd = (info >> 16) & 0xff; + /* if N bit is available, z13 and newer */ + if ((info & (1UL << 57)) && nd > 0) + ap_max_domain_id = nd; + else /* older machine types */ + ap_max_domain_id = 15; + switch (*device_type) { + /* For CEX2 and CEX3 the available functions + * are not reflected by the facilities bits. + * Instead they are coded into the type. So here + * modify the function bits based on the type. + */ + case AP_DEVICE_TYPE_CEX2A: + case AP_DEVICE_TYPE_CEX3A: + *facilities |= 0x08000000; + break; + case AP_DEVICE_TYPE_CEX2C: + case AP_DEVICE_TYPE_CEX3C: + *facilities |= 0x10000000; + break; + default: + break; + } + return 0; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + case AP_RESPONSE_INVALID_ADDRESS: + return -ENODEV; + case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_OTHERWISE_CHANGED: + case AP_RESPONSE_BUSY: + return -EBUSY; + default: + BUG(); + } +}
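Editor's note: the bit layout ap_query_queue() decodes from the TAPQ info word can be tried out in user space. This is a minimal sketch, not kernel code; the sample info value is made up, and only the shifts and masks mirror the function above (queue depth in the low byte, max domain id at bits 16-23 guarded by the N bit at bit 57, device type at bits 24-31, facility bits in the upper 32 bits):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical TAPQ info word, made up for illustration */
	uint64_t info = 0x120000000C540008ULL;

	int depth = (int)(info & 0xff);              /* queue depth */
	int dev_type = (int)((info >> 24) & 0xff);   /* AP device type */
	unsigned int facilities = (unsigned int)(info >> 32);
	int nd = (int)((info >> 16) & 0xff);         /* max domain id field */
	int n_bit = (info & (1ULL << 57)) != 0;      /* field valid on z13+ */

	printf("depth=%d type=%d facilities=0x%08x\n",
	       depth, dev_type, facilities);
	printf("max domain id=%d\n", (n_bit && nd > 0) ? nd : 15);
	return 0;
}
```

For the sample value this prints depth=8, type=12 (which would be a CEX6) and a max domain id of 84; without the N bit the bus falls back to 15, as in the code above.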
+void ap_wait(enum ap_wait wait) +{ + ktime_t hr_time; + + switch (wait) { + case AP_WAIT_AGAIN: + case AP_WAIT_INTERRUPT: + if (ap_using_interrupts()) + break; + if (ap_poll_kthread) { + wake_up(&ap_poll_wait); + break; + } + /* Fall through */ + case AP_WAIT_TIMEOUT: + spin_lock_bh(&ap_poll_timer_lock); + if (!hrtimer_is_queued(&ap_poll_timer)) { + hr_time = poll_timeout; + hrtimer_forward_now(&ap_poll_timer, hr_time); + hrtimer_restart(&ap_poll_timer); + } + spin_unlock_bh(&ap_poll_timer_lock); + break; + case AP_WAIT_NONE: + default: + break; + } +} + +/** + * ap_request_timeout(): Handling of request timeouts + * @t: timer making this callback + * + * Handles request timeouts. + */ +void ap_request_timeout(struct timer_list *t) +{ + struct ap_queue *aq = from_timer(aq, t, timeout); + + if (ap_suspend_flag) + return; + spin_lock_bh(&aq->lock); + ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT)); + spin_unlock_bh(&aq->lock); +} + +/** + * ap_poll_timeout(): AP receive polling for finished AP requests. + * @unused: Unused pointer. + * + * Schedules the AP tasklet using a high resolution timer. + */ +static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) +{ + if (!ap_suspend_flag) + tasklet_schedule(&ap_tasklet); + return HRTIMER_NORESTART; +} + +/** + * ap_interrupt_handler() - Schedule ap_tasklet on interrupt + * @airq: pointer to adapter interrupt descriptor + */ +static void ap_interrupt_handler(struct airq_struct *airq) +{ + inc_irq_stat(IRQIO_APB); + if (!ap_suspend_flag) + tasklet_schedule(&ap_tasklet); +} + +/** + * ap_tasklet_fn(): Tasklet to poll all AP devices. + * @dummy: Unused variable + * + * Poll all AP devices on the bus. + */ +static void ap_tasklet_fn(unsigned long dummy) +{ + struct ap_card *ac; + struct ap_queue *aq; + enum ap_wait wait = AP_WAIT_NONE; + + /* Reset the indicator if interrupts are used. Thus new interrupts can + * be received. Doing this at the beginning of the tasklet is therefore + * important so that no requests on any AP get lost. + */ + if (ap_using_interrupts()) + xchg(ap_airq.lsi_ptr, 0); + + spin_lock_bh(&ap_list_lock); + for_each_ap_card(ac) { + for_each_ap_queue(aq, ac) { + spin_lock_bh(&aq->lock); + wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); + } + } + spin_unlock_bh(&ap_list_lock); + + ap_wait(wait); +} + +static int ap_pending_requests(void) +{ + struct ap_card *ac; + struct ap_queue *aq; + + spin_lock_bh(&ap_list_lock); + for_each_ap_card(ac) { + for_each_ap_queue(aq, ac) { + if (aq->queue_count == 0) + continue; + spin_unlock_bh(&ap_list_lock); + return 1; + } + } + spin_unlock_bh(&ap_list_lock); + return 0; +}
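Editor's note: the min() in ap_tasklet_fn() works because enum ap_wait in ap_bus.h is deliberately ordered from most urgent (AP_WAIT_AGAIN) to least urgent (AP_WAIT_NONE), so folding the per-queue results with min() yields the most urgent action the bus must take. A stand-alone sketch of that reduction (enum values copied from ap_bus.h; the per-queue sample results are made up):

```c
#include <stdio.h>

/* Mirrors enum ap_wait from ap_bus.h: ordered most to least urgent,
 * so min() over all queues yields the most urgent overall action. */
enum ap_wait {
	AP_WAIT_AGAIN,     /* retry immediately */
	AP_WAIT_TIMEOUT,   /* wait for timeout */
	AP_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
	AP_WAIT_NONE,      /* no wait */
};

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* hypothetical per-queue results of ap_sm_event_loop() */
	enum ap_wait results[] = { AP_WAIT_NONE, AP_WAIT_INTERRUPT, AP_WAIT_TIMEOUT };
	enum ap_wait wait = AP_WAIT_NONE;
	unsigned int i;

	for (i = 0; i < sizeof(results) / sizeof(results[0]); i++)
		wait = min(wait, results[i]);

	printf("combined wait hint: %d\n", wait); /* 1, i.e. AP_WAIT_TIMEOUT */
	return 0;
}
```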
+/** + * ap_poll_thread(): Thread that polls for finished requests. + * @data: Unused pointer + * + * AP bus poll thread. The purpose of this thread is to poll for + * finished requests in a loop if there is a "free" cpu - that is + * a cpu that doesn't have anything better to do. The polling stops + * as soon as there is another task or if all messages have been + * delivered. + */ +static int ap_poll_thread(void *data) +{ + DECLARE_WAITQUEUE(wait, current); + + set_user_nice(current, MAX_NICE); + set_freezable(); + while (!kthread_should_stop()) { + add_wait_queue(&ap_poll_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + if (ap_suspend_flag || !ap_pending_requests()) { + schedule(); + try_to_freeze(); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&ap_poll_wait, &wait); + if (need_resched()) { + schedule(); + try_to_freeze(); + continue; + } + ap_tasklet_fn(0); + } + + return 0; +} + +static int ap_poll_thread_start(void) +{ + int rc; + + if (ap_using_interrupts() || ap_poll_kthread) + return 0; + mutex_lock(&ap_poll_thread_mutex); + ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); + rc = PTR_ERR_OR_ZERO(ap_poll_kthread); + if (rc) + ap_poll_kthread = NULL; + mutex_unlock(&ap_poll_thread_mutex); + return rc; +} + +static void ap_poll_thread_stop(void) +{ + if (!ap_poll_kthread) + return; + mutex_lock(&ap_poll_thread_mutex); + kthread_stop(ap_poll_kthread); + ap_poll_kthread = NULL; + mutex_unlock(&ap_poll_thread_mutex); +} + +#define is_card_dev(x) ((x)->parent == ap_root_device) +#define is_queue_dev(x) ((x)->parent != ap_root_device) + +/** + * ap_bus_match() + * @dev: Pointer to device + * @drv: Pointer to device_driver + * + * AP bus driver registration/unregistration. + */ +static int ap_bus_match(struct device *dev, struct device_driver *drv) +{ + struct ap_driver *ap_drv = to_ap_drv(drv); + struct ap_device_id *id; + + /* + * Compare device type of the device with the list of + * supported types of the device_driver. + */ + for (id = ap_drv->ids; id->match_flags; id++) { + if (is_card_dev(dev) && + id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE && + id->dev_type == to_ap_dev(dev)->device_type) + return 1; + if (is_queue_dev(dev) && + id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE && + id->dev_type == to_ap_dev(dev)->device_type) + return 1; + } + return 0; +} + +/** + * ap_uevent(): Uevent function for AP devices. + * @dev: Pointer to device + * @env: Pointer to kobj_uevent_env + * + * It sets up a single environment variable DEV_TYPE which contains the + * hardware device type. + */ +static int ap_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + int retval = 0; + + if (!ap_dev) + return -ENODEV; + + /* Set up DEV_TYPE environment variable. */ + retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type); + if (retval) + return retval; + + /* Add MODALIAS= */ + retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type); + + return retval; +} + +static int ap_dev_suspend(struct device *dev) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + + if (ap_dev->drv && ap_dev->drv->suspend) + ap_dev->drv->suspend(ap_dev); + return 0; +} + +static int ap_dev_resume(struct device *dev) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + + if (ap_dev->drv && ap_dev->drv->resume) + ap_dev->drv->resume(ap_dev); + return 0; +} + +static void ap_bus_suspend(void) +{ + AP_DBF(DBF_DEBUG, "%s running\n", __func__); + + ap_suspend_flag = 1; + /* + * Disable scanning for devices; we do not want to scan + * for them again after they have been removed.
+ */ + flush_work(&ap_scan_work); + tasklet_disable(&ap_tasklet); +} + +static int __ap_card_devices_unregister(struct device *dev, void *dummy) +{ + if (is_card_dev(dev)) + device_unregister(dev); + return 0; +} + +static int __ap_queue_devices_unregister(struct device *dev, void *dummy) +{ + if (is_queue_dev(dev)) + device_unregister(dev); + return 0; +} + +static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) +{ + if (is_queue_dev(dev) && + AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data) + device_unregister(dev); + return 0; +} + +static void ap_bus_resume(void) +{ + int rc; + + AP_DBF(DBF_DEBUG, "%s running\n", __func__); + + /* remove all queue devices */ + bus_for_each_dev(&ap_bus_type, NULL, NULL, + __ap_queue_devices_unregister); + /* remove all card devices */ + bus_for_each_dev(&ap_bus_type, NULL, NULL, + __ap_card_devices_unregister); + + /* Reset thin interrupt setting */ + if (ap_interrupts_available() && !ap_using_interrupts()) { + rc = register_adapter_interrupt(&ap_airq); + ap_airq_flag = (rc == 0); + } + if (!ap_interrupts_available() && ap_using_interrupts()) { + unregister_adapter_interrupt(&ap_airq); + ap_airq_flag = 0; + } + /* Reset domain */ + if (!user_set_domain) + ap_domain_index = -1; + /* Get things going again */ + ap_suspend_flag = 0; + if (ap_airq_flag) + xchg(ap_airq.lsi_ptr, 0); + tasklet_enable(&ap_tasklet); + queue_work(system_long_wq, &ap_scan_work); +} + +static int ap_power_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + switch (event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + ap_bus_suspend(); + break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + ap_bus_resume(); + break; + default: + break; + } + return NOTIFY_DONE; +} +static struct notifier_block ap_power_notifier = { + .notifier_call = ap_power_event, +}; + +static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume); + +static struct bus_type ap_bus_type = { + .name = "ap", + .match = &ap_bus_match, + .uevent = &ap_uevent, + .pm = &ap_bus_pm_ops, +}; + +static int __ap_revise_reserved(struct device *dev, void *dummy) +{ + int rc, card, queue, devres, drvres; + + if (is_queue_dev(dev)) { + card = AP_QID_CARD(to_ap_queue(dev)->qid); + queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); + mutex_lock(&ap_perms_mutex); + devres = test_bit_inv(card, ap_perms.apm) + && test_bit_inv(queue, ap_perms.aqm); + mutex_unlock(&ap_perms_mutex); + drvres = to_ap_drv(dev->driver)->flags + & AP_DRIVER_FLAG_DEFAULT; + if (!!devres != !!drvres) { + AP_DBF(DBF_DEBUG, "reprobing queue=%02x.%04x\n", + card, queue); + rc = device_reprobe(dev); + } + } + + return 0; +} + +static void ap_bus_revise_bindings(void) +{ + bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved); +} + +int ap_owned_by_def_drv(int card, int queue) +{ + int rc = 0; + + if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS) + return -EINVAL; + + mutex_lock(&ap_perms_mutex); + + if (test_bit_inv(card, ap_perms.apm) + && test_bit_inv(queue, ap_perms.aqm)) + rc = 1; + + mutex_unlock(&ap_perms_mutex); + + return rc; +} +EXPORT_SYMBOL(ap_owned_by_def_drv); + +int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm, + unsigned long *aqm) +{ + int card, queue, rc = 0; + + mutex_lock(&ap_perms_mutex); + + for (card = 0; !rc && card < AP_DEVICES; card++) + if (test_bit_inv(card, apm) && + test_bit_inv(card, ap_perms.apm)) + for (queue = 0; !rc && queue < AP_DOMAINS; queue++) + if (test_bit_inv(queue, aqm) && + test_bit_inv(queue, 
ap_perms.aqm)) + rc = 1; + + mutex_unlock(&ap_perms_mutex); + + return rc; +} +EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv); + +static int ap_device_probe(struct device *dev) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + struct ap_driver *ap_drv = to_ap_drv(dev->driver); + int card, queue, devres, drvres, rc; + + if (is_queue_dev(dev)) { + /* + * If the apqn is marked as reserved/used by ap bus and + * default drivers, only probe with drivers with the default + * flag set. If it is not marked, only probe with drivers + * with the default flag not set. + */ + card = AP_QID_CARD(to_ap_queue(dev)->qid); + queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); + mutex_lock(&ap_perms_mutex); + devres = test_bit_inv(card, ap_perms.apm) + && test_bit_inv(queue, ap_perms.aqm); + mutex_unlock(&ap_perms_mutex); + drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; + if (!!devres != !!drvres) + return -ENODEV; + /* (re-)init queue's state machine */ + ap_queue_reinit_state(to_ap_queue(dev)); + } + + /* Add queue/card to list of active queues/cards */ + spin_lock_bh(&ap_list_lock); + if (is_card_dev(dev)) + list_add(&to_ap_card(dev)->list, &ap_card_list); + else + list_add(&to_ap_queue(dev)->list, + &to_ap_queue(dev)->card->queues); + spin_unlock_bh(&ap_list_lock); + + ap_dev->drv = ap_drv; + rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; + + if (rc) { + spin_lock_bh(&ap_list_lock); + if (is_card_dev(dev)) + list_del_init(&to_ap_card(dev)->list); + else + list_del_init(&to_ap_queue(dev)->list); + spin_unlock_bh(&ap_list_lock); + ap_dev->drv = NULL; + } + + return rc; +} + +static int ap_device_remove(struct device *dev) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + struct ap_driver *ap_drv = ap_dev->drv; + + if (is_queue_dev(dev)) + ap_queue_remove(to_ap_queue(dev)); + if (ap_drv->remove) + ap_drv->remove(ap_dev); + + /* Remove queue/card from list of active queues/cards */ + spin_lock_bh(&ap_list_lock); + if (is_card_dev(dev)) + list_del_init(&to_ap_card(dev)->list); + else + list_del_init(&to_ap_queue(dev)->list); + spin_unlock_bh(&ap_list_lock); + + return 0; +} + +int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, + char *name) +{ + struct device_driver *drv = &ap_drv->driver; + + if (!initialised) + return -ENODEV; + + drv->bus = &ap_bus_type; + drv->probe = ap_device_probe; + drv->remove = ap_device_remove; + drv->owner = owner; + drv->name = name; + return driver_register(drv); +} +EXPORT_SYMBOL(ap_driver_register); + +void ap_driver_unregister(struct ap_driver *ap_drv) +{ + driver_unregister(&ap_drv->driver); +} +EXPORT_SYMBOL(ap_driver_unregister); + +void ap_bus_force_rescan(void) +{ + if (ap_suspend_flag) + return; + /* process an asynchronous bus rescan */ + del_timer(&ap_config_timer); + queue_work(system_long_wq, &ap_scan_work); + flush_work(&ap_scan_work); +} +EXPORT_SYMBOL(ap_bus_force_rescan); + +/* + * hex2bitmap() - parse hex mask string and set bitmap. + * Valid strings are like "0x012345678" with at least one valid hex digit. + * The rest of the bitmap to the right is padded with 0. No spaces are allowed + * within the string; the leading 0x may be omitted. + * Returns the bitmask with exactly the bits set as given by the hex + * string (both in big endian order). + */ +static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) +{ + int i, n, b; + + /* bits needs to be a multiple of 8 */ + if (bits & 0x07) + return -EINVAL; + + if (str[0] == '0' && str[1] == 'x') + str++; + if (*str == 'x') + str++; + + for (i = 0; isxdigit(*str) && i < bits; str++) { + b = hex_to_bin(*str); + for (n = 0; n < 4; n++) + if (b & (0x08 >> n)) + set_bit_inv(i + n, bitmap); + i += 4; + } + + if (*str == '\n') + str++; + if (*str) + return -EINVAL; + return 0; +}
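Editor's note: the hex mask syntax can be exercised in user space. Below is a minimal sketch of the hex2bitmap() semantics with an 8-bit demo bitmap (the kernel uses 256-bit masks and set_bit_inv()); bit 0 is the leftmost, most significant bit, matching the s390 inverted bit numbering. The relative "+/-" syntax is handled by modify_bitmap() further below. hex2bits() and its main() are hypothetical demo code, not part of the driver:

```c
#include <stdio.h>
#include <ctype.h>

/* User-space sketch of hex2bitmap() semantics: each hex digit fills four
 * bits, most significant bit first (bit 0 is the leftmost bit, as with
 * the kernel's set_bit_inv() on s390). Assumes an 8-bit demo bitmap. */
static int hex2bits(const char *str, unsigned char *map, int bits)
{
	int i, n, b;

	if (str[0] == '0' && str[1] == 'x')
		str += 2;
	for (i = 0; isxdigit((unsigned char)*str) && i < bits; str++) {
		b = (*str <= '9') ? *str - '0' : (tolower(*str) - 'a' + 10);
		for (n = 0; n < 4; n++)
			if (b & (0x08 >> n))
				map[(i + n) / 8] |= 0x80 >> ((i + n) % 8);
		i += 4;
	}
	return *str ? -1 : 0;
}

int main(void)
{
	unsigned char map[1] = { 0 };

	/* "0x81" sets bit 0 and bit 7 of the 8-bit mask */
	if (hex2bits("0x81", map, 8) == 0)
		printf("mask byte: 0x%02x\n", map[0]); /* prints 0x81 */
	return 0;
}
```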
+/* + * modify_bitmap() - parse bitmask argument and modify an existing + * bit mask accordingly. A concatenation (done with ',') of these + * terms is recognized: + * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>] + * <bitnr> may be any valid number (hex, decimal or octal) in the range + * 0...bits-1; the leading + or - is required. Here are some examples: + * +0-15,+32,-128,-0xFF + * -0-255,+1-16,+0x128 + * +1,+2,+3,+4,-5,-7-10 + * Returns the new bitmap after all changes have been applied. Every + * positive value in the string will set a bit and every negative value + * in the string will clear a bit. As a bit may be touched more than once, + * the last 'operation' wins: + * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be + * cleared again. All other bits are unmodified. + */ +static int modify_bitmap(const char *str, unsigned long *bitmap, int bits) +{ + int a, i, z; + char *np, sign; + + /* bits needs to be a multiple of 8 */ + if (bits & 0x07) + return -EINVAL; + + while (*str) { + sign = *str++; + if (sign != '+' && sign != '-') + return -EINVAL; + a = z = simple_strtoul(str, &np, 0); + if (str == np || a >= bits) + return -EINVAL; + str = np; + if (*str == '-') { + z = simple_strtoul(++str, &np, 0); + if (str == np || a > z || z >= bits) + return -EINVAL; + str = np; + } + for (i = a; i <= z; i++) + if (sign == '+') + set_bit_inv(i, bitmap); + else + clear_bit_inv(i, bitmap); + while (*str == ',' || *str == '\n') + str++; + } + + return 0; +} + +/* + * process_mask_arg() - parse a bitmap string and clear/set the + * bits in the bitmap accordingly. The string may be given as + * an absolute value, a hex string like "0x1F2E3D4C5B6A", simply + * overwriting the current content of the bitmap. Or as a relative + * string like "+1-16,-32,-0x40,+128" where only single bits or ranges of + * bits are cleared or set. The distinction is made based on the very + * first character, which may be '+' or '-' for the relative string; + * otherwise the string is assumed to be an absolute value. If parsing fails, + * a negative errno value is returned. All arguments and bitmaps are + * in big endian order. + */ +static int process_mask_arg(const char *str, + unsigned long *bitmap, int bits, + struct mutex *lock) +{ + unsigned long *newmap, size; + int rc; + + /* bits needs to be a multiple of 8 */ + if (bits & 0x07) + return -EINVAL; + + size = BITS_TO_LONGS(bits)*sizeof(unsigned long); + newmap = kmalloc(size, GFP_KERNEL); + if (!newmap) + return -ENOMEM; + if (mutex_lock_interruptible(lock)) { + kfree(newmap); + return -ERESTARTSYS; + } + + if (*str == '+' || *str == '-') { + memcpy(newmap, bitmap, size); + rc = modify_bitmap(str, newmap, bits); + } else { + memset(newmap, 0, size); + rc = hex2bitmap(str, newmap, bits); + } + if (rc == 0) + memcpy(bitmap, newmap, size); + mutex_unlock(lock); + kfree(newmap); + return rc; +} + +/* + * AP bus attributes.
+ */ + +static ssize_t ap_domain_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index); +} + +static ssize_t ap_domain_store(struct bus_type *bus, + const char *buf, size_t count) +{ + int domain; + + if (sscanf(buf, "%i\n", &domain) != 1 || + domain < 0 || domain > ap_max_domain_id || + !test_bit_inv(domain, ap_perms.aqm)) + return -EINVAL; + spin_lock_bh(&ap_domain_lock); + ap_domain_index = domain; + spin_unlock_bh(&ap_domain_lock); + + AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain); + + return count; +} + +static BUS_ATTR_RW(ap_domain); + +static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) +{ + if (!ap_configuration) /* QCI not supported */ + return snprintf(buf, PAGE_SIZE, "not supported\n"); + + return snprintf(buf, PAGE_SIZE, + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", + ap_configuration->adm[0], ap_configuration->adm[1], + ap_configuration->adm[2], ap_configuration->adm[3], + ap_configuration->adm[4], ap_configuration->adm[5], + ap_configuration->adm[6], ap_configuration->adm[7]); +} + +static BUS_ATTR_RO(ap_control_domain_mask); + +static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf) +{ + if (!ap_configuration) /* QCI not supported */ + return snprintf(buf, PAGE_SIZE, "not supported\n"); + + return snprintf(buf, PAGE_SIZE, + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", + ap_configuration->aqm[0], ap_configuration->aqm[1], + ap_configuration->aqm[2], ap_configuration->aqm[3], + ap_configuration->aqm[4], ap_configuration->aqm[5], + ap_configuration->aqm[6], ap_configuration->aqm[7]); +} + +static BUS_ATTR_RO(ap_usage_domain_mask); + +static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + ap_using_interrupts() ? 1 : 0); +} + +static BUS_ATTR_RO(ap_interrupts); + +static ssize_t config_time_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); +} + +static ssize_t config_time_store(struct bus_type *bus, + const char *buf, size_t count) +{ + int time; + + if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120) + return -EINVAL; + ap_config_time = time; + mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); + return count; +} + +static BUS_ATTR_RW(config_time); + +static ssize_t poll_thread_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 
1 : 0); +} + +static ssize_t poll_thread_store(struct bus_type *bus, + const char *buf, size_t count) +{ + int flag, rc; + + if (sscanf(buf, "%d\n", &flag) != 1) + return -EINVAL; + if (flag) { + rc = ap_poll_thread_start(); + if (rc) + count = rc; + } else + ap_poll_thread_stop(); + return count; +} + +static BUS_ATTR_RW(poll_thread); + +static ssize_t poll_timeout_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout); +} + +static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, + size_t count) +{ + unsigned long long time; + ktime_t hr_time; + + /* 120 seconds = maximum poll interval */ + if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || + time > 120000000000ULL) + return -EINVAL; + poll_timeout = time; + hr_time = poll_timeout; + + spin_lock_bh(&ap_poll_timer_lock); + hrtimer_cancel(&ap_poll_timer); + hrtimer_set_expires(&ap_poll_timer, hr_time); + hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS); + spin_unlock_bh(&ap_poll_timer_lock); + + return count; +} + +static BUS_ATTR_RW(poll_timeout); + +static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf) +{ + int max_domain_id; + + if (ap_configuration) + max_domain_id = ap_max_domain_id ? : -1; + else + max_domain_id = 15; + return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id); +} + +static BUS_ATTR_RO(ap_max_domain_id); + +static ssize_t apmask_show(struct bus_type *bus, char *buf) +{ + int rc; + + if (mutex_lock_interruptible(&ap_perms_mutex)) + return -ERESTARTSYS; + rc = snprintf(buf, PAGE_SIZE, + "0x%016lx%016lx%016lx%016lx\n", + ap_perms.apm[0], ap_perms.apm[1], + ap_perms.apm[2], ap_perms.apm[3]); + mutex_unlock(&ap_perms_mutex); + + return rc; +} + +static ssize_t apmask_store(struct bus_type *bus, const char *buf, + size_t count) +{ + int rc; + + rc = process_mask_arg(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex); + if (rc) + return rc; + + ap_bus_revise_bindings(); + + return count; +} + +static BUS_ATTR_RW(apmask); + +static ssize_t aqmask_show(struct bus_type *bus, char *buf) +{ + int rc; + + if (mutex_lock_interruptible(&ap_perms_mutex)) + return -ERESTARTSYS; + rc = snprintf(buf, PAGE_SIZE, + "0x%016lx%016lx%016lx%016lx\n", + ap_perms.aqm[0], ap_perms.aqm[1], + ap_perms.aqm[2], ap_perms.aqm[3]); + mutex_unlock(&ap_perms_mutex); + + return rc; +} + +static ssize_t aqmask_store(struct bus_type *bus, const char *buf, + size_t count) +{ + int rc; + + rc = process_mask_arg(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex); + if (rc) + return rc; + + ap_bus_revise_bindings(); + + return count; +} + +static BUS_ATTR_RW(aqmask); + +static struct bus_attribute *const ap_bus_attrs[] = { + &bus_attr_ap_domain, + &bus_attr_ap_control_domain_mask, + &bus_attr_ap_usage_domain_mask, + &bus_attr_config_time, + &bus_attr_poll_thread, + &bus_attr_ap_interrupts, + &bus_attr_poll_timeout, + &bus_attr_ap_max_domain_id, + &bus_attr_apmask, + &bus_attr_aqmask, + NULL, +}; + +/** + * ap_select_domain(): Select an AP domain if possible and we haven't + * already done so before. + */ +static void ap_select_domain(void) +{ + int count, max_count, best_domain; + struct ap_queue_status status; + int i, j; + + /* + * We want to use a single domain. Either the one specified with + * the "domain=" parameter or the domain with the maximum number + * of devices. + */ + spin_lock_bh(&ap_domain_lock); + if (ap_domain_index >= 0) { + /* Domain has already been selected. 
*/ + spin_unlock_bh(&ap_domain_lock); + return; + } + best_domain = -1; + max_count = 0; + for (i = 0; i < AP_DOMAINS; i++) { + if (!ap_test_config_domain(i) || + !test_bit_inv(i, ap_perms.aqm)) + continue; + count = 0; + for (j = 0; j < AP_DEVICES; j++) { + if (!ap_test_config_card_id(j)) + continue; + status = ap_test_queue(AP_MKQID(j, i), + ap_apft_available(), + NULL); + if (status.response_code != AP_RESPONSE_NORMAL) + continue; + count++; + } + if (count > max_count) { + max_count = count; + best_domain = i; + } + } + if (best_domain >= 0) { + ap_domain_index = best_domain; + AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index); + } + spin_unlock_bh(&ap_domain_lock); +} + +/* + * This function checks the type and returns either 0 for not + * supported or the highest compatible type value (which may + * include the input type value). + */ +static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func) +{ + int comp_type = 0; + + /* < CEX2A is not supported */ + if (rawtype < AP_DEVICE_TYPE_CEX2A) + return 0; + /* up to CEX6 known and fully supported */ + if (rawtype <= AP_DEVICE_TYPE_CEX6) + return rawtype; + /* + * unknown new type > CEX6, check for compatibility + * to the highest known and supported type which is + * currently CEX6 with the help of the QACT function. + */ + if (ap_qact_available()) { + struct ap_queue_status status; + union ap_qact_ap_info apinfo = {0}; + + apinfo.mode = (func >> 26) & 0x07; + apinfo.cat = AP_DEVICE_TYPE_CEX6; + status = ap_qact(qid, 0, &apinfo); + if (status.response_code == AP_RESPONSE_NORMAL + && apinfo.cat >= AP_DEVICE_TYPE_CEX2A + && apinfo.cat <= AP_DEVICE_TYPE_CEX6) + comp_type = apinfo.cat; + } + if (!comp_type) + AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n", + AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype); + else if (comp_type != rawtype) + AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n", + AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type); + return comp_type; +} + +/* + * helper function to be used with bus_find_dev + * matches for the card device with the given id + */ +static int __match_card_device_with_id(struct device *dev, void *data) +{ + return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data; +} + +/* helper function to be used with bus_find_dev + * matches for the queue device with a given qid + */ +static int __match_queue_device_with_qid(struct device *dev, void *data) +{ + return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data; +} + +/** + * ap_scan_bus(): Scan the AP bus for new devices + * Runs periodically, workqueue timer (ap_config_time) + */ +static void ap_scan_bus(struct work_struct *unused) +{ + struct ap_queue *aq; + struct ap_card *ac; + struct device *dev; + ap_qid_t qid; + int comp_type, depth = 0, type = 0; + unsigned int func = 0; + int rc, id, dom, borked, domains, defdomdevs = 0; + + AP_DBF(DBF_DEBUG, "%s running\n", __func__); + + ap_query_configuration(ap_configuration); + ap_select_domain(); + + for (id = 0; id < AP_DEVICES; id++) { + /* check if device is registered */ + dev = bus_find_device(&ap_bus_type, NULL, + (void *)(long) id, + __match_card_device_with_id); + ac = dev ? to_ap_card(dev) : NULL; + if (!ap_test_config_card_id(id)) { + if (dev) { + /* Card device has been removed from + * configuration, remove the belonging + * queue devices. 
+ */ + bus_for_each_dev(&ap_bus_type, NULL, + (void *)(long) id, + __ap_queue_devices_with_id_unregister); + /* now remove the card device */ + device_unregister(dev); + put_device(dev); + } + continue; + } + /* According to the configuration there should be a card + * device, so check if there is at least one valid queue + * and maybe create queue devices and the card device. + */ + domains = 0; + for (dom = 0; dom < AP_DOMAINS; dom++) { + qid = AP_MKQID(id, dom); + dev = bus_find_device(&ap_bus_type, NULL, + (void *)(long) qid, + __match_queue_device_with_qid); + aq = dev ? to_ap_queue(dev) : NULL; + if (!ap_test_config_domain(dom)) { + if (dev) { + /* Queue device exists but has been + * removed from configuration. + */ + device_unregister(dev); + put_device(dev); + } + continue; + } + rc = ap_query_queue(qid, &depth, &type, &func); + if (dev) { + spin_lock_bh(&aq->lock); + if (rc == -ENODEV || + /* adapter reconfiguration */ + (ac && ac->functions != func)) + aq->state = AP_STATE_BORKED; + borked = aq->state == AP_STATE_BORKED; + spin_unlock_bh(&aq->lock); + if (borked) /* Remove broken device */ + device_unregister(dev); + put_device(dev); + if (!borked) { + domains++; + if (dom == ap_domain_index) + defdomdevs++; + continue; + } + } + if (rc) + continue; + /* a new queue device is needed, check out comp type */ + comp_type = ap_get_compatible_type(qid, type, func); + if (!comp_type) + continue; + /* maybe a card device needs to be created first */ + if (!ac) { + ac = ap_card_create(id, depth, type, + comp_type, func); + if (!ac) + continue; + ac->ap_dev.device.bus = &ap_bus_type; + ac->ap_dev.device.parent = ap_root_device; + dev_set_name(&ac->ap_dev.device, + "card%02x", id); + /* Register card with AP bus */ + rc = device_register(&ac->ap_dev.device); + if (rc) { + put_device(&ac->ap_dev.device); + ac = NULL; + break; + } + /* get it and thus adjust reference counter */ + get_device(&ac->ap_dev.device); + } + /* now create the new queue device */ + aq = ap_queue_create(qid, comp_type); + if (!aq) + continue; + aq->card = ac; + aq->ap_dev.device.bus = &ap_bus_type; + aq->ap_dev.device.parent = &ac->ap_dev.device; + dev_set_name(&aq->ap_dev.device, + "%02x.%04x", id, dom); + /* Register device */ + rc = device_register(&aq->ap_dev.device); + if (rc) { + put_device(&aq->ap_dev.device); + continue; + } + domains++; + if (dom == ap_domain_index) + defdomdevs++; + } /* end domain loop */ + if (ac) { + /* remove card dev if there are no queue devices */ + if (!domains) + device_unregister(&ac->ap_dev.device); + put_device(&ac->ap_dev.device); + } + } /* end device loop */ + + if (ap_domain_index >= 0 && defdomdevs < 1) + AP_DBF(DBF_INFO, + "no queue device with default domain %d available\n", + ap_domain_index); + + mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); +} + +static void ap_config_timeout(struct timer_list *unused) +{ + if (ap_suspend_flag) + return; + queue_work(system_long_wq, &ap_scan_work); +} + +static int __init ap_debug_init(void) +{ + ap_dbf_info = debug_register("ap", 1, 1, + DBF_MAX_SPRINTF_ARGS * sizeof(long)); + debug_register_view(ap_dbf_info, &debug_sprintf_view); + debug_set_level(ap_dbf_info, DBF_ERR); + + return 0; +} + +static void __init ap_perms_init(void) +{ + /* all resources useable if no kernel parameter string given */ + memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm)); + memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm)); + + /* apm kernel parameter string */ + if (apm_str) { + memset(&ap_perms.apm, 0, sizeof(ap_perms.apm)); + 
process_mask_arg(apm_str, ap_perms.apm, AP_DEVICES, + &ap_perms_mutex); + } + + /* aqm kernel parameter string */ + if (aqm_str) { + memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm)); + process_mask_arg(aqm_str, ap_perms.aqm, AP_DOMAINS, + &ap_perms_mutex); + } +} + +/** + * ap_module_init(): The module initialization code. + * + * Initializes the module. + */ +static int __init ap_module_init(void) +{ + int max_domain_id; + int rc, i; + + rc = ap_debug_init(); + if (rc) + return rc; + + if (!ap_instructions_available()) { + pr_warn("The hardware system does not support AP instructions\n"); + return -ENODEV; + } + + /* set up the AP permissions (ap and aq masks) */ + ap_perms_init(); + + /* Get AP configuration data if available */ + ap_init_configuration(); + + if (ap_configuration) + max_domain_id = + ap_max_domain_id ? ap_max_domain_id : AP_DOMAINS - 1; + else + max_domain_id = 15; + if (ap_domain_index < -1 || ap_domain_index > max_domain_id || + (ap_domain_index >= 0 && + !test_bit_inv(ap_domain_index, ap_perms.aqm))) { + pr_warn("%d is not a valid cryptographic domain\n", + ap_domain_index); + ap_domain_index = -1; + } + /* In the resume callback we need to know whether the user had set the + * domain. If so, we cannot just reset it. + */ + if (ap_domain_index >= 0) + user_set_domain = 1; + + if (ap_interrupts_available()) { + rc = register_adapter_interrupt(&ap_airq); + ap_airq_flag = (rc == 0); + } + + /* Create /sys/bus/ap. */ + rc = bus_register(&ap_bus_type); + if (rc) + goto out; + for (i = 0; ap_bus_attrs[i]; i++) { + rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]); + if (rc) + goto out_bus; + } + + /* Create /sys/devices/ap. */ + ap_root_device = root_device_register("ap"); + rc = PTR_ERR_OR_ZERO(ap_root_device); + if (rc) + goto out_bus; + + /* Set up the AP bus rescan timer. */ + timer_setup(&ap_config_timer, ap_config_timeout, 0); + + /* + * Set up the high resolution poll timer. + * If we are running under z/VM adjust polling to the z/VM polling rate. + */ + if (MACHINE_IS_VM) + poll_timeout = 1500000; + spin_lock_init(&ap_poll_timer_lock); + hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + ap_poll_timer.function = ap_poll_timeout; + + /* Start the low priority AP bus poll thread. */ + if (ap_thread_flag) { + rc = ap_poll_thread_start(); + if (rc) + goto out_work; + } + + rc = register_pm_notifier(&ap_power_notifier); + if (rc) + goto out_pm; + + queue_work(system_long_wq, &ap_scan_work); + initialised = true; + + return 0; + +out_pm: + ap_poll_thread_stop(); +out_work: + hrtimer_cancel(&ap_poll_timer); + root_device_unregister(ap_root_device); +out_bus: + while (i--) + bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); + bus_unregister(&ap_bus_type); +out: + if (ap_using_interrupts()) + unregister_adapter_interrupt(&ap_airq); + kfree(ap_configuration); + return rc; +} +device_initcall(ap_module_init); diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h new file mode 100644 index 000000000..1c799ddd9 --- /dev/null +++ b/drivers/s390/crypto/ap_bus.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright IBM Corp. 2006, 2012 + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * Felix Beck <felix.beck@de.ibm.com> + * Holger Dengler <hd@linux.vnet.ibm.com> + * + * Adjunct processor bus header file.
+ */ + +#ifndef _AP_BUS_H_ +#define _AP_BUS_H_ + +#include <linux/device.h> +#include <linux/types.h> +#include <asm/isc.h> +#include <asm/ap.h> + +#define AP_DEVICES 256 /* Number of AP devices. */ +#define AP_DOMAINS 256 /* Number of AP domains. */ +#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */ +#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ +#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */ + +extern int ap_domain_index; + +extern spinlock_t ap_list_lock; +extern struct list_head ap_card_list; + +static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) +{ + return (*ptr & (0x80000000u >> nr)) != 0; +} + +#define AP_RESPONSE_NORMAL 0x00 +#define AP_RESPONSE_Q_NOT_AVAIL 0x01 +#define AP_RESPONSE_RESET_IN_PROGRESS 0x02 +#define AP_RESPONSE_DECONFIGURED 0x03 +#define AP_RESPONSE_CHECKSTOPPED 0x04 +#define AP_RESPONSE_BUSY 0x05 +#define AP_RESPONSE_INVALID_ADDRESS 0x06 +#define AP_RESPONSE_OTHERWISE_CHANGED 0x07 +#define AP_RESPONSE_Q_FULL 0x10 +#define AP_RESPONSE_NO_PENDING_REPLY 0x10 +#define AP_RESPONSE_INDEX_TOO_BIG 0x11 +#define AP_RESPONSE_NO_FIRST_PART 0x13 +#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15 +#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16 + +/* + * Known device types + */ +#define AP_DEVICE_TYPE_PCICC 3 +#define AP_DEVICE_TYPE_PCICA 4 +#define AP_DEVICE_TYPE_PCIXCC 5 +#define AP_DEVICE_TYPE_CEX2A 6 +#define AP_DEVICE_TYPE_CEX2C 7 +#define AP_DEVICE_TYPE_CEX3A 8 +#define AP_DEVICE_TYPE_CEX3C 9 +#define AP_DEVICE_TYPE_CEX4 10 +#define AP_DEVICE_TYPE_CEX5 11 +#define AP_DEVICE_TYPE_CEX6 12 + +/* + * Known function facilities + */ +#define AP_FUNC_MEX4K 1 +#define AP_FUNC_CRT4K 2 +#define AP_FUNC_COPRO 3 +#define AP_FUNC_ACCEL 4 +#define AP_FUNC_EP11 5 +#define AP_FUNC_APXA 6 + +/* + * AP interrupt states + */ +#define AP_INTR_DISABLED 0 /* AP interrupt disabled */ +#define AP_INTR_ENABLED 1 /* AP interrupt enabled */ + +/* + * AP device states + */ +enum ap_state { + AP_STATE_RESET_START, + AP_STATE_RESET_WAIT, + AP_STATE_SETIRQ_WAIT, + AP_STATE_IDLE, + AP_STATE_WORKING, + AP_STATE_QUEUE_FULL, + AP_STATE_SUSPEND_WAIT, + AP_STATE_BORKED, + NR_AP_STATES +}; + +/* + * AP device events + */ +enum ap_event { + AP_EVENT_POLL, + AP_EVENT_TIMEOUT, + NR_AP_EVENTS +}; + +/* + * AP wait behaviour + */ +enum ap_wait { + AP_WAIT_AGAIN, /* retry immediately */ + AP_WAIT_TIMEOUT, /* wait for timeout */ + AP_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */ + AP_WAIT_NONE, /* no wait */ + NR_AP_WAIT +}; + +struct ap_device; +struct ap_message; + +/* + * The ap driver struct includes a flags field which holds some info for + * the ap bus about the driver. Currently only one flag is supported and + * used: The DEFAULT flag marks an ap driver as a default driver which is + * used together with the apmask and aqmask whitelisting of the ap bus. + */ +#define AP_DRIVER_FLAG_DEFAULT 0x0001 + +struct ap_driver { + struct device_driver driver; + struct ap_device_id *ids; + unsigned int flags; + + int (*probe)(struct ap_device *); + void (*remove)(struct ap_device *); + void (*suspend)(struct ap_device *); + void (*resume)(struct ap_device *); +}; + +#define to_ap_drv(x) container_of((x), struct ap_driver, driver) + +int ap_driver_register(struct ap_driver *, struct module *, char *); +void ap_driver_unregister(struct ap_driver *); + +struct ap_device { + struct device device; + struct ap_driver *drv; /* Pointer to AP device driver. */ + int device_type; /* AP device type. 
*/ +}; + +#define to_ap_dev(x) container_of((x), struct ap_device, device) + +struct ap_card { + struct ap_device ap_dev; + struct list_head list; /* Private list of AP cards. */ + struct list_head queues; /* List of assoc. AP queues */ + void *private; /* ap driver private pointer. */ + int raw_hwtype; /* AP raw hardware type. */ + unsigned int functions; /* AP device function bitfield. */ + int queue_depth; /* AP queue depth.*/ + int id; /* AP card number. */ + atomic64_t total_request_count; /* # requests ever for this AP device.*/ +}; + +#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device) + +struct ap_queue { + struct ap_device ap_dev; + struct list_head list; /* Private list of AP queues. */ + struct ap_card *card; /* Ptr to assoc. AP card. */ + spinlock_t lock; /* Per device lock. */ + void *private; /* ap driver private pointer. */ + ap_qid_t qid; /* AP queue id. */ + int interrupt; /* indicate if interrupts are enabled */ + int queue_count; /* # messages currently on AP queue. */ + enum ap_state state; /* State of the AP device. */ + int pendingq_count; /* # requests on pendingq list. */ + int requestq_count; /* # requests on requestq list. */ + u64 total_request_count; /* # requests ever for this AP device.*/ + int request_timeout; /* Request timeout in jiffies. */ + struct timer_list timeout; /* Timer for request timeouts. */ + struct list_head pendingq; /* List of messages sent to AP queue. */ + struct list_head requestq; /* List of messages yet to be sent. */ + struct ap_message *reply; /* Per device reply message. */ +}; + +#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device) + +typedef enum ap_wait (ap_func_t)(struct ap_queue *queue); + +struct ap_message { + struct list_head list; /* Request queueing. */ + unsigned long long psmid; /* Message id. */ + void *message; /* Pointer to message buffer. */ + size_t length; /* Message length. */ + int rc; /* Return code for this message */ + + void *private; /* ap driver private pointer. */ + unsigned int special:1; /* Used for special commands. */ + /* receive is called from tasklet context */ + void (*receive)(struct ap_queue *, struct ap_message *, + struct ap_message *); +}; + +/** + * ap_init_message() - Initialize ap_message. + * Initialize a message before use; otherwise this might result in + * unexpected behaviour. + */ +static inline void ap_init_message(struct ap_message *ap_msg) +{ + memset(ap_msg, 0, sizeof(*ap_msg)); +} + +/** + * ap_release_message() - Release ap_message. + * Releases all memory used internally within the ap_message struct. + * Currently this is the message and private fields. + */ +static inline void ap_release_message(struct ap_message *ap_msg) +{ + kzfree(ap_msg->message); + kzfree(ap_msg->private); +} + +#define for_each_ap_card(_ac) \ + list_for_each_entry(_ac, &ap_card_list, list) + +#define for_each_ap_queue(_aq, _ac) \ + list_for_each_entry(_aq, &(_ac)->queues, list) + +/* + * Note: don't use ap_send/ap_recv after using ap_queue_message + * for the first time. Otherwise the ap message queue will get + * confused. + */
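Editor's note: replies are matched to outstanding requests by psmid rather than by ordering; ap_sm_recv() in ap_queue.c scans the pendingq for the entry whose psmid equals the reply's. A user-space toy that mirrors just this matching step (the struct and the values are made up; the real ap_message carries list linkage and the receive() callback declared above):

```c
#include <stdio.h>
#include <stdint.h>

/* toy stand-in for struct ap_message entries on the pendingq */
struct toy_msg {
	uint64_t psmid;   /* program supplied message id */
	const char *name;
};

int main(void)
{
	struct toy_msg pendingq[] = {
		{ 0x0001000100010001ULL, "request A" },
		{ 0x0001000100010002ULL, "request B" },
		{ 0x0001000100010003ULL, "request C" },
	};
	uint64_t reply_psmid = 0x0001000100010002ULL; /* made-up reply id */
	unsigned int i;

	/* mirrors the pendingq scan in ap_sm_recv() */
	for (i = 0; i < sizeof(pendingq) / sizeof(pendingq[0]); i++) {
		if (pendingq[i].psmid != reply_psmid)
			continue;
		printf("reply completes %s\n", pendingq[i].name);
		break;
	}
	return 0;
}
```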
+int ap_send(ap_qid_t, unsigned long long, void *, size_t); +int ap_recv(ap_qid_t, unsigned long long *, void *, size_t); + +enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event); +enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event); + +void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg); +void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg); +void ap_flush_queue(struct ap_queue *aq); + +void *ap_airq_ptr(void); +void ap_wait(enum ap_wait wait); +void ap_request_timeout(struct timer_list *t); +void ap_bus_force_rescan(void); + +void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); +struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); +void ap_queue_remove(struct ap_queue *aq); +void ap_queue_suspend(struct ap_device *ap_dev); +void ap_queue_resume(struct ap_device *ap_dev); +void ap_queue_reinit_state(struct ap_queue *aq); + +struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, + int comp_device_type, unsigned int functions); + +/* + * Check an APQN for being owned/reserved by the ap bus and default driver(s). + * Checks if this APQN is or will be in use by the ap bus + * and the default set of drivers. + * Returns 1 if so, 0 if not. On error a negative + * errno value is returned. + */ +int ap_owned_by_def_drv(int card, int queue); + +/* + * Check a 'matrix' of APQNs for being owned/reserved by the ap bus and + * default driver(s). + * Checks if there is at least one APQN in the given 'matrix' + * marked as owned/reserved by the ap bus and default driver(s). + * If such an APQN is found the return value is 1, otherwise + * 0 is returned. On error a negative errno value is returned. + * The parameter apm is a bitmask which should be declared + * as DECLARE_BITMAP(apm, AP_DEVICES); the aqm parameter is + * similar and should be declared as DECLARE_BITMAP(aqm, AP_DOMAINS). + */ +int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm, + unsigned long *aqm); + +#endif /* _AP_BUS_H_ */ diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c new file mode 100644 index 000000000..e85bfca1e --- /dev/null +++ b/drivers/s390/crypto/ap_card.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2016 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + * + * Adjunct processor bus, card related code. + */ + +#define KMSG_COMPONENT "ap" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/init.h> +#include <linux/slab.h> +#include <asm/facility.h> + +#include "ap_bus.h" + +/* + * AP card related attributes.
+ */ +static ssize_t hwtype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type); +} + +static DEVICE_ATTR_RO(hwtype); + +static ssize_t raw_hwtype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype); +} + +static DEVICE_ATTR_RO(raw_hwtype); + +static ssize_t depth_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth); +} + +static DEVICE_ATTR_RO(depth); + +static ssize_t ap_functions_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + + return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions); +} + +static DEVICE_ATTR_RO(ap_functions); + +static ssize_t request_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + u64 req_cnt; + + req_cnt = 0; + spin_lock_bh(&ap_list_lock); + req_cnt = atomic64_read(&ac->total_request_count); + spin_unlock_bh(&ap_list_lock); + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); +} + +static ssize_t request_count_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ap_card *ac = to_ap_card(dev); + struct ap_queue *aq; + + spin_lock_bh(&ap_list_lock); + for_each_ap_queue(aq, ac) + aq->total_request_count = 0; + spin_unlock_bh(&ap_list_lock); + atomic64_set(&ac->total_request_count, 0); + + return count; +} + +static DEVICE_ATTR_RW(request_count); + +static ssize_t requestq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + struct ap_queue *aq; + unsigned int reqq_cnt; + + reqq_cnt = 0; + spin_lock_bh(&ap_list_lock); + for_each_ap_queue(aq, ac) + reqq_cnt += aq->requestq_count; + spin_unlock_bh(&ap_list_lock); + return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); +} + +static DEVICE_ATTR_RO(requestq_count); + +static ssize_t pendingq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_card *ac = to_ap_card(dev); + struct ap_queue *aq; + unsigned int penq_cnt; + + penq_cnt = 0; + spin_lock_bh(&ap_list_lock); + for_each_ap_queue(aq, ac) + penq_cnt += aq->pendingq_count; + spin_unlock_bh(&ap_list_lock); + return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); +} + +static DEVICE_ATTR_RO(pendingq_count); + +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type); +} + +static DEVICE_ATTR_RO(modalias); + +static struct attribute *ap_card_dev_attrs[] = { + &dev_attr_hwtype.attr, + &dev_attr_raw_hwtype.attr, + &dev_attr_depth.attr, + &dev_attr_ap_functions.attr, + &dev_attr_request_count.attr, + &dev_attr_requestq_count.attr, + &dev_attr_pendingq_count.attr, + &dev_attr_modalias.attr, + NULL +}; + +static struct attribute_group ap_card_dev_attr_group = { + .attrs = ap_card_dev_attrs +}; + +static const struct attribute_group *ap_card_dev_attr_groups[] = { + &ap_card_dev_attr_group, + NULL +}; + +static struct device_type ap_card_type = { + .name = "ap_card", + .groups = ap_card_dev_attr_groups, +}; + +static void ap_card_device_release(struct device *dev) +{ + struct ap_card *ac = to_ap_card(dev); + + if 
(!list_empty(&ac->list)) { + spin_lock_bh(&ap_list_lock); + list_del_init(&ac->list); + spin_unlock_bh(&ap_list_lock); + } + kfree(ac); +} + +struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, + int comp_type, unsigned int functions) +{ + struct ap_card *ac; + + ac = kzalloc(sizeof(*ac), GFP_KERNEL); + if (!ac) + return NULL; + INIT_LIST_HEAD(&ac->list); + INIT_LIST_HEAD(&ac->queues); + ac->ap_dev.device.release = ap_card_device_release; + ac->ap_dev.device.type = &ap_card_type; + ac->ap_dev.device_type = comp_type; + ac->raw_hwtype = raw_type; + ac->queue_depth = queue_depth; + ac->functions = functions; + ac->id = id; + return ac; +} diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h new file mode 100644 index 000000000..dc675eb5a --- /dev/null +++ b/drivers/s390/crypto/ap_debug.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2016 + * Author(s): Harald Freudenberger <freude@de.ibm.com> + */ +#ifndef AP_DEBUG_H +#define AP_DEBUG_H + +#include <asm/debug.h> + +#define DBF_ERR 3 /* error conditions */ +#define DBF_WARN 4 /* warning conditions */ +#define DBF_INFO 5 /* informational */ +#define DBF_DEBUG 6 /* for debugging only */ + +#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) +#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) + +#define DBF_MAX_SPRINTF_ARGS 5 + +#define AP_DBF(...) \ + debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__) + +extern debug_info_t *ap_dbf_info; + +#endif /* AP_DEBUG_H */ diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c new file mode 100644 index 000000000..e1647da12 --- /dev/null +++ b/drivers/s390/crypto/ap_queue.c @@ -0,0 +1,758 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2016 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + * + * Adjunct processor bus, queue related code. + */ + +#define KMSG_COMPONENT "ap" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/init.h> +#include <linux/slab.h> +#include <asm/facility.h> + +#include "ap_bus.h" +#include "ap_debug.h" + +static void __ap_flush_queue(struct ap_queue *aq); + +/** + * ap_queue_enable_interruption(): Enable interruption on an AP queue. + * @qid: The AP queue number + * @ind: the notification indicator byte + * + * Enables interruption on AP queue via ap_aqic(). Based on the return + * value it waits a while and tests the AP queue if interrupts + * have been switched on using ap_test_queue(). + */ +static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind) +{ + struct ap_queue_status status; + struct ap_qirq_ctrl qirqctrl = { 0 }; + + qirqctrl.ir = 1; + qirqctrl.isc = AP_ISC; + status = ap_aqic(aq->qid, qirqctrl, ind); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + case AP_RESPONSE_OTHERWISE_CHANGED: + return 0; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + case AP_RESPONSE_INVALID_ADDRESS: + pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n", + AP_QID_CARD(aq->qid), + AP_QID_QUEUE(aq->qid)); + return -EOPNOTSUPP; + case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_BUSY: + default: + return -EBUSY; + } +} + +/** + * __ap_send(): Send message to adjunct processor queue. + * @qid: The AP queue number + * @psmid: The program supplied message identifier + * @msg: The message text + * @length: The message length + * @special: Special Bit + * + * Returns AP queue status structure. + * Condition code 1 on NQAP can't happen because the L bit is 1. 
+ * Condition code 2 on NQAP also means the send is incomplete, + * because a segment boundary was reached. The NQAP is repeated. + */ +static inline struct ap_queue_status +__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, + unsigned int special) +{ + if (special == 1) + qid |= 0x400000UL; + return ap_nqap(qid, psmid, msg, length); +} + +int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) +{ + struct ap_queue_status status; + + status = __ap_send(qid, psmid, msg, length, 0); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + return 0; + case AP_RESPONSE_Q_FULL: + case AP_RESPONSE_RESET_IN_PROGRESS: + return -EBUSY; + case AP_RESPONSE_REQ_FAC_NOT_INST: + return -EINVAL; + default: /* Device is gone. */ + return -ENODEV; + } +} +EXPORT_SYMBOL(ap_send); + +int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) +{ + struct ap_queue_status status; + + if (msg == NULL) + return -EINVAL; + status = ap_dqap(qid, psmid, msg, length); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + return 0; + case AP_RESPONSE_NO_PENDING_REPLY: + if (status.queue_empty) + return -ENOENT; + return -EBUSY; + case AP_RESPONSE_RESET_IN_PROGRESS: + return -EBUSY; + default: + return -ENODEV; + } +} +EXPORT_SYMBOL(ap_recv); + +/* State machine definitions and helpers */ + +static enum ap_wait ap_sm_nop(struct ap_queue *aq) +{ + return AP_WAIT_NONE; +} + +/** + * ap_sm_recv(): Receive pending reply messages from an AP queue but do + * not change the state of the device. + * @aq: pointer to the AP queue + * + * Returns the ap_queue_status as reported by the ap_dqap call. + */ +static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) +{ + struct ap_queue_status status; + struct ap_message *ap_msg; + + status = ap_dqap(aq->qid, &aq->reply->psmid, + aq->reply->message, aq->reply->length); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + aq->queue_count--; + if (aq->queue_count > 0) + mod_timer(&aq->timeout, + jiffies + aq->request_timeout); + list_for_each_entry(ap_msg, &aq->pendingq, list) { + if (ap_msg->psmid != aq->reply->psmid) + continue; + list_del_init(&ap_msg->list); + aq->pendingq_count--; + ap_msg->receive(aq, ap_msg, aq->reply); + break; + } + /* fall through */ + case AP_RESPONSE_NO_PENDING_REPLY: + if (!status.queue_empty || aq->queue_count <= 0) + break; + /* The card shouldn't forget requests but who knows. */ + aq->queue_count = 0; + list_splice_init(&aq->pendingq, &aq->requestq); + aq->requestq_count += aq->pendingq_count; + aq->pendingq_count = 0; + break; + default: + break; + } + return status; +} + +/** + * ap_sm_read(): Receive pending reply messages from an AP queue. + * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT + */ +static enum ap_wait ap_sm_read(struct ap_queue *aq) +{ + struct ap_queue_status status; + + if (!aq->reply) + return AP_WAIT_NONE; + status = ap_sm_recv(aq); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (aq->queue_count > 0) { + aq->state = AP_STATE_WORKING; + return AP_WAIT_AGAIN; + } + aq->state = AP_STATE_IDLE; + return AP_WAIT_NONE; + case AP_RESPONSE_NO_PENDING_REPLY: + if (aq->queue_count > 0) + return AP_WAIT_INTERRUPT; + aq->state = AP_STATE_IDLE; + return AP_WAIT_NONE; + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_suspend_read(): Receive pending reply messages from an AP queue + * without changing the device state in between. 
In suspend mode we don't + * allow sending new requests, therefore just fetch pending replies. + * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE or AP_WAIT_AGAIN + */ +static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq) +{ + struct ap_queue_status status; + + if (!aq->reply) + return AP_WAIT_NONE; + status = ap_sm_recv(aq); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (aq->queue_count > 0) + return AP_WAIT_AGAIN; + /* fall through */ + default: + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_write(): Send messages from the request queue to an AP queue. + * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, AP_WAIT_TIMEOUT or AP_WAIT_INTERRUPT + */ +static enum ap_wait ap_sm_write(struct ap_queue *aq) +{ + struct ap_queue_status status; + struct ap_message *ap_msg; + + if (aq->requestq_count <= 0) + return AP_WAIT_NONE; + /* Start the next request on the queue. */ + ap_msg = list_entry(aq->requestq.next, struct ap_message, list); + status = __ap_send(aq->qid, ap_msg->psmid, + ap_msg->message, ap_msg->length, ap_msg->special); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + aq->queue_count++; + if (aq->queue_count == 1) + mod_timer(&aq->timeout, jiffies + aq->request_timeout); + list_move_tail(&ap_msg->list, &aq->pendingq); + aq->requestq_count--; + aq->pendingq_count++; + if (aq->queue_count < aq->card->queue_depth) { + aq->state = AP_STATE_WORKING; + return AP_WAIT_AGAIN; + } + /* fall through */ + case AP_RESPONSE_Q_FULL: + aq->state = AP_STATE_QUEUE_FULL; + return AP_WAIT_INTERRUPT; + case AP_RESPONSE_RESET_IN_PROGRESS: + aq->state = AP_STATE_RESET_WAIT; + return AP_WAIT_TIMEOUT; + case AP_RESPONSE_MESSAGE_TOO_BIG: + case AP_RESPONSE_REQ_FAC_NOT_INST: + list_del_init(&ap_msg->list); + aq->requestq_count--; + ap_msg->rc = -EINVAL; + ap_msg->receive(aq, ap_msg, NULL); + return AP_WAIT_AGAIN; + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_read_write(): Send and receive messages to/from an AP queue. + * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT + */ +static enum ap_wait ap_sm_read_write(struct ap_queue *aq) +{ + return min(ap_sm_read(aq), ap_sm_write(aq)); +} + +/** + * ap_sm_reset(): Reset an AP queue. + * @aq: pointer to the AP queue + * + * Submit the Reset command to an AP queue. + */ +static enum ap_wait ap_sm_reset(struct ap_queue *aq) +{ + struct ap_queue_status status; + + status = ap_rapq(aq->qid); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + case AP_RESPONSE_RESET_IN_PROGRESS: + aq->state = AP_STATE_RESET_WAIT; + aq->interrupt = AP_INTR_DISABLED; + return AP_WAIT_TIMEOUT; + case AP_RESPONSE_BUSY: + return AP_WAIT_TIMEOUT; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_reset_wait(): Test queue for completion of the reset operation + * @aq: pointer to the AP queue + * + * Returns AP_WAIT_NONE, AP_WAIT_TIMEOUT or AP_WAIT_AGAIN. 
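+ * + * An illustrative sketch only (assumed caller-side pattern, mirroring + * ap_queue_message() further down): the bus drives this state machine + * by feeding AP_EVENT_POLL events and repeating while AP_WAIT_AGAIN + * comes back: + * + * enum ap_wait wait; + * + * spin_lock_bh(&aq->lock); + * wait = ap_sm_event_loop(aq, AP_EVENT_POLL); + * spin_unlock_bh(&aq->lock);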
+ */ +static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq) +{ + struct ap_queue_status status; + void *lsi_ptr; + + if (aq->queue_count > 0 && aq->reply) + /* Try to read a completed message and get the status */ + status = ap_sm_recv(aq); + else + /* Get the status with TAPQ */ + status = ap_tapq(aq->qid, NULL); + + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + lsi_ptr = ap_airq_ptr(); + if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0) + aq->state = AP_STATE_SETIRQ_WAIT; + else + aq->state = (aq->queue_count > 0) ? + AP_STATE_WORKING : AP_STATE_IDLE; + return AP_WAIT_AGAIN; + case AP_RESPONSE_BUSY: + case AP_RESPONSE_RESET_IN_PROGRESS: + return AP_WAIT_TIMEOUT; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/** + * ap_sm_setirq_wait(): Test queue for completion of the irq enablement + * @aq: pointer to the AP queue + * + * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. + */ +static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq) +{ + struct ap_queue_status status; + + if (aq->queue_count > 0 && aq->reply) + /* Try to read a completed message and get the status */ + status = ap_sm_recv(aq); + else + /* Get the status with TAPQ */ + status = ap_tapq(aq->qid, NULL); + + if (status.irq_enabled == 1) { + /* Irqs are now enabled */ + aq->interrupt = AP_INTR_ENABLED; + aq->state = (aq->queue_count > 0) ? + AP_STATE_WORKING : AP_STATE_IDLE; + } + + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (aq->queue_count > 0) + return AP_WAIT_AGAIN; + /* fallthrough */ + case AP_RESPONSE_NO_PENDING_REPLY: + return AP_WAIT_TIMEOUT; + default: + aq->state = AP_STATE_BORKED; + return AP_WAIT_NONE; + } +} + +/* + * AP state machine jump table + */ +static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { + [AP_STATE_RESET_START] = { + [AP_EVENT_POLL] = ap_sm_reset, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_RESET_WAIT] = { + [AP_EVENT_POLL] = ap_sm_reset_wait, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_SETIRQ_WAIT] = { + [AP_EVENT_POLL] = ap_sm_setirq_wait, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_IDLE] = { + [AP_EVENT_POLL] = ap_sm_write, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_WORKING] = { + [AP_EVENT_POLL] = ap_sm_read_write, + [AP_EVENT_TIMEOUT] = ap_sm_reset, + }, + [AP_STATE_QUEUE_FULL] = { + [AP_EVENT_POLL] = ap_sm_read, + [AP_EVENT_TIMEOUT] = ap_sm_reset, + }, + [AP_STATE_SUSPEND_WAIT] = { + [AP_EVENT_POLL] = ap_sm_suspend_read, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, + [AP_STATE_BORKED] = { + [AP_EVENT_POLL] = ap_sm_nop, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, +}; + +enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event) +{ + return ap_jumptable[aq->state][event](aq); +} + +enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event) +{ + enum ap_wait wait; + + while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN) + ; + return wait; +} + +/* + * Power management for queue devices + */ +void ap_queue_suspend(struct ap_device *ap_dev) +{ + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + + /* Poll on the device until all requests are finished. 
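+ * Each AP_EVENT_POLL pass below drains replies via ap_sm_suspend_read(); + * the loop ends once ap_sm_event() returns AP_WAIT_NONE, after which the + * queue is parked in AP_STATE_BORKED until it is reinitialized again + * (e.g. via ap_queue_reinit_state() further down).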
*/ + spin_lock_bh(&aq->lock); + aq->state = AP_STATE_SUSPEND_WAIT; + while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE) + ; + aq->state = AP_STATE_BORKED; + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_queue_suspend); + +void ap_queue_resume(struct ap_device *ap_dev) +{ +} +EXPORT_SYMBOL(ap_queue_resume); + +/* + * AP queue related attributes. + */ +static ssize_t request_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + u64 req_cnt; + + spin_lock_bh(&aq->lock); + req_cnt = aq->total_request_count; + spin_unlock_bh(&aq->lock); + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); +} + +static ssize_t request_count_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ap_queue *aq = to_ap_queue(dev); + + spin_lock_bh(&aq->lock); + aq->total_request_count = 0; + spin_unlock_bh(&aq->lock); + + return count; +} + +static DEVICE_ATTR_RW(request_count); + +static ssize_t requestq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + unsigned int reqq_cnt = 0; + + spin_lock_bh(&aq->lock); + reqq_cnt = aq->requestq_count; + spin_unlock_bh(&aq->lock); + return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); +} + +static DEVICE_ATTR_RO(requestq_count); + +static ssize_t pendingq_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + unsigned int penq_cnt = 0; + + spin_lock_bh(&aq->lock); + penq_cnt = aq->pendingq_count; + spin_unlock_bh(&aq->lock); + return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); +} + +static DEVICE_ATTR_RO(pendingq_count); + +static ssize_t reset_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + int rc = 0; + + spin_lock_bh(&aq->lock); + switch (aq->state) { + case AP_STATE_RESET_START: + case AP_STATE_RESET_WAIT: + rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n"); + break; + case AP_STATE_WORKING: + case AP_STATE_QUEUE_FULL: + rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); + break; + default: + rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); + } + spin_unlock_bh(&aq->lock); + return rc; +} + +static ssize_t reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ap_queue *aq = to_ap_queue(dev); + + spin_lock_bh(&aq->lock); + __ap_flush_queue(aq); + aq->state = AP_STATE_RESET_START; + ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); + + AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n", + AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); + + return count; +} + +static DEVICE_ATTR_RW(reset); + +static ssize_t interrupt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_queue *aq = to_ap_queue(dev); + int rc = 0; + + spin_lock_bh(&aq->lock); + if (aq->state == AP_STATE_SETIRQ_WAIT) + rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); + else if (aq->interrupt == AP_INTR_ENABLED) + rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); + else + rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); + spin_unlock_bh(&aq->lock); + return rc; +} + +static DEVICE_ATTR_RO(interrupt); + +static struct attribute *ap_queue_dev_attrs[] = { + &dev_attr_request_count.attr, + &dev_attr_requestq_count.attr, + &dev_attr_pendingq_count.attr, + &dev_attr_reset.attr, + &dev_attr_interrupt.attr, + NULL +}; + +static struct 
attribute_group ap_queue_dev_attr_group = { + .attrs = ap_queue_dev_attrs +}; + +static const struct attribute_group *ap_queue_dev_attr_groups[] = { + &ap_queue_dev_attr_group, + NULL +}; + +static struct device_type ap_queue_type = { + .name = "ap_queue", + .groups = ap_queue_dev_attr_groups, +}; + +static void ap_queue_device_release(struct device *dev) +{ + struct ap_queue *aq = to_ap_queue(dev); + + if (!list_empty(&aq->list)) { + spin_lock_bh(&ap_list_lock); + list_del_init(&aq->list); + spin_unlock_bh(&ap_list_lock); + } + kfree(aq); +} + +struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) +{ + struct ap_queue *aq; + + aq = kzalloc(sizeof(*aq), GFP_KERNEL); + if (!aq) + return NULL; + aq->ap_dev.device.release = ap_queue_device_release; + aq->ap_dev.device.type = &ap_queue_type; + aq->ap_dev.device_type = device_type; + aq->qid = qid; + aq->state = AP_STATE_RESET_START; + aq->interrupt = AP_INTR_DISABLED; + spin_lock_init(&aq->lock); + INIT_LIST_HEAD(&aq->list); + INIT_LIST_HEAD(&aq->pendingq); + INIT_LIST_HEAD(&aq->requestq); + timer_setup(&aq->timeout, ap_request_timeout, 0); + + return aq; +} + +void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply) +{ + aq->reply = reply; + + spin_lock_bh(&aq->lock); + ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_queue_init_reply); + +/** + * ap_queue_message(): Queue a request to an AP device. + * @aq: The AP device to queue the message to + * @ap_msg: The message that is to be added + */ +void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) +{ + /* For asynchronous message handling a valid receive-callback + * is required. + */ + BUG_ON(!ap_msg->receive); + + spin_lock_bh(&aq->lock); + /* Queue the message. */ + list_add_tail(&ap_msg->list, &aq->requestq); + aq->requestq_count++; + aq->total_request_count++; + atomic64_inc(&aq->card->total_request_count); + /* Send/receive as many request from the queue as possible. */ + ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_queue_message); + +/** + * ap_cancel_message(): Cancel a crypto request. + * @aq: The AP device that has the message queued + * @ap_msg: The message that is to be removed + * + * Cancel a crypto request. This is done by removing the request + * from the device pending or request queue. Note that the + * request stays on the AP queue. When it finishes the message + * reply will be discarded because the psmid can't be found. + */ +void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg) +{ + struct ap_message *tmp; + + spin_lock_bh(&aq->lock); + if (!list_empty(&ap_msg->list)) { + list_for_each_entry(tmp, &aq->pendingq, list) + if (tmp->psmid == ap_msg->psmid) { + aq->pendingq_count--; + goto found; + } + aq->requestq_count--; +found: + list_del_init(&ap_msg->list); + } + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_cancel_message); + +/** + * __ap_flush_queue(): Flush requests. + * @aq: Pointer to the AP queue + * + * Flush all requests from the request/pending queue of an AP device. 
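+ * Flushed requests are not silently dropped: each one is completed + * through its ->receive callback with ap_msg->rc set to -EAGAIN, so a + * caller still waiting in the msgtype layer wakes up and may retry on + * another queue.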
+ */ +static void __ap_flush_queue(struct ap_queue *aq) +{ + struct ap_message *ap_msg, *next; + + list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) { + list_del_init(&ap_msg->list); + aq->pendingq_count--; + ap_msg->rc = -EAGAIN; + ap_msg->receive(aq, ap_msg, NULL); + } + list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) { + list_del_init(&ap_msg->list); + aq->requestq_count--; + ap_msg->rc = -EAGAIN; + ap_msg->receive(aq, ap_msg, NULL); + } +} + +void ap_flush_queue(struct ap_queue *aq) +{ + spin_lock_bh(&aq->lock); + __ap_flush_queue(aq); + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_flush_queue); + +void ap_queue_remove(struct ap_queue *aq) +{ + ap_flush_queue(aq); + del_timer_sync(&aq->timeout); + + /* reset with zero, also clears irq registration */ + spin_lock_bh(&aq->lock); + ap_zapq(aq->qid); + aq->state = AP_STATE_BORKED; + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_queue_remove); + +void ap_queue_reinit_state(struct ap_queue *aq) +{ + spin_lock_bh(&aq->lock); + aq->state = AP_STATE_RESET_START; + ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); + spin_unlock_bh(&aq->lock); +} +EXPORT_SYMBOL(ap_queue_reinit_state); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c new file mode 100644 index 000000000..b16344479 --- /dev/null +++ b/drivers/s390/crypto/pkey_api.c @@ -0,0 +1,1227 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pkey device driver + * + * Copyright IBM Corp. 2017 + * Author(s): Harald Freudenberger + */ + +#define KMSG_COMPONENT "pkey" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/kallsyms.h> +#include <linux/debugfs.h> +#include <asm/zcrypt.h> +#include <asm/cpacf.h> +#include <asm/pkey.h> + +#include "zcrypt_api.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("s390 protected key interface"); + +/* Size of parameter block used for all cca requests/replies */ +#define PARMBSIZE 512 + +/* Size of vardata block used for some of the cca requests/replies */ +#define VARDATASIZE 4096 + +/* + * debug feature data and functions + */ + +static debug_info_t *debug_info; + +#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__) +#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__) +#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__) +#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__) + +static void __init pkey_debug_init(void) +{ + /* 5 arguments per dbf entry (including the format string ptr) */ + debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); + debug_register_view(debug_info, &debug_sprintf_view); + debug_set_level(debug_info, 3); +} + +static void __exit pkey_debug_exit(void) +{ + debug_unregister(debug_info); +} + +/* inside view of a secure key token (only type 0x01 version 0x04) */ +struct secaeskeytoken { + u8 type; /* 0x01 for internal key token */ + u8 res0[3]; + u8 version; /* should be 0x04 */ + u8 res1[1]; + u8 flag; /* key flags */ + u8 res2[1]; + u64 mkvp; /* master key verification pattern */ + u8 key[32]; /* key value (encrypted) */ + u8 cv[8]; /* control vector */ + u16 bitsize; /* key bit size */ + u16 keysize; /* key byte size */ + u8 tvv[4]; /* token validation value */ +} __packed; + +/* + * Simple check if the token is a valid CCA secure AES key + * token. If keybitsize is given, the bitsize of the key is + * also checked. 
Returns 0 on success or errno value on failure. + */ +static int check_secaeskeytoken(const u8 *token, int keybitsize) +{ + struct secaeskeytoken *t = (struct secaeskeytoken *) token; + + if (t->type != 0x01) { + DEBUG_ERR( + "%s secure token check failed, type mismatch 0x%02x != 0x01\n", + __func__, (int) t->type); + return -EINVAL; + } + if (t->version != 0x04) { + DEBUG_ERR( + "%s secure token check failed, version mismatch 0x%02x != 0x04\n", + __func__, (int) t->version); + return -EINVAL; + } + if (keybitsize > 0 && t->bitsize != keybitsize) { + DEBUG_ERR( + "%s secure token check failed, bitsize mismatch %d != %d\n", + __func__, (int) t->bitsize, keybitsize); + return -EINVAL; + } + + return 0; +} + +/* + * Allocate consecutive memory for request CPRB, request param + * block, reply CPRB and reply param block and fill in values + * for the common fields. Returns 0 on success or errno value + * on failure. + */ +static int alloc_and_prep_cprbmem(size_t paramblen, + u8 **pcprbmem, + struct CPRBX **preqCPRB, + struct CPRBX **prepCPRB) +{ + u8 *cprbmem; + size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; + struct CPRBX *preqcblk, *prepcblk; + + /* + * allocate consecutive memory for request CPRB, request param + * block, reply CPRB and reply param block + */ + cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL); + if (!cprbmem) + return -ENOMEM; + + preqcblk = (struct CPRBX *) cprbmem; + prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen); + + /* fill request cprb struct */ + preqcblk->cprb_len = sizeof(struct CPRBX); + preqcblk->cprb_ver_id = 0x02; + memcpy(preqcblk->func_id, "T2", 2); + preqcblk->rpl_msgbl = cprbplusparamblen; + if (paramblen) { + preqcblk->req_parmb = + ((u8 *) preqcblk) + sizeof(struct CPRBX); + preqcblk->rpl_parmb = + ((u8 *) prepcblk) + sizeof(struct CPRBX); + } + + *pcprbmem = cprbmem; + *preqCPRB = preqcblk; + *prepCPRB = prepcblk; + + return 0; +} + +/* + * Free the cprb memory allocated with the function above. + * If the scrub value is not zero, the memory is filled + * with zeros before freeing (useful if there was some + * clear key material in there). + */ +static void free_cprbmem(void *mem, size_t paramblen, int scrub) +{ + if (scrub) + memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen)); + kfree(mem); +} + +/* + * Helper function to prepare the xcrb struct + */ +static inline void prep_xcrb(struct ica_xcRB *pxcrb, + u16 cardnr, + struct CPRBX *preqcblk, + struct CPRBX *prepcblk) +{ + memset(pxcrb, 0, sizeof(*pxcrb)); + pxcrb->agent_ID = 0x4341; /* 'CA' */ + pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); + pxcrb->request_control_blk_length = + preqcblk->cprb_len + preqcblk->req_parml; + pxcrb->request_control_blk_addr = (void __user *) preqcblk; + pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; + pxcrb->reply_control_blk_addr = (void __user *) prepcblk; +} + +/* + * Helper function which calls zcrypt_send_cprb with + * memory management segment adjusted to kernel space + * so that the copy_from_user called within this + * function do in fact copy from kernel space. + */ +static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb) +{ + int rc; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + rc = zcrypt_send_cprb(xcrb); + set_fs(old_fs); + + return rc; +} + +/* + * Generate (random) AES secure key. 
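+ * + * Hedged usage sketch (the 0xFFFF card and domain values are + * placeholders requesting autoselection, cf. prep_xcrb() above and + * zcrypt_send_cprb()): + * + * struct pkey_seckey sk; + * int rc; + * + * rc = pkey_genseckey(0xFFFF, 0xFFFF, PKEY_KEYTYPE_AES_256, &sk); + * + * On success sk.seckey holds a SECKEYBLOBSIZE byte CCA AESDATA token.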
+ */ +int pkey_genseckey(u16 cardnr, u16 domain, + u32 keytype, struct pkey_seckey *seckey) +{ + int i, rc, keysize; + int seckeysize; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct kgreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv1 { + u16 len; + char key_form[8]; + char key_length[8]; + char key_type1[8]; + char key_type2[8]; + } lv1; + struct lv2 { + u16 len; + struct keyid { + u16 len; + u16 attr; + u8 data[SECKEYBLOBSIZE]; + } keyid[6]; + } lv2; + } *preqparm; + struct kgrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv3 { + u16 len; + u16 keyblocklen; + struct { + u16 toklen; + u16 tokattr; + u8 tok[0]; + /* ... some more data ... */ + } keyblock; + } lv3; + } *prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with KG request */ + preqparm = (struct kgreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "KG", 2); + preqparm->rule_array_len = sizeof(preqparm->rule_array_len); + preqparm->lv1.len = sizeof(struct lv1); + memcpy(preqparm->lv1.key_form, "OP ", 8); + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + keysize = 16; + memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8); + break; + case PKEY_KEYTYPE_AES_192: + keysize = 24; + memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8); + break; + case PKEY_KEYTYPE_AES_256: + keysize = 32; + memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8); + break; + default: + DEBUG_ERR( + "%s unknown/unsupported keytype %d\n", + __func__, keytype); + rc = -EINVAL; + goto out; + } + memcpy(preqparm->lv1.key_type1, "AESDATA ", 8); + preqparm->lv2.len = sizeof(struct lv2); + for (i = 0; i < 6; i++) { + preqparm->lv2.keyid[i].len = sizeof(struct keyid); + preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10); + } + preqcblk->req_parml = sizeof(struct kgreqparm); + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "%s secure key generate failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct kgrepparm *) prepcblk->rpl_parmb; + + /* check length of the returned secure key token */ + seckeysize = prepparm->lv3.keyblock.toklen + - sizeof(prepparm->lv3.keyblock.toklen) + - sizeof(prepparm->lv3.keyblock.tokattr); + if (seckeysize != SECKEYBLOBSIZE) { + DEBUG_ERR( + "%s secure token size mismatch %d != %d bytes\n", + __func__, seckeysize, SECKEYBLOBSIZE); + rc = -EIO; + goto out; + } + + /* check secure key token */ + rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize); + if (rc) { + rc = -EIO; + goto out; + } + + /* copy the generated secure key token */ + memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +} +EXPORT_SYMBOL(pkey_genseckey); + +/* + * Generate an AES secure key with given key value. 
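+ * + * Hedged usage sketch (raw_key_bytes is a hypothetical caller buffer + * holding 16 clear AES key bytes; note this path calls free_cprbmem() + * with scrub enabled): + * + * struct pkey_clrkey ck; + * struct pkey_seckey sk; + * int rc; + * + * memcpy(ck.clrkey, raw_key_bytes, 16); + * rc = pkey_clr2seckey(0xFFFF, 0xFFFF, PKEY_KEYTYPE_AES_128, &ck, &sk); + * memzero_explicit(&ck, sizeof(ck));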
+ */ +int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype, + const struct pkey_clrkey *clrkey, + struct pkey_seckey *seckey) +{ + int rc, keysize, seckeysize; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct cmreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + char rule_array[8]; + struct lv1 { + u16 len; + u8 clrkey[0]; + } lv1; + struct lv2 { + u16 len; + struct keyid { + u16 len; + u16 attr; + u8 data[SECKEYBLOBSIZE]; + } keyid; + } lv2; + } *preqparm; + struct lv2 *plv2; + struct cmrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv3 { + u16 len; + u16 keyblocklen; + struct { + u16 toklen; + u16 tokattr; + u8 tok[0]; + /* ... some more data ... */ + } keyblock; + } lv3; + } *prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with CM request */ + preqparm = (struct cmreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "CM", 2); + memcpy(preqparm->rule_array, "AES ", 8); + preqparm->rule_array_len = + sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array); + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + keysize = 16; + break; + case PKEY_KEYTYPE_AES_192: + keysize = 24; + break; + case PKEY_KEYTYPE_AES_256: + keysize = 32; + break; + default: + DEBUG_ERR( + "%s unknown/unsupported keytype %d\n", + __func__, keytype); + rc = -EINVAL; + goto out; + } + preqparm->lv1.len = sizeof(struct lv1) + keysize; + memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize); + plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize); + plv2->len = sizeof(struct lv2); + plv2->keyid.len = sizeof(struct keyid); + plv2->keyid.attr = 0x30; + preqcblk->req_parml = sizeof(struct cmreqparm) + keysize; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "%s clear key import failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct cmrepparm *) prepcblk->rpl_parmb; + + /* check length of the returned secure key token */ + seckeysize = prepparm->lv3.keyblock.toklen + - sizeof(prepparm->lv3.keyblock.toklen) + - sizeof(prepparm->lv3.keyblock.tokattr); + if (seckeysize != SECKEYBLOBSIZE) { + DEBUG_ERR( + "%s secure token size mismatch %d != %d bytes\n", + __func__, seckeysize, SECKEYBLOBSIZE); + rc = -EIO; + goto out; + } + + /* check secure key token */ + rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize); + if (rc) { + rc = -EIO; + goto out; + } + + /* copy the generated secure key token */ + memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); + +out: + free_cprbmem(mem, PARMBSIZE, 1); + return rc; +} +EXPORT_SYMBOL(pkey_clr2seckey); + +/* + * Derive a proteced key from the secure key blob. 
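+ * + * Hedged sketch, continuing the examples above (cardnr/domain as found + * by pkey_findcard() below; the addressed queue must hold the master + * key the token was wrapped with): + * + * struct pkey_protkey pk; + * int rc; + * + * rc = pkey_sec2protkey(cardnr, domain, &sk, &pk); + * + * On success pk.len is keysize + 32: the CPACF-wrapped key followed by + * the 32 byte verification pattern.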
+ */ +int pkey_sec2protkey(u16 cardnr, u16 domain, + const struct pkey_seckey *seckey, + struct pkey_protkey *protkey) +{ + int rc; + u8 *mem; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct uskreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv1 { + u16 len; + u16 attr_len; + u16 attr_flags; + } lv1; + struct lv2 { + u16 len; + u16 attr_len; + u16 attr_flags; + u8 token[0]; /* cca secure key token */ + } lv2 __packed; + } *preqparm; + struct uskrepparm { + u8 subfunc_code[2]; + u16 rule_array_len; + struct lv3 { + u16 len; + u16 attr_len; + u16 attr_flags; + struct cpacfkeyblock { + u8 version; /* version of this struct */ + u8 flags[2]; + u8 algo; + u8 form; + u8 pad1[3]; + u16 keylen; + u8 key[64]; /* the key (keylen bytes) */ + u16 keyattrlen; + u8 keyattr[32]; + u8 pad2[1]; + u8 vptype; + u8 vp[32]; /* verification pattern */ + } keyblock; + } lv3 __packed; + } *prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with USK request */ + preqparm = (struct uskreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "US", 2); + preqparm->rule_array_len = sizeof(preqparm->rule_array_len); + preqparm->lv1.len = sizeof(struct lv1); + preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len); + preqparm->lv1.attr_flags = 0x0001; + preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE; + preqparm->lv2.attr_len = sizeof(struct lv2) + - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE; + preqparm->lv2.attr_flags = 0x0000; + memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE); + preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + if (prepcblk->ccp_rscode != 0) { + DEBUG_WARN( + "%s unwrap secure key warning, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct uskrepparm *) prepcblk->rpl_parmb; + + /* check the returned keyblock */ + if (prepparm->lv3.keyblock.version != 0x01) { + DEBUG_ERR( + "%s reply param keyblock version mismatch 0x%02x != 0x01\n", + __func__, (int) prepparm->lv3.keyblock.version); + rc = -EIO; + goto out; + } + + /* copy the tanslated protected key */ + switch (prepparm->lv3.keyblock.keylen) { + case 16+32: + protkey->type = PKEY_KEYTYPE_AES_128; + break; + case 24+32: + protkey->type = PKEY_KEYTYPE_AES_192; + break; + case 32+32: + protkey->type = PKEY_KEYTYPE_AES_256; + break; + default: + DEBUG_ERR("%s unknown/unsupported keytype %d\n", + __func__, prepparm->lv3.keyblock.keylen); + rc = -EIO; + goto out; + } + protkey->len = prepparm->lv3.keyblock.keylen; + memcpy(protkey->protkey, prepparm->lv3.keyblock.key, 
protkey->len); + +out: + free_cprbmem(mem, PARMBSIZE, 0); + return rc; +} +EXPORT_SYMBOL(pkey_sec2protkey); + +/* + * Create a protected key from a clear key value. + */ +int pkey_clr2protkey(u32 keytype, + const struct pkey_clrkey *clrkey, + struct pkey_protkey *protkey) +{ + long fc; + int keysize; + u8 paramblock[64]; + + switch (keytype) { + case PKEY_KEYTYPE_AES_128: + keysize = 16; + fc = CPACF_PCKMO_ENC_AES_128_KEY; + break; + case PKEY_KEYTYPE_AES_192: + keysize = 24; + fc = CPACF_PCKMO_ENC_AES_192_KEY; + break; + case PKEY_KEYTYPE_AES_256: + keysize = 32; + fc = CPACF_PCKMO_ENC_AES_256_KEY; + break; + default: + DEBUG_ERR("%s unknown/unsupported keytype %d\n", + __func__, keytype); + return -EINVAL; + } + + /* prepare param block */ + memset(paramblock, 0, sizeof(paramblock)); + memcpy(paramblock, clrkey->clrkey, keysize); + + /* call the pckmo instruction */ + cpacf_pckmo(fc, paramblock); + + /* copy created protected key */ + protkey->type = keytype; + protkey->len = keysize + 32; + memcpy(protkey->protkey, paramblock, keysize + 32); + + return 0; +} +EXPORT_SYMBOL(pkey_clr2protkey); + +/* + * query cryptographic facility from adapter + */ +static int query_crypto_facility(u16 cardnr, u16 domain, + const char *keyword, + u8 *rarray, size_t *rarraylen, + u8 *varray, size_t *varraylen) +{ + int rc; + u16 len; + u8 *mem, *ptr; + struct CPRBX *preqcblk, *prepcblk; + struct ica_xcRB xcrb; + struct fqreqparm { + u8 subfunc_code[2]; + u16 rule_array_len; + char rule_array[8]; + struct lv1 { + u16 len; + u8 data[VARDATASIZE]; + } lv1; + u16 dummylen; + } *preqparm; + size_t parmbsize = sizeof(struct fqreqparm); + struct fqrepparm { + u8 subfunc_code[2]; + u8 lvdata[0]; + } *prepparm; + + /* get already prepared memory for 2 cprbs with param block each */ + rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk); + if (rc) + return rc; + + /* fill request cprb struct */ + preqcblk->domain = domain; + + /* fill request cprb param block with FQ request */ + preqparm = (struct fqreqparm *) preqcblk->req_parmb; + memcpy(preqparm->subfunc_code, "FQ", 2); + memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array)); + preqparm->rule_array_len = + sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array); + preqparm->lv1.len = sizeof(preqparm->lv1); + preqparm->dummylen = sizeof(preqparm->dummylen); + preqcblk->req_parml = parmbsize; + + /* fill xcrb struct */ + prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); + + /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ + rc = _zcrypt_send_cprb(&xcrb); + if (rc) { + DEBUG_ERR( + "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n", + __func__, (int) cardnr, (int) domain, rc); + goto out; + } + + /* check response returncode and reasoncode */ + if (prepcblk->ccp_rtcode != 0) { + DEBUG_ERR( + "%s unwrap secure key failure, card response %d/%d\n", + __func__, + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + rc = -EIO; + goto out; + } + + /* process response cprb param block */ + prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); + prepparm = (struct fqrepparm *) prepcblk->rpl_parmb; + ptr = prepparm->lvdata; + + /* check and possibly copy reply rule array */ + len = *((u16 *) ptr); + if (len > sizeof(u16)) { + ptr += sizeof(u16); + len -= sizeof(u16); + if (rarray && rarraylen && *rarraylen > 0) { + *rarraylen = (len > *rarraylen ? 
*rarraylen : len); + memcpy(rarray, ptr, *rarraylen); + } + ptr += len; + } + /* check and possible copy reply var array */ + len = *((u16 *) ptr); + if (len > sizeof(u16)) { + ptr += sizeof(u16); + len -= sizeof(u16); + if (varray && varraylen && *varraylen > 0) { + *varraylen = (len > *varraylen ? *varraylen : len); + memcpy(varray, ptr, *varraylen); + } + ptr += len; + } + +out: + free_cprbmem(mem, parmbsize, 0); + return rc; +} + +/* + * Fetch the current and old mkvp values via + * query_crypto_facility from adapter. + */ +static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2]) +{ + int rc, found = 0; + size_t rlen, vlen; + u8 *rarray, *varray, *pg; + + pg = (u8 *) __get_free_page(GFP_KERNEL); + if (!pg) + return -ENOMEM; + rarray = pg; + varray = pg + PAGE_SIZE/2; + rlen = vlen = PAGE_SIZE/2; + + rc = query_crypto_facility(cardnr, domain, "STATICSA", + rarray, &rlen, varray, &vlen); + if (rc == 0 && rlen > 8*8 && vlen > 184+8) { + if (rarray[8*8] == '2') { + /* current master key state is valid */ + mkvp[0] = *((u64 *)(varray + 184)); + mkvp[1] = *((u64 *)(varray + 172)); + found = 1; + } + } + + free_page((unsigned long) pg); + + return found ? 0 : -ENOENT; +} + +/* struct to hold cached mkvp info for each card/domain */ +struct mkvp_info { + struct list_head list; + u16 cardnr; + u16 domain; + u64 mkvp[2]; +}; + +/* a list with mkvp_info entries */ +static LIST_HEAD(mkvp_list); +static DEFINE_SPINLOCK(mkvp_list_lock); + +static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2]) +{ + int rc = -ENOENT; + struct mkvp_info *ptr; + + spin_lock_bh(&mkvp_list_lock); + list_for_each_entry(ptr, &mkvp_list, list) { + if (ptr->cardnr == cardnr && + ptr->domain == domain) { + memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64)); + rc = 0; + break; + } + } + spin_unlock_bh(&mkvp_list_lock); + + return rc; +} + +static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2]) +{ + int found = 0; + struct mkvp_info *ptr; + + spin_lock_bh(&mkvp_list_lock); + list_for_each_entry(ptr, &mkvp_list, list) { + if (ptr->cardnr == cardnr && + ptr->domain == domain) { + memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); + found = 1; + break; + } + } + if (!found) { + ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); + if (!ptr) { + spin_unlock_bh(&mkvp_list_lock); + return; + } + ptr->cardnr = cardnr; + ptr->domain = domain; + memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); + list_add(&ptr->list, &mkvp_list); + } + spin_unlock_bh(&mkvp_list_lock); +} + +static void mkvp_cache_scrub(u16 cardnr, u16 domain) +{ + struct mkvp_info *ptr; + + spin_lock_bh(&mkvp_list_lock); + list_for_each_entry(ptr, &mkvp_list, list) { + if (ptr->cardnr == cardnr && + ptr->domain == domain) { + list_del(&ptr->list); + kfree(ptr); + break; + } + } + spin_unlock_bh(&mkvp_list_lock); +} + +static void __exit mkvp_cache_free(void) +{ + struct mkvp_info *ptr, *pnext; + + spin_lock_bh(&mkvp_list_lock); + list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) { + list_del(&ptr->list); + kfree(ptr); + } + spin_unlock_bh(&mkvp_list_lock); +} + +/* + * Search for a matching crypto card based on the Master Key + * Verification Pattern provided inside a secure key. 
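+ * + * The lookup is two-pass: cached mkvp values are tried first (and + * optionally re-verified against the adapter), then a fresh fetch_mkvp() + * sweep over all online CCA cards is done; a card whose old master key + * register matches is used only as a last resort. Hedged sketch: + * + * u16 cardnr, domain; + * int rc; + * + * rc = pkey_findcard(&sk, &cardnr, &domain, 0); + * + * rc == 0 means cardnr/domain can unwrap the token in sk.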
+ */ +int pkey_findcard(const struct pkey_seckey *seckey, + u16 *pcardnr, u16 *pdomain, int verify) +{ + struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; + struct zcrypt_device_status_ext *device_status; + u16 card, dom; + u64 mkvp[2]; + int i, rc, oi = -1; + + /* mkvp must not be zero */ + if (t->mkvp == 0) + return -EINVAL; + + /* fetch status of all crypto cards */ + device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, + sizeof(struct zcrypt_device_status_ext), + GFP_KERNEL); + if (!device_status) + return -ENOMEM; + zcrypt_device_status_mask_ext(device_status); + + /* walk through all crypto cards */ + for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + card = AP_QID_CARD(device_status[i].qid); + dom = AP_QID_QUEUE(device_status[i].qid); + if (device_status[i].online && + device_status[i].functions & 0x04) { + /* an enabled CCA Coprocessor card */ + /* try cached mkvp */ + if (mkvp_cache_fetch(card, dom, mkvp) == 0 && + t->mkvp == mkvp[0]) { + if (!verify) + break; + /* verify: fetch mkvp from adapter */ + if (fetch_mkvp(card, dom, mkvp) == 0) { + mkvp_cache_update(card, dom, mkvp); + if (t->mkvp == mkvp[0]) + break; + } + } + } else { + /* Card is offline and/or not a CCA card. */ + /* del mkvp entry from cache if it exists */ + mkvp_cache_scrub(card, dom); + } + } + if (i >= MAX_ZDEV_ENTRIES_EXT) { + /* nothing found, so this time without cache */ + for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + if (!(device_status[i].online && + device_status[i].functions & 0x04)) + continue; + card = AP_QID_CARD(device_status[i].qid); + dom = AP_QID_QUEUE(device_status[i].qid); + /* fresh fetch mkvp from adapter */ + if (fetch_mkvp(card, dom, mkvp) == 0) { + mkvp_cache_update(card, dom, mkvp); + if (t->mkvp == mkvp[0]) + break; + if (t->mkvp == mkvp[1] && oi < 0) + oi = i; + } + } + if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) { + /* old mkvp matched, use this card then */ + card = AP_QID_CARD(device_status[oi].qid); + dom = AP_QID_QUEUE(device_status[oi].qid); + } + } + if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) { + if (pcardnr) + *pcardnr = card; + if (pdomain) + *pdomain = dom; + rc = 0; + } else + rc = -ENODEV; + + kfree(device_status); + return rc; +} +EXPORT_SYMBOL(pkey_findcard); + +/* + * Find card and transform secure key into protected key. + */ +int pkey_skey2pkey(const struct pkey_seckey *seckey, + struct pkey_protkey *protkey) +{ + u16 cardnr, domain; + int rc, verify; + + /* + * The pkey_sec2protkey call may fail when a card has been + * addressed where the master key was changed after last fetch + * of the mkvp into the cache. So first try without verify then + * with verify enabled (thus refreshing the mkvp for each card). + */ + for (verify = 0; verify < 2; verify++) { + rc = pkey_findcard(seckey, &cardnr, &domain, verify); + if (rc) + continue; + rc = pkey_sec2protkey(cardnr, domain, seckey, protkey); + if (rc == 0) + break; + } + + if (rc) + DEBUG_DBG("%s failed rc=%d\n", __func__, rc); + + return rc; +} +EXPORT_SYMBOL(pkey_skey2pkey); + +/* + * Verify key and give back some info about the key. 
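+ * + * Hedged usage sketch: + * + * u16 card, dom, bits; + * u32 attr; + * int rc; + * + * rc = pkey_verifykey(&sk, &card, &dom, &bits, &attr); + * + * A set PKEY_VERIFY_ATTR_OLD_MKVP bit in attr flags a token wrapped + * with the old master key, i.e. one that should be re-enciphered.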
+ */ +int pkey_verifykey(const struct pkey_seckey *seckey, + u16 *pcardnr, u16 *pdomain, + u16 *pkeysize, u32 *pattributes) +{ + struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; + u16 cardnr, domain; + u64 mkvp[2]; + int rc; + + /* check the secure key for valid AES secure key */ + rc = check_secaeskeytoken((u8 *) seckey, 0); + if (rc) + goto out; + if (pattributes) + *pattributes = PKEY_VERIFY_ATTR_AES; + if (pkeysize) + *pkeysize = t->bitsize; + + /* try to find a card which can handle this key */ + rc = pkey_findcard(seckey, &cardnr, &domain, 1); + if (rc) + goto out; + + /* check mkvp for old mkvp match */ + rc = mkvp_cache_fetch(cardnr, domain, mkvp); + if (rc) + goto out; + if (t->mkvp == mkvp[1]) { + DEBUG_DBG("%s secure key has old mkvp\n", __func__); + if (pattributes) + *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP; + } + + if (pcardnr) + *pcardnr = cardnr; + if (pdomain) + *pdomain = domain; + +out: + DEBUG_DBG("%s rc=%d\n", __func__, rc); + return rc; +} +EXPORT_SYMBOL(pkey_verifykey); + +/* + * File io functions + */ + +static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int rc; + + switch (cmd) { + case PKEY_GENSECK: { + struct pkey_genseck __user *ugs = (void __user *) arg; + struct pkey_genseck kgs; + + if (copy_from_user(&kgs, ugs, sizeof(kgs))) + return -EFAULT; + rc = pkey_genseckey(kgs.cardnr, kgs.domain, + kgs.keytype, &kgs.seckey); + DEBUG_DBG("%s pkey_genseckey()=%d\n", __func__, rc); + if (rc) + break; + if (copy_to_user(ugs, &kgs, sizeof(kgs))) + return -EFAULT; + break; + } + case PKEY_CLR2SECK: { + struct pkey_clr2seck __user *ucs = (void __user *) arg; + struct pkey_clr2seck kcs; + + if (copy_from_user(&kcs, ucs, sizeof(kcs))) + return -EFAULT; + rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype, + &kcs.clrkey, &kcs.seckey); + DEBUG_DBG("%s pkey_clr2seckey()=%d\n", __func__, rc); + if (rc) + break; + if (copy_to_user(ucs, &kcs, sizeof(kcs))) + return -EFAULT; + memzero_explicit(&kcs, sizeof(kcs)); + break; + } + case PKEY_SEC2PROTK: { + struct pkey_sec2protk __user *usp = (void __user *) arg; + struct pkey_sec2protk ksp; + + if (copy_from_user(&ksp, usp, sizeof(ksp))) + return -EFAULT; + rc = pkey_sec2protkey(ksp.cardnr, ksp.domain, + &ksp.seckey, &ksp.protkey); + DEBUG_DBG("%s pkey_sec2protkey()=%d\n", __func__, rc); + if (rc) + break; + if (copy_to_user(usp, &ksp, sizeof(ksp))) + return -EFAULT; + break; + } + case PKEY_CLR2PROTK: { + struct pkey_clr2protk __user *ucp = (void __user *) arg; + struct pkey_clr2protk kcp; + + if (copy_from_user(&kcp, ucp, sizeof(kcp))) + return -EFAULT; + rc = pkey_clr2protkey(kcp.keytype, + &kcp.clrkey, &kcp.protkey); + DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc); + if (rc) + break; + if (copy_to_user(ucp, &kcp, sizeof(kcp))) + return -EFAULT; + memzero_explicit(&kcp, sizeof(kcp)); + break; + } + case PKEY_FINDCARD: { + struct pkey_findcard __user *ufc = (void __user *) arg; + struct pkey_findcard kfc; + + if (copy_from_user(&kfc, ufc, sizeof(kfc))) + return -EFAULT; + rc = pkey_findcard(&kfc.seckey, + &kfc.cardnr, &kfc.domain, 1); + DEBUG_DBG("%s pkey_findcard()=%d\n", __func__, rc); + if (rc) + break; + if (copy_to_user(ufc, &kfc, sizeof(kfc))) + return -EFAULT; + break; + } + case PKEY_SKEY2PKEY: { + struct pkey_skey2pkey __user *usp = (void __user *) arg; + struct pkey_skey2pkey ksp; + + if (copy_from_user(&ksp, usp, sizeof(ksp))) + return -EFAULT; + rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey); + DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc); + if 
(rc) + break; + if (copy_to_user(usp, &ksp, sizeof(ksp))) + return -EFAULT; + break; + } + case PKEY_VERIFYKEY: { + struct pkey_verifykey __user *uvk = (void __user *) arg; + struct pkey_verifykey kvk; + + if (copy_from_user(&kvk, uvk, sizeof(kvk))) + return -EFAULT; + rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain, + &kvk.keysize, &kvk.attributes); + DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc); + if (rc) + break; + if (copy_to_user(uvk, &kvk, sizeof(kvk))) + return -EFAULT; + break; + } + default: + /* unknown/unsupported ioctl cmd */ + return -ENOTTY; + } + + return rc; +} + +/* + * Sysfs and file io operations + */ +static const struct file_operations pkey_fops = { + .owner = THIS_MODULE, + .open = nonseekable_open, + .llseek = no_llseek, + .unlocked_ioctl = pkey_unlocked_ioctl, +}; + +static struct miscdevice pkey_dev = { + .name = "pkey", + .minor = MISC_DYNAMIC_MINOR, + .mode = 0666, + .fops = &pkey_fops, +}; + +/* + * Module init + */ +static int __init pkey_init(void) +{ + cpacf_mask_t pckmo_functions; + + /* check for pckmo instructions available */ + if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) + return -EOPNOTSUPP; + if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY) || + !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_192_KEY) || + !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_256_KEY)) + return -EOPNOTSUPP; + + pkey_debug_init(); + + return misc_register(&pkey_dev); +} + +/* + * Module exit + */ +static void __exit pkey_exit(void) +{ + misc_deregister(&pkey_dev); + mkvp_cache_free(); + pkey_debug_exit(); +} + +module_init(pkey_init); +module_exit(pkey_exit); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c new file mode 100644 index 000000000..b7cb897cd --- /dev/null +++ b/drivers/s390/crypto/zcrypt_api.c @@ -0,0 +1,1306 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * Cornelia Huck <cornelia.huck@de.ibm.com> + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/compat.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <linux/uaccess.h> +#include <linux/hw_random.h> +#include <linux/debugfs.h> +#include <asm/debug.h> + +#define CREATE_TRACE_POINTS +#include <asm/trace/zcrypt.h> + +#include "zcrypt_api.h" +#include "zcrypt_debug.h" + +#include "zcrypt_msgtype6.h" +#include "zcrypt_msgtype50.h" + +/* + * Module description. + */ +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ + "Copyright IBM Corp. 
2001, 2012"); +MODULE_LICENSE("GPL"); + +/* + * zcrypt tracepoint functions + */ +EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); +EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); + +static int zcrypt_hwrng_seed = 1; +module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440); +MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); + +DEFINE_SPINLOCK(zcrypt_list_lock); +LIST_HEAD(zcrypt_card_list); +int zcrypt_device_count; + +static atomic_t zcrypt_open_count = ATOMIC_INIT(0); +static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); + +atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); +EXPORT_SYMBOL(zcrypt_rescan_req); + +static LIST_HEAD(zcrypt_ops_list); + +/* Zcrypt related debug feature stuff. */ +debug_info_t *zcrypt_dbf_info; + +/** + * Process a rescan of the transport layer. + * + * Returns 1, if the rescan has been processed, otherwise 0. + */ +static inline int zcrypt_process_rescan(void) +{ + if (atomic_read(&zcrypt_rescan_req)) { + atomic_set(&zcrypt_rescan_req, 0); + atomic_inc(&zcrypt_rescan_count); + ap_bus_force_rescan(); + ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n", + atomic_inc_return(&zcrypt_rescan_count)); + return 1; + } + return 0; +} + +void zcrypt_msgtype_register(struct zcrypt_ops *zops) +{ + list_add_tail(&zops->list, &zcrypt_ops_list); +} + +void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) +{ + list_del_init(&zops->list); +} + +struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) +{ + struct zcrypt_ops *zops; + + list_for_each_entry(zops, &zcrypt_ops_list, list) + if ((zops->variant == variant) && + (!strncmp(zops->name, name, sizeof(zops->name)))) + return zops; + return NULL; +} +EXPORT_SYMBOL(zcrypt_msgtype); + +/** + * zcrypt_read (): Not supported beyond zcrypt 1.3.1. + * + * This function is not supported beyond zcrypt 1.3.1. + */ +static ssize_t zcrypt_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + return -EPERM; +} + +/** + * zcrypt_write(): Not allowed. + * + * Write is is not allowed + */ +static ssize_t zcrypt_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + return -EPERM; +} + +/** + * zcrypt_open(): Count number of users. + * + * Device open function to count number of users. + */ +static int zcrypt_open(struct inode *inode, struct file *filp) +{ + atomic_inc(&zcrypt_open_count); + return nonseekable_open(inode, filp); +} + +/** + * zcrypt_release(): Count number of users. + * + * Device close function to count number of users. 
+ */ +static int zcrypt_release(struct inode *inode, struct file *filp) +{ + atomic_dec(&zcrypt_open_count); + return 0; +} + +static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, + struct zcrypt_queue *zq, + unsigned int weight) +{ + if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) + return NULL; + zcrypt_queue_get(zq); + get_device(&zq->queue->ap_dev.device); + atomic_add(weight, &zc->load); + atomic_add(weight, &zq->load); + zq->request_count++; + return zq; +} + +static inline void zcrypt_drop_queue(struct zcrypt_card *zc, + struct zcrypt_queue *zq, + unsigned int weight) +{ + struct module *mod = zq->queue->ap_dev.drv->driver.owner; + + zq->request_count--; + atomic_sub(weight, &zc->load); + atomic_sub(weight, &zq->load); + put_device(&zq->queue->ap_dev.device); + zcrypt_queue_put(zq); + module_put(mod); +} + +static inline bool zcrypt_card_compare(struct zcrypt_card *zc, + struct zcrypt_card *pref_zc, + unsigned int weight, + unsigned int pref_weight) +{ + if (!pref_zc) + return false; + weight += atomic_read(&zc->load); + pref_weight += atomic_read(&pref_zc->load); + if (weight == pref_weight) + return atomic64_read(&zc->card->total_request_count) > + atomic64_read(&pref_zc->card->total_request_count); + return weight > pref_weight; +} + +static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, + struct zcrypt_queue *pref_zq, + unsigned int weight, + unsigned int pref_weight) +{ + if (!pref_zq) + return false; + weight += atomic_read(&zq->load); + pref_weight += atomic_read(&pref_zq->load); + if (weight == pref_weight) + return zq->queue->total_request_count > + pref_zq->queue->total_request_count; + return weight > pref_weight; +} + +/* + * zcrypt ioctls. + */ +static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) +{ + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + unsigned int weight, pref_weight; + unsigned int func_code; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); + + if (mex->outputdatalength < mex->inputdatalength) { + func_code = 0; + rc = -EINVAL; + goto out; + } + + /* + * As long as outputdatalength is big enough, we can set the + * outputdatalength equal to the inputdatalength, since that is the + * number of bytes we will copy in any case + */ + mex->outputdatalength = mex->inputdatalength; + + rc = get_rsa_modex_fc(mex, &func_code); + if (rc) + goto out; + + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online accelarator and CCA cards */ + if (!zc->online || !(zc->card->functions & 0x18000000)) + continue; + /* Check for size limits */ + if (zc->min_mod_size > mex->inputdatalength || + zc->max_mod_size < mex->inputdatalength) + continue; + /* get weight index of the card device */ + weight = zc->speed_rating[func_code]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + continue; + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online || !zq->ops->rsa_modexpo) + continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; + } + } + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + + if (!pref_zq) { + rc = -ENODEV; + goto out; + } + + qid = pref_zq->queue->qid; + rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); 
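+ /* + * Selection recap, with made-up numbers for illustration: the loop + * above prefers the smallest weight + load sum, so a card rated 10 + * with load 5 (sum 15) beats a card rated 8 with load 9 (sum 17); + * on a tie the device with fewer total requests so far is kept. + */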
+ +out: + trace_s390_zcrypt_rep(mex, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; +} + +static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) +{ + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + unsigned int weight, pref_weight; + unsigned int func_code; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(crt, TP_ICARSACRT); + + if (crt->outputdatalength < crt->inputdatalength) { + func_code = 0; + rc = -EINVAL; + goto out; + } + + /* + * As long as outputdatalength is big enough, we can set the + * outputdatalength equal to the inputdatalength, since that is the + * number of bytes we will copy in any case + */ + crt->outputdatalength = crt->inputdatalength; + + rc = get_rsa_crt_fc(crt, &func_code); + if (rc) + goto out; + + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online accelarator and CCA cards */ + if (!zc->online || !(zc->card->functions & 0x18000000)) + continue; + /* Check for size limits */ + if (zc->min_mod_size > crt->inputdatalength || + zc->max_mod_size < crt->inputdatalength) + continue; + /* get weight index of the card device */ + weight = zc->speed_rating[func_code]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + continue; + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online || !zq->ops->rsa_modexpo_crt) + continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; + } + } + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + + if (!pref_zq) { + rc = -ENODEV; + goto out; + } + + qid = pref_zq->queue->qid; + rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + +out: + trace_s390_zcrypt_rep(crt, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; +} + +long zcrypt_send_cprb(struct ica_xcRB *xcRB) +{ + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + struct ap_message ap_msg; + unsigned int weight, pref_weight; + unsigned int func_code; + unsigned short *domain; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); + + ap_init_message(&ap_msg); + rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); + if (rc) + goto out; + + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online CCA cards */ + if (!zc->online || !(zc->card->functions & 0x10000000)) + continue; + /* Check for user selected CCA card */ + if (xcRB->user_defined != AUTOSELECT && + xcRB->user_defined != zc->card->id) + continue; + /* get weight index of the card device */ + weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + continue; + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online || + !zq->ops->send_cprb || + ((*domain != (unsigned short) AUTOSELECT) && + (*domain != AP_QID_QUEUE(zq->queue->qid)))) + continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; + } + } + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + + if (!pref_zq) { + rc = -ENODEV; + goto out; + } + + /* in case of auto select, provide the correct domain */ + 
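+ /* + * *domain arrives as 0xFFFF, i.e. (unsigned short) AUTOSELECT, when + * the caller pinned no domain; it is overwritten below with the + * domain of the queue that won the selection, so the CPRB handed to + * the ->send_cprb() method always names a concrete domain. + */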
qid = pref_zq->queue->qid; + if (*domain == (unsigned short) AUTOSELECT) + *domain = AP_QID_QUEUE(qid); + + rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + +out: + ap_release_message(&ap_msg); + trace_s390_zcrypt_rep(xcRB, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; +} +EXPORT_SYMBOL(zcrypt_send_cprb); + +static bool is_desired_ep11_card(unsigned int dev_id, + unsigned short target_num, + struct ep11_target_dev *targets) +{ + while (target_num-- > 0) { + if (dev_id == targets->ap_id) + return true; + targets++; + } + return false; +} + +static bool is_desired_ep11_queue(unsigned int dev_qid, + unsigned short target_num, + struct ep11_target_dev *targets) +{ + while (target_num-- > 0) { + if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid) + return true; + targets++; + } + return false; +} + +static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) +{ + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + struct ep11_target_dev *targets; + unsigned short target_num; + unsigned int weight, pref_weight; + unsigned int func_code; + struct ap_message ap_msg; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); + + ap_init_message(&ap_msg); + + target_num = (unsigned short) xcrb->targets_num; + + /* empty list indicates autoselect (all available targets) */ + targets = NULL; + if (target_num != 0) { + struct ep11_target_dev __user *uptr; + + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); + if (!targets) { + func_code = 0; + rc = -ENOMEM; + goto out; + } + + uptr = (struct ep11_target_dev __force __user *) xcrb->targets; + if (copy_from_user(targets, uptr, + target_num * sizeof(*targets))) { + func_code = 0; + rc = -EFAULT; + goto out_free; + } + } + + rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code); + if (rc) + goto out_free; + + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online EP11 cards */ + if (!zc->online || !(zc->card->functions & 0x04000000)) + continue; + /* Check for user selected EP11 card */ + if (targets && + !is_desired_ep11_card(zc->card->id, target_num, targets)) + continue; + /* get weight index of the card device */ + weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + continue; + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online || + !zq->ops->send_ep11_cprb || + (targets && + !is_desired_ep11_queue(zq->queue->qid, + target_num, targets))) + continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; + } + } + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + + if (!pref_zq) { + rc = -ENODEV; + goto out_free; + } + + qid = pref_zq->queue->qid; + rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + +out_free: + kfree(targets); +out: + ap_release_message(&ap_msg); + trace_s390_zcrypt_rep(xcrb, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; +} + +static long zcrypt_rng(char *buffer) +{ + struct zcrypt_card *zc, *pref_zc; + struct zcrypt_queue *zq, *pref_zq; + unsigned int weight, pref_weight; + unsigned int func_code; + 
struct ap_message ap_msg; + unsigned int domain; + int qid = 0, rc = -ENODEV; + + trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); + + ap_init_message(&ap_msg); + rc = get_rng_fc(&ap_msg, &func_code, &domain); + if (rc) + goto out; + + pref_zc = NULL; + pref_zq = NULL; + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + /* Check for online CCA cards */ + if (!zc->online || !(zc->card->functions & 0x10000000)) + continue; + /* get weight index of the card device */ + weight = zc->speed_rating[func_code]; + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) + continue; + for_each_zcrypt_queue(zq, zc) { + /* check if device is online and eligible */ + if (!zq->online || !zq->ops->rng) + continue; + if (zcrypt_queue_compare(zq, pref_zq, + weight, pref_weight)) + continue; + pref_zc = zc; + pref_zq = zq; + pref_weight = weight; + } + } + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + + if (!pref_zq) { + rc = -ENODEV; + goto out; + } + + qid = pref_zq->queue->qid; + rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); + + spin_lock(&zcrypt_list_lock); + zcrypt_drop_queue(pref_zc, pref_zq, weight); + spin_unlock(&zcrypt_list_lock); + +out: + ap_release_message(&ap_msg); + trace_s390_zcrypt_rep(buffer, func_code, rc, + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); + return rc; +} + +static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + struct zcrypt_device_status *stat; + int card, queue; + + memset(devstatus, 0, MAX_ZDEV_ENTRIES + * sizeof(struct zcrypt_device_status)); + + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + card = AP_QID_CARD(zq->queue->qid); + if (card >= MAX_ZDEV_CARDIDS) + continue; + queue = AP_QID_QUEUE(zq->queue->qid); + stat = &devstatus[card * AP_DOMAINS + queue]; + stat->hwtype = zc->card->ap_dev.device_type; + stat->functions = zc->card->functions >> 26; + stat->qid = zq->queue->qid; + stat->online = zq->online ? 0x01 : 0x00; + } + } + spin_unlock(&zcrypt_list_lock); +} + +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + struct zcrypt_device_status_ext *stat; + int card, queue; + + memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT + * sizeof(struct zcrypt_device_status_ext)); + + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + card = AP_QID_CARD(zq->queue->qid); + queue = AP_QID_QUEUE(zq->queue->qid); + stat = &devstatus[card * AP_DOMAINS + queue]; + stat->hwtype = zc->card->ap_dev.device_type; + stat->functions = zc->card->functions >> 26; + stat->qid = zq->queue->qid; + stat->online = zq->online ? 0x01 : 0x00; + } + } + spin_unlock(&zcrypt_list_lock); +} +EXPORT_SYMBOL(zcrypt_device_status_mask_ext); + +static void zcrypt_status_mask(char status[], size_t max_adapters) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + int card; + + memset(status, 0, max_adapters); + spin_lock(&zcrypt_list_lock); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + card = AP_QID_CARD(zq->queue->qid); + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index + || card >= max_adapters) + continue; + status[card] = zc->online ? 
zc->user_space_type : 0x0d; + } + } + spin_unlock(&zcrypt_list_lock); +} + +static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + int card; + + memset(qdepth, 0, max_adapters); + spin_lock(&zcrypt_list_lock); + local_bh_disable(); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + card = AP_QID_CARD(zq->queue->qid); + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index + || card >= max_adapters) + continue; + spin_lock(&zq->queue->lock); + qdepth[card] = + zq->queue->pendingq_count + + zq->queue->requestq_count; + spin_unlock(&zq->queue->lock); + } + } + local_bh_enable(); + spin_unlock(&zcrypt_list_lock); +} + +static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + int card; + u64 cnt; + + memset(reqcnt, 0, sizeof(int) * max_adapters); + spin_lock(&zcrypt_list_lock); + local_bh_disable(); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + card = AP_QID_CARD(zq->queue->qid); + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index + || card >= max_adapters) + continue; + spin_lock(&zq->queue->lock); + cnt = zq->queue->total_request_count; + spin_unlock(&zq->queue->lock); + reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX; + } + } + local_bh_enable(); + spin_unlock(&zcrypt_list_lock); +} + +static int zcrypt_pendingq_count(void) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + int pendingq_count; + + pendingq_count = 0; + spin_lock(&zcrypt_list_lock); + local_bh_disable(); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; + spin_lock(&zq->queue->lock); + pendingq_count += zq->queue->pendingq_count; + spin_unlock(&zq->queue->lock); + } + } + local_bh_enable(); + spin_unlock(&zcrypt_list_lock); + return pendingq_count; +} + +static int zcrypt_requestq_count(void) +{ + struct zcrypt_card *zc; + struct zcrypt_queue *zq; + int requestq_count; + + requestq_count = 0; + spin_lock(&zcrypt_list_lock); + local_bh_disable(); + for_each_zcrypt_card(zc) { + for_each_zcrypt_queue(zq, zc) { + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) + continue; + spin_lock(&zq->queue->lock); + requestq_count += zq->queue->requestq_count; + spin_unlock(&zq->queue->lock); + } + } + local_bh_enable(); + spin_unlock(&zcrypt_list_lock); + return requestq_count; +} + +static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int rc = 0; + + switch (cmd) { + case ICARSAMODEXPO: { + struct ica_rsa_modexpo __user *umex = (void __user *) arg; + struct ica_rsa_modexpo mex; + + if (copy_from_user(&mex, umex, sizeof(mex))) + return -EFAULT; + do { + rc = zcrypt_rsa_modexpo(&mex); + } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_rsa_modexpo(&mex); + } while (rc == -EAGAIN); + if (rc) { + ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc); + return rc; + } + return put_user(mex.outputdatalength, &umex->outputdatalength); + } + case ICARSACRT: { + struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; + struct ica_rsa_modexpo_crt crt; + + if (copy_from_user(&crt, ucrt, sizeof(crt))) + return -EFAULT; + do { + rc = zcrypt_rsa_crt(&crt); + } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = 
zcrypt_rsa_crt(&crt); + } while (rc == -EAGAIN); + if (rc) { + ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc); + return rc; + } + return put_user(crt.outputdatalength, &ucrt->outputdatalength); + } + case ZSECSENDCPRB: { + struct ica_xcRB __user *uxcRB = (void __user *) arg; + struct ica_xcRB xcRB; + + if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) + return -EFAULT; + do { + rc = zcrypt_send_cprb(&xcRB); + } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_send_cprb(&xcRB); + } while (rc == -EAGAIN); + if (rc) + ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc); + if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) + return -EFAULT; + return rc; + } + case ZSENDEP11CPRB: { + struct ep11_urb __user *uxcrb = (void __user *)arg; + struct ep11_urb xcrb; + + if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) + return -EFAULT; + do { + rc = zcrypt_send_ep11_cprb(&xcrb); + } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_send_ep11_cprb(&xcrb); + } while (rc == -EAGAIN); + if (rc) + ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc); + if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) + return -EFAULT; + return rc; + } + case ZCRYPT_DEVICE_STATUS: { + struct zcrypt_device_status_ext *device_status; + size_t total_size = MAX_ZDEV_ENTRIES_EXT + * sizeof(struct zcrypt_device_status_ext); + + device_status = kzalloc(total_size, GFP_KERNEL); + if (!device_status) + return -ENOMEM; + zcrypt_device_status_mask_ext(device_status); + if (copy_to_user((char __user *) arg, device_status, + total_size)) + rc = -EFAULT; + kfree(device_status); + return rc; + } + case ZCRYPT_STATUS_MASK: { + char status[AP_DEVICES]; + + zcrypt_status_mask(status, AP_DEVICES); + if (copy_to_user((char __user *) arg, status, sizeof(status))) + return -EFAULT; + return 0; + } + case ZCRYPT_QDEPTH_MASK: { + char qdepth[AP_DEVICES]; + + zcrypt_qdepth_mask(qdepth, AP_DEVICES); + if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) + return -EFAULT; + return 0; + } + case ZCRYPT_PERDEV_REQCNT: { + u32 *reqcnt; + + reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL); + if (!reqcnt) + return -ENOMEM; + zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); + if (copy_to_user((int __user *) arg, reqcnt, + sizeof(u32) * AP_DEVICES)) + rc = -EFAULT; + kfree(reqcnt); + return rc; + } + case Z90STAT_REQUESTQ_COUNT: + return put_user(zcrypt_requestq_count(), (int __user *) arg); + case Z90STAT_PENDINGQ_COUNT: + return put_user(zcrypt_pendingq_count(), (int __user *) arg); + case Z90STAT_TOTALOPEN_COUNT: + return put_user(atomic_read(&zcrypt_open_count), + (int __user *) arg); + case Z90STAT_DOMAIN_INDEX: + return put_user(ap_domain_index, (int __user *) arg); + /* + * Deprecated ioctls + */ + case ZDEVICESTATUS: { + /* the old ioctl supports only 64 adapters */ + struct zcrypt_device_status *device_status; + size_t total_size = MAX_ZDEV_ENTRIES + * sizeof(struct zcrypt_device_status); + + device_status = kzalloc(total_size, GFP_KERNEL); + if (!device_status) + return -ENOMEM; + zcrypt_device_status_mask(device_status); + if (copy_to_user((char __user *) arg, device_status, + total_size)) + rc = -EFAULT; + kfree(device_status); + return rc; + } + case Z90STAT_STATUS_MASK: { + /* the old ioctl supports only 64 adapters */ + char status[MAX_ZDEV_CARDIDS]; + + zcrypt_status_mask(status, MAX_ZDEV_CARDIDS); + if 
(copy_to_user((char __user *) arg, status, sizeof(status))) + return -EFAULT; + return 0; + } + case Z90STAT_QDEPTH_MASK: { + /* the old ioctl supports only 64 adapters */ + char qdepth[MAX_ZDEV_CARDIDS]; + + zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS); + if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) + return -EFAULT; + return 0; + } + case Z90STAT_PERDEV_REQCNT: { + /* the old ioctl supports only 64 adapters */ + u32 reqcnt[MAX_ZDEV_CARDIDS]; + + zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); + if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) + return -EFAULT; + return 0; + } + /* unknown ioctl number */ + default: + ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd); + return -ENOIOCTLCMD; + } +} + +#ifdef CONFIG_COMPAT +/* + * ioctl32 conversion routines + */ +struct compat_ica_rsa_modexpo { + compat_uptr_t inputdata; + unsigned int inputdatalength; + compat_uptr_t outputdata; + unsigned int outputdatalength; + compat_uptr_t b_key; + compat_uptr_t n_modulus; +}; + +static long trans_modexpo32(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); + struct compat_ica_rsa_modexpo mex32; + struct ica_rsa_modexpo mex64; + long rc; + + if (copy_from_user(&mex32, umex32, sizeof(mex32))) + return -EFAULT; + mex64.inputdata = compat_ptr(mex32.inputdata); + mex64.inputdatalength = mex32.inputdatalength; + mex64.outputdata = compat_ptr(mex32.outputdata); + mex64.outputdatalength = mex32.outputdatalength; + mex64.b_key = compat_ptr(mex32.b_key); + mex64.n_modulus = compat_ptr(mex32.n_modulus); + do { + rc = zcrypt_rsa_modexpo(&mex64); + } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_rsa_modexpo(&mex64); + } while (rc == -EAGAIN); + if (rc) + return rc; + return put_user(mex64.outputdatalength, + &umex32->outputdatalength); +} + +struct compat_ica_rsa_modexpo_crt { + compat_uptr_t inputdata; + unsigned int inputdatalength; + compat_uptr_t outputdata; + unsigned int outputdatalength; + compat_uptr_t bp_key; + compat_uptr_t bq_key; + compat_uptr_t np_prime; + compat_uptr_t nq_prime; + compat_uptr_t u_mult_inv; +}; + +static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); + struct compat_ica_rsa_modexpo_crt crt32; + struct ica_rsa_modexpo_crt crt64; + long rc; + + if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) + return -EFAULT; + crt64.inputdata = compat_ptr(crt32.inputdata); + crt64.inputdatalength = crt32.inputdatalength; + crt64.outputdata = compat_ptr(crt32.outputdata); + crt64.outputdatalength = crt32.outputdatalength; + crt64.bp_key = compat_ptr(crt32.bp_key); + crt64.bq_key = compat_ptr(crt32.bq_key); + crt64.np_prime = compat_ptr(crt32.np_prime); + crt64.nq_prime = compat_ptr(crt32.nq_prime); + crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); + do { + rc = zcrypt_rsa_crt(&crt64); + } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_rsa_crt(&crt64); + } while (rc == -EAGAIN); + if (rc) + return rc; + return put_user(crt64.outputdatalength, + &ucrt32->outputdatalength); +} + +struct compat_ica_xcRB { + unsigned short agent_ID; + unsigned int user_defined; + unsigned short request_ID; + unsigned int request_control_blk_length; + unsigned char padding1[16 - 
sizeof(compat_uptr_t)]; + compat_uptr_t request_control_blk_addr; + unsigned int request_data_length; + char padding2[16 - sizeof(compat_uptr_t)]; + compat_uptr_t request_data_address; + unsigned int reply_control_blk_length; + char padding3[16 - sizeof(compat_uptr_t)]; + compat_uptr_t reply_control_blk_addr; + unsigned int reply_data_length; + char padding4[16 - sizeof(compat_uptr_t)]; + compat_uptr_t reply_data_addr; + unsigned short priority_window; + unsigned int status; +} __packed; + +static long trans_xcRB32(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); + struct compat_ica_xcRB xcRB32; + struct ica_xcRB xcRB64; + long rc; + + if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) + return -EFAULT; + xcRB64.agent_ID = xcRB32.agent_ID; + xcRB64.user_defined = xcRB32.user_defined; + xcRB64.request_ID = xcRB32.request_ID; + xcRB64.request_control_blk_length = + xcRB32.request_control_blk_length; + xcRB64.request_control_blk_addr = + compat_ptr(xcRB32.request_control_blk_addr); + xcRB64.request_data_length = + xcRB32.request_data_length; + xcRB64.request_data_address = + compat_ptr(xcRB32.request_data_address); + xcRB64.reply_control_blk_length = + xcRB32.reply_control_blk_length; + xcRB64.reply_control_blk_addr = + compat_ptr(xcRB32.reply_control_blk_addr); + xcRB64.reply_data_length = xcRB32.reply_data_length; + xcRB64.reply_data_addr = + compat_ptr(xcRB32.reply_data_addr); + xcRB64.priority_window = xcRB32.priority_window; + xcRB64.status = xcRB32.status; + do { + rc = zcrypt_send_cprb(&xcRB64); + } while (rc == -EAGAIN); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + do { + rc = zcrypt_send_cprb(&xcRB64); + } while (rc == -EAGAIN); + xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; + xcRB32.reply_data_length = xcRB64.reply_data_length; + xcRB32.status = xcRB64.status; + if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) + return -EFAULT; + return rc; +} + +static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + if (cmd == ICARSAMODEXPO) + return trans_modexpo32(filp, cmd, arg); + if (cmd == ICARSACRT) + return trans_modexpo_crt32(filp, cmd, arg); + if (cmd == ZSECSENDCPRB) + return trans_xcRB32(filp, cmd, arg); + return zcrypt_unlocked_ioctl(filp, cmd, arg); +} +#endif + +/* + * Misc device file operations. + */ +static const struct file_operations zcrypt_fops = { + .owner = THIS_MODULE, + .read = zcrypt_read, + .write = zcrypt_write, + .unlocked_ioctl = zcrypt_unlocked_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = zcrypt_compat_ioctl, +#endif + .open = zcrypt_open, + .release = zcrypt_release, + .llseek = no_llseek, +}; + +/* + * Misc device. + */ +static struct miscdevice zcrypt_misc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "z90crypt", + .fops = &zcrypt_fops, +}; + +static int zcrypt_rng_device_count; +static u32 *zcrypt_rng_buffer; +static int zcrypt_rng_buffer_index; +static DEFINE_MUTEX(zcrypt_rng_mutex); + +static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) +{ + int rc; + + /* + * We don't need locking here because the RNG API guarantees serialized + * read method calls. 
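+	 * A full page of random bytes is fetched from a card by a single
+	 * zcrypt_rng() call and handed out one 32-bit word at a time:
+	 * zcrypt_rng_buffer_index counts down with each read and a value
+	 * of 0 triggers the next refill.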
+ */ + if (zcrypt_rng_buffer_index == 0) { + rc = zcrypt_rng((char *) zcrypt_rng_buffer); + /* on failure: retry once again after a requested rescan */ + if ((rc == -ENODEV) && (zcrypt_process_rescan())) + rc = zcrypt_rng((char *) zcrypt_rng_buffer); + if (rc < 0) + return -EIO; + zcrypt_rng_buffer_index = rc / sizeof(*data); + } + *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; + return sizeof(*data); +} + +static struct hwrng zcrypt_rng_dev = { + .name = "zcrypt", + .data_read = zcrypt_rng_data_read, + .quality = 990, +}; + +int zcrypt_rng_device_add(void) +{ + int rc = 0; + + mutex_lock(&zcrypt_rng_mutex); + if (zcrypt_rng_device_count == 0) { + zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); + if (!zcrypt_rng_buffer) { + rc = -ENOMEM; + goto out; + } + zcrypt_rng_buffer_index = 0; + if (!zcrypt_hwrng_seed) + zcrypt_rng_dev.quality = 0; + rc = hwrng_register(&zcrypt_rng_dev); + if (rc) + goto out_free; + zcrypt_rng_device_count = 1; + } else + zcrypt_rng_device_count++; + mutex_unlock(&zcrypt_rng_mutex); + return 0; + +out_free: + free_page((unsigned long) zcrypt_rng_buffer); +out: + mutex_unlock(&zcrypt_rng_mutex); + return rc; +} + +void zcrypt_rng_device_remove(void) +{ + mutex_lock(&zcrypt_rng_mutex); + zcrypt_rng_device_count--; + if (zcrypt_rng_device_count == 0) { + hwrng_unregister(&zcrypt_rng_dev); + free_page((unsigned long) zcrypt_rng_buffer); + } + mutex_unlock(&zcrypt_rng_mutex); +} + +int __init zcrypt_debug_init(void) +{ + zcrypt_dbf_info = debug_register("zcrypt", 1, 1, + DBF_MAX_SPRINTF_ARGS * sizeof(long)); + debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); + debug_set_level(zcrypt_dbf_info, DBF_ERR); + + return 0; +} + +void zcrypt_debug_exit(void) +{ + debug_unregister(zcrypt_dbf_info); +} + +/** + * zcrypt_api_init(): Module initialization. + * + * The module initialization code. + */ +int __init zcrypt_api_init(void) +{ + int rc; + + rc = zcrypt_debug_init(); + if (rc) + goto out; + + /* Register the request sprayer. */ + rc = misc_register(&zcrypt_misc_device); + if (rc < 0) + goto out; + + zcrypt_msgtype6_init(); + zcrypt_msgtype50_init(); + return 0; + +out: + return rc; +} + +/** + * zcrypt_api_exit(): Module termination. + * + * The module termination code. + */ +void __exit zcrypt_api_exit(void) +{ + misc_deregister(&zcrypt_misc_device); + zcrypt_msgtype6_exit(); + zcrypt_msgtype50_exit(); + zcrypt_debug_exit(); +} + +module_init(zcrypt_api_init); +module_exit(zcrypt_api_exit); diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h new file mode 100644 index 000000000..a848625c1 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_api.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * Cornelia Huck <cornelia.huck@de.ibm.com> + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#ifndef _ZCRYPT_API_H_ +#define _ZCRYPT_API_H_ + +#include <linux/atomic.h> +#include <asm/debug.h> +#include <asm/zcrypt.h> +#include "ap_bus.h" + +/** + * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2, + * PCIXCC_MCL3, CEX2C, or CEX2A + * + * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed + * Internal Code (LIC) (EC J12220 level 29). 
+ * PCIXCC_MCL2 refers to any LIC before this level. + */ +#define ZCRYPT_PCICA 1 +#define ZCRYPT_PCICC 2 +#define ZCRYPT_PCIXCC_MCL2 3 +#define ZCRYPT_PCIXCC_MCL3 4 +#define ZCRYPT_CEX2C 5 +#define ZCRYPT_CEX2A 6 +#define ZCRYPT_CEX3C 7 +#define ZCRYPT_CEX3A 8 +#define ZCRYPT_CEX4 10 +#define ZCRYPT_CEX5 11 +#define ZCRYPT_CEX6 12 + +/** + * Large random numbers are pulled in 4096 byte chunks from the crypto cards + * and stored in a page. Be careful when increasing this buffer due to size + * limitations for AP requests. + */ +#define ZCRYPT_RNG_BUFFER_SIZE 4096 + +/* + * Identifier for Crypto Request Performance Index + */ +enum crypto_ops { + MEX_1K, + MEX_2K, + MEX_4K, + CRT_1K, + CRT_2K, + CRT_4K, + HWRNG, + SECKEY, + NUM_OPS +}; + +struct zcrypt_queue; + +struct zcrypt_ops { + long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *); + long (*rsa_modexpo_crt)(struct zcrypt_queue *, + struct ica_rsa_modexpo_crt *); + long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *, + struct ap_message *); + long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *, + struct ap_message *); + long (*rng)(struct zcrypt_queue *, char *, struct ap_message *); + struct list_head list; /* zcrypt ops list. */ + struct module *owner; + int variant; + char name[128]; +}; + +struct zcrypt_card { + struct list_head list; /* Device list. */ + struct list_head zqueues; /* List of zcrypt queues */ + struct kref refcount; /* device refcounting */ + struct ap_card *card; /* The "real" ap card device. */ + int online; /* User online/offline */ + + int user_space_type; /* User space device id. */ + char *type_string; /* User space device name. */ + int min_mod_size; /* Min number of bits. */ + int max_mod_size; /* Max number of bits. */ + int max_exp_bit_length; + int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */ + atomic_t load; /* Utilization of the crypto device */ + + int request_count; /* # current requests. */ +}; + +struct zcrypt_queue { + struct list_head list; /* Device list. */ + struct kref refcount; /* device refcounting */ + struct zcrypt_card *zcard; + struct zcrypt_ops *ops; /* Crypto operations. */ + struct ap_queue *queue; /* The "real" ap queue device. */ + int online; /* User online/offline */ + + atomic_t load; /* Utilization of the crypto device */ + + int request_count; /* # current requests. */ + + struct ap_message reply; /* Per-device reply structure. 
*/ +}; + +/* transport layer rescanning */ +extern atomic_t zcrypt_rescan_req; + +extern spinlock_t zcrypt_list_lock; +extern int zcrypt_device_count; +extern struct list_head zcrypt_card_list; + +#define for_each_zcrypt_card(_zc) \ + list_for_each_entry(_zc, &zcrypt_card_list, list) + +#define for_each_zcrypt_queue(_zq, _zc) \ + list_for_each_entry(_zq, &(_zc)->zqueues, list) + +struct zcrypt_card *zcrypt_card_alloc(void); +void zcrypt_card_free(struct zcrypt_card *); +void zcrypt_card_get(struct zcrypt_card *); +int zcrypt_card_put(struct zcrypt_card *); +int zcrypt_card_register(struct zcrypt_card *); +void zcrypt_card_unregister(struct zcrypt_card *); +struct zcrypt_card *zcrypt_card_get_best(unsigned int *, + unsigned int, unsigned int); +void zcrypt_card_put_best(struct zcrypt_card *, unsigned int); + +struct zcrypt_queue *zcrypt_queue_alloc(size_t); +void zcrypt_queue_free(struct zcrypt_queue *); +void zcrypt_queue_get(struct zcrypt_queue *); +int zcrypt_queue_put(struct zcrypt_queue *); +int zcrypt_queue_register(struct zcrypt_queue *); +void zcrypt_queue_unregister(struct zcrypt_queue *); +void zcrypt_queue_force_online(struct zcrypt_queue *, int); +struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int); +void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int); + +int zcrypt_rng_device_add(void); +void zcrypt_rng_device_remove(void); + +void zcrypt_msgtype_register(struct zcrypt_ops *); +void zcrypt_msgtype_unregister(struct zcrypt_ops *); +struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); +int zcrypt_api_init(void); +void zcrypt_api_exit(void); +long zcrypt_send_cprb(struct ica_xcRB *xcRB); +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus); + +#endif /* _ZCRYPT_API_H_ */ diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c new file mode 100644 index 000000000..40cd4c1c2 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_card.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * Cornelia Huck <cornelia.huck@de.ibm.com> + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/compat.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <linux/uaccess.h> +#include <linux/hw_random.h> +#include <linux/debugfs.h> +#include <asm/debug.h> + +#include "zcrypt_debug.h" +#include "zcrypt_api.h" + +#include "zcrypt_msgtype6.h" +#include "zcrypt_msgtype50.h" + +/* + * Device attributes common for all crypto card devices. 
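+ *
+ * The attributes defined here ('type', 'online' and 'load') appear in
+ * sysfs below the AP card device, typically /sys/bus/ap/devices/cardNN;
+ * writing 0 or 1 to 'online' forces all queues of the card offline or
+ * online in one go.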
+ */ + +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zcrypt_card *zc = to_ap_card(dev)->private; + + return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string); +} + +static DEVICE_ATTR_RO(type); + +static ssize_t online_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zcrypt_card *zc = to_ap_card(dev)->private; + + return snprintf(buf, PAGE_SIZE, "%d\n", zc->online); +} + +static ssize_t online_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zcrypt_card *zc = to_ap_card(dev)->private; + struct zcrypt_queue *zq; + int online, id; + + if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) + return -EINVAL; + + zc->online = online; + id = zc->card->id; + + ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online); + + spin_lock(&zcrypt_list_lock); + list_for_each_entry(zq, &zc->zqueues, list) + zcrypt_queue_force_online(zq, online); + spin_unlock(&zcrypt_list_lock); + return count; +} + +static DEVICE_ATTR_RW(online); + +static ssize_t load_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zcrypt_card *zc = to_ap_card(dev)->private; + + return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load)); +} + +static DEVICE_ATTR_RO(load); + +static struct attribute *zcrypt_card_attrs[] = { + &dev_attr_type.attr, + &dev_attr_online.attr, + &dev_attr_load.attr, + NULL, +}; + +static const struct attribute_group zcrypt_card_attr_group = { + .attrs = zcrypt_card_attrs, +}; + +struct zcrypt_card *zcrypt_card_alloc(void) +{ + struct zcrypt_card *zc; + + zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL); + if (!zc) + return NULL; + INIT_LIST_HEAD(&zc->list); + INIT_LIST_HEAD(&zc->zqueues); + kref_init(&zc->refcount); + return zc; +} +EXPORT_SYMBOL(zcrypt_card_alloc); + +void zcrypt_card_free(struct zcrypt_card *zc) +{ + kfree(zc); +} +EXPORT_SYMBOL(zcrypt_card_free); + +static void zcrypt_card_release(struct kref *kref) +{ + struct zcrypt_card *zdev = + container_of(kref, struct zcrypt_card, refcount); + zcrypt_card_free(zdev); +} + +void zcrypt_card_get(struct zcrypt_card *zc) +{ + kref_get(&zc->refcount); +} +EXPORT_SYMBOL(zcrypt_card_get); + +int zcrypt_card_put(struct zcrypt_card *zc) +{ + return kref_put(&zc->refcount, zcrypt_card_release); +} +EXPORT_SYMBOL(zcrypt_card_put); + +/** + * zcrypt_card_register() - Register a crypto card device. + * @zc: Pointer to a crypto card device + * + * Register a crypto card device. Returns 0 if successful. + */ +int zcrypt_card_register(struct zcrypt_card *zc) +{ + int rc; + + rc = sysfs_create_group(&zc->card->ap_dev.device.kobj, + &zcrypt_card_attr_group); + if (rc) + return rc; + + spin_lock(&zcrypt_list_lock); + list_add_tail(&zc->list, &zcrypt_card_list); + spin_unlock(&zcrypt_list_lock); + + zc->online = 1; + + ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id); + + return rc; +} +EXPORT_SYMBOL(zcrypt_card_register); + +/** + * zcrypt_card_unregister(): Unregister a crypto card device. + * @zc: Pointer to crypto card device + * + * Unregister a crypto card device. 
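+ *
+ * The card is removed from the zcrypt card list and its sysfs
+ * attribute group is detached; the structure itself is freed only
+ * when the last reference is dropped via zcrypt_card_put().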
+ */ +void zcrypt_card_unregister(struct zcrypt_card *zc) +{ + ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id); + + spin_lock(&zcrypt_list_lock); + list_del_init(&zc->list); + spin_unlock(&zcrypt_list_lock); + sysfs_remove_group(&zc->card->ap_dev.device.kobj, + &zcrypt_card_attr_group); +} +EXPORT_SYMBOL(zcrypt_card_unregister); diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h new file mode 100644 index 000000000..e5b5c02c9 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_cca_key.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2006 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef _ZCRYPT_CCA_KEY_H_ +#define _ZCRYPT_CCA_KEY_H_ + +struct T6_keyBlock_hdr { + unsigned short blen; + unsigned short ulen; + unsigned short flags; +}; + +/** + * mapping for the cca private ME key token. + * Three parts of interest here: the header, the private section and + * the public section. + * + * mapping for the cca key token header + */ +struct cca_token_hdr { + unsigned char token_identifier; + unsigned char version; + unsigned short token_length; + unsigned char reserved[4]; +} __packed; + +#define CCA_TKN_HDR_ID_EXT 0x1E + +#define CCA_PVT_USAGE_ALL 0x80 + +/** + * mapping for the cca public section + * In a private key, the modulus doesn't appear in the public + * section. So, an arbitrary public exponent of 0x010001 will be + * used, for a section length of 0x0F always. + */ +struct cca_public_sec { + unsigned char section_identifier; + unsigned char version; + unsigned short section_length; + unsigned char reserved[2]; + unsigned short exponent_len; + unsigned short modulus_bit_len; + unsigned short modulus_byte_len; /* In a private key, this is 0 */ +} __packed; + +/** + * mapping for the cca private CRT key 'token' + * The first three parts (the only parts considered in this release) + * are: the header, the private section and the public section. + * The header and public section are the same as for the + * struct cca_private_ext_ME + * + * Following the structure are the quantities p, q, dp, dq, u, pad, + * and modulus, in that order, where pad_len is the modulo 8 + * complement of the residue modulo 8 of the sum of + * (p_len + q_len + dp_len + dq_len + u_len). + */ +struct cca_pvt_ext_CRT_sec { + unsigned char section_identifier; + unsigned char version; + unsigned short section_length; + unsigned char private_key_hash[20]; + unsigned char reserved1[4]; + unsigned char key_format; + unsigned char reserved2; + unsigned char key_name_hash[20]; + unsigned char key_use_flags[4]; + unsigned short p_len; + unsigned short q_len; + unsigned short dp_len; + unsigned short dq_len; + unsigned short u_len; + unsigned short mod_len; + unsigned char reserved3[4]; + unsigned short pad_len; + unsigned char reserved4[52]; + unsigned char confounder[8]; +} __packed; + +#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08 +#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40 + +/** + * Set up private key fields of a type6 MEX message. The _pad variant + * strips leading zeroes from the b_key. + * Note that all numerics in the key token are big-endian, + * while the entries in the key block header are little-endian. 
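+ *
+ * The key area assembled here is, in order (with n = inputdatalength
+ * and i = the number of leading zero bytes stripped from b_key):
+ *
+ *	T6_keyBlock_hdr | cca_token_hdr | cca_public_sec |
+ *	exponent (n - i bytes) | modulus (n bytes)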
+ *
+ * @mex: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or negative errno value.
+ */
+static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
+{
+	static struct cca_token_hdr static_pub_hdr = {
+		.token_identifier = 0x1E,
+	};
+	static struct cca_public_sec static_pub_sec = {
+		.section_identifier = 0x04,
+	};
+	struct {
+		struct T6_keyBlock_hdr t6_hdr;
+		struct cca_token_hdr pubHdr;
+		struct cca_public_sec pubSec;
+		char exponent[0];
+	} __packed *key = p;
+	unsigned char *temp;
+	int i;
+
+	/*
+	 * The inputdatalength was a selection criteria in the dispatching
+	 * function zcrypt_rsa_modexpo(). However, do a plausibility check
+	 * here to make sure the following copy_from_user() can't be utilized
+	 * to compromise the system.
+	 */
+	if (WARN_ON_ONCE(mex->inputdatalength > 512))
+		return -EINVAL;
+
+	memset(key, 0, sizeof(*key));
+
+	key->pubHdr = static_pub_hdr;
+	key->pubSec = static_pub_sec;
+
+	/* key parameter block */
+	temp = key->exponent;
+	if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
+		return -EFAULT;
+	/* Strip leading zeroes from b_key. */
+	for (i = 0; i < mex->inputdatalength; i++)
+		if (temp[i])
+			break;
+	if (i >= mex->inputdatalength)
+		return -EINVAL;
+	memmove(temp, temp + i, mex->inputdatalength - i);
+	temp += mex->inputdatalength - i;
+	/* modulus */
+	if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
+		return -EFAULT;
+
+	key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
+	key->pubSec.modulus_byte_len = mex->inputdatalength;
+	key->pubSec.exponent_len = mex->inputdatalength - i;
+	key->pubSec.section_length = sizeof(key->pubSec) +
+					2*mex->inputdatalength - i;
+	key->pubHdr.token_length =
+		key->pubSec.section_length + sizeof(key->pubHdr);
+	key->t6_hdr.ulen = key->pubHdr.token_length + 4;
+	key->t6_hdr.blen = key->pubHdr.token_length + 6;
+	return sizeof(*key) + 2*mex->inputdatalength - i;
+}
+
+/**
+ * Set up private key fields of a type6 CRT message.
+ * Note that all numerics in the key token are big-endian,
+ * while the entries in the key block header are little-endian.
+ *
+ * @crt: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or -EFAULT
+ */
+static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
+{
+	static struct cca_public_sec static_cca_pub_sec = {
+		.section_identifier = 4,
+		.section_length = 0x000f,
+		.exponent_len = 0x0003,
+	};
+	static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
+	struct {
+		struct T6_keyBlock_hdr t6_hdr;
+		struct cca_token_hdr token;
+		struct cca_pvt_ext_CRT_sec pvt;
+		char key_parts[0];
+	} __packed *key = p;
+	struct cca_public_sec *pub;
+	int short_len, long_len, pad_len, key_len, size;
+
+	/*
+	 * The inputdatalength was a selection criteria in the dispatching
+	 * function zcrypt_rsa_crt(). However, do a plausibility check
+	 * here to make sure the following copy_from_user() can't be utilized
+	 * to compromise the system.
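+	 * 512 bytes of input corresponds to the 4096-bit maximum RSA
+	 * modulus supported by the largest adapters; larger values can
+	 * only stem from a corrupted or malicious request.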
+ */ + if (WARN_ON_ONCE(crt->inputdatalength > 512)) + return -EINVAL; + + memset(key, 0, sizeof(*key)); + + short_len = (crt->inputdatalength + 1) / 2; + long_len = short_len + 8; + pad_len = -(3*long_len + 2*short_len) & 7; + key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength; + size = sizeof(*key) + key_len + sizeof(*pub) + 3; + + /* parameter block.key block */ + key->t6_hdr.blen = size; + key->t6_hdr.ulen = size - 2; + + /* key token header */ + key->token.token_identifier = CCA_TKN_HDR_ID_EXT; + key->token.token_length = size - 6; + + /* private section */ + key->pvt.section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT; + key->pvt.section_length = sizeof(key->pvt) + key_len; + key->pvt.key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL; + key->pvt.key_use_flags[0] = CCA_PVT_USAGE_ALL; + key->pvt.p_len = key->pvt.dp_len = key->pvt.u_len = long_len; + key->pvt.q_len = key->pvt.dq_len = short_len; + key->pvt.mod_len = crt->inputdatalength; + key->pvt.pad_len = pad_len; + + /* key parts */ + if (copy_from_user(key->key_parts, crt->np_prime, long_len) || + copy_from_user(key->key_parts + long_len, + crt->nq_prime, short_len) || + copy_from_user(key->key_parts + long_len + short_len, + crt->bp_key, long_len) || + copy_from_user(key->key_parts + 2*long_len + short_len, + crt->bq_key, short_len) || + copy_from_user(key->key_parts + 2*long_len + 2*short_len, + crt->u_mult_inv, long_len)) + return -EFAULT; + memset(key->key_parts + 3*long_len + 2*short_len + pad_len, + 0xff, crt->inputdatalength); + pub = (struct cca_public_sec *)(key->key_parts + key_len); + *pub = static_cca_pub_sec; + pub->modulus_bit_len = 8 * crt->inputdatalength; + /* + * In a private key, the modulus doesn't appear in the public + * section. So, an arbitrary public exponent of 0x010001 will be + * used. + */ + memcpy((char *) (pub + 1), pk_exponent, 3); + return size; +} + +#endif /* _ZCRYPT_CCA_KEY_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c new file mode 100644 index 000000000..ff17a0027 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 
2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/atomic.h> +#include <linux/uaccess.h> +#include <linux/mod_devicetable.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_error.h" +#include "zcrypt_cex2a.h" +#include "zcrypt_msgtype50.h" + +#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */ +#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */ +#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE +#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */ + +#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ +#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ + +#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus + * (max outputdatalength) + + * type80_hdr*/ +#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg) + +#define CEX2A_CLEANUP_TIME (15*HZ) +#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \ + "Copyright IBM Corp. 2001, 2012"); +MODULE_LICENSE("GPL"); + +static struct ap_device_id zcrypt_cex2a_card_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX2A, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX3A, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { /* end of list */ }, +}; + +MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids); + +static struct ap_device_id zcrypt_cex2a_queue_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX2A, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX3A, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { /* end of list */ }, +}; + +MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids); + +/** + * Probe function for CEX2A card devices. It always accepts the AP device + * since the bus_match already checked the card type. + * @ap_dev: pointer to the AP device. 
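+ *
+ * Returns 0 on success, -ENOMEM if no zcrypt card structure can be
+ * allocated, -ENODEV for an unexpected device type, or the error code
+ * returned by zcrypt_card_register().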
+ */ +static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev) +{ + /* + * Normalized speed ratings per crypto adapter + * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY + */ + static const int CEX2A_SPEED_IDX[] = { + 800, 1000, 2000, 900, 1200, 2400, 0, 0}; + static const int CEX3A_SPEED_IDX[] = { + 400, 500, 1000, 450, 550, 1200, 0, 0}; + + struct ap_card *ac = to_ap_card(&ap_dev->device); + struct zcrypt_card *zc; + int rc = 0; + + zc = zcrypt_card_alloc(); + if (!zc) + return -ENOMEM; + zc->card = ac; + ac->private = zc; + + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) { + zc->min_mod_size = CEX2A_MIN_MOD_SIZE; + zc->max_mod_size = CEX2A_MAX_MOD_SIZE; + memcpy(zc->speed_rating, CEX2A_SPEED_IDX, + sizeof(CEX2A_SPEED_IDX)); + zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; + zc->type_string = "CEX2A"; + zc->user_space_type = ZCRYPT_CEX2A; + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) { + zc->min_mod_size = CEX2A_MIN_MOD_SIZE; + zc->max_mod_size = CEX2A_MAX_MOD_SIZE; + zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; + if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && + ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { + zc->max_mod_size = CEX3A_MAX_MOD_SIZE; + zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; + } + memcpy(zc->speed_rating, CEX3A_SPEED_IDX, + sizeof(CEX3A_SPEED_IDX)); + zc->type_string = "CEX3A"; + zc->user_space_type = ZCRYPT_CEX3A; + } else { + zcrypt_card_free(zc); + return -ENODEV; + } + zc->online = 1; + + rc = zcrypt_card_register(zc); + if (rc) { + ac->private = NULL; + zcrypt_card_free(zc); + } + + return rc; +} + +/** + * This is called to remove the CEX2A card driver information + * if an AP card device is removed. + */ +static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev) +{ + struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; + + if (zc) + zcrypt_card_unregister(zc); +} + +static struct ap_driver zcrypt_cex2a_card_driver = { + .probe = zcrypt_cex2a_card_probe, + .remove = zcrypt_cex2a_card_remove, + .ids = zcrypt_cex2a_card_ids, + .flags = AP_DRIVER_FLAG_DEFAULT, +}; + +/** + * Probe function for CEX2A queue devices. It always accepts the AP device + * since the bus_match already checked the queue type. + * @ap_dev: pointer to the AP device. + */ +static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) +{ + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + struct zcrypt_queue *zq = NULL; + int rc; + + switch (ap_dev->device_type) { + case AP_DEVICE_TYPE_CEX2A: + zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE); + if (!zq) + return -ENOMEM; + break; + case AP_DEVICE_TYPE_CEX3A: + zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE); + if (!zq) + return -ENOMEM; + break; + } + if (!zq) + return -ENODEV; + zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT); + zq->queue = aq; + zq->online = 1; + atomic_set(&zq->load, 0); + ap_queue_init_reply(aq, &zq->reply); + aq->request_timeout = CEX2A_CLEANUP_TIME, + aq->private = zq; + rc = zcrypt_queue_register(zq); + if (rc) { + aq->private = NULL; + zcrypt_queue_free(zq); + } + + return rc; +} + +/** + * This is called to remove the CEX2A queue driver information + * if an AP queue device is removed. 
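+ *
+ * The zcrypt queue was attached to the AP queue's private pointer by
+ * zcrypt_cex2a_queue_probe(); if probing never succeeded the pointer
+ * is NULL and there is nothing to do.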
+ */
+static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = aq->private;
+
+	if (zq)
+		zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_cex2a_queue_driver = {
+	.probe = zcrypt_cex2a_queue_probe,
+	.remove = zcrypt_cex2a_queue_remove,
+	.suspend = ap_queue_suspend,
+	.resume = ap_queue_resume,
+	.ids = zcrypt_cex2a_queue_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+int __init zcrypt_cex2a_init(void)
+{
+	int rc;
+
+	rc = ap_driver_register(&zcrypt_cex2a_card_driver,
+				THIS_MODULE, "cex2acard");
+	if (rc)
+		return rc;
+
+	rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
+				THIS_MODULE, "cex2aqueue");
+	if (rc)
+		ap_driver_unregister(&zcrypt_cex2a_card_driver);
+
+	return rc;
+}
+
+void __exit zcrypt_cex2a_exit(void)
+{
+	ap_driver_unregister(&zcrypt_cex2a_queue_driver);
+	ap_driver_unregister(&zcrypt_cex2a_card_driver);
+}
+
+module_init(zcrypt_cex2a_init);
+module_exit(zcrypt_cex2a_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
new file mode 100644
index 000000000..66d58bc87
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2006
+ * Author(s): Robert Burroughs
+ *	      Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _ZCRYPT_CEX2A_H_
+#define _ZCRYPT_CEX2A_H_
+
+/**
+ * The type 50 message family is associated with a CEX2A card.
+ *
+ * The six members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
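+ *
+ * The six formats differ only in their field widths; for example, a
+ * CRT request for a 2048-bit (256-byte) modulus travels as a struct
+ * type50_crb2_msg, whose p, q, dp, dq and u fields hold 128 bytes each
+ * and whose message field holds the 256-byte input.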
+ */ +struct type50_hdr { + unsigned char reserved1; + unsigned char msg_type_code; /* 0x50 */ + unsigned short msg_len; + unsigned char reserved2; + unsigned char ignored; + unsigned short reserved3; +} __packed; + +#define TYPE50_TYPE_CODE 0x50 + +#define TYPE50_MEB1_FMT 0x0001 +#define TYPE50_MEB2_FMT 0x0002 +#define TYPE50_MEB3_FMT 0x0003 +#define TYPE50_CRB1_FMT 0x0011 +#define TYPE50_CRB2_FMT 0x0012 +#define TYPE50_CRB3_FMT 0x0013 + +/* Mod-Exp, with a small modulus */ +struct type50_meb1_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0001 */ + unsigned char reserved[6]; + unsigned char exponent[128]; + unsigned char modulus[128]; + unsigned char message[128]; +} __packed; + +/* Mod-Exp, with a large modulus */ +struct type50_meb2_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0002 */ + unsigned char reserved[6]; + unsigned char exponent[256]; + unsigned char modulus[256]; + unsigned char message[256]; +} __packed; + +/* Mod-Exp, with a larger modulus */ +struct type50_meb3_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0003 */ + unsigned char reserved[6]; + unsigned char exponent[512]; + unsigned char modulus[512]; + unsigned char message[512]; +} __packed; + +/* CRT, with a small modulus */ +struct type50_crb1_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0011 */ + unsigned char reserved[6]; + unsigned char p[64]; + unsigned char q[64]; + unsigned char dp[64]; + unsigned char dq[64]; + unsigned char u[64]; + unsigned char message[128]; +} __packed; + +/* CRT, with a large modulus */ +struct type50_crb2_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0012 */ + unsigned char reserved[6]; + unsigned char p[128]; + unsigned char q[128]; + unsigned char dp[128]; + unsigned char dq[128]; + unsigned char u[128]; + unsigned char message[256]; +} __packed; + +/* CRT, with a larger modulus */ +struct type50_crb3_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0013 */ + unsigned char reserved[6]; + unsigned char p[256]; + unsigned char q[256]; + unsigned char dp[256]; + unsigned char dq[256]; + unsigned char u[256]; + unsigned char message[512]; +} __packed; + +/** + * The type 80 response family is associated with a CEX2A card. + * + * Note that all unsigned char arrays are right-justified and left-padded + * with zeroes. + * + * Note that all reserved fields must be zeroes. + */ + +#define TYPE80_RSP_CODE 0x80 + +struct type80_hdr { + unsigned char reserved1; + unsigned char type; /* 0x80 */ + unsigned short len; + unsigned char code; /* 0x00 */ + unsigned char reserved2[3]; + unsigned char reserved3[8]; +} __packed; + +int zcrypt_cex2a_init(void); +void zcrypt_cex2a_exit(void); + +#endif /* _ZCRYPT_CEX2A_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c new file mode 100644 index 000000000..2a42e5962 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2012 + * Author(s): Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/atomic.h> +#include <linux/uaccess.h> +#include <linux/mod_devicetable.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_msgtype6.h" +#include "zcrypt_msgtype50.h" +#include "zcrypt_error.h" +#include "zcrypt_cex4.h" + +#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */ +#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */ +#define CEX4A_MAX_MOD_SIZE_4K 512 /* 4096 bits */ + +#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */ +#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */ + +#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE +#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE + +/* Waiting time for requests to be processed. + * Currently there are some types of request which are not deterministic. + * But the maximum time limit managed by the stomper code is set to 60sec. + * Hence we have to wait at least that time period. + */ +#define CEX4_CLEANUP_TIME (900*HZ) + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \ + "Copyright IBM Corp. 2012"); +MODULE_LICENSE("GPL"); + +static struct ap_device_id zcrypt_cex4_card_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX4, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX5, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX6, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { /* end of list */ }, +}; + +MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids); + +static struct ap_device_id zcrypt_cex4_queue_ids[] = { + { .dev_type = AP_DEVICE_TYPE_CEX4, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX5, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX6, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { /* end of list */ }, +}; + +MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids); + +/** + * Probe function for CEX4 card device. It always accepts the AP device + * since the bus_match already checked the hardware type. + * @ap_dev: pointer to the AP device. 
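+ *
+ * Depending on the function facility bits of the card, it is set up
+ * as an accelerator (CEX4A/5A/6A), a CCA coprocessor (CEX4C/5C/6C) or
+ * an EP11 coprocessor (CEX4P/5P/6P), each with its own table of speed
+ * ratings.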
+ */ +static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) +{ + /* + * Normalized speed ratings per crypto adapter + * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY + */ + static const int CEX4A_SPEED_IDX[] = { + 14, 19, 249, 42, 228, 1458, 0, 0}; + static const int CEX5A_SPEED_IDX[] = { + 8, 9, 20, 18, 66, 458, 0, 0}; + static const int CEX6A_SPEED_IDX[] = { + 6, 9, 20, 17, 65, 438, 0, 0}; + + static const int CEX4C_SPEED_IDX[] = { + 59, 69, 308, 83, 278, 2204, 209, 40}; + static const int CEX5C_SPEED_IDX[] = { + 24, 31, 50, 37, 90, 479, 27, 10}; + static const int CEX6C_SPEED_IDX[] = { + 16, 20, 32, 27, 77, 455, 23, 9}; + + static const int CEX4P_SPEED_IDX[] = { + 224, 313, 3560, 359, 605, 2827, 0, 50}; + static const int CEX5P_SPEED_IDX[] = { + 63, 84, 156, 83, 142, 533, 0, 10}; + static const int CEX6P_SPEED_IDX[] = { + 55, 70, 121, 73, 129, 522, 0, 9}; + + struct ap_card *ac = to_ap_card(&ap_dev->device); + struct zcrypt_card *zc; + int rc = 0; + + zc = zcrypt_card_alloc(); + if (!zc) + return -ENOMEM; + zc->card = ac; + ac->private = zc; + if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) { + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { + zc->type_string = "CEX4A"; + zc->user_space_type = ZCRYPT_CEX4; + memcpy(zc->speed_rating, CEX4A_SPEED_IDX, + sizeof(CEX4A_SPEED_IDX)); + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { + zc->type_string = "CEX5A"; + zc->user_space_type = ZCRYPT_CEX5; + memcpy(zc->speed_rating, CEX5A_SPEED_IDX, + sizeof(CEX5A_SPEED_IDX)); + } else { + zc->type_string = "CEX6A"; + zc->user_space_type = ZCRYPT_CEX6; + memcpy(zc->speed_rating, CEX6A_SPEED_IDX, + sizeof(CEX6A_SPEED_IDX)); + } + zc->min_mod_size = CEX4A_MIN_MOD_SIZE; + if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && + ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { + zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K; + zc->max_exp_bit_length = + CEX4A_MAX_MOD_SIZE_4K; + } else { + zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K; + zc->max_exp_bit_length = + CEX4A_MAX_MOD_SIZE_2K; + } + } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) { + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { + zc->type_string = "CEX4C"; + /* wrong user space type, must be CEX4 + * just keep it for cca compatibility + */ + zc->user_space_type = ZCRYPT_CEX3C; + memcpy(zc->speed_rating, CEX4C_SPEED_IDX, + sizeof(CEX4C_SPEED_IDX)); + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { + zc->type_string = "CEX5C"; + /* wrong user space type, must be CEX5 + * just keep it for cca compatibility + */ + zc->user_space_type = ZCRYPT_CEX3C; + memcpy(zc->speed_rating, CEX5C_SPEED_IDX, + sizeof(CEX5C_SPEED_IDX)); + } else { + zc->type_string = "CEX6C"; + /* wrong user space type, must be CEX6 + * just keep it for cca compatibility + */ + zc->user_space_type = ZCRYPT_CEX3C; + memcpy(zc->speed_rating, CEX6C_SPEED_IDX, + sizeof(CEX6C_SPEED_IDX)); + } + zc->min_mod_size = CEX4C_MIN_MOD_SIZE; + zc->max_mod_size = CEX4C_MAX_MOD_SIZE; + zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; + } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) { + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { + zc->type_string = "CEX4P"; + zc->user_space_type = ZCRYPT_CEX4; + memcpy(zc->speed_rating, CEX4P_SPEED_IDX, + sizeof(CEX4P_SPEED_IDX)); + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { + zc->type_string = "CEX5P"; + zc->user_space_type = ZCRYPT_CEX5; + memcpy(zc->speed_rating, CEX5P_SPEED_IDX, + sizeof(CEX5P_SPEED_IDX)); + } else { + zc->type_string = "CEX6P"; + zc->user_space_type = ZCRYPT_CEX6; + 
memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
+			       sizeof(CEX6P_SPEED_IDX));
+		}
+		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+	} else {
+		zcrypt_card_free(zc);
+		return -ENODEV;
+	}
+	zc->online = 1;
+
+	rc = zcrypt_card_register(zc);
+	if (rc) {
+		ac->private = NULL;
+		zcrypt_card_free(zc);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the CEX4 card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
+{
+	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+	if (zc)
+		zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_cex4_card_driver = {
+	.probe = zcrypt_cex4_card_probe,
+	.remove = zcrypt_cex4_card_remove,
+	.ids = zcrypt_cex4_card_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+/**
+ * Probe function for CEX4 queue device. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq;
+	int rc;
+
+	if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
+		zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
+					 MSGTYPE50_VARIANT_DEFAULT);
+	} else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+		zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_DEFAULT);
+	} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+		zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_EP11);
+	} else {
+		return -ENODEV;
+	}
+	zq->queue = aq;
+	zq->online = 1;
+	atomic_set(&zq->load, 0);
+	ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = CEX4_CLEANUP_TIME;
+	aq->private = zq;
+	rc = zcrypt_queue_register(zq);
+	if (rc) {
+		aq->private = NULL;
+		zcrypt_queue_free(zq);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the CEX4 queue driver information
+ * if an AP queue device is removed.
+ */ +static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev) +{ + struct ap_queue *aq = to_ap_queue(&ap_dev->device); + struct zcrypt_queue *zq = aq->private; + + if (zq) + zcrypt_queue_unregister(zq); +} + +static struct ap_driver zcrypt_cex4_queue_driver = { + .probe = zcrypt_cex4_queue_probe, + .remove = zcrypt_cex4_queue_remove, + .suspend = ap_queue_suspend, + .resume = ap_queue_resume, + .ids = zcrypt_cex4_queue_ids, + .flags = AP_DRIVER_FLAG_DEFAULT, +}; + +int __init zcrypt_cex4_init(void) +{ + int rc; + + rc = ap_driver_register(&zcrypt_cex4_card_driver, + THIS_MODULE, "cex4card"); + if (rc) + return rc; + + rc = ap_driver_register(&zcrypt_cex4_queue_driver, + THIS_MODULE, "cex4queue"); + if (rc) + ap_driver_unregister(&zcrypt_cex4_card_driver); + + return rc; +} + +void __exit zcrypt_cex4_exit(void) +{ + ap_driver_unregister(&zcrypt_cex4_queue_driver); + ap_driver_unregister(&zcrypt_cex4_card_driver); +} + +module_init(zcrypt_cex4_init); +module_exit(zcrypt_cex4_exit); diff --git a/drivers/s390/crypto/zcrypt_cex4.h b/drivers/s390/crypto/zcrypt_cex4.h new file mode 100644 index 000000000..748390a37 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_cex4.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2012 + * Author(s): Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#ifndef _ZCRYPT_CEX4_H_ +#define _ZCRYPT_CEX4_H_ + +int zcrypt_cex4_init(void); +void zcrypt_cex4_exit(void); + +#endif /* _ZCRYPT_CEX4_H_ */ diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h new file mode 100644 index 000000000..241dbb5f7 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_debug.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2016 + * Author(s): Holger Dengler (hd@linux.vnet.ibm.com) + * Harald Freudenberger <freude@de.ibm.com> + */ +#ifndef ZCRYPT_DEBUG_H +#define ZCRYPT_DEBUG_H + +#include <asm/debug.h> + +#define DBF_ERR 3 /* error conditions */ +#define DBF_WARN 4 /* warning conditions */ +#define DBF_INFO 5 /* informational */ +#define DBF_DEBUG 6 /* for debugging only */ + +#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) +#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) + +#define DBF_MAX_SPRINTF_ARGS 5 + +#define ZCRYPT_DBF(...) \ + debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__) + +extern debug_info_t *zcrypt_dbf_info; + +int zcrypt_debug_init(void); +void zcrypt_debug_exit(void); + +#endif /* ZCRYPT_DEBUG_H */ diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h new file mode 100644 index 000000000..2126f4cc6 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_error.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2006 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef _ZCRYPT_ERROR_H_ +#define _ZCRYPT_ERROR_H_ + +#include <linux/atomic.h> +#include "zcrypt_debug.h" +#include "zcrypt_api.h" + +/** + * Reply Messages + * + * Error reply messages are of two types: + * 82: Error (see below) + * 88: Error (see below) + * Both type 82 and type 88 have the same structure in the header. 
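+ *
+ * (Editorial sketch, not part of this commit: because both types share
+ * this header layout, a reply can be classified by its second byte
+ * alone,
+ *
+ *	is_error = msg[1] == TYPE82_RSP_CODE || msg[1] == TYPE88_RSP_CODE;
+ *
+ * which is how the convert_response() helpers in the msgtype drivers
+ * decide to hand a reply to convert_error().)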
+ *
+ * Request reply messages are of three known types:
+ * 80: Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
+ * 84: Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
+ * 86: Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
+ *
+ */
+struct error_hdr {
+	unsigned char reserved1;	/* 0x00 */
+	unsigned char type;		/* 0x82 or 0x88 */
+	unsigned char reserved2[2];	/* 0x0000 */
+	unsigned char reply_code;	/* reply code */
+	unsigned char reserved3[3];	/* 0x000000 */
+};
+
+#define TYPE82_RSP_CODE 0x82
+#define TYPE88_RSP_CODE 0x88
+
+#define REP82_ERROR_MACHINE_FAILURE	0x10
+#define REP82_ERROR_PREEMPT_FAILURE	0x12
+#define REP82_ERROR_CHECKPT_FAILURE	0x14
+#define REP82_ERROR_MESSAGE_TYPE	0x20
+#define REP82_ERROR_INVALID_COMM_CD	0x21	/* Type 84 */
+#define REP82_ERROR_INVALID_MSG_LEN	0x23
+#define REP82_ERROR_RESERVD_FIELD	0x24	/* was 0x50 */
+#define REP82_ERROR_FORMAT_FIELD	0x29
+#define REP82_ERROR_INVALID_COMMAND	0x30
+#define REP82_ERROR_MALFORMED_MSG	0x40
+#define REP82_ERROR_INVALID_SPECIAL_CMD	0x41
+#define REP82_ERROR_INVALID_DOMAIN_PRECHECK	0x42
+#define REP82_ERROR_RESERVED_FIELDO	0x50	/* old value */
+#define REP82_ERROR_WORD_ALIGNMENT	0x60
+#define REP82_ERROR_MESSAGE_LENGTH	0x80
+#define REP82_ERROR_OPERAND_INVALID	0x82
+#define REP82_ERROR_OPERAND_SIZE	0x84
+#define REP82_ERROR_EVEN_MOD_IN_OPND	0x85
+#define REP82_ERROR_RESERVED_FIELD	0x88
+#define REP82_ERROR_INVALID_DOMAIN_PENDING	0x8A
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR	0x8B
+#define REP82_ERROR_TRANSPORT_FAIL	0x90
+#define REP82_ERROR_PACKET_TRUNCATED	0xA0
+#define REP82_ERROR_ZERO_BUFFER_LEN	0xB0
+
+#define REP88_ERROR_MODULE_FAILURE	0x10
+
+#define REP88_ERROR_MESSAGE_TYPE	0x20
+#define REP88_ERROR_MESSAGE_MALFORMD	0x22
+#define REP88_ERROR_MESSAGE_LENGTH	0x23
+#define REP88_ERROR_RESERVED_FIELD	0x24
+#define REP88_ERROR_KEY_TYPE		0x34
+#define REP88_ERROR_INVALID_KEY	0x82	/* CEX2A */
+#define REP88_ERROR_OPERAND	0x84	/* CEX2A */
+#define REP88_ERROR_OPERAND_EVEN_MOD	0x85	/* CEX2A */
+
+static inline int convert_error(struct zcrypt_queue *zq,
+				struct ap_message *reply)
+{
+	struct error_hdr *ehdr = reply->message;
+	int card = AP_QID_CARD(zq->queue->qid);
+	int queue = AP_QID_QUEUE(zq->queue->qid);
+
+	switch (ehdr->reply_code) {
+	case REP82_ERROR_OPERAND_INVALID:
+	case REP82_ERROR_OPERAND_SIZE:
+	case REP82_ERROR_EVEN_MOD_IN_OPND:
+	case REP88_ERROR_MESSAGE_MALFORMD:
+	case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
+	case REP82_ERROR_INVALID_DOMAIN_PENDING:
+	case REP82_ERROR_INVALID_SPECIAL_CMD:
+	case REP82_ERROR_FILTERED_BY_HYPERVISOR:
+	// REP88_ERROR_INVALID_KEY		// '82' CEX2A
+	// REP88_ERROR_OPERAND			// '84' CEX2A
+	// REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
+		/* Invalid input data. */
+		ZCRYPT_DBF(DBF_WARN,
+			   "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
+			   card, queue, ehdr->reply_code);
+		return -EINVAL;
+	case REP82_ERROR_MESSAGE_TYPE:
+	// REP88_ERROR_MESSAGE_TYPE		// '20' CEX2A
+		/*
+		 * Sending a message of the wrong type is a bug in the
+		 * device driver. Log an error message, set the device
+		 * offline and then repeat the request.
+ */ + atomic_set(&zcrypt_rescan_req, 1); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + card, queue); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", + card, queue, ehdr->reply_code); + return -EAGAIN; + case REP82_ERROR_TRANSPORT_FAIL: + case REP82_ERROR_MACHINE_FAILURE: + // REP88_ERROR_MODULE_FAILURE // '10' CEX2A + /* If a card fails disable it and repeat the request. */ + atomic_set(&zcrypt_rescan_req, 1); + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + card, queue); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", + card, queue, ehdr->reply_code); + return -EAGAIN; + default: + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + card, queue); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", + card, queue, ehdr->reply_code); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +#endif /* _ZCRYPT_ERROR_H_ */ diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c new file mode 100644 index 000000000..f159662c9 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -0,0 +1,560 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/atomic.h> +#include <linux/uaccess.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_error.h" +#include "zcrypt_msgtype50.h" + +/* 4096 bits */ +#define CEX3A_MAX_MOD_SIZE 512 + +/* max outputdatalength + type80_hdr */ +#define CEX2A_MAX_RESPONSE_SIZE 0x110 + +/* 512 bit modulus, (max outputdatalength) + type80_hdr */ +#define CEX3A_MAX_RESPONSE_SIZE 0x210 + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \ + "Copyright IBM Corp. 2001, 2012"); +MODULE_LICENSE("GPL"); + +/** + * The type 50 message family is associated with a CEX2A card. + * + * The four members of the family are described below. + * + * Note that all unsigned char arrays are right-justified and left-padded + * with zeroes. + * + * Note that all reserved fields must be zeroes. 
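+ *
+ * (Editorial sketch, not part of this commit: "right-justified and
+ * left-padded with zeroes" is equivalent to this hypothetical helper,
+ *
+ *	static void copy_right_justified(unsigned char *dst, size_t dlen,
+ *					 const unsigned char *src, size_t slen)
+ *	{
+ *		memset(dst, 0, dlen - slen);
+ *		memcpy(dst + dlen - slen, src, slen);
+ *	}
+ *
+ * the builders below get the same effect by zeroing the whole message
+ * and then writing each operand at field end minus operand length.)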
+ */ +struct type50_hdr { + unsigned char reserved1; + unsigned char msg_type_code; /* 0x50 */ + unsigned short msg_len; + unsigned char reserved2; + unsigned char ignored; + unsigned short reserved3; +} __packed; + +#define TYPE50_TYPE_CODE 0x50 + +#define TYPE50_MEB1_FMT 0x0001 +#define TYPE50_MEB2_FMT 0x0002 +#define TYPE50_MEB3_FMT 0x0003 +#define TYPE50_CRB1_FMT 0x0011 +#define TYPE50_CRB2_FMT 0x0012 +#define TYPE50_CRB3_FMT 0x0013 + +/* Mod-Exp, with a small modulus */ +struct type50_meb1_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0001 */ + unsigned char reserved[6]; + unsigned char exponent[128]; + unsigned char modulus[128]; + unsigned char message[128]; +} __packed; + +/* Mod-Exp, with a large modulus */ +struct type50_meb2_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0002 */ + unsigned char reserved[6]; + unsigned char exponent[256]; + unsigned char modulus[256]; + unsigned char message[256]; +} __packed; + +/* Mod-Exp, with a larger modulus */ +struct type50_meb3_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0003 */ + unsigned char reserved[6]; + unsigned char exponent[512]; + unsigned char modulus[512]; + unsigned char message[512]; +} __packed; + +/* CRT, with a small modulus */ +struct type50_crb1_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0011 */ + unsigned char reserved[6]; + unsigned char p[64]; + unsigned char q[64]; + unsigned char dp[64]; + unsigned char dq[64]; + unsigned char u[64]; + unsigned char message[128]; +} __packed; + +/* CRT, with a large modulus */ +struct type50_crb2_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0012 */ + unsigned char reserved[6]; + unsigned char p[128]; + unsigned char q[128]; + unsigned char dp[128]; + unsigned char dq[128]; + unsigned char u[128]; + unsigned char message[256]; +} __packed; + +/* CRT, with a larger modulus */ +struct type50_crb3_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0013 */ + unsigned char reserved[6]; + unsigned char p[256]; + unsigned char q[256]; + unsigned char dp[256]; + unsigned char dq[256]; + unsigned char u[256]; + unsigned char message[512]; +} __packed; + +/** + * The type 80 response family is associated with a CEX2A card. + * + * Note that all unsigned char arrays are right-justified and left-padded + * with zeroes. + * + * Note that all reserved fields must be zeroes. + */ + +#define TYPE80_RSP_CODE 0x80 + +struct type80_hdr { + unsigned char reserved1; + unsigned char type; /* 0x80 */ + unsigned short len; + unsigned char code; /* 0x00 */ + unsigned char reserved2[3]; + unsigned char reserved3[8]; +} __packed; + +unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode) +{ + + if (!mex->inputdatalength) + return -EINVAL; + + if (mex->inputdatalength <= 128) /* 1024 bit */ + *fcode = MEX_1K; + else if (mex->inputdatalength <= 256) /* 2048 bit */ + *fcode = MEX_2K; + else /* 4096 bit */ + *fcode = MEX_4K; + + return 0; +} + +unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode) +{ + + if (!crt->inputdatalength) + return -EINVAL; + + if (crt->inputdatalength <= 128) /* 1024 bit */ + *fcode = CRT_1K; + else if (crt->inputdatalength <= 256) /* 2048 bit */ + *fcode = CRT_2K; + else /* 4096 bit */ + *fcode = CRT_4K; + + return 0; +} + +/** + * Convert a ICAMEX message to a type50 MEX message. 
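+ *
+ * (Editorial note: mex->inputdatalength picks the keyblock format
+ * below: up to 128 bytes (1024 bits) -> MEB1, up to 256 bytes -> MEB2,
+ * up to 512 bytes -> MEB3; anything larger is rejected with -EINVAL.)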
+ *
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success, -EINVAL for an unsupported modulus size or
+ * -EFAULT if copying from user space fails.
+ */
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo *mex)
+{
+	unsigned char *mod, *exp, *inp;
+	int mod_len;
+
+	mod_len = mex->inputdatalength;
+
+	if (mod_len <= 128) {
+		struct type50_meb1_msg *meb1 = ap_msg->message;
+
+		memset(meb1, 0, sizeof(*meb1));
+		ap_msg->length = sizeof(*meb1);
+		meb1->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb1->header.msg_len = sizeof(*meb1);
+		meb1->keyblock_type = TYPE50_MEB1_FMT;
+		mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
+		exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
+		inp = meb1->message + sizeof(meb1->message) - mod_len;
+	} else if (mod_len <= 256) {
+		struct type50_meb2_msg *meb2 = ap_msg->message;
+
+		memset(meb2, 0, sizeof(*meb2));
+		ap_msg->length = sizeof(*meb2);
+		meb2->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb2->header.msg_len = sizeof(*meb2);
+		meb2->keyblock_type = TYPE50_MEB2_FMT;
+		mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
+		exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
+		inp = meb2->message + sizeof(meb2->message) - mod_len;
+	} else if (mod_len <= 512) {
+		struct type50_meb3_msg *meb3 = ap_msg->message;
+
+		memset(meb3, 0, sizeof(*meb3));
+		ap_msg->length = sizeof(*meb3);
+		meb3->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb3->header.msg_len = sizeof(*meb3);
+		meb3->keyblock_type = TYPE50_MEB3_FMT;
+		mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
+		exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
+		inp = meb3->message + sizeof(meb3->message) - mod_len;
+	} else
+		return -EINVAL;
+
+	if (copy_from_user(mod, mex->n_modulus, mod_len) ||
+	    copy_from_user(exp, mex->b_key, mod_len) ||
+	    copy_from_user(inp, mex->inputdata, mod_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type50 CRT message.
+ *
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success, -EINVAL for an unsupported modulus size or
+ * -EFAULT if copying from user space fails.
+ */
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo_crt *crt)
+{
+	int mod_len, short_len;
+	unsigned char *p, *q, *dp, *dq, *u, *inp;
+
+	mod_len = crt->inputdatalength;
+	short_len = (mod_len + 1) / 2;
+
+	/*
+	 * CEX2A and CEX3A w/o FW update can handle requests up to
+	 * 256 byte modulus (2k keys).
+	 * CEX3A with FW update and CEX4A cards are able to handle
+	 * 512 byte modulus (4k keys).
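+	 *
+	 * (Editorial note: a 2048-bit key gives mod_len = 256 and
+	 * short_len = (256 + 1) / 2 = 128, so the CRB2 format below is
+	 * selected and each CRT component occupies 128 bytes.)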
+	 */
+	if (mod_len <= 128) {		/* up to 1024 bit key size */
+		struct type50_crb1_msg *crb1 = ap_msg->message;
+
+		memset(crb1, 0, sizeof(*crb1));
+		ap_msg->length = sizeof(*crb1);
+		crb1->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb1->header.msg_len = sizeof(*crb1);
+		crb1->keyblock_type = TYPE50_CRB1_FMT;
+		p = crb1->p + sizeof(crb1->p) - short_len;
+		q = crb1->q + sizeof(crb1->q) - short_len;
+		dp = crb1->dp + sizeof(crb1->dp) - short_len;
+		dq = crb1->dq + sizeof(crb1->dq) - short_len;
+		u = crb1->u + sizeof(crb1->u) - short_len;
+		inp = crb1->message + sizeof(crb1->message) - mod_len;
+	} else if (mod_len <= 256) {	/* up to 2048 bit key size */
+		struct type50_crb2_msg *crb2 = ap_msg->message;
+
+		memset(crb2, 0, sizeof(*crb2));
+		ap_msg->length = sizeof(*crb2);
+		crb2->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb2->header.msg_len = sizeof(*crb2);
+		crb2->keyblock_type = TYPE50_CRB2_FMT;
+		p = crb2->p + sizeof(crb2->p) - short_len;
+		q = crb2->q + sizeof(crb2->q) - short_len;
+		dp = crb2->dp + sizeof(crb2->dp) - short_len;
+		dq = crb2->dq + sizeof(crb2->dq) - short_len;
+		u = crb2->u + sizeof(crb2->u) - short_len;
+		inp = crb2->message + sizeof(crb2->message) - mod_len;
+	} else if ((mod_len <= 512) &&	/* up to 4096 bit key size */
+		   (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
+		struct type50_crb3_msg *crb3 = ap_msg->message;
+
+		memset(crb3, 0, sizeof(*crb3));
+		ap_msg->length = sizeof(*crb3);
+		crb3->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb3->header.msg_len = sizeof(*crb3);
+		crb3->keyblock_type = TYPE50_CRB3_FMT;
+		p = crb3->p + sizeof(crb3->p) - short_len;
+		q = crb3->q + sizeof(crb3->q) - short_len;
+		dp = crb3->dp + sizeof(crb3->dp) - short_len;
+		dq = crb3->dq + sizeof(crb3->dq) - short_len;
+		u = crb3->u + sizeof(crb3->u) - short_len;
+		inp = crb3->message + sizeof(crb3->message) - mod_len;
+	} else
+		return -EINVAL;
+
+	/*
+	 * Correct the offsets of p, dp and u_mult_inv according to
+	 * zcrypt.h: those fields carry a leading type04 extension (see
+	 * MSGTYPE_ADJUSTMENT) that the type50 format does not need, so
+	 * skip it; the blocks themselves are right-aligned.
+	 */
+	if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
+	    copy_from_user(q, crt->nq_prime, short_len) ||
+	    copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) ||
+	    copy_from_user(dq, crt->bq_key, short_len) ||
+	    copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) ||
+	    copy_from_user(inp, crt->inputdata, mod_len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * Copy results from a type 80 reply message back to user space.
+ *
+ * @zq: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success, -EFAULT on copy failure or -EAGAIN if the
+ * reply is malformed.
+ */
+static int convert_type80(struct zcrypt_queue *zq,
+			  struct ap_message *reply,
+			  char __user *outputdata,
+			  unsigned int outputdatalength)
+{
+	struct type80_hdr *t80h = reply->message;
+	unsigned char *data;
+
+	if (t80h->len < sizeof(*t80h) + outputdatalength) {
+		/* The reply is too short; the CEX2A card should never do that. */
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   t80h->code);
+		return -EAGAIN;	/* repeat the request on a different device.
*/ + } + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) + BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); + else + BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE); + data = reply->message + t80h->len - outputdatalength; + if (copy_to_user(outputdata, data, outputdatalength)) + return -EFAULT; + return 0; +} + +static int convert_response(struct zcrypt_queue *zq, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + /* Response type byte is the second byte in the response. */ + unsigned char rtype = ((unsigned char *) reply->message)[1]; + + switch (rtype) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + return convert_error(zq, reply); + case TYPE80_RSP_CODE: + return convert_type80(zq, reply, + outputdata, outputdatalength); + default: /* Unknown response type, this should NEVER EVER happen */ + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (unsigned int) rtype); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. + * @aq: pointer to the AP device + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_cex2a_receive(struct ap_queue *aq, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct type80_hdr *t80h; + int length; + + /* Copy the reply message to the request message buffer. */ + if (!reply) + goto out; /* ap_msg->rc indicates the error */ + t80h = reply->message; + if (t80h->type == TYPE80_RSP_CODE) { + if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) + length = min_t(int, + CEX2A_MAX_RESPONSE_SIZE, t80h->len); + else + length = min_t(int, + CEX3A_MAX_RESPONSE_SIZE, t80h->len); + memcpy(msg->message, reply->message, length); + } else + memcpy(msg->message, reply->message, sizeof(error_reply)); +out: + complete((struct completion *) msg->private); +} + +static atomic_t zcrypt_step = ATOMIC_INIT(0); + +/** + * The request distributor calls this function if it picked the CEX2A + * device to handle a modexpo request. 
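+ *
+ * (Editorial sketch, not part of this commit: both entry points below
+ * follow one synchronous request pattern,
+ *
+ *	ap_init_message(&ap_msg);
+ *	... allocate buffer, build type50 request, set psmid/private ...
+ *	init_completion(&work);
+ *	ap_queue_message(zq->queue, &ap_msg);
+ *	if (wait_for_completion_interruptible(&work) == 0)
+ *		rc = ap_msg.rc ? ap_msg.rc : convert_response(...);
+ *	else
+ *		ap_cancel_message(zq->queue, &ap_msg);
+ *
+ * with the completion signalled from zcrypt_cex2a_receive() above.)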
+ * @zq: pointer to zcrypt_queue structure that identifies the + * CEX2A device to the request distributor + * @mex: pointer to the modexpo request buffer + */ +static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, + struct ica_rsa_modexpo *mex) +{ + struct ap_message ap_msg; + struct completion work; + int rc; + + ap_init_message(&ap_msg); + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) + ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, + GFP_KERNEL); + else + ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, + GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_cex2a_receive; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &work; + rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex); + if (rc) + goto out_free; + init_completion(&work); + ap_queue_message(zq->queue, &ap_msg); + rc = wait_for_completion_interruptible(&work); + if (rc == 0) { + rc = ap_msg.rc; + if (rc == 0) + rc = convert_response(zq, &ap_msg, mex->outputdata, + mex->outputdatalength); + } else + /* Signal pending. */ + ap_cancel_message(zq->queue, &ap_msg); +out_free: + kfree(ap_msg.message); + return rc; +} + +/** + * The request distributor calls this function if it picked the CEX2A + * device to handle a modexpo_crt request. + * @zq: pointer to zcrypt_queue structure that identifies the + * CEX2A device to the request distributor + * @crt: pointer to the modexpoc_crt request buffer + */ +static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, + struct ica_rsa_modexpo_crt *crt) +{ + struct ap_message ap_msg; + struct completion work; + int rc; + + ap_init_message(&ap_msg); + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) + ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, + GFP_KERNEL); + else + ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, + GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_cex2a_receive; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &work; + rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt); + if (rc) + goto out_free; + init_completion(&work); + ap_queue_message(zq->queue, &ap_msg); + rc = wait_for_completion_interruptible(&work); + if (rc == 0) { + rc = ap_msg.rc; + if (rc == 0) + rc = convert_response(zq, &ap_msg, crt->outputdata, + crt->outputdatalength); + } else + /* Signal pending. */ + ap_cancel_message(zq->queue, &ap_msg); +out_free: + kfree(ap_msg.message); + return rc; +} + +/** + * The crypto operations for message type 50. + */ +static struct zcrypt_ops zcrypt_msgtype50_ops = { + .rsa_modexpo = zcrypt_cex2a_modexpo, + .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, + .owner = THIS_MODULE, + .name = MSGTYPE50_NAME, + .variant = MSGTYPE50_VARIANT_DEFAULT, +}; + +void __init zcrypt_msgtype50_init(void) +{ + zcrypt_msgtype_register(&zcrypt_msgtype50_ops); +} + +void __exit zcrypt_msgtype50_exit(void) +{ + zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops); +} diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h new file mode 100644 index 000000000..8530f652e --- /dev/null +++ b/drivers/s390/crypto/zcrypt_msgtype50.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 
2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#ifndef _ZCRYPT_MSGTYPE50_H_ +#define _ZCRYPT_MSGTYPE50_H_ + +#define MSGTYPE50_NAME "zcrypt_msgtype50" +#define MSGTYPE50_VARIANT_DEFAULT 0 + +#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ +#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /* sizeof(struct type50_crb3_msg) */ + +#define MSGTYPE_ADJUSTMENT 0x08 /* type04 extension (not needed in type50) */ + +unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *); +unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *); + +void zcrypt_msgtype50_init(void); +void zcrypt_msgtype50_exit(void); + +#endif /* _ZCRYPT_MSGTYPE50_H_ */ diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c new file mode 100644 index 000000000..2101776a8 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -0,0 +1,1377 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * zcrypt 2.1.0 + * + * Copyright IBM Corp. 2001, 2012 + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> + */ + +#define KMSG_COMPONENT "zcrypt" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <linux/uaccess.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_error.h" +#include "zcrypt_msgtype6.h" +#include "zcrypt_cca_key.h" + +#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ +#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ + +#define CEIL4(x) ((((x)+3)/4)*4) + +struct response_type { + struct completion work; + int type; +}; +#define PCIXCC_RESPONSE_TYPE_ICA 0 +#define PCIXCC_RESPONSE_TYPE_XCRB 1 +#define PCIXCC_RESPONSE_TYPE_EP11 2 + +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ + "Copyright IBM Corp. 2001, 2012"); +MODULE_LICENSE("GPL"); + +/** + * CPRB + * Note that all shorts, ints and longs are little-endian. + * All pointer fields are 32-bits long, and mean nothing + * + * A request CPRB is followed by a request_parameter_block. + * + * The request (or reply) parameter block is organized thus: + * function code + * VUD block + * key block + */ +struct CPRB { + unsigned short cprb_len; /* CPRB length */ + unsigned char cprb_ver_id; /* CPRB version id. */ + unsigned char pad_000; /* Alignment pad byte. */ + unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */ + unsigned char srpi_verb; /* SRPI verb type */ + unsigned char flags; /* flags */ + unsigned char func_id[2]; /* function id */ + unsigned char checkpoint_flag; /* */ + unsigned char resv2; /* reserved */ + unsigned short req_parml; /* request parameter buffer */ + /* length 16-bit little endian */ + unsigned char req_parmp[4]; /* request parameter buffer * + * pointer (means nothing: the * + * parameter buffer follows * + * the CPRB). 
*/ + unsigned char req_datal[4]; /* request data buffer */ + /* length ULELONG */ + unsigned char req_datap[4]; /* request data buffer */ + /* pointer */ + unsigned short rpl_parml; /* reply parameter buffer */ + /* length 16-bit little endian */ + unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */ + unsigned char rpl_parmp[4]; /* reply parameter buffer * + * pointer (means nothing: the * + * parameter buffer follows * + * the CPRB). */ + unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */ + unsigned char rpl_datap[4]; /* reply data buffer */ + /* pointer */ + unsigned short ccp_rscode; /* server reason code ULESHORT */ + unsigned short ccp_rtcode; /* server return code ULESHORT */ + unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/ + unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */ + unsigned char repd_datal[4]; /* replied data length ULELONG */ + unsigned char req_pc[2]; /* PC identifier */ + unsigned char res_origin[8]; /* resource origin */ + unsigned char mac_value[8]; /* Mac Value */ + unsigned char logon_id[8]; /* Logon Identifier */ + unsigned char usage_domain[2]; /* cdx */ + unsigned char resv3[18]; /* reserved for requestor */ + unsigned short svr_namel; /* server name length ULESHORT */ + unsigned char svr_name[8]; /* server name */ +} __packed; + +struct function_and_rules_block { + unsigned char function_code[2]; + unsigned short ulen; + unsigned char only_rule[8]; +} __packed; + +/** + * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C + * card in a type6 message. The 3 fields that must be filled in at execution + * time are req_parml, rpl_parml and usage_domain. + * Everything about this interface is ascii/big-endian, since the + * device does *not* have 'Intel inside'. + * + * The CPRBX is followed immediately by the parm block. 
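+ *
+ * (Editorial sketch, not part of this commit: the resulting request
+ * layout, with offsets implied by the values used in this file, is
+ *
+ *	struct type6_hdr	at offset 0x00
+ *	struct CPRBX		at offset 0x58 (= hdr.offset1)
+ *	parm block		at offset 0x58 + cprbx.cprb_len
+ *
+ * which is why the static type6 headers here set .offset1 = 0x58.)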
+ * The parm block contains: + * - function code ('PD' 0x5044 or 'PK' 0x504B) + * - rule block (one of:) + * + 0x000A 'PKCS-1.2' (MCL2 'PD') + * + 0x000A 'ZERO-PAD' (MCL2 'PK') + * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD') + * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK') + * - VUD block + */ +static const struct CPRBX static_cprbx = { + .cprb_len = 0x00DC, + .cprb_ver_id = 0x02, + .func_id = {0x54, 0x32}, +}; + +int speed_idx_cca(int req_type) +{ + switch (req_type) { + case 0x4142: + case 0x4149: + case 0x414D: + case 0x4341: + case 0x4344: + case 0x4354: + case 0x4358: + case 0x444B: + case 0x4558: + case 0x4643: + case 0x4651: + case 0x4C47: + case 0x4C4B: + case 0x4C51: + case 0x4F48: + case 0x504F: + case 0x5053: + case 0x5058: + case 0x5343: + case 0x5344: + case 0x5345: + case 0x5350: + return LOW; + case 0x414B: + case 0x4345: + case 0x4349: + case 0x434D: + case 0x4847: + case 0x4849: + case 0x484D: + case 0x4850: + case 0x4851: + case 0x4954: + case 0x4958: + case 0x4B43: + case 0x4B44: + case 0x4B45: + case 0x4B47: + case 0x4B48: + case 0x4B49: + case 0x4B4E: + case 0x4B50: + case 0x4B52: + case 0x4B54: + case 0x4B58: + case 0x4D50: + case 0x4D53: + case 0x4D56: + case 0x4D58: + case 0x5044: + case 0x5045: + case 0x5046: + case 0x5047: + case 0x5049: + case 0x504B: + case 0x504D: + case 0x5254: + case 0x5347: + case 0x5349: + case 0x534B: + case 0x534D: + case 0x5356: + case 0x5358: + case 0x5443: + case 0x544B: + case 0x5647: + return HIGH; + default: + return MEDIUM; + } +} + +int speed_idx_ep11(int req_type) +{ + switch (req_type) { + case 1: + case 2: + case 36: + case 37: + case 38: + case 39: + case 40: + return LOW; + case 17: + case 18: + case 19: + case 20: + case 21: + case 22: + case 26: + case 30: + case 31: + case 32: + case 33: + case 34: + case 35: + return HIGH; + default: + return MEDIUM; + } +} + + +/** + * Convert a ICAMEX message to a type6 MEX message. + * + * @zq: crypto device pointer + * @ap_msg: pointer to AP message + * @mex: pointer to user input data + * + * Returns 0 on success or negative errno value. + */ +static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, + struct ap_message *ap_msg, + struct ica_rsa_modexpo *mex) +{ + static struct type6_hdr static_type6_hdrX = { + .type = 0x06, + .offset1 = 0x00000058, + .agent_id = {'C', 'A',}, + .function_code = {'P', 'K'}, + }; + static struct function_and_rules_block static_pke_fnr = { + .function_code = {'P', 'K'}, + .ulen = 10, + .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '} + }; + struct { + struct type6_hdr hdr; + struct CPRBX cprbx; + struct function_and_rules_block fr; + unsigned short length; + char text[0]; + } __packed * msg = ap_msg->message; + int size; + + /* + * The inputdatalength was a selection criteria in the dispatching + * function zcrypt_rsa_modexpo(). However, make sure the following + * copy_from_user() never exceeds the allocated buffer space. + */ + if (WARN_ON_ONCE(mex->inputdatalength > PAGE_SIZE)) + return -EINVAL; + + /* VUD.ciphertext */ + msg->length = mex->inputdatalength + 2; + if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength)) + return -EFAULT; + + /* Set up key which is located after the variable length text. 
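+	 * (Editorial note: zcrypt_type6_mex_key_en(), from
+	 * zcrypt_cca_key.h, writes the key token directly behind
+	 * msg->text, so the total size computed below is header +
+	 * ciphertext + key block.)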
*/ + size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength); + if (size < 0) + return size; + size += sizeof(*msg) + mex->inputdatalength; + + /* message header, cprbx and f&r */ + msg->hdr = static_type6_hdrX; + msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); + msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); + + msg->cprbx = static_cprbx; + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); + msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; + + msg->fr = static_pke_fnr; + + msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx); + + ap_msg->length = size; + return 0; +} + +/** + * Convert a ICACRT message to a type6 CRT message. + * + * @zq: crypto device pointer + * @ap_msg: pointer to AP message + * @crt: pointer to user input data + * + * Returns 0 on success or negative errno value. + */ +static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, + struct ap_message *ap_msg, + struct ica_rsa_modexpo_crt *crt) +{ + static struct type6_hdr static_type6_hdrX = { + .type = 0x06, + .offset1 = 0x00000058, + .agent_id = {'C', 'A',}, + .function_code = {'P', 'D'}, + }; + static struct function_and_rules_block static_pkd_fnr = { + .function_code = {'P', 'D'}, + .ulen = 10, + .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} + }; + + struct { + struct type6_hdr hdr; + struct CPRBX cprbx; + struct function_and_rules_block fr; + unsigned short length; + char text[0]; + } __packed * msg = ap_msg->message; + int size; + + /* + * The inputdatalength was a selection criteria in the dispatching + * function zcrypt_rsa_crt(). However, make sure the following + * copy_from_user() never exceeds the allocated buffer space. + */ + if (WARN_ON_ONCE(crt->inputdatalength > PAGE_SIZE)) + return -EINVAL; + + /* VUD.ciphertext */ + msg->length = crt->inputdatalength + 2; + if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength)) + return -EFAULT; + + /* Set up key which is located after the variable length text. */ + size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength); + if (size < 0) + return size; + size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ + + /* message header, cprbx and f&r */ + msg->hdr = static_type6_hdrX; + msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); + msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); + + msg->cprbx = static_cprbx; + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); + msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = + size - sizeof(msg->hdr) - sizeof(msg->cprbx); + + msg->fr = static_pkd_fnr; + + ap_msg->length = size; + return 0; +} + +/** + * Convert a XCRB message to a type6 CPRB message. + * + * @zq: crypto device pointer + * @ap_msg: pointer to AP message + * @xcRB: pointer to user input data + * + * Returns 0 on success or -EFAULT, -EINVAL. 
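+ *
+ * (Editorial note on the "overflow after alignment" checks in the
+ * function below: CEIL4(x) is (((x) + 3) / 4) * 4, so an input within
+ * 3 of the 32-bit maximum wraps around, e.g. CEIL4(0xfffffffe) == 0,
+ * and the test CEIL4(len) < len catches exactly this case.)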
+ */ +struct type86_fmt2_msg { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; +} __packed; + +static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg, + struct ica_xcRB *xcRB, + unsigned int *fcode, + unsigned short **dom) +{ + static struct type6_hdr static_type6_hdrX = { + .type = 0x06, + .offset1 = 0x00000058, + }; + struct { + struct type6_hdr hdr; + struct CPRBX cprbx; + } __packed * msg = ap_msg->message; + + int rcblen = CEIL4(xcRB->request_control_blk_length); + int replylen, req_sumlen, resp_sumlen; + char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen; + char *function_code; + + if (CEIL4(xcRB->request_control_blk_length) < + xcRB->request_control_blk_length) + return -EINVAL; /* overflow after alignment*/ + + /* length checks */ + ap_msg->length = sizeof(struct type6_hdr) + + CEIL4(xcRB->request_control_blk_length) + + xcRB->request_data_length; + if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE) + return -EINVAL; + + /* + * Overflow check + * sum must be greater (or equal) than the largest operand + */ + req_sumlen = CEIL4(xcRB->request_control_blk_length) + + xcRB->request_data_length; + if ((CEIL4(xcRB->request_control_blk_length) <= + xcRB->request_data_length) ? + (req_sumlen < xcRB->request_data_length) : + (req_sumlen < CEIL4(xcRB->request_control_blk_length))) { + return -EINVAL; + } + + if (CEIL4(xcRB->reply_control_blk_length) < + xcRB->reply_control_blk_length) + return -EINVAL; /* overflow after alignment*/ + + replylen = sizeof(struct type86_fmt2_msg) + + CEIL4(xcRB->reply_control_blk_length) + + xcRB->reply_data_length; + if (replylen > MSGTYPE06_MAX_MSG_SIZE) + return -EINVAL; + + /* + * Overflow check + * sum must be greater (or equal) than the largest operand + */ + resp_sumlen = CEIL4(xcRB->reply_control_blk_length) + + xcRB->reply_data_length; + if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ? 
+ (resp_sumlen < xcRB->reply_data_length) : + (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) { + return -EINVAL; + } + + /* prepare type6 header */ + msg->hdr = static_type6_hdrX; + memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID)); + msg->hdr.ToCardLen1 = xcRB->request_control_blk_length; + if (xcRB->request_data_length) { + msg->hdr.offset2 = msg->hdr.offset1 + rcblen; + msg->hdr.ToCardLen2 = xcRB->request_data_length; + } + msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length; + msg->hdr.FromCardLen2 = xcRB->reply_data_length; + + /* prepare CPRB */ + if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr, + xcRB->request_control_blk_length)) + return -EFAULT; + if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > + xcRB->request_control_blk_length) + return -EINVAL; + function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; + memcpy(msg->hdr.function_code, function_code, + sizeof(msg->hdr.function_code)); + + *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1]; + *dom = (unsigned short *)&msg->cprbx.domain; + + if (memcmp(function_code, "US", 2) == 0 + || memcmp(function_code, "AU", 2) == 0) + ap_msg->special = 1; + else + ap_msg->special = 0; + + /* copy data block */ + if (xcRB->request_data_length && + copy_from_user(req_data, xcRB->request_data_address, + xcRB->request_data_length)) + return -EFAULT; + + return 0; +} + +static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg, + struct ep11_urb *xcRB, + unsigned int *fcode) +{ + unsigned int lfmt; + static struct type6_hdr static_type6_ep11_hdr = { + .type = 0x06, + .rqid = {0x00, 0x01}, + .function_code = {0x00, 0x00}, + .agent_id[0] = 0x58, /* {'X'} */ + .agent_id[1] = 0x43, /* {'C'} */ + .offset1 = 0x00000058, + }; + + struct { + struct type6_hdr hdr; + struct ep11_cprb cprbx; + unsigned char pld_tag; /* fixed value 0x30 */ + unsigned char pld_lenfmt; /* payload length format */ + } __packed * msg = ap_msg->message; + + struct pld_hdr { + unsigned char func_tag; /* fixed value 0x4 */ + unsigned char func_len; /* fixed value 0x4 */ + unsigned int func_val; /* function ID */ + unsigned char dom_tag; /* fixed value 0x4 */ + unsigned char dom_len; /* fixed value 0x4 */ + unsigned int dom_val; /* domain id */ + } __packed * payload_hdr = NULL; + + if (CEIL4(xcRB->req_len) < xcRB->req_len) + return -EINVAL; /* overflow after alignment*/ + + /* length checks */ + ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len; + if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE - + (sizeof(struct type6_hdr))) + return -EINVAL; + + if (CEIL4(xcRB->resp_len) < xcRB->resp_len) + return -EINVAL; /* overflow after alignment*/ + + if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE - + (sizeof(struct type86_fmt2_msg))) + return -EINVAL; + + /* prepare type6 header */ + msg->hdr = static_type6_ep11_hdr; + msg->hdr.ToCardLen1 = xcRB->req_len; + msg->hdr.FromCardLen1 = xcRB->resp_len; + + /* Import CPRB data from the ioctl input parameter */ + if (copy_from_user(&(msg->cprbx.cprb_len), + (char __force __user *)xcRB->req, xcRB->req_len)) { + return -EFAULT; + } + + if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ + switch (msg->pld_lenfmt & 0x03) { + case 1: + lfmt = 2; + break; + case 2: + lfmt = 3; + break; + default: + return -EINVAL; + } + } else { + lfmt = 1; /* length format #1 */ + } + payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); + *fcode = payload_hdr->func_val & 0xFFFF; + + return 0; +} + +/** + * Copy results from a type 86 
ICA reply message back to user space. + * + * @zq: crypto device pointer + * @reply: reply AP message. + * @data: pointer to user output data + * @length: size of user output data + * + * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. + */ +struct type86x_reply { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + struct CPRBX cprbx; + unsigned char pad[4]; /* 4 byte function code/rules block ? */ + unsigned short length; + char text[0]; +} __packed; + +struct type86_ep11_reply { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + struct ep11_cprb cprbx; +} __packed; + +static int convert_type86_ica(struct zcrypt_queue *zq, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + static unsigned char static_pad[] = { + 0x00, 0x02, + 0x1B, 0x7B, 0x5D, 0xB5, 0x75, 0x01, 0x3D, 0xFD, + 0x8D, 0xD1, 0xC7, 0x03, 0x2D, 0x09, 0x23, 0x57, + 0x89, 0x49, 0xB9, 0x3F, 0xBB, 0x99, 0x41, 0x5B, + 0x75, 0x21, 0x7B, 0x9D, 0x3B, 0x6B, 0x51, 0x39, + 0xBB, 0x0D, 0x35, 0xB9, 0x89, 0x0F, 0x93, 0xA5, + 0x0B, 0x47, 0xF1, 0xD3, 0xBB, 0xCB, 0xF1, 0x9D, + 0x23, 0x73, 0x71, 0xFF, 0xF3, 0xF5, 0x45, 0xFB, + 0x61, 0x29, 0x23, 0xFD, 0xF1, 0x29, 0x3F, 0x7F, + 0x17, 0xB7, 0x1B, 0xA9, 0x19, 0xBD, 0x57, 0xA9, + 0xD7, 0x95, 0xA3, 0xCB, 0xED, 0x1D, 0xDB, 0x45, + 0x7D, 0x11, 0xD1, 0x51, 0x1B, 0xED, 0x71, 0xE9, + 0xB1, 0xD1, 0xAB, 0xAB, 0x21, 0x2B, 0x1B, 0x9F, + 0x3B, 0x9F, 0xF7, 0xF7, 0xBD, 0x63, 0xEB, 0xAD, + 0xDF, 0xB3, 0x6F, 0x5B, 0xDB, 0x8D, 0xA9, 0x5D, + 0xE3, 0x7D, 0x77, 0x49, 0x47, 0xF5, 0xA7, 0xFD, + 0xAB, 0x2F, 0x27, 0x35, 0x77, 0xD3, 0x49, 0xC9, + 0x09, 0xEB, 0xB1, 0xF9, 0xBF, 0x4B, 0xCB, 0x2B, + 0xEB, 0xEB, 0x05, 0xFF, 0x7D, 0xC7, 0x91, 0x8B, + 0x09, 0x83, 0xB9, 0xB9, 0x69, 0x33, 0x39, 0x6B, + 0x79, 0x75, 0x19, 0xBF, 0xBB, 0x07, 0x1D, 0xBD, + 0x29, 0xBF, 0x39, 0x95, 0x93, 0x1D, 0x35, 0xC7, + 0xC9, 0x4D, 0xE5, 0x97, 0x0B, 0x43, 0x9B, 0xF1, + 0x16, 0x93, 0x03, 0x1F, 0xA5, 0xFB, 0xDB, 0xF3, + 0x27, 0x4F, 0x27, 0x61, 0x05, 0x1F, 0xB9, 0x23, + 0x2F, 0xC3, 0x81, 0xA9, 0x23, 0x71, 0x55, 0x55, + 0xEB, 0xED, 0x41, 0xE5, 0xF3, 0x11, 0xF1, 0x43, + 0x69, 0x03, 0xBD, 0x0B, 0x37, 0x0F, 0x51, 0x8F, + 0x0B, 0xB5, 0x89, 0x5B, 0x67, 0xA9, 0xD9, 0x4F, + 0x01, 0xF9, 0x21, 0x77, 0x37, 0x73, 0x79, 0xC5, + 0x7F, 0x51, 0xC1, 0xCF, 0x97, 0xA1, 0x75, 0xAD, + 0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41, + 0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09 + }; + struct type86x_reply *msg = reply->message; + unsigned short service_rc, service_rs; + unsigned int reply_len, pad_len; + char *data; + + service_rc = msg->cprbx.ccp_rtcode; + if (unlikely(service_rc != 0)) { + service_rs = msg->cprbx.ccp_rscode; + if ((service_rc == 8 && service_rs == 66) || + (service_rc == 8 && service_rs == 65) || + (service_rc == 8 && service_rs == 72) || + (service_rc == 8 && service_rs == 770) || + (service_rc == 12 && service_rs == 769)) { + ZCRYPT_DBF(DBF_DEBUG, + "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) service_rc, (int) service_rs); + return -EINVAL; + } + if (service_rc == 8 && service_rs == 783) { + zq->zcard->min_mod_size = + PCIXCC_MIN_MOD_SIZE_OLD; + ZCRYPT_DBF(DBF_DEBUG, + "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) service_rc, (int) service_rs); + return -EAGAIN; + } + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + 
ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) service_rc, (int) service_rs); + return -EAGAIN; /* repeat the request on a different device. */ + } + data = msg->text; + reply_len = msg->length - 2; + if (reply_len > outputdatalength) + return -EINVAL; + /* + * For all encipher requests, the length of the ciphertext (reply_len) + * will always equal the modulus length. For MEX decipher requests + * the output needs to get padded. Minimum pad size is 10. + * + * Currently, the cases where padding will be added is for: + * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support + * ZERO-PAD and CRT is only supported for PKD requests) + * - PCICC, always + */ + pad_len = outputdatalength - reply_len; + if (pad_len > 0) { + if (pad_len < 10) + return -EINVAL; + /* 'restore' padding left in the PCICC/PCIXCC card. */ + if (copy_to_user(outputdata, static_pad, pad_len - 1)) + return -EFAULT; + if (put_user(0, outputdata + pad_len - 1)) + return -EFAULT; + } + /* Copy the crypto response to user space. */ + if (copy_to_user(outputdata + pad_len, data, reply_len)) + return -EFAULT; + return 0; +} + +/** + * Copy results from a type 86 XCRB reply message back to user space. + * + * @zq: crypto device pointer + * @reply: reply AP message. + * @xcRB: pointer to XCRB + * + * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. + */ +static int convert_type86_xcrb(struct zcrypt_queue *zq, + struct ap_message *reply, + struct ica_xcRB *xcRB) +{ + struct type86_fmt2_msg *msg = reply->message; + char *data = reply->message; + + /* Copy CPRB to user */ + if (copy_to_user(xcRB->reply_control_blk_addr, + data + msg->fmt2.offset1, msg->fmt2.count1)) + return -EFAULT; + xcRB->reply_control_blk_length = msg->fmt2.count1; + + /* Copy data buffer to user */ + if (msg->fmt2.count2) + if (copy_to_user(xcRB->reply_data_addr, + data + msg->fmt2.offset2, msg->fmt2.count2)) + return -EFAULT; + xcRB->reply_data_length = msg->fmt2.count2; + return 0; +} + +/** + * Copy results from a type 86 EP11 XCRB reply message back to user space. + * + * @zq: crypto device pointer + * @reply: reply AP message. + * @xcRB: pointer to EP11 user request block + * + * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 
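+ *
+ * (Editorial note: a type86 fmt2 reply is self-describing; as in
+ * convert_type86_xcrb() above, the payload regions are located only
+ * through the fmt2 descriptor,
+ *
+ *	CPRB: reply->message + fmt2.offset1, fmt2.count1 bytes
+ *	data: reply->message + fmt2.offset2, fmt2.count2 bytes
+ *
+ * and the EP11 variant below copies just the first region.)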
+ */ +static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq, + struct ap_message *reply, + struct ep11_urb *xcRB) +{ + struct type86_fmt2_msg *msg = reply->message; + char *data = reply->message; + + if (xcRB->resp_len < msg->fmt2.count1) + return -EINVAL; + + /* Copy response CPRB to user */ + if (copy_to_user((char __force __user *)xcRB->resp, + data + msg->fmt2.offset1, msg->fmt2.count1)) + return -EFAULT; + xcRB->resp_len = msg->fmt2.count1; + return 0; +} + +static int convert_type86_rng(struct zcrypt_queue *zq, + struct ap_message *reply, + char *buffer) +{ + struct { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + struct CPRBX cprbx; + } __packed * msg = reply->message; + char *data = reply->message; + + if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) + return -EINVAL; + memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2); + return msg->fmt2.count2; +} + +static int convert_response_ica(struct zcrypt_queue *zq, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + struct type86x_reply *msg = reply->message; + + switch (msg->hdr.type) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + return convert_error(zq, reply); + case TYPE86_RSP_CODE: + if (msg->cprbx.ccp_rtcode && + (msg->cprbx.ccp_rscode == 0x14f) && + (outputdatalength > 256)) { + if (zq->zcard->max_exp_bit_length <= 17) { + zq->zcard->max_exp_bit_length = 17; + return -EAGAIN; + } else + return -EINVAL; + } + if (msg->hdr.reply_code) + return convert_error(zq, reply); + if (msg->cprbx.cprb_ver_id == 0x02) + return convert_type86_ica(zq, reply, + outputdata, outputdatalength); + /* + * Fall through, no break, incorrect cprb version is an unknown + * response + */ + default: /* Unknown response type, this should NEVER EVER happen */ + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) msg->hdr.type); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +static int convert_response_xcrb(struct zcrypt_queue *zq, + struct ap_message *reply, + struct ica_xcRB *xcRB) +{ + struct type86x_reply *msg = reply->message; + + switch (msg->hdr.type) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ + return convert_error(zq, reply); + case TYPE86_RSP_CODE: + if (msg->hdr.reply_code) { + memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); + return convert_error(zq, reply); + } + if (msg->cprbx.cprb_ver_id == 0x02) + return convert_type86_xcrb(zq, reply, xcRB); + /* + * Fall through, no break, incorrect cprb version is an unknown + * response + */ + default: /* Unknown response type, this should NEVER EVER happen */ + xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) msg->hdr.type); + return -EAGAIN; /* repeat the request on a different device. 
*/ + } +} + +static int convert_response_ep11_xcrb(struct zcrypt_queue *zq, + struct ap_message *reply, struct ep11_urb *xcRB) +{ + struct type86_ep11_reply *msg = reply->message; + + switch (msg->hdr.type) { + case TYPE82_RSP_CODE: + case TYPE87_RSP_CODE: + return convert_error(zq, reply); + case TYPE86_RSP_CODE: + if (msg->hdr.reply_code) + return convert_error(zq, reply); + if (msg->cprbx.cprb_ver_id == 0x04) + return convert_type86_ep11_xcrb(zq, reply, xcRB); + /* Fall through, no break, incorrect cprb version is an unknown resp.*/ + default: /* Unknown response type, this should NEVER EVER happen */ + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) msg->hdr.type); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +static int convert_response_rng(struct zcrypt_queue *zq, + struct ap_message *reply, + char *data) +{ + struct type86x_reply *msg = reply->message; + + switch (msg->hdr.type) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + return -EINVAL; + case TYPE86_RSP_CODE: + if (msg->hdr.reply_code) + return -EINVAL; + if (msg->cprbx.cprb_ver_id == 0x02) + return convert_type86_rng(zq, reply, data); + /* + * Fall through, no break, incorrect cprb version is an unknown + * response + */ + default: /* Unknown response type, this should NEVER EVER happen */ + zq->online = 0; + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid)); + ZCRYPT_DBF(DBF_ERR, + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", + AP_QID_CARD(zq->queue->qid), + AP_QID_QUEUE(zq->queue->qid), + (int) msg->hdr.type); + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. + * @aq: pointer to the AP queue + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_msgtype6_receive(struct ap_queue *aq, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct response_type *resp_type = + (struct response_type *) msg->private; + struct type86x_reply *t86r; + int length; + + /* Copy the reply message to the request message buffer. 
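+	 * (Editorial note: the copy below is clamped to the buffer
+	 * size the sender allocated, PCIXCC_MAX_ICA_RESPONSE_SIZE for
+	 * ICA replies and MSGTYPE06_MAX_MSG_SIZE for XCRB replies.)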
*/ + if (!reply) + goto out; /* ap_msg->rc indicates the error */ + t86r = reply->message; + if (t86r->hdr.type == TYPE86_RSP_CODE && + t86r->cprbx.cprb_ver_id == 0x02) { + switch (resp_type->type) { + case PCIXCC_RESPONSE_TYPE_ICA: + length = sizeof(struct type86x_reply) + + t86r->length - 2; + length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length); + memcpy(msg->message, reply->message, length); + break; + case PCIXCC_RESPONSE_TYPE_XCRB: + length = t86r->fmt2.offset2 + t86r->fmt2.count2; + length = min(MSGTYPE06_MAX_MSG_SIZE, length); + memcpy(msg->message, reply->message, length); + break; + default: + memcpy(msg->message, &error_reply, + sizeof(error_reply)); + } + } else + memcpy(msg->message, reply->message, sizeof(error_reply)); +out: + complete(&(resp_type->work)); +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. + * @aq: pointer to the AP queue + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct response_type *resp_type = + (struct response_type *)msg->private; + struct type86_ep11_reply *t86r; + int length; + + /* Copy the reply message to the request message buffer. */ + if (!reply) + goto out; /* ap_msg->rc indicates the error */ + t86r = reply->message; + if (t86r->hdr.type == TYPE86_RSP_CODE && + t86r->cprbx.cprb_ver_id == 0x04) { + switch (resp_type->type) { + case PCIXCC_RESPONSE_TYPE_EP11: + length = t86r->fmt2.offset1 + t86r->fmt2.count1; + length = min(MSGTYPE06_MAX_MSG_SIZE, length); + memcpy(msg->message, reply->message, length); + break; + default: + memcpy(msg->message, &error_reply, sizeof(error_reply)); + } + } else { + memcpy(msg->message, reply->message, sizeof(error_reply)); + } +out: + complete(&(resp_type->work)); +} + +static atomic_t zcrypt_step = ATOMIC_INIT(0); + +/** + * The request distributor calls this function if it picked the PCIXCC/CEX2C + * device to handle a modexpo request. + * @zq: pointer to zcrypt_queue structure that identifies the + * PCIXCC/CEX2C device to the request distributor + * @mex: pointer to the modexpo request buffer + */ +static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, + struct ica_rsa_modexpo *mex) +{ + struct ap_message ap_msg; + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_ICA, + }; + int rc; + + ap_init_message(&ap_msg); + ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.receive = zcrypt_msgtype6_receive; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &resp_type; + rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex); + if (rc) + goto out_free; + init_completion(&resp_type.work); + ap_queue_message(zq->queue, &ap_msg); + rc = wait_for_completion_interruptible(&resp_type.work); + if (rc == 0) { + rc = ap_msg.rc; + if (rc == 0) + rc = convert_response_ica(zq, &ap_msg, + mex->outputdata, + mex->outputdatalength); + } else + /* Signal pending. 
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo_crt request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	PCIXCC/CEX2C device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
+					struct ica_rsa_modexpo_crt *crt)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_ICA,
+	};
+	int rc;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.receive = zcrypt_msgtype6_receive;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
+	if (rc)
+		goto out_free;
+	init_completion(&resp_type.work);
+	ap_queue_message(zq->queue, &ap_msg);
+	rc = wait_for_completion_interruptible(&resp_type.work);
+	if (rc == 0) {
+		rc = ap_msg.rc;
+		if (rc == 0)
+			rc = convert_response_ica(zq, &ap_msg,
+						  crt->outputdata,
+						  crt->outputdatalength);
+	} else {
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, &ap_msg);
+	}
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * Fetch function code from cprb.
+ * Extracting the fc requires copying the cprb from userspace, so this
+ * function allocates memory and needs an ap_msg prepared by the caller
+ * with ap_init_message(). The caller must also make sure that
+ * ap_release_message() is always called, even on failure.
+ */
+unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
+			 struct ap_message *ap_msg,
+			 unsigned int *func_code, unsigned short **dom)
+{
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+	};
+
+	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->message)
+		return -ENOMEM;
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+	if (!ap_msg->private)
+		return -ENOMEM;
+	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+	return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a send_cprb request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	PCIXCC/CEX2C device to the request distributor
+ * @xcRB: pointer to the send_cprb request buffer
+ */
+static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
+				      struct ica_xcRB *xcRB,
+				      struct ap_message *ap_msg)
+{
+	int rc;
+	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+
+	init_completion(&rtype->work);
+	ap_queue_message(zq->queue, ap_msg);
+	rc = wait_for_completion_interruptible(&rtype->work);
+	if (rc == 0) {
+		rc = ap_msg->rc;
+		if (rc == 0)
+			rc = convert_response_xcrb(zq, ap_msg, xcRB);
+	} else {
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+	}
+	return rc;
+}
+
+/**
+ * Fetch function code from ep11 cprb.
+ * Extracting the fc requires copying the ep11 cprb from userspace, so
+ * this function allocates memory and needs an ap_msg prepared
+ * by the caller with ap_init_message(). The caller must also make sure
+ * that ap_release_message() is always called, even on failure.
+ */
+unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
+			     struct ap_message *ap_msg,
+			     unsigned int *func_code)
+{
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_EP11,
+	};
+
+	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->message)
+		return -ENOMEM;
+	ap_msg->receive = zcrypt_msgtype6_receive_ep11;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+	if (!ap_msg->private)
+		return -ENOMEM;
+	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+	return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+}
+
+/**
+ * The request distributor calls this function if it picked the CEX4P
+ * device to handle a send_ep11_cprb request.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	CEX4P device to the request distributor
+ * @xcrb: pointer to the ep11 user request block
+ */
+static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+					   struct ep11_urb *xcrb,
+					   struct ap_message *ap_msg)
+{
+	int rc;
+	unsigned int lfmt;
+	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+	struct {
+		struct type6_hdr hdr;
+		struct ep11_cprb cprbx;
+		unsigned char	pld_tag;	/* fixed value 0x30 */
+		unsigned char	pld_lenfmt;	/* payload length format */
+	} __packed * msg = ap_msg->message;
+	struct pld_hdr {
+		unsigned char	func_tag;	/* fixed value 0x4 */
+		unsigned char	func_len;	/* fixed value 0x4 */
+		unsigned int	func_val;	/* function ID */
+		unsigned char	dom_tag;	/* fixed value 0x4 */
+		unsigned char	dom_len;	/* fixed value 0x4 */
+		unsigned int	dom_val;	/* domain id */
+	} __packed * payload_hdr = NULL;
+
+	/*
+	 * The target domain field within the cprb body/payload block will be
+	 * replaced by the usage domain for non-management commands only.
+	 * Therefore we check the first bit of the 'flags' parameter for
+	 * management command indication.
+	 *   0 - non-management command
+	 *   1 - management command
+	 */
+	if (!((msg->cprbx.flags & 0x80) == 0x80)) {
+		msg->cprbx.target_id = (unsigned int)
+			AP_QID_QUEUE(zq->queue->qid);
+
+		if ((msg->pld_lenfmt & 0x80) == 0x80) { /* ext. len. fmt 2 or 3 */
+			switch (msg->pld_lenfmt & 0x03) {
+			case 1:
+				lfmt = 2;
+				break;
+			case 2:
+				lfmt = 3;
+				break;
+			default:
+				return -EINVAL;
+			}
+		} else {
+			lfmt = 1; /* length format #1 */
+		}
+		payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+		payload_hdr->dom_val = (unsigned int)
+			AP_QID_QUEUE(zq->queue->qid);
+	}
+
+	init_completion(&rtype->work);
+	ap_queue_message(zq->queue, ap_msg);
+	rc = wait_for_completion_interruptible(&rtype->work);
+	if (rc == 0) {
+		rc = ap_msg->rc;
+		if (rc == 0)
+			rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
+	} else {
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+	}
+	return rc;
+}
+
+unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
+			unsigned int *domain)
+{
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+	};
+
+	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->message)
+		return -ENOMEM;
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+		atomic_inc_return(&zcrypt_step);
+	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+	if (!ap_msg->private)
+		return -ENOMEM;
+	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+
+	rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
+
+	*func_code = HWRNG;
+	return 0;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to generate random data.
+ * @zq: pointer to zcrypt_queue structure that identifies the
+ *	PCIXCC/CEX2C device to the request distributor
+ * @buffer: pointer to a memory page to return random data
+ */
+static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
+				char *buffer, struct ap_message *ap_msg)
+{
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __packed * msg = ap_msg->message;
+	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+	int rc;
+
+	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+
+	init_completion(&rtype->work);
+	ap_queue_message(zq->queue, ap_msg);
+	rc = wait_for_completion_interruptible(&rtype->work);
+	if (rc == 0) {
+		rc = ap_msg->rc;
+		if (rc == 0)
+			rc = convert_response_rng(zq, ap_msg, buffer);
+	} else {
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+	}
+	return rc;
+}
+
+/**
+ * The crypto operations for a PCIXCC/CEX2C card.
+ */
+static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
+	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
+	.variant = MSGTYPE06_VARIANT_NORNG,
+	.rsa_modexpo = zcrypt_msgtype6_modexpo,
+	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
+	.send_cprb = zcrypt_msgtype6_send_cprb,
+};
+
+static struct zcrypt_ops zcrypt_msgtype6_ops = {
+	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
+	.variant = MSGTYPE06_VARIANT_DEFAULT,
+	.rsa_modexpo = zcrypt_msgtype6_modexpo,
+	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
+	.send_cprb = zcrypt_msgtype6_send_cprb,
+	.rng = zcrypt_msgtype6_rng,
+};
+
+static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
+	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
+	.variant = MSGTYPE06_VARIANT_EP11,
+	.rsa_modexpo = NULL,
+	.rsa_modexpo_crt = NULL,
+	.send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
+};
+
+void __init zcrypt_msgtype6_init(void)
+{
+	zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
+	zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
+	zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
+}
+
+void __exit zcrypt_msgtype6_exit(void)
+{
+	zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
+	zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
+	zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
+}
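
The modexpo path above is what ultimately services the ICARSAMODEXPO ioctl on the zcrypt misc device. A minimal userspace sketch, assuming the uapi header from this kernel; the operands and buffer sizes are placeholders and real key material would be needed for the call to succeed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/zcrypt.h>	/* struct ica_rsa_modexpo, ICARSAMODEXPO */

	int main(void)
	{
		char in[256], out[256], exp[256], mod[256];
		struct ica_rsa_modexpo mex = {
			.inputdata = in,
			.inputdatalength = sizeof(in),
			.outputdata = out,
			.outputdatalength = sizeof(out),
			.b_key = exp,		/* public exponent, right-aligned */
			.n_modulus = mod,	/* modulus, right-aligned */
		};
		int fd = open("/dev/z90crypt", O_RDWR);

		if (fd < 0)
			return 1;
		/* in/exp/mod would be filled with real operands here */
		if (ioctl(fd, ICARSAMODEXPO, &mex) < 0)
			perror("ICARSAMODEXPO");
		close(fd);
		return 0;
	}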
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
new file mode 100644
index 000000000..e4c2f37d7
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ *	      Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_MSGTYPE6_H_
+#define _ZCRYPT_MSGTYPE6_H_
+
+#include <asm/zcrypt.h>
+
+#define MSGTYPE06_NAME			"zcrypt_msgtype6"
+#define MSGTYPE06_VARIANT_DEFAULT	0
+#define MSGTYPE06_VARIANT_NORNG		1
+#define MSGTYPE06_VARIANT_EP11		2
+
+#define MSGTYPE06_MAX_MSG_SIZE		(12*1024)
+
+/**
+ * The type 6 message family is associated with PCICC or PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB, both of which
+ * are described below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type6_hdr {
+	unsigned char reserved1;	/* 0x00 */
+	unsigned char type;		/* 0x06 */
+	unsigned char reserved2[2];	/* 0x0000 */
+	unsigned char right[4];		/* 0x00000000 */
+	unsigned char reserved3[2];	/* 0x0000 */
+	unsigned char reserved4[2];	/* 0x0000 */
+	unsigned char apfs[4];		/* 0x00000000 */
+	unsigned int  offset1;		/* 0x00000058 (offset to CPRB) */
+	unsigned int  offset2;		/* 0x00000000 */
+	unsigned int  offset3;		/* 0x00000000 */
+	unsigned int  offset4;		/* 0x00000000 */
+	unsigned char agent_id[16];	/* PCICC: */
+					/*    0x0100 */
+					/*    0x4343412d4150504c202020 */
+					/*    0x010101 */
+					/* PCIXCC: */
+					/*    0x4341000000000000 */
+					/*    0x0000000000000000 */
+	unsigned char rqid[2];		/* rqid.  internal to 603 */
+	unsigned char reserved5[2];	/* 0x0000 */
+	unsigned char function_code[2];	/* for PKD, 0x5044 (ascii 'PD') */
+	unsigned char reserved6[2];	/* 0x0000 */
+	unsigned int  ToCardLen1;	/* (request CPRB len + 3) & -4 */
+	unsigned int  ToCardLen2;	/* db len 0x00000000 for PKD */
+	unsigned int  ToCardLen3;	/* 0x00000000 */
+	unsigned int  ToCardLen4;	/* 0x00000000 */
+	unsigned int  FromCardLen1;	/* response buffer length */
+	unsigned int  FromCardLen2;	/* db len 0x00000000 for PKD */
+	unsigned int  FromCardLen3;	/* 0x00000000 */
+	unsigned int  FromCardLen4;	/* 0x00000000 */
+} __packed;
+
+/**
+ * The type 86 message family is associated with PCICC and PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB. The CPRB is
+ * the same as the request CPRB, which is described above.
+ *
+ * If format is 1, an error condition exists and no data beyond
+ * the 8-byte message header is of interest.
+ *
+ * The non-error message is shown below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type86_hdr {
+	unsigned char reserved1;	/* 0x00 */
+	unsigned char type;		/* 0x86 */
+	unsigned char format;		/* 0x01 (error) or 0x02 (ok) */
+	unsigned char reserved2;	/* 0x00 */
+	unsigned char reply_code;	/* reply code (see above) */
+	unsigned char reserved3[3];	/* 0x000000 */
+} __packed;
+
+#define TYPE86_RSP_CODE 0x86
+#define TYPE87_RSP_CODE 0x87
+#define TYPE86_FMT2	0x02
+
+struct type86_fmt2_ext {
+	unsigned char reserved[4];	/* 0x00000000 */
+	unsigned char apfs[4];		/* final status */
+	unsigned int  count1;		/* length of CPRB + parameters */
+	unsigned int  offset1;		/* offset to CPRB */
+	unsigned int  count2;		/* 0x00000000 */
+	unsigned int  offset2;		/* db offset 0x00000000 for PKD */
+	unsigned int  count3;		/* 0x00000000 */
+	unsigned int  offset3;		/* 0x00000000 */
+	unsigned int  count4;		/* 0x00000000 */
+	unsigned int  offset4;		/* 0x00000000 */
+} __packed;
+
+unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
+			 unsigned int *, unsigned short **);
+unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
+			     unsigned int *);
+unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
+
+#define LOW	10
+#define MEDIUM	100
+#define HIGH	500
+
+int speed_idx_cca(int);
+int speed_idx_ep11(int);
+
+/**
+ * Prepare a type6 CPRB message for random number generation
+ *
+ * @ap_msg: pointer to the AP message
+ * @random_number_length: number of random bytes to request
+ * @domain: filled with the usage domain taken from the CPRB
+ */
+static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
+				      unsigned int random_number_length,
+				      unsigned int *domain)
+{
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __packed * msg = ap_msg->message;
+	static struct type6_hdr static_type6_hdrX = {
+		.type		= 0x06,
+		.offset1	= 0x00000058,
+		.agent_id	= {'C', 'A'},
+		.function_code	= {'R', 'L'},
+		.ToCardLen1	= sizeof(*msg) - sizeof(msg->hdr),
+		.FromCardLen1	= sizeof(*msg) - sizeof(msg->hdr),
+	};
+	static struct CPRBX local_cprbx = {
+		.cprb_len	= 0x00dc,
+		.cprb_ver_id	= 0x02,
+		.func_id	= {0x54, 0x32},
+		.req_parml	= sizeof(*msg) - sizeof(msg->hdr) -
+				  sizeof(msg->cprbx),
+		.rpl_msgbl	= sizeof(*msg) - sizeof(msg->hdr),
+	};
+
+	msg->hdr = static_type6_hdrX;
+	msg->hdr.FromCardLen2 = random_number_length;
+	msg->cprbx = local_cprbx;
+	msg->cprbx.rpl_datal = random_number_length;
+	memcpy(msg->function_code, msg->hdr.function_code, 0x02);
+	msg->rule_length = 0x0a;
+	memcpy(msg->rule, "RANDOM  ", 8);
+	msg->verb_length = 0x02;
+	msg->key_length = 0x02;
+	ap_msg->length = sizeof(*msg);
+	*domain = (unsigned short)msg->cprbx.domain;
+}
+
+void zcrypt_msgtype6_init(void);
+void zcrypt_msgtype6_exit(void);
+
+#endif /* _ZCRYPT_MSGTYPE6_H_ */
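
The pld_lenfmt byte parsed by zcrypt_msgtype6_send_ep11_cprb() follows an ASN.1 BER-style length encoding. A standalone restatement of that decoding, as a sketch only; the helper name is made up and is not part of this driver:

	#include <errno.h>

	/*
	 * Mirrors the lfmt computation in zcrypt_msgtype6_send_ep11_cprb():
	 * returns how many bytes to skip from pld_lenfmt to reach the
	 * payload header (the format byte plus any extra length bytes),
	 * or -EINVAL for encodings the driver rejects.
	 */
	int ep11_payload_hdr_offset(unsigned char pld_lenfmt)
	{
		if (!(pld_lenfmt & 0x80))
			return 1;	/* format 1: single length byte */
		switch (pld_lenfmt & 0x03) {
		case 1:
			return 2;	/* format 2: one extra length byte */
		case 2:
			return 3;	/* format 3: two extra length bytes */
		default:
			return -EINVAL;
		}
	}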
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
new file mode 100644
index 000000000..baa683c3f
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ *	      Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_pcixcc.h"
+#include "zcrypt_cca_key.h"
+
+#define PCIXCC_MIN_MOD_SIZE	 16	/*  128 bits	*/
+#define PCIXCC_MIN_MOD_SIZE_OLD	 64	/*  512 bits	*/
+#define PCIXCC_MAX_MOD_SIZE	256	/* 2048 bits	*/
+#define CEX3C_MIN_MOD_SIZE	PCIXCC_MIN_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE	512	/* 4096 bits	*/
+
+#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c  /* max size type6 v2 crt message */
+#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
+
+#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
+
+#define PCIXCC_CLEANUP_TIME	(15*HZ)
+
+#define CEIL4(x) ((((x)+3)/4)*4)
+
+struct response_type {
+	struct completion work;
+	int type;
+};
+#define PCIXCC_RESPONSE_TYPE_ICA  0
+#define PCIXCC_RESPONSE_TYPE_XCRB 1
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
+		   "Copyright IBM Corp. 2001, 2012");
+MODULE_LICENSE("GPL");
+
+static struct ap_device_id zcrypt_pcixcc_card_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_PCIXCC,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX2C,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3C,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids);
+
+static struct ap_device_id zcrypt_pcixcc_queue_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_PCIXCC,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX2C,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3C,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids);
+
+/**
+ * Large random number detection function. It sends a message to a PCIXCC
+ * card to find out if large random numbers are supported.
+ * @aq: pointer to the AP queue.
+ *
+ * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
+ */
+static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
+{
+	struct ap_message ap_msg;
+	unsigned long long psmid;
+	unsigned int domain;
+	struct {
+		struct type86_hdr hdr;
+		struct type86_fmt2_ext fmt2;
+		struct CPRBX cprbx;
+	} __packed *reply;
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __packed *msg;
+	int rc, i;
+
+	ap_init_message(&ap_msg);
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+
+	rng_type6CPRB_msgX(&ap_msg, 4, &domain);
+
+	msg = ap_msg.message;
+	msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
+
+	rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
+		     ap_msg.length);
+	if (rc)
+		goto out_free;
+
+	/* Wait for the test message to complete. */
+	for (i = 0; i < 2 * HZ; i++) {
+		msleep(1000 / HZ);
+		rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
+		if (rc == 0 && psmid == 0x0102030405060708ULL)
+			break;
+	}
+
+	if (i >= 2 * HZ) {
+		/* Got no answer. */
+		rc = -ENODEV;
+		goto out_free;
+	}
+
+	reply = ap_msg.message;
+	if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
+		rc = 1;
+	else
+		rc = 0;
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * Probe function for PCIXCC/CEX2C card devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type. The
+ * PCIXCC cards come in two flavours: micro code level 2 and micro code
+ * level 3. This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP card device.
+ */
+static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
+{
+	/*
+	 * Normalized speed ratings per crypto adapter
+	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+	 */
+	static const int CEX2C_SPEED_IDX[] = {
+		1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
+	static const int CEX3C_SPEED_IDX[] = {
+		500, 700, 1400, 550, 800, 1500, 80, 10};
+
+	struct ap_card *ac = to_ap_card(&ap_dev->device);
+	struct zcrypt_card *zc;
+	int rc = 0;
+
+	zc = zcrypt_card_alloc();
+	if (!zc)
+		return -ENOMEM;
+	zc->card = ac;
+	ac->private = zc;
+	switch (ac->ap_dev.device_type) {
+	case AP_DEVICE_TYPE_CEX2C:
+		zc->user_space_type = ZCRYPT_CEX2C;
+		zc->type_string = "CEX2C";
+		memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
+		       sizeof(CEX2C_SPEED_IDX));
+		zc->min_mod_size = PCIXCC_MIN_MOD_SIZE;
+		zc->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
+		break;
+	case AP_DEVICE_TYPE_CEX3C:
+		zc->user_space_type = ZCRYPT_CEX3C;
+		zc->type_string = "CEX3C";
+		memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
+		       sizeof(CEX3C_SPEED_IDX));
+		zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
+		break;
+	default:
+		zcrypt_card_free(zc);
+		return -ENODEV;
+	}
+	zc->online = 1;
+
+	rc = zcrypt_card_register(zc);
+	if (rc) {
+		ac->private = NULL;
+		zcrypt_card_free(zc);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the PCIXCC/CEX2C card driver information
+ * if an AP card device is removed.
+ */
+static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
+{
+	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+
+	if (zc)
+		zcrypt_card_unregister(zc);
+}
+
+static struct ap_driver zcrypt_pcixcc_card_driver = {
+	.probe = zcrypt_pcixcc_card_probe,
+	.remove = zcrypt_pcixcc_card_remove,
+	.ids = zcrypt_pcixcc_card_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+/**
+ * Probe function for PCIXCC/CEX2C queue devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type. The
+ * PCIXCC cards come in two flavours: micro code level 2 and micro code
+ * level 3. This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP queue device.
+ */
+static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq;
+	int rc;
+
+	zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
+	if (!zq)
+		return -ENOMEM;
+	zq->queue = aq;
+	zq->online = 1;
+	atomic_set(&zq->load, 0);
+	rc = zcrypt_pcixcc_rng_supported(aq);
+	if (rc < 0) {
+		zcrypt_queue_free(zq);
+		return rc;
+	}
+	if (rc)
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_DEFAULT);
+	else
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_NORNG);
+	ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = PCIXCC_CLEANUP_TIME;
+	aq->private = zq;
+	rc = zcrypt_queue_register(zq);
+	if (rc) {
+		aq->private = NULL;
+		zcrypt_queue_free(zq);
+	}
+	return rc;
+}
+
+/**
+ * This is called to remove the PCIXCC/CEX2C queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = aq->private;
+
+	if (zq)
+		zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_pcixcc_queue_driver = {
+	.probe = zcrypt_pcixcc_queue_probe,
+	.remove = zcrypt_pcixcc_queue_remove,
+	.suspend = ap_queue_suspend,
+	.resume = ap_queue_resume,
+	.ids = zcrypt_pcixcc_queue_ids,
+	.flags = AP_DRIVER_FLAG_DEFAULT,
+};
+
+int __init zcrypt_pcixcc_init(void)
+{
+	int rc;
+
+	rc = ap_driver_register(&zcrypt_pcixcc_card_driver,
+				THIS_MODULE, "pcixcccard");
+	if (rc)
+		return rc;
+
+	rc = ap_driver_register(&zcrypt_pcixcc_queue_driver,
+				THIS_MODULE, "pcixccqueue");
+	if (rc)
+		ap_driver_unregister(&zcrypt_pcixcc_card_driver);
+
+	return rc;
+}
+
+void zcrypt_pcixcc_exit(void)
+{
+	ap_driver_unregister(&zcrypt_pcixcc_queue_driver);
+	ap_driver_unregister(&zcrypt_pcixcc_card_driver);
+}
+
+module_init(zcrypt_pcixcc_init);
+module_exit(zcrypt_pcixcc_exit);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
new file mode 100644
index 000000000..cf73a0f91
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ *	      Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#ifndef _ZCRYPT_PCIXCC_H_
+#define _ZCRYPT_PCIXCC_H_
+
+int zcrypt_pcixcc_init(void);
+void zcrypt_pcixcc_exit(void);
+
+#endif /* _ZCRYPT_PCIXCC_H_ */
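
When zcrypt_pcixcc_rng_supported() reports success, the queue is registered with the rng-capable default ops and, via zcrypt_rng_device_add() in the queue code below, can feed the kernel's hw_random framework. A minimal consumer sketch, assuming the zcrypt rng has been selected as the current hwrng source and that /dev/hwrng is readable; error handling is trimmed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[16];
		ssize_t n, i;
		int fd = open("/dev/hwrng", O_RDONLY);	/* hw_random char dev */

		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf));	/* pulls from the current rng */
		for (i = 0; i < n; i++)
			printf("%02x", buf[i]);
		putchar('\n');
		close(fd);
		return 0;
	}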
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
new file mode 100644
index 000000000..8df82c6ef
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * zcrypt 2.1.0
+ *
+ * Copyright IBM Corp. 2001, 2012
+ * Author(s): Robert Burroughs
+ *	      Eric Rossman (edrossma@us.ibm.com)
+ *	      Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
+ * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common to all crypto queue devices.
+ */
+
+static ssize_t online_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+}
+
+static ssize_t online_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	struct zcrypt_card *zc = zq->zcard;
+	int online;
+
+	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+		return -EINVAL;
+
+	if (online && !zc->online)
+		return -EINVAL;
+	zq->online = online;
+
+	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
+		   AP_QID_CARD(zq->queue->qid),
+		   AP_QID_QUEUE(zq->queue->qid),
+		   online);
+
+	if (!online)
+		ap_flush_queue(zq->queue);
+	return count;
+}
+
+static DEVICE_ATTR_RW(online);
+
+static ssize_t load_show(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+{
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+}
+
+static DEVICE_ATTR_RO(load);
+
+static struct attribute *zcrypt_queue_attrs[] = {
+	&dev_attr_online.attr,
+	&dev_attr_load.attr,
+	NULL,
+};
+
+static const struct attribute_group zcrypt_queue_attr_group = {
+	.attrs = zcrypt_queue_attrs,
+};
+
+void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
+{
+	zq->online = online;
+	if (!online)
+		ap_flush_queue(zq->queue);
+}
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
+{
+	struct zcrypt_queue *zq;
+
+	zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
+	if (!zq)
+		return NULL;
+	zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
+	if (!zq->reply.message)
+		goto out_free;
+	zq->reply.length = max_response_size;
+	INIT_LIST_HEAD(&zq->list);
+	kref_init(&zq->refcount);
+	return zq;
+
+out_free:
+	kfree(zq);
+	return NULL;
+}
+EXPORT_SYMBOL(zcrypt_queue_alloc);
+
+void zcrypt_queue_free(struct zcrypt_queue *zq)
+{
+	kfree(zq->reply.message);
+	kfree(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_free);
+
+static void zcrypt_queue_release(struct kref *kref)
+{
+	struct zcrypt_queue *zq =
+		container_of(kref, struct zcrypt_queue, refcount);
+	zcrypt_queue_free(zq);
+}
+
+void zcrypt_queue_get(struct zcrypt_queue *zq)
+{
+	kref_get(&zq->refcount);
+}
+EXPORT_SYMBOL(zcrypt_queue_get);
+
+int zcrypt_queue_put(struct zcrypt_queue *zq)
+{
+	return kref_put(&zq->refcount, zcrypt_queue_release);
+}
+EXPORT_SYMBOL(zcrypt_queue_put);
+
+/**
+ * zcrypt_queue_register() - Register a crypto queue device.
+ * @zq: Pointer to a crypto queue device
+ *
+ * Register a crypto queue device. Returns 0 if successful.
+ */
+int zcrypt_queue_register(struct zcrypt_queue *zq)
+{
+	struct zcrypt_card *zc;
+	int rc;
+
+	spin_lock(&zcrypt_list_lock);
+	zc = zq->queue->card->private;
+	zcrypt_card_get(zc);
+	zq->zcard = zc;
+	zq->online = 1;	/* New devices are online by default. */
+
+	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
+		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+	list_add_tail(&zq->list, &zc->zqueues);
+	zcrypt_device_count++;
+	spin_unlock(&zcrypt_list_lock);
+
+	rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
+				&zcrypt_queue_attr_group);
+	if (rc)
+		goto out;
+	get_device(&zq->queue->ap_dev.device);
+
+	if (zq->ops->rng) {
+		rc = zcrypt_rng_device_add();
+		if (rc)
+			goto out_unregister;
+	}
+	return 0;
+
+out_unregister:
+	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+			   &zcrypt_queue_attr_group);
+	put_device(&zq->queue->ap_dev.device);
+out:
+	spin_lock(&zcrypt_list_lock);
+	list_del_init(&zq->list);
+	spin_unlock(&zcrypt_list_lock);
+	zcrypt_card_put(zc);
+	return rc;
+}
+EXPORT_SYMBOL(zcrypt_queue_register);
+
+/**
+ * zcrypt_queue_unregister() - Unregister a crypto queue device.
+ * @zq: Pointer to crypto queue device
+ *
+ * Unregister a crypto queue device.
+ */
+void zcrypt_queue_unregister(struct zcrypt_queue *zq)
+{
+	struct zcrypt_card *zc;
+
+	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
+		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+	zc = zq->zcard;
+	spin_lock(&zcrypt_list_lock);
+	list_del_init(&zq->list);
+	zcrypt_device_count--;
+	spin_unlock(&zcrypt_list_lock);
+	zcrypt_card_put(zc);
+	if (zq->ops->rng)
+		zcrypt_rng_device_remove();
+	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+			   &zcrypt_queue_attr_group);
+	put_device(&zq->queue->ap_dev.device);
+	zcrypt_queue_put(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_unregister);
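
The online attribute implemented above can be driven from userspace. A small C sketch; the card/queue ids in the path are placeholders for a real device (for example, one listed under /sys/devices/ap/):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Illustrative path: substitute a real card/queue id pair. */
	#define QUEUE_ONLINE "/sys/devices/ap/card00/00.0001/online"

	int main(void)
	{
		int fd = open(QUEUE_ONLINE, O_WRONLY);

		if (fd < 0) {
			perror(QUEUE_ONLINE);
			return 1;
		}
		/* "0" takes the queue offline and flushes it; "1" brings it back. */
		if (write(fd, "0", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}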