author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/s390/cio
parent     Initial commit.
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/Makefile | 25
-rw-r--r--  drivers/s390/cio/airq.c | 311
-rw-r--r--  drivers/s390/cio/blacklist.c | 424
-rw-r--r--  drivers/s390/cio/blacklist.h | 7
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 603
-rw-r--r--  drivers/s390/cio/ccwreq.c | 368
-rw-r--r--  drivers/s390/cio/chp.c | 834
-rw-r--r--  drivers/s390/cio/chp.h | 74
-rw-r--r--  drivers/s390/cio/chsc.c | 1428
-rw-r--r--  drivers/s390/cio/chsc.h | 222
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 1012
-rw-r--r--  drivers/s390/cio/chsc_sch.h | 14
-rw-r--r--  drivers/s390/cio/cio.c | 758
-rw-r--r--  drivers/s390/cio/cio.h | 153
-rw-r--r--  drivers/s390/cio/cio_debug.h | 29
-rw-r--r--  drivers/s390/cio/cmf.c | 1309
-rw-r--r--  drivers/s390/cio/crw.c | 163
-rw-r--r--  drivers/s390/cio/css.c | 1578
-rw-r--r--  drivers/s390/cio/css.h | 159
-rw-r--r--  drivers/s390/cio/device.c | 2161
-rw-r--r--  drivers/s390/cio/device.h | 148
-rw-r--r--  drivers/s390/cio/device_fsm.c | 1134
-rw-r--r--  drivers/s390/cio/device_id.c | 225
-rw-r--r--  drivers/s390/cio/device_ops.c | 861
-rw-r--r--  drivers/s390/cio/device_pgid.c | 726
-rw-r--r--  drivers/s390/cio/device_status.c | 399
-rw-r--r--  drivers/s390/cio/eadm_sch.c | 411
-rw-r--r--  drivers/s390/cio/eadm_sch.h | 23
-rw-r--r--  drivers/s390/cio/fcx.c | 351
-rw-r--r--  drivers/s390/cio/idset.c | 100
-rw-r--r--  drivers/s390/cio/idset.h | 25
-rw-r--r--  drivers/s390/cio/io_sch.h | 185
-rw-r--r--  drivers/s390/cio/ioasm.c | 277
-rw-r--r--  drivers/s390/cio/ioasm.h | 28
-rw-r--r--  drivers/s390/cio/isc.c | 69
-rw-r--r--  drivers/s390/cio/itcw.c | 370
-rw-r--r--  drivers/s390/cio/orb.h | 92
-rw-r--r--  drivers/s390/cio/qdio.h | 393
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 345
-rw-r--r--  drivers/s390/cio/qdio_debug.h | 73
-rw-r--r--  drivers/s390/cio/qdio_main.c | 1703
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 617
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 250
-rw-r--r--  drivers/s390/cio/scm.c | 289
-rw-r--r--  drivers/s390/cio/trace.c | 24
-rw-r--r--  drivers/s390/cio/trace.h | 403
-rw-r--r--  drivers/s390/cio/vfio_ccw_async.c | 88
-rw-r--r--  drivers/s390/cio/vfio_ccw_chp.c | 149
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 875
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.h | 53
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c | 513
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c | 404
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c | 628
-rw-r--r--  drivers/s390/cio/vfio_ccw_private.h | 176
-rw-r--r--  drivers/s390/cio/vfio_ccw_trace.c | 15
-rw-r--r--  drivers/s390/cio/vfio_ccw_trace.h | 146
56 files changed, 24200 insertions, 0 deletions
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
new file mode 100644
index 000000000..a9235f111
--- /dev/null
+++ b/drivers/s390/cio/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the S/390 common i/o drivers
+#
+
+# The following is required for define_trace.h to find ./trace.h
+CFLAGS_trace.o := -I$(src)
+CFLAGS_vfio_ccw_trace.o := -I$(src)
+
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
+ fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
+ccw_device-objs += device.o device_fsm.o device_ops.o
+ccw_device-objs += device_id.o device_pgid.o device_status.o
+obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
+obj-$(CONFIG_EADM_SCH) += eadm_sch.o
+obj-$(CONFIG_SCM_BUS) += scm.o
+obj-$(CONFIG_CCWGROUP) += ccwgroup.o
+
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
+obj-$(CONFIG_QDIO) += qdio.o
+
+vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
+ vfio_ccw_async.o vfio_ccw_trace.o vfio_ccw_chp.o
+obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
new file mode 100644
index 000000000..cb466ed7e
--- /dev/null
+++ b/drivers/s390/cio/airq.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for adapter interruptions
+ *
+ * Copyright IBM Corp. 1999, 2007
+ * Author(s): Ingo Adlung <adlung@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Arnd Bergmann <arndb@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+
+#include <asm/airq.h>
+#include <asm/isc.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+
+static DEFINE_SPINLOCK(airq_lists_lock);
+static struct hlist_head airq_lists[MAX_ISC+1];
+
+static struct dma_pool *airq_iv_cache;
+
+/**
+ * register_adapter_interrupt() - register adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ *
+ * Returns 0 on success, or a negative error code (-EINVAL, -ENOMEM) on failure.
+ */
+int register_adapter_interrupt(struct airq_struct *airq)
+{
+ char dbf_txt[32];
+
+ if (!airq->handler || airq->isc > MAX_ISC)
+ return -EINVAL;
+ if (!airq->lsi_ptr) {
+ airq->lsi_ptr = kzalloc(1, GFP_KERNEL);
+ if (!airq->lsi_ptr)
+ return -ENOMEM;
+ airq->flags |= AIRQ_PTR_ALLOCATED;
+ }
+ if (!airq->lsi_mask)
+ airq->lsi_mask = 0xff;
+ snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
+ CIO_TRACE_EVENT(4, dbf_txt);
+ isc_register(airq->isc);
+ spin_lock(&airq_lists_lock);
+ hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]);
+ spin_unlock(&airq_lists_lock);
+ return 0;
+}
+EXPORT_SYMBOL(register_adapter_interrupt);
+
+/**
+ * unregister_adapter_interrupt - unregister adapter interrupt handler
+ * @airq: pointer to adapter interrupt descriptor
+ */
+void unregister_adapter_interrupt(struct airq_struct *airq)
+{
+ char dbf_txt[32];
+
+ if (hlist_unhashed(&airq->list))
+ return;
+ snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq);
+ CIO_TRACE_EVENT(4, dbf_txt);
+ spin_lock(&airq_lists_lock);
+ hlist_del_rcu(&airq->list);
+ spin_unlock(&airq_lists_lock);
+ synchronize_rcu();
+ isc_unregister(airq->isc);
+ if (airq->flags & AIRQ_PTR_ALLOCATED) {
+ kfree(airq->lsi_ptr);
+ airq->lsi_ptr = NULL;
+ airq->flags &= ~AIRQ_PTR_ALLOCATED;
+ }
+}
+EXPORT_SYMBOL(unregister_adapter_interrupt);
+
+static irqreturn_t do_airq_interrupt(int irq, void *dummy)
+{
+ struct tpi_info *tpi_info;
+ struct airq_struct *airq;
+ struct hlist_head *head;
+
+ set_cpu_flag(CIF_NOHZ_DELAY);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ trace_s390_cio_adapter_int(tpi_info);
+ head = &airq_lists[tpi_info->isc];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(airq, head, list)
+ if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
+ airq->handler(airq, !tpi_info->directed_irq);
+ rcu_read_unlock();
+
+ return IRQ_HANDLED;
+}
+
+void __init init_airq_interrupts(void)
+{
+ irq_set_chip_and_handler(THIN_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ if (request_irq(THIN_INTERRUPT, do_airq_interrupt, 0, "AIO", NULL))
+ panic("Failed to register AIO interrupt\n");
+}
+
+static inline unsigned long iv_size(unsigned long bits)
+{
+ return BITS_TO_LONGS(bits) * sizeof(unsigned long);
+}
+
+/**
+ * airq_iv_create - create an interrupt vector
+ * @bits: number of bits in the interrupt vector
+ * @flags: allocation flags
+ *
+ * Returns a pointer to an interrupt vector structure
+ */
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
+{
+ struct airq_iv *iv;
+ unsigned long size;
+
+ iv = kzalloc(sizeof(*iv), GFP_KERNEL);
+ if (!iv)
+ goto out;
+ iv->bits = bits;
+ iv->flags = flags;
+ size = iv_size(bits);
+
+ if (flags & AIRQ_IV_CACHELINE) {
+ if ((cache_line_size() * BITS_PER_BYTE) < bits
+ || !airq_iv_cache)
+ goto out_free;
+
+ iv->vector = dma_pool_zalloc(airq_iv_cache, GFP_KERNEL,
+ &iv->vector_dma);
+ if (!iv->vector)
+ goto out_free;
+ } else {
+ iv->vector = cio_dma_zalloc(size);
+ if (!iv->vector)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_ALLOC) {
+ iv->avail = kmalloc(size, GFP_KERNEL);
+ if (!iv->avail)
+ goto out_free;
+ memset(iv->avail, 0xff, size);
+ iv->end = 0;
+ } else
+ iv->end = bits;
+ if (flags & AIRQ_IV_BITLOCK) {
+ iv->bitlock = kzalloc(size, GFP_KERNEL);
+ if (!iv->bitlock)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_PTR) {
+ size = bits * sizeof(unsigned long);
+ iv->ptr = kzalloc(size, GFP_KERNEL);
+ if (!iv->ptr)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_DATA) {
+ size = bits * sizeof(unsigned int);
+ iv->data = kzalloc(size, GFP_KERNEL);
+ if (!iv->data)
+ goto out_free;
+ }
+ spin_lock_init(&iv->lock);
+ return iv;
+
+out_free:
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->avail);
+ if (iv->flags & AIRQ_IV_CACHELINE && iv->vector)
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
+ else
+ cio_dma_free(iv->vector, size);
+ kfree(iv);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL(airq_iv_create);
+
+/**
+ * airq_iv_release - release an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ */
+void airq_iv_release(struct airq_iv *iv)
+{
+ kfree(iv->data);
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ if (iv->flags & AIRQ_IV_CACHELINE)
+ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
+ else
+ cio_dma_free(iv->vector, iv_size(iv->bits));
+ kfree(iv->avail);
+ kfree(iv);
+}
+EXPORT_SYMBOL(airq_iv_release);
+
+/**
+ * airq_iv_alloc - allocate irq bits from an interrupt vector
+ * @iv: pointer to an interrupt vector structure
+ * @num: number of consecutive irq bits to allocate
+ *
+ * Returns the bit number of the first irq in the allocated block of irqs,
+ * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
+ * specified
+ */
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
+{
+ unsigned long bit, i, flags;
+
+ if (!iv->avail || num == 0)
+ return -1UL;
+ spin_lock_irqsave(&iv->lock, flags);
+ bit = find_first_bit_inv(iv->avail, iv->bits);
+ while (bit + num <= iv->bits) {
+ for (i = 1; i < num; i++)
+ if (!test_bit_inv(bit + i, iv->avail))
+ break;
+ if (i >= num) {
+ /* Found a suitable block of irqs */
+ for (i = 0; i < num; i++)
+ clear_bit_inv(bit + i, iv->avail);
+ if (bit + num >= iv->end)
+ iv->end = bit + num + 1;
+ break;
+ }
+ bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
+ }
+ if (bit + num > iv->bits)
+ bit = -1UL;
+ spin_unlock_irqrestore(&iv->lock, flags);
+ return bit;
+}
+EXPORT_SYMBOL(airq_iv_alloc);
+
+/**
+ * airq_iv_free - free irq bits of an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ * @bit: number of the first irq bit to free
+ * @num: number of consecutive irq bits to free
+ */
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
+{
+ unsigned long i, flags;
+
+ if (!iv->avail || num == 0)
+ return;
+ spin_lock_irqsave(&iv->lock, flags);
+ for (i = 0; i < num; i++) {
+ /* Clear (possibly left over) interrupt bit */
+ clear_bit_inv(bit + i, iv->vector);
+ /* Make the bit positions available again */
+ set_bit_inv(bit + i, iv->avail);
+ }
+ if (bit + num >= iv->end) {
+ /* Find new end of bit-field */
+ while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
+ iv->end--;
+ }
+ spin_unlock_irqrestore(&iv->lock, flags);
+}
+EXPORT_SYMBOL(airq_iv_free);
+
+/**
+ * airq_iv_scan - scan interrupt vector for non-zero bits
+ * @iv: pointer to interrupt vector structure
+ * @start: bit number to start the search
+ * @end: bit number to end the search
+ *
+ * Returns the bit number of the next non-zero interrupt bit, or
+ * -1UL if the scan completed without finding any more non-zero bits.
+ */
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+ unsigned long end)
+{
+ unsigned long bit;
+
+ /* Find the next non-zero bit starting from 'start'. */
+ bit = find_next_bit_inv(iv->vector, end, start);
+ if (bit >= end)
+ return -1UL;
+ clear_bit_inv(bit, iv->vector);
+ return bit;
+}
+EXPORT_SYMBOL(airq_iv_scan);
+
+int __init airq_init(void)
+{
+ airq_iv_cache = dma_pool_create("airq_iv_cache", cio_get_dma_css_dev(),
+ cache_line_size(),
+ cache_line_size(), PAGE_SIZE);
+ if (!airq_iv_cache)
+ return -ENOMEM;
+ return 0;
+}
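
For orientation, a minimal consumer sketch of the adapter-interrupt interface added above (not part of the patch; the handler name, ISC value, vector size and flag choice are assumptions picked for illustration):

	static void my_airq_handler(struct airq_struct *airq, bool floating)
	{
		/* Acknowledge the summary indicator and dispatch per-bit work,
		 * e.g. by walking a vector with airq_iv_scan(). */
	}

	static struct airq_struct my_airq = {
		.handler = my_airq_handler,
		.isc     = 3,		/* assumed interruption subclass */
	};

	static struct airq_iv *my_iv;

	static int my_driver_init(void)
	{
		int rc;

		/* lsi_ptr is left NULL, so register_adapter_interrupt()
		 * allocates the summary indicator itself (AIRQ_PTR_ALLOCATED). */
		rc = register_adapter_interrupt(&my_airq);
		if (rc)
			return rc;

		/* 256 interrupt bits, managed via airq_iv_alloc()/airq_iv_free(). */
		my_iv = airq_iv_create(256, AIRQ_IV_ALLOC | AIRQ_IV_BITLOCK);
		if (!my_iv) {
			unregister_adapter_interrupt(&my_airq);
			return -ENOMEM;
		}
		return 0;
	}
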
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
new file mode 100644
index 000000000..4dd2eb634
--- /dev/null
+++ b/drivers/s390/cio/blacklist.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * S/390 common I/O routines -- blacklisting of specific devices
+ *
+ * Copyright IBM Corp. 1999, 2013
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+
+#include <linux/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ipl.h>
+
+#include "blacklist.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+
+/*
+ * "Blacklisting" of certain devices:
+ * Device numbers given in the commandline as cio_ignore=... won't be known
+ * to Linux.
+ *
+ * These can be single devices or ranges of devices
+ */
+
+/* 65536 bits for each set to indicate if a devno is blacklisted or not */
+#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
+ (8*sizeof(long)))
+static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
+typedef enum {add, free} range_action;
+
+/*
+ * Function: blacklist_range
+ * (Un-)blacklist the devices from-to
+ */
+static int blacklist_range(range_action action, unsigned int from_ssid,
+ unsigned int to_ssid, unsigned int from,
+ unsigned int to, int msgtrigger)
+{
+ if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
+ if (msgtrigger)
+ pr_warn("0.%x.%04x to 0.%x.%04x is not a valid range for cio_ignore\n",
+ from_ssid, from, to_ssid, to);
+
+ return 1;
+ }
+
+ while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
+ (from <= to))) {
+ if (action == add)
+ set_bit(from, bl_dev[from_ssid]);
+ else
+ clear_bit(from, bl_dev[from_ssid]);
+ from++;
+ if (from > __MAX_SUBCHANNEL) {
+ from_ssid++;
+ from = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int pure_hex(char **cp, unsigned int *val, int min_digit,
+ int max_digit, int max_val)
+{
+ int diff;
+
+ diff = 0;
+ *val = 0;
+
+ while (diff <= max_digit) {
+ int value = hex_to_bin(**cp);
+
+ if (value < 0)
+ break;
+ *val = *val * 16 + value;
+ (*cp)++;
+ diff++;
+ }
+
+ if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
+ return 1;
+
+ return 0;
+}
+
+static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
+ unsigned int *devno, int msgtrigger)
+{
+ char *str_work;
+ int val, rc, ret;
+
+ rc = 1;
+
+ if (*str == '\0')
+ goto out;
+
+ /* old style */
+ str_work = str;
+ val = simple_strtoul(str, &str_work, 16);
+
+ if (*str_work == '\0') {
+ if (val <= __MAX_SUBCHANNEL) {
+ *devno = val;
+ *ssid = 0;
+ *cssid = 0;
+ rc = 0;
+ }
+ goto out;
+ }
+
+ /* new style */
+ str_work = str;
+ ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
+ if (ret || (str_work[0] != '\0'))
+ goto out;
+
+ rc = 0;
+out:
+ if (rc && msgtrigger)
+ pr_warn("%s is not a valid device for the cio_ignore kernel parameter\n",
+ str);
+
+ return rc;
+}
+
+static int blacklist_parse_parameters(char *str, range_action action,
+ int msgtrigger)
+{
+ unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+ int rc, totalrc;
+ char *parm;
+ range_action ra;
+
+ totalrc = 0;
+
+ while ((parm = strsep(&str, ","))) {
+ rc = 0;
+ ra = action;
+ if (*parm == '!') {
+ if (ra == add)
+ ra = free;
+ else
+ ra = add;
+ parm++;
+ }
+ if (strcmp(parm, "all") == 0) {
+ from_cssid = 0;
+ from_ssid = 0;
+ from = 0;
+ to_cssid = __MAX_CSSID;
+ to_ssid = __MAX_SSID;
+ to = __MAX_SUBCHANNEL;
+ } else if (strcmp(parm, "ipldev") == 0) {
+ if (ipl_info.type == IPL_TYPE_CCW) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.ccw.dev_id.ssid;
+ from = ipl_info.data.ccw.dev_id.devno;
+ } else if (ipl_info.type == IPL_TYPE_FCP ||
+ ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ from_cssid = 0;
+ from_ssid = ipl_info.data.fcp.dev_id.ssid;
+ from = ipl_info.data.fcp.dev_id.devno;
+ } else {
+ continue;
+ }
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ } else if (strcmp(parm, "condev") == 0) {
+ if (console_devno == -1)
+ continue;
+
+ from_cssid = to_cssid = 0;
+ from_ssid = to_ssid = 0;
+ from = to = console_devno;
+ } else {
+ rc = parse_busid(strsep(&parm, "-"), &from_cssid,
+ &from_ssid, &from, msgtrigger);
+ if (!rc) {
+ if (parm != NULL)
+ rc = parse_busid(parm, &to_cssid,
+ &to_ssid, &to,
+ msgtrigger);
+ else {
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ }
+ }
+ }
+ if (!rc) {
+ rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
+ msgtrigger);
+ if (rc)
+ totalrc = -EINVAL;
+ } else
+ totalrc = -EINVAL;
+ }
+
+ return totalrc;
+}
+
+static int __init
+blacklist_setup (char *str)
+{
+ CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
+ if (blacklist_parse_parameters(str, add, 1))
+ return 0;
+ return 1;
+}
+
+__setup ("cio_ignore=", blacklist_setup);
+
+/* Checking if devices are blacklisted */
+
+/*
+ * Function: is_blacklisted
+ * Returns 1 if the given device number can be found in the blacklist,
+ * otherwise 0.
+ * Used by validate_subchannel()
+ */
+int
+is_blacklisted (int ssid, int devno)
+{
+ return test_bit (devno, bl_dev[ssid]);
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Function: blacklist_parse_proc_parameters
+ * parse the stuff which is piped to /proc/cio_ignore
+ */
+static int blacklist_parse_proc_parameters(char *buf)
+{
+ int rc;
+ char *parm;
+
+ parm = strsep(&buf, " ");
+
+ if (strcmp("free", parm) == 0) {
+ rc = blacklist_parse_parameters(buf, free, 0);
+ css_schedule_eval_all_unreg(0);
+ } else if (strcmp("add", parm) == 0)
+ rc = blacklist_parse_parameters(buf, add, 0);
+ else if (strcmp("purge", parm) == 0)
+ return ccw_purge_blacklisted();
+ else
+ return -EINVAL;
+
+
+ return rc;
+}
+
+/* Iterator struct for all devices. */
+struct ccwdev_iter {
+ int devno;
+ int ssid;
+ int in_range;
+};
+
+static void *
+cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
+{
+ struct ccwdev_iter *iter = s->private;
+
+ if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ memset(iter, 0, sizeof(*iter));
+ iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
+ iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
+ return iter;
+}
+
+static void
+cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
+{
+}
+
+static void *
+cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+ struct ccwdev_iter *iter;
+ loff_t p = *offset;
+
+ (*offset)++;
+ if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ iter = it;
+ if (iter->devno == __MAX_SUBCHANNEL) {
+ iter->devno = 0;
+ iter->ssid++;
+ if (iter->ssid > __MAX_SSID)
+ return NULL;
+ } else
+ iter->devno++;
+ return iter;
+}
+
+static int
+cio_ignore_proc_seq_show(struct seq_file *s, void *it)
+{
+ struct ccwdev_iter *iter;
+
+ iter = it;
+ if (!is_blacklisted(iter->ssid, iter->devno))
+ /* Not blacklisted, nothing to output. */
+ return 0;
+ if (!iter->in_range) {
+ /* First device in range. */
+ if ((iter->devno == __MAX_SUBCHANNEL) ||
+ !is_blacklisted(iter->ssid, iter->devno + 1)) {
+ /* Singular device. */
+ seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+ return 0;
+ }
+ iter->in_range = 1;
+ seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
+ return 0;
+ }
+ if ((iter->devno == __MAX_SUBCHANNEL) ||
+ !is_blacklisted(iter->ssid, iter->devno + 1)) {
+ /* Last device in range. */
+ iter->in_range = 0;
+ seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+ }
+ return 0;
+}
+
+static ssize_t
+cio_ignore_write(struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
+{
+ char *buf;
+ ssize_t rc, ret, i;
+
+ if (*offset)
+ return -EINVAL;
+ if (user_len > 65536)
+ user_len = 65536;
+ buf = vzalloc(user_len + 1); /* maybe better use the stack? */
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (strncpy_from_user (buf, user_buf, user_len) < 0) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+
+ i = user_len - 1;
+ while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
+ buf[i] = '\0';
+ i--;
+ }
+ ret = blacklist_parse_proc_parameters(buf);
+ if (ret)
+ rc = ret;
+ else
+ rc = user_len;
+
+out_free:
+ vfree (buf);
+ return rc;
+}
+
+static const struct seq_operations cio_ignore_proc_seq_ops = {
+ .start = cio_ignore_proc_seq_start,
+ .stop = cio_ignore_proc_seq_stop,
+ .next = cio_ignore_proc_seq_next,
+ .show = cio_ignore_proc_seq_show,
+};
+
+static int
+cio_ignore_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open_private(file, &cio_ignore_proc_seq_ops,
+ sizeof(struct ccwdev_iter));
+}
+
+static const struct proc_ops cio_ignore_proc_ops = {
+ .proc_open = cio_ignore_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_private,
+ .proc_write = cio_ignore_write,
+};
+
+static int
+cio_ignore_proc_init (void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+ &cio_ignore_proc_ops);
+ if (!entry)
+ return -ENOENT;
+ return 0;
+}
+
+__initcall (cio_ignore_proc_init);
+
+#endif /* CONFIG_PROC_FS */
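
For reference, the syntax accepted by blacklist_parse_parameters() above covers both the cio_ignore= kernel parameter and writes to /proc/cio_ignore; the device numbers in this sketch are made up:

	/*
	 * Illustrative inputs (hypothetical device numbers):
	 *
	 *   cio_ignore=all,!0.0.1234,!0.0.4000-0.0.40ff
	 *	boot parameter: ignore everything except device 0.0.1234 and
	 *	the range 0.0.4000-0.0.40ff ('!' un-blacklists an entry)
	 *
	 *   echo add 0.0.5678 > /proc/cio_ignore
	 *	blacklist a single device at runtime
	 *
	 *   echo free 0.0.4000-0.0.40ff > /proc/cio_ignore
	 *	remove a range from the blacklist again
	 *
	 *   echo purge > /proc/cio_ignore
	 *	remove already-registered ccw devices that are now on the
	 *	blacklist (handled by ccw_purge_blacklisted())
	 *
	 * The keywords "all", "ipldev" (IPL device) and "condev" (console
	 * device) are expanded by the parser above.
	 */
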
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
new file mode 100644
index 000000000..140e3e4ee
--- /dev/null
+++ b/drivers/s390/cio/blacklist.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_BLACKLIST_H
+#define S390_BLACKLIST_H
+
+extern int is_blacklisted (int ssid, int devno);
+
+#endif
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
new file mode 100644
index 000000000..483a9ecfc
--- /dev/null
+++ b/drivers/s390/cio/ccwgroup.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bus driver for ccwgroup
+ *
+ * Copyright IBM Corp. 2002, 2012
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/dcache.h>
+
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+#include "device.h"
+
+#define CCW_BUS_ID_SIZE 10
+
+/* In Linux 2.4, we had a channel device layer called "chandev"
+ * that did all sorts of obscure stuff for networking devices.
+ * This is another driver that serves as a replacement for just
+ * one of its functions, namely the translation of single subchannels
+ * to devices that use multiple subchannels.
+ */
+
+static struct bus_type ccwgroup_bus_type;
+
+static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
+{
+ int i;
+ char str[16];
+
+ for (i = 0; i < gdev->count; i++) {
+ sprintf(str, "cdev%d", i);
+ sysfs_remove_link(&gdev->dev.kobj, str);
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
+ }
+}
+
+/*
+ * Remove references from ccw devices to ccw group device and from
+ * ccw group device to ccw devices.
+ */
+static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
+{
+ struct ccw_device *cdev;
+ int i;
+
+ for (i = 0; i < gdev->count; i++) {
+ cdev = gdev->cdev[i];
+ if (!cdev)
+ continue;
+ spin_lock_irq(cdev->ccwlock);
+ dev_set_drvdata(&cdev->dev, NULL);
+ spin_unlock_irq(cdev->ccwlock);
+ gdev->cdev[i] = NULL;
+ put_device(&cdev->dev);
+ }
+}
+
+/**
+ * ccwgroup_set_online() - enable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the online state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_online(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = -EINVAL;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_ONLINE)
+ goto out;
+ if (gdrv->set_online)
+ ret = gdrv->set_online(gdev);
+ if (ret)
+ goto out;
+
+ gdev->state = CCWGROUP_ONLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+EXPORT_SYMBOL(ccwgroup_set_online);
+
+/**
+ * ccwgroup_set_offline() - disable a ccwgroup device
+ * @gdev: target ccwgroup device
+ *
+ * This function attempts to put the ccwgroup device into the offline state.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+{
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
+ int ret = -EINVAL;
+
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state == CCWGROUP_OFFLINE)
+ goto out;
+ if (gdrv->set_offline)
+ ret = gdrv->set_offline(gdev);
+ if (ret)
+ goto out;
+
+ gdev->state = CCWGROUP_OFFLINE;
+out:
+ atomic_set(&gdev->onoff, 0);
+ return ret;
+}
+EXPORT_SYMBOL(ccwgroup_set_offline);
+
+static ssize_t ccwgroup_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ unsigned long value;
+ int ret;
+
+ device_lock(dev);
+ if (!dev->driver) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ goto out;
+
+ if (value == 1)
+ ret = ccwgroup_set_online(gdev);
+ else if (value == 0)
+ ret = ccwgroup_set_offline(gdev);
+ else
+ ret = -EINVAL;
+out:
+ device_unlock(dev);
+ return (ret == 0) ? count : ret;
+}
+
+static ssize_t ccwgroup_online_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ int online;
+
+ online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", online);
+}
+
+/*
+ * Provide an 'ungroup' attribute so the user can remove group devices no
+ * longer needed or accidentally created. Saves memory :)
+ */
+static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
+{
+ mutex_lock(&gdev->reg_mutex);
+ if (device_is_registered(&gdev->dev)) {
+ __ccwgroup_remove_symlinks(gdev);
+ device_unregister(&gdev->dev);
+ __ccwgroup_remove_cdev_refs(gdev);
+ }
+ mutex_unlock(&gdev->reg_mutex);
+}
+
+static ssize_t ccwgroup_ungroup_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ int rc = 0;
+
+ /* Prevent concurrent online/offline processing and ungrouping. */
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ if (gdev->state != CCWGROUP_OFFLINE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (device_remove_file_self(dev, attr))
+ ccwgroup_ungroup(gdev);
+ else
+ rc = -ENODEV;
+out:
+ if (rc) {
+ /* Release onoff "lock" when ungrouping failed. */
+ atomic_set(&gdev->onoff, 0);
+ return rc;
+ }
+ return count;
+}
+static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
+static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
+
+static struct attribute *ccwgroup_attrs[] = {
+ &dev_attr_online.attr,
+ &dev_attr_ungroup.attr,
+ NULL,
+};
+static struct attribute_group ccwgroup_attr_group = {
+ .attrs = ccwgroup_attrs,
+};
+static const struct attribute_group *ccwgroup_attr_groups[] = {
+ &ccwgroup_attr_group,
+ NULL,
+};
+
+static void ccwgroup_ungroup_workfn(struct work_struct *work)
+{
+ struct ccwgroup_device *gdev =
+ container_of(work, struct ccwgroup_device, ungroup_work);
+
+ ccwgroup_ungroup(gdev);
+ put_device(&gdev->dev);
+}
+
+static void ccwgroup_release(struct device *dev)
+{
+ kfree(to_ccwgroupdev(dev));
+}
+
+static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
+{
+ char str[16];
+ int i, rc;
+
+ for (i = 0; i < gdev->count; i++) {
+ rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
+ &gdev->dev.kobj, "group_device");
+ if (rc) {
+ for (--i; i >= 0; i--)
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+ "group_device");
+ return rc;
+ }
+ }
+ for (i = 0; i < gdev->count; i++) {
+ sprintf(str, "cdev%d", i);
+ rc = sysfs_create_link(&gdev->dev.kobj,
+ &gdev->cdev[i]->dev.kobj, str);
+ if (rc) {
+ for (--i; i >= 0; i--) {
+ sprintf(str, "cdev%d", i);
+ sysfs_remove_link(&gdev->dev.kobj, str);
+ }
+ for (i = 0; i < gdev->count; i++)
+ sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
+ "group_device");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int __get_next_id(const char **buf, struct ccw_dev_id *id)
+{
+ unsigned int cssid, ssid, devno;
+ int ret = 0, len;
+ char *start, *end;
+
+ start = (char *)*buf;
+ end = strchr(start, ',');
+ if (!end) {
+ /* Last entry. Strip trailing newline, if applicable. */
+ end = strchr(start, '\n');
+ if (end)
+ *end = '\0';
+ len = strlen(start) + 1;
+ } else {
+ len = end - start + 1;
+ end++;
+ }
+ if (len <= CCW_BUS_ID_SIZE) {
+ if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+
+ if (!ret) {
+ id->ssid = ssid;
+ id->devno = devno;
+ }
+ *buf = end;
+ return ret;
+}
+
+/**
+ * ccwgroup_create_dev() - create and register a ccw group device
+ * @parent: parent device for the new device
+ * @gdrv: driver for the new group device
+ * @num_devices: number of slave devices
+ * @buf: buffer containing comma separated bus ids of slave devices
+ *
+ * Create and register a new ccw group device as a child of @parent. Slave
+ * devices are obtained from the list of bus ids given in @buf.
+ * Returns:
+ * %0 on success and an error code on failure.
+ * Context:
+ * non-atomic
+ */
+int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
+ int num_devices, const char *buf)
+{
+ struct ccwgroup_device *gdev;
+ struct ccw_dev_id dev_id;
+ int rc, i;
+
+ if (num_devices < 1)
+ return -EINVAL;
+
+ gdev = kzalloc(struct_size(gdev, cdev, num_devices), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ atomic_set(&gdev->onoff, 0);
+ mutex_init(&gdev->reg_mutex);
+ mutex_lock(&gdev->reg_mutex);
+ INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
+ gdev->count = num_devices;
+ gdev->dev.bus = &ccwgroup_bus_type;
+ gdev->dev.parent = parent;
+ gdev->dev.release = ccwgroup_release;
+ device_initialize(&gdev->dev);
+
+ for (i = 0; i < num_devices && buf; i++) {
+ rc = __get_next_id(&buf, &dev_id);
+ if (rc != 0)
+ goto error;
+ gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
+ /*
+ * All devices have to be of the same type in
+ * order to be grouped.
+ */
+ if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
+ gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
+ gdev->cdev[i]->id.driver_info !=
+ gdev->cdev[0]->id.driver_info) {
+ rc = -EINVAL;
+ goto error;
+ }
+ /* Don't allow a device to belong to more than one group. */
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
+ if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
+ rc = -EINVAL;
+ goto error;
+ }
+ dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
+ }
+ /* Check for sufficient number of bus ids. */
+ if (i < num_devices) {
+ rc = -EINVAL;
+ goto error;
+ }
+ /* Check for trailing stuff. */
+ if (i == num_devices && buf && strlen(buf) > 0) {
+ rc = -EINVAL;
+ goto error;
+ }
+ /* Check if the devices are bound to the required ccw driver. */
+ if (gdrv && gdrv->ccw_driver &&
+ gdev->cdev[0]->drv != gdrv->ccw_driver) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
+ gdev->dev.groups = ccwgroup_attr_groups;
+
+ if (gdrv) {
+ gdev->dev.driver = &gdrv->driver;
+ rc = gdrv->setup ? gdrv->setup(gdev) : 0;
+ if (rc)
+ goto error;
+ }
+ rc = device_add(&gdev->dev);
+ if (rc)
+ goto error;
+ rc = __ccwgroup_create_symlinks(gdev);
+ if (rc) {
+ device_del(&gdev->dev);
+ goto error;
+ }
+ mutex_unlock(&gdev->reg_mutex);
+ return 0;
+error:
+ for (i = 0; i < num_devices; i++)
+ if (gdev->cdev[i]) {
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
+ if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
+ dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
+ put_device(&gdev->cdev[i]->dev);
+ gdev->cdev[i] = NULL;
+ }
+ mutex_unlock(&gdev->reg_mutex);
+ put_device(&gdev->dev);
+ return rc;
+}
+EXPORT_SYMBOL(ccwgroup_create_dev);
+
+static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(data);
+
+ if (action == BUS_NOTIFY_UNBIND_DRIVER) {
+ get_device(&gdev->dev);
+ schedule_work(&gdev->ungroup_work);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ccwgroup_nb = {
+ .notifier_call = ccwgroup_notifier
+};
+
+static int __init init_ccwgroup(void)
+{
+ int ret;
+
+ ret = bus_register(&ccwgroup_bus_type);
+ if (ret)
+ return ret;
+
+ ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+ if (ret)
+ bus_unregister(&ccwgroup_bus_type);
+
+ return ret;
+}
+
+static void __exit cleanup_ccwgroup(void)
+{
+ bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
+ bus_unregister(&ccwgroup_bus_type);
+}
+
+module_init(init_ccwgroup);
+module_exit(cleanup_ccwgroup);
+
+/************************** driver stuff ******************************/
+
+static int ccwgroup_remove(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+ if (!dev->driver)
+ return 0;
+ if (gdrv->remove)
+ gdrv->remove(gdev);
+
+ return 0;
+}
+
+static void ccwgroup_shutdown(struct device *dev)
+{
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
+
+ if (!dev->driver)
+ return;
+ if (gdrv->shutdown)
+ gdrv->shutdown(gdev);
+}
+
+static struct bus_type ccwgroup_bus_type = {
+ .name = "ccwgroup",
+ .remove = ccwgroup_remove,
+ .shutdown = ccwgroup_shutdown,
+};
+
+bool dev_is_ccwgroup(struct device *dev)
+{
+ return dev->bus == &ccwgroup_bus_type;
+}
+EXPORT_SYMBOL(dev_is_ccwgroup);
+
+/**
+ * ccwgroup_driver_register() - register a ccw group driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ */
+int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
+{
+ /* register our new driver with the core */
+ cdriver->driver.bus = &ccwgroup_bus_type;
+
+ return driver_register(&cdriver->driver);
+}
+EXPORT_SYMBOL(ccwgroup_driver_register);
+
+/**
+ * ccwgroup_driver_unregister() - deregister a ccw group driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
+{
+ struct device *dev;
+
+ /* We don't want ccwgroup devices to live longer than their driver. */
+ while ((dev = driver_find_next_device(&cdriver->driver, NULL))) {
+ struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+
+ ccwgroup_ungroup(gdev);
+ put_device(dev);
+ }
+ driver_unregister(&cdriver->driver);
+}
+EXPORT_SYMBOL(ccwgroup_driver_unregister);
+
+/**
+ * get_ccwgroupdev_by_busid() - obtain device from a bus id
+ * @gdrv: driver the device is owned by
+ * @bus_id: bus id of the device to be searched
+ *
+ * This function searches all devices owned by @gdrv for a device with a bus
+ * id matching @bus_id.
+ * Returns:
+ * If a match is found, the reference count of the found device is increased
+ * and it is returned; else %NULL is returned.
+ */
+struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
+ char *bus_id)
+{
+ struct device *dev;
+
+ dev = driver_find_device_by_name(&gdrv->driver, bus_id);
+
+ return dev ? to_ccwgroupdev(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(get_ccwgroupdev_by_busid);
+
+/**
+ * ccwgroup_probe_ccwdev() - probe function for slave devices
+ * @cdev: ccw device to be probed
+ *
+ * This is a dummy probe function for ccw devices that are slave devices in
+ * a ccw group device.
+ * Returns:
+ * always %0
+ */
+int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
+
+/**
+ * ccwgroup_remove_ccwdev() - remove function for slave devices
+ * @cdev: ccw device to be removed
+ *
+ * This is a remove function for ccw devices that are slave devices in a ccw
+ * group device. It sets the ccw device offline and also deregisters the
+ * embedding ccw group device.
+ */
+void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
+{
+ struct ccwgroup_device *gdev;
+
+ /* Ignore offlining errors, device is gone anyway. */
+ ccw_device_set_offline(cdev);
+ /* If one of its devices is gone, the whole group is done for. */
+ spin_lock_irq(cdev->ccwlock);
+ gdev = dev_get_drvdata(&cdev->dev);
+ if (!gdev) {
+ spin_unlock_irq(cdev->ccwlock);
+ return;
+ }
+ /* Get ccwgroup device reference for local processing. */
+ get_device(&gdev->dev);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Unregister group device. */
+ ccwgroup_ungroup(gdev);
+ /* Release ccwgroup device reference for local processing. */
+ put_device(&gdev->dev);
+}
+EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
+MODULE_LICENSE("GPL");
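
A schematic of how a ccwgroup driver (in-tree users are network drivers such as qeth) might hook into the bus code above; the driver name, bus ids and callback bodies are illustrative assumptions, not part of this patch:

	static int my_group_setup(struct ccwgroup_device *gdev)
	{
		/* Allocate per-group state; invoked from ccwgroup_create_dev(). */
		return 0;
	}

	static int my_group_set_online(struct ccwgroup_device *gdev)
	{
		/* Bring the gdev->cdev[] slave devices online; called when 1 is
		 * written to the group device's "online" attribute. */
		return 0;
	}

	static int my_group_set_offline(struct ccwgroup_device *gdev)
	{
		return 0;
	}

	static struct ccwgroup_driver my_group_driver = {
		.driver		= { .name = "my_group" },
		.setup		= my_group_setup,
		.set_online	= my_group_set_online,
		.set_offline	= my_group_set_offline,
	};

	static int __init my_init(void)
	{
		return ccwgroup_driver_register(&my_group_driver);
	}

	/* Typically triggered from a driver-specific sysfs attribute, e.g. with
	 * buf = "0.0.f500,0.0.f501,0.0.f502" to group three subchannels: */
	static int my_group_create(struct device *parent, const char *buf)
	{
		return ccwgroup_create_dev(parent, &my_group_driver, 3, buf);
	}
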
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 000000000..73582a0a2
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handling of internal CCW device requests.
+ *
+ * Copyright IBM Corp. 2009, 2011
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "io_sch.h"
+#include "cio.h"
+#include "device.h"
+#include "cio_debug.h"
+
+/**
+ * lpm_adjust - adjust path mask
+ * @lpm: path mask to adjust
+ * @mask: mask of available paths
+ *
+ * Shift @lpm right until @lpm and @mask have at least one bit in common or
+ * until @lpm is zero. Return the resulting lpm.
+ */
+int lpm_adjust(int lpm, int mask)
+{
+ while (lpm && ((lpm & mask) == 0))
+ lpm >>= 1;
+ return lpm;
+}
+
+/*
+ * Adjust path mask to use next path and reset retry count. Return resulting
+ * path mask.
+ */
+static u16 ccwreq_next_path(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (!req->singlepath) {
+ req->mask = 0;
+ goto out;
+ }
+ req->retries = req->maxretries;
+ req->mask = lpm_adjust(req->mask >> 1, req->lpm);
+out:
+ return req->mask;
+}
+
+/*
+ * Clean up device state and report to callback.
+ */
+static void ccwreq_stop(struct ccw_device *cdev, int rc)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (req->done)
+ return;
+ req->done = 1;
+ ccw_device_set_timeout(cdev, 0);
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+ if (rc && rc != -ENODEV && req->drc)
+ rc = req->drc;
+ req->callback(cdev, req->data, rc);
+}
+
+/*
+ * (Re-)Start the operation until retries and paths are exhausted.
+ */
+static void ccwreq_do(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw1 *cp = req->cp;
+ int rc = -EACCES;
+
+ while (req->mask) {
+ if (req->retries-- == 0) {
+ /* Retries exhausted, try next path. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Perform start function. */
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+ rc = cio_start(sch, cp, (u8) req->mask);
+ if (rc == 0) {
+ /* I/O started successfully. */
+ ccw_device_set_timeout(cdev, req->timeout);
+ return;
+ }
+ if (rc == -ENODEV) {
+ /* Permanent device error. */
+ break;
+ }
+ if (rc == -EACCES) {
+ /* Permanent path error. */
+ ccwreq_next_path(cdev);
+ continue;
+ }
+ /* Temporary improper status. */
+ rc = cio_clear(sch);
+ if (rc)
+ break;
+ return;
+ }
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_start - perform I/O request
+ * @cdev: ccw device
+ *
+ * Perform the I/O request specified by cdev->req.
+ */
+void ccw_request_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ if (req->singlepath) {
+ /* Try all paths twice to counter link flapping. */
+ req->mask = 0x8080;
+ } else
+ req->mask = req->lpm;
+
+ req->retries = req->maxretries;
+ req->mask = lpm_adjust(req->mask, req->lpm);
+ req->drc = 0;
+ req->done = 0;
+ req->cancel = 0;
+ if (!req->mask)
+ goto out_nopath;
+ ccwreq_do(cdev);
+ return;
+
+out_nopath:
+ ccwreq_stop(cdev, -EACCES);
+}
+
+/**
+ * ccw_request_cancel - cancel running I/O request
+ * @cdev: ccw device
+ *
+ * Cancel the I/O request specified by cdev->req. Return non-zero if request
+ * has already finished, zero otherwise.
+ */
+int ccw_request_cancel(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc;
+
+ if (req->done)
+ return 1;
+ req->cancel = 1;
+ rc = cio_clear(sch);
+ if (rc)
+ ccwreq_stop(cdev, rc);
+ return 0;
+}
+
+/*
+ * Return the status of the internal I/O started on the specified ccw device.
+ * Perform BASIC SENSE if required.
+ */
+static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
+{
+ struct irb *irb = &cdev->private->dma_area->irb;
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ enum uc_todo todo;
+
+ /* Perform BASIC SENSE if needed. */
+ if (ccw_device_accumulate_and_sense(cdev, lcirb))
+ return IO_RUNNING;
+ /* Check for halt/clear interrupt. */
+ if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+ return IO_KILLED;
+ /* Check for path error. */
+ if (scsw->cc == 3 || scsw->pno)
+ return IO_PATH_ERROR;
+ /* Handle BASIC SENSE data. */
+ if (irb->esw.esw0.erw.cons) {
+ CIO_TRACE_EVENT(2, "sensedata");
+ CIO_HEX_EVENT(2, &cdev->private->dev_id,
+ sizeof(struct ccw_dev_id));
+ CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
+ SENSE_MAX_COUNT);
+ /* Check for command reject. */
+ if (irb->ecw[0] & SNS0_CMD_REJECT)
+ return IO_REJECTED;
+ /* Ask the driver what to do */
+ if (cdev->drv && cdev->drv->uc_handler) {
+ todo = cdev->drv->uc_handler(cdev, lcirb);
+ CIO_TRACE_EVENT(2, "uc_response");
+ CIO_HEX_EVENT(2, &todo, sizeof(todo));
+ switch (todo) {
+ case UC_TODO_RETRY:
+ return IO_STATUS_ERROR;
+ case UC_TODO_RETRY_ON_NEW_PATH:
+ return IO_PATH_ERROR;
+ case UC_TODO_STOP:
+ return IO_REJECTED;
+ default:
+ return IO_STATUS_ERROR;
+ }
+ }
+ /* Assume that unexpected SENSE data implies an error. */
+ return IO_STATUS_ERROR;
+ }
+ /* Check for channel errors. */
+ if (scsw->cstat != 0)
+ return IO_STATUS_ERROR;
+ /* Check for device errors. */
+ if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ return IO_STATUS_ERROR;
+ /* Check for final state. */
+ if (!(scsw->dstat & DEV_STAT_DEV_END))
+ return IO_RUNNING;
+ /* Check for other improper status. */
+ if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
+ return IO_STATUS_ERROR;
+ return IO_DONE;
+}
+
+/*
+ * Log ccw request status.
+ */
+static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct {
+ struct ccw_dev_id dev_id;
+ u16 retries;
+ u8 lpm;
+ u8 status;
+ } __attribute__ ((packed)) data;
+ data.dev_id = cdev->private->dev_id;
+ data.retries = req->retries;
+ data.lpm = (u8) req->mask;
+ data.status = (u8) status;
+ CIO_TRACE_EVENT(2, "reqstat");
+ CIO_HEX_EVENT(2, &data, sizeof(data));
+}
+
+/**
+ * ccw_request_handler - interrupt handler for I/O request procedure.
+ * @cdev: ccw device
+ *
+ * Handle interrupt during I/O request procedure.
+ */
+void ccw_request_handler(struct ccw_device *cdev)
+{
+ struct irb *irb = this_cpu_ptr(&cio_irb);
+ struct ccw_request *req = &cdev->private->req;
+ enum io_status status;
+ int rc = -EOPNOTSUPP;
+
+ /* Check status of I/O request. */
+ status = ccwreq_status(cdev, irb);
+ if (req->filter)
+ status = req->filter(cdev, req->data, irb, status);
+ if (status != IO_RUNNING)
+ ccw_device_set_timeout(cdev, 0);
+ if (status != IO_DONE && status != IO_RUNNING)
+ ccwreq_log_status(cdev, status);
+ switch (status) {
+ case IO_DONE:
+ break;
+ case IO_RUNNING:
+ return;
+ case IO_REJECTED:
+ goto err;
+ case IO_PATH_ERROR:
+ goto out_next_path;
+ case IO_STATUS_ERROR:
+ goto out_restart;
+ case IO_KILLED:
+ /* Check if request was cancelled on purpose. */
+ if (req->cancel) {
+ rc = -EIO;
+ goto err;
+ }
+ goto out_restart;
+ }
+ /* Check back with request initiator. */
+ if (!req->check)
+ goto out;
+ switch (req->check(cdev, req->data)) {
+ case 0:
+ break;
+ case -EAGAIN:
+ goto out_restart;
+ case -EACCES:
+ goto out_next_path;
+ default:
+ goto err;
+ }
+out:
+ ccwreq_stop(cdev, 0);
+ return;
+
+out_next_path:
+ /* Try next path and restart I/O. */
+ if (!ccwreq_next_path(cdev)) {
+ rc = -EACCES;
+ goto err;
+ }
+out_restart:
+ /* Restart. */
+ ccwreq_do(cdev);
+ return;
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+
+/**
+ * ccw_request_timeout - timeout handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle timeout during I/O request procedure.
+ */
+void ccw_request_timeout(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int rc = -ENODEV, chp;
+
+ if (cio_update_schib(sch))
+ goto err;
+
+ for (chp = 0; chp < 8; chp++) {
+ if ((0x80 >> chp) & sch->schib.pmcw.lpum)
+ pr_warn("%s: No interrupt was received within %lus (CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
+ dev_name(&cdev->dev), req->timeout / HZ,
+ scsw_cstat(&sch->schib.scsw),
+ scsw_dstat(&sch->schib.scsw),
+ sch->schid.cssid,
+ sch->schib.pmcw.chpid[chp]);
+ }
+
+ if (!ccwreq_next_path(cdev)) {
+ /* set the final return code for this request */
+ req->drc = -ETIME;
+ }
+ rc = cio_clear(sch);
+ if (rc)
+ goto err;
+ return;
+
+err:
+ ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_notoper - notoper handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle notoper during I/O request procedure.
+ */
+void ccw_request_notoper(struct ccw_device *cdev)
+{
+ ccwreq_stop(cdev, -ENODEV);
+}
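
The request machinery above is internal to the common I/O layer (its callers are files such as device_id.c and device_pgid.c). A rough sketch of the calling pattern, with illustrative channel-program, timeout and path-mask values:

	/* Worked example for lpm_adjust(): lpm_adjust(0x80, 0x28) shifts 0x80
	 * right until it overlaps the available-path mask 0x28, returning 0x20. */

	static void my_req_callback(struct ccw_device *cdev, void *data, int rc)
	{
		/* Final result of the request: 0, -ETIME, -EACCES, -ENODEV, ... */
	}

	static void my_start_internal_io(struct ccw_device *cdev, struct ccw1 *cp)
	{
		struct ccw_request *req = &cdev->private->req;

		memset(req, 0, sizeof(*req));	/* clean slate (illustrative) */
		req->cp		= cp;		/* channel program to start */
		req->timeout	= 10 * HZ;	/* per-attempt timeout (assumed) */
		req->maxretries	= 5;		/* retries per path (assumed) */
		req->lpm	= 0x80;		/* logical path mask (assumed) */
		req->singlepath	= 1;		/* try one path at a time */
		req->callback	= my_req_callback;

		ccw_request_start(cdev);	/* completion is reported through
						 * ccw_request_handler(), which
						 * ends up in my_req_callback() */
	}
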
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
new file mode 100644
index 000000000..93e22785a
--- /dev/null
+++ b/drivers/s390/cio/chp.c
@@ -0,0 +1,834 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 1999, 2010
+ * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <asm/chpid.h>
+#include <asm/sclp.h>
+#include <asm/crw.h>
+
+#include "cio.h"
+#include "css.h"
+#include "ioasm.h"
+#include "cio_debug.h"
+#include "chp.h"
+
+#define to_channelpath(device) container_of(device, struct channel_path, dev)
+#define CHP_INFO_UPDATE_INTERVAL 1*HZ
+
+enum cfg_task_t {
+ cfg_none,
+ cfg_configure,
+ cfg_deconfigure
+};
+
+/* Map for pending configure tasks. */
+static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
+static DEFINE_SPINLOCK(cfg_lock);
+
+/* Map for channel-path status. */
+static struct sclp_chp_info chp_info;
+static DEFINE_MUTEX(info_lock);
+
+/* Time after which channel-path status may be outdated. */
+static unsigned long chp_info_expires;
+
+static struct work_struct cfg_work;
+
+/* Wait queue for configure completion events. */
+static wait_queue_head_t cfg_wait_queue;
+
+/* Set vary state for given chpid. */
+static void set_chp_logically_online(struct chp_id chpid, int onoff)
+{
+ chpid_to_chp(chpid)->state = onoff;
+}
+
+/* On success return 0 if channel-path is varied offline, 1 if it is varied
+ * online. Return -ENODEV if channel-path is not registered. */
+int chp_get_status(struct chp_id chpid)
+{
+ return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
+}
+
+/**
+ * chp_get_sch_opm - return opm for subchannel
+ * @sch: subchannel
+ *
+ * Calculate and return the operational path mask (opm) based on the chpids
+ * used by the subchannel and the status of the associated channel-paths.
+ */
+u8 chp_get_sch_opm(struct subchannel *sch)
+{
+ struct chp_id chpid;
+ int opm;
+ int i;
+
+ opm = 0;
+ chp_id_init(&chpid);
+ for (i = 0; i < 8; i++) {
+ opm <<= 1;
+ chpid.id = sch->schib.pmcw.chpid[i];
+ if (chp_get_status(chpid) != 0)
+ opm |= 1;
+ }
+ return opm;
+}
+EXPORT_SYMBOL_GPL(chp_get_sch_opm);
+
+/**
+ * chp_is_registered - check if a channel-path is registered
+ * @chpid: channel-path ID
+ *
+ * Return non-zero if a channel-path with the given chpid is registered,
+ * zero otherwise.
+ */
+int chp_is_registered(struct chp_id chpid)
+{
+ return chpid_to_chp(chpid) != NULL;
+}
+
+/*
+ * Function: s390_vary_chpid
+ * Varies the specified chpid online or offline
+ */
+static int s390_vary_chpid(struct chp_id chpid, int on)
+{
+ char dbf_text[15];
+ int status;
+
+ sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
+ chpid.id);
+ CIO_TRACE_EVENT(2, dbf_text);
+
+ status = chp_get_status(chpid);
+ if (!on && !status)
+ return 0;
+
+ set_chp_logically_online(chpid, on);
+ chsc_chp_vary(chpid, on);
+ return 0;
+}
+
+/*
+ * Channel measurement related functions
+ */
+static ssize_t chp_measurement_chars_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct channel_path *chp;
+ struct device *device;
+
+ device = kobj_to_dev(kobj);
+ chp = to_channelpath(device);
+ if (chp->cmg == -1)
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
+ sizeof(chp->cmg_chars));
+}
+
+static const struct bin_attribute chp_measurement_chars_attr = {
+ .attr = {
+ .name = "measurement_chars",
+ .mode = S_IRUSR,
+ },
+ .size = sizeof(struct cmg_chars),
+ .read = chp_measurement_chars_read,
+};
+
+static void chp_measurement_copy_block(struct cmg_entry *buf,
+ struct channel_subsystem *css,
+ struct chp_id chpid)
+{
+ void *area;
+ struct cmg_entry *entry, reference_buf;
+ int idx;
+
+ if (chpid.id < 128) {
+ area = css->cub_addr1;
+ idx = chpid.id;
+ } else {
+ area = css->cub_addr2;
+ idx = chpid.id - 128;
+ }
+ entry = area + (idx * sizeof(struct cmg_entry));
+ do {
+ memcpy(buf, entry, sizeof(*entry));
+ memcpy(&reference_buf, entry, sizeof(*entry));
+ } while (reference_buf.values[0] != buf->values[0]);
+}
+
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct channel_path *chp;
+ struct channel_subsystem *css;
+ struct device *device;
+ unsigned int size;
+
+ device = kobj_to_dev(kobj);
+ chp = to_channelpath(device);
+ css = to_css(chp->dev.parent);
+
+ size = sizeof(struct cmg_entry);
+
+ /* Only allow single reads. */
+ if (off || count < size)
+ return 0;
+ chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
+ count = size;
+ return count;
+}
+
+static const struct bin_attribute chp_measurement_attr = {
+ .attr = {
+ .name = "measurement",
+ .mode = S_IRUSR,
+ },
+ .size = sizeof(struct cmg_entry),
+ .read = chp_measurement_read,
+};
+
+void chp_remove_cmg_attr(struct channel_path *chp)
+{
+ device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+ device_remove_bin_file(&chp->dev, &chp_measurement_attr);
+}
+
+int chp_add_cmg_attr(struct channel_path *chp)
+{
+ int ret;
+
+ ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
+ if (ret)
+ return ret;
+ ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
+ if (ret)
+ device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+ return ret;
+}
+
+/*
+ * Files for the channel path entries.
+ */
+static ssize_t chp_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ int status;
+
+ mutex_lock(&chp->lock);
+ status = chp->state;
+ mutex_unlock(&chp->lock);
+
+ return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
+}
+
+static ssize_t chp_status_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_path *cp = to_channelpath(dev);
+ char cmd[10];
+ int num_args;
+ int error;
+
+ num_args = sscanf(buf, "%5s", cmd);
+ if (!num_args)
+ return count;
+
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+
+ if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+ mutex_lock(&cp->lock);
+ error = s390_vary_chpid(cp->chpid, 1);
+ mutex_unlock(&cp->lock);
+ } else if (!strncasecmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
+ mutex_lock(&cp->lock);
+ error = s390_vary_chpid(cp->chpid, 0);
+ mutex_unlock(&cp->lock);
+ } else
+ error = -EINVAL;
+
+ return error < 0 ? error : count;
+}
+
+static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
+
+static ssize_t chp_configure_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *cp;
+ int status;
+
+ cp = to_channelpath(dev);
+ status = chp_info_get_status(cp->chpid);
+ if (status < 0)
+ return status;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", status);
+}
+
+static int cfg_wait_idle(void);
+
+static ssize_t chp_configure_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_path *cp;
+ int val;
+ char delim;
+
+ if (sscanf(buf, "%d %c", &val, &delim) != 1)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ cp = to_channelpath(dev);
+ chp_cfg_schedule(cp->chpid, val);
+ cfg_wait_idle();
+
+ return count;
+}
+
+static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
+
+static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ u8 type;
+
+ mutex_lock(&chp->lock);
+ type = chp->desc.desc;
+ mutex_unlock(&chp->lock);
+ return sprintf(buf, "%x\n", type);
+}
+
+static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
+
+static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+
+ if (!chp)
+ return 0;
+ if (chp->cmg == -1) /* channel measurements not available */
+ return sprintf(buf, "unknown\n");
+ return sprintf(buf, "%x\n", chp->cmg);
+}
+
+static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
+
+static ssize_t chp_shared_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+
+ if (!chp)
+ return 0;
+ if (chp->shared == -1) /* channel measurements not available */
+ return sprintf(buf, "unknown\n");
+ return sprintf(buf, "%x\n", chp->shared);
+}
+
+static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
+
+static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
+
+static ssize_t chp_chid_external_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_path *chp = to_channelpath(dev);
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+ else
+ rc = 0;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+
+static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
+ ssize_t rc;
+
+ mutex_lock(&chp->lock);
+ rc = memory_read_from_buffer(buf, count, &off, chp->desc_fmt3.util_str,
+ sizeof(chp->desc_fmt3.util_str));
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+static BIN_ATTR_RO(util_string,
+ sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
+
+static struct bin_attribute *chp_bin_attrs[] = {
+ &bin_attr_util_string,
+ NULL,
+};
+
+static struct attribute *chp_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_configure.attr,
+ &dev_attr_type.attr,
+ &dev_attr_cmg.attr,
+ &dev_attr_shared.attr,
+ &dev_attr_chid.attr,
+ &dev_attr_chid_external.attr,
+ NULL,
+};
+static struct attribute_group chp_attr_group = {
+ .attrs = chp_attrs,
+ .bin_attrs = chp_bin_attrs,
+};
+static const struct attribute_group *chp_attr_groups[] = {
+ &chp_attr_group,
+ NULL,
+};
+
+static void chp_release(struct device *dev)
+{
+ struct channel_path *cp;
+
+ cp = to_channelpath(dev);
+ kfree(cp);
+}
+
+/**
+ * chp_update_desc - update channel-path description
+ * @chp: channel-path
+ *
+ * Update the channel-path description of the specified channel-path
+ * including channel measurement related information.
+ * Return zero on success, non-zero otherwise.
+ */
+int chp_update_desc(struct channel_path *chp)
+{
+ int rc;
+
+ rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc);
+ if (rc)
+ return rc;
+
+ /*
+ * Fetching the following data is optional. Not all machines or
+ * hypervisors implement the required chsc commands.
+ */
+ chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+ chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3);
+ chsc_get_channel_measurement_chars(chp);
+
+ return 0;
+}
+
+/**
+ * chp_new - register a new channel-path
+ * @chpid: channel-path ID
+ *
+ * Create and register data structure representing new channel-path. Return
+ * zero on success, non-zero otherwise.
+ */
+int chp_new(struct chp_id chpid)
+{
+ struct channel_subsystem *css = css_by_id(chpid.cssid);
+ struct channel_path *chp;
+ int ret = 0;
+
+ mutex_lock(&css->mutex);
+ if (chp_is_registered(chpid))
+ goto out;
+
+ chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
+ if (!chp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* fill in status, etc. */
+ chp->chpid = chpid;
+ chp->state = 1;
+ chp->dev.parent = &css->device;
+ chp->dev.groups = chp_attr_groups;
+ chp->dev.release = chp_release;
+ mutex_init(&chp->lock);
+
+ /* Obtain channel path description and fill it in. */
+ ret = chp_update_desc(chp);
+ if (ret)
+ goto out_free;
+ if ((chp->desc.flags & 0x80) == 0) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+ dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
+
+ /* make it known to the system */
+ ret = device_register(&chp->dev);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
+ chpid.cssid, chpid.id, ret);
+ put_device(&chp->dev);
+ goto out;
+ }
+
+ if (css->cm_enabled) {
+ ret = chp_add_cmg_attr(chp);
+ if (ret) {
+ device_unregister(&chp->dev);
+ goto out;
+ }
+ }
+ css->chps[chpid.id] = chp;
+ goto out;
+out_free:
+ kfree(chp);
+out:
+ mutex_unlock(&css->mutex);
+ return ret;
+}
+
+/**
+ * chp_get_chp_desc - return newly allocated channel-path description
+ * @chpid: channel-path ID
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel-path ID. Return %NULL on error.
+ */
+struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid)
+{
+ struct channel_path *chp;
+ struct channel_path_desc_fmt0 *desc;
+
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ return NULL;
+ desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ mutex_lock(&chp->lock);
+ memcpy(desc, &chp->desc, sizeof(*desc));
+ mutex_unlock(&chp->lock);
+ return desc;
+}
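+
+/*
+ * Example usage (hypothetical caller): the returned descriptor is a
+ * kmalloc()ed copy and must be freed by the caller, e.g.:
+ *
+ *	struct channel_path_desc_fmt0 *desc;
+ *
+ *	desc = chp_get_chp_desc(chpid);
+ *	if (desc) {
+ *		... evaluate desc->flags, desc->desc, ...
+ *		kfree(desc);
+ *	}
+ */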
+
+/**
+ * chp_process_crw - process channel-path status change
+ * @crw0: channel report-word to handle
+ * @crw1: second channel-report word (always NULL)
+ * @overflow: crw overflow indication
+ *
+ * Handle channel-report-words indicating that the status of a channel-path
+ * has changed.
+ */
+static void chp_process_crw(struct crw *crw0, struct crw *crw1,
+ int overflow)
+{
+ struct chp_id chpid;
+
+ if (overflow) {
+ css_schedule_eval_all();
+ return;
+ }
+ CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+ crw0->erc, crw0->rsid);
+ /*
+ * Check for solicited machine checks. These are
+ * created by reset channel path and need not be
+ * handled here.
+ */
+ if (crw0->slct) {
+ CIO_CRW_EVENT(2, "solicited machine check for "
+ "channel path %02X\n", crw0->rsid);
+ return;
+ }
+ chp_id_init(&chpid);
+ chpid.id = crw0->rsid;
+ switch (crw0->erc) {
+ case CRW_ERC_IPARM: /* Path has come. */
+ case CRW_ERC_INIT:
+ chp_new(chpid);
+ chsc_chp_online(chpid);
+ break;
+ case CRW_ERC_PERRI: /* Path has gone. */
+ case CRW_ERC_PERRN:
+ chsc_chp_offline(chpid);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
+ crw0->erc);
+ }
+}
+
+int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
+{
+ int i;
+ int mask;
+
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (!(ssd->path_mask & mask))
+ continue;
+ if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
+ continue;
+ if ((ssd->fla_valid_mask & mask) &&
+ ((ssd->fla[i] & link->fla_mask) != link->fla))
+ continue;
+ return mask;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
+
+static inline int info_bit_num(struct chp_id id)
+{
+ return id.id + id.cssid * (__MAX_CHPID + 1);
+}
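+
+/*
+ * Example (illustrative): with __MAX_CHPID being 255 (asm/chpid.h),
+ * chpid 1.4a maps to bit number 0x4a + 1 * 256 = 330 in the chp_info
+ * bit fields used below.
+ */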
+
+/* Force chp_info refresh on next call to info_validate(). */
+static void info_expire(void)
+{
+ mutex_lock(&info_lock);
+ chp_info_expires = jiffies - 1;
+ mutex_unlock(&info_lock);
+}
+
+/* Ensure that chp_info is up-to-date. */
+static int info_update(void)
+{
+ int rc;
+
+ mutex_lock(&info_lock);
+ rc = 0;
+ if (time_after(jiffies, chp_info_expires)) {
+ /* Data is too old, update. */
+ rc = sclp_chp_read_info(&chp_info);
+ chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
+ }
+ mutex_unlock(&info_lock);
+
+ return rc;
+}
+
+/**
+ * chp_info_get_status - retrieve configure status of a channel-path
+ * @chpid: channel-path ID
+ *
+ * On success, return 0 for standby, 1 for configured, 2 for reserved,
+ * 3 for not recognized. Return negative error code on error.
+ */
+int chp_info_get_status(struct chp_id chpid)
+{
+ int rc;
+ int bit;
+
+ rc = info_update();
+ if (rc)
+ return rc;
+
+ bit = info_bit_num(chpid);
+ mutex_lock(&info_lock);
+ if (!chp_test_bit(chp_info.recognized, bit))
+ rc = CHP_STATUS_NOT_RECOGNIZED;
+ else if (chp_test_bit(chp_info.configured, bit))
+ rc = CHP_STATUS_CONFIGURED;
+ else if (chp_test_bit(chp_info.standby, bit))
+ rc = CHP_STATUS_STANDBY;
+ else
+ rc = CHP_STATUS_RESERVED;
+ mutex_unlock(&info_lock);
+
+ return rc;
+}
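+
+/*
+ * Example usage (hypothetical caller, example chpid 0.4a):
+ *
+ *	struct chp_id chpid;
+ *	int status;
+ *
+ *	chp_id_init(&chpid);
+ *	chpid.id = 0x4a;
+ *	status = chp_info_get_status(chpid);
+ *	if (status == CHP_STATUS_CONFIGURED)
+ *		... the channel path is configured ...
+ */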
+
+/* Return configure task for chpid. */
+static enum cfg_task_t cfg_get_task(struct chp_id chpid)
+{
+ return chp_cfg_task[chpid.cssid][chpid.id];
+}
+
+/* Set configure task for chpid. */
+static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
+{
+ chp_cfg_task[chpid.cssid][chpid.id] = cfg;
+}
+
+/* Fetch the first configure task. Set chpid accordingly. */
+static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid)
+{
+ enum cfg_task_t t = cfg_none;
+
+ chp_id_for_each(chpid) {
+ t = cfg_get_task(*chpid);
+ if (t != cfg_none)
+ break;
+ }
+
+ return t;
+}
+
+/* Perform one configure/deconfigure request. Reschedule work function until
+ * last request. */
+static void cfg_func(struct work_struct *work)
+{
+ struct chp_id chpid;
+ enum cfg_task_t t;
+ int rc;
+
+ spin_lock(&cfg_lock);
+ t = chp_cfg_fetch_task(&chpid);
+ spin_unlock(&cfg_lock);
+
+ switch (t) {
+ case cfg_configure:
+ rc = sclp_chp_configure(chpid);
+ if (rc)
+ CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
+ "%d\n", chpid.cssid, chpid.id, rc);
+ else {
+ info_expire();
+ chsc_chp_online(chpid);
+ }
+ break;
+ case cfg_deconfigure:
+ rc = sclp_chp_deconfigure(chpid);
+ if (rc)
+ CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
+ "%d\n", chpid.cssid, chpid.id, rc);
+ else {
+ info_expire();
+ chsc_chp_offline(chpid);
+ }
+ break;
+ case cfg_none:
+ /* Get updated information after last change. */
+ info_update();
+ wake_up_interruptible(&cfg_wait_queue);
+ return;
+ }
+ spin_lock(&cfg_lock);
+ if (t == cfg_get_task(chpid))
+ cfg_set_task(chpid, cfg_none);
+ spin_unlock(&cfg_lock);
+ schedule_work(&cfg_work);
+}
+
+/**
+ * chp_cfg_schedule - schedule chpid configuration request
+ * @chpid: channel-path ID
+ * @configure: Non-zero for configure, zero for deconfigure
+ *
+ * Schedule a channel-path configuration/deconfiguration request.
+ */
+void chp_cfg_schedule(struct chp_id chpid, int configure)
+{
+ CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
+ configure);
+ spin_lock(&cfg_lock);
+ cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
+ spin_unlock(&cfg_lock);
+ schedule_work(&cfg_work);
+}
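+
+/*
+ * Example usage (hypothetical caller): this mirrors what
+ * chp_configure_write() above does for a "0" or "1" written to the
+ * configure attribute, i.e. queue a request that cfg_func() processes:
+ *
+ *	chp_cfg_schedule(chpid, 0);
+ *
+ * where 0 requests deconfiguration and 1 requests configuration.
+ */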
+
+/**
+ * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
+ * @chpid: channel-path ID
+ *
+ * Cancel an active channel-path deconfiguration request if it has not yet
+ * been performed.
+ */
+void chp_cfg_cancel_deconfigure(struct chp_id chpid)
+{
+ CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
+ spin_lock(&cfg_lock);
+ if (cfg_get_task(chpid) == cfg_deconfigure)
+ cfg_set_task(chpid, cfg_none);
+ spin_unlock(&cfg_lock);
+}
+
+static bool cfg_idle(void)
+{
+ struct chp_id chpid;
+ enum cfg_task_t t;
+
+ spin_lock(&cfg_lock);
+ t = chp_cfg_fetch_task(&chpid);
+ spin_unlock(&cfg_lock);
+
+ return t == cfg_none;
+}
+
+static int cfg_wait_idle(void)
+{
+ if (wait_event_interruptible(cfg_wait_queue, cfg_idle()))
+ return -ERESTARTSYS;
+ return 0;
+}
+
+static int __init chp_init(void)
+{
+ struct chp_id chpid;
+ int state, ret;
+
+ ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
+ if (ret)
+ return ret;
+ INIT_WORK(&cfg_work, cfg_func);
+ init_waitqueue_head(&cfg_wait_queue);
+ if (info_update())
+ return 0;
+ /* Register available channel-paths. */
+ chp_id_for_each(&chpid) {
+ state = chp_info_get_status(chpid);
+ if (state == CHP_STATUS_CONFIGURED ||
+ state == CHP_STATUS_STANDBY)
+ chp_new(chpid);
+ }
+
+ return 0;
+}
+
+subsys_initcall(chp_init);
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
new file mode 100644
index 000000000..20259f3fb
--- /dev/null
+++ b/drivers/s390/cio/chp.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2007, 2010
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_CHP_H
+#define S390_CHP_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <asm/chpid.h>
+#include "chsc.h"
+#include "css.h"
+
+#define CHP_STATUS_STANDBY 0
+#define CHP_STATUS_CONFIGURED 1
+#define CHP_STATUS_RESERVED 2
+#define CHP_STATUS_NOT_RECOGNIZED 3
+
+#define CHP_ONLINE 0
+#define CHP_OFFLINE 1
+#define CHP_VARY_ON 2
+#define CHP_VARY_OFF 3
+
+struct chp_link {
+ struct chp_id chpid;
+ u32 fla_mask;
+ u16 fla;
+};
+
+static inline int chp_test_bit(u8 *bitmap, int num)
+{
+ int byte = num >> 3;
+ int mask = 128 >> (num & 7);
+
+ return (bitmap[byte] & mask) ? 1 : 0;
+}
+
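+/*
+ * Example (illustrative): chp_test_bit(bitmap, 330) tests byte 41
+ * (330 >> 3) under mask 0x20 (128 >> (330 & 7)); bit number 0 is the
+ * most significant bit of bitmap[0].
+ */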
+
+struct channel_path {
+ struct device dev;
+ struct chp_id chpid;
+ struct mutex lock; /* Serialize access to below members. */
+ int state;
+ struct channel_path_desc_fmt0 desc;
+ struct channel_path_desc_fmt1 desc_fmt1;
+ struct channel_path_desc_fmt3 desc_fmt3;
+ /* Channel-measurement related stuff: */
+ int cmg;
+ int shared;
+ struct cmg_chars cmg_chars;
+};
+
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+ return css_by_id(chpid.cssid)->chps[chpid.id];
+}
+
+int chp_get_status(struct chp_id chpid);
+u8 chp_get_sch_opm(struct subchannel *sch);
+int chp_is_registered(struct chp_id chpid);
+struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid);
+void chp_remove_cmg_attr(struct channel_path *chp);
+int chp_add_cmg_attr(struct channel_path *chp);
+int chp_update_desc(struct channel_path *chp);
+int chp_new(struct chp_id chpid);
+void chp_cfg_schedule(struct chp_id chpid, int configure);
+void chp_cfg_cancel_deconfigure(struct chp_id chpid);
+int chp_info_get_status(struct chp_id chpid);
+int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
+#endif /* S390_CHP_H */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
new file mode 100644
index 000000000..93aa7eabe
--- /dev/null
+++ b/drivers/s390/cio/chsc.c
@@ -0,0 +1,1428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * S/390 common I/O routines -- channel subsystem call
+ *
+ * Copyright IBM Corp. 1999,2012
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+#include <asm/cio.h>
+#include <asm/chpid.h>
+#include <asm/chsc.h>
+#include <asm/crw.h>
+#include <asm/isc.h>
+#include <asm/ebcdic.h>
+#include <asm/ap.h>
+
+#include "css.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chp.h"
+#include "chsc.h"
+
+static void *sei_page;
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
+
+/**
+ * chsc_error_from_response() - convert a chsc response to an error
+ * @response: chsc response code
+ *
+ * Returns an appropriate Linux error code for @response.
+ */
+int chsc_error_from_response(int response)
+{
+ switch (response) {
+ case 0x0001:
+ return 0;
+ case 0x0002:
+ case 0x0003:
+ case 0x0006:
+ case 0x0007:
+ case 0x0008:
+ case 0x000a:
+ case 0x0104:
+ return -EINVAL;
+ case 0x0004:
+ case 0x0106: /* "Wrong Channel Parm" for the op 0x003d */
+ return -EOPNOTSUPP;
+ case 0x000b:
+ case 0x0107: /* "Channel busy" for the op 0x003d */
+ return -EBUSY;
+ case 0x0100:
+ case 0x0102:
+ return -ENOMEM;
+ case 0x0108: /* "HW limit exceeded" for the op 0x003d */
+ return -EUSERS;
+ default:
+ return -EIO;
+ }
+}
+EXPORT_SYMBOL_GPL(chsc_error_from_response);
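+
+/*
+ * Example usage (hypothetical request block "req"): the usual caller
+ * pattern after issuing a CHSC instruction, as used throughout this file:
+ *
+ *	if (chsc(req))
+ *		return -EIO;
+ *	return chsc_error_from_response(req->response.code);
+ */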
+
+struct chsc_ssd_area {
+ struct chsc_header request;
+ u16 :10;
+ u16 ssid:2;
+ u16 :4;
+ u16 f_sch; /* first subchannel */
+ u16 :16;
+ u16 l_sch; /* last subchannel */
+ u32 :32;
+ struct chsc_header response;
+ u32 :32;
+ u8 sch_valid : 1;
+ u8 dev_valid : 1;
+ u8 st : 3; /* subchannel type */
+ u8 zeroes : 3;
+ u8 unit_addr; /* unit address */
+ u16 devno; /* device number */
+ u8 path_mask;
+ u8 fla_valid_mask;
+ u16 sch; /* subchannel */
+ u8 chpid[8]; /* chpids 0-7 */
+ u16 fla[8]; /* full link addresses 0-7 */
+} __packed __aligned(PAGE_SIZE);
+
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
+{
+ struct chsc_ssd_area *ssd_area;
+ unsigned long flags;
+ int ccode;
+ int ret;
+ int i;
+ int mask;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ ssd_area = chsc_page;
+ ssd_area->request.length = 0x0010;
+ ssd_area->request.code = 0x0004;
+ ssd_area->ssid = schid.ssid;
+ ssd_area->f_sch = schid.sch_no;
+ ssd_area->l_sch = schid.sch_no;
+
+ ccode = chsc(ssd_area);
+ /* Check response. */
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ ret = chsc_error_from_response(ssd_area->response.code);
+ if (ret != 0) {
+ CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
+ ssd_area->response.code);
+ goto out;
+ }
+ if (!ssd_area->sch_valid) {
+ ret = -ENODEV;
+ goto out;
+ }
+ /* Copy data */
+ ret = 0;
+ memset(ssd, 0, sizeof(struct chsc_ssd_info));
+ if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
+ (ssd_area->st != SUBCHANNEL_TYPE_MSG))
+ goto out;
+ ssd->path_mask = ssd_area->path_mask;
+ ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (ssd_area->path_mask & mask) {
+ chp_id_init(&ssd->chpid[i]);
+ ssd->chpid[i].id = ssd_area->chpid[i];
+ }
+ if (ssd_area->fla_valid_mask & mask)
+ ssd->fla[i] = ssd_area->fla[i];
+ }
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+/**
+ * chsc_ssqd() - store subchannel QDIO data (SSQD)
+ * @schid: id of the subchannel on which SSQD is performed
+ * @ssqd: request and response block for SSQD
+ *
+ * Returns 0 on success.
+ */
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
+{
+ memset(ssqd, 0, sizeof(*ssqd));
+ ssqd->request.length = 0x0010;
+ ssqd->request.code = 0x0024;
+ ssqd->first_sch = schid.sch_no;
+ ssqd->last_sch = schid.sch_no;
+ ssqd->ssid = schid.ssid;
+
+ if (chsc(ssqd))
+ return -EIO;
+
+ return chsc_error_from_response(ssqd->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_ssqd);
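+
+/*
+ * Example usage (hypothetical caller; schid is an assumed subchannel id
+ * and the allocation is an example, the area only has to be page aligned
+ * as declared in chsc.h):
+ *
+ *	struct chsc_ssqd_area *ssqd;
+ *	int rc;
+ *
+ *	ssqd = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ *	if (!ssqd)
+ *		return -ENOMEM;
+ *	rc = chsc_ssqd(schid, ssqd);
+ *	if (!rc)
+ *		... evaluate ssqd->qdio_ssqd ...
+ *	free_page((unsigned long)ssqd);
+ */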
+
+/**
+ * chsc_sadc() - set adapter device controls (SADC)
+ * @schid: id of the subchannel on which SADC is performed
+ * @scssc: request and response block for SADC
+ * @summary_indicator_addr: summary indicator address
+ * @subchannel_indicator_addr: subchannel indicator address
+ * @isc: Interruption Subclass for this subchannel
+ *
+ * Returns 0 on success.
+ */
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc)
+{
+ memset(scssc, 0, sizeof(*scssc));
+ scssc->request.length = 0x0fe0;
+ scssc->request.code = 0x0021;
+ scssc->operation_code = 0;
+
+ scssc->summary_indicator_addr = summary_indicator_addr;
+ scssc->subchannel_indicator_addr = subchannel_indicator_addr;
+
+ scssc->ks = PAGE_DEFAULT_KEY >> 4;
+ scssc->kc = PAGE_DEFAULT_KEY >> 4;
+ scssc->isc = isc;
+ scssc->schid = schid;
+
+ /* enable the time delay disablement facility */
+ if (css_general_characteristics.aif_tdd)
+ scssc->word_with_d_bit = 0x10000000;
+
+ if (chsc(scssc))
+ return -EIO;
+
+ return chsc_error_from_response(scssc->response.code);
+}
+EXPORT_SYMBOL_GPL(chsc_sadc);
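+
+/*
+ * Example usage (hypothetical caller; scssc, summary_addr, sch_ind_addr
+ * and isc are caller-provided example values):
+ *
+ *	rc = chsc_sadc(schid, scssc, summary_addr, sch_ind_addr, isc);
+ */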
+
+static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
+{
+ spin_lock_irq(sch->lock);
+ if (sch->driver && sch->driver->chp_event)
+ if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
+ goto out_unreg;
+ spin_unlock_irq(sch->lock);
+ return 0;
+
+out_unreg:
+ sch->lpm = 0;
+ spin_unlock_irq(sch->lock);
+ css_schedule_eval(sch->schid);
+ return 0;
+}
+
+void chsc_chp_offline(struct chp_id chpid)
+{
+ struct channel_path *chp = chpid_to_chp(chpid);
+ struct chp_link link;
+ char dbf_txt[15];
+
+ sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
+ CIO_TRACE_EVENT(2, dbf_txt);
+
+ if (chp_get_status(chpid) <= 0)
+ return;
+ memset(&link, 0, sizeof(struct chp_link));
+ link.chpid = chpid;
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+
+ for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
+}
+
+static int __s390_process_res_acc(struct subchannel *sch, void *data)
+{
+ spin_lock_irq(sch->lock);
+ if (sch->driver && sch->driver->chp_event)
+ sch->driver->chp_event(sch, data, CHP_ONLINE);
+ spin_unlock_irq(sch->lock);
+
+ return 0;
+}
+
+static void s390_process_res_acc(struct chp_link *link)
+{
+ char dbf_txt[15];
+
+ sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
+ link->chpid.id);
+ CIO_TRACE_EVENT(2, dbf_txt);
+ if (link->fla != 0) {
+ sprintf(dbf_txt, "fla%x", link->fla);
+ CIO_TRACE_EVENT(2, dbf_txt);
+ }
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+ /*
+ * I/O resources may have become accessible.
+ * Scan through all subchannels that may be concerned and
+ * do a validation on those.
+ * The more information we have (in link), the less scanning
+ * we will have to do.
+ */
+ for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
+ css_schedule_reprobe();
+}
+
+struct chsc_sei_nt0_area {
+ u8 flags;
+ u8 vf; /* validity flags */
+ u8 rs; /* reporting source */
+ u8 cc; /* content code */
+ u16 fla; /* full link address */
+ u16 rsid; /* reporting source id */
+ u32 reserved1;
+ u32 reserved2;
+ /* ccdf has to be big enough for a link-incident record */
+ u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
+} __packed;
+
+struct chsc_sei_nt2_area {
+ u8 flags; /* p and v bit */
+ u8 reserved1;
+ u8 reserved2;
+ u8 cc; /* content code */
+ u32 reserved3[13];
+ u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
+} __packed;
+
+#define CHSC_SEI_NT0 (1ULL << 63)
+#define CHSC_SEI_NT2 (1ULL << 61)
+
+struct chsc_sei {
+ struct chsc_header request;
+ u32 reserved1;
+ u64 ntsm; /* notification type mask */
+ struct chsc_header response;
+ u32 :24;
+ u8 nt;
+ union {
+ struct chsc_sei_nt0_area nt0_area;
+ struct chsc_sei_nt2_area nt2_area;
+ u8 nt_area[PAGE_SIZE - 24];
+ } u;
+} __packed __aligned(PAGE_SIZE);
+
+/*
+ * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
+ */
+
+#define LIR_IQ_CLASS_INFO 0
+#define LIR_IQ_CLASS_DEGRADED 1
+#define LIR_IQ_CLASS_NOT_OPERATIONAL 2
+
+struct lir {
+ struct {
+ u32 null:1;
+ u32 reserved:3;
+ u32 class:2;
+ u32 reserved2:2;
+ } __packed iq;
+ u32 ic:8;
+ u32 reserved:16;
+ struct node_descriptor incident_node;
+ struct node_descriptor attached_node;
+ u8 reserved2[32];
+} __packed;
+
+#define PARAMS_LEN 10 /* PARAMS=xx,xxxxxx */
+#define NODEID_LEN 35 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
+
+/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
+static char *store_ebcdic(char *dest, const char *src, unsigned long len,
+ char delim)
+{
+ memcpy(dest, src, len);
+ EBCASC(dest, len);
+
+ if (delim)
+ dest[len++] = delim;
+
+ return dest + len;
+}
+
+/* Format node ID and parameters for output in LIR log message. */
+static void format_node_data(char *params, char *id, struct node_descriptor *nd)
+{
+ memset(params, 0, PARAMS_LEN);
+ memset(id, 0, NODEID_LEN);
+
+ if (nd->validity != ND_VALIDITY_VALID) {
+ strncpy(params, "n/a", PARAMS_LEN - 1);
+ strncpy(id, "n/a", NODEID_LEN - 1);
+ return;
+ }
+
+ /* PARAMS=xx,xxxxxx */
+ snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
+ /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
+ id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
+ id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
+ id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
+ id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
+ id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
+ sprintf(id, "%04X", nd->tag);
+}
+
+static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
+{
+ struct lir *lir = (struct lir *) &sei_area->ccdf;
+ char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
+ aunodeid[NODEID_LEN];
+
+ CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
+ sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
+
+ /* Ignore NULL Link Incident Records. */
+ if (lir->iq.null)
+ return;
+
+ /* Inform user that a link requires maintenance actions because it has
+ * become degraded or not operational. Note that this log message is
+ * the primary intention behind a Link Incident Record. */
+
+ format_node_data(iuparams, iunodeid, &lir->incident_node);
+ format_node_data(auparams, aunodeid, &lir->attached_node);
+
+ switch (lir->iq.class) {
+ case LIR_IQ_CLASS_DEGRADED:
+ pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
+ "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
+ sei_area->rs, sei_area->rsid, lir->ic, iuparams,
+ iunodeid, auparams, aunodeid);
+ break;
+ case LIR_IQ_CLASS_NOT_OPERATIONAL:
+ pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
+ "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
+ sei_area->rs, sei_area->rsid, lir->ic, iuparams,
+ iunodeid, auparams, aunodeid);
+ break;
+ default:
+ break;
+ }
+}
+
+static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+{
+ struct channel_path *chp;
+ struct chp_link link;
+ struct chp_id chpid;
+ int status;
+
+ CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
+ "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
+ if (sei_area->rs != 4)
+ return;
+ chp_id_init(&chpid);
+ chpid.id = sei_area->rsid;
+ /* allocate a new channel path structure, if needed */
+ status = chp_get_status(chpid);
+ if (!status)
+ return;
+
+ if (status < 0) {
+ chp_new(chpid);
+ } else {
+ chp = chpid_to_chp(chpid);
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
+ memset(&link, 0, sizeof(struct chp_link));
+ link.chpid = chpid;
+ if ((sei_area->vf & 0xc0) != 0) {
+ link.fla = sei_area->fla;
+ if ((sei_area->vf & 0xc0) == 0xc0)
+ /* full link address */
+ link.fla_mask = 0xffff;
+ else
+ /* link address */
+ link.fla_mask = 0xff00;
+ }
+ s390_process_res_acc(&link);
+}
+
+static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ struct channel_path *chp;
+ struct chp_id chpid;
+ u8 *data;
+ int num;
+
+ CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
+ if (sei_area->rs != 0)
+ return;
+ data = sei_area->ccdf;
+ chp_id_init(&chpid);
+ for (num = 0; num <= __MAX_CHPID; num++) {
+ if (!chp_test_bit(data, num))
+ continue;
+ chpid.id = num;
+
+ CIO_CRW_EVENT(4, "Update information for channel path "
+ "%x.%02x\n", chpid.cssid, chpid.id);
+ chp = chpid_to_chp(chpid);
+ if (!chp) {
+ chp_new(chpid);
+ continue;
+ }
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
+}
+
+struct chp_config_data {
+ u8 map[32];
+ u8 op;
+ u8 pc;
+};
+
+static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
+{
+ struct chp_config_data *data;
+ struct chp_id chpid;
+ int num;
+ char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
+
+ CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
+ if (sei_area->rs != 0)
+ return;
+ data = (struct chp_config_data *) &(sei_area->ccdf);
+ chp_id_init(&chpid);
+ for (num = 0; num <= __MAX_CHPID; num++) {
+ if (!chp_test_bit(data->map, num))
+ continue;
+ chpid.id = num;
+ pr_notice("Processing %s for channel path %x.%02x\n",
+ events[data->op], chpid.cssid, chpid.id);
+ switch (data->op) {
+ case 0:
+ chp_cfg_schedule(chpid, 1);
+ break;
+ case 1:
+ chp_cfg_schedule(chpid, 0);
+ break;
+ case 2:
+ chp_cfg_cancel_deconfigure(chpid);
+ break;
+ }
+ }
+}
+
+static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
+{
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm change notification\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_update_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: updating change notification"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
+{
+ int ret;
+
+ CIO_CRW_EVENT(4, "chsc: scm available information\n");
+ if (sei_area->rs != 7)
+ return;
+
+ ret = scm_process_availability_information();
+ if (ret)
+ CIO_CRW_EVENT(0, "chsc: process availability information"
+ " failed (rc=%d).\n", ret);
+}
+
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+ CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+ if (sei_area->rs != 5)
+ return;
+
+ ap_bus_cfg_chg();
+}
+
+static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
+{
+ switch (sei_area->cc) {
+ case 1:
+ zpci_event_error(sei_area->ccdf);
+ break;
+ case 2:
+ zpci_event_availability(sei_area->ccdf);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
+ sei_area->cc);
+ break;
+ }
+}
+
+static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
+{
+ /* which kind of information was stored? */
+ switch (sei_area->cc) {
+ case 1: /* link incident*/
+ chsc_process_sei_link_incident(sei_area);
+ break;
+ case 2: /* i/o resource accessibility */
+ chsc_process_sei_res_acc(sei_area);
+ break;
+ case 3: /* ap config changed */
+ chsc_process_sei_ap_cfg_chg(sei_area);
+ break;
+ case 7: /* channel-path-availability information */
+ chsc_process_sei_chp_avail(sei_area);
+ break;
+ case 8: /* channel-path-configuration notification */
+ chsc_process_sei_chp_config(sei_area);
+ break;
+ case 12: /* scm change notification */
+ chsc_process_sei_scm_change(sei_area);
+ break;
+ case 14: /* scm available notification */
+ chsc_process_sei_scm_avail(sei_area);
+ break;
+ default: /* other stuff */
+ CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
+ sei_area->cc);
+ break;
+ }
+
+ /* Check if we might have lost some information. */
+ if (sei_area->flags & 0x40) {
+ CIO_CRW_EVENT(2, "chsc: event overflow\n");
+ css_schedule_eval_all();
+ }
+}
+
+static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
+{
+ static int ntsm_unsupported;
+
+ while (true) {
+ memset(sei, 0, sizeof(*sei));
+ sei->request.length = 0x0010;
+ sei->request.code = 0x000e;
+ if (!ntsm_unsupported)
+ sei->ntsm = ntsm;
+
+ if (chsc(sei))
+ break;
+
+ if (sei->response.code != 0x0001) {
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+ sei->response.code, sei->ntsm);
+
+ if (sei->response.code == 3 && sei->ntsm) {
+ /* Fallback for old firmware. */
+ ntsm_unsupported = 1;
+ continue;
+ }
+ break;
+ }
+
+ CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
+ switch (sei->nt) {
+ case 0:
+ chsc_process_sei_nt0(&sei->u.nt0_area);
+ break;
+ case 2:
+ chsc_process_sei_nt2(&sei->u.nt2_area);
+ break;
+ default:
+ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
+ break;
+ }
+
+ if (!(sei->u.nt0_area.flags & 0x80))
+ break;
+ }
+}
+
+/*
+ * Handle channel subsystem related CRWs.
+ * Use store event information to find out what's going on.
+ *
+ * Note: Access to sei_page is serialized through machine check handler
+ * thread, so no need for locking.
+ */
+static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
+{
+ struct chsc_sei *sei = sei_page;
+
+ if (overflow) {
+ css_schedule_eval_all();
+ return;
+ }
+ CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+ crw0->erc, crw0->rsid);
+
+ CIO_TRACE_EVENT(2, "prcss");
+ chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
+}
+
+void chsc_chp_online(struct chp_id chpid)
+{
+ struct channel_path *chp = chpid_to_chp(chpid);
+ struct chp_link link;
+ char dbf_txt[15];
+
+ sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
+ CIO_TRACE_EVENT(2, dbf_txt);
+
+ if (chp_get_status(chpid) != 0) {
+ memset(&link, 0, sizeof(struct chp_link));
+ link.chpid = chpid;
+ /* Wait until previous actions have settled. */
+ css_wait_for_slow_path();
+
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+
+ for_each_subchannel_staged(__s390_process_res_acc, NULL,
+ &link);
+ css_schedule_reprobe();
+ }
+}
+
+static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+ struct chp_id chpid, int on)
+{
+ unsigned long flags;
+ struct chp_link link;
+
+ memset(&link, 0, sizeof(struct chp_link));
+ link.chpid = chpid;
+ spin_lock_irqsave(sch->lock, flags);
+ if (sch->driver && sch->driver->chp_event)
+ sch->driver->chp_event(sch, &link,
+ on ? CHP_VARY_ON : CHP_VARY_OFF);
+ spin_unlock_irqrestore(sch->lock, flags);
+}
+
+static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
+{
+ struct chp_id *chpid = data;
+
+ __s390_subchannel_vary_chpid(sch, *chpid, 0);
+ return 0;
+}
+
+static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
+{
+ struct chp_id *chpid = data;
+
+ __s390_subchannel_vary_chpid(sch, *chpid, 1);
+ return 0;
+}
+
+/**
+ * chsc_chp_vary - propagate channel-path vary operation to subchannels
+ * @chpid: channel-path ID
+ * @on: non-zero for vary online, zero for vary offline
+ */
+int chsc_chp_vary(struct chp_id chpid, int on)
+{
+ struct channel_path *chp = chpid_to_chp(chpid);
+
+ /*
+ * Redo PathVerification on the devices the chpid connects to
+ */
+ if (on) {
+ /* Try to update the channel path description. */
+ chp_update_desc(chp);
+ for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
+ NULL, &chpid);
+ css_schedule_reprobe();
+ } else
+ for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
+ NULL, &chpid);
+
+ return 0;
+}
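+
+/*
+ * Example usage (hypothetical caller): varying an example channel path
+ * online (on=1):
+ *
+ *	struct chp_id chpid;
+ *
+ *	chp_id_init(&chpid);
+ *	chpid.id = 0x4a;
+ *	chsc_chp_vary(chpid, 1);
+ */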
+
+static void
+chsc_remove_cmg_attr(struct channel_subsystem *css)
+{
+ int i;
+
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ if (!css->chps[i])
+ continue;
+ chp_remove_cmg_attr(css->chps[i]);
+ }
+}
+
+static int
+chsc_add_cmg_attr(struct channel_subsystem *css)
+{
+ int i, ret;
+
+ ret = 0;
+ for (i = 0; i <= __MAX_CHPID; i++) {
+ if (!css->chps[i])
+ continue;
+ ret = chp_add_cmg_attr(css->chps[i]);
+ if (ret)
+ goto cleanup;
+ }
+ return ret;
+cleanup:
+ for (--i; i >= 0; i--) {
+ if (!css->chps[i])
+ continue;
+ chp_remove_cmg_attr(css->chps[i]);
+ }
+ return ret;
+}
+
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
+{
+ struct {
+ struct chsc_header request;
+ u32 operation_code : 2;
+ u32 : 30;
+ u32 key : 4;
+ u32 : 28;
+ u32 zeroes1;
+ u32 cub_addr1;
+ u32 zeroes2;
+ u32 cub_addr2;
+ u32 reserved[13];
+ struct chsc_header response;
+ u32 status : 8;
+ u32 : 4;
+ u32 fmt : 4;
+ u32 : 16;
+ } *secm_area;
+ unsigned long flags;
+ int ret, ccode;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ secm_area = chsc_page;
+ secm_area->request.length = 0x0050;
+ secm_area->request.code = 0x0016;
+
+ secm_area->key = PAGE_DEFAULT_KEY >> 4;
+ secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
+ secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
+
+ secm_area->operation_code = enable ? 0 : 1;
+
+ ccode = chsc(secm_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+
+ switch (secm_area->response.code) {
+ case 0x0102:
+ case 0x0103:
+ ret = -EINVAL;
+ break;
+ default:
+ ret = chsc_error_from_response(secm_area->response.code);
+ }
+ if (ret != 0)
+ CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
+ secm_area->response.code);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+int
+chsc_secm(struct channel_subsystem *css, int enable)
+{
+ int ret;
+
+ if (enable && !css->cm_enabled) {
+ css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!css->cub_addr1 || !css->cub_addr2) {
+ free_page((unsigned long)css->cub_addr1);
+ free_page((unsigned long)css->cub_addr2);
+ return -ENOMEM;
+ }
+ }
+ ret = __chsc_do_secm(css, enable);
+ if (!ret) {
+ css->cm_enabled = enable;
+ if (css->cm_enabled) {
+ ret = chsc_add_cmg_attr(css);
+ if (ret) {
+ __chsc_do_secm(css, 0);
+ css->cm_enabled = 0;
+ }
+ } else
+ chsc_remove_cmg_attr(css);
+ }
+ if (!css->cm_enabled) {
+ free_page((unsigned long)css->cub_addr1);
+ free_page((unsigned long)css->cub_addr2);
+ }
+ return ret;
+}
+
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+ int c, int m, void *page)
+{
+ struct chsc_scpd *scpd_area;
+ int ccode, ret;
+
+ if ((rfmt == 1 || rfmt == 0) && c == 1 &&
+ !css_general_characteristics.fcs)
+ return -EINVAL;
+ if ((rfmt == 2) && !css_general_characteristics.cib)
+ return -EINVAL;
+ if ((rfmt == 3) && !css_general_characteristics.util_str)
+ return -EINVAL;
+
+ memset(page, 0, PAGE_SIZE);
+ scpd_area = page;
+ scpd_area->request.length = 0x0010;
+ scpd_area->request.code = 0x0002;
+ scpd_area->cssid = chpid.cssid;
+ scpd_area->first_chpid = chpid.id;
+ scpd_area->last_chpid = chpid.id;
+ scpd_area->m = m;
+ scpd_area->c = c;
+ scpd_area->fmt = fmt;
+ scpd_area->rfmt = rfmt;
+
+ ccode = chsc(scpd_area);
+ if (ccode > 0)
+ return (ccode == 3) ? -ENODEV : -EBUSY;
+
+ ret = chsc_error_from_response(scpd_area->response.code);
+ if (ret)
+ CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
+ scpd_area->response.code);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
+
+#define chsc_det_chp_desc(FMT, c) \
+int chsc_determine_fmt##FMT##_channel_path_desc( \
+ struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc) \
+{ \
+ struct chsc_scpd *scpd_area; \
+ unsigned long flags; \
+ int ret; \
+ \
+ spin_lock_irqsave(&chsc_page_lock, flags); \
+ scpd_area = chsc_page; \
+ ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0, \
+ scpd_area); \
+ if (ret) \
+ goto out; \
+ \
+ memcpy(desc, scpd_area->data, sizeof(*desc)); \
+out: \
+ spin_unlock_irqrestore(&chsc_page_lock, flags); \
+ return ret; \
+}
+
+chsc_det_chp_desc(0, 0)
+chsc_det_chp_desc(1, 1)
+chsc_det_chp_desc(3, 0)
+
+static void
+chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
+ struct cmg_chars *chars)
+{
+ int i, mask;
+
+ for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+ mask = 0x80 >> (i + 3);
+ if (cmcv & mask)
+ chp->cmg_chars.values[i] = chars->values[i];
+ else
+ chp->cmg_chars.values[i] = 0;
+ }
+}
+
+int chsc_get_channel_measurement_chars(struct channel_path *chp)
+{
+ unsigned long flags;
+ int ccode, ret;
+
+ struct {
+ struct chsc_header request;
+ u32 : 24;
+ u32 first_chpid : 8;
+ u32 : 24;
+ u32 last_chpid : 8;
+ u32 zeroes1;
+ struct chsc_header response;
+ u32 zeroes2;
+ u32 not_valid : 1;
+ u32 shared : 1;
+ u32 : 22;
+ u32 chpid : 8;
+ u32 cmcv : 5;
+ u32 : 11;
+ u32 cmgq : 8;
+ u32 cmg : 8;
+ u32 zeroes3;
+ u32 data[NR_MEASUREMENT_CHARS];
+ } *scmc_area;
+
+ chp->shared = -1;
+ chp->cmg = -1;
+
+ if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
+ return -EINVAL;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scmc_area = chsc_page;
+ scmc_area->request.length = 0x0010;
+ scmc_area->request.code = 0x0022;
+ scmc_area->first_chpid = chp->chpid.id;
+ scmc_area->last_chpid = chp->chpid.id;
+
+ ccode = chsc(scmc_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+
+ ret = chsc_error_from_response(scmc_area->response.code);
+ if (ret) {
+ CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
+ scmc_area->response.code);
+ goto out;
+ }
+ if (scmc_area->not_valid)
+ goto out;
+
+ chp->cmg = scmc_area->cmg;
+ chp->shared = scmc_area->shared;
+ if (chp->cmg != 2 && chp->cmg != 3) {
+ /* No cmg-dependent data. */
+ goto out;
+ }
+ chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+ (struct cmg_chars *) &scmc_area->data);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+int __init chsc_init(void)
+{
+ int ret;
+
+ sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sei_page || !chsc_page) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
+ if (ret)
+ goto out_err;
+ return ret;
+out_err:
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
+ return ret;
+}
+
+void __init chsc_init_cleanup(void)
+{
+ crw_unregister_handler(CRW_RSC_CSS);
+ free_page((unsigned long)chsc_page);
+ free_page((unsigned long)sei_page);
+}
+
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
+{
+ int ret;
+
+ sda_area->request.length = 0x0400;
+ sda_area->request.code = 0x0031;
+ sda_area->operation_code = operation_code;
+
+ ret = chsc(sda_area);
+ if (ret > 0) {
+ ret = (ret == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+
+ switch (sda_area->response.code) {
+ case 0x0101:
+ ret = -EOPNOTSUPP;
+ break;
+ default:
+ ret = chsc_error_from_response(sda_area->response.code);
+ }
+out:
+ return ret;
+}
+
+int chsc_enable_facility(int operation_code)
+{
+ struct chsc_sda_area *sda_area;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sda_area = chsc_page;
+
+ ret = __chsc_enable_facility(sda_area, operation_code);
+ if (ret != 0)
+ CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
+ operation_code, sda_area->response.code);
+
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return ret;
+}
+
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid)
+{
+ struct {
+ struct chsc_header request;
+ u8 atype;
+ u32 : 24;
+ u32 reserved1[6];
+ struct chsc_header response;
+ u32 reserved2[3];
+ struct {
+ u8 cssid;
+ u8 iid;
+ u32 : 16;
+ } list[0];
+ } *sdcal_area;
+ int ret;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sdcal_area = chsc_page;
+ sdcal_area->request.length = 0x0020;
+ sdcal_area->request.code = 0x0034;
+ sdcal_area->atype = 4;
+
+ ret = chsc(sdcal_area);
+ if (ret) {
+ ret = (ret == 3) ? -ENODEV : -EBUSY;
+ goto exit;
+ }
+
+ ret = chsc_error_from_response(sdcal_area->response.code);
+ if (ret) {
+ CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n",
+ sdcal_area->response.code);
+ goto exit;
+ }
+
+ if ((addr_t) &sdcal_area->list[idx] <
+ (addr_t) &sdcal_area->response + sdcal_area->response.length) {
+ *cssid = sdcal_area->list[idx].cssid;
+ *iid = sdcal_area->list[idx].iid;
+ } else
+ ret = -ENODEV;
+exit:
+ spin_unlock_irq(&chsc_page_lock);
+ return ret;
+}
+
+struct css_general_char css_general_characteristics;
+struct css_chsc_char css_chsc_characteristics;
+
+int __init
+chsc_determine_css_characteristics(void)
+{
+ unsigned long flags;
+ int result;
+ struct {
+ struct chsc_header request;
+ u32 reserved1;
+ u32 reserved2;
+ u32 reserved3;
+ struct chsc_header response;
+ u32 reserved4;
+ u32 general_char[510];
+ u32 chsc_char[508];
+ } *scsc_area;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ scsc_area = chsc_page;
+ scsc_area->request.length = 0x0010;
+ scsc_area->request.code = 0x0010;
+
+ result = chsc(scsc_area);
+ if (result) {
+ result = (result == 3) ? -ENODEV : -EBUSY;
+ goto exit;
+ }
+
+ result = chsc_error_from_response(scsc_area->response.code);
+ if (result == 0) {
+ memcpy(&css_general_characteristics, scsc_area->general_char,
+ sizeof(css_general_characteristics));
+ memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
+ sizeof(css_chsc_characteristics));
+ } else
+ CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
+ scsc_area->response.code);
+exit:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return result;
+}
+
+EXPORT_SYMBOL_GPL(css_general_characteristics);
+EXPORT_SYMBOL_GPL(css_chsc_characteristics);
+
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
+{
+ struct {
+ struct chsc_header request;
+ unsigned int rsvd0;
+ unsigned int op : 8;
+ unsigned int rsvd1 : 8;
+ unsigned int ctrl : 16;
+ unsigned int rsvd2[5];
+ struct chsc_header response;
+ unsigned int rsvd3[3];
+ u64 clock_delta;
+ unsigned int rsvd4[2];
+ } *rr;
+ int rc;
+
+ memset(page, 0, PAGE_SIZE);
+ rr = page;
+ rr->request.length = 0x0020;
+ rr->request.code = 0x0033;
+ rr->op = op;
+ rr->ctrl = ctrl;
+ rc = chsc(rr);
+ if (rc)
+ return -EIO;
+ rc = (rr->response.code == 0x0001) ? 0 : -EIO;
+ if (clock_delta)
+ *clock_delta = rr->clock_delta;
+ return rc;
+}
+
+int chsc_sstpi(void *page, void *result, size_t size)
+{
+ struct {
+ struct chsc_header request;
+ unsigned int rsvd0[3];
+ struct chsc_header response;
+ char data[];
+ } *rr;
+ int rc;
+
+ memset(page, 0, PAGE_SIZE);
+ rr = page;
+ rr->request.length = 0x0010;
+ rr->request.code = 0x0038;
+ rc = chsc(rr);
+ if (rc)
+ return -EIO;
+ memcpy(result, &rr->data, size);
+ return (rr->response.code == 0x0001) ? 0 : -EIO;
+}
+
+int chsc_stzi(void *page, void *result, size_t size)
+{
+ struct {
+ struct chsc_header request;
+ unsigned int rsvd0[3];
+ struct chsc_header response;
+ char data[];
+ } *rr;
+ int rc;
+
+ memset(page, 0, PAGE_SIZE);
+ rr = page;
+ rr->request.length = 0x0010;
+ rr->request.code = 0x003e;
+ rc = chsc(rr);
+ if (rc)
+ return -EIO;
+ memcpy(result, &rr->data, size);
+ return (rr->response.code == 0x0001) ? 0 : -EIO;
+}
+
+int chsc_siosl(struct subchannel_id schid)
+{
+ struct {
+ struct chsc_header request;
+ u32 word1;
+ struct subchannel_id sid;
+ u32 word3;
+ struct chsc_header response;
+ u32 word[11];
+ } *siosl_area;
+ unsigned long flags;
+ int ccode;
+ int rc;
+
+ spin_lock_irqsave(&chsc_page_lock, flags);
+ memset(chsc_page, 0, PAGE_SIZE);
+ siosl_area = chsc_page;
+ siosl_area->request.length = 0x0010;
+ siosl_area->request.code = 0x0046;
+ siosl_area->word1 = 0x80000000;
+ siosl_area->sid = schid;
+
+ ccode = chsc(siosl_area);
+ if (ccode > 0) {
+ if (ccode == 3)
+ rc = -ENODEV;
+ else
+ rc = -EBUSY;
+ CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
+ schid.ssid, schid.sch_no, ccode);
+ goto out;
+ }
+ rc = chsc_error_from_response(siosl_area->response.code);
+ if (rc)
+ CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
+ schid.ssid, schid.sch_no,
+ siosl_area->response.code);
+ else
+ CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
+out:
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(chsc_siosl);
+
+/**
+ * chsc_scm_info() - store SCM information (SSI)
+ * @scm_area: request and response block for SSI
+ * @token: continuation token
+ *
+ * Returns 0 on success.
+ */
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
+{
+ int ccode, ret;
+
+ memset(scm_area, 0, sizeof(*scm_area));
+ scm_area->request.length = 0x0020;
+ scm_area->request.code = 0x004C;
+ scm_area->reqtok = token;
+
+ ccode = chsc(scm_area);
+ if (ccode > 0) {
+ ret = (ccode == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ ret = chsc_error_from_response(scm_area->response.code);
+ if (ret != 0)
+ CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
+ scm_area->response.code);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_scm_info);
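+
+/*
+ * Example usage (hypothetical caller; scm_area is an assumed zeroed,
+ * page-aligned block): walk all response blocks by following the
+ * continuation token:
+ *
+ *	u64 token = 0;
+ *
+ *	do {
+ *		rc = chsc_scm_info(scm_area, token);
+ *		if (rc)
+ *			break;
+ *		... process scm_area->scmal[] ...
+ *		token = scm_area->restok;
+ *	} while (token);
+ */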
+
+/**
+ * chsc_pnso() - Perform Network-Subchannel Operation
+ * @schid: id of the subchannel on which PNSO is performed
+ * @pnso_area: request and response block for the operation
+ * @oc: Operation Code
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+ u8 oc, struct chsc_pnso_resume_token resume_token, int cnc)
+{
+ memset(pnso_area, 0, sizeof(*pnso_area));
+ pnso_area->request.length = 0x0030;
+ pnso_area->request.code = 0x003d; /* network-subchannel operation */
+ pnso_area->m = schid.m;
+ pnso_area->ssid = schid.ssid;
+ pnso_area->sch = schid.sch_no;
+ pnso_area->cssid = schid.cssid;
+ pnso_area->oc = oc;
+ pnso_area->resume_token = resume_token;
+ pnso_area->n = (cnc != 0);
+ if (chsc(pnso_area))
+ return -EIO;
+ return chsc_error_from_response(pnso_area->response.code);
+}
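+
+/*
+ * Example usage (hypothetical caller; oc is a caller-chosen operation
+ * code and no resume token is used):
+ *
+ *	struct chsc_pnso_resume_token token = { 0 };
+ *	struct chsc_pnso_area *pnso_area;
+ *	int rc;
+ *
+ *	pnso_area = (void *)get_zeroed_page(GFP_KERNEL);
+ *	if (!pnso_area)
+ *		return -ENOMEM;
+ *	rc = chsc_pnso(schid, pnso_area, oc, token, 0);
+ *	...
+ *	free_page((unsigned long)pnso_area);
+ */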
+
+int chsc_sgib(u32 origin)
+{
+ struct {
+ struct chsc_header request;
+ u16 op;
+ u8 reserved01[2];
+ u8 reserved02:4;
+ u8 fmt:4;
+ u8 reserved03[7];
+ /* operation data area begin */
+ u8 reserved04[4];
+ u32 gib_origin;
+ u8 reserved05[10];
+ u8 aix;
+ u8 reserved06[4029];
+ struct chsc_header response;
+ u8 reserved07[4];
+ } *sgib_area;
+ int ret;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sgib_area = chsc_page;
+ sgib_area->request.length = 0x0fe0;
+ sgib_area->request.code = 0x0021;
+ sgib_area->op = 0x1;
+ sgib_area->gib_origin = origin;
+
+ ret = chsc(sgib_area);
+ if (ret == 0)
+ ret = chsc_error_from_response(sgib_area->response.code);
+ spin_unlock_irq(&chsc_page_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_sgib);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
new file mode 100644
index 000000000..c2b83b68b
--- /dev/null
+++ b/drivers/s390/cio/chsc.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_CHSC_H
+#define S390_CHSC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <asm/css_chars.h>
+#include <asm/chpid.h>
+#include <asm/chsc.h>
+#include <asm/schid.h>
+#include <asm/qdio.h>
+
+#define CHSC_SDA_OC_MSS 0x2
+
+#define NR_MEASUREMENT_CHARS 5
+struct cmg_chars {
+ u32 values[NR_MEASUREMENT_CHARS];
+};
+
+#define NR_MEASUREMENT_ENTRIES 8
+struct cmg_entry {
+ u32 values[NR_MEASUREMENT_ENTRIES];
+};
+
+struct channel_path_desc_fmt1 {
+ u8 flags;
+ u8 lsn;
+ u8 desc;
+ u8 chpid;
+ u32:24;
+ u8 chpp;
+ u32 unused[2];
+ u16 chid;
+ u32:16;
+ u16 mdc;
+ u16:13;
+ u8 r:1;
+ u8 s:1;
+ u8 f:1;
+ u32 zeros[2];
+};
+
+struct channel_path_desc_fmt3 {
+ struct channel_path_desc_fmt1 fmt1_desc;
+ u8 util_str[64];
+};
+
+struct channel_path;
+
+struct css_chsc_char {
+ u64 res;
+ u64 : 20;
+ u32 secm : 1; /* bit 84 */
+ u32 : 1;
+ u32 scmc : 1; /* bit 86 */
+ u32 : 20;
+ u32 scssc : 1; /* bit 107 */
+ u32 scsscf : 1; /* bit 108 */
+ u32:7;
+ u32 pnso:1; /* bit 116 */
+ u32:11;
+} __packed;
+
+extern struct css_chsc_char css_chsc_characteristics;
+
+struct chsc_ssd_info {
+ u8 path_mask;
+ u8 fla_valid_mask;
+ struct chp_id chpid[8];
+ u16 fla[8];
+};
+
+struct chsc_ssqd_area {
+ struct chsc_header request;
+ u16:10;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 first_sch;
+ u16:16;
+ u16 last_sch;
+ u32:32;
+ struct chsc_header response;
+ u32:32;
+ struct qdio_ssqd_desc qdio_ssqd;
+} __packed __aligned(PAGE_SIZE);
+
+struct chsc_scssc_area {
+ struct chsc_header request;
+ u16 operation_code;
+ u16:16;
+ u32:32;
+ u32:32;
+ u64 summary_indicator_addr;
+ u64 subchannel_indicator_addr;
+ u32 ks:4;
+ u32 kc:4;
+ u32:21;
+ u32 isc:3;
+ u32 word_with_d_bit;
+ u32:32;
+ struct subchannel_id schid;
+ u32 reserved[1004];
+ struct chsc_header response;
+ u32:32;
+} __packed __aligned(PAGE_SIZE);
+
+struct chsc_scpd {
+ struct chsc_header request;
+ u32:2;
+ u32 m:1;
+ u32 c:1;
+ u32 fmt:4;
+ u32 cssid:8;
+ u32:4;
+ u32 rfmt:4;
+ u32 first_chpid:8;
+ u32:24;
+ u32 last_chpid:8;
+ u32 zeroes1;
+ struct chsc_header response;
+ u32:32;
+ u8 data[0];
+} __packed __aligned(PAGE_SIZE);
+
+struct chsc_sda_area {
+ struct chsc_header request;
+ u8 :4;
+ u8 format:4;
+ u8 :8;
+ u16 operation_code;
+ u32 :32;
+ u32 :32;
+ u32 operation_data_area[252];
+ struct chsc_header response;
+ u32 :4;
+ u32 format2:4;
+ u32 :24;
+} __packed __aligned(PAGE_SIZE);
+
+extern int chsc_get_ssd_info(struct subchannel_id schid,
+ struct chsc_ssd_info *ssd);
+extern int chsc_determine_css_characteristics(void);
+extern int chsc_init(void);
+extern void chsc_init_cleanup(void);
+
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code);
+extern int chsc_enable_facility(int);
+struct channel_subsystem;
+extern int chsc_secm(struct channel_subsystem *, int);
+int __chsc_do_secm(struct channel_subsystem *css, int enable);
+
+int chsc_chp_vary(struct chp_id chpid, int on);
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+ int c, int m, void *page);
+int chsc_determine_fmt0_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt0 *desc);
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt1 *desc);
+int chsc_determine_fmt3_channel_path_desc(struct chp_id chpid,
+ struct channel_path_desc_fmt3 *desc);
+void chsc_chp_online(struct chp_id chpid);
+void chsc_chp_offline(struct chp_id chpid);
+int chsc_get_channel_measurement_chars(struct channel_path *chp);
+int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
+int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
+ u64 summary_indicator_addr, u64 subchannel_indicator_addr,
+ u8 isc);
+int chsc_sgib(u32 origin);
+int chsc_error_from_response(int response);
+
+int chsc_siosl(struct subchannel_id schid);
+
+/* Functions and definitions to query storage-class memory. */
+struct sale {
+ u64 sa;
+ u32 p:4;
+ u32 op_state:4;
+ u32 data_state:4;
+ u32 rank:4;
+ u32 r:1;
+ u32:7;
+ u32 rid:8;
+ u32:32;
+} __packed;
+
+struct chsc_scm_info {
+ struct chsc_header request;
+ u32:32;
+ u64 reqtok;
+ u32 reserved1[4];
+ struct chsc_header response;
+ u64:56;
+ u8 rq;
+ u32 mbc;
+ u64 msa;
+ u16 is;
+ u16 mmc;
+ u32 mci;
+ u64 nr_scm_ini;
+ u64 nr_scm_unini;
+ u32 reserved2[10];
+ u64 restok;
+ struct sale scmal[248];
+} __packed __aligned(PAGE_SIZE);
+
+int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
+
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+ u8 oc, struct chsc_pnso_resume_token resume_token, int cnc);
+
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid);
+
+#ifdef CONFIG_SCM_BUS
+int scm_update_information(void);
+int scm_process_availability_information(void);
+#else /* CONFIG_SCM_BUS */
+static inline int scm_update_information(void) { return 0; }
+static inline int scm_process_availability_information(void) { return 0; }
+#endif /* CONFIG_SCM_BUS */
+
+#endif
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
new file mode 100644
index 000000000..8f080d3fd
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.c
@@ -0,0 +1,1012 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for s390 chsc subchannels
+ *
+ * Copyright IBM Corp. 2008, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/cio.h>
+#include <asm/chsc.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc_sch.h"
+#include "ioasm.h"
+
+static debug_info_t *chsc_debug_msg_id;
+static debug_info_t *chsc_debug_log_id;
+
+static struct chsc_request *on_close_request;
+static struct chsc_async_area *on_close_chsc_area;
+static DEFINE_MUTEX(on_close_mutex);
+
+#define CHSC_MSG(imp, args...) do { \
+ debug_sprintf_event(chsc_debug_msg_id, imp, ##args); \
+ } while (0)
+
+#define CHSC_LOG(imp, txt) do { \
+ debug_text_event(chsc_debug_log_id, imp, txt); \
+ } while (0)
+
+static void CHSC_LOG_HEX(int level, void *data, int length)
+{
+ debug_event(chsc_debug_log_id, level, data, length);
+}
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("driver for s390 chsc subchannels");
+MODULE_LICENSE("GPL");
+
+static void chsc_subchannel_irq(struct subchannel *sch)
+{
+ struct chsc_private *private = dev_get_drvdata(&sch->dev);
+ struct chsc_request *request = private->request;
+ struct irb *irb = this_cpu_ptr(&cio_irb);
+
+ CHSC_LOG(4, "irb");
+ CHSC_LOG_HEX(4, irb, sizeof(*irb));
+ inc_irq_stat(IRQIO_CSC);
+
+ /* Copy irb to provided request and set done. */
+ if (!request) {
+ CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ return;
+ }
+ private->request = NULL;
+ memcpy(&request->irb, irb, sizeof(*irb));
+ cio_update_schib(sch);
+ complete(&request->completion);
+ put_device(&sch->dev);
+}
+
+static int chsc_subchannel_probe(struct subchannel *sch)
+{
+ struct chsc_private *private;
+ int ret;
+
+ CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ sch->isc = CHSC_SCH_ISC;
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+ dev_set_drvdata(&sch->dev, private);
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret) {
+ CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
+ dev_set_drvdata(&sch->dev, NULL);
+ kfree(private);
+ } else {
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+ }
+ return ret;
+}
+
+static int chsc_subchannel_remove(struct subchannel *sch)
+{
+ struct chsc_private *private;
+
+ cio_disable_subchannel(sch);
+ private = dev_get_drvdata(&sch->dev);
+ dev_set_drvdata(&sch->dev, NULL);
+ if (private->request) {
+ complete(&private->request->completion);
+ put_device(&sch->dev);
+ }
+ kfree(private);
+ return 0;
+}
+
+static void chsc_subchannel_shutdown(struct subchannel *sch)
+{
+ cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_prepare(struct subchannel *sch)
+{
+ int cc;
+ struct schib schib;
+ /*
+ * Don't allow suspend while the subchannel is not idle
+ * since we don't have a way to clear the subchannel and
+ * cannot disable it with a request running.
+ */
+ cc = stsch(sch->schid, &schib);
+ if (!cc && scsw_stctl(&schib.scsw))
+ return -EAGAIN;
+ return 0;
+}
+
+static int chsc_subchannel_freeze(struct subchannel *sch)
+{
+ return cio_disable_subchannel(sch);
+}
+
+static int chsc_subchannel_restore(struct subchannel *sch)
+{
+ return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+static struct css_device_id chsc_subchannel_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
+
+static struct css_driver chsc_subchannel_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "chsc_subchannel",
+ },
+ .subchannel_type = chsc_subchannel_ids,
+ .irq = chsc_subchannel_irq,
+ .probe = chsc_subchannel_probe,
+ .remove = chsc_subchannel_remove,
+ .shutdown = chsc_subchannel_shutdown,
+ .prepare = chsc_subchannel_prepare,
+ .freeze = chsc_subchannel_freeze,
+ .thaw = chsc_subchannel_restore,
+ .restore = chsc_subchannel_restore,
+};
+
+static int __init chsc_init_dbfs(void)
+{
+ chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
+ if (!chsc_debug_msg_id)
+ goto out;
+ debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
+ debug_set_level(chsc_debug_msg_id, 2);
+ chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
+ if (!chsc_debug_log_id)
+ goto out;
+ debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
+ debug_set_level(chsc_debug_log_id, 2);
+ return 0;
+out:
+ debug_unregister(chsc_debug_msg_id);
+ return -ENOMEM;
+}
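+
+/*
+ * Note: the two debug logs registered above are exposed by the s390 debug
+ * feature via debugfs, typically as /sys/kernel/debug/s390dbf/chsc_msg and
+ * /sys/kernel/debug/s390dbf/chsc_log (the exact path depends on where
+ * debugfs is mounted).
+ */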
+
+static void chsc_remove_dbfs(void)
+{
+ debug_unregister(chsc_debug_log_id);
+ debug_unregister(chsc_debug_msg_id);
+}
+
+static int __init chsc_init_sch_driver(void)
+{
+ return css_driver_register(&chsc_subchannel_driver);
+}
+
+static void chsc_cleanup_sch_driver(void)
+{
+ css_driver_unregister(&chsc_subchannel_driver);
+}
+
+static DEFINE_SPINLOCK(chsc_lock);
+
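+/*
+ * Match callback for driver_find_device(): pick an enabled chsc subchannel
+ * with no function in progress, i.e. one that is free for a new request.
+ */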
+static int chsc_subchannel_match_next_free(struct device *dev, const void *data)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
+}
+
+static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
+{
+ struct device *dev;
+
+ dev = driver_find_device(&chsc_subchannel_driver.drv,
+ sch ? &sch->dev : NULL, NULL,
+ chsc_subchannel_match_next_free);
+ return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * chsc_async() - try to start a chsc request asynchronously
+ * @chsc_area: request to be started
+ * @request: request structure to associate
+ *
+ * Tries to start a chsc request on one of the existing chsc subchannels.
+ * Returns:
+ * %0 if the request was performed synchronously
+ * %-EINPROGRESS if the request was successfully started
+ * %-EBUSY if all chsc subchannels are busy
+ * %-ENODEV if no chsc subchannels are available
+ * Context:
+ * interrupts disabled, chsc_lock held
+ */
+static int chsc_async(struct chsc_async_area *chsc_area,
+ struct chsc_request *request)
+{
+ int cc;
+ struct chsc_private *private;
+ struct subchannel *sch = NULL;
+ int ret = -ENODEV;
+ char dbf[10];
+
+ chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
+ while ((sch = chsc_get_next_subchannel(sch))) {
+ spin_lock(sch->lock);
+ private = dev_get_drvdata(&sch->dev);
+ if (private->request) {
+ spin_unlock(sch->lock);
+ ret = -EBUSY;
+ continue;
+ }
+ chsc_area->header.sid = sch->schid;
+ CHSC_LOG(2, "schid");
+ CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
+ cc = chsc(chsc_area);
+ snprintf(dbf, sizeof(dbf), "cc:%d", cc);
+ CHSC_LOG(2, dbf);
+ switch (cc) {
+ case 0:
+ ret = 0;
+ break;
+ case 1:
+ sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
+ ret = -EINPROGRESS;
+ private->request = request;
+ break;
+ case 2:
+ ret = -EBUSY;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ spin_unlock(sch->lock);
+ CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, cc);
+ if (ret == -EINPROGRESS)
+ return -EINPROGRESS;
+ put_device(&sch->dev);
+ if (ret == 0)
+ return 0;
+ }
+ return ret;
+}
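+
+/*
+ * Illustrative caller pattern (sketch only; see chsc_ioctl_start() below):
+ *
+ *	spin_lock_irq(&chsc_lock);
+ *	ret = chsc_async(chsc_area, request);
+ *	spin_unlock_irq(&chsc_lock);
+ *	if (ret == -EINPROGRESS) {
+ *		wait_for_completion(&request->completion);
+ *		ret = chsc_examine_irb(request);
+ *	}
+ */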
+
+static void chsc_log_command(void *chsc_area)
+{
+ char dbf[10];
+
+ snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
+ CHSC_LOG(0, dbf);
+ CHSC_LOG_HEX(0, chsc_area, 32);
+}
+
+static int chsc_examine_irb(struct chsc_request *request)
+{
+ int backed_up;
+
+ if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
+ return -EIO;
+ backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
+ request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
+ if (scsw_cstat(&request->irb.scsw) == 0)
+ return 0;
+ if (!backed_up)
+ return 0;
+ if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
+ return -EIO;
+ if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
+ return -EPERM;
+ if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
+ return -EAGAIN;
+ if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
+ return -EAGAIN;
+ return -EIO;
+}
+
+static int chsc_ioctl_start(void __user *user_area)
+{
+ struct chsc_request *request;
+ struct chsc_async_area *chsc_area;
+ int ret;
+ char dbf[10];
+
+ if (!css_general_characteristics.dynio)
+ /* It makes no sense to try. */
+ return -EOPNOTSUPP;
+ chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ if (!chsc_area)
+ return -ENOMEM;
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ init_completion(&request->completion);
+ if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ chsc_log_command(chsc_area);
+ spin_lock_irq(&chsc_lock);
+ ret = chsc_async(chsc_area, request);
+ spin_unlock_irq(&chsc_lock);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&request->completion);
+ ret = chsc_examine_irb(request);
+ }
+ /* copy area back to user */
+ if (!ret)
+ if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+ ret = -EFAULT;
+out_free:
+ snprintf(dbf, sizeof(dbf), "ret:%d", ret);
+ CHSC_LOG(0, dbf);
+ kfree(request);
+ free_page((unsigned long)chsc_area);
+ return ret;
+}
+
+static int chsc_ioctl_on_close_set(void __user *user_area)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (on_close_chsc_area) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
+ if (!on_close_request) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ if (!on_close_chsc_area) {
+ ret = -ENOMEM;
+ goto out_free_request;
+ }
+ if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free_chsc;
+ }
+ ret = 0;
+ goto out_unlock;
+
+out_free_chsc:
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+out_free_request:
+ kfree(on_close_request);
+ on_close_request = NULL;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
+ CHSC_LOG(0, dbf);
+ return ret;
+}
+
+static int chsc_ioctl_on_close_remove(void)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (!on_close_chsc_area) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+ kfree(on_close_request);
+ on_close_request = NULL;
+ ret = 0;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
+ CHSC_LOG(0, dbf);
+ return ret;
+}
+
+static int chsc_ioctl_start_sync(void __user *user_area)
+{
+ struct chsc_sync_area *chsc_area;
+ int ret, ccode;
+
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!chsc_area)
+ return -ENOMEM;
+ if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ if (chsc_area->header.code & 0x4000) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ chsc_log_command(chsc_area);
+ ccode = chsc(chsc_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ free_page((unsigned long)chsc_area);
+ return ret;
+}
+
+static int chsc_ioctl_info_channel_path(void __user *user_cd)
+{
+ struct chsc_chp_cd *cd;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 : 2;
+ u32 m : 1;
+ u32 : 1;
+ u32 fmt1 : 4;
+ u32 cssid : 8;
+ u32 : 8;
+ u32 first_chpid : 8;
+ u32 : 24;
+ u32 last_chpid : 8;
+ u32 : 32;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+ } __attribute__ ((packed)) *scpcd_area;
+
+ scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!scpcd_area)
+ return -ENOMEM;
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ scpcd_area->request.length = 0x0010;
+ scpcd_area->request.code = 0x0028;
+ scpcd_area->m = cd->m;
+ scpcd_area->fmt1 = cd->fmt;
+ scpcd_area->cssid = cd->chpid.cssid;
+ scpcd_area->first_chpid = cd->chpid.id;
+ scpcd_area->last_chpid = cd->chpid.id;
+
+ ccode = chsc(scpcd_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (scpcd_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "scpcd: response code=%x\n",
+ scpcd_area->response.code);
+ goto out_free;
+ }
+ memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
+ if (copy_to_user(user_cd, cd, sizeof(*cd)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(cd);
+ free_page((unsigned long)scpcd_area);
+ return ret;
+}
+
+static int chsc_ioctl_info_cu(void __user *user_cd)
+{
+ struct chsc_cu_cd *cd;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 : 2;
+ u32 m : 1;
+ u32 : 1;
+ u32 fmt1 : 4;
+ u32 cssid : 8;
+ u32 : 8;
+ u32 first_cun : 8;
+ u32 : 24;
+ u32 last_cun : 8;
+ u32 : 32;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+ } __attribute__ ((packed)) *scucd_area;
+
+ scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!scucd_area)
+ return -ENOMEM;
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ scucd_area->request.length = 0x0010;
+ scucd_area->request.code = 0x0026;
+ scucd_area->m = cd->m;
+ scucd_area->fmt1 = cd->fmt;
+ scucd_area->cssid = cd->cssid;
+ scucd_area->first_cun = cd->cun;
+ scucd_area->last_cun = cd->cun;
+
+ ccode = chsc(scucd_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (scucd_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "scucd: response code=%x\n",
+ scucd_area->response.code);
+ goto out_free;
+ }
+ memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
+ if (copy_to_user(user_cd, cd, sizeof(*cd)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(cd);
+ free_page((unsigned long)scucd_area);
+ return ret;
+}
+
+static int chsc_ioctl_info_sch_cu(void __user *user_cud)
+{
+ struct chsc_sch_cud *cud;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 : 2;
+ u32 m : 1;
+ u32 : 5;
+ u32 fmt1 : 4;
+ u32 : 2;
+ u32 ssid : 2;
+ u32 first_sch : 16;
+ u32 : 8;
+ u32 cssid : 8;
+ u32 last_sch : 16;
+ u32 : 32;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+ } __attribute__ ((packed)) *sscud_area;
+
+ sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sscud_area)
+ return -ENOMEM;
+ cud = kzalloc(sizeof(*cud), GFP_KERNEL);
+ if (!cud) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(cud, user_cud, sizeof(*cud))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ sscud_area->request.length = 0x0010;
+ sscud_area->request.code = 0x0006;
+ sscud_area->m = cud->schid.m;
+ sscud_area->fmt1 = cud->fmt;
+ sscud_area->ssid = cud->schid.ssid;
+ sscud_area->first_sch = cud->schid.sch_no;
+ sscud_area->cssid = cud->schid.cssid;
+ sscud_area->last_sch = cud->schid.sch_no;
+
+ ccode = chsc(sscud_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (sscud_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "sscud: response code=%x\n",
+ sscud_area->response.code);
+ goto out_free;
+ }
+ memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
+ if (copy_to_user(user_cud, cud, sizeof(*cud)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(cud);
+ free_page((unsigned long)sscud_area);
+ return ret;
+}
+
+static int chsc_ioctl_conf_info(void __user *user_ci)
+{
+ struct chsc_conf_info *ci;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 : 2;
+ u32 m : 1;
+ u32 : 1;
+ u32 fmt1 : 4;
+ u32 cssid : 8;
+ u32 : 6;
+ u32 ssid : 2;
+ u32 : 8;
+ u64 : 64;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 20];
+ } __attribute__ ((packed)) *sci_area;
+
+ sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sci_area)
+ return -ENOMEM;
+ ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+ if (!ci) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(ci, user_ci, sizeof(*ci))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ sci_area->request.length = 0x0010;
+ sci_area->request.code = 0x0012;
+ sci_area->m = ci->id.m;
+ sci_area->fmt1 = ci->fmt;
+ sci_area->cssid = ci->id.cssid;
+ sci_area->ssid = ci->id.ssid;
+
+ ccode = chsc(sci_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (sci_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "sci: response code=%x\n",
+ sci_area->response.code);
+ goto out_free;
+ }
+ memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
+ if (copy_to_user(user_ci, ci, sizeof(*ci)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(ci);
+ free_page((unsigned long)sci_area);
+ return ret;
+}
+
+static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
+{
+ struct chsc_comp_list *ccl;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 ctype : 8;
+ u32 : 4;
+ u32 fmt : 4;
+ u32 : 16;
+ u64 : 64;
+ u32 list_parm[2];
+ u64 : 64;
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 36];
+ } __attribute__ ((packed)) *sccl_area;
+ struct {
+ u32 m : 1;
+ u32 : 31;
+ u32 cssid : 8;
+ u32 : 16;
+ u32 chpid : 8;
+ } __attribute__ ((packed)) *chpid_parm;
+ struct {
+ u32 f_cssid : 8;
+ u32 l_cssid : 8;
+ u32 : 16;
+ u32 res;
+ } __attribute__ ((packed)) *cssids_parm;
+
+ sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccl_area)
+ return -ENOMEM;
+ ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
+ if (!ccl) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ sccl_area->request.length = 0x0020;
+ sccl_area->request.code = 0x0030;
+ sccl_area->fmt = ccl->req.fmt;
+ sccl_area->ctype = ccl->req.ctype;
+ switch (sccl_area->ctype) {
+ case CCL_CU_ON_CHP:
+ case CCL_IOP_CHP:
+ chpid_parm = (void *)&sccl_area->list_parm;
+ chpid_parm->m = ccl->req.chpid.m;
+ chpid_parm->cssid = ccl->req.chpid.chp.cssid;
+ chpid_parm->chpid = ccl->req.chpid.chp.id;
+ break;
+ case CCL_CSS_IMG:
+ case CCL_CSS_IMG_CONF_CHAR:
+ cssids_parm = (void *)&sccl_area->list_parm;
+ cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
+ cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
+ break;
+ }
+ ccode = chsc(sccl_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (sccl_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "sccl: response code=%x\n",
+ sccl_area->response.code);
+ goto out_free;
+ }
+ memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
+ if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(ccl);
+ free_page((unsigned long)sccl_area);
+ return ret;
+}
+
+static int chsc_ioctl_chpd(void __user *user_chpd)
+{
+ struct chsc_scpd *scpd_area;
+ struct chsc_cpd_info *chpd;
+ int ret;
+
+ chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
+ scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!scpd_area || !chpd) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
+ chpd->rfmt, chpd->c, chpd->m,
+ scpd_area);
+ if (ret)
+ goto out_free;
+ memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
+ if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
+ ret = -EFAULT;
+out_free:
+ kfree(chpd);
+ free_page((unsigned long)scpd_area);
+ return ret;
+}
+
+static int chsc_ioctl_dcal(void __user *user_dcal)
+{
+ struct chsc_dcal *dcal;
+ int ret, ccode;
+ struct {
+ struct chsc_header request;
+ u32 atype : 8;
+ u32 : 4;
+ u32 fmt : 4;
+ u32 : 16;
+ u32 res0[2];
+ u32 list_parm[2];
+ u32 res1[2];
+ struct chsc_header response;
+ u8 data[PAGE_SIZE - 36];
+ } __attribute__ ((packed)) *sdcal_area;
+
+ sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sdcal_area)
+ return -ENOMEM;
+ dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
+ if (!dcal) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ sdcal_area->request.length = 0x0020;
+ sdcal_area->request.code = 0x0034;
+ sdcal_area->atype = dcal->req.atype;
+ sdcal_area->fmt = dcal->req.fmt;
+ memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
+ sizeof(sdcal_area->list_parm));
+
+ ccode = chsc(sdcal_area);
+ if (ccode != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+ if (sdcal_area->response.code != 0x0001) {
+ ret = -EIO;
+ CHSC_MSG(0, "sdcal: response code=%x\n",
+ sdcal_area->response.code);
+ goto out_free;
+ }
+ memcpy(&dcal->sdcal, &sdcal_area->response,
+ sdcal_area->response.length);
+ if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+out_free:
+ kfree(dcal);
+ free_page((unsigned long)sdcal_area);
+ return ret;
+}
+
+static long chsc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp;
+
+ CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
+ switch (cmd) {
+ case CHSC_START:
+ return chsc_ioctl_start(argp);
+ case CHSC_START_SYNC:
+ return chsc_ioctl_start_sync(argp);
+ case CHSC_INFO_CHANNEL_PATH:
+ return chsc_ioctl_info_channel_path(argp);
+ case CHSC_INFO_CU:
+ return chsc_ioctl_info_cu(argp);
+ case CHSC_INFO_SCH_CU:
+ return chsc_ioctl_info_sch_cu(argp);
+ case CHSC_INFO_CI:
+ return chsc_ioctl_conf_info(argp);
+ case CHSC_INFO_CCL:
+ return chsc_ioctl_conf_comp_list(argp);
+ case CHSC_INFO_CPD:
+ return chsc_ioctl_chpd(argp);
+ case CHSC_INFO_DCAL:
+ return chsc_ioctl_dcal(argp);
+ case CHSC_ON_CLOSE_SET:
+ return chsc_ioctl_on_close_set(argp);
+ case CHSC_ON_CLOSE_REMOVE:
+ return chsc_ioctl_on_close_remove();
+ default: /* unknown ioctl number */
+ return -ENOIOCTLCMD;
+ }
+}
+
+static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);
+
+static int chsc_open(struct inode *inode, struct file *file)
+{
+ if (!atomic_dec_and_test(&chsc_ready_for_use)) {
+ atomic_inc(&chsc_ready_for_use);
+ return -EBUSY;
+ }
+ return nonseekable_open(inode, file);
+}
+
+static int chsc_release(struct inode *inode, struct file *filp)
+{
+ char dbf[13];
+ int ret;
+
+ mutex_lock(&on_close_mutex);
+ if (!on_close_chsc_area)
+ goto out_unlock;
+ init_completion(&on_close_request->completion);
+ CHSC_LOG(0, "on_close");
+ chsc_log_command(on_close_chsc_area);
+ spin_lock_irq(&chsc_lock);
+ ret = chsc_async(on_close_chsc_area, on_close_request);
+ spin_unlock_irq(&chsc_lock);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&on_close_request->completion);
+ ret = chsc_examine_irb(on_close_request);
+ }
+ snprintf(dbf, sizeof(dbf), "relret:%d", ret);
+ CHSC_LOG(0, dbf);
+ free_page((unsigned long)on_close_chsc_area);
+ on_close_chsc_area = NULL;
+ kfree(on_close_request);
+ on_close_request = NULL;
+out_unlock:
+ mutex_unlock(&on_close_mutex);
+ atomic_inc(&chsc_ready_for_use);
+ return 0;
+}
+
+static const struct file_operations chsc_fops = {
+ .owner = THIS_MODULE,
+ .open = chsc_open,
+ .release = chsc_release,
+ .unlocked_ioctl = chsc_ioctl,
+ .compat_ioctl = chsc_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice chsc_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "chsc",
+ .fops = &chsc_fops,
+};
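+
+/*
+ * Illustrative user-space sketch (documentation only; assumes the uapi chsc
+ * definitions are available to the application):
+ *
+ *	fd = open("/dev/chsc", O_RDWR);
+ *	ioctl(fd, CHSC_START_SYNC, area);
+ *
+ * where "area" points to a PAGE_SIZE buffer holding the request block, as
+ * expected by chsc_ioctl_start_sync() above.
+ */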
+
+static int __init chsc_misc_init(void)
+{
+ return misc_register(&chsc_misc_device);
+}
+
+static void chsc_misc_cleanup(void)
+{
+ misc_deregister(&chsc_misc_device);
+}
+
+static int __init chsc_sch_init(void)
+{
+ int ret;
+
+ ret = chsc_init_dbfs();
+ if (ret)
+ return ret;
+ isc_register(CHSC_SCH_ISC);
+ ret = chsc_init_sch_driver();
+ if (ret)
+ goto out_dbf;
+ ret = chsc_misc_init();
+ if (ret)
+ goto out_driver;
+ return ret;
+out_driver:
+ chsc_cleanup_sch_driver();
+out_dbf:
+ isc_unregister(CHSC_SCH_ISC);
+ chsc_remove_dbfs();
+ return ret;
+}
+
+static void __exit chsc_sch_exit(void)
+{
+ chsc_misc_cleanup();
+ chsc_cleanup_sch_driver();
+ isc_unregister(CHSC_SCH_ISC);
+ chsc_remove_dbfs();
+}
+
+module_init(chsc_sch_init);
+module_exit(chsc_sch_exit);
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h
new file mode 100644
index 000000000..ff5328b0b
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CHSC_SCH_H
+#define _CHSC_SCH_H
+
+struct chsc_request {
+ struct completion completion;
+ struct irb irb;
+};
+
+struct chsc_private {
+ struct chsc_request *request;
+};
+
+#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
new file mode 100644
index 000000000..6d716db2a
--- /dev/null
+++ b/drivers/s390/cio/cio.c
@@ -0,0 +1,758 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * S/390 common I/O routines -- low level i/o calls
+ *
+ * Copyright IBM Corp. 1999, 2008
+ * Author(s): Ingo Adlung (adlung@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Arnd Bergmann (arndb@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/cio.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
+#include <asm/chpid.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+#include <linux/sched/cputime.h>
+#include <asm/fcx.h>
+#include <asm/nmi.h>
+#include <asm/crw.h>
+#include "cio.h"
+#include "css.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "io_sch.h"
+#include "blacklist.h"
+#include "cio_debug.h"
+#include "chp.h"
+#include "trace.h"
+
+debug_info_t *cio_debug_msg_id;
+debug_info_t *cio_debug_trace_id;
+debug_info_t *cio_debug_crw_id;
+
+DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb);
+EXPORT_PER_CPU_SYMBOL(cio_irb);
+
+/*
+ * Function: cio_debug_init
+ * Initializes three debug logs for common I/O:
+ * - cio_msg logs generic cio messages
+ * - cio_trace logs the calling of different functions
+ * - cio_crw logs machine check related cio messages
+ */
+static int __init cio_debug_init(void)
+{
+ cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
+ if (!cio_debug_msg_id)
+ goto out_unregister;
+ debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
+ debug_set_level(cio_debug_msg_id, 2);
+ cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
+ if (!cio_debug_trace_id)
+ goto out_unregister;
+ debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
+ debug_set_level(cio_debug_trace_id, 2);
+ cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
+ if (!cio_debug_crw_id)
+ goto out_unregister;
+ debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
+ debug_set_level(cio_debug_crw_id, 4);
+ return 0;
+
+out_unregister:
+ debug_unregister(cio_debug_msg_id);
+ debug_unregister(cio_debug_trace_id);
+ debug_unregister(cio_debug_crw_id);
+ return -1;
+}
+
+arch_initcall (cio_debug_init);
+
+int cio_set_options(struct subchannel *sch, int flags)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+
+ priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
+ priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
+ priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
+ return 0;
+}
+
+static int
+cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
+{
+ char dbf_text[15];
+
+ if (lpm != 0)
+ sch->lpm &= ~lpm;
+ else
+ sch->lpm = 0;
+
+ CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
+ "subchannel 0.%x.%04x!\n", sch->schid.ssid,
+ sch->schid.sch_no);
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ sprintf(dbf_text, "no%s", dev_name(&sch->dev));
+ CIO_TRACE_EVENT(0, dbf_text);
+ CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
+
+ return (sch->lpm ? -EACCES : -ENODEV);
+}
+
+int
+cio_start_key (struct subchannel *sch, /* subchannel structure */
+ struct ccw1 * cpa, /* logical channel prog addr */
+ __u8 lpm, /* logical path mask */
+ __u8 key) /* storage key */
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ union orb *orb = &priv->orb;
+ int ccode;
+
+ CIO_TRACE_EVENT(5, "stIO");
+ CIO_TRACE_EVENT(5, dev_name(&sch->dev));
+
+ memset(orb, 0, sizeof(union orb));
+ /* sch is always under 2G. */
+ orb->cmd.intparm = (u32)(addr_t)sch;
+ orb->cmd.fmt = 1;
+
+ orb->cmd.pfch = priv->options.prefetch == 0;
+ orb->cmd.spnd = priv->options.suspend;
+ orb->cmd.ssic = priv->options.suspend && priv->options.inter;
+ orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
+ /*
+ * for 64 bit we always support 64 bit IDAWs with 4k page size only
+ */
+ orb->cmd.c64 = 1;
+ orb->cmd.i2k = 0;
+ orb->cmd.key = key >> 4;
+ /* issue "Start Subchannel" */
+ orb->cmd.cpa = (__u32) __pa(cpa);
+ ccode = ssch(sch->schid, orb);
+
+ /* process condition code */
+ CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ /*
+ * initialize device status information
+ */
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
+ return 0;
+ case 1: /* status pending */
+ case 2: /* busy */
+ return -EBUSY;
+ case 3: /* device/path not operational */
+ return cio_start_handle_notoper(sch, lpm);
+ default:
+ return ccode;
+ }
+}
+EXPORT_SYMBOL_GPL(cio_start_key);
+
+int
+cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
+{
+ return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
+}
+EXPORT_SYMBOL_GPL(cio_start);
+
+/*
+ * resume suspended I/O operation
+ */
+int
+cio_resume (struct subchannel *sch)
+{
+ int ccode;
+
+ CIO_TRACE_EVENT(4, "resIO");
+ CIO_TRACE_EVENT(4, dev_name(&sch->dev));
+
+ ccode = rsch (sch->schid);
+
+ CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
+ return 0;
+ case 1:
+ return -EBUSY;
+ case 2:
+ return -EINVAL;
+ default:
+ /*
+ * useless to wait for request completion
+ * as device is no longer operational!
+ */
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL_GPL(cio_resume);
+
+/*
+ * halt I/O operation
+ */
+int
+cio_halt(struct subchannel *sch)
+{
+ int ccode;
+
+ if (!sch)
+ return -ENODEV;
+
+ CIO_TRACE_EVENT(2, "haltIO");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ /*
+ * Issue "Halt subchannel" and process condition code
+ */
+ ccode = hsch (sch->schid);
+
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+ return 0;
+ case 1: /* status pending */
+ case 2: /* busy */
+ return -EBUSY;
+ default: /* device not operational */
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL_GPL(cio_halt);
+
+/*
+ * Clear I/O operation
+ */
+int
+cio_clear(struct subchannel *sch)
+{
+ int ccode;
+
+ if (!sch)
+ return -ENODEV;
+
+ CIO_TRACE_EVENT(2, "clearIO");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ /*
+ * Issue "Clear subchannel" and process condition code
+ */
+ ccode = csch (sch->schid);
+
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
+ return 0;
+ default: /* device not operational */
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL_GPL(cio_clear);
+
+/*
+ * Function: cio_cancel
+ * Issues a "Cancel Subchannel" on the specified subchannel
+ * Note: We don't need any fancy intparms and flags here
+ * since xsch is executed synchronously.
+ * Only for common I/O internal use for now.
+ */
+int
+cio_cancel (struct subchannel *sch)
+{
+ int ccode;
+
+ if (!sch)
+ return -ENODEV;
+
+ CIO_TRACE_EVENT(2, "cancelIO");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ ccode = xsch (sch->schid);
+
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0: /* success */
+ /* Update information in scsw. */
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ return 0;
+ case 1: /* status pending */
+ return -EBUSY;
+ case 2: /* not applicable */
+ return -EINVAL;
+ default: /* not oper */
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL_GPL(cio_cancel);
+
+/**
+ * cio_cancel_halt_clear - Cancel running I/O by performing cancel, halt
+ * and clear in that order if the subchannel is valid.
+ * @sch: subchannel on which to perform the cancel_halt_clear operation
+ * @iretry: number of retries remaining for the next operation
+ *
+ * This should be called repeatedly since halt/clear are asynchronous
+ * operations. We do one try with cio_cancel, three tries with cio_halt,
+ * 255 tries with cio_clear. The caller should initialize @iretry with
+ * the value 255 for its first call to this, and keep using the same
+ * @iretry in the subsequent calls until it gets a non -EBUSY return.
+ *
+ * Returns 0 if device now idle, -ENODEV for device not operational,
+ * -EBUSY if an interrupt is expected (either from halt/clear or from a
+ * status pending), and -EIO if out of retries.
+ */
+int cio_cancel_halt_clear(struct subchannel *sch, int *iretry)
+{
+ int ret;
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ if (!sch->schib.pmcw.ena)
+ /* Not operational -> done. */
+ return 0;
+ /* Stage 1: cancel io. */
+ if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
+ !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
+ if (!scsw_is_tm(&sch->schib.scsw)) {
+ ret = cio_cancel(sch);
+ if (ret != -EINVAL)
+ return ret;
+ }
+ /*
+ * Cancel io unsuccessful or not applicable (transport mode).
+ * Continue with asynchronous instructions.
+ */
+ *iretry = 3; /* 3 halt retries. */
+ }
+ /* Stage 2: halt io. */
+ if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
+ if (*iretry) {
+ *iretry -= 1;
+ ret = cio_halt(sch);
+ if (ret != -EBUSY)
+ return (ret == 0) ? -EBUSY : ret;
+ }
+ /* Halt io unsuccessful. */
+ *iretry = 255; /* 255 clear retries. */
+ }
+ /* Stage 3: clear io. */
+ if (*iretry) {
+ *iretry -= 1;
+ ret = cio_clear(sch);
+ return (ret == 0) ? -EBUSY : ret;
+ }
+ /* Function was unsuccessful */
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(cio_cancel_halt_clear);
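+
+/*
+ * Illustrative caller sketch (following the retry protocol described in the
+ * kerneldoc above):
+ *
+ *	int iretry = 255;
+ *	int ret;
+ *
+ *	do {
+ *		ret = cio_cancel_halt_clear(sch, &iretry);
+ *	} while (ret == -EBUSY);
+ *
+ * In practice callers do not busy-loop: -EBUSY means an interrupt is still
+ * expected, so they typically retry from their interrupt handler or state
+ * machine with the same iretry value.
+ */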
+
+static void cio_apply_config(struct subchannel *sch, struct schib *schib)
+{
+ schib->pmcw.intparm = sch->config.intparm;
+ schib->pmcw.mbi = sch->config.mbi;
+ schib->pmcw.isc = sch->config.isc;
+ schib->pmcw.ena = sch->config.ena;
+ schib->pmcw.mme = sch->config.mme;
+ schib->pmcw.mp = sch->config.mp;
+ schib->pmcw.csense = sch->config.csense;
+ schib->pmcw.mbfc = sch->config.mbfc;
+ if (sch->config.mbfc)
+ schib->mba = sch->config.mba;
+}
+
+static int cio_check_config(struct subchannel *sch, struct schib *schib)
+{
+ return (schib->pmcw.intparm == sch->config.intparm) &&
+ (schib->pmcw.mbi == sch->config.mbi) &&
+ (schib->pmcw.isc == sch->config.isc) &&
+ (schib->pmcw.ena == sch->config.ena) &&
+ (schib->pmcw.mme == sch->config.mme) &&
+ (schib->pmcw.mp == sch->config.mp) &&
+ (schib->pmcw.csense == sch->config.csense) &&
+ (schib->pmcw.mbfc == sch->config.mbfc) &&
+ (!sch->config.mbfc || (schib->mba == sch->config.mba));
+}
+
+/*
+ * cio_commit_config - apply configuration to the subchannel
+ *
+ * Issue msch with the desired configuration and verify the result via
+ * stsch, retrying up to five times for transient conditions (status
+ * pending, busy).
+ */
+int cio_commit_config(struct subchannel *sch)
+{
+ int ccode, retry, ret = 0;
+ struct schib schib;
+ struct irb irb;
+
+ if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ return -ENODEV;
+
+ for (retry = 0; retry < 5; retry++) {
+ /* copy desired changes to local schib */
+ cio_apply_config(sch, &schib);
+ ccode = msch(sch->schid, &schib);
+ if (ccode < 0) /* -EIO if msch gets a program check. */
+ return ccode;
+ switch (ccode) {
+ case 0: /* successful */
+ if (stsch(sch->schid, &schib) ||
+ !css_sch_is_valid(&schib))
+ return -ENODEV;
+ if (cio_check_config(sch, &schib)) {
+ /* commit changes from local schib */
+ memcpy(&sch->schib, &schib, sizeof(schib));
+ return 0;
+ }
+ ret = -EAGAIN;
+ break;
+ case 1: /* status pending */
+ ret = -EBUSY;
+ if (tsch(sch->schid, &irb))
+ return ret;
+ break;
+ case 2: /* busy */
+ udelay(100); /* allow for recovery */
+ ret = -EBUSY;
+ break;
+ case 3: /* not operational */
+ return -ENODEV;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cio_commit_config);
+
+/**
+ * cio_update_schib - Perform stsch and update schib if subchannel is valid.
+ * @sch: subchannel on which to perform stsch
+ * Return zero on success, -ENODEV otherwise.
+ */
+int cio_update_schib(struct subchannel *sch)
+{
+ struct schib schib;
+
+ if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ return -ENODEV;
+
+ memcpy(&sch->schib, &schib, sizeof(schib));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cio_update_schib);
+
+/**
+ * cio_enable_subchannel - enable a subchannel.
+ * @sch: subchannel to be enabled
+ * @intparm: interruption parameter to set
+ */
+int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
+{
+ int ret;
+
+ CIO_TRACE_EVENT(2, "ensch");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ if (sch_is_pseudo_sch(sch))
+ return -EINVAL;
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ sch->config.ena = 1;
+ sch->config.isc = sch->isc;
+ sch->config.intparm = intparm;
+
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
+ /*
+ * Got a program check in msch. Try without
+ * the concurrent sense bit the next time.
+ */
+ sch->config.csense = 0;
+ ret = cio_commit_config(sch);
+ }
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cio_enable_subchannel);
+
+/**
+ * cio_disable_subchannel - disable a subchannel.
+ * @sch: subchannel to disable
+ */
+int cio_disable_subchannel(struct subchannel *sch)
+{
+ int ret;
+
+ CIO_TRACE_EVENT(2, "dissch");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ if (sch_is_pseudo_sch(sch))
+ return 0;
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ sch->config.ena = 0;
+ ret = cio_commit_config(sch);
+
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cio_disable_subchannel);
+
+/*
+ * do_cio_interrupt() handles all normal I/O device IRQs
+ */
+static irqreturn_t do_cio_interrupt(int irq, void *dummy)
+{
+ struct tpi_info *tpi_info;
+ struct subchannel *sch;
+ struct irb *irb;
+
+ set_cpu_flag(CIF_NOHZ_DELAY);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ trace_s390_cio_interrupt(tpi_info);
+ irb = this_cpu_ptr(&cio_irb);
+ sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
+ if (!sch) {
+ /* Clear pending interrupt condition. */
+ inc_irq_stat(IRQIO_CIO);
+ tsch(tpi_info->schid, irb);
+ return IRQ_HANDLED;
+ }
+ spin_lock(sch->lock);
+ /* Store interrupt response block to lowcore. */
+ if (tsch(tpi_info->schid, irb) == 0) {
+ /* Keep subchannel information word up to date. */
+ memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw));
+ /* Call interrupt handler if there is one. */
+ if (sch->driver && sch->driver->irq)
+ sch->driver->irq(sch);
+ else
+ inc_irq_stat(IRQIO_CIO);
+ } else
+ inc_irq_stat(IRQIO_CIO);
+ spin_unlock(sch->lock);
+
+ return IRQ_HANDLED;
+}
+
+void __init init_cio_interrupts(void)
+{
+ irq_set_chip_and_handler(IO_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ if (request_irq(IO_INTERRUPT, do_cio_interrupt, 0, "I/O", NULL))
+ panic("Failed to register I/O interrupt\n");
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+static struct subchannel *console_sch;
+static struct lock_class_key console_sch_key;
+
+/*
+ * Use cio_tsch to update the subchannel status and call the interrupt handler
+ * if status had been pending. Called with the subchannel's lock held.
+ */
+void cio_tsch(struct subchannel *sch)
+{
+ struct irb *irb;
+ int irq_context;
+
+ irb = this_cpu_ptr(&cio_irb);
+ /* Store interrupt response block to lowcore. */
+ if (tsch(sch->schid, irb) != 0)
+ /* Not status pending or not operational. */
+ return;
+ memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+ /* Call interrupt handler with updated status. */
+ irq_context = in_interrupt();
+ if (!irq_context) {
+ local_bh_disable();
+ irq_enter();
+ }
+ kstat_incr_irq_this_cpu(IO_INTERRUPT);
+ if (sch->driver && sch->driver->irq)
+ sch->driver->irq(sch);
+ else
+ inc_irq_stat(IRQIO_CIO);
+ if (!irq_context) {
+ irq_exit();
+ _local_bh_enable();
+ }
+}
+
+static int cio_test_for_console(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+
+ if (stsch(schid, &schib) != 0)
+ return -ENXIO;
+ if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+ (schib.pmcw.dev == console_devno)) {
+ console_irq = schid.sch_no;
+ return 1; /* found */
+ }
+ return 0;
+}
+
+static int cio_get_console_sch_no(void)
+{
+ struct subchannel_id schid;
+ struct schib schib;
+
+ init_subchannel_id(&schid);
+ if (console_irq != -1) {
+ /* VM provided us with the irq number of the console. */
+ schid.sch_no = console_irq;
+ if (stsch(schid, &schib) != 0 ||
+ (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
+ return -1;
+ console_devno = schib.pmcw.dev;
+ } else if (console_devno != -1) {
+ /* At least the console device number is known. */
+ for_each_subchannel(cio_test_for_console, NULL);
+ }
+ return console_irq;
+}
+
+struct subchannel *cio_probe_console(void)
+{
+ struct subchannel_id schid;
+ struct subchannel *sch;
+ struct schib schib;
+ int sch_no, ret;
+
+ sch_no = cio_get_console_sch_no();
+ if (sch_no == -1) {
+ pr_warn("No CCW console was found\n");
+ return ERR_PTR(-ENODEV);
+ }
+ init_subchannel_id(&schid);
+ schid.sch_no = sch_no;
+ ret = stsch(schid, &schib);
+ if (ret)
+ return ERR_PTR(-ENODEV);
+
+ sch = css_alloc_subchannel(schid, &schib);
+ if (IS_ERR(sch))
+ return sch;
+
+ lockdep_set_class(sch->lock, &console_sch_key);
+ isc_register(CONSOLE_ISC);
+ sch->config.isc = CONSOLE_ISC;
+ sch->config.intparm = (u32)(addr_t)sch;
+ ret = cio_commit_config(sch);
+ if (ret) {
+ isc_unregister(CONSOLE_ISC);
+ put_device(&sch->dev);
+ return ERR_PTR(ret);
+ }
+ console_sch = sch;
+ return sch;
+}
+
+int cio_is_console(struct subchannel_id schid)
+{
+ if (!console_sch)
+ return 0;
+ return schid_equal(&schid, &console_sch->schid);
+}
+
+void cio_register_early_subchannels(void)
+{
+ int ret;
+
+ if (!console_sch)
+ return;
+
+ ret = css_register_subchannel(console_sch);
+ if (ret)
+ put_device(&console_sch->dev);
+}
+#endif /* CONFIG_CCW_CONSOLE */
+
+/**
+ * cio_tm_start_key - perform start function
+ * @sch: subchannel on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ *
+ * Start the tcw on the given subchannel. Return zero on success, non-zero
+ * otherwise.
+ */
+int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
+{
+ int cc;
+ union orb *orb = &to_io_private(sch)->orb;
+
+ memset(orb, 0, sizeof(union orb));
+ orb->tm.intparm = (u32) (addr_t) sch;
+ orb->tm.key = key >> 4;
+ orb->tm.b = 1;
+ orb->tm.lpm = lpm ? lpm : sch->lpm;
+ orb->tm.tcw = (u32) (addr_t) tcw;
+ cc = ssch(sch->schid, orb);
+ switch (cc) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ return -EBUSY;
+ default:
+ return cio_start_handle_notoper(sch, lpm);
+ }
+}
+EXPORT_SYMBOL_GPL(cio_tm_start_key);
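+
+/*
+ * Note: callers typically build the transport-command word with the fcx/itcw
+ * helpers (see fcx.c and itcw.c) before starting it here.
+ */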
+
+/**
+ * cio_tm_intrg - perform interrogate function
+ * @sch: subchannel on which to perform the interrogate function
+ *
+ * If the specified subchannel is running in transport-mode, perform the
+ * interrogate function. Return zero on success, non-zero otherwise.
+ */
+int cio_tm_intrg(struct subchannel *sch)
+{
+ int cc;
+
+ if (!to_io_private(sch)->orb.tm.b)
+ return -EINVAL;
+ cc = xsch(sch->schid);
+ switch (cc) {
+ case 0:
+ case 2:
+ return 0;
+ case 1:
+ return -EBUSY;
+ default:
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL_GPL(cio_tm_intrg);
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
new file mode 100644
index 000000000..dcdaba689
--- /dev/null
+++ b/drivers/s390/cio/cio.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_CIO_H
+#define S390_CIO_H
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <asm/chpid.h>
+#include <asm/cio.h>
+#include <asm/fcx.h>
+#include <asm/schid.h>
+#include "chsc.h"
+
+/*
+ * path management control word
+ */
+struct pmcw {
+ u32 intparm; /* interruption parameter */
+ u32 qf : 1; /* qdio facility */
+ u32 w : 1;
+ u32 isc : 3; /* interruption subclass */
+ u32 res5 : 3; /* reserved zeros */
+ u32 ena : 1; /* enabled */
+ u32 lm : 2; /* limit mode */
+ u32 mme : 2; /* measurement-mode enable */
+ u32 mp : 1; /* multipath mode */
+ u32 tf : 1; /* timing facility */
+ u32 dnv : 1; /* device number valid */
+ u32 dev : 16; /* device number */
+ u8 lpm; /* logical path mask */
+ u8 pnom; /* path not operational mask */
+ u8 lpum; /* last path used mask */
+ u8 pim; /* path installed mask */
+ u16 mbi; /* measurement-block index */
+ u8 pom; /* path operational mask */
+ u8 pam; /* path available mask */
+ u8 chpid[8]; /* CHPID 0-7 (if available) */
+ u32 unused1 : 8; /* reserved zeros */
+ u32 st : 3; /* subchannel type */
+ u32 unused2 : 18; /* reserved zeros */
+ u32 mbfc : 1; /* measurement block format control */
+ u32 xmwme : 1; /* extended measurement word mode enable */
+ u32 csense : 1; /* concurrent sense; can be enabled ...*/
+ /* ... per MSCH, however, if facility */
+ /* ... is not installed, this results */
+ /* ... in an operand exception. */
+} __attribute__ ((packed));
+
+/* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */
+struct tpi_info {
+ struct subchannel_id schid;
+ u32 intparm;
+ u32 adapter_IO:1;
+ u32 directed_irq:1;
+ u32 isc:3;
+ u32 :27;
+ u32 type:3;
+ u32 :12;
+} __packed __aligned(4);
+
+/* Target SCHIB configuration. */
+struct schib_config {
+ u64 mba;
+ u32 intparm;
+ u16 mbi;
+ u32 isc:3;
+ u32 ena:1;
+ u32 mme:2;
+ u32 mp:1;
+ u32 csense:1;
+ u32 mbfc:1;
+} __attribute__ ((packed));
+
+/*
+ * subchannel information block
+ */
+struct schib {
+ struct pmcw pmcw; /* path management control word */
+ union scsw scsw; /* subchannel status word */
+ __u64 mba; /* measurement block address */
+ __u8 mda[4]; /* model dependent area */
+} __attribute__ ((packed,aligned(4)));
+
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
+enum sch_todo {
+ SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
+ SCH_TODO_UNREG,
+};
+
+/* subchannel data structure used by I/O subroutines */
+struct subchannel {
+ struct subchannel_id schid;
+ spinlock_t *lock; /* subchannel lock */
+ struct mutex reg_mutex;
+ enum {
+ SUBCHANNEL_TYPE_IO = 0,
+ SUBCHANNEL_TYPE_CHSC = 1,
+ SUBCHANNEL_TYPE_MSG = 2,
+ SUBCHANNEL_TYPE_ADM = 3,
+ } st; /* subchannel type */
+ __u8 vpm; /* verified path mask */
+ __u8 lpm; /* logical path mask */
+ __u8 opm; /* operational path mask */
+ struct schib schib; /* subchannel information block */
+ int isc; /* desired interruption subclass */
+ struct chsc_ssd_info ssd_info; /* subchannel description */
+ struct device dev; /* entry in device tree */
+ struct css_driver *driver;
+ enum sch_todo todo;
+ struct work_struct todo_work;
+ struct schib_config config;
+ u64 dma_mask;
+ char *driver_override; /* Driver name to force a match */
+} __attribute__ ((aligned(8)));
+
+DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
+
+#define to_subchannel(n) container_of(n, struct subchannel, dev)
+
+extern int cio_enable_subchannel(struct subchannel *, u32);
+extern int cio_disable_subchannel (struct subchannel *);
+extern int cio_cancel (struct subchannel *);
+extern int cio_clear (struct subchannel *);
+extern int cio_cancel_halt_clear(struct subchannel *, int *);
+extern int cio_resume (struct subchannel *);
+extern int cio_halt (struct subchannel *);
+extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
+extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
+extern int cio_set_options (struct subchannel *, int);
+extern int cio_update_schib(struct subchannel *sch);
+extern int cio_commit_config(struct subchannel *sch);
+
+int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
+int cio_tm_intrg(struct subchannel *sch);
+
+extern int __init airq_init(void);
+
+/* Use with care. */
+#ifdef CONFIG_CCW_CONSOLE
+extern struct subchannel *cio_probe_console(void);
+extern int cio_is_console(struct subchannel_id);
+extern void cio_register_early_subchannels(void);
+extern void cio_tsch(struct subchannel *sch);
+#else
+#define cio_is_console(schid) 0
+static inline void cio_register_early_subchannels(void) {}
+#endif
+
+#endif
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
new file mode 100644
index 000000000..7bdbe7370
--- /dev/null
+++ b/drivers/s390/cio/cio_debug.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef CIO_DEBUG_H
+#define CIO_DEBUG_H
+
+#include <asm/debug.h>
+
+/* for use of debug feature */
+extern debug_info_t *cio_debug_msg_id;
+extern debug_info_t *cio_debug_trace_id;
+extern debug_info_t *cio_debug_crw_id;
+
+#define CIO_TRACE_EVENT(imp, txt) do { \
+ debug_text_event(cio_debug_trace_id, imp, txt); \
+ } while (0)
+
+#define CIO_MSG_EVENT(imp, args...) do { \
+ debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
+ } while (0)
+
+#define CIO_CRW_EVENT(imp, args...) do { \
+ debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
+ } while (0)
+
+static inline void CIO_HEX_EVENT(int level, void *data, int length)
+{
+ debug_event(cio_debug_trace_id, level, data, length);
+}
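+
+/*
+ * Typical usage (illustrative), as seen throughout the cio code:
+ *
+ *	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
+ *		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
+ *		      sch->schid.sch_no);
+ *	CIO_HEX_EVENT(2, &ret, sizeof(ret));
+ */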
+
+#endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
new file mode 100644
index 000000000..72dd2471e
--- /dev/null
+++ b/drivers/s390/cio/cmf.c
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Linux on zSeries Channel Measurement Facility support
+ *
+ * Copyright IBM Corp. 2000, 2006
+ *
+ * Authors: Arnd Bergmann <arndb@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/memblock.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/timex.h> /* get_tod_clock() */
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/cmb.h>
+#include <asm/div64.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+
+/*
+ * parameter to enable cmf during boot, possible uses are:
+ * "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
+ * used on any subchannel
+ * "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
+ * <num> subchannels, where <num> is an integer
+ * between 1 and 65535, default is 1024
+ */
+#define ARGSTRING "s390cmf"
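+
+/*
+ * Example (illustrative): booting with "s390cmf=2048" enables the channel
+ * measurement facility with room to measure up to 2048 subchannels.
+ */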
+
+/* indices for READCMB */
+enum cmb_index {
+ avg_utilization = -1,
+ /* basic and extended format: */
+ cmb_ssch_rsch_count = 0,
+ cmb_sample_count,
+ cmb_device_connect_time,
+ cmb_function_pending_time,
+ cmb_device_disconnect_time,
+ cmb_control_unit_queuing_time,
+ cmb_device_active_only_time,
+ /* extended format only: */
+ cmb_device_busy_time,
+ cmb_initial_command_response_time,
+};
+
+/**
+ * enum cmb_format - types of supported measurement block formats
+ *
+ * @CMF_BASIC: traditional channel measurement blocks supported
+ * by all machines that we run on
+ * @CMF_EXTENDED: improved format that was introduced with the z990
+ * machine
+ * @CMF_AUTODETECT: default: use extended format when running on a machine
+ * supporting extended format, otherwise fall back to
+ * basic format
+ */
+enum cmb_format {
+ CMF_BASIC,
+ CMF_EXTENDED,
+ CMF_AUTODETECT = -1,
+};
+
+/*
+ * format - actual format for all measurement blocks
+ *
+ * The format module parameter can be set to a value of 0 (zero)
+ * or 1, indicating basic or extended format as described for
+ * enum cmb_format.
+ */
+static int format = CMF_AUTODETECT;
+module_param(format, bint, 0444);
+
+/**
+ * struct cmb_operations - functions to use depending on cmb_format
+ *
+ * Most of these functions operate on a struct ccw_device. There is only
+ * one instance of struct cmb_operations because the format of the measurement
+ * data is guaranteed to be the same for every ccw_device.
+ *
+ * @alloc: allocate memory for a channel measurement block,
+ * either with the help of a special pool or with kmalloc
+ * @free: free memory allocated with @alloc
+ * @set: enable or disable measurement
+ * @read: read a measurement entry at an index
+ * @readall: read a measurement block in a common format
+ * @reset: clear the data in the associated measurement block and
+ * reset its time stamp
+ */
+struct cmb_operations {
+ int (*alloc) (struct ccw_device *);
+ void (*free) (struct ccw_device *);
+ int (*set) (struct ccw_device *, u32);
+ u64 (*read) (struct ccw_device *, int);
+ int (*readall)(struct ccw_device *, struct cmbdata *);
+ void (*reset) (struct ccw_device *);
+/* private: */
+ struct attribute_group *attr_group;
+};
+static struct cmb_operations *cmbops;
+
+struct cmb_data {
+ void *hw_block; /* Pointer to block updated by hardware */
+ void *last_block; /* Last changed block copied from hardware block */
+ int size; /* Size of hw_block and last_block */
+ unsigned long long last_update; /* when last_block was updated */
+};
+
+/*
+ * Our user interface is designed in terms of nanoseconds,
+ * while the hardware measures total times in its own
+ * unit.
+ */
+static inline u64 time_to_nsec(u32 value)
+{
+ return ((u64)value) * 128000ull;
+}
+
+/*
+ * Users are usually interested in average times,
+ * not accumulated time.
+ * This also helps us with atomicity problems
+ * when reading single values.
+ */
+static inline u64 time_to_avg_nsec(u32 value, u32 count)
+{
+ u64 ret;
+
+ /* no samples yet, avoid division by 0 */
+ if (count == 0)
+ return 0;
+
+ /* value comes in units of 128 µsec */
+ ret = time_to_nsec(value);
+ do_div(ret, count);
+
+ return ret;
+}
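+
+/*
+ * Worked example (illustrative): a hardware value of 100 with 50 samples is
+ * 100 * 128000 ns = 12.8 ms of accumulated time, i.e. an average of
+ * 256000 ns (256 µs) per sample.
+ */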
+
+#define CMF_OFF 0
+#define CMF_ON 2
+
+/*
+ * Activate or deactivate the channel monitor. When area is NULL,
+ * the monitor is deactivated. The channel monitor needs to
+ * be active in order to measure subchannels, which also need
+ * to be enabled.
+ */
+static inline void cmf_activate(void *area, unsigned int onoff)
+{
+ register void * __gpr2 asm("2");
+ register long __gpr1 asm("1");
+
+ __gpr2 = area;
+ __gpr1 = onoff;
+ /* activate channel measurement */
+ asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
+}
+
+static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
+ unsigned long address)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret;
+
+ sch->config.mme = mme;
+ sch->config.mbfc = mbfc;
+ /* address can be either a block address or a block index */
+ if (mbfc)
+ sch->config.mba = address;
+ else
+ sch->config.mbi = address;
+
+ ret = cio_commit_config(sch);
+ if (!mme && ret == -ENODEV) {
+ /*
+ * The task was to disable measurement block updates but
+ * the subchannel is already gone. Report success.
+ */
+ ret = 0;
+ }
+ return ret;
+}
+
+struct set_schib_struct {
+ u32 mme;
+ int mbfc;
+ unsigned long address;
+ wait_queue_head_t wait;
+ int ret;
+};
+
+#define CMF_PENDING 1
+#define SET_SCHIB_TIMEOUT (10 * HZ)
+
+static int set_schib_wait(struct ccw_device *cdev, u32 mme,
+ int mbfc, unsigned long address)
+{
+ struct set_schib_struct set_data;
+ int ret = -ENODEV;
+
+ spin_lock_irq(cdev->ccwlock);
+ if (!cdev->private->cmb)
+ goto out;
+
+ ret = set_schib(cdev, mme, mbfc, address);
+ if (ret != -EBUSY)
+ goto out;
+
+ /* if the device is not online, don't even try again */
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ goto out;
+
+ init_waitqueue_head(&set_data.wait);
+ set_data.mme = mme;
+ set_data.mbfc = mbfc;
+ set_data.address = address;
+ set_data.ret = CMF_PENDING;
+
+ cdev->private->state = DEV_STATE_CMFCHANGE;
+ cdev->private->cmb_wait = &set_data;
+ spin_unlock_irq(cdev->ccwlock);
+
+ ret = wait_event_interruptible_timeout(set_data.wait,
+ set_data.ret != CMF_PENDING,
+ SET_SCHIB_TIMEOUT);
+ spin_lock_irq(cdev->ccwlock);
+ if (ret <= 0) {
+ if (set_data.ret == CMF_PENDING) {
+ set_data.ret = (ret == 0) ? -ETIME : ret;
+ if (cdev->private->state == DEV_STATE_CMFCHANGE)
+ cdev->private->state = DEV_STATE_ONLINE;
+ }
+ }
+ cdev->private->cmb_wait = NULL;
+ ret = set_data.ret;
+out:
+ spin_unlock_irq(cdev->ccwlock);
+ return ret;
+}
+
+void retry_set_schib(struct ccw_device *cdev)
+{
+ struct set_schib_struct *set_data = cdev->private->cmb_wait;
+
+ if (!set_data)
+ return;
+
+ set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
+ set_data->address);
+ wake_up(&set_data->wait);
+}
+
+static int cmf_copy_block(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct cmb_data *cmb_data;
+ void *hw_block;
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
+ /* Don't copy if a start function is in progress. */
+ if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
+ (scsw_actl(&sch->schib.scsw) &
+ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
+ (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
+ return -EBUSY;
+ }
+ cmb_data = cdev->private->cmb;
+ hw_block = cmb_data->hw_block;
+ memcpy(cmb_data->last_block, hw_block, cmb_data->size);
+ cmb_data->last_update = get_tod_clock();
+ return 0;
+}
+
+struct copy_block_struct {
+ wait_queue_head_t wait;
+ int ret;
+};
+
+static int cmf_cmb_copy_wait(struct ccw_device *cdev)
+{
+ struct copy_block_struct copy_block;
+ int ret = -ENODEV;
+
+ spin_lock_irq(cdev->ccwlock);
+ if (!cdev->private->cmb)
+ goto out;
+
+ ret = cmf_copy_block(cdev);
+ if (ret != -EBUSY)
+ goto out;
+
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ goto out;
+
+ init_waitqueue_head(&copy_block.wait);
+ copy_block.ret = CMF_PENDING;
+
+ cdev->private->state = DEV_STATE_CMFUPDATE;
+ cdev->private->cmb_wait = &copy_block;
+ spin_unlock_irq(cdev->ccwlock);
+
+ ret = wait_event_interruptible(copy_block.wait,
+ copy_block.ret != CMF_PENDING);
+ spin_lock_irq(cdev->ccwlock);
+ if (ret) {
+ if (copy_block.ret == CMF_PENDING) {
+ copy_block.ret = -ERESTARTSYS;
+ if (cdev->private->state == DEV_STATE_CMFUPDATE)
+ cdev->private->state = DEV_STATE_ONLINE;
+ }
+ }
+ cdev->private->cmb_wait = NULL;
+ ret = copy_block.ret;
+out:
+ spin_unlock_irq(cdev->ccwlock);
+ return ret;
+}
+
+void cmf_retry_copy_block(struct ccw_device *cdev)
+{
+ struct copy_block_struct *copy_block = cdev->private->cmb_wait;
+
+ if (!copy_block)
+ return;
+
+ copy_block->ret = cmf_copy_block(cdev);
+ wake_up(&copy_block->wait);
+}
+
+static void cmf_generic_reset(struct ccw_device *cdev)
+{
+ struct cmb_data *cmb_data;
+
+ spin_lock_irq(cdev->ccwlock);
+ cmb_data = cdev->private->cmb;
+ if (cmb_data) {
+ memset(cmb_data->last_block, 0, cmb_data->size);
+ /*
+ * Need to reset hw block as well to make the hardware start
+ * from 0 again.
+ */
+ memset(cmb_data->hw_block, 0, cmb_data->size);
+ cmb_data->last_update = 0;
+ }
+ cdev->private->cmb_start_time = get_tod_clock();
+ spin_unlock_irq(cdev->ccwlock);
+}
+
+/**
+ * struct cmb_area - container for global cmb data
+ *
+ * @mem: pointer to CMBs (only in basic measurement mode)
+ * @list: contains a linked list of all subchannels
+ * @num_channels: number of channels to be measured
+ * @lock: protect concurrent access to @mem and @list
+ */
+struct cmb_area {
+ struct cmb *mem;
+ struct list_head list;
+ int num_channels;
+ spinlock_t lock;
+};
+
+static struct cmb_area cmb_area = {
+ .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
+ .list = LIST_HEAD_INIT(cmb_area.list),
+ .num_channels = 1024,
+};
+
+/* ****** old style CMB handling ********/
+
+/*
+ * Basic channel measurement blocks are allocated in one contiguous
+ * block of memory, which can not be moved as long as any channel
+ * is active. Therefore, a maximum number of subchannels needs to
+ * be defined somewhere. This is a module parameter, defaulting to
+ * a reasonable value of 1024, or 32 kb of memory.
+ * Current kernels don't allow kmalloc with more than 128kb, so the
+ * maximum is 4096.
+ */
+
+module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
+
+/**
+ * struct cmb - basic channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @reserved: unused in basic measurement mode
+ *
+ * The measurement block as used by the hardware. The fields are described
+ * further in z/Architecture Principles of Operation, chapter 17.
+ *
+ * The cmb area made up from these blocks must be a contiguous array and may
+ * not be reallocated or freed.
+ * Only one cmb area can be present in the system.
+ */
+struct cmb {
+ u16 ssch_rsch_count;
+ u16 sample_count;
+ u32 device_connect_time;
+ u32 function_pending_time;
+ u32 device_disconnect_time;
+ u32 control_unit_queuing_time;
+ u32 device_active_only_time;
+ u32 reserved[2];
+};
+
+/*
+ * Insert a single device into the cmb_area list.
+ * Called with cmb_area.lock held from alloc_cmb.
+ */
+static int alloc_cmb_single(struct ccw_device *cdev,
+ struct cmb_data *cmb_data)
+{
+ struct cmb *cmb;
+ struct ccw_device_private *node;
+ int ret;
+
+ spin_lock_irq(cdev->ccwlock);
+ if (!list_empty(&cdev->private->cmb_list)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Find first unused cmb in cmb_area.mem.
+ * This is a little tricky: cmb_area.list
+ * remains sorted by ->cmb->hw_block pointers.
+ */
+ cmb = cmb_area.mem;
+ list_for_each_entry(node, &cmb_area.list, cmb_list) {
+ struct cmb_data *data;
+ data = node->cmb;
+ if ((struct cmb *)data->hw_block > cmb)
+ break;
+ cmb++;
+ }
+ if (cmb - cmb_area.mem >= cmb_area.num_channels) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* insert new cmb */
+ list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
+ cmb_data->hw_block = cmb;
+ cdev->private->cmb = cmb_data;
+ ret = 0;
+out:
+ spin_unlock_irq(cdev->ccwlock);
+ return ret;
+}
+
+static int alloc_cmb(struct ccw_device *cdev)
+{
+ int ret;
+ struct cmb *mem;
+ ssize_t size;
+ struct cmb_data *cmb_data;
+
+ /* Allocate private cmb_data. */
+ cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
+ if (!cmb_data)
+ return -ENOMEM;
+
+ cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
+ if (!cmb_data->last_block) {
+ kfree(cmb_data);
+ return -ENOMEM;
+ }
+ cmb_data->size = sizeof(struct cmb);
+ spin_lock(&cmb_area.lock);
+
+ if (!cmb_area.mem) {
+ /* there is no user yet, so we need a new area */
+ size = sizeof(struct cmb) * cmb_area.num_channels;
+ WARN_ON(!list_empty(&cmb_area.list));
+
+ spin_unlock(&cmb_area.lock);
+ mem = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
+ get_order(size));
+ spin_lock(&cmb_area.lock);
+
+ if (cmb_area.mem) {
+ /* ok, another thread was faster */
+ free_pages((unsigned long)mem, get_order(size));
+ } else if (!mem) {
+ /* no luck */
+ ret = -ENOMEM;
+ goto out;
+ } else {
+ /* everything ok */
+ memset(mem, 0, size);
+ cmb_area.mem = mem;
+ cmf_activate(cmb_area.mem, CMF_ON);
+ }
+ }
+
+ /* do the actual allocation */
+ ret = alloc_cmb_single(cdev, cmb_data);
+out:
+ spin_unlock(&cmb_area.lock);
+ if (ret) {
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
+ }
+ return ret;
+}
+
+static void free_cmb(struct ccw_device *cdev)
+{
+ struct ccw_device_private *priv;
+ struct cmb_data *cmb_data;
+
+ spin_lock(&cmb_area.lock);
+ spin_lock_irq(cdev->ccwlock);
+
+ priv = cdev->private;
+ cmb_data = priv->cmb;
+ priv->cmb = NULL;
+ if (cmb_data)
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
+ list_del_init(&priv->cmb_list);
+
+ if (list_empty(&cmb_area.list)) {
+ ssize_t size;
+ size = sizeof(struct cmb) * cmb_area.num_channels;
+ cmf_activate(NULL, CMF_OFF);
+ free_pages((unsigned long)cmb_area.mem, get_order(size));
+ cmb_area.mem = NULL;
+ }
+ spin_unlock_irq(cdev->ccwlock);
+ spin_unlock(&cmb_area.lock);
+}
+
+static int set_cmb(struct ccw_device *cdev, u32 mme)
+{
+ u16 offset;
+ struct cmb_data *cmb_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (!cdev->private->cmb) {
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return -EINVAL;
+ }
+ cmb_data = cdev->private->cmb;
+ offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+ return set_schib_wait(cdev, mme, 0, offset);
+}
+
+/* calculate utilization in 0.1 percent units */
+static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
+ u64 device_disconnect_time, u64 start_time)
+{
+ u64 utilization, elapsed_time;
+
+ utilization = time_to_nsec(device_connect_time +
+ function_pending_time +
+ device_disconnect_time);
+
+ elapsed_time = get_tod_clock() - start_time;
+ elapsed_time = tod_to_ns(elapsed_time);
+ elapsed_time /= 1000;
+
+ return elapsed_time ? (utilization / elapsed_time) : 0;
+}
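+
+/*
+ * Worked example (illustrative numbers): if connect, pending and
+ * disconnect time add up to 250 ms and one second of wall-clock time has
+ * elapsed since cmb_start_time, the result is
+ *
+ *	250000000 / (1000000000 / 1000) = 250
+ *
+ * i.e. 25.0% as printed by cmb_show_avg_utilization() further down.
+ */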
+
+static u64 read_cmb(struct ccw_device *cdev, int index)
+{
+ struct cmb_data *cmb_data;
+ unsigned long flags;
+ struct cmb *cmb;
+ u64 ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data)
+ goto out;
+
+ cmb = cmb_data->hw_block;
+ switch (index) {
+ case avg_utilization:
+ ret = __cmb_utilization(cmb->device_connect_time,
+ cmb->function_pending_time,
+ cmb->device_disconnect_time,
+ cdev->private->cmb_start_time);
+ goto out;
+ case cmb_ssch_rsch_count:
+ ret = cmb->ssch_rsch_count;
+ goto out;
+ case cmb_sample_count:
+ ret = cmb->sample_count;
+ goto out;
+ case cmb_device_connect_time:
+ val = cmb->device_connect_time;
+ break;
+ case cmb_function_pending_time:
+ val = cmb->function_pending_time;
+ break;
+ case cmb_device_disconnect_time:
+ val = cmb->device_disconnect_time;
+ break;
+ case cmb_control_unit_queuing_time:
+ val = cmb->control_unit_queuing_time;
+ break;
+ case cmb_device_active_only_time:
+ val = cmb->device_active_only_time;
+ break;
+ default:
+ goto out;
+ }
+ ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
+{
+ struct cmb *cmb;
+ struct cmb_data *cmb_data;
+ u64 time;
+ unsigned long flags;
+ int ret;
+
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
+ return ret;
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (cmb_data->last_update == 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ cmb = cmb_data->last_block;
+ time = cmb_data->last_update - cdev->private->cmb_start_time;
+
+ memset(data, 0, sizeof(struct cmbdata));
+
+ /* we only know values before device_busy_time */
+ data->size = offsetof(struct cmbdata, device_busy_time);
+
+ data->elapsed_time = tod_to_ns(time);
+
+ /* copy data to new structure */
+ data->ssch_rsch_count = cmb->ssch_rsch_count;
+ data->sample_count = cmb->sample_count;
+
+ /* time fields are converted to nanoseconds while copying */
+ data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+ data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+ data->device_disconnect_time =
+ time_to_nsec(cmb->device_disconnect_time);
+ data->control_unit_queuing_time
+ = time_to_nsec(cmb->control_unit_queuing_time);
+ data->device_active_only_time
+ = time_to_nsec(cmb->device_active_only_time);
+ ret = 0;
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+static void reset_cmb(struct ccw_device *cdev)
+{
+ cmf_generic_reset(cdev);
+}
+
+static int cmf_enabled(struct ccw_device *cdev)
+{
+ int enabled;
+
+ spin_lock_irq(cdev->ccwlock);
+ enabled = !!cdev->private->cmb;
+ spin_unlock_irq(cdev->ccwlock);
+
+ return enabled;
+}
+
+static struct attribute_group cmf_attr_group;
+
+static struct cmb_operations cmbops_basic = {
+ .alloc = alloc_cmb,
+ .free = free_cmb,
+ .set = set_cmb,
+ .read = read_cmb,
+ .readall = readall_cmb,
+ .reset = reset_cmb,
+ .attr_group = &cmf_attr_group,
+};
+
+/* ******** extended cmb handling ********/
+
+/**
+ * struct cmbe - extended channel measurement block
+ * @ssch_rsch_count: number of ssch and rsch
+ * @sample_count: number of samples
+ * @device_connect_time: time of device connect
+ * @function_pending_time: time of function pending
+ * @device_disconnect_time: time of device disconnect
+ * @control_unit_queuing_time: time of control unit queuing
+ * @device_active_only_time: time of device active only
+ * @device_busy_time: time of device busy
+ * @initial_command_response_time: initial command response time
+ * @reserved: unused
+ *
+ * The measurement block as used by the hardware. May be in any 64 bit physical
+ * location.
+ * The fields are described further in z/Architecture Principles of Operation,
+ * third edition, chapter 17.
+ */
+struct cmbe {
+ u32 ssch_rsch_count;
+ u32 sample_count;
+ u32 device_connect_time;
+ u32 function_pending_time;
+ u32 device_disconnect_time;
+ u32 control_unit_queuing_time;
+ u32 device_active_only_time;
+ u32 device_busy_time;
+ u32 initial_command_response_time;
+ u32 reserved[7];
+} __packed __aligned(64);
+
+static struct kmem_cache *cmbe_cache;
+
+static int alloc_cmbe(struct ccw_device *cdev)
+{
+ struct cmb_data *cmb_data;
+ struct cmbe *cmbe;
+ int ret = -ENOMEM;
+
+ cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
+ if (!cmbe)
+ return ret;
+
+ cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
+ if (!cmb_data)
+ goto out_free;
+
+ cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
+ if (!cmb_data->last_block)
+ goto out_free;
+
+ cmb_data->size = sizeof(*cmbe);
+ cmb_data->hw_block = cmbe;
+
+ spin_lock(&cmb_area.lock);
+ spin_lock_irq(cdev->ccwlock);
+ if (cdev->private->cmb)
+ goto out_unlock;
+
+ cdev->private->cmb = cmb_data;
+
+ /* activate global measurement if this is the first channel */
+ if (list_empty(&cmb_area.list))
+ cmf_activate(NULL, CMF_ON);
+ list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
+
+ spin_unlock_irq(cdev->ccwlock);
+ spin_unlock(&cmb_area.lock);
+ return 0;
+
+out_unlock:
+ spin_unlock_irq(cdev->ccwlock);
+ spin_unlock(&cmb_area.lock);
+ ret = -EBUSY;
+out_free:
+ if (cmb_data)
+ kfree(cmb_data->last_block);
+ kfree(cmb_data);
+ kmem_cache_free(cmbe_cache, cmbe);
+
+ return ret;
+}
+
+static void free_cmbe(struct ccw_device *cdev)
+{
+ struct cmb_data *cmb_data;
+
+ spin_lock(&cmb_area.lock);
+ spin_lock_irq(cdev->ccwlock);
+ cmb_data = cdev->private->cmb;
+ cdev->private->cmb = NULL;
+ if (cmb_data) {
+ kfree(cmb_data->last_block);
+ kmem_cache_free(cmbe_cache, cmb_data->hw_block);
+ }
+ kfree(cmb_data);
+
+ /* deactivate global measurement if this is the last channel */
+ list_del_init(&cdev->private->cmb_list);
+ if (list_empty(&cmb_area.list))
+ cmf_activate(NULL, CMF_OFF);
+ spin_unlock_irq(cdev->ccwlock);
+ spin_unlock(&cmb_area.lock);
+}
+
+static int set_cmbe(struct ccw_device *cdev, u32 mme)
+{
+ unsigned long mba;
+ struct cmb_data *cmb_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ if (!cdev->private->cmb) {
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return -EINVAL;
+ }
+ cmb_data = cdev->private->cmb;
+ mba = mme ? (unsigned long) cmb_data->hw_block : 0;
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+
+ return set_schib_wait(cdev, mme, 1, mba);
+}
+
+static u64 read_cmbe(struct ccw_device *cdev, int index)
+{
+ struct cmb_data *cmb_data;
+ unsigned long flags;
+ struct cmbe *cmb;
+ u64 ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data)
+ goto out;
+
+ cmb = cmb_data->hw_block;
+ switch (index) {
+ case avg_utilization:
+ ret = __cmb_utilization(cmb->device_connect_time,
+ cmb->function_pending_time,
+ cmb->device_disconnect_time,
+ cdev->private->cmb_start_time);
+ goto out;
+ case cmb_ssch_rsch_count:
+ ret = cmb->ssch_rsch_count;
+ goto out;
+ case cmb_sample_count:
+ ret = cmb->sample_count;
+ goto out;
+ case cmb_device_connect_time:
+ val = cmb->device_connect_time;
+ break;
+ case cmb_function_pending_time:
+ val = cmb->function_pending_time;
+ break;
+ case cmb_device_disconnect_time:
+ val = cmb->device_disconnect_time;
+ break;
+ case cmb_control_unit_queuing_time:
+ val = cmb->control_unit_queuing_time;
+ break;
+ case cmb_device_active_only_time:
+ val = cmb->device_active_only_time;
+ break;
+ case cmb_device_busy_time:
+ val = cmb->device_busy_time;
+ break;
+ case cmb_initial_command_response_time:
+ val = cmb->initial_command_response_time;
+ break;
+ default:
+ goto out;
+ }
+ ret = time_to_avg_nsec(val, cmb->sample_count);
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
+{
+ struct cmbe *cmb;
+ struct cmb_data *cmb_data;
+ u64 time;
+ unsigned long flags;
+ int ret;
+
+ ret = cmf_cmb_copy_wait(cdev);
+ if (ret < 0)
+ return ret;
+ spin_lock_irqsave(cdev->ccwlock, flags);
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (cmb_data->last_update == 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ time = cmb_data->last_update - cdev->private->cmb_start_time;
+
+ memset(data, 0, sizeof(struct cmbdata));
+
+ /* we only know values before device_busy_time */
+ data->size = offsetof(struct cmbdata, device_busy_time);
+
+ data->elapsed_time = tod_to_ns(time);
+
+ cmb = cmb_data->last_block;
+ /* copy data to new structure */
+ data->ssch_rsch_count = cmb->ssch_rsch_count;
+ data->sample_count = cmb->sample_count;
+
+ /* time fields are converted to nanoseconds while copying */
+ data->device_connect_time = time_to_nsec(cmb->device_connect_time);
+ data->function_pending_time = time_to_nsec(cmb->function_pending_time);
+ data->device_disconnect_time =
+ time_to_nsec(cmb->device_disconnect_time);
+ data->control_unit_queuing_time
+ = time_to_nsec(cmb->control_unit_queuing_time);
+ data->device_active_only_time
+ = time_to_nsec(cmb->device_active_only_time);
+ data->device_busy_time = time_to_nsec(cmb->device_busy_time);
+ data->initial_command_response_time
+ = time_to_nsec(cmb->initial_command_response_time);
+
+ ret = 0;
+out:
+ spin_unlock_irqrestore(cdev->ccwlock, flags);
+ return ret;
+}
+
+static void reset_cmbe(struct ccw_device *cdev)
+{
+ cmf_generic_reset(cdev);
+}
+
+static struct attribute_group cmf_attr_group_ext;
+
+static struct cmb_operations cmbops_extended = {
+ .alloc = alloc_cmbe,
+ .free = free_cmbe,
+ .set = set_cmbe,
+ .read = read_cmbe,
+ .readall = readall_cmbe,
+ .reset = reset_cmbe,
+ .attr_group = &cmf_attr_group_ext,
+};
+
+static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
+{
+ return sprintf(buf, "%lld\n",
+ (unsigned long long) cmf_read(to_ccwdev(dev), idx));
+}
+
+static ssize_t cmb_show_avg_sample_interval(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ unsigned long count;
+ long interval;
+
+ count = cmf_read(cdev, cmb_sample_count);
+ spin_lock_irq(cdev->ccwlock);
+ if (count) {
+ interval = get_tod_clock() - cdev->private->cmb_start_time;
+ interval = tod_to_ns(interval);
+ interval /= count;
+ } else
+ interval = -1;
+ spin_unlock_irq(cdev->ccwlock);
+ return sprintf(buf, "%ld\n", interval);
+}
+
+static ssize_t cmb_show_avg_utilization(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
+
+ return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
+}
+
+#define cmf_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(name, 0444, show_##name, NULL);
+
+#define cmf_attr_avg(name) \
+static ssize_t show_avg_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return cmb_show_attr((dev), buf, cmb_##name); } \
+static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
+
+cmf_attr(ssch_rsch_count);
+cmf_attr(sample_count);
+cmf_attr_avg(device_connect_time);
+cmf_attr_avg(function_pending_time);
+cmf_attr_avg(device_disconnect_time);
+cmf_attr_avg(control_unit_queuing_time);
+cmf_attr_avg(device_active_only_time);
+cmf_attr_avg(device_busy_time);
+cmf_attr_avg(initial_command_response_time);
+
+static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
+ NULL);
+static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
+
+static struct attribute *cmf_attributes[] = {
+ &dev_attr_avg_sample_interval.attr,
+ &dev_attr_avg_utilization.attr,
+ &dev_attr_ssch_rsch_count.attr,
+ &dev_attr_sample_count.attr,
+ &dev_attr_avg_device_connect_time.attr,
+ &dev_attr_avg_function_pending_time.attr,
+ &dev_attr_avg_device_disconnect_time.attr,
+ &dev_attr_avg_control_unit_queuing_time.attr,
+ &dev_attr_avg_device_active_only_time.attr,
+ NULL,
+};
+
+static struct attribute_group cmf_attr_group = {
+ .name = "cmf",
+ .attrs = cmf_attributes,
+};
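+
+/*
+ * For illustration: after enable_cmf() has created this group for a ccw
+ * device, the counters show up below the device directory in sysfs, e.g.
+ * (hypothetical bus id):
+ *
+ *	/sys/bus/ccw/devices/0.0.4711/cmf/ssch_rsch_count
+ *	/sys/bus/ccw/devices/0.0.4711/cmf/avg_utilization
+ */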
+
+static struct attribute *cmf_attributes_ext[] = {
+ &dev_attr_avg_sample_interval.attr,
+ &dev_attr_avg_utilization.attr,
+ &dev_attr_ssch_rsch_count.attr,
+ &dev_attr_sample_count.attr,
+ &dev_attr_avg_device_connect_time.attr,
+ &dev_attr_avg_function_pending_time.attr,
+ &dev_attr_avg_device_disconnect_time.attr,
+ &dev_attr_avg_control_unit_queuing_time.attr,
+ &dev_attr_avg_device_active_only_time.attr,
+ &dev_attr_avg_device_busy_time.attr,
+ &dev_attr_avg_initial_command_response_time.attr,
+ NULL,
+};
+
+static struct attribute_group cmf_attr_group_ext = {
+ .name = "cmf",
+ .attrs = cmf_attributes_ext,
+};
+
+static ssize_t cmb_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ return sprintf(buf, "%d\n", cmf_enabled(cdev));
+}
+
+static ssize_t cmb_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t c)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ ret = disable_cmf(cdev);
+ break;
+ case 1:
+ ret = enable_cmf(cdev);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret ? ret : c;
+}
+DEVICE_ATTR_RW(cmb_enable);
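+
+/*
+ * For illustration, measurements can also be toggled from user space via
+ * this attribute (hypothetical bus id):
+ *
+ *	echo 1 > /sys/bus/ccw/devices/0.0.4711/cmb_enable
+ *	echo 0 > /sys/bus/ccw/devices/0.0.4711/cmb_enable
+ */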
+
+int ccw_set_cmf(struct ccw_device *cdev, int enable)
+{
+ return cmbops->set(cdev, enable ? 2 : 0);
+}
+
+/**
+ * enable_cmf() - switch on the channel measurement for a specific device
+ * @cdev: The ccw device to be enabled
+ *
+ * Enable channel measurements for @cdev. If this is called on a device
+ * for which channel measurement is already enabled a reset of the
+ * measurement data is triggered.
+ * Returns: %0 for success or a negative error value.
+ * Context:
+ * non-atomic
+ */
+int enable_cmf(struct ccw_device *cdev)
+{
+ int ret = 0;
+
+ device_lock(&cdev->dev);
+ if (cmf_enabled(cdev)) {
+ cmbops->reset(cdev);
+ goto out_unlock;
+ }
+ get_device(&cdev->dev);
+ ret = cmbops->alloc(cdev);
+ if (ret)
+ goto out;
+ cmbops->reset(cdev);
+ ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
+ if (ret) {
+ cmbops->free(cdev);
+ goto out;
+ }
+ ret = cmbops->set(cdev, 2);
+ if (ret) {
+ sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
+ cmbops->free(cdev);
+ }
+out:
+ if (ret)
+ put_device(&cdev->dev);
+out_unlock:
+ device_unlock(&cdev->dev);
+ return ret;
+}
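+
+/*
+ * Minimal usage sketch for a driver holding a ccw_device reference
+ * (error handling shortened):
+ *
+ *	ret = enable_cmf(cdev);
+ *	if (ret)
+ *		dev_warn(&cdev->dev, "cmf not available (%d)\n", ret);
+ *	...
+ *	disable_cmf(cdev);
+ */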
+
+/**
+ * __disable_cmf() - switch off the channel measurement for a specific device
+ * @cdev: The ccw device to be disabled
+ *
+ * Returns: %0 for success or a negative error value.
+ *
+ * Context:
+ * non-atomic, device_lock() held.
+ */
+int __disable_cmf(struct ccw_device *cdev)
+{
+ int ret;
+
+ ret = cmbops->set(cdev, 0);
+ if (ret)
+ return ret;
+
+ sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
+ cmbops->free(cdev);
+ put_device(&cdev->dev);
+
+ return ret;
+}
+
+/**
+ * disable_cmf() - switch off the channel measurement for a specific device
+ * @cdev: The ccw device to be disabled
+ *
+ * Returns: %0 for success or a negative error value.
+ *
+ * Context:
+ * non-atomic
+ */
+int disable_cmf(struct ccw_device *cdev)
+{
+ int ret;
+
+ device_lock(&cdev->dev);
+ ret = __disable_cmf(cdev);
+ device_unlock(&cdev->dev);
+
+ return ret;
+}
+
+/**
+ * cmf_read() - read one value from the current channel measurement block
+ * @cdev: the channel to be read
+ * @index: the index of the value to be read
+ *
+ * Returns: The value read or %0 if the value cannot be read.
+ *
+ * Context:
+ * any
+ */
+u64 cmf_read(struct ccw_device *cdev, int index)
+{
+ return cmbops->read(cdev, index);
+}
+
+/**
+ * cmf_readall() - read the current channel measurement block
+ * @cdev: the channel to be read
+ * @data: a pointer to a data block that will be filled
+ *
+ * Returns: %0 on success, a negative error value otherwise.
+ *
+ * Context:
+ * any
+ */
+int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
+{
+ return cmbops->readall(cdev, data);
+}
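+
+/*
+ * Usage sketch: a caller interested in the complete counter set would do
+ * something like
+ *
+ *	struct cmbdata data;
+ *
+ *	if (!cmf_readall(cdev, &data))
+ *		pr_debug("connect time: %llu ns\n",
+ *			 (unsigned long long) data.device_connect_time);
+ */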
+
+/* Reenable cmf when a disconnected device becomes available again. */
+int cmf_reenable(struct ccw_device *cdev)
+{
+ cmbops->reset(cdev);
+ return cmbops->set(cdev, 2);
+}
+
+/**
+ * cmf_reactivate() - reactivate measurement block updates
+ *
+ * Use this during resume from hibernate.
+ */
+void cmf_reactivate(void)
+{
+ spin_lock(&cmb_area.lock);
+ if (!list_empty(&cmb_area.list))
+ cmf_activate(cmb_area.mem, CMF_ON);
+ spin_unlock(&cmb_area.lock);
+}
+
+static int __init init_cmbe(void)
+{
+ cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
+ __alignof__(struct cmbe), 0, NULL);
+
+ return cmbe_cache ? 0 : -ENOMEM;
+}
+
+static int __init init_cmf(void)
+{
+ char *format_string;
+ char *detect_string;
+ int ret;
+
+ /*
+ * If the user did not give a parameter, see if we are running on a
+ * machine supporting extended measurement blocks, otherwise fall back
+ * to basic mode.
+ */
+ if (format == CMF_AUTODETECT) {
+ if (!css_general_characteristics.ext_mb) {
+ format = CMF_BASIC;
+ } else {
+ format = CMF_EXTENDED;
+ }
+ detect_string = "autodetected";
+ } else {
+ detect_string = "parameter";
+ }
+
+ switch (format) {
+ case CMF_BASIC:
+ format_string = "basic";
+ cmbops = &cmbops_basic;
+ break;
+ case CMF_EXTENDED:
+ format_string = "extended";
+ cmbops = &cmbops_extended;
+
+ ret = init_cmbe();
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pr_info("Channel measurement facility initialized using format "
+ "%s (mode %s)\n", format_string, detect_string);
+ return 0;
+}
+device_initcall(init_cmf);
+
+EXPORT_SYMBOL_GPL(enable_cmf);
+EXPORT_SYMBOL_GPL(disable_cmf);
+EXPORT_SYMBOL_GPL(cmf_read);
+EXPORT_SYMBOL_GPL(cmf_readall);
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
new file mode 100644
index 000000000..fc285ca41
--- /dev/null
+++ b/drivers/s390/cio/crw.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Channel report handling code
+ *
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Ingo Adlung <adlung@de.ibm.com>,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Cornelia Huck <cornelia.huck@de.ibm.com>,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>,
+ */
+
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <asm/crw.h>
+#include <asm/ctl_reg.h>
+#include "ioasm.h"
+
+static DEFINE_MUTEX(crw_handler_mutex);
+static crw_handler_t crw_handlers[NR_RSCS];
+static atomic_t crw_nr_req = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
+
+/**
+ * crw_register_handler() - register a channel report word handler
+ * @rsc: reporting source code to handle
+ * @handler: handler to be registered
+ *
+ * Returns %0 on success and a negative error value otherwise.
+ */
+int crw_register_handler(int rsc, crw_handler_t handler)
+{
+ int rc = 0;
+
+ if ((rsc < 0) || (rsc >= NR_RSCS))
+ return -EINVAL;
+ mutex_lock(&crw_handler_mutex);
+ if (crw_handlers[rsc])
+ rc = -EBUSY;
+ else
+ crw_handlers[rsc] = handler;
+ mutex_unlock(&crw_handler_mutex);
+ return rc;
+}
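+
+/*
+ * Registration sketch with a hypothetical handler; crw_handler_t takes the
+ * two (possibly chained) CRWs and an overflow indication:
+ *
+ *	static void my_crw_handler(struct crw *crw0, struct crw *crw1,
+ *				   int overflow)
+ *	{
+ *		if (overflow)
+ *			return;
+ *		... evaluate crw0 (and crw1 if the report was chained) ...
+ *	}
+ *
+ *	rc = crw_register_handler(CRW_RSC_CPATH, my_crw_handler);
+ */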
+
+/**
+ * crw_unregister_handler() - unregister a channel report word handler
+ * @rsc: reporting source code to handle
+ */
+void crw_unregister_handler(int rsc)
+{
+ if ((rsc < 0) || (rsc >= NR_RSCS))
+ return;
+ mutex_lock(&crw_handler_mutex);
+ crw_handlers[rsc] = NULL;
+ mutex_unlock(&crw_handler_mutex);
+}
+
+/*
+ * Retrieve CRWs and call function to handle event.
+ */
+static int crw_collect_info(void *unused)
+{
+ struct crw crw[2];
+ int ccode, signal;
+ unsigned int chain;
+
+repeat:
+ signal = wait_event_interruptible(crw_handler_wait_q,
+ atomic_read(&crw_nr_req) > 0);
+ if (unlikely(signal))
+ atomic_inc(&crw_nr_req);
+ chain = 0;
+ while (1) {
+ crw_handler_t handler;
+
+ if (unlikely(chain > 1)) {
+ struct crw tmp_crw;
+
+ printk(KERN_WARNING"%s: Code does not support more "
+ "than two chained crws; please report to "
+ "linux390@de.ibm.com!\n", __func__);
+ ccode = stcrw(&tmp_crw);
+ printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ __func__, tmp_crw.slct, tmp_crw.oflw,
+ tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
+ tmp_crw.erc, tmp_crw.rsid);
+ printk(KERN_WARNING"%s: This was crw number %x in the "
+ "chain\n", __func__, chain);
+ if (ccode != 0)
+ break;
+ chain = tmp_crw.chn ? chain + 1 : 0;
+ continue;
+ }
+ ccode = stcrw(&crw[chain]);
+ if (ccode != 0)
+ break;
+ printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw[chain].slct, crw[chain].oflw, crw[chain].chn,
+ crw[chain].rsc, crw[chain].anc, crw[chain].erc,
+ crw[chain].rsid);
+ /* Check for overflows. */
+ if (crw[chain].oflw) {
+ int i;
+
+ pr_debug("%s: crw overflow detected!\n", __func__);
+ mutex_lock(&crw_handler_mutex);
+ for (i = 0; i < NR_RSCS; i++) {
+ if (crw_handlers[i])
+ crw_handlers[i](NULL, NULL, 1);
+ }
+ mutex_unlock(&crw_handler_mutex);
+ chain = 0;
+ continue;
+ }
+ if (crw[0].chn && !chain) {
+ chain++;
+ continue;
+ }
+ mutex_lock(&crw_handler_mutex);
+ handler = crw_handlers[crw[chain].rsc];
+ if (handler)
+ handler(&crw[0], chain ? &crw[1] : NULL, 0);
+ mutex_unlock(&crw_handler_mutex);
+ /* chain is always 0 or 1 here. */
+ chain = crw[chain].chn ? chain + 1 : 0;
+ }
+ if (atomic_dec_and_test(&crw_nr_req))
+ wake_up(&crw_handler_wait_q);
+ goto repeat;
+ return 0;
+}
+
+void crw_handle_channel_report(void)
+{
+ atomic_inc(&crw_nr_req);
+ wake_up(&crw_handler_wait_q);
+}
+
+void crw_wait_for_channel_report(void)
+{
+ crw_handle_channel_report();
+ wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
+}
+
+/*
+ * Machine checks for the channel subsystem must be enabled
+ * after the channel subsystem is initialized
+ */
+static int __init crw_machine_check_init(void)
+{
+ struct task_struct *task;
+
+ task = kthread_run(crw_collect_info, NULL, "kmcheck");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ ctl_set_bit(14, 28); /* enable channel report MCH */
+ return 0;
+}
+device_initcall(crw_machine_check_init);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
new file mode 100644
index 000000000..cf2c3c4c5
--- /dev/null
+++ b/drivers/s390/cio/css.c
@@ -0,0 +1,1578 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * driver for channel subsystem
+ *
+ * Copyright IBM Corp. 2002, 2010
+ *
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/proc_fs.h>
+#include <linux/genalloc.h>
+#include <linux/dma-mapping.h>
+#include <asm/isc.h>
+#include <asm/crw.h>
+
+#include "css.h"
+#include "cio.h"
+#include "blacklist.h"
+#include "cio_debug.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "device.h"
+#include "idset.h"
+#include "chp.h"
+
+int css_init_done = 0;
+int max_ssid;
+
+#define MAX_CSS_IDX 0
+struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
+static struct bus_type css_bus_type;
+
+int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+ struct subchannel_id schid;
+ int ret;
+
+ init_subchannel_id(&schid);
+ do {
+ do {
+ ret = fn(schid, data);
+ if (ret)
+ break;
+ } while (schid.sch_no++ < __MAX_SUBCHANNEL);
+ schid.sch_no = 0;
+ } while (schid.ssid++ < max_ssid);
+ return ret;
+}
+
+struct cb_data {
+ void *data;
+ struct idset *set;
+ int (*fn_known_sch)(struct subchannel *, void *);
+ int (*fn_unknown_sch)(struct subchannel_id, void *);
+};
+
+static int call_fn_known_sch(struct device *dev, void *data)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct cb_data *cb = data;
+ int rc = 0;
+
+ if (cb->set)
+ idset_sch_del(cb->set, sch->schid);
+ if (cb->fn_known_sch)
+ rc = cb->fn_known_sch(sch, cb->data);
+ return rc;
+}
+
+static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
+{
+ struct cb_data *cb = data;
+ int rc = 0;
+
+ if (idset_sch_contains(cb->set, schid))
+ rc = cb->fn_unknown_sch(schid, cb->data);
+ return rc;
+}
+
+static int call_fn_all_sch(struct subchannel_id schid, void *data)
+{
+ struct cb_data *cb = data;
+ struct subchannel *sch;
+ int rc = 0;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ if (cb->fn_known_sch)
+ rc = cb->fn_known_sch(sch, cb->data);
+ put_device(&sch->dev);
+ } else {
+ if (cb->fn_unknown_sch)
+ rc = cb->fn_unknown_sch(schid, cb->data);
+ }
+
+ return rc;
+}
+
+int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
+ int (*fn_unknown)(struct subchannel_id,
+ void *), void *data)
+{
+ struct cb_data cb;
+ int rc;
+
+ cb.data = data;
+ cb.fn_known_sch = fn_known;
+ cb.fn_unknown_sch = fn_unknown;
+
+ if (fn_known && !fn_unknown) {
+ /* Skip idset allocation in case of known-only loop. */
+ cb.set = NULL;
+ return bus_for_each_dev(&css_bus_type, NULL, &cb,
+ call_fn_known_sch);
+ }
+
+ cb.set = idset_sch_new();
+ if (!cb.set)
+ /* fall back to brute force scanning in case of oom */
+ return for_each_subchannel(call_fn_all_sch, &cb);
+
+ idset_fill(cb.set);
+
+ /* Process registered subchannels. */
+ rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
+ if (rc)
+ goto out;
+ /* Process unregistered subchannels. */
+ if (fn_unknown)
+ rc = for_each_subchannel(call_fn_unknown_sch, &cb);
+out:
+ idset_free(cb.set);
+
+ return rc;
+}
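+
+/*
+ * Callback sketch (hypothetical functions): a full scan typically passes
+ * one callback for registered subchannels and one for subchannel ids that
+ * are not known yet:
+ *
+ *	static int my_known_fn(struct subchannel *sch, void *data)
+ *	{
+ *		... sch is a registered subchannel ...
+ *		return 0;
+ *	}
+ *
+ *	static int my_unknown_fn(struct subchannel_id schid, void *data)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	rc = for_each_subchannel_staged(my_known_fn, my_unknown_fn, NULL);
+ */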
+
+static void css_sch_todo(struct work_struct *work);
+
+static int css_sch_create_locks(struct subchannel *sch)
+{
+ sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
+ if (!sch->lock)
+ return -ENOMEM;
+
+ spin_lock_init(sch->lock);
+ mutex_init(&sch->reg_mutex);
+
+ return 0;
+}
+
+static void css_subchannel_release(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
+ kfree(sch->driver_override);
+ kfree(sch->lock);
+ kfree(sch);
+}
+
+static int css_validate_subchannel(struct subchannel_id schid,
+ struct schib *schib)
+{
+ int err;
+
+ switch (schib->pmcw.st) {
+ case SUBCHANNEL_TYPE_IO:
+ case SUBCHANNEL_TYPE_MSG:
+ if (!css_sch_is_valid(schib))
+ err = -ENODEV;
+ else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
+ CIO_MSG_EVENT(6, "Blacklisted device detected "
+ "at devno %04X, subchannel set %x\n",
+ schib->pmcw.dev, schid.ssid);
+ err = -ENODEV;
+ } else
+ err = 0;
+ break;
+ default:
+ err = 0;
+ }
+ if (err)
+ goto out;
+
+ CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
+ schid.ssid, schid.sch_no, schib->pmcw.st);
+out:
+ return err;
+}
+
+struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
+ struct schib *schib)
+{
+ struct subchannel *sch;
+ int ret;
+
+ ret = css_validate_subchannel(schid, schib);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
+ if (!sch)
+ return ERR_PTR(-ENOMEM);
+
+ sch->schid = schid;
+ sch->schib = *schib;
+ sch->st = schib->pmcw.st;
+
+ ret = css_sch_create_locks(sch);
+ if (ret)
+ goto err;
+
+ INIT_WORK(&sch->todo_work, css_sch_todo);
+ sch->dev.release = &css_subchannel_release;
+ device_initialize(&sch->dev);
+ /*
+ * The physical addresses of some of the dma structures that can
+ * belong to a subchannel need to fit 31 bit width (e.g. ccw).
+ */
+ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+ /*
+ * But we don't have such restrictions imposed on the stuff that
+ * is handled by the streaming API.
+ */
+ sch->dma_mask = DMA_BIT_MASK(64);
+ sch->dev.dma_mask = &sch->dma_mask;
+ return sch;
+
+err:
+ kfree(sch);
+ return ERR_PTR(ret);
+}
+
+static int css_sch_device_register(struct subchannel *sch)
+{
+ int ret;
+
+ mutex_lock(&sch->reg_mutex);
+ dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
+ sch->schid.sch_no);
+ ret = device_add(&sch->dev);
+ mutex_unlock(&sch->reg_mutex);
+ return ret;
+}
+
+/**
+ * css_sch_device_unregister - unregister a subchannel
+ * @sch: subchannel to be unregistered
+ */
+void css_sch_device_unregister(struct subchannel *sch)
+{
+ mutex_lock(&sch->reg_mutex);
+ if (device_is_registered(&sch->dev))
+ device_unregister(&sch->dev);
+ mutex_unlock(&sch->reg_mutex);
+}
+EXPORT_SYMBOL_GPL(css_sch_device_unregister);
+
+static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
+{
+ int i;
+ int mask;
+
+ memset(ssd, 0, sizeof(struct chsc_ssd_info));
+ ssd->path_mask = pmcw->pim;
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (pmcw->pim & mask) {
+ chp_id_init(&ssd->chpid[i]);
+ ssd->chpid[i].id = pmcw->chpid[i];
+ }
+ }
+}
+
+static void ssd_register_chpids(struct chsc_ssd_info *ssd)
+{
+ int i;
+ int mask;
+
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if (ssd->path_mask & mask)
+ chp_new(ssd->chpid[i]);
+ }
+}
+
+void css_update_ssd_info(struct subchannel *sch)
+{
+ int ret;
+
+ ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
+ if (ret)
+ ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
+
+ ssd_register_chpids(&sch->ssd_info);
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "%01x\n", sch->st);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "css:t%01X\n", sch->st);
+}
+
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ char *driver_override, *old, *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = sch->driver_override;
+ if (strlen(driver_override)) {
+ sch->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ sch->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
+ device_unlock(dev);
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *subch_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static struct attribute_group subch_attr_group = {
+ .attrs = subch_attrs,
+};
+
+static const struct attribute_group *default_subch_attr_groups[] = {
+ &subch_attr_group,
+ NULL,
+};
+
+static ssize_t chpids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct chsc_ssd_info *ssd = &sch->ssd_info;
+ ssize_t ret = 0;
+ int mask;
+ int chp;
+
+ for (chp = 0; chp < 8; chp++) {
+ mask = 0x80 >> chp;
+ if (ssd->path_mask & mask)
+ ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
+ else
+ ret += sprintf(buf + ret, "00 ");
+ }
+ ret += sprintf(buf + ret, "\n");
+ return ret;
+}
+static DEVICE_ATTR_RO(chpids);
+
+static ssize_t pimpampom_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct pmcw *pmcw = &sch->schib.pmcw;
+
+ return sprintf(buf, "%02x %02x %02x\n",
+ pmcw->pim, pmcw->pam, pmcw->pom);
+}
+static DEVICE_ATTR_RO(pimpampom);
+
+static ssize_t dev_busid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct pmcw *pmcw = &sch->schib.pmcw;
+
+ if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
+ (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
+ return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
+ pmcw->dev);
+ else
+ return sysfs_emit(buf, "none\n");
+}
+static DEVICE_ATTR_RO(dev_busid);
+
+static struct attribute *io_subchannel_type_attrs[] = {
+ &dev_attr_chpids.attr,
+ &dev_attr_pimpampom.attr,
+ &dev_attr_dev_busid.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(io_subchannel_type);
+
+static const struct device_type io_subchannel_type = {
+ .groups = io_subchannel_type_groups,
+};
+
+int css_register_subchannel(struct subchannel *sch)
+{
+ int ret;
+
+ /* Initialize the subchannel structure */
+ sch->dev.parent = &channel_subsystems[0]->device;
+ sch->dev.bus = &css_bus_type;
+ sch->dev.groups = default_subch_attr_groups;
+
+ if (sch->st == SUBCHANNEL_TYPE_IO)
+ sch->dev.type = &io_subchannel_type;
+
+ /*
+ * We don't want to generate uevents for I/O subchannels that don't
+ * have a working ccw device behind them since they will be
+ * unregistered before they can be used anyway, so we delay the add
+ * uevent until after device recognition was successful.
+ * Note that we suppress the uevent for all subchannel types;
+ * the subchannel driver can decide itself when it wants to inform
+ * userspace of its existence.
+ */
+ dev_set_uevent_suppress(&sch->dev, 1);
+ css_update_ssd_info(sch);
+ /* make it known to the system */
+ ret = css_sch_device_register(sch);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
+ return ret;
+ }
+ if (!sch->driver) {
+ /*
+ * No driver matched. Generate the uevent now so that
+ * a fitting driver module may be loaded based on the
+ * modalias.
+ */
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+ return ret;
+}
+
+static int css_probe_device(struct subchannel_id schid, struct schib *schib)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = css_alloc_subchannel(schid, schib);
+ if (IS_ERR(sch))
+ return PTR_ERR(sch);
+
+ ret = css_register_subchannel(sch);
+ if (ret)
+ put_device(&sch->dev);
+
+ return ret;
+}
+
+static int
+check_subchannel(struct device *dev, const void *data)
+{
+ struct subchannel *sch;
+ struct subchannel_id *schid = (void *)data;
+
+ sch = to_subchannel(dev);
+ return schid_equal(&sch->schid, schid);
+}
+
+struct subchannel *
+get_subchannel_by_schid(struct subchannel_id schid)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&css_bus_type, NULL,
+ &schid, check_subchannel);
+
+ return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * css_sch_is_valid() - check if a subchannel is valid
+ * @schib: subchannel information block for the subchannel
+ */
+int css_sch_is_valid(struct schib *schib)
+{
+ if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
+ return 0;
+ if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL_GPL(css_sch_is_valid);
+
+static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
+{
+ struct schib schib;
+ int ccode;
+
+ if (!slow) {
+ /* Will be done on the slow path. */
+ return -EAGAIN;
+ }
+ /*
+ * The first subchannel that is not-operational (ccode==3)
+ * indicates that there aren't any more devices available.
+ * If stsch gets an exception, it means the current subchannel set
+ * is not valid.
+ */
+ ccode = stsch(schid, &schib);
+ if (ccode)
+ return (ccode == 3) ? -ENXIO : ccode;
+
+ return css_probe_device(schid, &schib);
+}
+
+static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
+{
+ int ret = 0;
+
+ if (sch->driver) {
+ if (sch->driver->sch_event)
+ ret = sch->driver->sch_event(sch, slow);
+ else
+ dev_dbg(&sch->dev,
+ "Got subchannel machine check but "
+ "no sch_event handler provided.\n");
+ }
+ if (ret != 0 && ret != -EAGAIN) {
+ CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
+ }
+ return ret;
+}
+
+static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ ret = css_evaluate_known_subchannel(sch, slow);
+ put_device(&sch->dev);
+ } else
+ ret = css_evaluate_new_subchannel(schid, slow);
+ if (ret == -EAGAIN)
+ css_schedule_eval(schid);
+}
+
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+EXPORT_SYMBOL_GPL(css_sched_sch_todo);
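+
+/*
+ * Usage sketch: callers hold the subchannel lock, e.g. to queue an
+ * unregistration from an event handler:
+ *
+ *	spin_lock_irq(sch->lock);
+ *	css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ *	spin_unlock_irq(sch->lock);
+ */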
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
+static struct idset *slow_subchannel_set;
+static spinlock_t slow_subchannel_lock;
+static wait_queue_head_t css_eval_wq;
+static atomic_t css_eval_scheduled;
+
+static int __init slow_subchannel_init(void)
+{
+ spin_lock_init(&slow_subchannel_lock);
+ atomic_set(&css_eval_scheduled, 0);
+ init_waitqueue_head(&css_eval_wq);
+ slow_subchannel_set = idset_sch_new();
+ if (!slow_subchannel_set) {
+ CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int slow_eval_known_fn(struct subchannel *sch, void *data)
+{
+ int eval;
+ int rc;
+
+ spin_lock_irq(&slow_subchannel_lock);
+ eval = idset_sch_contains(slow_subchannel_set, sch->schid);
+ idset_sch_del(slow_subchannel_set, sch->schid);
+ spin_unlock_irq(&slow_subchannel_lock);
+ if (eval) {
+ rc = css_evaluate_known_subchannel(sch, 1);
+ if (rc == -EAGAIN)
+ css_schedule_eval(sch->schid);
+ /*
+ * The loop might take a long time on platforms with lots of
+ * known devices. Allow scheduling here.
+ */
+ cond_resched();
+ }
+ return 0;
+}
+
+static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
+{
+ int eval;
+ int rc = 0;
+
+ spin_lock_irq(&slow_subchannel_lock);
+ eval = idset_sch_contains(slow_subchannel_set, schid);
+ idset_sch_del(slow_subchannel_set, schid);
+ spin_unlock_irq(&slow_subchannel_lock);
+ if (eval) {
+ rc = css_evaluate_new_subchannel(schid, 1);
+ switch (rc) {
+ case -EAGAIN:
+ css_schedule_eval(schid);
+ rc = 0;
+ break;
+ case -ENXIO:
+ case -ENOMEM:
+ case -EIO:
+ /* These should abort looping */
+ spin_lock_irq(&slow_subchannel_lock);
+ idset_sch_del_subseq(slow_subchannel_set, schid);
+ spin_unlock_irq(&slow_subchannel_lock);
+ break;
+ default:
+ rc = 0;
+ }
+ /* Allow scheduling here since the containing loop might
+ * take a while. */
+ cond_resched();
+ }
+ return rc;
+}
+
+static void css_slow_path_func(struct work_struct *unused)
+{
+ unsigned long flags;
+
+ CIO_TRACE_EVENT(4, "slowpath");
+ for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
+ NULL);
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ if (idset_is_empty(slow_subchannel_set)) {
+ atomic_set(&css_eval_scheduled, 0);
+ wake_up(&css_eval_wq);
+ }
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
+struct workqueue_struct *cio_work_q;
+
+void css_schedule_eval(struct subchannel_id schid)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_sch_add(slow_subchannel_set, schid);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+void css_schedule_eval_all(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_fill(slow_subchannel_set);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_delayed_work(cio_work_q, &slow_path_work, 0);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+}
+
+static int __unset_registered(struct device *dev, void *data)
+{
+ struct idset *set = data;
+ struct subchannel *sch = to_subchannel(dev);
+
+ idset_sch_del(set, sch->schid);
+ return 0;
+}
+
+void css_schedule_eval_all_unreg(unsigned long delay)
+{
+ unsigned long flags;
+ struct idset *unreg_set;
+
+ /* Find unregistered subchannels. */
+ unreg_set = idset_sch_new();
+ if (!unreg_set) {
+ /* Fallback. */
+ css_schedule_eval_all();
+ return;
+ }
+ idset_fill(unreg_set);
+ bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+ /* Apply to slow_subchannel_set. */
+ spin_lock_irqsave(&slow_subchannel_lock, flags);
+ idset_add_set(slow_subchannel_set, unreg_set);
+ atomic_set(&css_eval_scheduled, 1);
+ queue_delayed_work(cio_work_q, &slow_path_work, delay);
+ spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+ idset_free(unreg_set);
+}
+
+void css_wait_for_slow_path(void)
+{
+ flush_workqueue(cio_work_q);
+}
+
+/* Schedule reprobing of all unregistered subchannels. */
+void css_schedule_reprobe(void)
+{
+ /* Schedule with a delay to allow merging of subsequent calls. */
+ css_schedule_eval_all_unreg(1 * HZ);
+}
+EXPORT_SYMBOL_GPL(css_schedule_reprobe);
+
+/*
+ * Called from the machine check handler for subchannel report words.
+ */
+static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
+{
+ struct subchannel_id mchk_schid;
+ struct subchannel *sch;
+
+ if (overflow) {
+ css_schedule_eval_all();
+ return;
+ }
+ CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+ crw0->erc, crw0->rsid);
+ if (crw1)
+ CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
+ crw1->anc, crw1->erc, crw1->rsid);
+ init_subchannel_id(&mchk_schid);
+ mchk_schid.sch_no = crw0->rsid;
+ if (crw1)
+ mchk_schid.ssid = (crw1->rsid >> 4) & 3;
+
+ if (crw0->erc == CRW_ERC_PMOD) {
+ sch = get_subchannel_by_schid(mchk_schid);
+ if (sch) {
+ css_update_ssd_info(sch);
+ put_device(&sch->dev);
+ }
+ }
+ /*
+ * Since we are always presented with IPI in the CRW, we have to
+ * use stsch() to find out if the subchannel in question has come
+ * or gone.
+ */
+ css_evaluate_subchannel(mchk_schid, 0);
+}
+
+static void __init
+css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
+{
+ struct cpuid cpu_id;
+
+ if (css_general_characteristics.mcss) {
+ css->global_pgid.pgid_high.ext_cssid.version = 0x80;
+ css->global_pgid.pgid_high.ext_cssid.cssid =
+ css->id_valid ? css->cssid : 0;
+ } else {
+ css->global_pgid.pgid_high.cpu_addr = stap();
+ }
+ get_cpu_id(&cpu_id);
+ css->global_pgid.cpu_id = cpu_id.ident;
+ css->global_pgid.cpu_model = cpu_id.machine;
+ css->global_pgid.tod_high = tod_high;
+}
+
+static void channel_subsystem_release(struct device *dev)
+{
+ struct channel_subsystem *css = to_css(dev);
+
+ mutex_destroy(&css->mutex);
+ kfree(css);
+}
+
+static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct channel_subsystem *css = to_css(dev);
+
+ if (!css->id_valid)
+ return -EINVAL;
+
+ return sprintf(buf, "%x\n", css->cssid);
+}
+static DEVICE_ATTR_RO(real_cssid);
+
+static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct channel_subsystem *css = to_css(dev);
+ int ret;
+
+ mutex_lock(&css->mutex);
+ ret = sprintf(buf, "%x\n", css->cm_enabled);
+ mutex_unlock(&css->mutex);
+ return ret;
+}
+
+static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct channel_subsystem *css = to_css(dev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+ mutex_lock(&css->mutex);
+ switch (val) {
+ case 0:
+ ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
+ break;
+ case 1:
+ ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&css->mutex);
+ return ret < 0 ? ret : count;
+}
+static DEVICE_ATTR_RW(cm_enable);
+
+static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
+ int index)
+{
+ return css_chsc_characteristics.secm ? attr->mode : 0;
+}
+
+static struct attribute *cssdev_attrs[] = {
+ &dev_attr_real_cssid.attr,
+ NULL,
+};
+
+static struct attribute_group cssdev_attr_group = {
+ .attrs = cssdev_attrs,
+};
+
+static struct attribute *cssdev_cm_attrs[] = {
+ &dev_attr_cm_enable.attr,
+ NULL,
+};
+
+static struct attribute_group cssdev_cm_attr_group = {
+ .attrs = cssdev_cm_attrs,
+ .is_visible = cm_enable_mode,
+};
+
+static const struct attribute_group *cssdev_attr_groups[] = {
+ &cssdev_attr_group,
+ &cssdev_cm_attr_group,
+ NULL,
+};
+
+static int __init setup_css(int nr)
+{
+ struct channel_subsystem *css;
+ int ret;
+
+ css = kzalloc(sizeof(*css), GFP_KERNEL);
+ if (!css)
+ return -ENOMEM;
+
+ channel_subsystems[nr] = css;
+ dev_set_name(&css->device, "css%x", nr);
+ css->device.groups = cssdev_attr_groups;
+ css->device.release = channel_subsystem_release;
+ /*
+ * We currently allocate notifier bits with this (using
+ * css->device as the device argument with the DMA API)
+ * and are fine with 64 bit addresses.
+ */
+ css->device.coherent_dma_mask = DMA_BIT_MASK(64);
+ css->device.dma_mask = &css->device.coherent_dma_mask;
+
+ mutex_init(&css->mutex);
+ ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
+ if (!ret) {
+ css->id_valid = true;
+ pr_info("Partition identifier %01x.%01x\n", css->cssid,
+ css->iid);
+ }
+ css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
+
+ ret = device_register(&css->device);
+ if (ret) {
+ put_device(&css->device);
+ goto out_err;
+ }
+
+ css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
+ GFP_KERNEL);
+ if (!css->pseudo_subchannel) {
+ device_unregister(&css->device);
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ css->pseudo_subchannel->dev.parent = &css->device;
+ css->pseudo_subchannel->dev.release = css_subchannel_release;
+ mutex_init(&css->pseudo_subchannel->reg_mutex);
+ ret = css_sch_create_locks(css->pseudo_subchannel);
+ if (ret) {
+ kfree(css->pseudo_subchannel);
+ device_unregister(&css->device);
+ goto out_err;
+ }
+
+ dev_set_name(&css->pseudo_subchannel->dev, "defunct");
+ ret = device_register(&css->pseudo_subchannel->dev);
+ if (ret) {
+ put_device(&css->pseudo_subchannel->dev);
+ device_unregister(&css->device);
+ goto out_err;
+ }
+
+ return ret;
+out_err:
+ channel_subsystems[nr] = NULL;
+ return ret;
+}
+
+static int css_reboot_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ struct channel_subsystem *css;
+ int ret;
+
+ ret = NOTIFY_DONE;
+ for_each_css(css) {
+ mutex_lock(&css->mutex);
+ if (css->cm_enabled)
+ if (chsc_secm(css, 0))
+ ret = NOTIFY_BAD;
+ mutex_unlock(&css->mutex);
+ }
+
+ return ret;
+}
+
+static struct notifier_block css_reboot_notifier = {
+ .notifier_call = css_reboot_event,
+};
+
+/*
+ * Since the css devices are not on a bus, do not have a class and do
+ * not have a special device type, we cannot stop/restart channel path
+ * measurements via the normal suspend/resume callbacks, but have to use
+ * notifiers.
+ */
+static int css_power_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct channel_subsystem *css;
+ int ret;
+
+ switch (event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = NOTIFY_DONE;
+ for_each_css(css) {
+ mutex_lock(&css->mutex);
+ if (!css->cm_enabled) {
+ mutex_unlock(&css->mutex);
+ continue;
+ }
+ ret = __chsc_do_secm(css, 0);
+ ret = notifier_from_errno(ret);
+ mutex_unlock(&css->mutex);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ ret = NOTIFY_DONE;
+ for_each_css(css) {
+ mutex_lock(&css->mutex);
+ if (!css->cm_enabled) {
+ mutex_unlock(&css->mutex);
+ continue;
+ }
+ ret = __chsc_do_secm(css, 1);
+ ret = notifier_from_errno(ret);
+ mutex_unlock(&css->mutex);
+ }
+ /* search for subchannels, which appeared during hibernation */
+ css_schedule_reprobe();
+ break;
+ default:
+ ret = NOTIFY_DONE;
+ }
+ return ret;
+
+}
+static struct notifier_block css_power_notifier = {
+ .notifier_call = css_power_event,
+};
+
+#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
+static struct gen_pool *cio_dma_pool;
+
+/* Currently cio supports only a single css */
+struct device *cio_get_dma_css_dev(void)
+{
+ return &channel_subsystems[0]->device;
+}
+
+struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
+{
+ struct gen_pool *gp_dma;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ int i;
+
+ gp_dma = gen_pool_create(3, -1);
+ if (!gp_dma)
+ return NULL;
+ for (i = 0; i < nr_pages; ++i) {
+ cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
+ CIO_DMA_GFP);
+ if (!cpu_addr)
+ return gp_dma;
+ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+ dma_addr, PAGE_SIZE, -1);
+ }
+ return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk, void *data)
+{
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
+ (void *) chunk->start_addr,
+ (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+ if (!gp_dma)
+ return;
+ /* this is quite ugly, but there is no better way to do it */
+ gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
+ gen_pool_destroy(gp_dma);
+}
+
+static int cio_dma_pool_init(void)
+{
+ /* No need to free up the resources: compiled in */
+ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+ if (!cio_dma_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr;
+ size_t chunk_size;
+
+ if (!gp_dma)
+ return NULL;
+ addr = gen_pool_alloc(gp_dma, size);
+ while (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ chunk_size, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_alloc(gp_dma, size);
+ }
+ return (void *) addr;
+}
+
+void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
+{
+ if (!cpu_addr)
+ return;
+ memset(cpu_addr, 0, size);
+ gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
+}
+
+/*
+ * Allocate dma memory from the css global pool. Intended for memory not
+ * specific to any single device within the css. The allocated memory
+ * is not guaranteed to be 31-bit addressable.
+ *
+ * Caution: Not suitable for early stuff like console.
+ */
+void *cio_dma_zalloc(size_t size)
+{
+ return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
+}
+
+void cio_dma_free(void *cpu_addr, size_t size)
+{
+ cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
+}
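+
+/*
+ * Usage sketch for the css-global pool (size is illustrative):
+ *
+ *	void *buf = cio_dma_zalloc(64);
+ *
+ *	if (buf) {
+ *		...
+ *		cio_dma_free(buf, 64);
+ *	}
+ */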
+
+/*
+ * Now that the driver core is running, we can setup our channel subsystem.
+ * The struct subchannel's are created during probing.
+ */
+static int __init css_bus_init(void)
+{
+ int ret, i;
+
+ ret = chsc_init();
+ if (ret)
+ return ret;
+
+ chsc_determine_css_characteristics();
+ /* Try to enable MSS. */
+ ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+ if (ret)
+ max_ssid = 0;
+ else /* Success. */
+ max_ssid = __MAX_SSID;
+
+ ret = slow_subchannel_init();
+ if (ret)
+ goto out;
+
+ ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
+ if (ret)
+ goto out;
+
+ ret = bus_register(&css_bus_type);
+ if (ret)
+ goto out;
+
+ /* Setup css structure. */
+ for (i = 0; i <= MAX_CSS_IDX; i++) {
+ ret = setup_css(i);
+ if (ret)
+ goto out_unregister;
+ }
+ ret = register_reboot_notifier(&css_reboot_notifier);
+ if (ret)
+ goto out_unregister;
+ ret = register_pm_notifier(&css_power_notifier);
+ if (ret)
+ goto out_unregister_rn;
+ ret = cio_dma_pool_init();
+ if (ret)
+ goto out_unregister_pmn;
+ airq_init();
+ css_init_done = 1;
+
+ /* Enable default isc for I/O subchannels. */
+ isc_register(IO_SCH_ISC);
+
+ return 0;
+out_unregister_pmn:
+ unregister_pm_notifier(&css_power_notifier);
+out_unregister_rn:
+ unregister_reboot_notifier(&css_reboot_notifier);
+out_unregister:
+ while (i-- > 0) {
+ struct channel_subsystem *css = channel_subsystems[i];
+ device_unregister(&css->pseudo_subchannel->dev);
+ device_unregister(&css->device);
+ }
+ bus_unregister(&css_bus_type);
+out:
+ crw_unregister_handler(CRW_RSC_SCH);
+ idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
+ pr_alert("The CSS device driver initialization failed with "
+ "errno=%d\n", ret);
+ return ret;
+}
+
+static void __init css_bus_cleanup(void)
+{
+ struct channel_subsystem *css;
+
+ for_each_css(css) {
+ device_unregister(&css->pseudo_subchannel->dev);
+ device_unregister(&css->device);
+ }
+ bus_unregister(&css_bus_type);
+ crw_unregister_handler(CRW_RSC_SCH);
+ idset_free(slow_subchannel_set);
+ chsc_init_cleanup();
+ isc_unregister(IO_SCH_ISC);
+}
+
+static int __init channel_subsystem_init(void)
+{
+ int ret;
+
+ ret = css_bus_init();
+ if (ret)
+ return ret;
+ cio_work_q = create_singlethread_workqueue("cio");
+ if (!cio_work_q) {
+ ret = -ENOMEM;
+ goto out_bus;
+ }
+ ret = io_subchannel_init();
+ if (ret)
+ goto out_wq;
+
+ /* Register subchannels which are already in use. */
+ cio_register_early_subchannels();
+ /* Start initial subchannel evaluation. */
+ css_schedule_eval_all();
+
+ return ret;
+out_wq:
+ destroy_workqueue(cio_work_q);
+out_bus:
+ css_bus_cleanup();
+ return ret;
+}
+subsys_initcall(channel_subsystem_init);
+
+static int css_settle(struct device_driver *drv, void *unused)
+{
+ struct css_driver *cssdrv = to_cssdriver(drv);
+
+ if (cssdrv->settle)
+ return cssdrv->settle();
+ return 0;
+}
+
+int css_complete_work(void)
+{
+ int ret;
+
+ /* Wait for the evaluation of subchannels to finish. */
+ ret = wait_event_interruptible(css_eval_wq,
+ atomic_read(&css_eval_scheduled) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ /* Wait for the subchannel type specific initialization to finish */
+ return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+
+
+/*
+ * Wait for the initialization of devices to finish, to make sure we are
+ * done with our setup if the search for the root device starts.
+ */
+static int __init channel_subsystem_init_sync(void)
+{
+ css_complete_work();
+ return 0;
+}
+subsys_initcall_sync(channel_subsystem_init_sync);
+
+#ifdef CONFIG_PROC_FS
+static ssize_t cio_settle_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ /* Handle pending CRW's. */
+ crw_wait_for_channel_report();
+ ret = css_complete_work();
+
+ return ret ? ret : count;
+}
+
+static const struct proc_ops cio_settle_proc_ops = {
+ .proc_open = nonseekable_open,
+ .proc_write = cio_settle_write,
+ .proc_lseek = no_llseek,
+};
+
+static int __init cio_settle_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
+ if (!entry)
+ return -ENOMEM;
+ return 0;
+}
+device_initcall(cio_settle_init);
+#endif /*CONFIG_PROC_FS*/
+
+int sch_is_pseudo_sch(struct subchannel *sch)
+{
+ if (!sch->dev.parent)
+ return 0;
+ return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+}
+
+static int css_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *driver = to_cssdriver(drv);
+ struct css_device_id *id;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (sch->driver_override && strcmp(sch->driver_override, drv->name))
+ return 0;
+
+ for (id = driver->subchannel_type; id->match_flags; id++) {
+ if (sch->st == id->type)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int css_probe(struct device *dev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = to_subchannel(dev);
+ sch->driver = to_cssdriver(dev->driver);
+ ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
+ if (ret)
+ sch->driver = NULL;
+ return ret;
+}
+
+static int css_remove(struct device *dev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = to_subchannel(dev);
+ ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
+ sch->driver = NULL;
+ return ret;
+}
+
+static void css_shutdown(struct device *dev)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(dev);
+ if (sch->driver && sch->driver->shutdown)
+ sch->driver->shutdown(sch);
+}
+
+static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ int ret;
+
+ ret = add_uevent_var(env, "ST=%01X", sch->st);
+ if (ret)
+ return ret;
+ ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
+ return ret;
+}
+
+static int css_pm_prepare(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (mutex_is_locked(&sch->reg_mutex))
+ return -EAGAIN;
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ /* Notify drivers that they may not register children. */
+ return drv->prepare ? drv->prepare(sch) : 0;
+}
+
+static void css_pm_complete(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return;
+ drv = to_cssdriver(sch->dev.driver);
+ if (drv->complete)
+ drv->complete(sch);
+}
+
+static int css_pm_freeze(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->freeze ? drv->freeze(sch) : 0;
+}
+
+static int css_pm_thaw(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->thaw ? drv->thaw(sch) : 0;
+}
+
+static int css_pm_restore(struct device *dev)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct css_driver *drv;
+
+ css_update_ssd_info(sch);
+ if (!sch->dev.driver)
+ return 0;
+ drv = to_cssdriver(sch->dev.driver);
+ return drv->restore ? drv->restore(sch) : 0;
+}
+
+static const struct dev_pm_ops css_pm_ops = {
+ .prepare = css_pm_prepare,
+ .complete = css_pm_complete,
+ .freeze = css_pm_freeze,
+ .thaw = css_pm_thaw,
+ .restore = css_pm_restore,
+};
+
+static struct bus_type css_bus_type = {
+ .name = "css",
+ .match = css_bus_match,
+ .probe = css_probe,
+ .remove = css_remove,
+ .shutdown = css_shutdown,
+ .uevent = css_uevent,
+ .pm = &css_pm_ops,
+};
+
+/**
+ * css_driver_register - register a css driver
+ * @cdrv: css driver to register
+ *
+ * This is mainly a wrapper around driver_register that sets name
+ * and bus_type in the embedded struct device_driver correctly.
+ */
+int css_driver_register(struct css_driver *cdrv)
+{
+ cdrv->drv.bus = &css_bus_type;
+ return driver_register(&cdrv->drv);
+}
+EXPORT_SYMBOL_GPL(css_driver_register);
+
+/**
+ * css_driver_unregister - unregister a css driver
+ * @cdrv: css driver to unregister
+ *
+ * This is a wrapper around driver_unregister.
+ */
+void css_driver_unregister(struct css_driver *cdrv)
+{
+ driver_unregister(&cdrv->drv);
+}
+EXPORT_SYMBOL_GPL(css_driver_unregister);
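
As an illustrative aside (not part of the patch), the registration interface defined above could be used by a subchannel-type driver roughly as sketched below. The driver name and callbacks are hypothetical; the io_subchannel driver in device.c is the real in-tree user of this interface.

static struct css_device_id example_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int example_sch_probe(struct subchannel *sch)
{
	return 0;	/* accept the subchannel */
}

static struct css_driver example_css_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "example_subchannel",
	},
	.subchannel_type = example_subchannel_ids,
	.probe = example_sch_probe,
};

static int __init example_init(void)
{
	/* css_driver_register() fills in drv.bus and calls driver_register(). */
	return css_driver_register(&example_css_driver);
}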
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
new file mode 100644
index 000000000..3f322ea0f
--- /dev/null
+++ b/drivers/s390/cio/css.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CSS_H
+#define _CSS_H
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include <asm/cio.h>
+#include <asm/chpid.h>
+#include <asm/schid.h>
+
+#include "cio.h"
+
+/*
+ * path grouping stuff
+ */
+#define SPID_FUNC_SINGLE_PATH 0x00
+#define SPID_FUNC_MULTI_PATH 0x80
+#define SPID_FUNC_ESTABLISH 0x00
+#define SPID_FUNC_RESIGN 0x40
+#define SPID_FUNC_DISBAND 0x20
+
+#define SNID_STATE1_RESET 0
+#define SNID_STATE1_UNGROUPED 2
+#define SNID_STATE1_GROUPED 3
+
+#define SNID_STATE2_NOT_RESVD 0
+#define SNID_STATE2_RESVD_ELSE 2
+#define SNID_STATE2_RESVD_SELF 3
+
+#define SNID_STATE3_MULTI_PATH 1
+#define SNID_STATE3_SINGLE_PATH 0
+
+struct path_state {
+ __u8 state1 : 2; /* path state value 1 */
+ __u8 state2 : 2; /* path state value 2 */
+ __u8 state3 : 1; /* path state value 3 */
+ __u8 resvd : 3; /* reserved */
+} __attribute__ ((packed));
+
+struct extended_cssid {
+ u8 version;
+ u8 cssid;
+} __attribute__ ((packed));
+
+struct pgid {
+ union {
+ __u8 fc; /* SPID function code */
+ struct path_state ps; /* SNID path state */
+ } __attribute__ ((packed)) inf;
+ union {
+ __u32 cpu_addr : 16; /* CPU address */
+ struct extended_cssid ext_cssid;
+ } __attribute__ ((packed)) pgid_high;
+ __u32 cpu_id : 24; /* CPU identification */
+ __u32 cpu_model : 16; /* CPU model */
+ __u32 tod_high; /* high word TOD clock */
+} __attribute__ ((packed));
+
+struct subchannel;
+struct chp_link;
+/**
+ * struct css_driver - device driver for subchannels
+ * @subchannel_type: subchannel type supported by this driver
+ * @drv: embedded device driver structure
+ * @irq: called on interrupts
+ * @chp_event: called for events affecting a channel path
+ * @sch_event: called for events affecting the subchannel
+ * @probe: function called on probe
+ * @remove: function called on remove
+ * @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
+ * @settle: wait for asynchronous work to finish
+ */
+struct css_driver {
+ struct css_device_id *subchannel_type;
+ struct device_driver drv;
+ void (*irq)(struct subchannel *);
+ int (*chp_event)(struct subchannel *, struct chp_link *, int);
+ int (*sch_event)(struct subchannel *, int);
+ int (*probe)(struct subchannel *);
+ int (*remove)(struct subchannel *);
+ void (*shutdown)(struct subchannel *);
+ int (*prepare) (struct subchannel *);
+ void (*complete) (struct subchannel *);
+ int (*freeze)(struct subchannel *);
+ int (*thaw) (struct subchannel *);
+ int (*restore)(struct subchannel *);
+ int (*settle)(void);
+};
+
+#define to_cssdriver(n) container_of(n, struct css_driver, drv)
+
+extern int css_driver_register(struct css_driver *);
+extern void css_driver_unregister(struct css_driver *);
+
+extern void css_sch_device_unregister(struct subchannel *);
+extern int css_register_subchannel(struct subchannel *);
+extern struct subchannel *css_alloc_subchannel(struct subchannel_id,
+ struct schib *schib);
+extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
+extern int css_init_done;
+extern int max_ssid;
+int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
+ int (*fn_unknown)(struct subchannel_id,
+ void *), void *data);
+extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
+void css_update_ssd_info(struct subchannel *sch);
+
+struct channel_subsystem {
+ u8 cssid;
+ u8 iid;
+ bool id_valid; /* cssid,iid */
+ struct channel_path *chps[__MAX_CHPID + 1];
+ struct device device;
+ struct pgid global_pgid;
+ struct mutex mutex;
+ /* channel measurement related */
+ int cm_enabled;
+ void *cub_addr1;
+ void *cub_addr2;
+ /* for orphaned ccw devices */
+ struct subchannel *pseudo_subchannel;
+};
+#define to_css(dev) container_of(dev, struct channel_subsystem, device)
+
+extern struct channel_subsystem *channel_subsystems[];
+
+/* Dummy helper which needs to change once we support more than one css. */
+static inline struct channel_subsystem *css_by_id(u8 cssid)
+{
+ return channel_subsystems[0];
+}
+
+/* Dummy iterator which needs to change once we support more than one css. */
+#define for_each_css(css) \
+ for ((css) = channel_subsystems[0]; (css); (css) = NULL)
+
+/* Helper functions to build lists for the slow path. */
+void css_schedule_eval(struct subchannel_id schid);
+void css_schedule_eval_all(void);
+void css_schedule_eval_all_unreg(unsigned long delay);
+int css_complete_work(void);
+
+int sch_is_pseudo_sch(struct subchannel *);
+struct schib;
+int css_sch_is_valid(struct schib *);
+
+extern struct workqueue_struct *cio_work_q;
+void css_wait_for_slow_path(void);
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
+#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
new file mode 100644
index 000000000..6f9c81db6
--- /dev/null
+++ b/drivers/s390/cio/device.c
@@ -0,0 +1,2161 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * bus driver for ccw devices
+ *
+ * Copyright IBM Corp. 2002, 2008
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched/signal.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/param.h> /* HZ */
+#include <asm/cmb.h>
+#include <asm/isc.h>
+
+#include "chp.h"
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "io_sch.h"
+#include "blacklist.h"
+#include "chsc.h"
+
+static struct timer_list recovery_timer;
+static DEFINE_SPINLOCK(recovery_lock);
+static int recovery_phase;
+static const unsigned long recovery_delay[] = { 3, 30, 300 };
+
+static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
+static struct bus_type ccw_bus_type;
+
+/******************* bus type handling ***********************/
+
+/* The Linux driver model distinguishes between a bus type and
+ * the bus itself. Of course we only have one channel
+ * subsystem driver and one channel system per machine, but
+ * we still use the abstraction. T.R. says it's a good idea. */
+static int
+ccw_bus_match (struct device * dev, struct device_driver * drv)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_driver *cdrv = to_ccwdrv(drv);
+ const struct ccw_device_id *ids = cdrv->ids, *found;
+
+ if (!ids)
+ return 0;
+
+ found = ccw_device_id_match(ids, &cdev->id);
+ if (!found)
+ return 0;
+
+ cdev->id.driver_info = found->driver_info;
+
+ return 1;
+}
+
+/* Store modalias string delimited by prefix/suffix string into buffer with
+ * specified size. Return length of resulting string (excluding trailing '\0')
+ * even if the string doesn't fit the buffer (snprintf semantics). */
+static int snprint_alias(char *buf, size_t size,
+ struct ccw_device_id *id, const char *suffix)
+{
+ int len;
+
+ len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
+ if (len > size)
+ return len;
+ buf += len;
+ size -= len;
+
+ if (id->dev_type != 0)
+ len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
+ id->dev_model, suffix);
+ else
+ len += snprintf(buf, size, "dtdm%s", suffix);
+
+ return len;
+}
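
For illustration only (not part of the patch): snprint_alias() is static to this file, so the sketch below merely shows the alias format it produces for a hypothetical device id. With the values used here the buffer ends up holding "ccw:t3990m01dt3390dm0C".

static void example_alias(void)
{
	struct ccw_device_id id = {
		.cu_type  = 0x3990, .cu_model = 0x01,	/* control unit */
		.dev_type = 0x3390, .dev_model = 0x0c,	/* device */
	};
	char buf[30];

	snprint_alias(buf, sizeof(buf), &id, "");
	/* buf now contains "ccw:t3990m01dt3390dm0C" */
}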
+
+/* Set up environment variables for ccw device uevent. Return 0 on success,
+ * non-zero otherwise. */
+static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_id *id = &(cdev->id);
+ int ret;
+ char modalias_buf[30];
+
+ /* CU_TYPE= */
+ ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
+ if (ret)
+ return ret;
+
+ /* CU_MODEL= */
+ ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
+ if (ret)
+ return ret;
+
+ /* The next two can be zero, that's ok for us */
+ /* DEV_TYPE= */
+ ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
+ if (ret)
+ return ret;
+
+ /* DEV_MODEL= */
+ ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
+ if (ret)
+ return ret;
+
+ /* MODALIAS= */
+ snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
+ ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
+ return ret;
+}
+
+static void io_subchannel_irq(struct subchannel *);
+static int io_subchannel_probe(struct subchannel *);
+static int io_subchannel_remove(struct subchannel *);
+static void io_subchannel_shutdown(struct subchannel *);
+static int io_subchannel_sch_event(struct subchannel *, int);
+static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
+ int);
+static void recovery_func(struct timer_list *unused);
+
+static struct css_device_id io_subchannel_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+ { /* end of list */ },
+};
+
+static int io_subchannel_prepare(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ /*
+ * Don't allow suspend while a ccw device registration
+ * is still outstanding.
+ */
+ cdev = sch_get_cdev(sch);
+ if (cdev && !device_is_registered(&cdev->dev))
+ return -EAGAIN;
+ return 0;
+}
+
+static int io_subchannel_settle(void)
+{
+ int ret;
+
+ ret = wait_event_interruptible(ccw_device_init_wq,
+ atomic_read(&ccw_device_init_count) == 0);
+ if (ret)
+ return -EINTR;
+ flush_workqueue(cio_work_q);
+ return 0;
+}
+
+static struct css_driver io_subchannel_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "io_subchannel",
+ },
+ .subchannel_type = io_subchannel_ids,
+ .irq = io_subchannel_irq,
+ .sch_event = io_subchannel_sch_event,
+ .chp_event = io_subchannel_chp_event,
+ .probe = io_subchannel_probe,
+ .remove = io_subchannel_remove,
+ .shutdown = io_subchannel_shutdown,
+ .prepare = io_subchannel_prepare,
+ .settle = io_subchannel_settle,
+};
+
+int __init io_subchannel_init(void)
+{
+ int ret;
+
+ timer_setup(&recovery_timer, recovery_func, 0);
+ ret = bus_register(&ccw_bus_type);
+ if (ret)
+ return ret;
+ ret = css_driver_register(&io_subchannel_driver);
+ if (ret)
+ bus_unregister(&ccw_bus_type);
+
+ return ret;
+}
+
+
+/************************ device handling **************************/
+
+static ssize_t
+devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_id *id = &(cdev->id);
+
+ if (id->dev_type != 0)
+ return sprintf(buf, "%04x/%02x\n",
+ id->dev_type, id->dev_model);
+ else
+ return sprintf(buf, "n/a\n");
+}
+
+static ssize_t
+cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_id *id = &(cdev->id);
+
+ return sprintf(buf, "%04x/%02x\n",
+ id->cu_type, id->cu_model);
+}
+
+static ssize_t
+modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_id *id = &(cdev->id);
+ int len;
+
+ len = snprint_alias(buf, PAGE_SIZE, id, "\n");
+
+ return len > PAGE_SIZE ? PAGE_SIZE : len;
+}
+
+static ssize_t
+online_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ return sprintf(buf, cdev->online ? "1\n" : "0\n");
+}
+
+int ccw_device_is_orphan(struct ccw_device *cdev)
+{
+ return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
+}
+
+static void ccw_device_unregister(struct ccw_device *cdev)
+{
+ if (device_is_registered(&cdev->dev)) {
+ /* Undo device_add(). */
+ device_del(&cdev->dev);
+ }
+ if (cdev->private->flags.initialized) {
+ cdev->private->flags.initialized = 0;
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ }
+}
+
+static void io_subchannel_quiesce(struct subchannel *);
+
+/**
+ * ccw_device_set_offline() - disable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function calls the driver's set_offline() function for @cdev, if
+ * given, and then disables @cdev.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ * Context:
+ * enabled, ccw device lock not held
+ */
+int ccw_device_set_offline(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret, state;
+
+ if (!cdev)
+ return -ENODEV;
+ if (!cdev->online || !cdev->drv)
+ return -EINVAL;
+
+ if (cdev->drv->set_offline) {
+ ret = cdev->drv->set_offline(cdev);
+ if (ret != 0)
+ return ret;
+ }
+ spin_lock_irq(cdev->ccwlock);
+ sch = to_subchannel(cdev->dev.parent);
+ cdev->online = 0;
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
+ }
+ do {
+ ret = ccw_device_offline(cdev);
+ if (!ret)
+ break;
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
+ "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret != -EBUSY)
+ goto error;
+ state = cdev->private->state;
+ spin_unlock_irq(cdev->ccwlock);
+ io_subchannel_quiesce(sch);
+ spin_lock_irq(cdev->ccwlock);
+ cdev->private->state = state;
+ } while (ret == -EBUSY);
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Inform the user if set offline failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warn("%s: The device entered boxed state while being set offline\n",
+ dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warn("%s: The device stopped operating while being set offline\n",
+ dev_name(&cdev->dev));
+ }
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return 0;
+
+error:
+ cdev->private->state = DEV_STATE_OFFLINE;
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return -ENODEV;
+}
+
+/**
+ * ccw_device_set_online() - enable a ccw device for I/O
+ * @cdev: target ccw device
+ *
+ * This function first enables @cdev and then calls the driver's set_online()
+ * function for @cdev, if given. If set_online() returns an error, @cdev is
+ * disabled again.
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ * Context:
+ * enabled, ccw device lock not held
+ */
+int ccw_device_set_online(struct ccw_device *cdev)
+{
+ int ret;
+ int ret2;
+
+ if (!cdev)
+ return -ENODEV;
+ if (cdev->online || !cdev->drv)
+ return -EINVAL;
+ /* Hold on to an extra reference while device is online. */
+ if (!get_device(&cdev->dev))
+ return -ENODEV;
+
+ spin_lock_irq(cdev->ccwlock);
+ ret = ccw_device_online(cdev);
+ spin_unlock_irq(cdev->ccwlock);
+ if (ret == 0)
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ else {
+ CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return ret;
+ }
+ spin_lock_irq(cdev->ccwlock);
+ /* Check if online processing was successful */
+ if ((cdev->private->state != DEV_STATE_ONLINE) &&
+ (cdev->private->state != DEV_STATE_W4SENSE)) {
+ spin_unlock_irq(cdev->ccwlock);
+ /* Inform the user that set online failed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ pr_warn("%s: Setting the device online failed because it is boxed\n",
+ dev_name(&cdev->dev));
+ } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+ pr_warn("%s: Setting the device online failed because it is not operational\n",
+ dev_name(&cdev->dev));
+ }
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return -ENODEV;
+ }
+ spin_unlock_irq(cdev->ccwlock);
+ if (cdev->drv->set_online)
+ ret = cdev->drv->set_online(cdev);
+ if (ret)
+ goto rollback;
+
+ spin_lock_irq(cdev->ccwlock);
+ cdev->online = 1;
+ spin_unlock_irq(cdev->ccwlock);
+ return 0;
+
+rollback:
+ spin_lock_irq(cdev->ccwlock);
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
+ }
+ ret2 = ccw_device_offline(cdev);
+ if (ret2)
+ goto error;
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return ret;
+
+error:
+ CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret2, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ cdev->private->state = DEV_STATE_OFFLINE;
+ spin_unlock_irq(cdev->ccwlock);
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return ret;
+}
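
Taken together, the two functions above form a symmetric pair: ccw_device_set_online() takes an extra device reference that ccw_device_set_offline() gives up again, and both expect to be called without the ccw device lock held. A minimal sketch of a hypothetical kernel-side caller (not part of the patch):

static int example_cycle_device(struct ccw_device *cdev)
{
	int ret;

	ret = ccw_device_set_online(cdev);	/* enable the device for I/O */
	if (ret)
		return ret;
	/* ... the device can now be used by its driver ... */
	return ccw_device_set_offline(cdev);	/* disable it again */
}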
+
+static int online_store_handle_offline(struct ccw_device *cdev)
+{
+ if (cdev->private->state == DEV_STATE_DISCONNECTED) {
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
+ spin_unlock_irq(cdev->ccwlock);
+ return 0;
+ }
+ if (cdev->drv && cdev->drv->set_offline)
+ return ccw_device_set_offline(cdev);
+ return -EINVAL;
+}
+
+static int online_store_recog_and_online(struct ccw_device *cdev)
+{
+ /* Do device recognition, if needed. */
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ spin_lock_irq(cdev->ccwlock);
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q,
+ cdev->private->flags.recog_done);
+ if (cdev->private->state != DEV_STATE_OFFLINE)
+ /* recognition failed */
+ return -EAGAIN;
+ }
+ if (cdev->drv && cdev->drv->set_online)
+ return ccw_device_set_online(cdev);
+ return -EINVAL;
+}
+
+static int online_store_handle_online(struct ccw_device *cdev, int force)
+{
+ int ret;
+
+ ret = online_store_recog_and_online(cdev);
+ if (ret && !force)
+ return ret;
+ if (force && cdev->private->state == DEV_STATE_BOXED) {
+ ret = ccw_device_stlck(cdev);
+ if (ret)
+ return ret;
+ if (cdev->id.cu_type == 0)
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ ret = online_store_recog_and_online(cdev);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static ssize_t online_store (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ int force, ret;
+ unsigned long i;
+
+ /* Prevent conflict between multiple on-/offline processing requests. */
+ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+ return -EAGAIN;
+ /* Prevent conflict between internal I/Os and on-/offline processing. */
+ if (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ /* Prevent conflict between pending work and on-/offline processing. */
+ if (work_pending(&cdev->private->todo_work)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (!strncmp(buf, "force\n", count)) {
+ force = 1;
+ i = 1;
+ ret = 0;
+ } else {
+ force = 0;
+ ret = kstrtoul(buf, 16, &i);
+ }
+ if (ret)
+ goto out;
+
+ device_lock(dev);
+ switch (i) {
+ case 0:
+ ret = online_store_handle_offline(cdev);
+ break;
+ case 1:
+ ret = online_store_handle_online(cdev, force);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ device_unlock(dev);
+
+out:
+ atomic_set(&cdev->private->onoff, 0);
+ return (ret < 0) ? ret : count;
+}
+
+static ssize_t
+available_show (struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch;
+
+ if (ccw_device_is_orphan(cdev))
+ return sprintf(buf, "no device\n");
+ switch (cdev->private->state) {
+ case DEV_STATE_BOXED:
+ return sprintf(buf, "boxed\n");
+ case DEV_STATE_DISCONNECTED:
+ case DEV_STATE_DISCONNECTED_SENSE_ID:
+ case DEV_STATE_NOT_OPER:
+ sch = to_subchannel(dev->parent);
+ if (!sch->lpm)
+ return sprintf(buf, "no path\n");
+ else
+ return sprintf(buf, "no device\n");
+ default:
+ /* All other states considered fine. */
+ return sprintf(buf, "good\n");
+ }
+}
+
+static ssize_t
+initiate_logging(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ int rc;
+
+ rc = chsc_siosl(sch->schid);
+ if (rc < 0) {
+ pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, rc);
+ return rc;
+ }
+ pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ return count;
+}
+
+static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+
+ return sprintf(buf, "%02x\n", sch->vpm);
+}
+
+static DEVICE_ATTR_RO(devtype);
+static DEVICE_ATTR_RO(cutype);
+static DEVICE_ATTR_RO(modalias);
+static DEVICE_ATTR_RW(online);
+static DEVICE_ATTR(availability, 0444, available_show, NULL);
+static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
+static DEVICE_ATTR_RO(vpm);
+
+static struct attribute *io_subchannel_attrs[] = {
+ &dev_attr_logging.attr,
+ &dev_attr_vpm.attr,
+ NULL,
+};
+
+static const struct attribute_group io_subchannel_attr_group = {
+ .attrs = io_subchannel_attrs,
+};
+
+static struct attribute * ccwdev_attrs[] = {
+ &dev_attr_devtype.attr,
+ &dev_attr_cutype.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_online.attr,
+ &dev_attr_cmb_enable.attr,
+ &dev_attr_availability.attr,
+ NULL,
+};
+
+static const struct attribute_group ccwdev_attr_group = {
+ .attrs = ccwdev_attrs,
+};
+
+static const struct attribute_group *ccwdev_attr_groups[] = {
+ &ccwdev_attr_group,
+ NULL,
+};
+
+static int ccw_device_add(struct ccw_device *cdev)
+{
+ struct device *dev = &cdev->dev;
+
+ dev->bus = &ccw_bus_type;
+ return device_add(dev);
+}
+
+static int match_dev_id(struct device *dev, const void *data)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_dev_id *dev_id = (void *)data;
+
+ return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
+}
+
+/**
+ * get_ccwdev_by_dev_id() - obtain device from a ccw device id
+ * @dev_id: id of the device to be searched
+ *
+ * This function searches all devices attached to the ccw bus for a device
+ * matching @dev_id.
+ * Returns:
+ * If a device is found its reference count is increased and returned;
+ * else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
+
+ return dev ? to_ccwdev(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
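
A short usage sketch (not part of the patch) for the lookup helper above. The device number 0x1234 is hypothetical; the reference returned by the lookup must be dropped with put_device() once the caller is done with the device.

static void example_lookup(void)
{
	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
	struct ccw_device *cdev;

	cdev = get_ccwdev_by_dev_id(&dev_id);
	if (!cdev)
		return;		/* no such device registered */
	/* ... inspect or use cdev ... */
	put_device(&cdev->dev);	/* drop the reference from the lookup */
}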
+
+static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
+{
+ int ret;
+
+ if (device_is_registered(&cdev->dev)) {
+ device_release_driver(&cdev->dev);
+ ret = device_attach(&cdev->dev);
+ WARN_ON(ret == -ENODEV);
+ }
+}
+
+static void
+ccw_device_release(struct device *dev)
+{
+ struct ccw_device *cdev;
+
+ cdev = to_ccwdev(dev);
+ cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
+ sizeof(*cdev->private->dma_area));
+ cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
+ /* Release reference of parent subchannel. */
+ put_device(cdev->dev.parent);
+ kfree(cdev->private);
+ kfree(cdev);
+}
+
+static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ struct gen_pool *dma_pool;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ goto err_cdev;
+ cdev->private = kzalloc(sizeof(struct ccw_device_private),
+ GFP_KERNEL | GFP_DMA);
+ if (!cdev->private)
+ goto err_priv;
+ cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+ cdev->dev.dma_mask = sch->dev.dma_mask;
+ dma_pool = cio_gp_dma_create(&cdev->dev, 1);
+ if (!dma_pool)
+ goto err_dma_pool;
+ cdev->private->dma_pool = dma_pool;
+ cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
+ sizeof(*cdev->private->dma_area));
+ if (!cdev->private->dma_area)
+ goto err_dma_area;
+ return cdev;
+err_dma_area:
+ cio_gp_dma_destroy(dma_pool, &cdev->dev);
+err_dma_pool:
+ kfree(cdev->private);
+err_priv:
+ kfree(cdev);
+err_cdev:
+ return ERR_PTR(-ENOMEM);
+}
+
+static void ccw_device_todo(struct work_struct *work);
+
+static int io_subchannel_initialize_dev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct ccw_device_private *priv = cdev->private;
+ int ret;
+
+ priv->cdev = cdev;
+ priv->int_class = IRQIO_CIO;
+ priv->state = DEV_STATE_NOT_OPER;
+ priv->dev_id.devno = sch->schib.pmcw.dev;
+ priv->dev_id.ssid = sch->schid.ssid;
+
+ INIT_WORK(&priv->todo_work, ccw_device_todo);
+ INIT_LIST_HEAD(&priv->cmb_list);
+ init_waitqueue_head(&priv->wait_q);
+ timer_setup(&priv->timer, ccw_device_timeout, 0);
+
+ atomic_set(&priv->onoff, 0);
+ cdev->ccwlock = sch->lock;
+ cdev->dev.parent = &sch->dev;
+ cdev->dev.release = ccw_device_release;
+ cdev->dev.groups = ccwdev_attr_groups;
+ /* Do first half of device_register. */
+ device_initialize(&cdev->dev);
+ ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret)
+ goto out_put;
+ if (!get_device(&sch->dev)) {
+ ret = -ENODEV;
+ goto out_put;
+ }
+ priv->flags.initialized = 1;
+ spin_lock_irq(sch->lock);
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
+ return 0;
+
+out_put:
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ return ret;
+}
+
+static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ int ret;
+
+ cdev = io_subchannel_allocate_dev(sch);
+ if (!IS_ERR(cdev)) {
+ ret = io_subchannel_initialize_dev(sch, cdev);
+ if (ret)
+ cdev = ERR_PTR(ret);
+ }
+ return cdev;
+}
+
+static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
+
+static void sch_create_and_recog_new_device(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ /* Need to allocate a new ccw device. */
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ /* OK, we did everything we could... */
+ css_sch_device_unregister(sch);
+ return;
+ }
+ /* Start recognition for the new ccw device. */
+ io_subchannel_recog(cdev, sch);
+}
+
+/*
+ * Register recognized device.
+ */
+static void io_subchannel_register(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret, adjust_init_count = 1;
+ unsigned long flags;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /*
+ * Check if subchannel is still registered. It may have become
+ * unregistered if a machine check hit us after finishing
+ * device recognition but before the register work could be
+ * queued.
+ */
+ if (!device_is_registered(&sch->dev))
+ goto out_err;
+ css_update_ssd_info(sch);
+ /*
+ * io_subchannel_register() will also be called after device
+ * recognition has been done for a boxed device (which will already
+ * be registered). We need to reprobe since we may now have sense id
+ * information.
+ */
+ if (device_is_registered(&cdev->dev)) {
+ if (!cdev->drv) {
+ ret = device_reprobe(&cdev->dev);
+ if (ret)
+ /* We can't do much here. */
+ CIO_MSG_EVENT(0, "device_reprobe() returned"
+ " %d for 0.%x.%04x\n", ret,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ }
+ adjust_init_count = 0;
+ goto out;
+ }
+ /*
+ * Now we know this subchannel will stay, we can throw
+ * our delayed uevent.
+ */
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+ /* make it known to the system */
+ ret = ccw_device_add(cdev);
+ if (ret) {
+ CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ spin_lock_irqsave(sch->lock, flags);
+ sch_set_cdev(sch, NULL);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Release initial device reference. */
+ put_device(&cdev->dev);
+ goto out_err;
+ }
+out:
+ cdev->private->flags.recog_done = 1;
+ wake_up(&cdev->private->wait_q);
+out_err:
+ if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+}
+
+static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ /* Get subchannel reference for local processing. */
+ if (!get_device(cdev->dev.parent))
+ return;
+ sch = to_subchannel(cdev->dev.parent);
+ css_sch_device_unregister(sch);
+ /* Release subchannel reference for local processing. */
+ put_device(&sch->dev);
+}
+
+/*
+ * subchannel recognition done. Called from the state machine.
+ */
+void
+io_subchannel_recog_done(struct ccw_device *cdev)
+{
+ if (css_init_done == 0) {
+ cdev->private->flags.recog_done = 1;
+ return;
+ }
+ switch (cdev->private->state) {
+ case DEV_STATE_BOXED:
+ /* Device did not respond in time. */
+ case DEV_STATE_NOT_OPER:
+ cdev->private->flags.recog_done = 1;
+ /* Remove device found not operational. */
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+ break;
+ case DEV_STATE_OFFLINE:
+ /*
+ * We can't register the device in interrupt context so
+ * we schedule a work item.
+ */
+ ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
+ break;
+ }
+}
+
+static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+{
+ /* Increase counter of devices currently in recognition. */
+ atomic_inc(&ccw_device_init_count);
+
+ /* Start async. device sensing. */
+ spin_lock_irq(sch->lock);
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(sch->lock);
+}
+
+static int ccw_device_move_to_sch(struct ccw_device *cdev,
+ struct subchannel *sch)
+{
+ struct subchannel *old_sch;
+ int rc, old_enabled = 0;
+
+ old_sch = to_subchannel(cdev->dev.parent);
+ /* Obtain child reference for new parent. */
+ if (!get_device(&sch->dev))
+ return -ENODEV;
+
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ old_enabled = old_sch->schib.pmcw.ena;
+ rc = 0;
+ if (old_enabled)
+ rc = cio_disable_subchannel(old_sch);
+ spin_unlock_irq(old_sch->lock);
+ if (rc == -EBUSY) {
+ /* Release child reference for new parent. */
+ put_device(&sch->dev);
+ return rc;
+ }
+ }
+
+ mutex_lock(&sch->reg_mutex);
+ rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
+ mutex_unlock(&sch->reg_mutex);
+ if (rc) {
+ CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, sch->schid.ssid,
+ sch->schib.pmcw.dev, rc);
+ if (old_enabled) {
+ /* Try to reenable the old subchannel. */
+ spin_lock_irq(old_sch->lock);
+ cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
+ spin_unlock_irq(old_sch->lock);
+ }
+ /* Release child reference for new parent. */
+ put_device(&sch->dev);
+ return rc;
+ }
+ /* Clean up old subchannel. */
+ if (!sch_is_pseudo_sch(old_sch)) {
+ spin_lock_irq(old_sch->lock);
+ sch_set_cdev(old_sch, NULL);
+ spin_unlock_irq(old_sch->lock);
+ css_schedule_eval(old_sch->schid);
+ }
+ /* Release child reference for old parent. */
+ put_device(&old_sch->dev);
+ /* Initialize new subchannel. */
+ spin_lock_irq(sch->lock);
+ cdev->ccwlock = sch->lock;
+ if (!sch_is_pseudo_sch(sch))
+ sch_set_cdev(sch, cdev);
+ spin_unlock_irq(sch->lock);
+ if (!sch_is_pseudo_sch(sch))
+ css_update_ssd_info(sch);
+ return 0;
+}
+
+static int ccw_device_move_to_orph(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_subsystem *css = to_css(sch->dev.parent);
+
+ return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
+}
+
+static void io_subchannel_irq(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+
+ CIO_TRACE_EVENT(6, "IRQ");
+ CIO_TRACE_EVENT(6, dev_name(&sch->dev));
+ if (cdev)
+ dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+ else
+ inc_irq_stat(IRQIO_CIO);
+}
+
+void io_subchannel_init_config(struct subchannel *sch)
+{
+ memset(&sch->config, 0, sizeof(sch->config));
+ sch->config.csense = 1;
+}
+
+static void io_subchannel_init_fields(struct subchannel *sch)
+{
+ if (cio_is_console(sch->schid))
+ sch->opm = 0xff;
+ else
+ sch->opm = chp_get_sch_opm(sch);
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
+ sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
+
+ CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
+ " - PIM = %02X, PAM = %02X, POM = %02X\n",
+ sch->schib.pmcw.dev, sch->schid.ssid,
+ sch->schid.sch_no, sch->schib.pmcw.pim,
+ sch->schib.pmcw.pam, sch->schib.pmcw.pom);
+
+ io_subchannel_init_config(sch);
+}
+
+/*
+ * Note: We always return 0 so that we bind to the device even on error.
+ * This is needed so that our remove function is called on unregister.
+ */
+static int io_subchannel_probe(struct subchannel *sch)
+{
+ struct io_subchannel_private *io_priv;
+ struct ccw_device *cdev;
+ int rc;
+
+ if (cio_is_console(sch->schid)) {
+ rc = sysfs_create_group(&sch->dev.kobj,
+ &io_subchannel_attr_group);
+ if (rc)
+ CIO_MSG_EVENT(0, "Failed to create io subchannel "
+ "attributes for subchannel "
+ "0.%x.%04x (rc=%d)\n",
+ sch->schid.ssid, sch->schid.sch_no, rc);
+ /*
+ * The console subchannel already has an associated ccw_device.
+ * Throw the delayed uevent for the subchannel, register
+ * the ccw_device and exit.
+ */
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ /* should always be the case for the console */
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+ cdev = sch_get_cdev(sch);
+ rc = ccw_device_add(cdev);
+ if (rc) {
+ /* Release online reference. */
+ put_device(&cdev->dev);
+ goto out_schedule;
+ }
+ if (atomic_dec_and_test(&ccw_device_init_count))
+ wake_up(&ccw_device_init_wq);
+ return 0;
+ }
+ io_subchannel_init_fields(sch);
+ rc = cio_commit_config(sch);
+ if (rc)
+ goto out_schedule;
+ rc = sysfs_create_group(&sch->dev.kobj,
+ &io_subchannel_attr_group);
+ if (rc)
+ goto out_schedule;
+ /* Allocate I/O subchannel private data. */
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv)
+ goto out_schedule;
+
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area) {
+ kfree(io_priv);
+ goto out_schedule;
+ }
+
+ set_io_private(sch, io_priv);
+ css_schedule_eval(sch->schid);
+ return 0;
+
+out_schedule:
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ spin_unlock_irq(sch->lock);
+ return 0;
+}
+
+static int io_subchannel_remove(struct subchannel *sch)
+{
+ struct io_subchannel_private *io_priv = to_io_private(sch);
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (!cdev)
+ goto out_free;
+
+ ccw_device_unregister(cdev);
+ spin_lock_irq(sch->lock);
+ sch_set_cdev(sch, NULL);
+ set_io_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+out_free:
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
+ kfree(io_priv);
+ sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+ return 0;
+}
+
+static void io_subchannel_verify(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (cdev)
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+}
+
+static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (!cdev)
+ return;
+ if (cio_update_schib(sch))
+ goto err;
+ /* Check for I/O on path. */
+ if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
+ goto out;
+ if (cdev->private->state == DEV_STATE_ONLINE) {
+ ccw_device_kill_io(cdev);
+ goto out;
+ }
+ if (cio_clear(sch))
+ goto err;
+out:
+ /* Trigger path verification. */
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return;
+
+err:
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+}
+
+static int io_subchannel_chp_event(struct subchannel *sch,
+ struct chp_link *link, int event)
+{
+ struct ccw_device *cdev = sch_get_cdev(sch);
+ int mask;
+
+ mask = chp_ssd_get_mask(&sch->ssd_info, link);
+ if (!mask)
+ return 0;
+ switch (event) {
+ case CHP_VARY_OFF:
+ sch->opm &= ~mask;
+ sch->lpm &= ~mask;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
+ io_subchannel_terminate_path(sch, mask);
+ break;
+ case CHP_VARY_ON:
+ sch->opm |= mask;
+ sch->lpm |= mask;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
+ io_subchannel_verify(sch);
+ break;
+ case CHP_OFFLINE:
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ if (cdev)
+ cdev->private->path_gone_mask |= mask;
+ io_subchannel_terminate_path(sch, mask);
+ break;
+ case CHP_ONLINE:
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ sch->lpm |= mask & sch->opm;
+ if (cdev)
+ cdev->private->path_new_mask |= mask;
+ io_subchannel_verify(sch);
+ break;
+ }
+ return 0;
+}
+
+static void io_subchannel_quiesce(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ cdev = sch_get_cdev(sch);
+ if (cio_is_console(sch->schid))
+ goto out_unlock;
+ if (!sch->schib.pmcw.ena)
+ goto out_unlock;
+ ret = cio_disable_subchannel(sch);
+ if (ret != -EBUSY)
+ goto out_unlock;
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
+ while (ret == -EBUSY) {
+ cdev->private->state = DEV_STATE_QUIESCE;
+ cdev->private->iretry = 255;
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q,
+ cdev->private->state != DEV_STATE_QUIESCE);
+ spin_lock_irq(sch->lock);
+ }
+ ret = cio_disable_subchannel(sch);
+ }
+out_unlock:
+ spin_unlock_irq(sch->lock);
+}
+
+static void io_subchannel_shutdown(struct subchannel *sch)
+{
+ io_subchannel_quiesce(sch);
+}
+
+static int device_is_disconnected(struct ccw_device *cdev)
+{
+ if (!cdev)
+ return 0;
+ return (cdev->private->state == DEV_STATE_DISCONNECTED ||
+ cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
+}
+
+static int recovery_check(struct device *dev, void *data)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch;
+ int *redo = data;
+
+ spin_lock_irq(cdev->ccwlock);
+ switch (cdev->private->state) {
+ case DEV_STATE_ONLINE:
+ sch = to_subchannel(cdev->dev.parent);
+ if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
+ break;
+ fallthrough;
+ case DEV_STATE_DISCONNECTED:
+ CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ *redo = 1;
+ break;
+ case DEV_STATE_DISCONNECTED_SENSE_ID:
+ *redo = 1;
+ break;
+ }
+ spin_unlock_irq(cdev->ccwlock);
+
+ return 0;
+}
+
+static void recovery_work_func(struct work_struct *unused)
+{
+ int redo = 0;
+
+ bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
+ if (redo) {
+ spin_lock_irq(&recovery_lock);
+ if (!timer_pending(&recovery_timer)) {
+ if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
+ recovery_phase++;
+ mod_timer(&recovery_timer, jiffies +
+ recovery_delay[recovery_phase] * HZ);
+ }
+ spin_unlock_irq(&recovery_lock);
+ } else
+ CIO_MSG_EVENT(3, "recovery: end\n");
+}
+
+static DECLARE_WORK(recovery_work, recovery_work_func);
+
+static void recovery_func(struct timer_list *unused)
+{
+ /*
+ * We can't do our recovery in softirq context and it's not
+ * performance critical, so we schedule it.
+ */
+ schedule_work(&recovery_work);
+}
+
+void ccw_device_schedule_recovery(void)
+{
+ unsigned long flags;
+
+ CIO_MSG_EVENT(3, "recovery: schedule\n");
+ spin_lock_irqsave(&recovery_lock, flags);
+ if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
+ recovery_phase = 0;
+ mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
+ }
+ spin_unlock_irqrestore(&recovery_lock, flags);
+}
+
+static int purge_fn(struct device *dev, void *data)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+
+ spin_lock_irq(cdev->ccwlock);
+ if (is_blacklisted(id->ssid, id->devno) &&
+ (cdev->private->state == DEV_STATE_OFFLINE) &&
+ (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
+ CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+ id->devno);
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ atomic_set(&cdev->private->onoff, 0);
+ }
+ spin_unlock_irq(cdev->ccwlock);
+ /* Abort loop in case of pending signal. */
+ if (signal_pending(current))
+ return -EINTR;
+
+ return 0;
+}
+
+/**
+ * ccw_purge_blacklisted - purge unused, blacklisted devices
+ *
+ * Unregister all ccw devices that are offline and on the blacklist.
+ */
+int ccw_purge_blacklisted(void)
+{
+ CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
+ bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
+ return 0;
+}
+
+void ccw_device_set_disconnected(struct ccw_device *cdev)
+{
+ if (!cdev)
+ return;
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->flags.fake_irb = 0;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ if (cdev->online)
+ ccw_device_schedule_recovery();
+}
+
+void ccw_device_set_notoper(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ CIO_TRACE_EVENT(2, "notoper");
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
+ ccw_device_set_timeout(cdev, 0);
+ cio_disable_subchannel(sch);
+ cdev->private->state = DEV_STATE_NOT_OPER;
+}
+
+enum io_sch_action {
+ IO_SCH_UNREG,
+ IO_SCH_ORPH_UNREG,
+ IO_SCH_UNREG_CDEV,
+ IO_SCH_ATTACH,
+ IO_SCH_UNREG_ATTACH,
+ IO_SCH_ORPH_ATTACH,
+ IO_SCH_REPROBE,
+ IO_SCH_VERIFY,
+ IO_SCH_DISC,
+ IO_SCH_NOP,
+};
+
+static enum io_sch_action sch_get_action(struct subchannel *sch)
+{
+ struct ccw_device *cdev;
+
+ cdev = sch_get_cdev(sch);
+ if (cio_update_schib(sch)) {
+ /* Not operational. */
+ if (!cdev)
+ return IO_SCH_UNREG;
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ return IO_SCH_UNREG;
+ return IO_SCH_ORPH_UNREG;
+ }
+ /* Operational. */
+ if (!cdev)
+ return IO_SCH_ATTACH;
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ return IO_SCH_UNREG_ATTACH;
+ return IO_SCH_ORPH_ATTACH;
+ }
+ if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
+ return IO_SCH_UNREG_CDEV;
+ return IO_SCH_DISC;
+ }
+ if (device_is_disconnected(cdev))
+ return IO_SCH_REPROBE;
+ if (cdev->online && !cdev->private->flags.resuming)
+ return IO_SCH_VERIFY;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return IO_SCH_UNREG_ATTACH;
+ return IO_SCH_NOP;
+}
+
+/**
+ * io_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel and device. Return
+ * zero when the event has been handled sufficiently or -EAGAIN when this
+ * function should be called again in process context.
+ */
+static int io_subchannel_sch_event(struct subchannel *sch, int process)
+{
+ unsigned long flags;
+ struct ccw_device *cdev;
+ struct ccw_dev_id dev_id;
+ enum io_sch_action action;
+ int rc = -EAGAIN;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
+ cdev = sch_get_cdev(sch);
+ if (cdev && work_pending(&cdev->private->todo_work))
+ goto out_unlock;
+ action = sch_get_action(sch);
+ CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, process,
+ action);
+ /* Perform immediate actions while holding the lock. */
+ switch (action) {
+ case IO_SCH_REPROBE:
+ /* Trigger device recognition. */
+ ccw_device_trigger_reprobe(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_VERIFY:
+ /* Trigger path verification. */
+ io_subchannel_verify(sch);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_DISC:
+ ccw_device_set_disconnected(cdev);
+ rc = 0;
+ goto out_unlock;
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ ccw_device_set_disconnected(cdev);
+ break;
+ case IO_SCH_UNREG_CDEV:
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_UNREG:
+ if (!cdev)
+ break;
+ if (cdev->private->state == DEV_STATE_SENSE_ID) {
+ /*
+ * Note: delayed work triggered by this event
+ * and repeated calls to sch_event are synchronized
+ * by the above check for work_pending(cdev).
+ */
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ } else
+ ccw_device_set_notoper(cdev);
+ break;
+ case IO_SCH_NOP:
+ rc = 0;
+ goto out_unlock;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* All other actions require process context. */
+ if (!process)
+ goto out;
+ /* Handle attached ccw device. */
+ switch (action) {
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_ORPH_ATTACH:
+ /* Move ccw device to orphanage. */
+ rc = ccw_device_move_to_orph(cdev);
+ if (rc)
+ goto out;
+ break;
+ case IO_SCH_UNREG_CDEV:
+ case IO_SCH_UNREG_ATTACH:
+ spin_lock_irqsave(sch->lock, flags);
+ if (cdev->private->flags.resuming) {
+ /* Device will be handled later. */
+ rc = 0;
+ goto out_unlock;
+ }
+ sch_set_cdev(sch, NULL);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Unregister ccw device. */
+ ccw_device_unregister(cdev);
+ break;
+ default:
+ break;
+ }
+ /* Handle subchannel. */
+ switch (action) {
+ case IO_SCH_ORPH_UNREG:
+ case IO_SCH_UNREG:
+ if (!cdev || !cdev->private->flags.resuming)
+ css_sch_device_unregister(sch);
+ break;
+ case IO_SCH_ORPH_ATTACH:
+ case IO_SCH_UNREG_ATTACH:
+ case IO_SCH_ATTACH:
+ dev_id.ssid = sch->schid.ssid;
+ dev_id.devno = sch->schib.pmcw.dev;
+ cdev = get_ccwdev_by_dev_id(&dev_id);
+ if (!cdev) {
+ sch_create_and_recog_new_device(sch);
+ break;
+ }
+ rc = ccw_device_move_to_sch(cdev, sch);
+ if (rc) {
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
+ goto out;
+ }
+ spin_lock_irqsave(sch->lock, flags);
+ ccw_device_trigger_reprobe(cdev);
+ spin_unlock_irqrestore(sch->lock, flags);
+ /* Release reference from get_ccwdev_by_dev_id() */
+ put_device(&cdev->dev);
+ break;
+ default:
+ break;
+ }
+ return 0;
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+out:
+ return rc;
+}
+
+static void ccw_device_set_int_class(struct ccw_device *cdev)
+{
+ struct ccw_driver *cdrv = cdev->drv;
+
+ /* Note: we interpret class 0 in this context as an uninitialized
+ * field since it translates to a non-I/O interrupt class. */
+ if (cdrv->int_class != 0)
+ cdev->private->int_class = cdrv->int_class;
+ else
+ cdev->private->int_class = IRQIO_CIO;
+}
+
+#ifdef CONFIG_CCW_CONSOLE
+int __init ccw_device_enable_console(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int rc;
+
+ if (!cdev->drv || !cdev->handler)
+ return -EINVAL;
+
+ io_subchannel_init_fields(sch);
+ rc = cio_commit_config(sch);
+ if (rc)
+ return rc;
+ sch->driver = &io_subchannel_driver;
+ io_subchannel_recog(cdev, sch);
+ /* Now wait for the async. recognition to come to an end. */
+ spin_lock_irq(cdev->ccwlock);
+ while (!dev_fsm_final_state(cdev))
+ ccw_device_wait_idle(cdev);
+
+ /* Hold on to an extra reference while device is online. */
+ get_device(&cdev->dev);
+ rc = ccw_device_online(cdev);
+ if (rc)
+ goto out_unlock;
+
+ while (!dev_fsm_final_state(cdev))
+ ccw_device_wait_idle(cdev);
+
+ if (cdev->private->state == DEV_STATE_ONLINE)
+ cdev->online = 1;
+ else
+ rc = -EIO;
+out_unlock:
+ spin_unlock_irq(cdev->ccwlock);
+ if (rc) /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return rc;
+}
+
+struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
+{
+ struct io_subchannel_private *io_priv;
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+
+ sch = cio_probe_console();
+ if (IS_ERR(sch))
+ return ERR_CAST(sch);
+
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv)
+ goto err_priv;
+ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+ sizeof(*io_priv->dma_area),
+ &io_priv->dma_area_dma, GFP_KERNEL);
+ if (!io_priv->dma_area)
+ goto err_dma_area;
+ set_io_private(sch, io_priv);
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
+ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ kfree(io_priv);
+ return cdev;
+ }
+ cdev->drv = drv;
+ ccw_device_set_int_class(cdev);
+ return cdev;
+
+err_dma_area:
+ kfree(io_priv);
+err_priv:
+ put_device(&sch->dev);
+ return ERR_PTR(-ENOMEM);
+}
+
+void __init ccw_device_destroy_console(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct io_subchannel_private *io_priv = to_io_private(sch);
+
+ set_io_private(sch, NULL);
+ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+ io_priv->dma_area, io_priv->dma_area_dma);
+ put_device(&sch->dev);
+ put_device(&cdev->dev);
+ kfree(io_priv);
+}
+
+/**
+ * ccw_device_wait_idle() - busy wait for device to become idle
+ * @cdev: ccw device
+ *
+ * Poll until activity control is zero, that is, no function or data
+ * transfer is pending/active.
+ * Called with device lock being held.
+ */
+void ccw_device_wait_idle(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ while (1) {
+ cio_tsch(sch);
+ if (sch->schib.scsw.cmd.actl == 0)
+ break;
+ udelay_simple(100);
+ }
+}
+
+static int ccw_device_pm_restore(struct device *dev);
+
+int ccw_device_force_console(struct ccw_device *cdev)
+{
+ return ccw_device_pm_restore(&cdev->dev);
+}
+EXPORT_SYMBOL_GPL(ccw_device_force_console);
+#endif
+
+/**
+ * get_ccwdev_by_busid() - obtain device from a bus id
+ * @cdrv: driver the device is owned by
+ * @bus_id: bus id of the device to be searched
+ *
+ * This function searches all devices owned by @cdrv for a device with a bus
+ * id matching @bus_id.
+ * Returns:
+ * If a match is found, the reference count of the found device is increased
+ * and it is returned; else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
+ const char *bus_id)
+{
+ struct device *dev;
+
+ dev = driver_find_device_by_name(&cdrv->driver, bus_id);
+
+ return dev ? to_ccwdev(dev) : NULL;
+}
+
+/************************** device driver handling ************************/
+
+/* This is the implementation of the ccw_driver class. The probe, remove
+ * and release methods are initially very similar to the device_driver
+ * implementations, with the difference that they have ccw_device
+ * arguments.
+ *
+ * A ccw driver also contains the information that is needed for
+ * device matching.
+ */
+static int
+ccw_device_probe (struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
+ int ret;
+
+ cdev->drv = cdrv; /* to let the driver call _set_online */
+ ccw_device_set_int_class(cdev);
+ ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
+ if (ret) {
+ cdev->drv = NULL;
+ cdev->private->int_class = IRQIO_CIO;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ccw_device_remove(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_driver *cdrv = cdev->drv;
+ struct subchannel *sch;
+ int ret;
+
+ if (cdrv->remove)
+ cdrv->remove(cdev);
+
+ spin_lock_irq(cdev->ccwlock);
+ if (cdev->online) {
+ cdev->online = 0;
+ ret = ccw_device_offline(cdev);
+ spin_unlock_irq(cdev->ccwlock);
+ if (ret == 0)
+ wait_event(cdev->private->wait_q,
+ dev_fsm_final_state(cdev));
+ else
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ /* Give up reference obtained in ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ spin_lock_irq(cdev->ccwlock);
+ }
+ ccw_device_set_timeout(cdev, 0);
+ cdev->drv = NULL;
+ cdev->private->int_class = IRQIO_CIO;
+ sch = to_subchannel(cdev->dev.parent);
+ spin_unlock_irq(cdev->ccwlock);
+ io_subchannel_quiesce(sch);
+ __disable_cmf(cdev);
+
+ return 0;
+}
+
+static void ccw_device_shutdown(struct device *dev)
+{
+ struct ccw_device *cdev;
+
+ cdev = to_ccwdev(dev);
+ if (cdev->drv && cdev->drv->shutdown)
+ cdev->drv->shutdown(cdev);
+ __disable_cmf(cdev);
+}
+
+static int ccw_device_pm_prepare(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ if (work_pending(&cdev->private->todo_work))
+ return -EAGAIN;
+ /* Fail while device is being set online/offline. */
+ if (atomic_read(&cdev->private->onoff))
+ return -EAGAIN;
+
+ if (cdev->online && cdev->drv && cdev->drv->prepare)
+ return cdev->drv->prepare(cdev);
+
+ return 0;
+}
+
+static void ccw_device_pm_complete(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+
+ if (cdev->online && cdev->drv && cdev->drv->complete)
+ cdev->drv->complete(cdev);
+}
+
+static int ccw_device_pm_freeze(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret, cm_enabled;
+
+ /* Fail suspend while device is in transitional state. */
+ if (!dev_fsm_final_state(cdev))
+ return -EAGAIN;
+ if (!cdev->online)
+ return 0;
+ if (cdev->drv && cdev->drv->freeze) {
+ ret = cdev->drv->freeze(cdev);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irq(sch->lock);
+ cm_enabled = cdev->private->cmb != NULL;
+ spin_unlock_irq(sch->lock);
+ if (cm_enabled) {
+ /* Keep the channel subsystem from writing measurement data to memory. */
+ ret = ccw_set_cmf(cdev, 0);
+ if (ret)
+ return ret;
+ }
+ /* From here on, disallow device driver I/O. */
+ spin_lock_irq(sch->lock);
+ ret = cio_disable_subchannel(sch);
+ spin_unlock_irq(sch->lock);
+
+ return ret;
+}
+
+static int ccw_device_pm_thaw(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int ret, cm_enabled;
+
+ if (!cdev->online)
+ return 0;
+
+ spin_lock_irq(sch->lock);
+ /* Allow device driver I/O again. */
+ ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ cm_enabled = cdev->private->cmb != NULL;
+ spin_unlock_irq(sch->lock);
+ if (ret)
+ return ret;
+
+ if (cm_enabled) {
+ ret = ccw_set_cmf(cdev, 1);
+ if (ret)
+ return ret;
+ }
+
+ if (cdev->drv && cdev->drv->thaw)
+ ret = cdev->drv->thaw(cdev);
+
+ return ret;
+}
+
+static void __ccw_device_pm_restore(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ spin_lock_irq(sch->lock);
+ if (cio_is_console(sch->schid)) {
+ cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ goto out_unlock;
+ }
+ /*
+ * While we were sleeping, devices may have gone or become
+ * available again. Kick re-detection.
+ */
+ cdev->private->flags.resuming = 1;
+ cdev->private->path_new_mask = LPM_ANYPATH;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ spin_unlock_irq(sch->lock);
+ css_wait_for_slow_path();
+
+ /* cdev may have been moved to a different subchannel. */
+ sch = to_subchannel(cdev->dev.parent);
+ spin_lock_irq(sch->lock);
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_OFFLINE)
+ goto out_unlock;
+
+ ccw_device_recognition(cdev);
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED);
+ spin_lock_irq(sch->lock);
+
+out_unlock:
+ cdev->private->flags.resuming = 0;
+ spin_unlock_irq(sch->lock);
+}
+
+static int resume_handle_boxed(struct ccw_device *cdev)
+{
+ cdev->private->state = DEV_STATE_BOXED;
+ if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
+ return 0;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ return -ENODEV;
+}
+
+static int resume_handle_disc(struct ccw_device *cdev)
+{
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
+ return 0;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ return -ENODEV;
+}
+
+static int ccw_device_pm_restore(struct device *dev)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch;
+ int ret = 0;
+
+ __ccw_device_pm_restore(cdev);
+ sch = to_subchannel(cdev->dev.parent);
+ spin_lock_irq(sch->lock);
+ if (cio_is_console(sch->schid))
+ goto out_restore;
+
+ /* check recognition results */
+ switch (cdev->private->state) {
+ case DEV_STATE_OFFLINE:
+ case DEV_STATE_ONLINE:
+ cdev->private->flags.donotify = 0;
+ break;
+ case DEV_STATE_BOXED:
+ ret = resume_handle_boxed(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ default:
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ }
+ /* check if the device type has changed */
+ if (!ccw_device_test_sense_data(cdev)) {
+ ccw_device_update_sense_data(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+ if (!cdev->online)
+ goto out_unlock;
+
+ if (ccw_device_online(cdev)) {
+ ret = resume_handle_disc(cdev);
+ if (ret)
+ goto out_unlock;
+ goto out_restore;
+ }
+ spin_unlock_irq(sch->lock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ spin_lock_irq(sch->lock);
+
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ /* reenable cmf, if needed */
+ if (cdev->private->cmb) {
+ spin_unlock_irq(sch->lock);
+ ret = ccw_set_cmf(cdev, 1);
+ spin_lock_irq(sch->lock);
+ if (ret) {
+ CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
+ "(rc=%d)\n", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
+ ret = 0;
+ }
+ }
+
+out_restore:
+ spin_unlock_irq(sch->lock);
+ if (cdev->online && cdev->drv && cdev->drv->restore)
+ ret = cdev->drv->restore(cdev);
+ return ret;
+
+out_unlock:
+ spin_unlock_irq(sch->lock);
+ return ret;
+}
+
+static const struct dev_pm_ops ccw_pm_ops = {
+ .prepare = ccw_device_pm_prepare,
+ .complete = ccw_device_pm_complete,
+ .freeze = ccw_device_pm_freeze,
+ .thaw = ccw_device_pm_thaw,
+ .restore = ccw_device_pm_restore,
+};
+
+static struct bus_type ccw_bus_type = {
+ .name = "ccw",
+ .match = ccw_bus_match,
+ .uevent = ccw_uevent,
+ .probe = ccw_device_probe,
+ .remove = ccw_device_remove,
+ .shutdown = ccw_device_shutdown,
+ .pm = &ccw_pm_ops,
+};
+
+/**
+ * ccw_driver_register() - register a ccw driver
+ * @cdriver: driver to be registered
+ *
+ * This function is mainly a wrapper around driver_register().
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int ccw_driver_register(struct ccw_driver *cdriver)
+{
+ struct device_driver *drv = &cdriver->driver;
+
+ drv->bus = &ccw_bus_type;
+
+ return driver_register(drv);
+}
+
+/**
+ * ccw_driver_unregister() - deregister a ccw driver
+ * @cdriver: driver to be deregistered
+ *
+ * This function is mainly a wrapper around driver_unregister().
+ */
+void ccw_driver_unregister(struct ccw_driver *cdriver)
+{
+ driver_unregister(&cdriver->driver);
+}
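+
+/*
+ * Minimal usage sketch, assuming a hypothetical "foo_example" driver (none
+ * of the foo_example_* names exist in the tree): the driver supplies an id
+ * table and a struct ccw_driver and hands it to ccw_driver_register(); the
+ * ccw bus then calls probe() for each matching device, and the driver calls
+ * ccw_driver_unregister() on module exit.
+ */
+static struct ccw_device_id foo_example_ids[] = {
+ { CCW_DEVICE(0x3990, 0) }, /* match on control-unit type */
+ { /* end of list */ },
+};
+
+static int foo_example_probe(struct ccw_device *cdev)
+{
+ /* Request path grouping before the device is set online. */
+ return ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
+}
+
+static struct ccw_driver foo_example_driver = {
+ .driver = {
+ .name = "foo_example",
+ .owner = THIS_MODULE,
+ },
+ .ids = foo_example_ids,
+ .probe = foo_example_probe,
+};
+
+static int __init foo_example_init(void)
+{
+ return ccw_driver_register(&foo_example_driver);
+}
+
+static void __exit foo_example_exit(void)
+{
+ ccw_driver_unregister(&foo_example_driver);
+}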
+
+static void ccw_device_todo(struct work_struct *work)
+{
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ enum cdev_todo todo;
+
+ priv = container_of(work, struct ccw_device_private, todo_work);
+ cdev = priv->cdev;
+ sch = to_subchannel(cdev->dev.parent);
+ /* Find out todo. */
+ spin_lock_irq(cdev->ccwlock);
+ todo = priv->todo;
+ priv->todo = CDEV_TODO_NOTHING;
+ CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
+ priv->dev_id.ssid, priv->dev_id.devno, todo);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Perform todo. */
+ switch (todo) {
+ case CDEV_TODO_ENABLE_CMF:
+ cmf_reenable(cdev);
+ break;
+ case CDEV_TODO_REBIND:
+ ccw_device_do_unbind_bind(cdev);
+ break;
+ case CDEV_TODO_REGISTER:
+ io_subchannel_register(cdev);
+ break;
+ case CDEV_TODO_UNREG_EVAL:
+ if (!sch_is_pseudo_sch(sch))
+ css_schedule_eval(sch->schid);
+ fallthrough;
+ case CDEV_TODO_UNREG:
+ if (sch_is_pseudo_sch(sch))
+ ccw_device_unregister(cdev);
+ else
+ ccw_device_call_sch_unregister(cdev);
+ break;
+ default:
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&cdev->dev);
+}
+
+/**
+ * ccw_device_sched_todo - schedule ccw device operation
+ * @cdev: ccw device
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with ccwdev lock held.
+ */
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
+{
+ CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+ todo);
+ if (cdev->private->todo >= todo)
+ return;
+ cdev->private->todo = todo;
+ /* Get workqueue ref. */
+ if (!get_device(&cdev->dev))
+ return;
+ if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&cdev->dev);
+ }
+}
+
+/**
+ * ccw_device_siosl() - initiate logging
+ * @cdev: ccw device
+ *
+ * This function is used to invoke model-dependent logging within the channel
+ * subsystem.
+ */
+int ccw_device_siosl(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ return chsc_siosl(sch->schid);
+}
+EXPORT_SYMBOL_GPL(ccw_device_siosl);
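+
+/*
+ * Sketch of a possible caller (assumed driver code, not part of this file):
+ * a driver's error-recovery path could request model-dependent logging and
+ * merely warn if that is not supported.
+ */
+static void foo_example_request_logging(struct ccw_device *cdev)
+{
+ if (ccw_device_siosl(cdev))
+ dev_warn(&cdev->dev, "could not initiate logging\n");
+}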
+
+EXPORT_SYMBOL(ccw_device_set_online);
+EXPORT_SYMBOL(ccw_device_set_offline);
+EXPORT_SYMBOL(ccw_driver_register);
+EXPORT_SYMBOL(ccw_driver_unregister);
+EXPORT_SYMBOL(get_ccwdev_by_busid);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
new file mode 100644
index 000000000..853b6a8ca
--- /dev/null
+++ b/drivers/s390/cio/device.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_DEVICE_H
+#define S390_DEVICE_H
+
+#include <asm/ccwdev.h>
+#include <linux/atomic.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/kernel_stat.h>
+#include "io_sch.h"
+
+/*
+ * states of the device state machine
+ */
+enum dev_state {
+ DEV_STATE_NOT_OPER,
+ DEV_STATE_SENSE_ID,
+ DEV_STATE_OFFLINE,
+ DEV_STATE_VERIFY,
+ DEV_STATE_ONLINE,
+ DEV_STATE_W4SENSE,
+ DEV_STATE_DISBAND_PGID,
+ DEV_STATE_BOXED,
+ /* states to wait for i/o completion before doing something */
+ DEV_STATE_TIMEOUT_KILL,
+ DEV_STATE_QUIESCE,
+ /* special states for devices gone not operational */
+ DEV_STATE_DISCONNECTED,
+ DEV_STATE_DISCONNECTED_SENSE_ID,
+ DEV_STATE_CMFCHANGE,
+ DEV_STATE_CMFUPDATE,
+ DEV_STATE_STEAL_LOCK,
+ /* last element! */
+ NR_DEV_STATES
+};
+
+/*
+ * asynchronous events of the device state machine
+ */
+enum dev_event {
+ DEV_EVENT_NOTOPER,
+ DEV_EVENT_INTERRUPT,
+ DEV_EVENT_TIMEOUT,
+ DEV_EVENT_VERIFY,
+ /* last element! */
+ NR_DEV_EVENTS
+};
+
+struct ccw_device;
+
+/*
+ * action called through jumptable
+ */
+typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
+extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
+
+static inline void
+dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ int state = cdev->private->state;
+
+ if (dev_event == DEV_EVENT_INTERRUPT) {
+ if (state == DEV_STATE_ONLINE)
+ inc_irq_stat(cdev->private->int_class);
+ else if (state != DEV_STATE_CMFCHANGE &&
+ state != DEV_STATE_CMFUPDATE)
+ inc_irq_stat(IRQIO_CIO);
+ }
+ dev_jumptable[state][dev_event](cdev, dev_event);
+}
+
+/*
+ * Returns 1 if the device state is final.
+ */
+static inline int
+dev_fsm_final_state(struct ccw_device *cdev)
+{
+ return (cdev->private->state == DEV_STATE_NOT_OPER ||
+ cdev->private->state == DEV_STATE_OFFLINE ||
+ cdev->private->state == DEV_STATE_ONLINE ||
+ cdev->private->state == DEV_STATE_BOXED);
+}
+
+int __init io_subchannel_init(void);
+
+void io_subchannel_recog_done(struct ccw_device *cdev);
+void io_subchannel_init_config(struct subchannel *sch);
+
+int ccw_device_cancel_halt_clear(struct ccw_device *);
+
+int ccw_device_is_orphan(struct ccw_device *);
+
+void ccw_device_recognition(struct ccw_device *);
+int ccw_device_online(struct ccw_device *);
+int ccw_device_offline(struct ccw_device *);
+void ccw_device_update_sense_data(struct ccw_device *);
+int ccw_device_test_sense_data(struct ccw_device *);
+int ccw_purge_blacklisted(void);
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
+
+/* Function prototypes for device status and basic sense stuff. */
+void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
+void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
+int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
+int ccw_device_do_sense(struct ccw_device *, struct irb *);
+
+/* Function prototype for internal request handling. */
+int lpm_adjust(int lpm, int mask);
+void ccw_request_start(struct ccw_device *);
+int ccw_request_cancel(struct ccw_device *cdev);
+void ccw_request_handler(struct ccw_device *cdev);
+void ccw_request_timeout(struct ccw_device *cdev);
+void ccw_request_notoper(struct ccw_device *cdev);
+
+/* Function prototypes for sense id stuff. */
+void ccw_device_sense_id_start(struct ccw_device *);
+void ccw_device_sense_id_done(struct ccw_device *, int);
+
+/* Function prototypes for path grouping stuff. */
+void ccw_device_verify_start(struct ccw_device *);
+void ccw_device_verify_done(struct ccw_device *, int);
+
+void ccw_device_disband_start(struct ccw_device *);
+void ccw_device_disband_done(struct ccw_device *, int);
+
+int ccw_device_stlck(struct ccw_device *);
+
+/* Helper function for machine check handling. */
+void ccw_device_trigger_reprobe(struct ccw_device *);
+void ccw_device_kill_io(struct ccw_device *);
+int ccw_device_notify(struct ccw_device *, int);
+void ccw_device_set_disconnected(struct ccw_device *cdev);
+void ccw_device_set_notoper(struct ccw_device *cdev);
+
+void ccw_device_timeout(struct timer_list *t);
+void ccw_device_set_timeout(struct ccw_device *, int);
+void ccw_device_schedule_recovery(void);
+
+/* Channel measurement facility related */
+void retry_set_schib(struct ccw_device *cdev);
+void cmf_retry_copy_block(struct ccw_device *);
+int cmf_reenable(struct ccw_device *);
+void cmf_reactivate(void);
+int ccw_set_cmf(struct ccw_device *cdev, int enable);
+extern struct device_attribute dev_attr_cmb_enable;
+#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
new file mode 100644
index 000000000..8fc267324
--- /dev/null
+++ b/drivers/s390/cio/device_fsm.c
@@ -0,0 +1,1134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * finite state machine for device handling
+ *
+ * Copyright IBM Corp. 2002, 2008
+ * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+#include <asm/chpid.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "chsc.h"
+#include "ioasm.h"
+#include "chp.h"
+
+static int timeout_log_enabled;
+
+static int __init ccw_timeout_log_setup(char *unused)
+{
+ timeout_log_enabled = 1;
+ return 1;
+}
+
+__setup("ccw_timeout_log", ccw_timeout_log_setup);
+
+static void ccw_timeout_log(struct ccw_device *cdev)
+{
+ struct schib schib;
+ struct subchannel *sch;
+ struct io_subchannel_private *private;
+ union orb *orb;
+ int cc;
+
+ sch = to_subchannel(cdev->dev.parent);
+ private = to_io_private(sch);
+ orb = &private->orb;
+ cc = stsch(sch->schid, &schib);
+
+ printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
+ "device information:\n", get_tod_clock());
+ printk(KERN_WARNING "cio: orb:\n");
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ orb, sizeof(*orb), 0);
+ printk(KERN_WARNING "cio: ccw device bus id: %s\n",
+ dev_name(&cdev->dev));
+ printk(KERN_WARNING "cio: subchannel bus id: %s\n",
+ dev_name(&sch->dev));
+ printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
+ "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
+
+ if (orb->tm.b) {
+ printk(KERN_WARNING "cio: orb indicates transport mode\n");
+ printk(KERN_WARNING "cio: last tcw:\n");
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ (void *)(addr_t)orb->tm.tcw,
+ sizeof(struct tcw), 0);
+ } else {
+ printk(KERN_WARNING "cio: orb indicates command mode\n");
+ if ((void *)(addr_t)orb->cmd.cpa ==
+ &private->dma_area->sense_ccw ||
+ (void *)(addr_t)orb->cmd.cpa ==
+ cdev->private->dma_area->iccws)
+ printk(KERN_WARNING "cio: last channel program "
+ "(intern):\n");
+ else
+ printk(KERN_WARNING "cio: last channel program:\n");
+
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ (void *)(addr_t)orb->cmd.cpa,
+ sizeof(struct ccw1), 0);
+ }
+ printk(KERN_WARNING "cio: ccw device state: %d\n",
+ cdev->private->state);
+ printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
+ printk(KERN_WARNING "cio: schib:\n");
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ &schib, sizeof(schib), 0);
+ printk(KERN_WARNING "cio: ccw device flags:\n");
+ print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
+ &cdev->private->flags, sizeof(cdev->private->flags), 0);
+}
+
+/*
+ * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
+ */
+void
+ccw_device_timeout(struct timer_list *t)
+{
+ struct ccw_device_private *priv = from_timer(priv, t, timer);
+ struct ccw_device *cdev = priv->cdev;
+
+ spin_lock_irq(cdev->ccwlock);
+ if (timeout_log_enabled)
+ ccw_timeout_log(cdev);
+ dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
+ spin_unlock_irq(cdev->ccwlock);
+}
+
+/*
+ * Set timeout
+ */
+void
+ccw_device_set_timeout(struct ccw_device *cdev, int expires)
+{
+ if (expires == 0) {
+ del_timer(&cdev->private->timer);
+ return;
+ }
+ if (timer_pending(&cdev->private->timer)) {
+ if (mod_timer(&cdev->private->timer, jiffies + expires))
+ return;
+ }
+ cdev->private->timer.expires = jiffies + expires;
+ add_timer(&cdev->private->timer);
+}
+
+int
+ccw_device_cancel_halt_clear(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = to_subchannel(cdev->dev.parent);
+ ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);
+
+ if (ret == -EIO)
+ CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+
+ return ret;
+}
+
+void ccw_device_update_sense_data(struct ccw_device *cdev)
+{
+ memset(&cdev->id, 0, sizeof(cdev->id));
+ cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
+ cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
+ cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
+ cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
+}
+
+int ccw_device_test_sense_data(struct ccw_device *cdev)
+{
+ return cdev->id.cu_type ==
+ cdev->private->dma_area->senseid.cu_type &&
+ cdev->id.cu_model ==
+ cdev->private->dma_area->senseid.cu_model &&
+ cdev->id.dev_type ==
+ cdev->private->dma_area->senseid.dev_type &&
+ cdev->id.dev_model ==
+ cdev->private->dma_area->senseid.dev_model;
+}
+
+/*
+ * The machine won't give us any notification by machine check if a chpid has
+ * been varied online on the SE, so we have to find out by magic (i.e. driving
+ * the channel subsystem to device selection and updating our path masks).
+ */
+static void
+__recover_lost_chpids(struct subchannel *sch, int old_lpm)
+{
+ int mask, i;
+ struct chp_id chpid;
+
+ chp_id_init(&chpid);
+ for (i = 0; i<8; i++) {
+ mask = 0x80 >> i;
+ if (!(sch->lpm & mask))
+ continue;
+ if (old_lpm & mask)
+ continue;
+ chpid.id = sch->schib.pmcw.chpid[i];
+ if (!chp_is_registered(chpid))
+ css_schedule_eval_all();
+ }
+}
+
+/*
+ * Stop device recognition.
+ */
+static void
+ccw_device_recog_done(struct ccw_device *cdev, int state)
+{
+ struct subchannel *sch;
+ int old_lpm;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ if (cio_disable_subchannel(sch))
+ state = DEV_STATE_NOT_OPER;
+ /*
+ * Now that we tried recognition, we have performed device selection
+ * through ssch() and the path information is up to date.
+ */
+ old_lpm = sch->lpm;
+
+ /* Check since device may again have become not operational. */
+ if (cio_update_schib(sch))
+ state = DEV_STATE_NOT_OPER;
+ else
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
+
+ if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
+ /* Force reprobe on all chpids. */
+ old_lpm = 0;
+ if (sch->lpm != old_lpm)
+ __recover_lost_chpids(sch, old_lpm);
+ if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
+ (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
+ cdev->private->flags.recog_done = 1;
+ cdev->private->state = DEV_STATE_DISCONNECTED;
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
+ if (cdev->private->flags.resuming) {
+ cdev->private->state = state;
+ cdev->private->flags.recog_done = 1;
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
+ switch (state) {
+ case DEV_STATE_NOT_OPER:
+ break;
+ case DEV_STATE_OFFLINE:
+ if (!cdev->online) {
+ ccw_device_update_sense_data(cdev);
+ break;
+ }
+ cdev->private->state = DEV_STATE_OFFLINE;
+ cdev->private->flags.recog_done = 1;
+ if (ccw_device_test_sense_data(cdev)) {
+ cdev->private->flags.donotify = 1;
+ ccw_device_online(cdev);
+ wake_up(&cdev->private->wait_q);
+ } else {
+ ccw_device_update_sense_data(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+ }
+ return;
+ case DEV_STATE_BOXED:
+ if (cdev->id.cu_type != 0) { /* device was recognized before */
+ cdev->private->flags.recog_done = 1;
+ cdev->private->state = DEV_STATE_BOXED;
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
+ break;
+ }
+ cdev->private->state = state;
+ io_subchannel_recog_done(cdev);
+ wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Function called from device_id.c after sense id has completed.
+ */
+void
+ccw_device_sense_id_done(struct ccw_device *cdev, int err)
+{
+ switch (err) {
+ case 0:
+ ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
+ break;
+ case -ETIME: /* Sense id stopped by timeout. */
+ ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+ break;
+ default:
+ ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ break;
+ }
+}
+
+/**
+ * ccw_device_notify() - inform the device's driver about an event
+ * @cdev: device for which an event occurred
+ * @event: event that occurred
+ *
+ * Returns:
+ * -%EINVAL if the device is offline or has no driver.
+ * -%EOPNOTSUPP if the device's driver has no notifier registered.
+ * %NOTIFY_OK if the driver wants to keep the device.
+ * %NOTIFY_BAD if the driver doesn't want to keep the device.
+ */
+int ccw_device_notify(struct ccw_device *cdev, int event)
+{
+ int ret = -EINVAL;
+
+ if (!cdev->drv)
+ goto out;
+ if (!cdev->online)
+ goto out;
+ CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
+ cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+ event);
+ if (!cdev->drv->notify) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ if (cdev->drv->notify(cdev, event))
+ ret = NOTIFY_OK;
+ else
+ ret = NOTIFY_BAD;
+out:
+ return ret;
+}
+
+static void ccw_device_oper_notify(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
+ /* Reenable channel measurements, if needed. */
+ ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
+ /* Save indication for new paths. */
+ cdev->private->path_new_mask = sch->vpm;
+ return;
+ }
+ /* Driver doesn't want device back. */
+ ccw_device_set_notoper(cdev);
+ ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
+}
+
+/*
+ * Finished with online/offline processing.
+ */
+static void
+ccw_device_done(struct ccw_device *cdev, int state)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+ ccw_device_set_timeout(cdev, 0);
+
+ if (state != DEV_STATE_ONLINE)
+ cio_disable_subchannel(sch);
+
+ /* Reset device status. */
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+
+ cdev->private->state = state;
+
+ switch (state) {
+ case DEV_STATE_BOXED:
+ CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
+ cdev->private->dev_id.devno, sch->schid.sch_no);
+ if (cdev->online &&
+ ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ cdev->private->flags.donotify = 0;
+ break;
+ case DEV_STATE_NOT_OPER:
+ CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
+ cdev->private->dev_id.devno, sch->schid.sch_no);
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ else
+ ccw_device_set_disconnected(cdev);
+ cdev->private->flags.donotify = 0;
+ break;
+ case DEV_STATE_DISCONNECTED:
+ CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
+ "%04x\n", cdev->private->dev_id.devno,
+ sch->schid.sch_no);
+ if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ } else
+ ccw_device_set_disconnected(cdev);
+ cdev->private->flags.donotify = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (cdev->private->flags.donotify) {
+ cdev->private->flags.donotify = 0;
+ ccw_device_oper_notify(cdev);
+ }
+ wake_up(&cdev->private->wait_q);
+}
+
+/*
+ * Start device recognition.
+ */
+void ccw_device_recognition(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ /*
+ * We used to start here with a sense pgid to find out whether a device
+ * is locked by someone else. Unfortunately, the sense pgid command
+ * code has other meanings on devices predating the path grouping
+ * algorithm, so we start with sense id and box the device after a
+ * timeout (or if sense pgid during path verification detects the device
+ * is locked, as may happen on newer devices).
+ */
+ cdev->private->flags.recog_done = 0;
+ cdev->private->state = DEV_STATE_SENSE_ID;
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
+ ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+ return;
+ }
+ ccw_device_sense_id_start(cdev);
+}
+
+/*
+ * Handle events for states that use the ccw request infrastructure.
+ */
+static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
+{
+ switch (e) {
+ case DEV_EVENT_NOTOPER:
+ ccw_request_notoper(cdev);
+ break;
+ case DEV_EVENT_INTERRUPT:
+ ccw_request_handler(cdev);
+ break;
+ case DEV_EVENT_TIMEOUT:
+ ccw_request_timeout(cdev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ccw_device_report_path_events(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int path_event[8];
+ int chp, mask;
+
+ for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
+ path_event[chp] = PE_NONE;
+ if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
+ path_event[chp] |= PE_PATH_GONE;
+ if (mask & cdev->private->path_new_mask & sch->vpm)
+ path_event[chp] |= PE_PATH_AVAILABLE;
+ if (mask & cdev->private->pgid_reset_mask & sch->vpm)
+ path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
+ }
+ if (cdev->online && cdev->drv->path_event)
+ cdev->drv->path_event(cdev, path_event);
+}
+
+static void ccw_device_reset_path_events(struct ccw_device *cdev)
+{
+ cdev->private->path_gone_mask = 0;
+ cdev->private->path_new_mask = 0;
+ cdev->private->pgid_reset_mask = 0;
+}
+
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
+
+ if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
+ ccw_device_schedule_recovery();
+
+ cdev->private->path_broken_mask = broken_paths;
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /* Update schib - pom may have changed. */
+ if (cio_update_schib(sch)) {
+ err = -ENODEV;
+ goto callback;
+ }
+ /* Update lpm with verified path mask. */
+ sch->lpm = sch->vpm;
+ /* Repeat path verification? */
+ if (cdev->private->flags.doverify) {
+ ccw_device_verify_start(cdev);
+ return;
+ }
+callback:
+ switch (err) {
+ case 0:
+ ccw_device_done(cdev, DEV_STATE_ONLINE);
+ /* Deliver fake irb to device driver, if needed. */
+ if (cdev->private->flags.fake_irb) {
+ create_fake_irb(&cdev->private->dma_area->irb,
+ cdev->private->flags.fake_irb);
+ cdev->private->flags.fake_irb = 0;
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ &cdev->private->dma_area->irb);
+ memset(&cdev->private->dma_area->irb, 0,
+ sizeof(struct irb));
+ }
+ ccw_device_report_path_events(cdev);
+ ccw_device_handle_broken_paths(cdev);
+ break;
+ case -ETIME:
+ case -EUSERS:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_BOXED);
+ break;
+ case -EACCES:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
+ break;
+ default:
+ /* Reset oper notify indication after verify error. */
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ break;
+ }
+ ccw_device_reset_path_events(cdev);
+}
+
+/*
+ * Get device online.
+ */
+int
+ccw_device_online(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if ((cdev->private->state != DEV_STATE_OFFLINE) &&
+ (cdev->private->state != DEV_STATE_BOXED))
+ return -EINVAL;
+ sch = to_subchannel(cdev->dev.parent);
+ ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+ if (ret != 0) {
+ /* Couldn't enable the subchannel for i/o. Sick device. */
+ if (ret == -ENODEV)
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ return ret;
+ }
+ /* Start initial path verification. */
+ cdev->private->state = DEV_STATE_VERIFY;
+ ccw_device_verify_start(cdev);
+ return 0;
+}
+
+void
+ccw_device_disband_done(struct ccw_device *cdev, int err)
+{
+ switch (err) {
+ case 0:
+ ccw_device_done(cdev, DEV_STATE_OFFLINE);
+ break;
+ case -ETIME:
+ ccw_device_done(cdev, DEV_STATE_BOXED);
+ break;
+ default:
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ break;
+ }
+}
+
+/*
+ * Shutdown device.
+ */
+int
+ccw_device_offline(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ /* Allow ccw_device_offline while disconnected. */
+ if (cdev->private->state == DEV_STATE_DISCONNECTED ||
+ cdev->private->state == DEV_STATE_NOT_OPER) {
+ cdev->private->flags.donotify = 0;
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ return 0;
+ }
+ if (cdev->private->state == DEV_STATE_BOXED) {
+ ccw_device_done(cdev, DEV_STATE_BOXED);
+ return 0;
+ }
+ if (ccw_device_is_orphan(cdev)) {
+ ccw_device_done(cdev, DEV_STATE_OFFLINE);
+ return 0;
+ }
+ sch = to_subchannel(cdev->dev.parent);
+ if (cio_update_schib(sch))
+ return -ENODEV;
+ if (scsw_actl(&sch->schib.scsw) != 0)
+ return -EBUSY;
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EINVAL;
+ /* Are we doing path grouping? */
+ if (!cdev->private->flags.pgroup) {
+ /* No, set state offline immediately. */
+ ccw_device_done(cdev, DEV_STATE_OFFLINE);
+ return 0;
+ }
+ /* Start Set Path Group commands. */
+ cdev->private->state = DEV_STATE_DISBAND_PGID;
+ ccw_device_disband_start(cdev);
+ return 0;
+}
+
+/*
+ * Handle not operational event in non-special state.
+ */
+static void ccw_device_generic_notoper(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
+ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ else
+ ccw_device_set_disconnected(cdev);
+}
+
+/*
+ * Handle path verification event in offline state.
+ */
+static void ccw_device_offline_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ css_schedule_eval(sch->schid);
+}
+
+/*
+ * Handle path verification event.
+ */
+static void
+ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ struct subchannel *sch;
+
+ if (cdev->private->state == DEV_STATE_W4SENSE) {
+ cdev->private->flags.doverify = 1;
+ return;
+ }
+ sch = to_subchannel(cdev->dev.parent);
+ /*
+ * Since we might not just be coming from an interrupt from the
+ * subchannel we have to update the schib.
+ */
+ if (cio_update_schib(sch)) {
+ ccw_device_verify_done(cdev, -ENODEV);
+ return;
+ }
+
+ if (scsw_actl(&sch->schib.scsw) != 0 ||
+ (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
+ (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
+ SCSW_STCTL_STATUS_PEND)) {
+ /*
+ * No final status yet or final status not yet delivered
+ * to the device driver. Can't do path verification now,
+ * delay until final status was delivered.
+ */
+ cdev->private->flags.doverify = 1;
+ return;
+ }
+ /* Device is idle, we can do the path verification. */
+ cdev->private->state = DEV_STATE_VERIFY;
+ ccw_device_verify_start(cdev);
+}
+
+/*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (cdev->online) {
+ if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+ ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ else
+ ccw_device_online_verify(cdev, dev_event);
+ } else
+ css_schedule_eval(sch->schid);
+}
+
+/*
+ * Pass interrupt to device driver.
+ */
+static int ccw_device_call_handler(struct ccw_device *cdev)
+{
+ unsigned int stctl;
+ int ending_status;
+
+ /*
+ * we allow for the device action handler if:
+ * - we received ending status
+ * - the action handler requested to see all interrupts
+ * - we received an intermediate status
+ * - fast notification was requested (primary status)
+ * - unsolicited interrupts
+ */
+ stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
+ ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
+ (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
+ (stctl == SCSW_STCTL_STATUS_PEND);
+ if (!ending_status &&
+ !cdev->private->options.repall &&
+ !(stctl & SCSW_STCTL_INTER_STATUS) &&
+ !(cdev->private->options.fast &&
+ (stctl & SCSW_STCTL_PRIM_STATUS)))
+ return 0;
+
+ if (ending_status)
+ ccw_device_set_timeout(cdev, 0);
+
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ &cdev->private->dma_area->irb);
+
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+ return 1;
+}
+
+/*
+ * Got an interrupt for a normal io (state online).
+ */
+static void
+ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ struct irb *irb;
+ int is_cmd;
+
+ irb = this_cpu_ptr(&cio_irb);
+ is_cmd = !scsw_is_tm(&irb->scsw);
+ /* Check for unsolicited interrupt. */
+ if (!scsw_is_solicited(&irb->scsw)) {
+ if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+ !irb->esw.esw0.erw.cons) {
+ /* Unit check but no sense data. Need basic sense. */
+ if (ccw_device_do_sense(cdev, irb) != 0)
+ goto call_handler_unsol;
+ memcpy(&cdev->private->dma_area->irb, irb,
+ sizeof(struct irb));
+ cdev->private->state = DEV_STATE_W4SENSE;
+ cdev->private->intparm = 0;
+ return;
+ }
+call_handler_unsol:
+ if (cdev->handler)
+ cdev->handler (cdev, 0, irb);
+ if (cdev->private->flags.doverify)
+ ccw_device_online_verify(cdev, 0);
+ return;
+ }
+ /* Accumulate status and find out if a basic sense is needed. */
+ ccw_device_accumulate_irb(cdev, irb);
+ if (is_cmd && cdev->private->flags.dosense) {
+ if (ccw_device_do_sense(cdev, irb) == 0) {
+ cdev->private->state = DEV_STATE_W4SENSE;
+ }
+ return;
+ }
+ /* Call the handler. */
+ if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+}
+
+/*
+ * Got a timeout in online state.
+ */
+static void
+ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ int ret;
+
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->iretry = 255;
+ cdev->private->async_kill_io_rc = -ETIMEDOUT;
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, 3*HZ);
+ cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+ return;
+ }
+ if (ret)
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ else if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(-ETIMEDOUT));
+}
+
+/*
+ * Got an interrupt for a basic sense.
+ */
+static void
+ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ struct irb *irb;
+
+ irb = this_cpu_ptr(&cio_irb);
+ /* Check for unsolicited interrupt. */
+ if (scsw_stctl(&irb->scsw) ==
+ (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
+ if (scsw_cc(&irb->scsw) == 1)
+ /* Basic sense hasn't started. Try again. */
+ ccw_device_do_sense(cdev, irb);
+ else {
+ CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
+ "interrupt during w4sense...\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (cdev->handler)
+ cdev->handler (cdev, 0, irb);
+ }
+ return;
+ }
+ /*
+ * Check if a halt or clear has been issued in the meanwhile. If yes,
+ * only deliver the halt/clear interrupt to the device driver as if it
+ * had killed the original request.
+ */
+ if (scsw_fctl(&irb->scsw) &
+ (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+ cdev->private->flags.dosense = 0;
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+ ccw_device_accumulate_irb(cdev, irb);
+ goto call_handler;
+ }
+ /* Add basic sense info to irb. */
+ ccw_device_accumulate_basic_sense(cdev, irb);
+ if (cdev->private->flags.dosense) {
+ /* Another basic sense is needed. */
+ ccw_device_do_sense(cdev, irb);
+ return;
+ }
+call_handler:
+ cdev->private->state = DEV_STATE_ONLINE;
+ /* In case sensing interfered with setting the device online */
+ wake_up(&cdev->private->wait_q);
+ /* Call the handler. */
+ if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+}
+
+static void
+ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ ccw_device_set_timeout(cdev, 0);
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+ /* OK, i/o is dead now. Call interrupt handler. */
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(cdev->private->async_kill_io_rc));
+}
+
+static void
+ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ int ret;
+
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, 3*HZ);
+ return;
+ }
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(cdev->private->async_kill_io_rc));
+}
+
+void ccw_device_kill_io(struct ccw_device *cdev)
+{
+ int ret;
+
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->iretry = 255;
+ cdev->private->async_kill_io_rc = -EIO;
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, 3*HZ);
+ cdev->private->state = DEV_STATE_TIMEOUT_KILL;
+ return;
+ }
+ /* Start delayed path verification. */
+ ccw_device_online_verify(cdev, 0);
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+ ERR_PTR(-EIO));
+}
+
+static void
+ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ /* Start verification after current task finished. */
+ cdev->private->flags.doverify = 1;
+}
+
+static void
+ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
+ /* Couldn't enable the subchannel for i/o. Sick device. */
+ return;
+ cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
+ ccw_device_sense_id_start(cdev);
+}
+
+void ccw_device_trigger_reprobe(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ if (cdev->private->state != DEV_STATE_DISCONNECTED)
+ return;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /* Update some values. */
+ if (cio_update_schib(sch))
+ return;
+ /*
+ * The pim, pam, pom values may not be accurate, but they are the best
+ * we have before performing device selection :/
+ */
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
+ /*
+ * Use the initial configuration since we can't be sure that the old
+ * paths are valid.
+ */
+ io_subchannel_init_config(sch);
+ if (cio_commit_config(sch))
+ return;
+
+ /* We should also update ssd info, but this has to wait. */
+ /* Check if this is another device which appeared on the same sch. */
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
+ css_schedule_eval(sch->schid);
+ else
+ ccw_device_start_id(cdev, 0);
+}
+
+static void ccw_device_disabled_irq(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /*
+ * An interrupt in a disabled state means a previous disable was not
+ * successful - should not happen, but we try to disable again.
+ */
+ cio_disable_subchannel(sch);
+}
+
+static void
+ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ retry_set_schib(cdev);
+ cdev->private->state = DEV_STATE_ONLINE;
+ dev_fsm_event(cdev, dev_event);
+}
+
+static void ccw_device_update_cmfblock(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ cmf_retry_copy_block(cdev);
+ cdev->private->state = DEV_STATE_ONLINE;
+ dev_fsm_event(cdev, dev_event);
+}
+
+static void
+ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ ccw_device_set_timeout(cdev, 0);
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ wake_up(&cdev->private->wait_q);
+}
+
+static void
+ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+{
+ int ret;
+
+ ret = ccw_device_cancel_halt_clear(cdev);
+ if (ret == -EBUSY) {
+ ccw_device_set_timeout(cdev, HZ/10);
+ } else {
+ cdev->private->state = DEV_STATE_NOT_OPER;
+ wake_up(&cdev->private->wait_q);
+ }
+}
+
+/*
+ * No operation action. This is used e.g. to ignore a timeout event in
+ * state offline.
+ */
+static void
+ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
+{
+}
+
+/*
+ * device state machine
+ */
+fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
+ [DEV_STATE_NOT_OPER] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_nop,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ [DEV_STATE_SENSE_ID] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ [DEV_STATE_OFFLINE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
+ },
+ [DEV_STATE_VERIFY] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
+ },
+ [DEV_STATE_ONLINE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_irq,
+ [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
+ [DEV_EVENT_VERIFY] = ccw_device_online_verify,
+ },
+ [DEV_STATE_W4SENSE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_online_verify,
+ },
+ [DEV_STATE_DISBAND_PGID] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ [DEV_STATE_BOXED] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_nop,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
+ },
+ /* states to wait for i/o completion before doing something */
+ [DEV_STATE_TIMEOUT_KILL] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
+ [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
+ [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
+ [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
+ },
+ [DEV_STATE_QUIESCE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
+ [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
+ [DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ /* special states for devices gone not operational */
+ [DEV_STATE_DISCONNECTED] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_nop,
+ [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
+ [DEV_EVENT_TIMEOUT] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_start_id,
+ },
+ [DEV_STATE_DISCONNECTED_SENSE_ID] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+ [DEV_STATE_CMFCHANGE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
+ [DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
+ [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
+ [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
+ },
+ [DEV_STATE_CMFUPDATE] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
+ [DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
+ [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
+ [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
+ },
+ [DEV_STATE_STEAL_LOCK] = {
+ [DEV_EVENT_NOTOPER] = ccw_device_request_event,
+ [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
+ [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
+ [DEV_EVENT_VERIFY] = ccw_device_nop,
+ },
+};
+
+EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
new file mode 100644
index 000000000..740996d0d
--- /dev/null
+++ b/drivers/s390/cio/device_id.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CCW device SENSE ID I/O handling.
+ *
+ * Copyright IBM Corp. 2002, 2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/ccwdev.h>
+#include <asm/setup.h>
+#include <asm/cio.h>
+#include <asm/diag.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "device.h"
+#include "io_sch.h"
+
+#define SENSE_ID_RETRIES 256
+#define SENSE_ID_TIMEOUT (10 * HZ)
+#define SENSE_ID_MIN_LEN 4
+#define SENSE_ID_BASIC_LEN 7
+
+/**
+ * diag210_to_senseid - convert diag 0x210 data to sense id information
+ * @senseid: sense id
+ * @diag: diag 0x210 data
+ *
+ * Return 0 on success, non-zero otherwise.
+ */
+static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
+{
+ static struct {
+ int class, type, cu_type;
+ } vm_devices[] = {
+ { 0x08, 0x01, 0x3480 },
+ { 0x08, 0x02, 0x3430 },
+ { 0x08, 0x10, 0x3420 },
+ { 0x08, 0x42, 0x3424 },
+ { 0x08, 0x44, 0x9348 },
+ { 0x08, 0x81, 0x3490 },
+ { 0x08, 0x82, 0x3422 },
+ { 0x10, 0x41, 0x1403 },
+ { 0x10, 0x42, 0x3211 },
+ { 0x10, 0x43, 0x3203 },
+ { 0x10, 0x45, 0x3800 },
+ { 0x10, 0x47, 0x3262 },
+ { 0x10, 0x48, 0x3820 },
+ { 0x10, 0x49, 0x3800 },
+ { 0x10, 0x4a, 0x4245 },
+ { 0x10, 0x4b, 0x4248 },
+ { 0x10, 0x4d, 0x3800 },
+ { 0x10, 0x4e, 0x3820 },
+ { 0x10, 0x4f, 0x3820 },
+ { 0x10, 0x82, 0x2540 },
+ { 0x10, 0x84, 0x3525 },
+ { 0x20, 0x81, 0x2501 },
+ { 0x20, 0x82, 0x2540 },
+ { 0x20, 0x84, 0x3505 },
+ { 0x40, 0x01, 0x3278 },
+ { 0x40, 0x04, 0x3277 },
+ { 0x40, 0x80, 0x2250 },
+ { 0x40, 0xc0, 0x5080 },
+ { 0x80, 0x00, 0x3215 },
+ };
+ int i;
+
+ /* Special case for osa devices. */
+ if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
+ senseid->cu_type = 0x3088;
+ senseid->cu_model = 0x60;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
+ if (diag->vrdcvcla == vm_devices[i].class &&
+ diag->vrdcvtyp == vm_devices[i].type) {
+ senseid->cu_type = vm_devices[i].cu_type;
+ senseid->reserved = 0xff;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * diag210_get_dev_info - retrieve device information via diag 0x210
+ * @cdev: ccw device
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int diag210_get_dev_info(struct ccw_device *cdev)
+{
+ struct ccw_dev_id *dev_id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
+ struct diag210 diag_data;
+ int rc;
+
+ if (dev_id->ssid != 0)
+ return -ENODEV;
+ memset(&diag_data, 0, sizeof(diag_data));
+ diag_data.vrdcdvno = dev_id->devno;
+ diag_data.vrdclen = sizeof(diag_data);
+ rc = diag210(&diag_data);
+ CIO_TRACE_EVENT(4, "diag210");
+ CIO_HEX_EVENT(4, &rc, sizeof(rc));
+ CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
+ if (rc != 0 && rc != 2)
+ goto err_failed;
+ if (diag210_to_senseid(senseid, &diag_data))
+ goto err_unknown;
+ return 0;
+
+err_unknown:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
+ dev_id->ssid, dev_id->devno);
+ return -ENODEV;
+err_failed:
+ CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
+ dev_id->ssid, dev_id->devno, rc);
+ return -ENODEV;
+}
+
+/*
+ * Initialize SENSE ID data.
+ */
+static void snsid_init(struct ccw_device *cdev)
+{
+ cdev->private->flags.esid = 0;
+
+ memset(&cdev->private->dma_area->senseid, 0,
+ sizeof(cdev->private->dma_area->senseid));
+ cdev->private->dma_area->senseid.cu_type = 0xffff;
+}
+
+/*
+ * Check for complete SENSE ID data.
+ */
+static int snsid_check(struct ccw_device *cdev, void *data)
+{
+ struct cmd_scsw *scsw = &cdev->private->dma_area->irb.scsw.cmd;
+ int len = sizeof(struct senseid) - scsw->count;
+
+ /* Check for incomplete SENSE ID data. */
+ if (len < SENSE_ID_MIN_LEN)
+ goto out_restart;
+ if (cdev->private->dma_area->senseid.cu_type == 0xffff)
+ goto out_restart;
+ /* Check for incompatible SENSE ID data. */
+ if (cdev->private->dma_area->senseid.reserved != 0xff)
+ return -EOPNOTSUPP;
+ /* Check for extended-identification information. */
+ if (len > SENSE_ID_BASIC_LEN)
+ cdev->private->flags.esid = 1;
+ return 0;
+
+out_restart:
+ snsid_init(cdev);
+ return -EAGAIN;
+}
+
+/*
+ * Process SENSE ID request result.
+ */
+static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct senseid *senseid = &cdev->private->dma_area->senseid;
+ int vm = 0;
+
+ if (rc && MACHINE_IS_VM) {
+ /* Try diag 0x210 fallback on z/VM. */
+ snsid_init(cdev);
+ if (diag210_get_dev_info(cdev) == 0) {
+ rc = 0;
+ vm = 1;
+ }
+ }
+ CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
+ "%04x/%02x%s\n", id->ssid, id->devno, rc,
+ senseid->cu_type, senseid->cu_model, senseid->dev_type,
+ senseid->dev_model, vm ? " (diag210)" : "");
+ ccw_device_sense_id_done(cdev, rc);
+}
+
+/**
+ * ccw_device_sense_id_start - perform SENSE ID
+ * @cdev: ccw device
+ *
+ * Execute a SENSE ID channel program on @cdev to update its sense id
+ * information. When finished, call ccw_device_sense_id_done with a
+ * return code specifying the result.
+ */
+void ccw_device_sense_id_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
+
+ CIO_TRACE_EVENT(4, "snsid");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Data setup. */
+ snsid_init(cdev);
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_ID;
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->senseid;
+ cp->count = sizeof(struct senseid);
+ cp->flags = CCW_FLAG_SLI;
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->cp = cp;
+ req->timeout = SENSE_ID_TIMEOUT;
+ req->maxretries = SENSE_ID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->check = snsid_check;
+ req->callback = snsid_callback;
+ ccw_request_start(cdev);
+}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
new file mode 100644
index 000000000..c533d1dad
--- /dev/null
+++ b/drivers/s390/cio/device_ops.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-1.0+
+/*
+ * Copyright IBM Corp. 2002, 2009
+ *
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+
+#include <asm/ccwdev.h>
+#include <asm/idals.h>
+#include <asm/chpid.h>
+#include <asm/fcx.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc.h"
+#include "device.h"
+#include "chp.h"
+
+/**
+ * ccw_device_set_options_mask() - set some options and unset the rest
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, all flags not specified in @flags
+ * are cleared.
+ * Returns:
+ * %0 on success, -%EINVAL on an invalid flag combination.
+ */
+int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
+{
+ /*
+ * The flag usage is mutually exclusive ...
+ */
+ if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+ (flags & CCWDEV_REPORT_ALL))
+ return -EINVAL;
+ cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+ cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
+ cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
+ cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
+ return 0;
+}
+
+/**
+ * ccw_device_set_options() - set some options
+ * @cdev: device for which the options are to be set
+ * @flags: options to be set
+ *
+ * All flags specified in @flags are set, the remainder is left untouched.
+ * Returns:
+ * %0 on success, -%EINVAL if an invalid flag combination would ensue.
+ */
+int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
+{
+ /*
+	 * The flag usage is mutually exclusive ...
+ */
+ if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
+ (flags & CCWDEV_REPORT_ALL)) ||
+ ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+ cdev->private->options.repall) ||
+ ((flags & CCWDEV_REPORT_ALL) &&
+ cdev->private->options.fast))
+ return -EINVAL;
+ cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+ cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
+ cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
+ cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
+ cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
+ return 0;
+}
+
+/**
+ * ccw_device_clear_options() - clear some options
+ * @cdev: device for which the options are to be cleared
+ * @flags: options to be cleared
+ *
+ * All flags specified in @flags are cleared; the remainder is left untouched.
+ */
+void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
+{
+ cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
+ cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
+ cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
+ cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
+ cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
+}
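
A minimal usage sketch of the option helpers above, assuming a hypothetical driver
callback with <asm/ccwdev.h> already included; the function name is illustrative only:

static int example_configure_options(struct ccw_device *cdev)
{
	int rc;

	/* Replace whatever was set before with exactly these options. */
	rc = ccw_device_set_options_mask(cdev, CCWDEV_DO_PATHGROUP |
					       CCWDEV_DO_MULTIPATH);
	if (rc)
		return rc;
	/* Individual options can later be dropped again. */
	ccw_device_clear_options(cdev, CCWDEV_DO_MULTIPATH);
	return 0;
}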
+
+/**
+ * ccw_device_is_pathgroup() - determine if paths to this device are grouped
+ * @cdev: ccw device
+ *
+ * Return non-zero if there is a path group, zero otherwise.
+ */
+int ccw_device_is_pathgroup(struct ccw_device *cdev)
+{
+ return cdev->private->flags.pgroup;
+}
+EXPORT_SYMBOL(ccw_device_is_pathgroup);
+
+/**
+ * ccw_device_is_multipath() - determine if device is operating in multipath mode
+ * @cdev: ccw device
+ *
+ * Return non-zero if device is operating in multipath mode, zero otherwise.
+ */
+int ccw_device_is_multipath(struct ccw_device *cdev)
+{
+ return cdev->private->flags.mpath;
+}
+EXPORT_SYMBOL(ccw_device_is_multipath);
+
+/**
+ * ccw_device_clear() - terminate I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter to be returned upon conclusion of csch
+ *
+ * ccw_device_clear() calls csch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if (!cdev || !cdev->dev.parent)
+ return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_W4SENSE)
+ return -EINVAL;
+
+ ret = cio_clear(sch);
+ if (ret == 0)
+ cdev->private->intparm = intparm;
+ return ret;
+}
+
+/**
+ * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, __u8 key,
+ unsigned long flags, int expires)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if (!cdev || !cdev->dev.parent)
+ return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
+ if (cdev->private->state != DEV_STATE_ONLINE ||
+ ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+ !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
+ cdev->private->flags.doverify)
+ return -EBUSY;
+ ret = cio_set_options (sch, flags);
+ if (ret)
+ return ret;
+ /* Adjust requested path mask to exclude unusable paths. */
+ if (lpm) {
+ lpm &= sch->lpm;
+ if (lpm == 0)
+ return -EACCES;
+ }
+ ret = cio_start_key (sch, cpa, lpm, key);
+ switch (ret) {
+ case 0:
+ cdev->private->intparm = intparm;
+ if (expires)
+ ccw_device_set_timeout(cdev, expires);
+ break;
+ case -EACCES:
+ case -ENODEV:
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ break;
+ }
+ return ret;
+}
+
+/**
+ * ccw_device_start_key() - start a s390 channel program with key
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @key: storage key to be used for the I/O
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, __u8 key,
+ unsigned long flags)
+{
+ return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
+ flags, 0);
+}
+
+/**
+ * ccw_device_start() - start a s390 channel program
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm, unsigned long flags)
+{
+ return ccw_device_start_key(cdev, cpa, intparm, lpm,
+ PAGE_DEFAULT_KEY, flags);
+}
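
A minimal sketch of the calling convention for ccw_device_start(), assuming a
caller-provided single-CCW channel program and a context in which the ccw device
lock may be taken; completion is reported asynchronously through cdev->handler:

static int example_start_nop(struct ccw_device *cdev, struct ccw1 *cp)
{
	unsigned long flags;
	int rc;

	/* cp points to caller-allocated, 31-bit addressable memory. */
	cp->cmd_code = CCW_CMD_NOOP;
	cp->cda = 0;
	cp->count = 0;
	cp->flags = CCW_FLAG_SLI;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	rc = ccw_device_start(cdev, cp, (unsigned long)cp /* intparm */, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}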
+
+/**
+ * ccw_device_start_timeout() - start a s390 channel program with timeout
+ * @cdev: target ccw device
+ * @cpa: logical start address of channel program
+ * @intparm: user specific interruption parameter; will be presented back to
+ * @cdev's interrupt handler. Allows a device driver to associate
+ * the interrupt with a particular I/O request.
+ * @lpm: defines the channel path to be used for a specific I/O request. A
+ * value of 0 will make cio use the opm.
+ * @flags: additional flags; defines the action to be performed for I/O
+ * processing.
+ * @expires: timeout value in jiffies
+ *
+ * Start a S/390 channel program. When the interrupt arrives, the
+ * IRQ handler is called, either immediately, delayed (dev-end missing,
+ * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_halt() or ccw_device_clear().
+ * Returns:
+ * %0, if the operation was successful;
+ * -%EBUSY, if the device is busy, or status pending;
+ * -%EACCES, if no path specified in @lpm is operational;
+ * -%ENODEV, if the device is not operational.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
+ unsigned long intparm, __u8 lpm,
+ unsigned long flags, int expires)
+{
+ return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
+ PAGE_DEFAULT_KEY, flags,
+ expires);
+}
+
+
+/**
+ * ccw_device_halt() - halt I/O request processing
+ * @cdev: target ccw device
+ * @intparm: interruption parameter to be returned upon conclusion of hsch
+ *
+ * ccw_device_halt() calls hsch on @cdev's subchannel.
+ * The interruption handler will echo back the @intparm specified here, unless
+ * another interruption parameter is specified by a subsequent invocation of
+ * ccw_device_clear().
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state,
+ * -%EBUSY on device busy or interrupt pending.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
+{
+ struct subchannel *sch;
+ int ret;
+
+ if (!cdev || !cdev->dev.parent)
+ return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state != DEV_STATE_ONLINE &&
+ cdev->private->state != DEV_STATE_W4SENSE)
+ return -EINVAL;
+
+ ret = cio_halt(sch);
+ if (ret == 0)
+ cdev->private->intparm = intparm;
+ return ret;
+}
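
A hedged sketch of terminating an I/O that did not complete, for example from a
driver-side timeout; the escalation from halt to clear follows the return codes
documented above, and the helper name is illustrative:

static void example_cancel_io(struct ccw_device *cdev)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	rc = ccw_device_halt(cdev, 0 /* intparm */);
	if (rc == -EBUSY)
		/* Device busy or interrupt pending: escalate to csch. */
		rc = ccw_device_clear(cdev, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	/* -ENODEV here means the device is gone; nothing more to do. */
}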
+
+/**
+ * ccw_device_resume() - resume channel program execution
+ * @cdev: target ccw device
+ *
+ * ccw_device_resume() calls rsch on @cdev's subchannel.
+ * Returns:
+ * %0 on success,
+ * -%ENODEV on device not operational,
+ * -%EINVAL on invalid device state,
+ * -%EBUSY on device busy or interrupt pending.
+ * Context:
+ * Interrupts disabled, ccw device lock held
+ */
+int ccw_device_resume(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ if (!cdev || !cdev->dev.parent)
+ return -ENODEV;
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+ if (cdev->private->state != DEV_STATE_ONLINE ||
+ !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+ return -EINVAL;
+ return cio_resume(sch);
+}
+
+/**
+ * ccw_device_get_ciw() - Search for CIW command in extended sense data.
+ * @cdev: ccw device to inspect
+ * @ct: command type to look for
+ *
+ * During SenseID, command information words (CIWs) describing special
+ * commands available to the device may have been stored in the extended
+ * sense data. This function searches for CIWs of a specified command
+ * type in the extended sense data.
+ * Returns:
+ * %NULL if no extended sense data has been stored or if no CIW of the
+ * specified command type could be found,
+ * else a pointer to the CIW of the specified command type.
+ */
+struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
+{
+ int ciw_cnt;
+
+ if (cdev->private->flags.esid == 0)
+ return NULL;
+ for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
+ if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
+ return cdev->private->dma_area->senseid.ciw + ciw_cnt;
+ return NULL;
+}
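
A short sketch of a CIW lookup; CIW_TYPE_RCD (the read-configuration-data command
type from <asm/cio.h>) is used here as an assumed example:

static int example_rcd_buffer_size(struct ccw_device *cdev)
{
	struct ciw *ciw;

	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
	if (!ciw)
		return -EOPNOTSUPP;	/* no RCD command advertised */
	return ciw->count;		/* bytes to allocate for RCD data */
}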
+
+/**
+ * ccw_device_get_path_mask() - get currently available paths
+ * @cdev: ccw device to be queried
+ * Returns:
+ * %0 if no subchannel for the device is available,
+ * else the mask of currently available paths for the ccw device's subchannel.
+ */
+__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ if (!cdev->dev.parent)
+ return 0;
+
+ sch = to_subchannel(cdev->dev.parent);
+ return sch->lpm;
+}
+
+/**
+ * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
+ * @cdev: device to obtain the descriptor for
+ * @chp_idx: index of the channel path
+ *
+ * On success return a newly allocated copy of the channel-path description
+ * data associated with the given channel path. Return %NULL on error.
+ */
+struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
+ int chp_idx)
+{
+ struct subchannel *sch;
+ struct chp_id chpid;
+
+ sch = to_subchannel(cdev->dev.parent);
+ chp_id_init(&chpid);
+ chpid.id = sch->schib.pmcw.chpid[chp_idx];
+ return chp_get_chp_desc(chpid);
+}
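
Since the descriptor is returned as a freshly allocated copy, the caller owns it
and must kfree() it. A minimal sketch with an illustrative helper name:

static void example_log_chp_type(struct ccw_device *cdev, int chp_idx)
{
	struct channel_path_desc_fmt0 *desc;

	desc = ccw_device_get_chp_desc(cdev, chp_idx);
	if (!desc)
		return;
	dev_info(&cdev->dev, "chp index %d: type %02x\n", chp_idx, desc->desc);
	kfree(desc);
}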
+
+/**
+ * ccw_device_get_util_str() - return newly allocated utility strings
+ * @cdev: device to obtain the utility strings for
+ * @chp_idx: index of the channel path
+ *
+ * On success return a newly allocated copy of the utility strings
+ * associated with the given channel path. Return %NULL on error.
+ */
+u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_path *chp;
+ struct chp_id chpid;
+ u8 *util_str;
+
+ chp_id_init(&chpid);
+ chpid.id = sch->schib.pmcw.chpid[chp_idx];
+ chp = chpid_to_chp(chpid);
+
+ util_str = kmalloc(sizeof(chp->desc_fmt3.util_str), GFP_KERNEL);
+ if (!util_str)
+ return NULL;
+
+ mutex_lock(&chp->lock);
+ memcpy(util_str, chp->desc_fmt3.util_str, sizeof(chp->desc_fmt3.util_str));
+ mutex_unlock(&chp->lock);
+
+ return util_str;
+}
+
+/**
+ * ccw_device_get_id() - obtain a ccw device id
+ * @cdev: device to obtain the id for
+ * @dev_id: where to fill in the values
+ */
+void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
+{
+ *dev_id = cdev->private->dev_id;
+}
+EXPORT_SYMBOL(ccw_device_get_id);
+
+/**
+ * ccw_device_tm_start_timeout_key() - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ * @expires: time span in jiffies after which to abort request
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
+ unsigned long intparm, u8 lpm, u8 key,
+ int expires)
+{
+ struct subchannel *sch;
+ int rc;
+
+ sch = to_subchannel(cdev->dev.parent);
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EIO;
+ /* Adjust requested path mask to exclude unusable paths. */
+ if (lpm) {
+ lpm &= sch->lpm;
+ if (lpm == 0)
+ return -EACCES;
+ }
+ rc = cio_tm_start_key(sch, tcw, lpm, key);
+ if (rc == 0) {
+ cdev->private->intparm = intparm;
+ if (expires)
+ ccw_device_set_timeout(cdev, expires);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
+
+/**
+ * ccw_device_tm_start_key() - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @key: storage key to use for storage access
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
+ unsigned long intparm, u8 lpm, u8 key)
+{
+ return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
+}
+EXPORT_SYMBOL(ccw_device_tm_start_key);
+
+/**
+ * ccw_device_tm_start() - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
+ unsigned long intparm, u8 lpm)
+{
+ return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
+ PAGE_DEFAULT_KEY);
+}
+EXPORT_SYMBOL(ccw_device_tm_start);
+
+/**
+ * ccw_device_tm_start_timeout() - perform start function
+ * @cdev: ccw device on which to perform the start function
+ * @tcw: transport-command word to be started
+ * @intparm: user defined parameter to be passed to the interrupt handler
+ * @lpm: mask of paths to use
+ * @expires: time span in jiffies after which to abort request
+ *
+ * Start the tcw on the given ccw device. Return zero on success, non-zero
+ * otherwise.
+ */
+int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
+ unsigned long intparm, u8 lpm, int expires)
+{
+ return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
+ PAGE_DEFAULT_KEY, expires);
+}
+EXPORT_SYMBOL(ccw_device_tm_start_timeout);
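
A hedged sketch of the transport-mode start pattern; building the tcw/tccb itself
(via the helpers declared in <asm/fcx.h>) is assumed to have happened already and
is outside the scope of this sketch:

static int example_tm_start(struct ccw_device *cdev, struct tcw *tcw)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	/* lpm = 0: use all available paths; abort after 30 seconds. */
	rc = ccw_device_tm_start_timeout(cdev, tcw, (unsigned long)tcw,
					 0, 30 * HZ);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}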
+
+/**
+ * ccw_device_get_mdc() - accumulate max data count
+ * @cdev: ccw device for which the max data count is accumulated
+ * @mask: mask of paths to use
+ *
+ * Return the number of 64K-byte blocks that every path supports at least
+ * for a transport command. A return value of 0 indicates failure.
+ */
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct channel_path *chp;
+ struct chp_id chpid;
+ int mdc = 0, i;
+
+	/* Adjust requested path mask to exclude varied-off paths. */
+ if (mask)
+ mask &= sch->lpm;
+ else
+ mask = sch->lpm;
+
+ chp_id_init(&chpid);
+ for (i = 0; i < 8; i++) {
+ if (!(mask & (0x80 >> i)))
+ continue;
+ chpid.id = sch->schib.pmcw.chpid[i];
+ chp = chpid_to_chp(chpid);
+ if (!chp)
+ continue;
+
+ mutex_lock(&chp->lock);
+ if (!chp->desc_fmt1.f) {
+ mutex_unlock(&chp->lock);
+ return 0;
+ }
+ if (!chp->desc_fmt1.r)
+ mdc = 1;
+ mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
+ chp->desc_fmt1.mdc;
+ mutex_unlock(&chp->lock);
+ }
+
+ return mdc;
+}
+EXPORT_SYMBOL(ccw_device_get_mdc);
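
The accumulated value is in units of 64K-byte blocks, so a caller turns it into a
byte limit as in this small sketch (0 propagates as "transport mode not usable"):

static size_t example_max_tm_bytes(struct ccw_device *cdev)
{
	int mdc = ccw_device_get_mdc(cdev, 0);	/* 0: consider all paths */

	if (mdc <= 0)
		return 0;
	return (size_t)mdc * 64 * 1024;
}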
+
+/**
+ * ccw_device_tm_intrg() - perform interrogate function
+ * @cdev: ccw device on which to perform the interrogate function
+ *
+ * Perform an interrogate function on the given ccw device. Return zero on
+ * success, non-zero otherwise.
+ */
+int ccw_device_tm_intrg(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ if (!sch->schib.pmcw.ena)
+ return -EINVAL;
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ return -EIO;
+ if (!scsw_is_tm(&sch->schib.scsw) ||
+ !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
+ return -EINVAL;
+ return cio_tm_intrg(sch);
+}
+EXPORT_SYMBOL(ccw_device_tm_intrg);
+
+/**
+ * ccw_device_get_schid() - obtain a subchannel id
+ * @cdev: device to obtain the id for
+ * @schid: where to fill in the values
+ */
+void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ *schid = sch->schid;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+
+/**
+ * ccw_device_pnso() - Perform Network-Subchannel Operation
+ * @cdev: device on which PNSO is performed
+ * @pnso_area: request and response block for the operation
+ * @oc: Operation Code
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int ccw_device_pnso(struct ccw_device *cdev,
+ struct chsc_pnso_area *pnso_area, u8 oc,
+ struct chsc_pnso_resume_token resume_token, int cnc)
+{
+ struct subchannel_id schid;
+
+ ccw_device_get_schid(cdev, &schid);
+ return chsc_pnso(schid, pnso_area, oc, resume_token, cnc);
+}
+EXPORT_SYMBOL_GPL(ccw_device_pnso);
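
A hedged sketch of a PNSO call; the operation code is supplied by the caller and
the all-zero resume token simply starts a new, non-resumed request:

static int example_pnso(struct ccw_device *cdev, u8 oc)
{
	struct chsc_pnso_resume_token token = {};
	struct chsc_pnso_area *area;
	int rc;

	area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
	if (!area)
		return -ENOMEM;
	rc = ccw_device_pnso(cdev, area, oc, token, 0 /* cnc off */);
	/* ... on rc == 0, evaluate the response part of *area ... */
	free_page((unsigned long)area);
	return rc;
}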
+
+/**
+ * ccw_device_get_cssid() - obtain Channel Subsystem ID
+ * @cdev: device to obtain the CSSID for
+ * @cssid: The resulting Channel Subsystem ID
+ */
+int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid)
+{
+ struct device *sch_dev = cdev->dev.parent;
+ struct channel_subsystem *css = to_css(sch_dev->parent);
+
+ if (css->id_valid)
+ *cssid = css->cssid;
+ return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_cssid);
+
+/**
+ * ccw_device_get_iid() - obtain MIF-image ID
+ * @cdev: device to obtain the MIF-image ID for
+ * @iid: The resulting MIF-image ID
+ */
+int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid)
+{
+ struct device *sch_dev = cdev->dev.parent;
+ struct channel_subsystem *css = to_css(sch_dev->parent);
+
+ if (css->id_valid)
+ *iid = css->iid;
+ return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_iid);
+
+/**
+ * ccw_device_get_chpid() - obtain Channel Path ID
+ * @cdev: device to obtain the Channel Path ID for
+ * @chp_idx: Index of the channel path
+ * @chpid: The resulting Channel Path ID
+ */
+int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int mask;
+
+ if ((chp_idx < 0) || (chp_idx > 7))
+ return -EINVAL;
+ mask = 0x80 >> chp_idx;
+ if (!(sch->schib.pmcw.pim & mask))
+ return -ENODEV;
+
+ *chpid = sch->schib.pmcw.chpid[chp_idx];
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chpid);
+
+/**
+ * ccw_device_get_chid() - obtain Channel ID associated with specified CHPID
+ * @cdev: device to obtain the Channel ID for
+ * @chp_idx: Index of the channel path
+ * @chid: The resulting Channel ID
+ */
+int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid)
+{
+ struct chp_id cssid_chpid;
+ struct channel_path *chp;
+ int rc;
+
+ chp_id_init(&cssid_chpid);
+ rc = ccw_device_get_chpid(cdev, chp_idx, &cssid_chpid.id);
+ if (rc)
+ return rc;
+ chp = chpid_to_chp(cssid_chpid);
+ if (!chp)
+ return -ENODEV;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ *chid = chp->desc_fmt1.chid;
+ else
+ rc = -ENODEV;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chid);
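
A brief sketch chaining the two lookups above for the first channel path; the
helper name is illustrative:

static void example_log_first_path(struct ccw_device *cdev)
{
	u8 chpid;
	u16 chid;

	if (ccw_device_get_chpid(cdev, 0, &chpid))
		return;
	if (ccw_device_get_chid(cdev, 0, &chid))
		dev_info(&cdev->dev, "CHPID %02x (no CHID)\n", chpid);
	else
		dev_info(&cdev->dev, "CHPID %02x, CHID %04x\n", chpid, chid);
}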
+
+/*
+ * Allocate zeroed, DMA-coherent, 31-bit addressable memory using
+ * the subchannel's DMA pool. The maximum supported allocation size
+ * is PAGE_SIZE.
+ */
+void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
+{
+ void *addr;
+
+ if (!get_device(&cdev->dev))
+ return NULL;
+ addr = cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+ if (IS_ERR_OR_NULL(addr))
+ put_device(&cdev->dev);
+ return addr;
+}
+EXPORT_SYMBOL(ccw_device_dma_zalloc);
+
+void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
+{
+ if (!cpu_addr)
+ return;
+ cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
+ put_device(&cdev->dev);
+}
+EXPORT_SYMBOL(ccw_device_dma_free);
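
A minimal sketch pairing the two DMA helpers; the 256-byte size is an arbitrary
example and must stay at or below PAGE_SIZE:

static int example_dma_buffer(struct ccw_device *cdev)
{
	size_t size = 256;
	void *buf;

	buf = ccw_device_dma_zalloc(cdev, size);
	if (!buf)
		return -ENOMEM;
	/* ... use buf as zeroed, 31-bit addressable I/O memory ... */
	ccw_device_dma_free(cdev, buf, size);
	return 0;
}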
+
+EXPORT_SYMBOL(ccw_device_set_options_mask);
+EXPORT_SYMBOL(ccw_device_set_options);
+EXPORT_SYMBOL(ccw_device_clear_options);
+EXPORT_SYMBOL(ccw_device_clear);
+EXPORT_SYMBOL(ccw_device_halt);
+EXPORT_SYMBOL(ccw_device_resume);
+EXPORT_SYMBOL(ccw_device_start_timeout);
+EXPORT_SYMBOL(ccw_device_start);
+EXPORT_SYMBOL(ccw_device_start_timeout_key);
+EXPORT_SYMBOL(ccw_device_start_key);
+EXPORT_SYMBOL(ccw_device_get_ciw);
+EXPORT_SYMBOL(ccw_device_get_path_mask);
+EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
+EXPORT_SYMBOL_GPL(ccw_device_get_util_str);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
new file mode 100644
index 000000000..767a85635
--- /dev/null
+++ b/drivers/s390/cio/device_pgid.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CCW device PGID and path verification I/O handling.
+ *
+ * Copyright IBM Corp. 2002, 2009
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "device.h"
+#include "io_sch.h"
+
+#define PGID_RETRIES 256
+#define PGID_TIMEOUT (10 * HZ)
+
+static void verify_start(struct ccw_device *cdev);
+
+/*
+ * Process path verification data and report result.
+ */
+static void verify_done(struct ccw_device *cdev, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ int mpath = cdev->private->flags.mpath;
+ int pgroup = cdev->private->flags.pgroup;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ if (sch->config.mp != mpath) {
+ sch->config.mp = mpath;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
+ "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
+ sch->vpm);
+ ccw_device_verify_done(cdev, rc);
+}
+
+/*
+ * Create channel program to perform a NOOP.
+ */
+static void nop_build_cp(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
+
+ cp->cmd_code = CCW_CMD_NOOP;
+ cp->cda = 0;
+ cp->count = 0;
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
+
+/*
+ * Perform NOOP on a single path.
+ */
+static void nop_do(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
+ ~cdev->private->path_noirq_mask);
+ if (!req->lpm)
+ goto out_nopath;
+ nop_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
+}
+
+/*
+ * Adjust NOOP I/O status.
+ */
+static enum io_status nop_filter(struct ccw_device *cdev, void *data,
+ struct irb *irb, enum io_status status)
+{
+ /* Only subchannel status might indicate a path error. */
+ if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
+ return IO_DONE;
+ return status;
+}
+
+/*
+ * Process NOOP request result for a single path.
+ */
+static void nop_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ switch (rc) {
+ case 0:
+ sch->vpm |= req->lpm;
+ break;
+ case -ETIME:
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
+ goto err;
+ }
+ /* Continue on the next path. */
+ req->lpm >>= 1;
+ nop_do(cdev);
+ return;
+
+err:
+ verify_done(cdev, rc);
+}
+
+/*
+ * Create channel program to perform SET PGID on a single path.
+ */
+static void spid_build_cp(struct ccw_device *cdev, u8 fn)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
+ int i = pathmask_to_pos(req->lpm);
+ struct pgid *pgid = &cdev->private->dma_area->pgid[i];
+
+ pgid->inf.fc = fn;
+ cp->cmd_code = CCW_CMD_SET_PGID;
+ cp->cda = (u32) (addr_t) pgid;
+ cp->count = sizeof(*pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
+
+static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ if (rc) {
+ /* We don't know the path groups' state. Abort. */
+ verify_done(cdev, rc);
+ return;
+ }
+ /*
+ * Path groups have been reset. Restart path verification but
+ * leave paths in path_noirq_mask out.
+ */
+ cdev->private->flags.pgid_unknown = 0;
+ verify_start(cdev);
+}
+
+/*
+ * Reset pathgroups and restart path verification, leaving unusable paths out.
+ */
+static void pgid_wipeout_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
+ id->ssid, id->devno, cdev->private->pgid_valid_mask,
+ cdev->private->path_noirq_mask);
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam;
+ req->callback = pgid_wipeout_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
+
+/*
+ * Perform establish/resign SET PGID on a single path.
+ */
+static void spid_do(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+	/* Use the next available path not already in the correct state. */
+ req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
+ if (!req->lpm)
+ goto out_nopath;
+ /* Channel program setup. */
+ if (req->lpm & sch->opm)
+ fn = SPID_FUNC_ESTABLISH;
+ else
+ fn = SPID_FUNC_RESIGN;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ if (cdev->private->flags.pgid_unknown) {
+ /* At least one SPID could be partially done. */
+ pgid_wipeout_start(cdev);
+ return;
+ }
+ verify_done(cdev, sch->vpm ? 0 : -EACCES);
+}
+
+/*
+ * Process SET PGID request result for a single path.
+ */
+static void spid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ switch (rc) {
+ case 0:
+ sch->vpm |= req->lpm & sch->opm;
+ break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ case -EOPNOTSUPP:
+ if (cdev->private->flags.mpath) {
+ /* Try without multipathing. */
+ cdev->private->flags.mpath = 0;
+ goto out_restart;
+ }
+ /* Try without pathgrouping. */
+ cdev->private->flags.pgroup = 0;
+ goto out_restart;
+ default:
+ goto err;
+ }
+ req->lpm >>= 1;
+ spid_do(cdev);
+ return;
+
+out_restart:
+ verify_start(cdev);
+ return;
+err:
+ verify_done(cdev, rc);
+}
+
+static void spid_start(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ req->singlepath = 1;
+ req->callback = spid_callback;
+ spid_do(cdev);
+}
+
+static int pgid_is_reset(struct pgid *p)
+{
+ char *c;
+
+ for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
+ if (*c != 0)
+ return 0;
+ }
+ return 1;
+}
+
+static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+{
+ return memcmp((char *) p1 + 1, (char *) p2 + 1,
+ sizeof(struct pgid) - 1);
+}
+
+/*
+ * Determine pathgroup state from PGID data.
+ */
+static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+ int *mismatch, u8 *reserved, u8 *reset)
+{
+ struct pgid *pgid = &cdev->private->dma_area->pgid[0];
+ struct pgid *first = NULL;
+ int lpm;
+ int i;
+
+ *mismatch = 0;
+ *reserved = 0;
+ *reset = 0;
+ for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+ *reserved |= lpm;
+ if (pgid_is_reset(pgid)) {
+ *reset |= lpm;
+ continue;
+ }
+ if (!first) {
+ first = pgid;
+ continue;
+ }
+ if (pgid_cmp(pgid, first) != 0)
+ *mismatch = 1;
+ }
+ if (!first)
+ first = &channel_subsystems[0]->global_pgid;
+ *p = first;
+}
+
+static u8 pgid_to_donepm(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int i;
+ int lpm;
+ u8 donepm = 0;
+
+ /* Set bits for paths which are already in the target state. */
+ for (i = 0; i < 8; i++) {
+ lpm = 0x80 >> i;
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ pgid = &cdev->private->dma_area->pgid[i];
+ if (sch->opm & lpm) {
+ if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
+ continue;
+ } else {
+ if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
+ continue;
+ }
+ if (cdev->private->flags.mpath) {
+ if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
+ continue;
+ } else {
+ if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
+ continue;
+ }
+ donepm |= lpm;
+ }
+
+ return donepm;
+}
+
+static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ memcpy(&cdev->private->dma_area->pgid[i], pgid,
+ sizeof(struct pgid));
+}
+
+/*
+ * Process SENSE PGID data and report result.
+ */
+static void snid_done(struct ccw_device *cdev, int rc)
+{
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int mismatch = 0;
+ u8 reserved = 0;
+ u8 reset = 0;
+ u8 donepm;
+
+ if (rc)
+ goto out;
+ pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+ if (reserved == cdev->private->pgid_valid_mask)
+ rc = -EUSERS;
+ else if (mismatch)
+ rc = -EOPNOTSUPP;
+ else {
+ donepm = pgid_to_donepm(cdev);
+ sch->vpm = donepm & sch->opm;
+ cdev->private->pgid_reset_mask |= reset;
+ cdev->private->pgid_todo_mask &=
+ ~(donepm | cdev->private->path_noirq_mask);
+ pgid_fill(cdev, pgid);
+ }
+out:
+ CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+ "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
+ id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+ cdev->private->pgid_todo_mask, mismatch, reserved, reset);
+ switch (rc) {
+ case 0:
+ if (cdev->private->flags.pgid_unknown) {
+ pgid_wipeout_start(cdev);
+ return;
+ }
+ /* Anything left to do? */
+ if (cdev->private->pgid_todo_mask == 0) {
+ verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
+ return;
+ }
+ /* Perform path-grouping. */
+ spid_start(cdev);
+ break;
+ case -EOPNOTSUPP:
+ /* Path-grouping not supported. */
+ cdev->private->flags.pgroup = 0;
+ cdev->private->flags.mpath = 0;
+ verify_start(cdev);
+ break;
+ default:
+ verify_done(cdev, rc);
+ }
+}
+
+/*
+ * Create channel program to perform a SENSE PGID on a single path.
+ */
+static void snid_build_cp(struct ccw_device *cdev)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
+ int i = pathmask_to_pos(req->lpm);
+
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_PGID;
+ cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i];
+ cp->count = sizeof(struct pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+}
+
+/*
+ * Perform SENSE PGID on a single path.
+ */
+static void snid_do(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ int ret;
+
+ req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
+ ~cdev->private->path_noirq_mask);
+ if (!req->lpm)
+ goto out_nopath;
+ snid_build_cp(cdev);
+ ccw_request_start(cdev);
+ return;
+
+out_nopath:
+ if (cdev->private->pgid_valid_mask)
+ ret = 0;
+ else if (cdev->private->path_noirq_mask)
+ ret = -ETIME;
+ else
+ ret = -EACCES;
+ snid_done(cdev, ret);
+}
+
+/*
+ * Process SENSE PGID request result for a single path.
+ */
+static void snid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct ccw_request *req = &cdev->private->req;
+
+ switch (rc) {
+ case 0:
+ cdev->private->pgid_valid_mask |= req->lpm;
+ break;
+ case -ETIME:
+ cdev->private->flags.pgid_unknown = 1;
+ cdev->private->path_noirq_mask |= req->lpm;
+ break;
+ case -EACCES:
+ cdev->private->path_notoper_mask |= req->lpm;
+ break;
+ default:
+ goto err;
+ }
+ /* Continue on the next path. */
+ req->lpm >>= 1;
+ snid_do(cdev);
+ return;
+
+err:
+ snid_done(cdev, rc);
+}
+
+/*
+ * Perform path verification.
+ */
+static void verify_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw_dev_id *devid = &cdev->private->dev_id;
+
+ sch->vpm = 0;
+ sch->lpm = sch->schib.pmcw.pam;
+
+ /* Initialize PGID data. */
+ memset(cdev->private->dma_area->pgid, 0,
+ sizeof(cdev->private->dma_area->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+ cdev->private->path_notoper_mask = 0;
+
+ /* Initialize request data. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = 0x80;
+ req->singlepath = 1;
+ if (cdev->private->flags.pgroup) {
+ CIO_TRACE_EVENT(4, "snid");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->callback = snid_callback;
+ snid_do(cdev);
+ } else {
+ CIO_TRACE_EVENT(4, "nop");
+ CIO_HEX_EVENT(4, devid, sizeof(*devid));
+ req->filter = nop_filter;
+ req->callback = nop_callback;
+ nop_do(cdev);
+ }
+}
+
+/**
+ * ccw_device_verify_start - perform path verification
+ * @cdev: ccw device
+ *
+ * Perform an I/O on each available channel path to @cdev to determine which
+ * paths are operational. The resulting path mask is stored in sch->vpm.
+ * If device options specify pathgrouping, establish a pathgroup for the
+ * operational paths. When finished, call ccw_device_verify_done with a
+ * return code specifying the result.
+ */
+void ccw_device_verify_start(struct ccw_device *cdev)
+{
+ CIO_TRACE_EVENT(4, "vrfy");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /*
+ * Initialize pathgroup and multipath state with target values.
+ * They may change in the course of path verification.
+ */
+ cdev->private->flags.pgroup = cdev->private->options.pgroup;
+ cdev->private->flags.mpath = cdev->private->options.mpath;
+ cdev->private->flags.doverify = 0;
+ cdev->private->path_noirq_mask = 0;
+ verify_start(cdev);
+}
+
+/*
+ * Process disband SET PGID request result.
+ */
+static void disband_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+
+ if (rc)
+ goto out;
+ /* Ensure consistent multipathing state at device and channel. */
+ cdev->private->flags.mpath = 0;
+ if (sch->config.mp) {
+ sch->config.mp = 0;
+ rc = cio_commit_config(sch);
+ }
+out:
+ CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
+ rc);
+ ccw_device_disband_done(cdev, rc);
+}
+
+/**
+ * ccw_device_disband_start - disband pathgroup
+ * @cdev: ccw device
+ *
+ * Execute a SET PGID channel program on @cdev to disband a previously
+ * established pathgroup. When finished, call ccw_device_disband_done with
+ * a return code specifying the result.
+ */
+void ccw_device_disband_start(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+ u8 fn;
+
+ CIO_TRACE_EVENT(4, "disb");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->singlepath = 1;
+ req->callback = disband_callback;
+ fn = SPID_FUNC_DISBAND;
+ if (cdev->private->flags.mpath)
+ fn |= SPID_FUNC_MULTI_PATH;
+ spid_build_cp(cdev, fn);
+ ccw_request_start(cdev);
+}
+
+struct stlck_data {
+ struct completion done;
+ int rc;
+};
+
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+ struct ccw_request *req = &cdev->private->req;
+ struct ccw1 *cp = cdev->private->dma_area->iccws;
+
+ cp[0].cmd_code = CCW_CMD_STLCK;
+ cp[0].cda = (u32) (addr_t) buf1;
+ cp[0].count = 32;
+ cp[0].flags = CCW_FLAG_CC;
+ cp[1].cmd_code = CCW_CMD_RELEASE;
+ cp[1].cda = (u32) (addr_t) buf2;
+ cp[1].count = 32;
+ cp[1].flags = 0;
+ req->cp = cp;
+}
+
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
+{
+ struct stlck_data *sdata = data;
+
+ sdata->rc = rc;
+ complete(&sdata->done);
+}
+
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ */
+static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
+ void *buf1, void *buf2)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+
+ CIO_TRACE_EVENT(4, "stlck");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+ /* Request setup. */
+ memset(req, 0, sizeof(*req));
+ req->timeout = PGID_TIMEOUT;
+ req->maxretries = PGID_RETRIES;
+ req->lpm = sch->schib.pmcw.pam & sch->opm;
+ req->data = data;
+ req->callback = stlck_callback;
+ stlck_build_cp(cdev, buf1, buf2);
+ ccw_request_start(cdev);
+}
+
+/*
+ * Perform unconditional reserve + release.
+ */
+int ccw_device_stlck(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct stlck_data data;
+ u8 *buffer;
+ int rc;
+
+ /* Check if steal lock operation is valid for this device. */
+ if (cdev->drv) {
+ if (!cdev->private->options.force)
+ return -EINVAL;
+ }
+ buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ init_completion(&data.done);
+ data.rc = -EIO;
+ spin_lock_irq(sch->lock);
+ rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
+ if (rc)
+ goto out_unlock;
+ /* Perform operation. */
+ cdev->private->state = DEV_STATE_STEAL_LOCK;
+ ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
+ spin_unlock_irq(sch->lock);
+ /* Wait for operation to finish. */
+ if (wait_for_completion_interruptible(&data.done)) {
+ /* Got a signal. */
+ spin_lock_irq(sch->lock);
+ ccw_request_cancel(cdev);
+ spin_unlock_irq(sch->lock);
+ wait_for_completion(&data.done);
+ }
+ rc = data.rc;
+ /* Check results. */
+ spin_lock_irq(sch->lock);
+ cio_disable_subchannel(sch);
+ cdev->private->state = DEV_STATE_BOXED;
+out_unlock:
+ spin_unlock_irq(sch->lock);
+ kfree(buffer);
+
+ return rc;
+}
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
new file mode 100644
index 000000000..0bd8f2642
--- /dev/null
+++ b/drivers/s390/cio/device_status.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2002
+ * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * Status accumulation and basic sense functions.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "io_sch.h"
+
+/*
+ * Check for any kind of channel or interface control check but don't
+ * issue the message for the console device
+ */
+static void
+ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ char dbf_text[15];
+
+ if (!scsw_is_valid_cstat(&irb->scsw) ||
+ !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
+ SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
+ return;
+ CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
+ "received"
+ " ... device %04x on subchannel 0.%x.%04x, dev_stat "
+ ": %02X sch_stat : %02X\n",
+ cdev->private->dev_id.devno, sch->schid.ssid,
+ sch->schid.sch_no,
+ scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
+ sprintf(dbf_text, "chk%x", sch->schid.sch_no);
+ CIO_TRACE_EVENT(0, dbf_text);
+ CIO_HEX_EVENT(0, irb, sizeof(struct irb));
+}
+
+/*
+ * Some paths are no longer operational (the PNO bit in the SCSW is set).
+ */
+static void
+ccw_device_path_notoper(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ if (cio_update_schib(sch))
+ goto doverify;
+
+ CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
+ "not operational \n", __func__,
+ sch->schid.ssid, sch->schid.sch_no,
+ sch->schib.pmcw.pnom);
+
+ sch->lpm &= ~sch->schib.pmcw.pnom;
+doverify:
+ cdev->private->flags.doverify = 1;
+}
+
+/*
+ * Copy valid bits from the extended control word to device irb.
+ */
+static void
+ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
+{
+ /*
+	 * Copy the extended control bit if it is valid... yes, there
+	 * are conditions that have to be met for the extended control
+	 * bit to have meaning. Sick.
+ */
+ cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
+ if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
+ !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
+ cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+ /* Check if extended control word is valid. */
+ if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
+ return;
+ /* Copy concurrent sense / model dependent information. */
+ memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
+}
+
+/*
+ * Check if extended status word is valid.
+ */
+static int
+ccw_device_accumulate_esw_valid(struct irb *irb)
+{
+ if (!irb->scsw.cmd.eswf &&
+ (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
+ return 0;
+ if (irb->scsw.cmd.stctl ==
+ (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
+ !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+ return 0;
+ return 1;
+}
+
+/*
+ * Copy valid bits from the extended status word to device irb.
+ */
+static void
+ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
+{
+ struct irb *cdev_irb;
+ struct sublog *cdev_sublog, *sublog;
+
+ if (!ccw_device_accumulate_esw_valid(irb))
+ return;
+
+ cdev_irb = &cdev->private->dma_area->irb;
+
+ /* Copy last path used mask. */
+ cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
+
+ /* Copy subchannel logout information if esw is of format 0. */
+ if (irb->scsw.cmd.eswf) {
+ cdev_sublog = &cdev_irb->esw.esw0.sublog;
+ sublog = &irb->esw.esw0.sublog;
+ /* Copy extended status flags. */
+ cdev_sublog->esf = sublog->esf;
+ /*
+ * Copy fields that have a meaning for channel data check
+ * channel control check and interface control check.
+ */
+ if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
+ SCHN_STAT_CHN_CTRL_CHK |
+ SCHN_STAT_INTF_CTRL_CHK)) {
+ /* Copy ancillary report bit. */
+ cdev_sublog->arep = sublog->arep;
+ /* Copy field-validity-flags. */
+ cdev_sublog->fvf = sublog->fvf;
+ /* Copy storage access code. */
+ cdev_sublog->sacc = sublog->sacc;
+ /* Copy termination code. */
+ cdev_sublog->termc = sublog->termc;
+ /* Copy sequence code. */
+ cdev_sublog->seqc = sublog->seqc;
+ }
+ /* Copy device status check. */
+ cdev_sublog->devsc = sublog->devsc;
+ /* Copy secondary error. */
+ cdev_sublog->serr = sublog->serr;
+ /* Copy i/o-error alert. */
+ cdev_sublog->ioerr = sublog->ioerr;
+ /* Copy channel path timeout bit. */
+ if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
+ cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
+ /* Copy failing storage address validity flag. */
+ cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
+ if (cdev_irb->esw.esw0.erw.fsavf) {
+ /* ... and copy the failing storage address. */
+ memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
+ sizeof (irb->esw.esw0.faddr));
+ /* ... and copy the failing storage address format. */
+ cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
+ }
+ /* Copy secondary ccw address validity bit. */
+ cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
+ if (irb->esw.esw0.erw.scavf)
+ /* ... and copy the secondary ccw address. */
+ cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
+
+ }
+ /* FIXME: DCTI for format 2? */
+
+ /* Copy authorization bit. */
+ cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
+ /* Copy path verification required flag. */
+ cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
+ if (irb->esw.esw0.erw.pvrf)
+ cdev->private->flags.doverify = 1;
+ /* Copy concurrent sense bit. */
+ cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
+ if (irb->esw.esw0.erw.cons)
+ cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
+}
+
+/*
+ * Accumulate status from irb to devstat.
+ */
+void
+ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
+{
+ struct irb *cdev_irb;
+
+ /*
+ * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bits have no meaning and we must ignore them.
+	 * The esw is not meaningful either...
+ */
+ if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
+ return;
+
+ /* Check for channel checks and interface control checks. */
+ ccw_device_msg_control_check(cdev, irb);
+
+ /* Check for path not operational. */
+ if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
+ ccw_device_path_notoper(cdev);
+ /* No irb accumulation for transport mode irbs. */
+ if (scsw_is_tm(&irb->scsw)) {
+ memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
+ return;
+ }
+ /*
+ * Don't accumulate unsolicited interrupts.
+ */
+ if (!scsw_is_solicited(&irb->scsw))
+ return;
+
+ cdev_irb = &cdev->private->dma_area->irb;
+
+ /*
+ * If the clear function had been performed, all formerly pending
+ * status at the subchannel has been cleared and we must not pass
+ * intermediate accumulated status to the device driver.
+ */
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
+ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+
+ /* Copy bits which are valid only for the start function. */
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
+ /* Copy key. */
+ cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
+ /* Copy suspend control bit. */
+ cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
+ /* Accumulate deferred condition code. */
+ cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
+ /* Copy ccw format bit. */
+ cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
+ /* Copy prefetch bit. */
+ cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
+ /* Copy initial-status-interruption-control. */
+ cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
+ /* Copy address limit checking control. */
+ cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
+ /* Copy suppress suspend bit. */
+ cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
+ }
+
+ /* Take care of the extended control bit and extended control word. */
+ ccw_device_accumulate_ecw(cdev, irb);
+
+ /* Accumulate function control. */
+ cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
+ /* Copy activity control. */
+ cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
+ /* Accumulate status control. */
+ cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
+ /*
+ * Copy ccw address if it is valid. This is a bit simplified
+ * but should be close enough for all practical purposes.
+ */
+ if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
+ ((irb->scsw.cmd.stctl ==
+ (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
+ (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
+ (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
+ (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
+ cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
+ /* Accumulate device status, but not the device busy flag. */
+ cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
+ /* dstat is not always valid. */
+ if (irb->scsw.cmd.stctl &
+ (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
+ | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
+ cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
+ /* Accumulate subchannel status. */
+ cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
+ /* Copy residual count if it is valid. */
+ if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+ (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
+ == 0)
+ cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
+
+ /* Take care of bits in the extended status word. */
+ ccw_device_accumulate_esw(cdev, irb);
+
+ /*
+ * Check whether we must issue a SENSE CCW ourselves if there is no
+ * concurrent sense facility installed for the subchannel.
+ * No sense is required if no delayed sense is pending
+ * and we did not get a unit check without sense information.
+ *
+ * Note: We should check for ioinfo[irq]->flags.consns but VM
+ * violates the ESA/390 architecture and doesn't present an
+ * operand exception for virtual devices without concurrent
+ * sense facility available/supported when enabling the
+ * concurrent sense facility.
+ */
+ if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+ !(cdev_irb->esw.esw0.erw.cons))
+ cdev->private->flags.dosense = 1;
+}
+
+/*
+ * Do a basic sense.
+ */
+int
+ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
+{
+ struct subchannel *sch;
+ struct ccw1 *sense_ccw;
+ int rc;
+
+ sch = to_subchannel(cdev->dev.parent);
+
+	/* A sense is required, can we do it now? */
+ if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
+ /*
+		 * We received a unit check but we have no final
+ * status yet, therefore we must delay the SENSE
+ * processing. We must not report this intermediate
+ * status to the device interrupt handler.
+ */
+ return -EBUSY;
+
+ /*
+ * We have ending status but no sense information. Do a basic sense.
+ */
+ sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
+ sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
+ sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
+ sense_ccw->count = SENSE_MAX_COUNT;
+ sense_ccw->flags = CCW_FLAG_SLI;
+
+ rc = cio_start(sch, sense_ccw, 0xff);
+ if (rc == -ENODEV || rc == -EACCES)
+ dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ return rc;
+}
+
+/*
+ * Add information from basic sense to devstat.
+ */
+void
+ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
+{
+ /*
+ * Check if the status pending bit is set in stctl.
+	 * If not, the remaining bits have no meaning and we must ignore them.
+	 * The esw is not meaningful either...
+ */
+ if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
+ return;
+
+ /* Check for channel checks and interface control checks. */
+ ccw_device_msg_control_check(cdev, irb);
+
+ /* Check for path not operational. */
+ if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
+ ccw_device_path_notoper(cdev);
+
+ if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+ (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
+ cdev->private->flags.dosense = 0;
+ }
+ /* Check if path verification is required. */
+ if (ccw_device_accumulate_esw_valid(irb) &&
+ irb->esw.esw0.erw.pvrf)
+ cdev->private->flags.doverify = 1;
+}
+
+/*
+ * This function accumulates the status into the private devstat and
+ * starts a basic sense if one is needed.
+ */
+int
+ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
+{
+ ccw_device_accumulate_irb(cdev, irb);
+ if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
+ return -EBUSY;
+ /* Check for basic sense. */
+ if (cdev->private->flags.dosense &&
+ !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
+ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
+ cdev->private->flags.dosense = 0;
+ return 0;
+ }
+ if (cdev->private->flags.dosense) {
+ ccw_device_do_sense(cdev, irb);
+ return -EBUSY;
+ }
+ return 0;
+}
+
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
new file mode 100644
index 000000000..53468ae64
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for s390 eadm subchannels
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <asm/css_chars.h>
+#include <asm/debug.h>
+#include <asm/isc.h>
+#include <asm/cio.h>
+#include <asm/scsw.h>
+#include <asm/eadm.h>
+
+#include "eadm_sch.h"
+#include "ioasm.h"
+#include "cio.h"
+#include "css.h"
+#include "orb.h"
+
+MODULE_DESCRIPTION("driver for s390 eadm subchannels");
+MODULE_LICENSE("GPL");
+
+#define EADM_TIMEOUT (7 * HZ)
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(eadm_list);
+
+static debug_info_t *eadm_debug;
+
+#define EADM_LOG(imp, txt) do { \
+ debug_text_event(eadm_debug, imp, txt); \
+ } while (0)
+
+static void EADM_LOG_HEX(int level, void *data, int length)
+{
+ debug_event(eadm_debug, level, data, length);
+}
+
+static void orb_init(union orb *orb)
+{
+ memset(orb, 0, sizeof(union orb));
+ orb->eadm.compat1 = 1;
+ orb->eadm.compat2 = 1;
+ orb->eadm.fmt = 1;
+ orb->eadm.x = 1;
+}
+
+static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
+{
+ union orb *orb = &get_eadm_private(sch)->orb;
+ int cc;
+
+ orb_init(orb);
+ orb->eadm.aob = (u32)__pa(aob);
+ orb->eadm.intparm = (u32)(addr_t)sch;
+ orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
+
+ EADM_LOG(6, "start");
+ EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
+
+ cc = ssch(sch->schid, orb);
+ switch (cc) {
+ case 0:
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
+ break;
+ case 1: /* status pending */
+ case 2: /* busy */
+ return -EBUSY;
+ case 3: /* not operational */
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int eadm_subchannel_clear(struct subchannel *sch)
+{
+ int cc;
+
+ cc = csch(sch->schid);
+ if (cc)
+ return -ENODEV;
+
+ sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
+ return 0;
+}
+
+static void eadm_subchannel_timeout(struct timer_list *t)
+{
+ struct eadm_private *private = from_timer(private, t, timer);
+ struct subchannel *sch = private->sch;
+
+ spin_lock_irq(sch->lock);
+ EADM_LOG(1, "timeout");
+ EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
+ if (eadm_subchannel_clear(sch))
+ EADM_LOG(0, "clear failed");
+ spin_unlock_irq(sch->lock);
+}
+
+static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ if (expires == 0) {
+ del_timer(&private->timer);
+ return;
+ }
+ if (timer_pending(&private->timer)) {
+ if (mod_timer(&private->timer, jiffies + expires))
+ return;
+ }
+ private->timer.expires = jiffies + expires;
+ add_timer(&private->timer);
+}
+
+static void eadm_subchannel_irq(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+ struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
+ struct irb *irb = this_cpu_ptr(&cio_irb);
+ blk_status_t error = BLK_STS_OK;
+
+ EADM_LOG(6, "irq");
+ EADM_LOG_HEX(6, irb, sizeof(*irb));
+
+ inc_irq_stat(IRQIO_ADM);
+
+ if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
+ && scsw->eswf == 1 && irb->esw.eadm.erw.r)
+ error = BLK_STS_IOERR;
+
+ if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
+ error = BLK_STS_TIMEOUT;
+
+ eadm_subchannel_set_timeout(sch, 0);
+
+ if (private->state != EADM_BUSY) {
+ EADM_LOG(1, "irq unsol");
+ EADM_LOG_HEX(1, irb, sizeof(*irb));
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+ return;
+ }
+ scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
+ private->state = EADM_IDLE;
+
+ if (private->completion)
+ complete(private->completion);
+}
+
+static struct subchannel *eadm_get_idle_sch(void)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry(private, &eadm_list, head) {
+ sch = private->sch;
+ spin_lock(sch->lock);
+ if (private->state == EADM_IDLE) {
+ private->state = EADM_BUSY;
+ list_move_tail(&private->head, &eadm_list);
+ spin_unlock(sch->lock);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return sch;
+ }
+ spin_unlock(sch->lock);
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return NULL;
+}
+
+int eadm_start_aob(struct aob *aob)
+{
+ struct eadm_private *private;
+ struct subchannel *sch;
+ unsigned long flags;
+ int ret;
+
+ sch = eadm_get_idle_sch();
+ if (!sch)
+ return -EBUSY;
+
+ spin_lock_irqsave(sch->lock, flags);
+ eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
+ ret = eadm_subchannel_start(sch, aob);
+ if (!ret)
+ goto out_unlock;
+
+ /* Handle start subchannel failure. */
+ eadm_subchannel_set_timeout(sch, 0);
+ private = get_eadm_private(sch);
+ private->state = EADM_NOT_OPER;
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(eadm_start_aob);
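+
+/*
+ * Usage sketch for eadm_start_aob(), illustrative only; the SCM block driver
+ * is the real in-kernel caller and requeue_and_retry_later() is a purely
+ * hypothetical helper. A return value of -EBUSY means that no eadm
+ * subchannel was idle and the request should simply be retried later; on
+ * success the result is delivered asynchronously via scm_irq_handler().
+ *
+ *	int rc = eadm_start_aob(aob);
+ *
+ *	if (rc == -EBUSY)
+ *		requeue_and_retry_later(aob);
+ */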
+
+static int eadm_subchannel_probe(struct subchannel *sch)
+{
+ struct eadm_private *private;
+ int ret;
+
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&private->head);
+ timer_setup(&private->timer, eadm_subchannel_timeout, 0);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, private);
+ private->state = EADM_IDLE;
+ private->sch = sch;
+ sch->isc = EADM_SCH_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (ret) {
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+ kfree(private);
+ goto out;
+ }
+ spin_unlock_irq(sch->lock);
+
+ spin_lock_irq(&list_lock);
+ list_add(&private->head, &eadm_list);
+ spin_unlock_irq(&list_lock);
+
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+out:
+ return ret;
+}
+
+static void eadm_quiesce(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+ DECLARE_COMPLETION_ONSTACK(completion);
+ int ret;
+
+ spin_lock_irq(sch->lock);
+ if (private->state != EADM_BUSY)
+ goto disable;
+
+ if (eadm_subchannel_clear(sch))
+ goto disable;
+
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
+
+ wait_for_completion_io(&completion);
+
+ spin_lock_irq(sch->lock);
+ private->completion = NULL;
+
+disable:
+ eadm_subchannel_set_timeout(sch, 0);
+ do {
+ ret = cio_disable_subchannel(sch);
+ } while (ret == -EBUSY);
+
+ spin_unlock_irq(sch->lock);
+}
+
+static int eadm_subchannel_remove(struct subchannel *sch)
+{
+ struct eadm_private *private = get_eadm_private(sch);
+
+ spin_lock_irq(&list_lock);
+ list_del(&private->head);
+ spin_unlock_irq(&list_lock);
+
+ eadm_quiesce(sch);
+
+ spin_lock_irq(sch->lock);
+ set_eadm_private(sch, NULL);
+ spin_unlock_irq(sch->lock);
+
+ kfree(private);
+
+ return 0;
+}
+
+static void eadm_subchannel_shutdown(struct subchannel *sch)
+{
+ eadm_quiesce(sch);
+}
+
+static int eadm_subchannel_freeze(struct subchannel *sch)
+{
+ return cio_disable_subchannel(sch);
+}
+
+static int eadm_subchannel_restore(struct subchannel *sch)
+{
+ return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+}
+
+/**
+ * eadm_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel. Return zero when the
+ * event has been handled sufficiently or -EAGAIN when this function should
+ * be called again in process context.
+ */
+static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
+{
+ struct eadm_private *private;
+ unsigned long flags;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
+
+ if (cio_update_schib(sch)) {
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ goto out_unlock;
+ }
+ private = get_eadm_private(sch);
+ if (private->state == EADM_NOT_OPER)
+ private->state = EADM_IDLE;
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return 0;
+}
+
+static struct css_device_id eadm_subchannel_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
+
+static struct css_driver eadm_subchannel_driver = {
+ .drv = {
+ .name = "eadm_subchannel",
+ .owner = THIS_MODULE,
+ },
+ .subchannel_type = eadm_subchannel_ids,
+ .irq = eadm_subchannel_irq,
+ .probe = eadm_subchannel_probe,
+ .remove = eadm_subchannel_remove,
+ .shutdown = eadm_subchannel_shutdown,
+ .sch_event = eadm_subchannel_sch_event,
+ .freeze = eadm_subchannel_freeze,
+ .thaw = eadm_subchannel_restore,
+ .restore = eadm_subchannel_restore,
+};
+
+static int __init eadm_sch_init(void)
+{
+ int ret;
+
+ if (!css_general_characteristics.eadm)
+ return -ENXIO;
+
+ eadm_debug = debug_register("eadm_log", 16, 1, 16);
+ if (!eadm_debug)
+ return -ENOMEM;
+
+ debug_register_view(eadm_debug, &debug_hex_ascii_view);
+ debug_set_level(eadm_debug, 2);
+
+ isc_register(EADM_SCH_ISC);
+ ret = css_driver_register(&eadm_subchannel_driver);
+ if (ret)
+ goto cleanup;
+
+ return ret;
+
+cleanup:
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+ return ret;
+}
+
+static void __exit eadm_sch_exit(void)
+{
+ css_driver_unregister(&eadm_subchannel_driver);
+ isc_unregister(EADM_SCH_ISC);
+ debug_unregister(eadm_debug);
+}
+module_init(eadm_sch_init);
+module_exit(eadm_sch_exit);
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
new file mode 100644
index 000000000..390ab5a6b
--- /dev/null
+++ b/drivers/s390/cio/eadm_sch.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef EADM_SCH_H
+#define EADM_SCH_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include "orb.h"
+
+struct eadm_private {
+ union orb orb;
+ enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+ struct completion *completion;
+ struct subchannel *sch;
+ struct timer_list timer;
+ struct list_head head;
+} __aligned(8);
+
+#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
+#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))
+
+#endif
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
new file mode 100644
index 000000000..99c900cc3
--- /dev/null
+++ b/drivers/s390/cio/fcx.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for assembling fcx enabled I/O control blocks.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/fcx.h>
+#include "cio.h"
+
+/**
+ * tcw_get_intrg - return pointer to associated interrogate tcw
+ * @tcw: pointer to the original tcw
+ *
+ * Return a pointer to the interrogate tcw associated with the specified tcw
+ * or %NULL if there is no associated interrogate tcw.
+ */
+struct tcw *tcw_get_intrg(struct tcw *tcw)
+{
+ return (struct tcw *) ((addr_t) tcw->intrg);
+}
+EXPORT_SYMBOL(tcw_get_intrg);
+
+/**
+ * tcw_get_data - return pointer to input/output data associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return the input or output data address specified in the tcw depending
+ * on whether the r-bit or the w-bit is set. If neither bit is set, return
+ * %NULL.
+ */
+void *tcw_get_data(struct tcw *tcw)
+{
+ if (tcw->r)
+ return (void *) ((addr_t) tcw->input);
+ if (tcw->w)
+ return (void *) ((addr_t) tcw->output);
+ return NULL;
+}
+EXPORT_SYMBOL(tcw_get_data);
+
+/**
+ * tcw_get_tccb - return pointer to tccb associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return pointer to the tccb associated with this tcw.
+ */
+struct tccb *tcw_get_tccb(struct tcw *tcw)
+{
+ return (struct tccb *) ((addr_t) tcw->tccb);
+}
+EXPORT_SYMBOL(tcw_get_tccb);
+
+/**
+ * tcw_get_tsb - return pointer to tsb associated with tcw
+ * @tcw: pointer to the tcw
+ *
+ * Return pointer to the tsb associated with this tcw.
+ */
+struct tsb *tcw_get_tsb(struct tcw *tcw)
+{
+ return (struct tsb *) ((addr_t) tcw->tsb);
+}
+EXPORT_SYMBOL(tcw_get_tsb);
+
+/**
+ * tcw_init - initialize tcw data structure
+ * @tcw: pointer to the tcw to be initialized
+ * @r: initial value of the r-bit
+ * @w: initial value of the w-bit
+ *
+ * Initialize all fields of the specified tcw data structure with zero and
+ * fill in the format, flags, r and w fields.
+ */
+void tcw_init(struct tcw *tcw, int r, int w)
+{
+ memset(tcw, 0, sizeof(struct tcw));
+ tcw->format = TCW_FORMAT_DEFAULT;
+ tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
+ if (r)
+ tcw->r = 1;
+ if (w)
+ tcw->w = 1;
+}
+EXPORT_SYMBOL(tcw_init);
+
+static inline size_t tca_size(struct tccb *tccb)
+{
+ return tccb->tcah.tcal - 12;
+}
+
+static u32 calc_dcw_count(struct tccb *tccb)
+{
+ int offset;
+ struct dcw *dcw;
+ u32 count = 0;
+ size_t size;
+
+ size = tca_size(tccb);
+ for (offset = 0; offset < size;) {
+ dcw = (struct dcw *) &tccb->tca[offset];
+ count += dcw->count;
+ if (!(dcw->flags & DCW_FLAGS_CC))
+ break;
+ offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
+ }
+ return count;
+}
+
+static u32 calc_cbc_size(struct tidaw *tidaw, int num)
+{
+ int i;
+ u32 cbc_data;
+ u32 cbc_count = 0;
+ u64 data_count = 0;
+
+ for (i = 0; i < num; i++) {
+ if (tidaw[i].flags & TIDAW_FLAGS_LAST)
+ break;
+ /* TODO: find out if padding applies to total of data
+ * transferred or data transferred by this tidaw. Assumption:
+ * applies to total. */
+ data_count += tidaw[i].count;
+ if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
+ cbc_data = 4 + ALIGN(data_count, 4) - data_count;
+ cbc_count += cbc_data;
+ data_count += cbc_data;
+ }
+ }
+ return cbc_count;
+}
+
+/**
+ * tcw_finalize - finalize tcw length fields and tidaw list
+ * @tcw: pointer to the tcw
+ * @num_tidaws: the number of tidaws used to address input/output data or zero
+ * if no tida is used
+ *
+ * Calculate the input-/output-count and tccbl fields in the tcw, add a
+ * tcat to the tccb and terminate the data tidaw list if used.
+ *
+ * Note: in case input- or output-tida is used, the tidaw-list must be stored
+ * in contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void tcw_finalize(struct tcw *tcw, int num_tidaws)
+{
+ struct tidaw *tidaw;
+ struct tccb *tccb;
+ struct tccb_tcat *tcat;
+ u32 count;
+
+ /* Terminate tidaw list. */
+ tidaw = tcw_get_data(tcw);
+ if (num_tidaws > 0)
+ tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
+ /* Add tcat to tccb. */
+ tccb = tcw_get_tccb(tcw);
+ tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
+ memset(tcat, 0, sizeof(*tcat));
+ /* Calculate tcw input/output count and tcat transport count. */
+ count = calc_dcw_count(tccb);
+ if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
+ count += calc_cbc_size(tidaw, num_tidaws);
+ if (tcw->r)
+ tcw->input_count = count;
+ else if (tcw->w)
+ tcw->output_count = count;
+ tcat->count = ALIGN(count, 4) + 4;
+ /* Calculate tccbl. */
+ tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
+ sizeof(struct tccb_tcat) - 20) >> 2;
+}
+EXPORT_SYMBOL(tcw_finalize);
+
+/**
+ * tcw_set_intrg - set the interrogate tcw address of a tcw
+ * @tcw: the tcw address
+ * @intrg_tcw: the address of the interrogate tcw
+ *
+ * Set the address of the interrogate tcw in the specified tcw.
+ */
+void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
+{
+ tcw->intrg = (u32) ((addr_t) intrg_tcw);
+}
+EXPORT_SYMBOL(tcw_set_intrg);
+
+/**
+ * tcw_set_data - set data address and tida flag of a tcw
+ * @tcw: the tcw address
+ * @data: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of a tcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
+{
+ if (tcw->r) {
+ tcw->input = (u64) ((addr_t) data);
+ if (use_tidal)
+ tcw->flags |= TCW_FLAGS_INPUT_TIDA;
+ } else if (tcw->w) {
+ tcw->output = (u64) ((addr_t) data);
+ if (use_tidal)
+ tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
+ }
+}
+EXPORT_SYMBOL(tcw_set_data);
+
+/**
+ * tcw_set_tccb - set tccb address of a tcw
+ * @tcw: the tcw address
+ * @tccb: the tccb address
+ *
+ * Set the address of the tccb in the specified tcw.
+ */
+void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
+{
+ tcw->tccb = (u64) ((addr_t) tccb);
+}
+EXPORT_SYMBOL(tcw_set_tccb);
+
+/**
+ * tcw_set_tsb - set tsb address of a tcw
+ * @tcw: the tcw address
+ * @tsb: the tsb address
+ *
+ * Set the address of the tsb in the specified tcw.
+ */
+void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
+{
+ tcw->tsb = (u64) ((addr_t) tsb);
+}
+EXPORT_SYMBOL(tcw_set_tsb);
+
+/**
+ * tccb_init - initialize tccb
+ * @tccb: the tccb address
+ * @size: the maximum size of the tccb
+ * @sac: the service-action-code to be used
+ *
+ * Initialize the header of the specified tccb by resetting all values to zero
+ * and filling in defaults for format, sac and initial tcal fields.
+ */
+void tccb_init(struct tccb *tccb, size_t size, u32 sac)
+{
+ memset(tccb, 0, size);
+ tccb->tcah.format = TCCB_FORMAT_DEFAULT;
+ tccb->tcah.sac = sac;
+ tccb->tcah.tcal = 12;
+}
+EXPORT_SYMBOL(tccb_init);
+
+/**
+ * tsb_init - initialize tsb
+ * @tsb: the tsb address
+ *
+ * Initialize the specified tsb by resetting all values to zero.
+ */
+void tsb_init(struct tsb *tsb)
+{
+ memset(tsb, 0, sizeof(*tsb));
+}
+EXPORT_SYMBOL(tsb_init);
+
+/**
+ * tccb_add_dcw - add a dcw to the tccb
+ * @tccb: the tccb address
+ * @tccb_size: the maximum tccb size
+ * @cmd: the dcw command
+ * @flags: flags for the dcw
+ * @cd: pointer to control data for this dcw or NULL if none is required
+ * @cd_count: number of control data bytes for this dcw
+ * @count: number of data bytes for this dcw
+ *
+ * Add a new dcw to the specified tccb by writing the dcw information specified
+ * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
+ * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
+ * would exceed the available space as defined by @tccb_size.
+ *
+ * Note: the tcal field of the tccb header will be updated to reflect the
+ * added content.
+ */
+struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
+ void *cd, u8 cd_count, u32 count)
+{
+ struct dcw *dcw;
+ int size;
+ int tca_offset;
+
+ /* Check for space. */
+ tca_offset = tca_size(tccb);
+ size = ALIGN(sizeof(struct dcw) + cd_count, 4);
+ if (sizeof(struct tccb_tcah) + tca_offset + size +
+ sizeof(struct tccb_tcat) > tccb_size)
+ return ERR_PTR(-ENOSPC);
+ /* Add dcw to tca. */
+ dcw = (struct dcw *) &tccb->tca[tca_offset];
+ memset(dcw, 0, size);
+ dcw->cmd = cmd;
+ dcw->flags = flags;
+ dcw->count = count;
+ dcw->cd_count = cd_count;
+ if (cd)
+ memcpy(&dcw->cd[0], cd, cd_count);
+ tccb->tcah.tcal += size;
+ return dcw;
+}
+EXPORT_SYMBOL(tccb_add_dcw);
+
+/**
+ * tcw_add_tidaw - add a tidaw to a tcw
+ * @tcw: the tcw address
+ * @num_tidaws: the current number of tidaws
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified tcw
+ * (depending on the value of the r-flag and w-flag) and return a pointer to
+ * the new tidaw.
+ *
+ * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
+ * must ensure that there is enough space for the new tidaw. The last-tidaw
+ * flag for the last tidaw in the list will be set by tcw_finalize.
+ */
+struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
+ void *addr, u32 count)
+{
+ struct tidaw *tidaw;
+
+ /* Add tidaw to tidaw-list. */
+ tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
+ memset(tidaw, 0, sizeof(struct tidaw));
+ tidaw->flags = flags;
+ tidaw->count = count;
+ tidaw->addr = (u64) ((addr_t) addr);
+ return tidaw;
+}
+EXPORT_SYMBOL(tcw_add_tidaw);
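+
+/*
+ * Putting the helpers together - an illustrative sketch only, assembling a
+ * write operation with a single output tidaw. Buffer allocation, alignment
+ * and error handling are omitted, and tcw/tccb/tsb/tidaw_list/buf/cmd/count
+ * are assumed to be suitably prepared; most callers use the itcw interface
+ * in itcw.c instead of assembling a tcw by hand:
+ *
+ *	tcw_init(tcw, 0, 1);
+ *	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
+ *	tcw_set_tccb(tcw, tccb);
+ *	tcw_set_tsb(tcw, tsb);
+ *	tccb_add_dcw(tccb, TCCB_MAX_SIZE, cmd, 0, NULL, 0, count);
+ *	tcw_set_data(tcw, tidaw_list, 1);
+ *	tcw_add_tidaw(tcw, 0, 0, buf, count);
+ *	tcw_finalize(tcw, 1);
+ */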
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644
index 000000000..45f9c0736
--- /dev/null
+++ b/drivers/s390/cio/idset.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2007, 2012
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include "idset.h"
+#include "css.h"
+
+struct idset {
+ int num_ssid;
+ int num_id;
+ unsigned long bitmap[];
+};
+
+static inline unsigned long bitmap_size(int num_ssid, int num_id)
+{
+ return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
+}
+
+static struct idset *idset_new(int num_ssid, int num_id)
+{
+ struct idset *set;
+
+ set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
+ if (set) {
+ set->num_ssid = num_ssid;
+ set->num_id = num_id;
+ memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
+ }
+ return set;
+}
+
+void idset_free(struct idset *set)
+{
+ vfree(set);
+}
+
+void idset_fill(struct idset *set)
+{
+ memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
+}
+
+static inline void idset_add(struct idset *set, int ssid, int id)
+{
+ set_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline void idset_del(struct idset *set, int ssid, int id)
+{
+ clear_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+static inline int idset_contains(struct idset *set, int ssid, int id)
+{
+ return test_bit(ssid * set->num_id + id, set->bitmap);
+}
+
+struct idset *idset_sch_new(void)
+{
+ return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
+}
+
+void idset_sch_add(struct idset *set, struct subchannel_id schid)
+{
+ idset_add(set, schid.ssid, schid.sch_no);
+}
+
+void idset_sch_del(struct idset *set, struct subchannel_id schid)
+{
+ idset_del(set, schid.ssid, schid.sch_no);
+}
+
+/* Clear ids starting from @schid up to end of subchannel set. */
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
+{
+ int pos = schid.ssid * set->num_id + schid.sch_no;
+
+ bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
+}
+
+int idset_sch_contains(struct idset *set, struct subchannel_id schid)
+{
+ return idset_contains(set, schid.ssid, schid.sch_no);
+}
+
+int idset_is_empty(struct idset *set)
+{
+ return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
+}
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+ int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
+
+ bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
+}
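+
+/*
+ * Illustrative usage sketch only (css.c is the real user of this interface):
+ *
+ *	struct idset *set = idset_sch_new();
+ *
+ *	if (set) {
+ *		idset_sch_add(set, schid);
+ *		if (idset_sch_contains(set, schid))
+ *			idset_sch_del(set, schid);
+ *		idset_free(set);
+ *	}
+ */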
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644
index 000000000..a3ece8d80
--- /dev/null
+++ b/drivers/s390/cio/idset.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2007, 2012
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef S390_IDSET_H
+#define S390_IDSET_H
+
+#include <asm/schid.h>
+
+struct idset;
+
+void idset_free(struct idset *set);
+void idset_fill(struct idset *set);
+
+struct idset *idset_sch_new(void);
+void idset_sch_add(struct idset *set, struct subchannel_id id);
+void idset_sch_del(struct idset *set, struct subchannel_id id);
+void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
+int idset_sch_contains(struct idset *set, struct subchannel_id id);
+int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
+
+#endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
new file mode 100644
index 000000000..c03b4a199
--- /dev/null
+++ b/drivers/s390/cio/io_sch.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_IO_SCH_H
+#define S390_IO_SCH_H
+
+#include <linux/types.h>
+#include <asm/schid.h>
+#include <asm/ccwdev.h>
+#include <asm/irq.h>
+#include "css.h"
+#include "orb.h"
+
+struct io_subchannel_dma_area {
+ struct ccw1 sense_ccw; /* static ccw for sense command */
+};
+
+struct io_subchannel_private {
+ union orb orb; /* operation request block */
+ struct ccw_device *cdev;/* pointer to the child ccw device */
+ struct {
+ unsigned int suspend:1; /* allow suspend */
+ unsigned int prefetch:1;/* deny prefetch */
+ unsigned int inter:1; /* suppress intermediate interrupts */
+ } __packed options;
+ struct io_subchannel_dma_area *dma_area;
+ dma_addr_t dma_area_dma;
+} __aligned(8);
+
+#define to_io_private(n) ((struct io_subchannel_private *) \
+ dev_get_drvdata(&(n)->dev))
+#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
+
+static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ return priv ? priv->cdev : NULL;
+}
+
+static inline void sch_set_cdev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ if (priv)
+ priv->cdev = cdev;
+}
+
+#define MAX_CIWS 8
+
+/*
+ * Possible status values for a CCW request's I/O.
+ */
+enum io_status {
+ IO_DONE,
+ IO_RUNNING,
+ IO_STATUS_ERROR,
+ IO_PATH_ERROR,
+ IO_REJECTED,
+ IO_KILLED
+};
+
+/**
+ * ccw_request - Internal CCW request.
+ * @cp: channel program to start
+ * @timeout: maximum allowable time in jiffies between start I/O and interrupt
+ * @maxretries: number of retries per I/O operation and path
+ * @lpm: mask of paths to use
+ * @check: optional callback that determines if results are final
+ * @filter: optional callback to adjust request status based on IRB data
+ * @callback: final callback
+ * @data: user-defined pointer passed to all callbacks
+ * @singlepath: if set, use only one path from @lpm per start I/O
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
+ * @mask: current path mask
+ * @retries: current number of retries
+ * @drc: delayed return code
+ */
+struct ccw_request {
+ struct ccw1 *cp;
+ unsigned long timeout;
+ u16 maxretries;
+ u8 lpm;
+ int (*check)(struct ccw_device *, void *);
+ enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
+ enum io_status);
+ void (*callback)(struct ccw_device *, void *, int);
+ void *data;
+ unsigned int singlepath:1;
+ /* These fields are used internally. */
+ unsigned int cancel:1;
+ unsigned int done:1;
+ u16 mask;
+ u16 retries;
+ int drc;
+} __attribute__((packed));
+
+/*
+ * sense-id response buffer layout
+ */
+struct senseid {
+ /* common part */
+ u8 reserved; /* always 0x'FF' */
+ u16 cu_type; /* control unit type */
+ u8 cu_model; /* control unit model */
+ u16 dev_type; /* device type */
+ u8 dev_model; /* device model */
+ u8 unused; /* padding byte */
+ /* extended part */
+ struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
+} __attribute__ ((packed, aligned(4)));
+
+enum cdev_todo {
+ CDEV_TODO_NOTHING,
+ CDEV_TODO_ENABLE_CMF,
+ CDEV_TODO_REBIND,
+ CDEV_TODO_REGISTER,
+ CDEV_TODO_UNREG,
+ CDEV_TODO_UNREG_EVAL,
+};
+
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
+struct ccw_device_dma_area {
+ struct senseid senseid; /* SenseID info */
+ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
+ struct irb irb; /* device status */
+ struct pgid pgid[8]; /* path group IDs per chpid*/
+};
+
+struct ccw_device_private {
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+ int state; /* device state */
+ atomic_t onoff;
+ struct ccw_dev_id dev_id; /* device id */
+ struct ccw_request req; /* internal I/O request */
+ int iretry;
+ u8 pgid_valid_mask; /* mask of valid PGIDs */
+ u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
+ u8 pgid_reset_mask; /* mask of PGIDs which were reset */
+ u8 path_noirq_mask; /* mask of paths for which no irq was
+ received */
+ u8 path_notoper_mask; /* mask of paths which were found
+ not operable */
+ u8 path_gone_mask; /* mask of paths, that became unavailable */
+ u8 path_new_mask; /* mask of paths, that became available */
+ u8 path_broken_mask; /* mask of paths, which were found to be
+ unusable */
+ struct {
+ unsigned int fast:1; /* post with "channel end" */
+ unsigned int repall:1; /* report every interrupt status */
+ unsigned int pgroup:1; /* do path grouping */
+ unsigned int force:1; /* allow forced online */
+ unsigned int mpath:1; /* do multipathing */
+ } __attribute__ ((packed)) options;
+ struct {
+ unsigned int esid:1; /* Ext. SenseID supported by HW */
+ unsigned int dosense:1; /* delayed SENSE required */
+ unsigned int doverify:1; /* delayed path verification */
+ unsigned int donotify:1; /* call notify function */
+ unsigned int recog_done:1; /* dev. recog. complete */
+ unsigned int fake_irb:2; /* deliver faked irb */
+ unsigned int resuming:1; /* recognition while resume */
+ unsigned int pgroup:1; /* pathgroup is set up */
+ unsigned int mpath:1; /* multipathing is set up */
+ unsigned int pgid_unknown:1;/* unknown pgid state */
+ unsigned int initialized:1; /* set if initial reference held */
+ } __attribute__((packed)) flags;
+ unsigned long intparm; /* user interruption parameter */
+ struct qdio_irq *qdio_data;
+ int async_kill_io_rc;
+ struct work_struct todo_work;
+ enum cdev_todo todo;
+ wait_queue_head_t wait_q;
+ struct timer_list timer;
+ void *cmb; /* measurement information */
+ struct list_head cmb_list; /* list of measured devices */
+ u64 cmb_start_time; /* clock value of cmb reset */
+ void *cmb_wait; /* deferred cmb enable/disable */
+ struct gen_pool *dma_pool;
+ struct ccw_device_dma_area *dma_area;
+ enum interruption_class int_class;
+};
+
+#endif
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
new file mode 100644
index 000000000..08eb10283
--- /dev/null
+++ b/drivers/s390/cio/ioasm.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Channel subsystem I/O instructions.
+ */
+
+#include <linux/export.h>
+
+#include <asm/chpid.h>
+#include <asm/schid.h>
+#include <asm/crw.h>
+
+#include "ioasm.h"
+#include "orb.h"
+#include "cio.h"
+
+static inline int __stsch(struct subchannel_id schid, struct schib *addr)
+{
+ register struct subchannel_id reg1 asm ("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " stsch 0(%3)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (ccode), "=m" (*addr)
+ : "d" (reg1), "a" (addr)
+ : "cc");
+ return ccode;
+}
+
+int stsch(struct subchannel_id schid, struct schib *addr)
+{
+ int ccode;
+
+ ccode = __stsch(schid, addr);
+ trace_s390_cio_stsch(schid, addr, ccode);
+
+ return ccode;
+}
+EXPORT_SYMBOL(stsch);
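+
+/*
+ * The wrappers in this file return the raw condition code of the
+ * instruction; those with an EX_TABLE entry return -EIO if the instruction
+ * faults. Callers map the condition code to an errno themselves. An
+ * illustrative sketch for stsch (the actual mapping is done by the callers
+ * in cio.c):
+ *
+ *	switch (stsch(schid, &schib)) {
+ *	case 0:
+ *		break;
+ *	case 3:
+ *		return -ENODEV;
+ *	default:
+ *		return -EIO;
+ *	}
+ */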
+
+static inline int __msch(struct subchannel_id schid, struct schib *addr)
+{
+ register struct subchannel_id reg1 asm ("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " msch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc");
+ return ccode;
+}
+
+int msch(struct subchannel_id schid, struct schib *addr)
+{
+ int ccode;
+
+ ccode = __msch(schid, addr);
+ trace_s390_cio_msch(schid, addr, ccode);
+
+ return ccode;
+}
+
+static inline int __tsch(struct subchannel_id schid, struct irb *addr)
+{
+ register struct subchannel_id reg1 asm ("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " tsch 0(%3)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode), "=m" (*addr)
+ : "d" (reg1), "a" (addr)
+ : "cc");
+ return ccode;
+}
+
+int tsch(struct subchannel_id schid, struct irb *addr)
+{
+ int ccode;
+
+ ccode = __tsch(schid, addr);
+ trace_s390_cio_tsch(schid, addr, ccode);
+
+ return ccode;
+}
+
+static inline int __ssch(struct subchannel_id schid, union orb *addr)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " ssch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc", "memory");
+ return ccode;
+}
+
+int ssch(struct subchannel_id schid, union orb *addr)
+{
+ int ccode;
+
+ ccode = __ssch(schid, addr);
+ trace_s390_cio_ssch(schid, addr, ccode);
+
+ return ccode;
+}
+EXPORT_SYMBOL(ssch);
+
+static inline int __csch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " csch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
+int csch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __csch(schid);
+ trace_s390_cio_csch(schid, ccode);
+
+ return ccode;
+}
+EXPORT_SYMBOL(csch);
+
+int tpi(struct tpi_info *addr)
+{
+ int ccode;
+
+ asm volatile(
+ " tpi 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode), "=m" (*addr)
+ : "a" (addr)
+ : "cc");
+ trace_s390_cio_tpi(addr, ccode);
+
+ return ccode;
+}
+
+int chsc(void *chsc_area)
+{
+ typedef struct { char _[4096]; } addr_type;
+ int cc = -EIO;
+
+ asm volatile(
+ " .insn rre,0xb25f0000,%2,0\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (cc), "=m" (*(addr_type *) chsc_area)
+ : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
+ : "cc");
+ trace_s390_cio_chsc(chsc_area, cc);
+
+ return cc;
+}
+EXPORT_SYMBOL(chsc);
+
+static inline int __rsch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " rsch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc", "memory");
+
+ return ccode;
+}
+
+int rsch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __rsch(schid);
+ trace_s390_cio_rsch(schid, ccode);
+
+ return ccode;
+}
+
+static inline int __hsch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " hsch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
+int hsch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __hsch(schid);
+ trace_s390_cio_hsch(schid, ccode);
+
+ return ccode;
+}
+EXPORT_SYMBOL(hsch);
+
+static inline int __xsch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " xsch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
+int xsch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __xsch(schid);
+ trace_s390_cio_xsch(schid, ccode);
+
+ return ccode;
+}
+
+int stcrw(struct crw *crw)
+{
+ int ccode;
+
+ asm volatile(
+ " stcrw 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (ccode), "=m" (*crw)
+ : "a" (crw)
+ : "cc");
+ trace_s390_cio_stcrw(crw, ccode);
+
+ return ccode;
+}
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
new file mode 100644
index 000000000..4be539cb9
--- /dev/null
+++ b/drivers/s390/cio/ioasm.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_CIO_IOASM_H
+#define S390_CIO_IOASM_H
+
+#include <asm/chpid.h>
+#include <asm/schid.h>
+#include <asm/crw.h>
+#include "orb.h"
+#include "cio.h"
+#include "trace.h"
+
+/*
+ * Some S390 specific IO instructions
+ */
+
+int stsch(struct subchannel_id schid, struct schib *addr);
+int msch(struct subchannel_id schid, struct schib *addr);
+int tsch(struct subchannel_id schid, struct irb *addr);
+int ssch(struct subchannel_id schid, union orb *addr);
+int csch(struct subchannel_id schid);
+int tpi(struct tpi_info *addr);
+int chsc(void *chsc_area);
+int rsch(struct subchannel_id schid);
+int hsch(struct subchannel_id schid);
+int xsch(struct subchannel_id schid);
+int stcrw(struct crw *crw);
+
+#endif
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c
new file mode 100644
index 000000000..77fde9f5e
--- /dev/null
+++ b/drivers/s390/cio/isc.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for registration of I/O interruption subclasses on s390.
+ *
+ * Copyright IBM Corp. 2008
+ * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/isc.h>
+
+static unsigned int isc_refs[MAX_ISC + 1];
+static DEFINE_SPINLOCK(isc_ref_lock);
+
+
+/**
+ * isc_register - register an I/O interruption subclass.
+ * @isc: I/O interruption subclass to register
+ *
+ * The number of users for @isc is increased. If this is the first user to
+ * register @isc, the corresponding I/O interruption subclass mask is enabled.
+ *
+ * Context:
+ * This function must not be called in interrupt context.
+ */
+void isc_register(unsigned int isc)
+{
+ if (isc > MAX_ISC) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock(&isc_ref_lock);
+ if (isc_refs[isc] == 0)
+ ctl_set_bit(6, 31 - isc);
+ isc_refs[isc]++;
+ spin_unlock(&isc_ref_lock);
+}
+EXPORT_SYMBOL_GPL(isc_register);
+
+/**
+ * isc_unregister - unregister an I/O interruption subclass.
+ * @isc: I/O interruption subclass to unregister
+ *
+ * The number of users for @isc is decreased. If this is the last user to
+ * unregister @isc, the corresponding I/O interruption subclass mask is
+ * disabled.
+ * Note: This function must not be called if isc_register() hasn't been called
+ * before by the driver for @isc.
+ *
+ * Context:
+ * This function must not be called in interrupt context.
+ */
+void isc_unregister(unsigned int isc)
+{
+ spin_lock(&isc_ref_lock);
+ /* check for misuse */
+ if (isc > MAX_ISC || isc_refs[isc] == 0) {
+ WARN_ON(1);
+ goto out_unlock;
+ }
+ if (isc_refs[isc] == 1)
+ ctl_clear_bit(6, 31 - isc);
+ isc_refs[isc]--;
+out_unlock:
+ spin_unlock(&isc_ref_lock);
+}
+EXPORT_SYMBOL_GPL(isc_unregister);
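+
+/*
+ * Usage sketch (illustrative): a subchannel driver enables its interruption
+ * subclass once during initialization and releases it symmetrically on exit,
+ * as eadm_sch.c does for EADM_SCH_ISC:
+ *
+ *	isc_register(EADM_SCH_ISC);
+ *	...
+ *	isc_unregister(EADM_SCH_ISC);
+ */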
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
new file mode 100644
index 000000000..19e463633
--- /dev/null
+++ b/drivers/s390/cio/itcw.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for incremental construction of fcx enabled I/O control blocks.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <asm/fcx.h>
+#include <asm/itcw.h>
+
+/*
+ * struct itcw - incremental tcw helper data type
+ *
+ * This structure serves as a handle for the incremental construction of a
+ * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
+ * tcw and associated data. The data structures are contained inside a single
+ * contiguous buffer provided by the user.
+ *
+ * The itcw construction functions take care of overall data integrity:
+ * - reset unused fields to zero
+ * - fill in required pointers
+ * - ensure required alignment for data structures
+ * - prevent data structures from crossing 4k-byte boundaries where required
+ * - calculate tccb-related length fields
+ * - optionally provide ready-made interrogate tcw and associated structures
+ *
+ * Restrictions apply to the itcws created with these construction functions:
+ * - tida only supported for data address, not for tccb
+ * - only contiguous tidaw-lists (no ttic)
+ * - total number of bytes required per itcw may not exceed 4k bytes
+ * - either read or write operation (may not work with r=0 and w=0)
+ *
+ * Example:
+ * struct itcw *itcw;
+ * void *buffer;
+ * size_t size;
+ *
+ * size = itcw_calc_size(1, 2, 0);
+ * buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ * if (!buffer)
+ * return -ENOMEM;
+ * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
+ * if (IS_ERR(itcw))
+ * return PTR_ERR(itcw);
+ * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
+ * itcw_add_tidaw(itcw, 0, 0x30000, 20);
+ * itcw_add_tidaw(itcw, 0, 0x40000, 52);
+ * itcw_finalize(itcw);
+ *
+ */
+struct itcw {
+ struct tcw *tcw;
+ struct tcw *intrg_tcw;
+ int num_tidaws;
+ int max_tidaws;
+ int intrg_num_tidaws;
+ int intrg_max_tidaws;
+};
+
+/**
+ * itcw_get_tcw - return pointer to tcw associated with the itcw
+ * @itcw: address of the itcw
+ *
+ * Return pointer to the tcw associated with the itcw.
+ */
+struct tcw *itcw_get_tcw(struct itcw *itcw)
+{
+ return itcw->tcw;
+}
+EXPORT_SYMBOL(itcw_get_tcw);
+
+/**
+ * itcw_calc_size - return the size of an itcw with the given parameters
+ * @intrg: if non-zero, add an interrogate tcw
+ * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
+ * if no tida is to be used.
+ * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
+ * by the interrogate tcw, if specified
+ *
+ * Calculate and return the number of bytes required to hold an itcw with the
+ * given parameters and assuming tccbs with maximum size.
+ *
+ * Note that the resulting size also contains bytes needed for alignment
+ * padding as well as padding to ensure that data structures don't cross a
+ * 4k-boundary where required.
+ */
+size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
+{
+ size_t len;
+ int cross_count;
+
+ /* Main data. */
+ len = sizeof(struct itcw);
+ len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
+ /* TSB */ sizeof(struct tsb) +
+ /* TIDAL */ max_tidaws * sizeof(struct tidaw);
+ /* Interrogate data. */
+ if (intrg) {
+ len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
+ /* TSB */ sizeof(struct tsb) +
+ /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
+ }
+
+ /* Maximum required alignment padding. */
+ len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
+
+ /* TIDAW lists may not cross a 4k boundary. To cross a
+ * boundary we need to add a TTIC TIDAW. We need to reserve
+ * one additional TIDAW for a TTIC that we may need to add due
+ * to the placement of the data chunk in memory, and a further
+ * TIDAW for each page boundary that the TIDAW list may cross
+ * due to its own size.
+ */
+ if (max_tidaws) {
+ cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ len += cross_count * sizeof(struct tidaw);
+ }
+ if (intrg_max_tidaws) {
+ cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ len += cross_count * sizeof(struct tidaw);
+ }
+ return len;
+}
+EXPORT_SYMBOL(itcw_calc_size);
+
+#define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095))
+
+static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
+ int align, int check_4k)
+{
+ addr_t addr;
+
+ addr = ALIGN(*start, align);
+ if (check_4k && CROSS4K(addr, len)) {
+ addr = ALIGN(addr, 4096);
+ addr = ALIGN(addr, align);
+ }
+ if (addr + len > end)
+ return ERR_PTR(-ENOSPC);
+ *start = addr + len;
+ return (void *) addr;
+}
+
+/**
+ * itcw_init - initialize incremental tcw data structure
+ * @buffer: address of buffer to use for data structures
+ * @size: number of bytes in buffer
+ * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
+ * operation tcw
+ * @intrg: if non-zero, add and initialize an interrogate tcw
+ * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
+ * if no tida is to be used.
+ * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
+ * by the interrogate tcw, if specified
+ *
+ * Prepare the specified buffer to be used as an incremental tcw, i.e. a
+ * helper data structure that can be used to construct a valid tcw by
+ * successive calls to other helper functions. Note: the buffer needs to be
+ * located below the 2G address limit. The resulting tcw has the following
+ * restrictions:
+ * - no tccb tidal
+ * - input/output tidal is contiguous (no ttic)
+ * - total data should not exceed 4k
+ * - tcw specifies either read or write operation
+ *
+ * On success, return pointer to the resulting incremental tcw data structure,
+ * ERR_PTR otherwise.
+ */
+struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
+ int max_tidaws, int intrg_max_tidaws)
+{
+ struct itcw *itcw;
+ void *chunk;
+ addr_t start;
+ addr_t end;
+ int cross_count;
+
+ /* Check for 2G limit. */
+ start = (addr_t) buffer;
+ end = start + size;
+ if (end > (1 << 31))
+ return ERR_PTR(-EINVAL);
+ memset(buffer, 0, size);
+ /* ITCW. */
+ chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ itcw = chunk;
+ /* allow for TTIC tidaws that may be needed to cross a page boundary */
+ cross_count = 0;
+ if (max_tidaws)
+ cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ itcw->max_tidaws = max_tidaws + cross_count;
+ cross_count = 0;
+ if (intrg_max_tidaws)
+ cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+ >> PAGE_SHIFT);
+ itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
+ /* Main TCW. */
+ chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ itcw->tcw = chunk;
+ tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
+ (op == ITCW_OP_WRITE) ? 1 : 0);
+ /* Interrogate TCW. */
+ if (intrg) {
+ chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ itcw->intrg_tcw = chunk;
+ tcw_init(itcw->intrg_tcw, 1, 0);
+ tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
+ }
+ /* Data TIDAL. */
+ if (max_tidaws > 0) {
+ chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
+ itcw->max_tidaws, 16, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tcw_set_data(itcw->tcw, chunk, 1);
+ }
+ /* Interrogate data TIDAL. */
+ if (intrg && (intrg_max_tidaws > 0)) {
+ chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
+ itcw->intrg_max_tidaws, 16, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tcw_set_data(itcw->intrg_tcw, chunk, 1);
+ }
+ /* TSB. */
+ chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tsb_init(chunk);
+ tcw_set_tsb(itcw->tcw, chunk);
+ /* Interrogate TSB. */
+ if (intrg) {
+ chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tsb_init(chunk);
+ tcw_set_tsb(itcw->intrg_tcw, chunk);
+ }
+ /* TCCB. */
+ chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
+ tcw_set_tccb(itcw->tcw, chunk);
+ /* Interrogate TCCB. */
+ if (intrg) {
+ chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
+ if (IS_ERR(chunk))
+ return chunk;
+ tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
+ tcw_set_tccb(itcw->intrg_tcw, chunk);
+ tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
+ sizeof(struct dcw_intrg_data), 0);
+ tcw_finalize(itcw->intrg_tcw, 0);
+ }
+ return itcw;
+}
+EXPORT_SYMBOL(itcw_init);
+
+/**
+ * itcw_add_dcw - add a dcw to the itcw
+ * @itcw: address of the itcw
+ * @cmd: the dcw command
+ * @flags: flags for the dcw
+ * @cd: address of control data for this dcw or NULL if none is required
+ * @cd_count: number of control data bytes for this dcw
+ * @count: number of data bytes for this dcw
+ *
+ * Add a new dcw to the specified itcw by writing the dcw information specified
+ * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
+ * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
+ * would exceed the available space.
+ *
+ * Note: the tcal field of the tccb header will be updated to reflect added
+ * content.
+ */
+struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
+ u8 cd_count, u32 count)
+{
+ return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
+ flags, cd, cd_count, count);
+}
+EXPORT_SYMBOL(itcw_add_dcw);
+
+/**
+ * itcw_add_tidaw - add a tidaw to the itcw
+ * @itcw: address of the itcw
+ * @flags: flags for the new tidaw
+ * @addr: address value for the new tidaw
+ * @count: count value for the new tidaw
+ *
+ * Add a new tidaw to the input/output data tidaw-list of the specified itcw
+ * (depending on the value of the r-flag and w-flag). Return a pointer to
+ * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
+ * available space.
+ *
+ * Note: TTIC tidaws are automatically added when needed, so explicitly calling
+ * this interface with the TTIC flag is not supported. The last-tidaw flag
+ * for the last tidaw in the list will be set by itcw_finalize.
+ */
+struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
+{
+ struct tidaw *following;
+
+ if (itcw->num_tidaws >= itcw->max_tidaws)
+ return ERR_PTR(-ENOSPC);
+ /*
+ * Is the tidaw that follows the one we are about to fill on the next
+ * page? Then we have to insert a TTIC tidaw first that points to the
+ * tidaw on the new page.
+ */
+ following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+ + itcw->num_tidaws + 1;
+ if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
+ tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
+ TIDAW_FLAGS_TTIC, following, 0);
+ if (itcw->num_tidaws >= itcw->max_tidaws)
+ return ERR_PTR(-ENOSPC);
+ }
+ return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
+}
+EXPORT_SYMBOL(itcw_add_tidaw);
+
+/**
+ * itcw_set_data - set data address and tida flag of the itcw
+ * @itcw: address of the itcw
+ * @addr: the data address
+ * @use_tidal: zero if the data address specifies a contiguous block of data,
+ * non-zero if it specifies a list of tidaws.
+ *
+ * Set the input/output data address of the itcw (depending on the value of the
+ * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
+ * is set as well.
+ */
+void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
+{
+ tcw_set_data(itcw->tcw, addr, use_tidal);
+}
+EXPORT_SYMBOL(itcw_set_data);
+
+/**
+ * itcw_finalize - calculate length and count fields of the itcw
+ * @itcw: address of the itcw
+ *
+ * Calculate tcw input-/output-count and tccbl fields and add a tcat to the
+ * tccb. In case input- or output-tida is used, the tidaw-list must be stored
+ * in contiguous storage (no ttic). The tcal field in the tccb must be
+ * up-to-date.
+ */
+void itcw_finalize(struct itcw *itcw)
+{
+ tcw_finalize(itcw->tcw, itcw->num_tidaws);
+}
+EXPORT_SYMBOL(itcw_finalize);
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
new file mode 100644
index 000000000..a2d3778b2
--- /dev/null
+++ b/drivers/s390/cio/orb.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Orb related data structures.
+ *
+ * Copyright IBM Corp. 2007, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ * Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#ifndef S390_ORB_H
+#define S390_ORB_H
+
+/*
+ * Command-mode operation request block
+ */
+struct cmd_orb {
+ u32 intparm; /* interruption parameter */
+ u32 key:4; /* flags, like key, suspend control, etc. */
+ u32 spnd:1; /* suspend control */
+ u32 res1:1; /* reserved */
+ u32 mod:1; /* modification control */
+ u32 sync:1; /* synchronize control */
+ u32 fmt:1; /* format control */
+ u32 pfch:1; /* prefetch control */
+ u32 isic:1; /* initial-status-interruption control */
+ u32 alcc:1; /* address-limit-checking control */
+ u32 ssic:1; /* suppress-suspended-interr. control */
+ u32 res2:1; /* reserved */
+ u32 c64:1; /* IDAW/QDIO 64 bit control */
+ u32 i2k:1; /* IDAW 2/4kB block size control */
+ u32 lpm:8; /* logical path mask */
+ u32 ils:1; /* incorrect length */
+ u32 zero:6; /* reserved zeros */
+ u32 orbx:1; /* ORB extension control */
+ u32 cpa; /* channel program address */
+} __packed __aligned(4);
+
+/*
+ * Transport-mode operation request block
+ */
+struct tm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:9;
+ u32 b:1;
+ u32:2;
+ u32 lpm:8;
+ u32:7;
+ u32 x:1;
+ u32 tcw;
+ u32 prio:8;
+ u32:8;
+ u32 rsvpgm:8;
+ u32:8;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+/*
+ * eadm operation request block
+ */
+struct eadm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:4;
+ u32 compat1:1;
+ u32 compat2:1;
+ u32:21;
+ u32 x:1;
+ u32 aob;
+ u32 css_prio:8;
+ u32:8;
+ u32 scm_prio:8;
+ u32:8;
+ u32:29;
+ u32 fmt:3;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+union orb {
+ struct cmd_orb cmd;
+ struct tm_orb tm;
+ struct eadm_orb eadm;
+} __packed __aligned(4);
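+
+/*
+ * Illustrative sketch only (cio_start() in cio.c is the real user): a
+ * command-mode orb is cleared and filled before being passed to ssch().
+ * The field values below are examples, not the exact settings cio_start()
+ * uses:
+ *
+ *	memset(orb, 0, sizeof(union orb));
+ *	orb->cmd.intparm = intparm;
+ *	orb->cmd.fmt = 1;
+ *	orb->cmd.lpm = lpm;
+ *	orb->cmd.cpa = (u32)__pa(cpa);
+ *	cc = ssch(sch->schid, orb);
+ */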
+
+#endif /* S390_ORB_H */
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
new file mode 100644
index 000000000..919d10614
--- /dev/null
+++ b/drivers/s390/cio/qdio.h
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#ifndef _CIO_QDIO_H
+#define _CIO_QDIO_H
+
+#include <asm/page.h>
+#include <asm/schid.h>
+#include <asm/debug.h>
+#include "chsc.h"
+
+#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
+#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
+#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
+
+enum qdio_irq_states {
+ QDIO_IRQ_STATE_INACTIVE,
+ QDIO_IRQ_STATE_ESTABLISHED,
+ QDIO_IRQ_STATE_ACTIVE,
+ QDIO_IRQ_STATE_STOPPED,
+ QDIO_IRQ_STATE_CLEANUP,
+ QDIO_IRQ_STATE_ERR,
+ NR_QDIO_IRQ_STATES,
+};
+
+/* used as intparm in do_IO */
+#define QDIO_DOING_ESTABLISH 1
+#define QDIO_DOING_ACTIVATE 2
+#define QDIO_DOING_CLEANUP 3
+
+#define SLSB_STATE_NOT_INIT 0x0
+#define SLSB_STATE_EMPTY 0x1
+#define SLSB_STATE_PRIMED 0x2
+#define SLSB_STATE_PENDING 0x3
+#define SLSB_STATE_HALTED 0xe
+#define SLSB_STATE_ERROR 0xf
+#define SLSB_TYPE_INPUT 0x0
+#define SLSB_TYPE_OUTPUT 0x20
+#define SLSB_OWNER_PROG 0x80
+#define SLSB_OWNER_CU 0x40
+
+#define SLSB_P_INPUT_NOT_INIT \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */
+#define SLSB_P_INPUT_ACK \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */
+#define SLSB_CU_INPUT_EMPTY \
+ (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */
+#define SLSB_P_INPUT_PRIMED \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */
+#define SLSB_P_INPUT_HALTED \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */
+#define SLSB_P_INPUT_ERROR \
+ (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */
+#define SLSB_P_OUTPUT_NOT_INIT \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
+#define SLSB_P_OUTPUT_EMPTY \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
+#define SLSB_P_OUTPUT_PENDING \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */
+#define SLSB_CU_OUTPUT_PRIMED \
+ (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
+#define SLSB_P_OUTPUT_HALTED \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */
+#define SLSB_P_OUTPUT_ERROR \
+ (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */
+
+#define SLSB_ERROR_DURING_LOOKUP 0xff
+
+/* additional CIWs returned by extended Sense-ID */
+#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
+#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
+
+/* flags for st qdio sch data */
+#define CHSC_FLAG_QDIO_CAPABILITY 0x80
+#define CHSC_FLAG_VALIDITY 0x40
+
+/* SIGA flags */
+#define QDIO_SIGA_WRITE 0x00
+#define QDIO_SIGA_READ 0x01
+#define QDIO_SIGA_SYNC 0x02
+#define QDIO_SIGA_WRITEM 0x03
+#define QDIO_SIGA_WRITEQ 0x04
+#define QDIO_SIGA_QEBSM_FLAG 0x80
+
+static inline int do_sqbs(u64 token, unsigned char state, int queue,
+ int *start, int *count)
+{
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+ unsigned long _ccq = *count;
+
+ asm volatile(
+ " lgr 1,%[token]\n"
+ " .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])"
+ : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart)
+ : [state] "a" ((unsigned long)state), [token] "d" (token)
+ : "memory", "cc", "1");
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+
+ return (_ccq >> 32) & 0xff;
+}
+
+static inline int do_eqbs(u64 token, unsigned char *state, int queue,
+ int *start, int *count, int ack)
+{
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+ unsigned long _state = (unsigned long)ack << 63;
+ unsigned long _ccq = *count;
+
+ asm volatile(
+ " lgr 1,%[token]\n"
+ " .insn rrf,0xb99c0000,%[qs],%[state],%[ccq],0"
+ : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart),
+ [state] "+&d" (_state)
+ : [token] "d" (token)
+ : "memory", "cc", "1");
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+ *state = _state & 0xff;
+
+ return (_ccq >> 32) & 0xff;
+}
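+
+/*
+ * Both QEBSM helpers above use the same calling convention: the subchannel
+ * token is loaded into GR1, the queue number and first buffer index are packed
+ * into a single "queuestart" operand (queue << 32 | start), and the buffer
+ * count is passed and returned in the low byte of _ccq.  The condition-code
+ * qualifier comes back in (_ccq >> 32) & 0xff and is interpreted by the
+ * qdio_do_eqbs() / qdio_do_sqbs() wrappers in qdio_main.c.
+ */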
+
+struct qdio_irq;
+
+struct siga_flag {
+ u8 input:1;
+ u8 output:1;
+ u8 sync:1;
+ u8 sync_after_ai:1;
+ u8 sync_out_after_pci:1;
+ u8:3;
+} __attribute__ ((packed));
+
+struct qdio_dev_perf_stat {
+ unsigned int adapter_int;
+ unsigned int qdio_int;
+ unsigned int pci_request_int;
+
+ unsigned int tasklet_inbound;
+ unsigned int tasklet_inbound_resched;
+ unsigned int tasklet_inbound_resched2;
+ unsigned int tasklet_outbound;
+
+ unsigned int siga_read;
+ unsigned int siga_write;
+ unsigned int siga_sync;
+
+ unsigned int inbound_call;
+ unsigned int inbound_handler;
+ unsigned int stop_polling;
+ unsigned int inbound_queue_full;
+ unsigned int outbound_call;
+ unsigned int outbound_handler;
+ unsigned int outbound_queue_full;
+ unsigned int fast_requeue;
+ unsigned int target_full;
+ unsigned int eqbs;
+ unsigned int eqbs_partial;
+ unsigned int sqbs;
+ unsigned int sqbs_partial;
+ unsigned int int_discarded;
+} ____cacheline_aligned;
+
+struct qdio_queue_perf_stat {
+ /* Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128. */
+ unsigned int nr_sbals[8];
+ unsigned int nr_sbal_error;
+ unsigned int nr_sbal_nop;
+ unsigned int nr_sbal_total;
+};
+
+enum qdio_irq_poll_states {
+ QDIO_IRQ_DISABLED,
+};
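+
+/*
+ * QDIO_IRQ_DISABLED is the single bit kept in qdio_irq->poll_state: the
+ * interrupt handler sets it (test_and_set_bit) before invoking the driver's
+ * irq_poll callback, and qdio_start_irq() clears it again once the driver is
+ * done polling and wants interrupt delivery re-enabled.
+ */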
+
+struct qdio_input_q {
+ /* Batch of SBALs that we processed while polling the queue: */
+ unsigned int batch_start;
+ unsigned int batch_count;
+};
+
+struct qdio_output_q {
+ /* PCIs are enabled for the queue */
+ int pci_out_enabled;
+ /* cq: use asynchronous output buffers */
+ int use_cq;
+	/* cq: aobs used for a particular SBAL */
+ struct qaob **aobs;
+ /* cq: sbal state related to asynchronous operation */
+ struct qdio_outbuf_state *sbal_state;
+ /* timer to check for more outbound work */
+ struct timer_list timer;
+};
+
+/*
+ * Note on cache alignment: the slsb and other write-mostly data are grouped
+ * at the beginning; sbal[] is read-only, starts on a new cacheline and is
+ * followed by the read-mostly members.
+ */
+struct qdio_q {
+ struct slsb slsb;
+
+ union {
+ struct qdio_input_q in;
+ struct qdio_output_q out;
+ } u;
+
+ /*
+ * inbound: next buffer the program should check for
+ * outbound: next buffer to check if adapter processed it
+ */
+ int first_to_check;
+
+ /* number of buffers in use by the adapter */
+ atomic_t nr_buf_used;
+
+ /* error condition during a data transfer */
+ unsigned int qdio_error;
+
+ /* last scan of the queue */
+ u64 timestamp;
+
+ struct tasklet_struct tasklet;
+ struct qdio_queue_perf_stat q_stats;
+
+ struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
+
+ /* queue number */
+ int nr;
+
+ /* bitmask of queue number */
+ int mask;
+
+ /* input or output queue */
+ int is_input_q;
+
+ /* upper-layer program handler */
+ qdio_handler_t (*handler);
+
+ struct qdio_irq *irq_ptr;
+ struct sl *sl;
+ /*
+ * A page is allocated under this pointer and used for slib and sl.
+ * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
+ */
+ struct slib *slib;
+} __attribute__ ((aligned(256)));
+
+struct qdio_irq {
+ struct qib qib;
+ u32 *dsci; /* address of device state change indicator */
+ struct ccw_device *cdev;
+ struct list_head entry; /* list of thinint devices */
+ struct dentry *debugfs_dev;
+
+ unsigned long int_parm;
+ struct subchannel_id schid;
+ unsigned long sch_token; /* QEBSM facility */
+
+ enum qdio_irq_states state;
+
+ struct siga_flag siga_flag; /* siga sync information from qdioac */
+
+ int nr_input_qs;
+ int nr_output_qs;
+
+ struct ccw1 ccw;
+ struct ciw equeue;
+ struct ciw aqueue;
+
+ struct qdio_ssqd_desc ssqd_desc;
+ void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+
+ unsigned int scan_threshold; /* used SBALs before tasklet schedule */
+ int perf_stat_enabled;
+
+ struct qdr *qdr;
+ unsigned long chsc_page;
+
+ struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
+ struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+ unsigned int max_input_qs;
+ unsigned int max_output_qs;
+
+ void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
+ unsigned long poll_state;
+
+ debug_info_t *debug_area;
+ struct mutex setup_mutex;
+ struct qdio_dev_perf_stat perf_stat;
+};
+
+/* helper functions */
+#define queue_type(q) q->irq_ptr->qib.qfmt
+#define SCH_NO(q) (q->irq_ptr->schid.sch_no)
+
+#define is_thinint_irq(irq) \
+ (irq->qib.qfmt == QDIO_IQDIO_QFMT || \
+ css_general_characteristics.aif_osa)
+
+#define qperf(__qdev, __attr) ((__qdev)->perf_stat.__attr)
+
+#define QDIO_PERF_STAT_INC(__irq, __attr) \
+({ \
+ struct qdio_irq *qdev = __irq; \
+ if (qdev->perf_stat_enabled) \
+ (qdev->perf_stat.__attr)++; \
+})
+
+#define qperf_inc(__q, __attr) QDIO_PERF_STAT_INC((__q)->irq_ptr, __attr)
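+
+/*
+ * Example: qperf_inc(q, siga_sync) increments irq_ptr->perf_stat.siga_sync,
+ * but only while statistics have been enabled (perf_stat_enabled) via the
+ * "statistics" debugfs attribute.
+ */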
+
+static inline void account_sbals_error(struct qdio_q *q, int count)
+{
+ q->q_stats.nr_sbal_error += count;
+ q->q_stats.nr_sbal_total += count;
+}
+
+/* the highest iqdio queue is used for multicast */
+static inline int multicast_outbound(struct qdio_q *q)
+{
+ return (q->irq_ptr->nr_output_qs > 1) &&
+ (q->nr == q->irq_ptr->nr_output_qs - 1);
+}
+
+#define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
+#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
+
+#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
+#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
+#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
+#define need_siga_sync_after_ai(q) \
+ (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
+#define need_siga_sync_out_after_pci(q) \
+ (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_input_qs && \
+ ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_output_qs && \
+ ({ q = irq_ptr->output_qs[i]; 1; }); i++)
+
+#define add_buf(bufnr, inc) QDIO_BUFNR((bufnr) + (inc))
+#define next_buf(bufnr) add_buf(bufnr, 1)
+#define sub_buf(bufnr, dec) QDIO_BUFNR((bufnr) - (dec))
+#define prev_buf(bufnr) sub_buf(bufnr, 1)
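+
+/*
+ * QDIO_BUFNR() (asm/qdio.h) reduces the index modulo QDIO_MAX_BUFFERS_PER_Q,
+ * so these helpers wrap around the 128-entry ring: next_buf(127) yields 0 and
+ * prev_buf(0) yields 127.
+ */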
+
+#define queue_irqs_enabled(q) \
+	(test_bit(QDIO_IRQ_DISABLED, &(q)->irq_ptr->poll_state) == 0)
+#define queue_irqs_disabled(q) \
+	(test_bit(QDIO_IRQ_DISABLED, &(q)->irq_ptr->poll_state) != 0)
+
+extern u64 last_ai_time;
+
+/* prototypes for thin interrupt */
+int qdio_establish_thinint(struct qdio_irq *irq_ptr);
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
+void tiqdio_add_device(struct qdio_irq *irq_ptr);
+void tiqdio_remove_device(struct qdio_irq *irq_ptr);
+void tiqdio_inbound_processing(unsigned long q);
+int qdio_thinint_init(void);
+void qdio_thinint_exit(void);
+int test_nonshared_ind(struct qdio_irq *);
+
+/* prototypes for setup */
+void qdio_inbound_processing(unsigned long data);
+void qdio_outbound_processing(unsigned long data);
+void qdio_outbound_timer(struct timer_list *t);
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb);
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
+ int nr_output_qs);
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+ struct subchannel_id *schid,
+ struct qdio_ssqd_desc *data);
+int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
+void qdio_shutdown_irq(struct qdio_irq *irq);
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
+void qdio_free_queues(struct qdio_irq *irq_ptr);
+void qdio_free_async_data(struct qdio_irq *irq_ptr);
+int qdio_setup_init(void);
+void qdio_setup_exit(void);
+int qdio_enable_async_operation(struct qdio_output_q *q);
+void qdio_disable_async_operation(struct qdio_output_q *q);
+struct qaob *qdio_allocate_aob(void);
+
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state);
+#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
new file mode 100644
index 000000000..863d17c80
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2008, 2009
+ *
+ * Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/debug.h>
+#include "qdio_debug.h"
+#include "qdio.h"
+
+debug_info_t *qdio_dbf_setup;
+debug_info_t *qdio_dbf_error;
+
+static struct dentry *debugfs_root;
+#define QDIO_DEBUGFS_NAME_LEN 10
+#define QDIO_DBF_NAME_LEN 20
+
+struct qdio_dbf_entry {
+ char dbf_name[QDIO_DBF_NAME_LEN];
+ debug_info_t *dbf_info;
+ struct list_head dbf_list;
+};
+
+static LIST_HEAD(qdio_dbf_list);
+static DEFINE_MUTEX(qdio_dbf_list_mutex);
+
+static debug_info_t *qdio_get_dbf_entry(char *name)
+{
+ struct qdio_dbf_entry *entry;
+ debug_info_t *rc = NULL;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
+ if (strcmp(entry->dbf_name, name) == 0) {
+ rc = entry->dbf_info;
+ break;
+ }
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+ return rc;
+}
+
+static void qdio_clear_dbf_list(void)
+{
+ struct qdio_dbf_entry *entry, *tmp;
+
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
+ list_del(&entry->dbf_list);
+ debug_unregister(entry->dbf_info);
+ kfree(entry);
+ }
+ mutex_unlock(&qdio_dbf_list_mutex);
+}
+
+int qdio_allocate_dbf(struct qdio_irq *irq_ptr)
+{
+ char text[QDIO_DBF_NAME_LEN];
+ struct qdio_dbf_entry *new_entry;
+
+ DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
+
+ /* allocate trace view for the interface */
+ snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
+ dev_name(&irq_ptr->cdev->dev));
+ irq_ptr->debug_area = qdio_get_dbf_entry(text);
+ if (irq_ptr->debug_area)
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
+ else {
+ irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+ if (!irq_ptr->debug_area)
+ return -ENOMEM;
+ if (debug_register_view(irq_ptr->debug_area,
+ &debug_hex_ascii_view)) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ debug_set_level(irq_ptr->debug_area, DBF_WARN);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+ new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
+ if (!new_entry) {
+ debug_unregister(irq_ptr->debug_area);
+ return -ENOMEM;
+ }
+ strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+ new_entry->dbf_info = irq_ptr->debug_area;
+ mutex_lock(&qdio_dbf_list_mutex);
+ list_add(&new_entry->dbf_list, &qdio_dbf_list);
+ mutex_unlock(&qdio_dbf_list_mutex);
+ }
+ return 0;
+}
+
+static int qstat_show(struct seq_file *m, void *v)
+{
+ unsigned char state;
+ struct qdio_q *q = m->private;
+ int i;
+
+ if (!q)
+ return 0;
+
+ seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
+ q->timestamp, last_ai_time);
+ seq_printf(m, "nr_used: %d ftc: %d\n",
+ atomic_read(&q->nr_buf_used), q->first_to_check);
+ if (q->is_input_q) {
+ seq_printf(m, "batch start: %u batch count: %u\n",
+ q->u.in.batch_start, q->u.in.batch_count);
+ seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
+ *(u8 *)q->irq_ptr->dsci,
+ test_bit(QDIO_IRQ_DISABLED,
+ &q->irq_ptr->poll_state));
+ }
+ seq_printf(m, "SBAL states:\n");
+ seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
+
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
+ debug_get_buf_state(q, i, &state);
+ switch (state) {
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_OUTPUT_NOT_INIT:
+ seq_printf(m, "N");
+ break;
+ case SLSB_P_OUTPUT_PENDING:
+ seq_printf(m, "P");
+ break;
+ case SLSB_P_INPUT_PRIMED:
+ case SLSB_CU_OUTPUT_PRIMED:
+ seq_printf(m, "+");
+ break;
+ case SLSB_P_INPUT_ACK:
+ seq_printf(m, "A");
+ break;
+ case SLSB_P_INPUT_ERROR:
+ case SLSB_P_OUTPUT_ERROR:
+ seq_printf(m, "x");
+ break;
+ case SLSB_CU_INPUT_EMPTY:
+ case SLSB_P_OUTPUT_EMPTY:
+ seq_printf(m, "-");
+ break;
+ case SLSB_P_INPUT_HALTED:
+ case SLSB_P_OUTPUT_HALTED:
+ seq_printf(m, ".");
+ break;
+ default:
+ seq_printf(m, "?");
+ }
+ if (i == 63)
+ seq_printf(m, "\n");
+ }
+ seq_printf(m, "\n");
+ seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
+
+ seq_printf(m, "\nSBAL statistics:");
+ if (!q->irq_ptr->perf_stat_enabled) {
+ seq_printf(m, " disabled\n");
+ return 0;
+ }
+
+ seq_printf(m, "\n1 2.. 4.. 8.. "
+ "16.. 32.. 64.. 128\n");
+ for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
+ seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
+ seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
+ q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
+ q->q_stats.nr_sbal_total);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qstat);
+
+static int ssqd_show(struct seq_file *m, void *v)
+{
+ struct ccw_device *cdev = m->private;
+ struct qdio_ssqd_desc ssqd;
+ int rc;
+
+ rc = qdio_get_ssqd_desc(cdev, &ssqd);
+ if (rc)
+ return rc;
+
+ seq_hex_dump(m, "", DUMP_PREFIX_NONE, 16, 4, &ssqd, sizeof(ssqd),
+ false);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(ssqd);
+
+static char *qperf_names[] = {
+ "Assumed adapter interrupts",
+ "QDIO interrupts",
+ "Requested PCIs",
+ "Inbound tasklet runs",
+ "Inbound tasklet resched",
+ "Inbound tasklet resched2",
+ "Outbound tasklet runs",
+ "SIGA read",
+ "SIGA write",
+ "SIGA sync",
+ "Inbound calls",
+ "Inbound handler",
+ "Inbound stop_polling",
+ "Inbound queue full",
+ "Outbound calls",
+ "Outbound handler",
+ "Outbound queue full",
+ "Outbound fast_requeue",
+ "Outbound target_full",
+ "QEBSM eqbs",
+ "QEBSM eqbs partial",
+ "QEBSM sqbs",
+ "QEBSM sqbs partial",
+ "Discarded interrupts"
+};
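+
+/*
+ * qperf_show() below walks struct qdio_dev_perf_stat as a flat array of
+ * unsigned int, so the order of these strings must match the field order of
+ * that structure in qdio.h.
+ */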
+
+static int qperf_show(struct seq_file *m, void *v)
+{
+ struct qdio_irq *irq_ptr = m->private;
+ unsigned int *stat;
+ int i;
+
+ if (!irq_ptr)
+ return 0;
+ if (!irq_ptr->perf_stat_enabled) {
+ seq_printf(m, "disabled\n");
+ return 0;
+ }
+ stat = (unsigned int *)&irq_ptr->perf_stat;
+
+ for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
+ seq_printf(m, "%26s:\t%u\n",
+ qperf_names[i], *(stat + i));
+ return 0;
+}
+
+static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct qdio_irq *irq_ptr = seq->private;
+ struct qdio_q *q;
+ unsigned long val;
+ int ret, i;
+
+ if (!irq_ptr)
+ return 0;
+
+ ret = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ irq_ptr->perf_stat_enabled = 0;
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+ for_each_input_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
+ for_each_output_queue(irq_ptr, q, i)
+ memset(&q->q_stats, 0, sizeof(q->q_stats));
+ break;
+ case 1:
+ irq_ptr->perf_stat_enabled = 1;
+ break;
+ }
+ return count;
+}
+
+static int qperf_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, qperf_show,
+ file_inode(filp)->i_private);
+}
+
+static const struct file_operations debugfs_perf_fops = {
+ .owner = THIS_MODULE,
+ .open = qperf_seq_open,
+ .read = seq_read,
+ .write = qperf_seq_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void setup_debugfs_entry(struct dentry *parent, struct qdio_q *q)
+{
+ char name[QDIO_DEBUGFS_NAME_LEN];
+
+ snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
+ q->is_input_q ? "input" : "output",
+ q->nr);
+ debugfs_create_file(name, 0444, parent, q, &qstat_fops);
+}
+
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&irq_ptr->cdev->dev),
+ debugfs_root);
+ debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR,
+ irq_ptr->debugfs_dev, irq_ptr, &debugfs_perf_fops);
+ debugfs_create_file("ssqd", 0444, irq_ptr->debugfs_dev, irq_ptr->cdev,
+ &ssqd_fops);
+
+ for_each_input_queue(irq_ptr, q, i)
+ setup_debugfs_entry(irq_ptr->debugfs_dev, q);
+ for_each_output_queue(irq_ptr, q, i)
+ setup_debugfs_entry(irq_ptr->debugfs_dev, q);
+}
+
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
+{
+ debugfs_remove_recursive(irq_ptr->debugfs_dev);
+}
+
+int __init qdio_debug_init(void)
+{
+ debugfs_root = debugfs_create_dir("qdio", NULL);
+
+ qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
+ debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(qdio_dbf_setup, DBF_INFO);
+ DBF_EVENT("dbf created\n");
+
+ qdio_dbf_error = debug_register("qdio_error", 4, 1, 16);
+ debug_register_view(qdio_dbf_error, &debug_hex_ascii_view);
+ debug_set_level(qdio_dbf_error, DBF_INFO);
+ DBF_ERROR("dbf created\n");
+ return 0;
+}
+
+void qdio_debug_exit(void)
+{
+ qdio_clear_dbf_list();
+ debugfs_remove_recursive(debugfs_root);
+ debug_unregister(qdio_dbf_setup);
+ debug_unregister(qdio_dbf_error);
+}
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
new file mode 100644
index 000000000..0dfba085f
--- /dev/null
+++ b/drivers/s390/cio/qdio_debug.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2008
+ *
+ * Author: Jan Glauber (jang@linux.vnet.ibm.com)
+ */
+#ifndef QDIO_DEBUG_H
+#define QDIO_DEBUG_H
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include "qdio.h"
+
+/* that gives us 15 characters in the text event views */
+#define QDIO_DBF_LEN 32
+
+extern debug_info_t *qdio_dbf_setup;
+extern debug_info_t *qdio_dbf_error;
+
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 6 /* informational */
+
+#undef DBF_EVENT
+#undef DBF_ERROR
+#undef DBF_DEV_EVENT
+
+#define DBF_EVENT(text...) \
+ do { \
+ char debug_buffer[QDIO_DBF_LEN]; \
+ snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+ debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
+ } while (0)
+
+static inline void DBF_HEX(void *addr, int len)
+{
+ debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
+}
+
+#define DBF_ERROR(text...) \
+ do { \
+ char debug_buffer[QDIO_DBF_LEN]; \
+ snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+ debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
+ } while (0)
+
+static inline void DBF_ERROR_HEX(void *addr, int len)
+{
+ debug_event(qdio_dbf_error, DBF_ERR, addr, len);
+}
+
+#define DBF_DEV_EVENT(level, device, text...) \
+ do { \
+ char debug_buffer[QDIO_DBF_LEN]; \
+ if (debug_level_enabled(device->debug_area, level)) { \
+ snprintf(debug_buffer, QDIO_DBF_LEN, text); \
+ debug_text_event(device->debug_area, level, debug_buffer); \
+ } \
+ } while (0)
+
+static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
+ int len, int level)
+{
+ debug_event(dev->debug_area, level, addr, len);
+}
+
+int qdio_allocate_dbf(struct qdio_irq *irq_ptr);
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
+int qdio_debug_init(void);
+void qdio_debug_exit(void);
+
+#endif
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
new file mode 100644
index 000000000..e3c55fc23
--- /dev/null
+++ b/drivers/s390/cio/qdio_main.c
@@ -0,0 +1,1703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux for s390 qdio support, buffer handling, qdio API and module support.
+ *
+ * Copyright IBM Corp. 2000, 2008
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/ipl.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
+ "Jan Glauber <jang@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("QDIO base support");
+MODULE_LICENSE("GPL");
+
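+/*
+ * The SIGA wrappers below issue the SIGNAL ADAPTER instruction.  The function
+ * code is loaded into GR0, the subchannel id (or, with QEBSM, the subchannel
+ * token) into GR1 and the queue mask into GR2; GR3 carries a further operand
+ * where needed (e.g. the input mask for SIGA-sync or the AOB for SIGA-write).
+ * The condition code is extracted from the PSW with ipm/srl.
+ */
+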
+static inline int do_siga_sync(unsigned long schid,
+ unsigned long out_mask, unsigned long in_mask,
+ unsigned int fc)
+{
+ int cc;
+
+ asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[out]\n"
+ " lgr 3,%[in]\n"
+ " siga 0\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc)
+ : [fc] "d" (fc), [schid] "d" (schid),
+ [out] "d" (out_mask), [in] "d" (in_mask)
+ : "cc", "0", "1", "2", "3");
+ return cc;
+}
+
+static inline int do_siga_input(unsigned long schid, unsigned long mask,
+ unsigned long fc)
+{
+ int cc;
+
+ asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[mask]\n"
+ " siga 0\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc)
+ : [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
+ : "cc", "0", "1", "2");
+ return cc;
+}
+
+/**
+ * do_siga_output - perform SIGA-w/wt function
+ * @schid: subchannel id or in case of QEBSM the subchannel token
+ * @mask: which output queues to process
+ * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
+ * @fc: function code to perform
+ * @aob: asynchronous operation block
+ *
+ * Returns condition code.
+ * Note: For IQDC unicast queues only the highest priority queue is processed.
+ */
+static inline int do_siga_output(unsigned long schid, unsigned long mask,
+ unsigned int *bb, unsigned long fc,
+ unsigned long aob)
+{
+ int cc;
+
+ asm volatile(
+ " lgr 0,%[fc]\n"
+ " lgr 1,%[schid]\n"
+ " lgr 2,%[mask]\n"
+ " lgr 3,%[aob]\n"
+ " siga 0\n"
+ " lgr %[fc],0\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), [fc] "+&d" (fc)
+ : [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
+ : "cc", "0", "1", "2", "3");
+ *bb = fc >> 31;
+ return cc;
+}
+
+/**
+ * qdio_do_eqbs - extract buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: state of the extracted buffers
+ * @start: buffer number to start at
+ * @count: count of buffers to examine
+ * @auto_ack: automatically acknowledge buffers
+ *
+ * Returns the number of successfully extracted equal buffer states.
+ * Stops processing if a state differs from the previous buffer's state.
+ */
+static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+ int start, int count, int auto_ack)
+{
+ int tmp_count = count, tmp_start = start, nr = q->nr;
+ unsigned int ccq = 0;
+
+ qperf_inc(q, eqbs);
+
+ if (!q->is_input_q)
+ nr += q->irq_ptr->nr_input_qs;
+again:
+ ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
+ auto_ack);
+
+ switch (ccq) {
+ case 0:
+ case 32:
+ /* all done, or next buffer state different */
+ return count - tmp_count;
+ case 96:
+ /* not all buffers processed */
+ qperf_inc(q, eqbs_partial);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
+ tmp_count);
+ return count - tmp_count;
+ case 97:
+ /* no buffer processed */
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
+ goto again;
+ default:
+ DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+ DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
+ q->first_to_check, count, q->irq_ptr->int_parm);
+ return 0;
+ }
+}
+
+/**
+ * qdio_do_sqbs - set buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: new state of the buffers
+ * @start: first buffer number to change
+ * @count: how many buffers to change
+ *
+ * Returns the number of successfully changed buffers.
+ * Retries until the specified count of buffer states is set or an
+ * error occurs.
+ */
+static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
+ int count)
+{
+ unsigned int ccq = 0;
+ int tmp_count = count, tmp_start = start;
+ int nr = q->nr;
+
+ if (!count)
+ return 0;
+ qperf_inc(q, sqbs);
+
+ if (!q->is_input_q)
+ nr += q->irq_ptr->nr_input_qs;
+again:
+ ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
+
+ switch (ccq) {
+ case 0:
+ case 32:
+ /* all done, or active buffer adapter-owned */
+ WARN_ON_ONCE(tmp_count);
+ return count - tmp_count;
+ case 96:
+ /* not all buffers processed */
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
+ qperf_inc(q, sqbs_partial);
+ goto again;
+ default:
+ DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
+ DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
+ q->first_to_check, count, q->irq_ptr->int_parm);
+ return 0;
+ }
+}
+
+/*
+ * Returns number of examined buffers and their common state in *state.
+ * Requested number of buffers-to-examine must be > 0.
+ */
+static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, unsigned int count,
+ int auto_ack, int merge_pending)
+{
+ unsigned char __state = 0;
+ int i = 1;
+
+ if (is_qebsm(q))
+ return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
+
+ /* get initial state: */
+ __state = q->slsb.val[bufnr];
+
+ /* Bail out early if there is no work on the queue: */
+ if (__state & SLSB_OWNER_CU)
+ goto out;
+
+ if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+ __state = SLSB_P_OUTPUT_EMPTY;
+
+ for (; i < count; i++) {
+ bufnr = next_buf(bufnr);
+
+ /* merge PENDING into EMPTY: */
+ if (merge_pending &&
+ q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
+ __state == SLSB_P_OUTPUT_EMPTY)
+ continue;
+
+ /* stop if next state differs from initial state: */
+ if (q->slsb.val[bufnr] != __state)
+ break;
+ }
+
+out:
+ *state = __state;
+ return i;
+}
+
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, int auto_ack)
+{
+ return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
+}
+
+/* wrap-around safe setting of slsb states, returns number of changed buffers */
+static inline int set_buf_states(struct qdio_q *q, int bufnr,
+ unsigned char state, int count)
+{
+ int i;
+
+ if (is_qebsm(q))
+ return qdio_do_sqbs(q, state, bufnr, count);
+
+ /* Ensure that all preceding changes to the SBALs are visible: */
+ mb();
+
+ for (i = 0; i < count; i++) {
+ WRITE_ONCE(q->slsb.val[bufnr], state);
+ bufnr = next_buf(bufnr);
+ }
+
+ /* Make our SLSB changes visible: */
+ mb();
+
+ return count;
+}
+
+static inline int set_buf_state(struct qdio_q *q, int bufnr,
+ unsigned char state)
+{
+ return set_buf_states(q, bufnr, state, 1);
+}
+
+/* set slsb states to initial state */
+static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i)
+ set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
+ QDIO_MAX_BUFFERS_PER_Q);
+ for_each_output_queue(irq_ptr, q, i)
+ set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
+ QDIO_MAX_BUFFERS_PER_Q);
+}
+
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+ unsigned int input)
+{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_SYNC;
+ int cc;
+
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
+ qperf_inc(q, siga_sync);
+
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
+
+ cc = do_siga_sync(schid, output, input, fc);
+ if (unlikely(cc))
+ DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
+ return (cc) ? -EIO : 0;
+}
+
+static inline int qdio_siga_sync_q(struct qdio_q *q)
+{
+ if (q->is_input_q)
+ return qdio_siga_sync(q, 0, q->mask);
+ else
+ return qdio_siga_sync(q, q->mask, 0);
+}
+
+static int qdio_siga_output(struct qdio_q *q, unsigned int count,
+ unsigned int *busy_bit, unsigned long aob)
+{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_WRITE;
+ u64 start_time = 0;
+ int retries = 0, cc;
+
+ if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
+ if (count > 1)
+ fc = QDIO_SIGA_WRITEM;
+ else if (aob)
+ fc = QDIO_SIGA_WRITEQ;
+ }
+
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
+again:
+ cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
+
+ /* hipersocket busy condition */
+ if (unlikely(*busy_bit)) {
+ retries++;
+
+ if (!start_time) {
+ start_time = get_tod_clock_fast();
+ goto again;
+ }
+ if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
+ goto again;
+ }
+ if (retries) {
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
+ "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
+ }
+ return cc;
+}
+
+static inline int qdio_siga_input(struct qdio_q *q)
+{
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_READ;
+ int cc;
+
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
+ qperf_inc(q, siga_read);
+
+ if (is_qebsm(q)) {
+ schid = q->irq_ptr->sch_token;
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
+
+ cc = do_siga_input(schid, q->mask, fc);
+ if (unlikely(cc))
+ DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
+ return (cc) ? -EIO : 0;
+}
+
+#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
+#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
+
+static inline void qdio_sync_queues(struct qdio_q *q)
+{
+ /* PCI capable outbound queues will also be scanned so sync them too */
+ if (pci_out_supported(q->irq_ptr))
+ qdio_siga_sync_all(q);
+ else
+ qdio_siga_sync_q(q);
+}
+
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state)
+{
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
+ return get_buf_state(q, bufnr, state, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
+{
+ if (!q->u.in.batch_count)
+ return;
+
+ qperf_inc(q, stop_polling);
+
+ /* show the card that we are not polling anymore */
+ set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.batch_count);
+ q->u.in.batch_count = 0;
+}
+
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
+{
+ q->q_stats.nr_sbal_total += count;
+ q->q_stats.nr_sbals[ilog2(count)]++;
+}
+
+static void process_buffer_error(struct qdio_q *q, unsigned int start,
+ int count)
+{
+ q->qdio_error = QDIO_ERROR_SLSB_STATE;
+
+ /* special handling for no target buffer empty */
+ if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
+ q->sbal[start]->element[15].sflags == 0x10) {
+ qperf_inc(q, target_full);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
+ return;
+ }
+
+ DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
+ DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
+ DBF_ERROR("FTC:%3d C:%3d", start, count);
+ DBF_ERROR("F14:%2x F15:%2x",
+ q->sbal[start]->element[14].sflags,
+ q->sbal[start]->element[15].sflags);
+}
+
+static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
+ int count, bool auto_ack)
+{
+ /* ACK the newest SBAL: */
+ if (!auto_ack)
+ set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
+
+ if (!q->u.in.batch_count)
+ q->u.in.batch_start = start;
+ q->u.in.batch_count += count;
+}
+
+static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
+{
+ unsigned char state = 0;
+ int count;
+
+ q->timestamp = get_tod_clock_fast();
+
+ count = atomic_read(&q->nr_buf_used);
+ if (!count)
+ return 0;
+
+ /*
+	 * No SIGA sync here: the queues were already synced, either by a PCI
+	 * interrupt or by us after a thin interrupt.
+ */
+ count = get_buf_states(q, start, &state, count, 1, 0);
+ if (!count)
+ return 0;
+
+ switch (state) {
+ case SLSB_P_INPUT_PRIMED:
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
+ count);
+
+ inbound_handle_work(q, start, count, is_qebsm(q));
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+ qperf_inc(q, inbound_queue_full);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
+ return count;
+ case SLSB_P_INPUT_ERROR:
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
+ count);
+
+ process_buffer_error(q, start, count);
+ inbound_handle_work(q, start, count, false);
+ if (atomic_sub_return(count, &q->nr_buf_used) == 0)
+ qperf_inc(q, inbound_queue_full);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
+ return count;
+ case SLSB_CU_INPUT_EMPTY:
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
+ q->nr, start);
+ return 0;
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_INPUT_ACK:
+ /* We should never see this state, throw a WARN: */
+ default:
+ dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+ "found state %#x at index %u on queue %u\n",
+ state, start, q->nr);
+ return 0;
+ }
+}
+
+static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
+{
+ return get_inbound_buffer_frontier(q, start);
+}
+
+static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
+{
+ unsigned char state = 0;
+
+ if (!atomic_read(&q->nr_buf_used))
+ return 1;
+
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
+ get_buf_state(q, start, &state, 0);
+
+ if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
+ /* more work coming */
+ return 0;
+
+ return 1;
+}
+
+static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
+ int bufnr)
+{
+ unsigned long phys_aob = 0;
+
+ if (!q->aobs[bufnr]) {
+ struct qaob *aob = qdio_allocate_aob();
+ q->aobs[bufnr] = aob;
+ }
+ if (q->aobs[bufnr]) {
+ q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
+ phys_aob = virt_to_phys(q->aobs[bufnr]);
+ WARN_ON_ONCE(phys_aob & 0xFF);
+ }
+
+ q->sbal_state[bufnr].flags = 0;
+ return phys_aob;
+}
+
+static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
+ unsigned int count)
+{
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return;
+
+ if (q->is_input_q) {
+ qperf_inc(q, inbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
+ } else {
+ qperf_inc(q, outbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+ start, count);
+ }
+
+ q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
+ q->irq_ptr->int_parm);
+
+ /* for the next time */
+ q->qdio_error = 0;
+}
+
+static inline int qdio_tasklet_schedule(struct qdio_q *q)
+{
+ if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
+ tasklet_schedule(&q->tasklet);
+ return 0;
+ }
+ return -EPERM;
+}
+
+static void __qdio_inbound_processing(struct qdio_q *q)
+{
+ unsigned int start = q->first_to_check;
+ int count;
+
+ qperf_inc(q, tasklet_inbound);
+
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
+ return;
+
+ qdio_kick_handler(q, start, count);
+ start = add_buf(start, count);
+ q->first_to_check = start;
+
+ if (!qdio_inbound_q_done(q, start)) {
+ /* means poll time is not yet over */
+ qperf_inc(q, tasklet_inbound_resched);
+ if (!qdio_tasklet_schedule(q))
+ return;
+ }
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!qdio_inbound_q_done(q, start)) {
+ qperf_inc(q, tasklet_inbound_resched2);
+ qdio_tasklet_schedule(q);
+ }
+}
+
+void qdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __qdio_inbound_processing(q);
+}
+
+static void qdio_check_pending(struct qdio_q *q, unsigned int index)
+{
+ unsigned char state;
+
+ if (get_buf_state(q, index, &state, 0) > 0 &&
+ state == SLSB_P_OUTPUT_PENDING &&
+ q->u.out.aobs[index]) {
+ q->u.out.sbal_state[index].flags |=
+ QDIO_OUTBUF_STATE_FLAG_PENDING;
+ q->u.out.aobs[index] = NULL;
+ }
+}
+
+static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
+{
+ unsigned char state = 0;
+ int count;
+
+ q->timestamp = get_tod_clock_fast();
+
+ if (need_siga_sync(q))
+ if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+ !pci_out_supported(q->irq_ptr)) ||
+ (queue_type(q) == QDIO_IQDIO_QFMT &&
+ multicast_outbound(q)))
+ qdio_siga_sync_q(q);
+
+ count = atomic_read(&q->nr_buf_used);
+ if (!count)
+ return 0;
+
+ count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
+ if (!count)
+ return 0;
+
+ switch (state) {
+ case SLSB_P_OUTPUT_EMPTY:
+ case SLSB_P_OUTPUT_PENDING:
+ /* the adapter got it */
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+ "out empty:%1d %02x", q->nr, count);
+
+ atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals(q, count);
+ return count;
+ case SLSB_P_OUTPUT_ERROR:
+ process_buffer_error(q, start, count);
+ atomic_sub(count, &q->nr_buf_used);
+ if (q->irq_ptr->perf_stat_enabled)
+ account_sbals_error(q, count);
+ return count;
+ case SLSB_CU_OUTPUT_PRIMED:
+ /* the adapter has not fetched the output yet */
+ if (q->irq_ptr->perf_stat_enabled)
+ q->q_stats.nr_sbal_nop++;
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
+ q->nr);
+ return 0;
+ case SLSB_P_OUTPUT_HALTED:
+ return 0;
+ case SLSB_P_OUTPUT_NOT_INIT:
+ /* We should never see this state, throw a WARN: */
+ default:
+ dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+ "found state %#x at index %u on queue %u\n",
+ state, start, q->nr);
+ return 0;
+ }
+}
+
+/* all buffers processed? */
+static inline int qdio_outbound_q_done(struct qdio_q *q)
+{
+ return atomic_read(&q->nr_buf_used) == 0;
+}
+
+static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
+{
+ int count;
+
+ count = get_outbound_buffer_frontier(q, start);
+
+ if (count) {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
+
+ if (q->u.out.use_cq) {
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ qdio_check_pending(q, QDIO_BUFNR(start + i));
+ }
+ }
+
+ return count;
+}
+
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
+ unsigned long aob)
+{
+ int retries = 0, cc;
+ unsigned int busy_bit;
+
+ if (!need_siga_out(q))
+ return 0;
+
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
+retry:
+ qperf_inc(q, siga_write);
+
+ cc = qdio_siga_output(q, count, &busy_bit, aob);
+ switch (cc) {
+ case 0:
+ break;
+ case 2:
+ if (busy_bit) {
+ while (++retries < QDIO_BUSY_BIT_RETRIES) {
+ mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
+ goto retry;
+ }
+ DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
+ cc = -EBUSY;
+ } else {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
+ cc = -ENOBUFS;
+ }
+ break;
+ case 1:
+ case 3:
+ DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
+ cc = -EIO;
+ break;
+ }
+ if (retries) {
+ DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
+ DBF_ERROR("count:%u", retries);
+ }
+ return cc;
+}
+
+static void __qdio_outbound_processing(struct qdio_q *q)
+{
+ unsigned int start = q->first_to_check;
+ int count;
+
+ qperf_inc(q, tasklet_outbound);
+ WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
+
+ count = qdio_outbound_q_moved(q, start);
+ if (count) {
+ q->first_to_check = add_buf(start, count);
+ qdio_kick_handler(q, start, count);
+ }
+
+ if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
+ !qdio_outbound_q_done(q))
+ goto sched;
+
+ if (q->u.out.pci_out_enabled)
+ return;
+
+ /*
+ * Now we know that queue type is either qeth without pci enabled
+ * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
+ * is noticed and outbound_handler is called after some time.
+ */
+ if (qdio_outbound_q_done(q))
+ del_timer_sync(&q->u.out.timer);
+ else
+ if (!timer_pending(&q->u.out.timer) &&
+ likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
+ mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
+ return;
+
+sched:
+ qdio_tasklet_schedule(q);
+}
+
+/* outbound tasklet */
+void qdio_outbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __qdio_outbound_processing(q);
+}
+
+void qdio_outbound_timer(struct timer_list *t)
+{
+ struct qdio_q *q = from_timer(q, t, u.out.timer);
+
+ qdio_tasklet_schedule(q);
+}
+
+static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
+{
+ struct qdio_q *out;
+ int i;
+
+ if (!pci_out_supported(irq) || !irq->scan_threshold)
+ return;
+
+ for_each_output_queue(irq, out, i)
+ if (!qdio_outbound_q_done(out))
+ qdio_tasklet_schedule(out);
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+
+ if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+ qdio_sync_queues(q);
+
+ /* The interrupt could be caused by a PCI request: */
+ qdio_check_outbound_pci_queues(q->irq_ptr);
+
+ __qdio_inbound_processing(q);
+}
+
+static inline void qdio_set_state(struct qdio_irq *irq_ptr,
+ enum qdio_irq_states state)
+{
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
+
+ irq_ptr->state = state;
+ mb();
+}
+
+static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
+{
+ if (irb->esw.esw0.erw.cons) {
+ DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
+ DBF_ERROR_HEX(irb, 64);
+ DBF_ERROR_HEX(irb->ecw, 64);
+ }
+}
+
+/* PCI interrupt handler */
+static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
+{
+ int i;
+ struct qdio_q *q;
+
+ if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return;
+
+ if (irq_ptr->irq_poll) {
+ if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+ irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
+ else
+ QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
+ } else {
+ for_each_input_queue(irq_ptr, q, i)
+ tasklet_schedule(&q->tasklet);
+ }
+
+ if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
+ return;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ if (qdio_outbound_q_done(q))
+ continue;
+ if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
+ qdio_siga_sync_q(q);
+ qdio_tasklet_schedule(q);
+ }
+}
+
+static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
+ unsigned long intparm, int cstat,
+ int dstat)
+{
+ struct qdio_q *q;
+
+ DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
+ DBF_ERROR("intp :%lx", intparm);
+ DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+
+ if (irq_ptr->nr_input_qs) {
+ q = irq_ptr->input_qs[0];
+ } else if (irq_ptr->nr_output_qs) {
+ q = irq_ptr->output_qs[0];
+ } else {
+ dump_stack();
+ goto no_handler;
+ }
+
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
+ q->nr, q->first_to_check, 0, irq_ptr->int_parm);
+no_handler:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+ /*
+	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
+ * Therefore we call the LGR detection function here.
+ */
+ lgr_info_log();
+}
+
+static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+ int dstat)
+{
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
+
+ if (cstat)
+ goto error;
+ if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
+ goto error;
+ if (!(dstat & DEV_STAT_DEV_END))
+ goto error;
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+ return;
+
+error:
+ DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
+ DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+}
+
+/* qdio interrupt handler */
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
+ int cstat, dstat;
+
+ if (!intparm || !irq_ptr) {
+ ccw_device_get_schid(cdev, &schid);
+ DBF_ERROR("qint:%4x", schid.sch_no);
+ return;
+ }
+
+ if (irq_ptr->perf_stat_enabled)
+ irq_ptr->perf_stat.qdio_int++;
+
+ if (IS_ERR(irb)) {
+ DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ wake_up(&cdev->private->wait_q);
+ return;
+ }
+ qdio_irq_check_sense(irq_ptr, irb);
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+
+ switch (irq_ptr->state) {
+ case QDIO_IRQ_STATE_INACTIVE:
+ qdio_establish_handle_irq(irq_ptr, cstat, dstat);
+ break;
+ case QDIO_IRQ_STATE_CLEANUP:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ break;
+ case QDIO_IRQ_STATE_ESTABLISHED:
+ case QDIO_IRQ_STATE_ACTIVE:
+ if (cstat & SCHN_STAT_PCI) {
+ qdio_int_handler_pci(irq_ptr);
+ return;
+ }
+ if (cstat || dstat)
+ qdio_handle_activate_check(irq_ptr, intparm, cstat,
+ dstat);
+ break;
+ case QDIO_IRQ_STATE_STOPPED:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ wake_up(&cdev->private->wait_q);
+}
+
+/**
+ * qdio_get_ssqd_desc - get qdio subchannel description
+ * @cdev: ccw device to get description for
+ * @data: where to store the ssqd
+ *
+ * Returns 0 or an error code. The results of the chsc are stored in the
+ * specified structure.
+ */
+int qdio_get_ssqd_desc(struct ccw_device *cdev,
+ struct qdio_ssqd_desc *data)
+{
+ struct subchannel_id schid;
+
+ if (!cdev || !cdev->private)
+ return -EINVAL;
+
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("get ssqd:%4x", schid.sch_no);
+ return qdio_setup_get_ssqd(NULL, &schid, data);
+}
+EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
+
+static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i)
+ tasklet_kill(&q->tasklet);
+
+ for_each_output_queue(irq_ptr, q, i) {
+ del_timer_sync(&q->u.out.timer);
+ tasklet_kill(&q->tasklet);
+ }
+}
+
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
+{
+ struct ccw_device *cdev = irq->cdev;
+ int rc;
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
+ if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+ rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+ else
+ /* default behaviour is halt */
+ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ if (rc) {
+ DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
+ DBF_ERROR("rc:%4d", rc);
+ return rc;
+ }
+
+ wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq->state == QDIO_IRQ_STATE_INACTIVE ||
+ irq->state == QDIO_IRQ_STATE_ERR,
+ 10 * HZ);
+
+ return 0;
+}
+
+/**
+ * qdio_shutdown - shut down a qdio subchannel
+ * @cdev: associated ccw device
+ * @how: use halt or clear to shutdown
+ */
+int qdio_shutdown(struct ccw_device *cdev, int how)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
+ int rc;
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ WARN_ON_ONCE(irqs_disabled());
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qshutdown:%4x", schid.sch_no);
+
+ mutex_lock(&irq_ptr->setup_mutex);
+ /*
+ * Subchannel was already shot down. We cannot prevent being called
+ * twice since cio may trigger a shutdown asynchronously.
+ */
+ if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return 0;
+ }
+
+ /*
+ * Indicate that the device is going down. Scheduling the queue
+ * tasklets is forbidden from here on.
+ */
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
+
+ tiqdio_remove_device(irq_ptr);
+ qdio_shutdown_queues(irq_ptr);
+ qdio_shutdown_debug_entries(irq_ptr);
+
+ rc = qdio_cancel_ccw(irq_ptr, how);
+ qdio_shutdown_thinint(irq_ptr);
+ qdio_shutdown_irq(irq_ptr);
+
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ mutex_unlock(&irq_ptr->setup_mutex);
+ if (rc)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_shutdown);
+
+/**
+ * qdio_free - free data structures for a qdio subchannel
+ * @cdev: associated ccw device
+ */
+int qdio_free(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qfree:%4x", schid.sch_no);
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
+ mutex_lock(&irq_ptr->setup_mutex);
+
+ irq_ptr->debug_area = NULL;
+ cdev->private->qdio_data = NULL;
+ mutex_unlock(&irq_ptr->setup_mutex);
+
+ qdio_free_async_data(irq_ptr);
+ qdio_free_queues(irq_ptr);
+ free_page((unsigned long) irq_ptr->qdr);
+ free_page(irq_ptr->chsc_page);
+ free_page((unsigned long) irq_ptr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_free);
+
+/**
+ * qdio_allocate - allocate qdio queues and associated data
+ * @cdev: associated ccw device
+ * @no_input_qs: allocate this number of Input Queues
+ * @no_output_qs: allocate this number of Output Queues
+ */
+int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
+ unsigned int no_output_qs)
+{
+ struct subchannel_id schid;
+ struct qdio_irq *irq_ptr;
+ int rc = -ENOMEM;
+
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qallocate:%4x", schid.sch_no);
+
+ if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
+ no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
+ return -EINVAL;
+
+ /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
+ irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!irq_ptr)
+ return -ENOMEM;
+
+ irq_ptr->cdev = cdev;
+ mutex_init(&irq_ptr->setup_mutex);
+ if (qdio_allocate_dbf(irq_ptr))
+ goto err_dbf;
+
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
+ no_output_qs);
+
+ /*
+ * Allocate a page for the chsc calls in qdio_establish.
+ * Must be pre-allocated since a zfcp recovery will call
+ * qdio_establish. In case of low memory and swap on a zfcp disk
+ * we may not be able to allocate memory otherwise.
+ */
+ irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
+ if (!irq_ptr->chsc_page)
+ goto err_chsc;
+
+ /* qdr is used in ccw1.cda which is u32 */
+ irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!irq_ptr->qdr)
+ goto err_qdr;
+
+ rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
+ if (rc)
+ goto err_queues;
+
+ INIT_LIST_HEAD(&irq_ptr->entry);
+ cdev->private->qdio_data = irq_ptr;
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ return 0;
+
+err_queues:
+ free_page((unsigned long) irq_ptr->qdr);
+err_qdr:
+ free_page(irq_ptr->chsc_page);
+err_chsc:
+err_dbf:
+ free_page((unsigned long) irq_ptr);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_allocate);
+
+static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q = irq_ptr->input_qs[0];
+ int i, use_cq = 0;
+
+ if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
+ use_cq = 1;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ if (use_cq) {
+ if (multicast_outbound(q))
+ continue;
+ if (qdio_enable_async_operation(&q->u.out) < 0) {
+ use_cq = 0;
+ continue;
+ }
+ } else
+ qdio_disable_async_operation(&q->u.out);
+ }
+ DBF_EVENT("use_cq:%d", use_cq);
+}
+
+static void qdio_trace_init_data(struct qdio_irq *irq,
+ struct qdio_initialize *data)
+{
+ DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
+ DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
+ DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
+ DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
+ data->no_output_qs);
+ DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
+ DBF_ERR);
+}
+
+/**
+ * qdio_establish - establish queues on a qdio subchannel
+ * @cdev: associated ccw device
+ * @init_data: initialization data
+ */
+int qdio_establish(struct ccw_device *cdev,
+ struct qdio_initialize *init_data)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
+ long timeout;
+ int rc;
+
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qestablish:%4x", schid.sch_no);
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ if (init_data->no_input_qs > irq_ptr->max_input_qs ||
+ init_data->no_output_qs > irq_ptr->max_output_qs)
+ return -EINVAL;
+
+ if ((init_data->no_input_qs && !init_data->input_handler) ||
+ (init_data->no_output_qs && !init_data->output_handler))
+ return -EINVAL;
+
+ if (!init_data->input_sbal_addr_array ||
+ !init_data->output_sbal_addr_array)
+ return -EINVAL;
+
+ mutex_lock(&irq_ptr->setup_mutex);
+ qdio_trace_init_data(irq_ptr, init_data);
+ qdio_setup_irq(irq_ptr, init_data);
+
+ rc = qdio_establish_thinint(irq_ptr);
+ if (rc)
+ goto err_thinint;
+
+ /* establish q */
+ irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
+ irq_ptr->ccw.flags = CCW_FLAG_SLI;
+ irq_ptr->ccw.count = irq_ptr->equeue.count;
+ irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ ccw_device_set_options_mask(cdev, 0);
+
+ rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ if (rc) {
+ DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ goto err_ccw_start;
+ }
+
+ timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+ irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+ if (timeout <= 0) {
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+ goto err_ccw_timeout;
+ }
+
+ if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
+ return -EIO;
+ }
+
+ qdio_setup_ssqd_info(irq_ptr);
+
+ qdio_detect_hsicq(irq_ptr);
+
+ /* qebsm is now setup if available, initialize buffer states */
+ qdio_init_buf_states(irq_ptr);
+
+ mutex_unlock(&irq_ptr->setup_mutex);
+ qdio_print_subchannel_info(irq_ptr);
+ qdio_setup_debug_entries(irq_ptr);
+ return 0;
+
+err_ccw_timeout:
+ qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
+err_ccw_start:
+ qdio_shutdown_thinint(irq_ptr);
+err_thinint:
+ qdio_shutdown_irq(irq_ptr);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_establish);
+
+/**
+ * qdio_activate - activate queues on a qdio subchannel
+ * @cdev: associated cdev
+ */
+int qdio_activate(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
+ int rc;
+
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qactivate:%4x", schid.sch_no);
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ mutex_lock(&irq_ptr->setup_mutex);
+ if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
+ irq_ptr->ccw.flags = CCW_FLAG_SLI;
+ irq_ptr->ccw.count = irq_ptr->aqueue.count;
+ irq_ptr->ccw.cda = 0;
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
+
+ rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
+ 0, DOIO_DENY_PREFETCH);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ if (rc) {
+ DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ goto out;
+ }
+
+ if (is_thinint_irq(irq_ptr))
+ tiqdio_add_device(irq_ptr);
+
+ /* wait for subchannel to become active */
+ msleep(5);
+
+ switch (irq_ptr->state) {
+ case QDIO_IRQ_STATE_STOPPED:
+ case QDIO_IRQ_STATE_ERR:
+ rc = -EIO;
+ break;
+ default:
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
+ rc = 0;
+ }
+out:
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qdio_activate);
+
+/**
+ * handle_inbound - reset processed input buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are emptied
+ */
+static int handle_inbound(struct qdio_q *q, unsigned int callflags,
+ int bufnr, int count)
+{
+ int overlap;
+
+ qperf_inc(q, inbound_call);
+
+ /* If any processed SBALs are returned to HW, adjust our tracking: */
+ overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
+ q->u.in.batch_count);
+ if (overlap > 0) {
+ q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
+ q->u.in.batch_count -= overlap;
+ }
+
+ count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
+ atomic_add(count, &q->nr_buf_used);
+
+ if (need_siga_in(q))
+ return qdio_siga_input(q);
+
+ return 0;
+}
+
+/**
+ * handle_outbound - process filled outbound buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are filled
+ */
+static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+ unsigned int bufnr, unsigned int count)
+{
+ const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
+ unsigned char state = 0;
+ int used, rc = 0;
+
+ qperf_inc(q, outbound_call);
+
+ count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
+ used = atomic_add_return(count, &q->nr_buf_used);
+
+ if (used == QDIO_MAX_BUFFERS_PER_Q)
+ qperf_inc(q, outbound_queue_full);
+
+ if (callflags & QDIO_FLAG_PCI_OUT) {
+ q->u.out.pci_out_enabled = 1;
+ qperf_inc(q, pci_request_int);
+ } else
+ q->u.out.pci_out_enabled = 0;
+
+ if (queue_type(q) == QDIO_IQDIO_QFMT) {
+ unsigned long phys_aob = 0;
+
+ if (q->u.out.use_cq && count == 1)
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+ rc = qdio_kick_outbound_q(q, count, phys_aob);
+ } else if (need_siga_sync(q)) {
+ rc = qdio_siga_sync_q(q);
+ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+ state == SLSB_CU_OUTPUT_PRIMED) {
+ /* The previous buffer is not processed yet, tack on. */
+ qperf_inc(q, fast_requeue);
+ } else {
+ rc = qdio_kick_outbound_q(q, count, 0);
+ }
+
+ /* Let drivers implement their own completion scanning: */
+ if (!scan_threshold)
+ return rc;
+
+ /* in case of SIGA errors we must process the error immediately */
+ if (used >= scan_threshold || rc)
+ qdio_tasklet_schedule(q);
+ else
+ /* free the SBALs in case of no further traffic */
+ if (!timer_pending(&q->u.out.timer) &&
+ likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
+ mod_timer(&q->u.out.timer, jiffies + HZ);
+ return rc;
+}
+
+/**
+ * do_QDIO - process input or output buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @callflags: input or output and special flags from the program
+ * @q_nr: queue number
+ * @bufnr: buffer number
+ * @count: how many buffers to process
+ */
+int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+ int q_nr, unsigned int bufnr, unsigned int count)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
+ return -EINVAL;
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr,
+ "do%02x b:%02x c:%02x", callflags, bufnr, count);
+
+ if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
+ return -EIO;
+ if (!count)
+ return 0;
+ if (callflags & QDIO_FLAG_SYNC_INPUT)
+ return handle_inbound(irq_ptr->input_qs[q_nr],
+ callflags, bufnr, count);
+ else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
+ return handle_outbound(irq_ptr->output_qs[q_nr],
+ callflags, bufnr, count);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(do_QDIO);
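+
+/*
+ * Usage sketch (illustrative only, not taken from an in-tree driver): once a
+ * driver has filled SBAL 'bufnr' on output queue 0 of its established qdio
+ * device 'cdev', it would hand the buffer to the hardware roughly like this:
+ *
+ *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
+ *		     0, bufnr, 1);
+ *	if (rc)
+ *		... // treat the queue as broken and start device recovery
+ */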
+
+/**
+ * qdio_start_irq - enable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ *
+ * Return codes
+ * 0 - success
+ * 1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ unsigned int i;
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ for_each_input_queue(irq_ptr, q, i)
+ qdio_stop_polling(q);
+
+ clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
+
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (test_nonshared_ind(irq_ptr))
+ goto rescan;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ if (!qdio_inbound_q_done(q, q->first_to_check))
+ goto rescan;
+ }
+
+ return 0;
+
+rescan:
+ if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+ return 0;
+ else
+ return 1;
+
+}
+EXPORT_SYMBOL(qdio_start_irq);
+
+static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
+ unsigned int *error)
+{
+ unsigned int start = q->first_to_check;
+ int count;
+
+ count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
+ qdio_outbound_q_moved(q, start);
+ if (count == 0)
+ return 0;
+
+ *bufnr = start;
+ *error = q->qdio_error;
+
+ /* for the next time */
+ q->first_to_check = add_buf(start, count);
+ q->qdio_error = 0;
+
+ return count;
+}
+
+int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
+ unsigned int *bufnr, unsigned int *error)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct qdio_q *q;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
+
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
+
+ return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_queue);
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ * < 0 - error
+ * = 0 - no new buffers found
+ * > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+ int *error)
+{
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+
+ /*
+ * Cannot rely on automatic sync after interrupt since queues may
+ * also be examined without interrupt.
+ */
+ if (need_siga_sync(q))
+ qdio_sync_queues(q);
+
+ qdio_check_outbound_pci_queues(irq_ptr);
+
+ /* Note: upper-layer MUST stop processing immediately here ... */
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return -EIO;
+
+ return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ *
+ * Return codes
+ * 0 - interrupts were already disabled
+ * 1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+ if (!irq_ptr)
+ return -ENODEV;
+
+ if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
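+
+/*
+ * Illustrative polling sequence (a sketch, assuming the driver registered an
+ * irq_poll callback, processes input queue 0 and declares int n, bufnr and
+ * error itself): the callback drains the queue and only leaves polling mode
+ * once no new data shows up after re-enabling interrupts:
+ *
+ *	do {
+ *		while ((n = qdio_get_next_buffers(cdev, 0, &bufnr, &error)) > 0)
+ *			... // process 'n' buffers starting at index 'bufnr'
+ *	} while (qdio_start_irq(cdev));	// rc 1: new data arrived, keep polling
+ *
+ * qdio_stop_irq() is the counterpart a driver calls when it wants to switch
+ * back to polling mode itself.
+ */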
+
+static int __init init_QDIO(void)
+{
+ int rc;
+
+ rc = qdio_debug_init();
+ if (rc)
+ return rc;
+ rc = qdio_setup_init();
+ if (rc)
+ goto out_debug;
+ rc = qdio_thinint_init();
+ if (rc)
+ goto out_cache;
+ return 0;
+
+out_cache:
+ qdio_setup_exit();
+out_debug:
+ qdio_debug_exit();
+ return rc;
+}
+
+static void __exit exit_QDIO(void)
+{
+ qdio_thinint_exit();
+ qdio_setup_exit();
+ qdio_debug_exit();
+}
+
+module_init(init_QDIO);
+module_exit(exit_QDIO);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
new file mode 100644
index 000000000..a5b2e16b7
--- /dev/null
+++ b/drivers/s390/cio/qdio_setup.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * qdio queue initialization
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/io.h>
+
+#include <asm/ebcdic.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "ioasm.h"
+#include "chsc.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
+
+static struct kmem_cache *qdio_q_cache;
+static struct kmem_cache *qdio_aob_cache;
+
+struct qaob *qdio_allocate_aob(void)
+{
+ return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
+}
+
+void qdio_release_aob(struct qaob *aob)
+{
+ kmem_cache_free(qdio_aob_cache, aob);
+}
+EXPORT_SYMBOL_GPL(qdio_release_aob);
+
+/**
+ * qdio_free_buffers() - free qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers to free
+ */
+void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+ int pos;
+
+ for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
+ free_page((unsigned long) buf[pos]);
+}
+EXPORT_SYMBOL_GPL(qdio_free_buffers);
+
+/**
+ * qdio_alloc_buffers() - allocate qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers to allocate
+ */
+int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+ int pos;
+
+ for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
+ buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!buf[pos]) {
+ qdio_free_buffers(buf, count);
+ return -ENOMEM;
+ }
+ }
+ for (pos = 0; pos < count; pos++)
+ if (pos % QBUFF_PER_PAGE)
+ buf[pos] = buf[pos - 1] + 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_alloc_buffers);
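+
+/*
+ * Allocation sketch (assumes a driver-owned array of buffer pointers;
+ * illustrative only):
+ *
+ *	struct qdio_buffer *sbals[QDIO_MAX_BUFFERS_PER_Q];
+ *
+ *	if (qdio_alloc_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q))
+ *		return -ENOMEM;
+ *	...
+ *	qdio_free_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q);
+ */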
+
+/**
+ * qdio_reset_buffers() - reset qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers that will be zeroed
+ */
+void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+ int pos;
+
+ for (pos = 0; pos < count; pos++)
+ memset(buf[pos], 0, sizeof(struct qdio_buffer));
+}
+EXPORT_SYMBOL_GPL(qdio_reset_buffers);
+
+/*
+ * QEBSM is only available on 64-bit, but the adapter sets the feature
+ * flag anyway, so we manually override it.
+ */
+static inline int qebsm_possible(void)
+{
+ return css_general_characteristics.qebsm;
+}
+
+/*
+ * qib_param_field: pointer to 128 bytes of parameter data, or NULL if there
+ * is no param field
+ * input/output_slib_elements: pointer to nr_queues * 128 words of data, or NULL
+ */
+static void set_impl_params(struct qdio_irq *irq_ptr,
+ unsigned int qib_param_field_format,
+ unsigned char *qib_param_field,
+ unsigned long *input_slib_elements,
+ unsigned long *output_slib_elements)
+{
+ struct qdio_q *q;
+ int i, j;
+
+ if (!irq_ptr)
+ return;
+
+ irq_ptr->qib.pfmt = qib_param_field_format;
+ if (qib_param_field)
+ memcpy(irq_ptr->qib.parm, qib_param_field,
+ sizeof(irq_ptr->qib.parm));
+
+ if (!input_slib_elements)
+ goto output;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->slib->slibe[j].parms =
+ input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+ }
+output:
+ if (!output_slib_elements)
+ return;
+
+ for_each_output_queue(irq_ptr, q, i) {
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->slib->slibe[j].parms =
+ output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
+ }
+}
+
+static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
+{
+ struct qdio_q *q;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ q = queues[i];
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+}
+
+void qdio_free_queues(struct qdio_irq *irq_ptr)
+{
+ __qdio_free_queues(irq_ptr->input_qs, irq_ptr->max_input_qs);
+ irq_ptr->max_input_qs = 0;
+
+ __qdio_free_queues(irq_ptr->output_qs, irq_ptr->max_output_qs);
+ irq_ptr->max_output_qs = 0;
+}
+
+static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+{
+ struct qdio_q *q;
+ int i;
+
+ for (i = 0; i < nr_queues; i++) {
+ q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
+ if (!q) {
+ __qdio_free_queues(irq_ptr_qs, i);
+ return -ENOMEM;
+ }
+
+ q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
+ if (!q->slib) {
+ kmem_cache_free(qdio_q_cache, q);
+ __qdio_free_queues(irq_ptr_qs, i);
+ return -ENOMEM;
+ }
+ irq_ptr_qs[i] = q;
+ }
+ return 0;
+}
+
+int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
+{
+ int rc;
+
+ rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
+ if (rc)
+ return rc;
+
+ rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
+ if (rc) {
+ __qdio_free_queues(irq_ptr->input_qs, nr_input_qs);
+ return rc;
+ }
+
+ irq_ptr->max_input_qs = nr_input_qs;
+ irq_ptr->max_output_qs = nr_output_qs;
+ return 0;
+}
+
+static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
+ qdio_handler_t *handler, int i)
+{
+ struct slib *slib = q->slib;
+
+ /* queue must be cleared for qdio_establish */
+ memset(q, 0, sizeof(*q));
+ memset(slib, 0, PAGE_SIZE);
+ q->slib = slib;
+ q->irq_ptr = irq_ptr;
+ q->mask = 1 << (31 - i);
+ q->nr = i;
+ q->handler = handler;
+}
+
+static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+ struct qdio_buffer **sbals_array, int i)
+{
+ struct qdio_q *prev;
+ int j;
+
+ DBF_HEX(&q, sizeof(void *));
+ q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
+
+ /* fill in sbal */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->sbal[j] = *sbals_array++;
+
+ /* fill in slib */
+ if (i > 0) {
+ prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
+ : irq_ptr->output_qs[i - 1];
+ prev->slib->nsliba = (unsigned long)q->slib;
+ }
+
+ q->slib->sla = (unsigned long)q->sl;
+ q->slib->slsba = (unsigned long)&q->slsb.val[0];
+
+ /* fill in sl */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
+}
+
+static void setup_queues(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *qdio_init)
+{
+ struct qdio_q *q;
+ struct qdio_outbuf_state *output_sbal_state_array =
+ qdio_init->output_sbal_state_array;
+ int i;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ DBF_EVENT("inq:%1d", i);
+ setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
+
+ q->is_input_q = 1;
+
+ setup_storage_lists(q, irq_ptr,
+ qdio_init->input_sbal_addr_array[i], i);
+
+ if (is_thinint_irq(irq_ptr)) {
+ tasklet_init(&q->tasklet, tiqdio_inbound_processing,
+ (unsigned long) q);
+ } else {
+ tasklet_init(&q->tasklet, qdio_inbound_processing,
+ (unsigned long) q);
+ }
+ }
+
+ for_each_output_queue(irq_ptr, q, i) {
+ DBF_EVENT("outq:%1d", i);
+ setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
+
+ q->u.out.sbal_state = output_sbal_state_array;
+ output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
+ q->is_input_q = 0;
+ setup_storage_lists(q, irq_ptr,
+ qdio_init->output_sbal_addr_array[i], i);
+
+ tasklet_init(&q->tasklet, qdio_outbound_processing,
+ (unsigned long) q);
+ timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
+ }
+}
+
+static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
+{
+ if (qdioac & AC1_SIGA_INPUT_NEEDED)
+ irq_ptr->siga_flag.input = 1;
+ if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
+ irq_ptr->siga_flag.output = 1;
+ if (qdioac & AC1_SIGA_SYNC_NEEDED)
+ irq_ptr->siga_flag.sync = 1;
+ if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
+ irq_ptr->siga_flag.sync_after_ai = 1;
+ if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
+ irq_ptr->siga_flag.sync_out_after_pci = 1;
+}
+
+static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
+ unsigned char qdioac, unsigned long token)
+{
+ if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
+ goto no_qebsm;
+ if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
+ (!(qdioac & AC1_SC_QEBSM_ENABLED)))
+ goto no_qebsm;
+
+ irq_ptr->sch_token = token;
+
+ DBF_EVENT("V=V:1");
+ DBF_EVENT("%8lx", irq_ptr->sch_token);
+ return;
+
+no_qebsm:
+ irq_ptr->sch_token = 0;
+ irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
+ DBF_EVENT("noV=V");
+}
+
+/*
+ * If there is a qdio_irq we use its chsc_page for the CHSC call, otherwise
+ * we allocate a temporary page. In both cases the result is copied to the
+ * specified qdio_ssqd_desc structure.
+ */
+int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
+ struct subchannel_id *schid,
+ struct qdio_ssqd_desc *data)
+{
+ struct chsc_ssqd_area *ssqd;
+ int rc;
+
+ DBF_EVENT("getssqd:%4x", schid->sch_no);
+ if (!irq_ptr) {
+ ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
+ if (!ssqd)
+ return -ENOMEM;
+ } else {
+ ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
+ }
+
+ rc = chsc_ssqd(*schid, ssqd);
+ if (rc)
+ goto out;
+
+ if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
+ !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
+ (ssqd->qdio_ssqd.sch != schid->sch_no))
+ rc = -EINVAL;
+
+ if (!rc)
+ memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));
+
+out:
+ if (!irq_ptr)
+ free_page((unsigned long)ssqd);
+
+ return rc;
+}
+
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
+{
+ unsigned char qdioac;
+ int rc;
+
+ rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
+ if (rc) {
+ DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%x", rc);
+ /* all flags set, worst case */
+ qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
+ AC1_SIGA_SYNC_NEEDED;
+ } else
+ qdioac = irq_ptr->ssqd_desc.qdioac1;
+
+ check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
+ process_ac_flags(irq_ptr, qdioac);
+ DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
+ DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
+}
+
+void qdio_free_async_data(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ for (i = 0; i < irq_ptr->max_output_qs; i++) {
+ q = irq_ptr->output_qs[i];
+ if (q->u.out.use_cq) {
+ unsigned int n;
+
+ for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
+ struct qaob *aob = q->u.out.aobs[n];
+
+ if (aob) {
+ qdio_release_aob(aob);
+ q->u.out.aobs[n] = NULL;
+ }
+ }
+
+ qdio_disable_async_operation(&q->u.out);
+ }
+ }
+}
+
+static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
+{
+ desc->sliba = virt_to_phys(queue->slib);
+ desc->sla = virt_to_phys(queue->sl);
+ desc->slsba = virt_to_phys(&queue->slsb);
+
+ desc->akey = PAGE_DEFAULT_KEY >> 4;
+ desc->bkey = PAGE_DEFAULT_KEY >> 4;
+ desc->ckey = PAGE_DEFAULT_KEY >> 4;
+ desc->dkey = PAGE_DEFAULT_KEY >> 4;
+}
+
+static void setup_qdr(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *qdio_init)
+{
+ struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0];
+ int i;
+
+ irq_ptr->qdr->qfmt = qdio_init->q_format;
+ irq_ptr->qdr->ac = qdio_init->qdr_ac;
+ irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
+ irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
+ irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
+ irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
+ irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib);
+ irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
+
+ for (i = 0; i < qdio_init->no_input_qs; i++)
+ qdio_fill_qdr_desc(desc++, irq_ptr->input_qs[i]);
+
+ for (i = 0; i < qdio_init->no_output_qs; i++)
+ qdio_fill_qdr_desc(desc++, irq_ptr->output_qs[i]);
+}
+
+static void setup_qib(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *init_data)
+{
+ if (qebsm_possible())
+ irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
+ irq_ptr->qib.rflags |= init_data->qib_rflags;
+
+ irq_ptr->qib.qfmt = init_data->q_format;
+ if (init_data->no_input_qs)
+ irq_ptr->qib.isliba =
+ (unsigned long)(irq_ptr->input_qs[0]->slib);
+ if (init_data->no_output_qs)
+ irq_ptr->qib.osliba =
+ (unsigned long)(irq_ptr->output_qs[0]->slib);
+ memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8);
+ ASCEBC(irq_ptr->qib.ebcnam, 8);
+}
+
+int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
+{
+ struct ccw_device *cdev = irq_ptr->cdev;
+ struct ciw *ciw;
+
+ memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
+ memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+ memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
+ memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+
+ irq_ptr->debugfs_dev = NULL;
+ irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
+ irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
+
+ /* wipes qib.ac, required by ar7063 */
+ memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
+ irq_ptr->int_parm = init_data->int_parm;
+ irq_ptr->nr_input_qs = init_data->no_input_qs;
+ irq_ptr->nr_output_qs = init_data->no_output_qs;
+ irq_ptr->scan_threshold = init_data->scan_threshold;
+ ccw_device_get_schid(cdev, &irq_ptr->schid);
+ setup_queues(irq_ptr, init_data);
+
+ if (init_data->irq_poll) {
+ irq_ptr->irq_poll = init_data->irq_poll;
+ set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
+ } else {
+ irq_ptr->irq_poll = NULL;
+ }
+
+ setup_qib(irq_ptr, init_data);
+ set_impl_params(irq_ptr, init_data->qib_param_field_format,
+ init_data->qib_param_field,
+ init_data->input_slib_elements,
+ init_data->output_slib_elements);
+
+ /* fill input and output descriptors */
+ setup_qdr(irq_ptr, init_data);
+
+ /* qdr, qib, sls, slsbs, slibs, sbales are filled now */
+
+ /* set our IRQ handler */
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ irq_ptr->orig_handler = cdev->handler;
+ cdev->handler = qdio_int_handler;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
+ /* get qdio commands */
+ ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
+ if (!ciw) {
+ DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
+ return -EINVAL;
+ }
+ irq_ptr->equeue = *ciw;
+
+ ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
+ if (!ciw) {
+ DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
+ return -EINVAL;
+ }
+ irq_ptr->aqueue = *ciw;
+
+ return 0;
+}
+
+void qdio_shutdown_irq(struct qdio_irq *irq)
+{
+ struct ccw_device *cdev = irq->cdev;
+
+ /* restore IRQ handler */
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->handler = irq->orig_handler;
+ cdev->private->intparm = 0;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+}
+
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
+{
+ char s[80];
+
+ snprintf(s, 80, "qdio: %s %s on SC %x using "
+ "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
+ dev_name(&irq_ptr->cdev->dev),
+ (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
+ ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
+ irq_ptr->schid.sch_no,
+ is_thinint_irq(irq_ptr),
+ (irq_ptr->sch_token) ? 1 : 0,
+ pci_out_supported(irq_ptr) ? 1 : 0,
+ css_general_characteristics.aif_tdd,
+ (irq_ptr->siga_flag.input) ? "R" : " ",
+ (irq_ptr->siga_flag.output) ? "W" : " ",
+ (irq_ptr->siga_flag.sync) ? "S" : " ",
+ (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
+ (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
+ printk(KERN_INFO "%s", s);
+}
+
+int qdio_enable_async_operation(struct qdio_output_q *outq)
+{
+ outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
+ GFP_KERNEL);
+ if (!outq->aobs) {
+ outq->use_cq = 0;
+ return -ENOMEM;
+ }
+ outq->use_cq = 1;
+ return 0;
+}
+
+void qdio_disable_async_operation(struct qdio_output_q *q)
+{
+ kfree(q->aobs);
+ q->aobs = NULL;
+ q->use_cq = 0;
+}
+
+int __init qdio_setup_init(void)
+{
+ int rc;
+
+ qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
+ 256, 0, NULL);
+ if (!qdio_q_cache)
+ return -ENOMEM;
+
+ qdio_aob_cache = kmem_cache_create("qdio_aob",
+ sizeof(struct qaob),
+ sizeof(struct qaob),
+ 0,
+ NULL);
+ if (!qdio_aob_cache) {
+ rc = -ENOMEM;
+ goto free_qdio_q_cache;
+ }
+
+ /* Check for OSA/FCP thin interrupts (bit 67). */
+ DBF_EVENT("thinint:%1d",
+ (css_general_characteristics.aif_osa) ? 1 : 0);
+
+ /* Check for QEBSM support in general (bit 58). */
+ DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
+ rc = 0;
+out:
+ return rc;
+free_qdio_q_cache:
+ kmem_cache_destroy(qdio_q_cache);
+ goto out;
+}
+
+void qdio_setup_exit(void)
+{
+ kmem_cache_destroy(qdio_aob_cache);
+ kmem_cache_destroy(qdio_q_cache);
+}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
new file mode 100644
index 000000000..7a440e432
--- /dev/null
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2000, 2009
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
+#include <linux/atomic.h>
+#include <linux/rculist.h>
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "ioasm.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+
+/*
+ * Restriction: only the first 63 iqdio subchannels get their own indicator;
+ * all subsequent subchannels share one indicator.
+ */
+#define TIQDIO_NR_NONSHARED_IND 63
+#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
+#define TIQDIO_SHARED_IND 63
+
+/* device state change indicators */
+struct indicator_t {
+ u32 ind; /* u32 because of compare-and-swap performance */
+ atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
+
+/* list of thin interrupt input queues */
+static LIST_HEAD(tiq_list);
+static DEFINE_MUTEX(tiq_list_lock);
+
+static struct indicator_t *q_indicators;
+
+u64 last_ai_time;
+
+/* returns addr for the device state change indicator */
+static u32 *get_indicator(void)
+{
+ int i;
+
+ for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
+ if (!atomic_cmpxchg(&q_indicators[i].count, 0, 1))
+ return &q_indicators[i].ind;
+
+ /* use the shared indicator */
+ atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
+ return &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static void put_indicator(u32 *addr)
+{
+ struct indicator_t *ind = container_of(addr, struct indicator_t, ind);
+
+ if (!addr)
+ return;
+ atomic_dec(&ind->count);
+}
+
+void tiqdio_add_device(struct qdio_irq *irq_ptr)
+{
+ mutex_lock(&tiq_list_lock);
+ list_add_rcu(&irq_ptr->entry, &tiq_list);
+ mutex_unlock(&tiq_list_lock);
+}
+
+void tiqdio_remove_device(struct qdio_irq *irq_ptr)
+{
+ mutex_lock(&tiq_list_lock);
+ list_del_rcu(&irq_ptr->entry);
+ mutex_unlock(&tiq_list_lock);
+ synchronize_rcu();
+ INIT_LIST_HEAD(&irq_ptr->entry);
+}
+
+static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
+{
+ return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+int test_nonshared_ind(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return 0;
+ if (references_shared_dsci(irq_ptr))
+ return 0;
+ if (*irq_ptr->dsci)
+ return 1;
+ else
+ return 0;
+}
+
+static inline u32 clear_shared_ind(void)
+{
+ if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
+ return 0;
+ return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
+}
+
+static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
+{
+ struct qdio_q *q;
+ int i;
+
+ if (!references_shared_dsci(irq))
+ xchg(irq->dsci, 0);
+
+ if (irq->irq_poll) {
+ if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
+ irq->irq_poll(irq->cdev, irq->int_parm);
+ else
+ QDIO_PERF_STAT_INC(irq, int_discarded);
+
+ return;
+ }
+
+ for_each_input_queue(irq, q, i) {
+ /*
+ * Call inbound processing but not directly
+ * since that could starve other thinint queues.
+ */
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @airq: pointer to adapter interrupt descriptor
+ * @floating: flag to recognize floating vs. directed interrupts (unused)
+ */
+static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
+{
+ u32 si_used = clear_shared_ind();
+ struct qdio_irq *irq;
+
+ last_ai_time = S390_lowcore.int_clock;
+ inc_irq_stat(IRQIO_QAI);
+
+ /* protect tiq_list entries, only changed in activate or shutdown */
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(irq, &tiq_list, entry) {
+ /* only process queues from changed sets */
+ if (unlikely(references_shared_dsci(irq))) {
+ if (!si_used)
+ continue;
+ } else if (!*irq->dsci)
+ continue;
+
+ tiqdio_call_inq_handlers(irq);
+
+ QDIO_PERF_STAT_INC(irq, adapter_int);
+ }
+ rcu_read_unlock();
+}
+
+static struct airq_struct tiqdio_airq = {
+ .handler = tiqdio_thinint_handler,
+ .isc = QDIO_AIRQ_ISC,
+};
+
+static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
+{
+ struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
+ u64 summary_indicator_addr, subchannel_indicator_addr;
+ int rc;
+
+ if (reset) {
+ summary_indicator_addr = 0;
+ subchannel_indicator_addr = 0;
+ } else {
+ summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
+ subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
+ }
+
+ rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
+ subchannel_indicator_addr, tiqdio_airq.isc);
+ if (rc) {
+ DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
+ scssc->response.code);
+ goto out;
+ }
+
+ DBF_EVENT("setscind");
+ DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
+ DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
+out:
+ return rc;
+}
+
+int qdio_establish_thinint(struct qdio_irq *irq_ptr)
+{
+ int rc;
+
+ if (!is_thinint_irq(irq_ptr))
+ return 0;
+
+ irq_ptr->dsci = get_indicator();
+ DBF_HEX(&irq_ptr->dsci, sizeof(void *));
+
+ rc = set_subchannel_ind(irq_ptr, 0);
+ if (rc)
+ put_indicator(irq_ptr->dsci);
+
+ return rc;
+}
+
+void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
+{
+ if (!is_thinint_irq(irq_ptr))
+ return;
+
+ /* reset adapter interrupt indicators */
+ set_subchannel_ind(irq_ptr, 1);
+ put_indicator(irq_ptr->dsci);
+}
+
+int __init qdio_thinint_init(void)
+{
+ int rc;
+
+ q_indicators = kcalloc(TIQDIO_NR_INDICATORS, sizeof(struct indicator_t),
+ GFP_KERNEL);
+ if (!q_indicators)
+ return -ENOMEM;
+
+ rc = register_adapter_interrupt(&tiqdio_airq);
+ if (rc) {
+ DBF_EVENT("RTI:%x", rc);
+ kfree(q_indicators);
+ return rc;
+ }
+ return 0;
+}
+
+void __exit qdio_thinint_exit(void)
+{
+ WARN_ON(!list_empty(&tiq_list));
+ unregister_adapter_interrupt(&tiqdio_airq);
+ kfree(q_indicators);
+}
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
new file mode 100644
index 000000000..9f26d4310
--- /dev/null
+++ b/drivers/s390/cio/scm.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Recognize and maintain s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/eadm.h>
+#include "chsc.h"
+
+static struct device *scm_root;
+
+#define to_scm_dev(n) container_of(n, struct scm_device, dev)
+#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
+
+static int scmdev_probe(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
+}
+
+static int scmdev_remove(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+
+ return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
+}
+
+static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "MODALIAS=scm:scmdev");
+}
+
+static struct bus_type scm_bus_type = {
+ .name = "scm",
+ .probe = scmdev_probe,
+ .remove = scmdev_remove,
+ .uevent = scmdev_uevent,
+};
+
+/**
+ * scm_driver_register() - register a scm driver
+ * @scmdrv: driver to be registered
+ */
+int scm_driver_register(struct scm_driver *scmdrv)
+{
+ struct device_driver *drv = &scmdrv->drv;
+
+ drv->bus = &scm_bus_type;
+
+ return driver_register(drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_register);
+
+/**
+ * scm_driver_unregister() - deregister a scm driver
+ * @scmdrv: driver to be deregistered
+ */
+void scm_driver_unregister(struct scm_driver *scmdrv)
+{
+ driver_unregister(&scmdrv->drv);
+}
+EXPORT_SYMBOL_GPL(scm_driver_unregister);
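+
+/*
+ * Registration sketch (the example_* names are made up for illustration; a
+ * real driver also wires up ->notify and ->handler for state changes and
+ * request completion):
+ *
+ *	static struct scm_driver example_scm_driver = {
+ *		.drv = {
+ *			.name	= "example_scm",
+ *			.owner	= THIS_MODULE,
+ *		},
+ *		.probe	= example_probe,
+ *		.remove	= example_remove,
+ *	};
+ *
+ *	rc = scm_driver_register(&example_scm_driver);
+ */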
+
+void scm_irq_handler(struct aob *aob, blk_status_t error)
+{
+ struct aob_rq_header *aobrq = (void *) aob->request.data;
+ struct scm_device *scmdev = aobrq->scmdev;
+ struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);
+
+ scmdrv->handler(scmdev, aobrq->data, error);
+}
+EXPORT_SYMBOL_GPL(scm_irq_handler);
+
+#define scm_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct scm_device *scmdev = to_scm_dev(dev); \
+ int ret; \
+ \
+ device_lock(dev); \
+ ret = sprintf(buf, "%u\n", scmdev->attrs.name); \
+ device_unlock(dev); \
+ \
+ return ret; \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+scm_attr(persistence);
+scm_attr(oper_state);
+scm_attr(data_state);
+scm_attr(rank);
+scm_attr(release);
+scm_attr(res_id);
+
+static struct attribute *scmdev_attrs[] = {
+ &dev_attr_persistence.attr,
+ &dev_attr_oper_state.attr,
+ &dev_attr_data_state.attr,
+ &dev_attr_rank.attr,
+ &dev_attr_release.attr,
+ &dev_attr_res_id.attr,
+ NULL,
+};
+
+static struct attribute_group scmdev_attr_group = {
+ .attrs = scmdev_attrs,
+};
+
+static const struct attribute_group *scmdev_attr_groups[] = {
+ &scmdev_attr_group,
+ NULL,
+};
+
+static void scmdev_release(struct device *dev)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ kfree(scmdev);
+}
+
+static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
+ unsigned int size, unsigned int max_blk_count)
+{
+ dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
+ scmdev->nr_max_block = max_blk_count;
+ scmdev->address = sale->sa;
+ scmdev->size = 1UL << size;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.persistence = sale->p;
+ scmdev->attrs.oper_state = sale->op_state;
+ scmdev->attrs.data_state = sale->data_state;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.release = sale->r;
+ scmdev->attrs.res_id = sale->rid;
+ scmdev->dev.parent = scm_root;
+ scmdev->dev.bus = &scm_bus_type;
+ scmdev->dev.release = scmdev_release;
+ scmdev->dev.groups = scmdev_attr_groups;
+}
+
+/*
+ * Check for state-changes, notify the driver and userspace.
+ */
+static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
+{
+ struct scm_driver *scmdrv;
+ bool changed;
+
+ device_lock(&scmdev->dev);
+ changed = scmdev->attrs.rank != sale->rank ||
+ scmdev->attrs.oper_state != sale->op_state;
+ scmdev->attrs.rank = sale->rank;
+ scmdev->attrs.oper_state = sale->op_state;
+ if (!scmdev->dev.driver)
+ goto out;
+ scmdrv = to_scm_drv(scmdev->dev.driver);
+ if (changed && scmdrv->notify)
+ scmdrv->notify(scmdev, SCM_CHANGE);
+out:
+ device_unlock(&scmdev->dev);
+ if (changed)
+ kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int check_address(struct device *dev, const void *data)
+{
+ struct scm_device *scmdev = to_scm_dev(dev);
+ const struct sale *sale = data;
+
+ return scmdev->address == sale->sa;
+}
+
+static struct scm_device *scmdev_find(struct sale *sale)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);
+
+ return dev ? to_scm_dev(dev) : NULL;
+}
+
+static int scm_add(struct chsc_scm_info *scm_info, size_t num)
+{
+ struct sale *sale, *scmal = scm_info->scmal;
+ struct scm_device *scmdev;
+ int ret;
+
+ for (sale = scmal; sale < scmal + num; sale++) {
+ scmdev = scmdev_find(sale);
+ if (scmdev) {
+ scmdev_update(scmdev, sale);
+ /* Release reference from scm_find(). */
+ put_device(&scmdev->dev);
+ continue;
+ }
+ scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
+ if (!scmdev)
+ return -ENODEV;
+ scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
+ ret = device_register(&scmdev->dev);
+ if (ret) {
+ /* Release reference from device_initialize(). */
+ put_device(&scmdev->dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int scm_update_information(void)
+{
+ struct chsc_scm_info *scm_info;
+ u64 token = 0;
+ size_t num;
+ int ret;
+
+ scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!scm_info)
+ return -ENOMEM;
+
+ do {
+ ret = chsc_scm_info(scm_info, token);
+ if (ret)
+ break;
+
+ num = (scm_info->response.length -
+ (offsetof(struct chsc_scm_info, scmal) -
+ offsetof(struct chsc_scm_info, response))
+ ) / sizeof(struct sale);
+
+ ret = scm_add(scm_info, num);
+ if (ret)
+ break;
+
+ token = scm_info->restok;
+ } while (token);
+
+ free_page((unsigned long)scm_info);
+
+ return ret;
+}
+
+static int scm_dev_avail(struct device *dev, void *unused)
+{
+ struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+ struct scm_device *scmdev = to_scm_dev(dev);
+
+ if (dev->driver && scmdrv->notify)
+ scmdrv->notify(scmdev, SCM_AVAIL);
+
+ return 0;
+}
+
+int scm_process_availability_information(void)
+{
+ return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
+}
+
+static int __init scm_init(void)
+{
+ int ret;
+
+ ret = bus_register(&scm_bus_type);
+ if (ret)
+ return ret;
+
+ scm_root = root_device_register("scm");
+ if (IS_ERR(scm_root)) {
+ bus_unregister(&scm_bus_type);
+ return PTR_ERR(scm_root);
+ }
+
+ scm_update_information();
+ return 0;
+}
+subsys_initcall_sync(scm_init);
diff --git a/drivers/s390/cio/trace.c b/drivers/s390/cio/trace.c
new file mode 100644
index 000000000..882ee538c
--- /dev/null
+++ b/drivers/s390/cio/trace.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoint definitions for s390_cio
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+ */
+
+#include <asm/crw.h>
+#include "cio.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_stsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_msch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_tsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_tpi);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_ssch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_csch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_hsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_xsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_rsch);
+EXPORT_TRACEPOINT_SYMBOL(s390_cio_chsc);
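+
+/*
+ * The tracepoints exported above appear under the "s390" trace system and
+ * can be enabled at runtime via tracefs, e.g. (mount point may differ):
+ *
+ *	echo 1 > /sys/kernel/tracing/events/s390/s390_cio_ssch/enable
+ *	cat /sys/kernel/tracing/trace_pipe
+ */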
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
new file mode 100644
index 000000000..4803139bc
--- /dev/null
+++ b/drivers/s390/cio/trace.h
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoint header for the s390 Common I/O layer (CIO)
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <asm/crw.h>
+#include <uapi/asm/chpid.h>
+#include <uapi/asm/schid.h>
+#include "cio.h"
+#include "orb.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_CIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_CIO_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(s390_class_schib,
+ TP_PROTO(struct subchannel_id schid, struct schib *schib, int cc),
+ TP_ARGS(schid, schib, cc),
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, schno)
+ __field(u16, devno)
+ __field_struct(struct schib, schib)
+ __field(u8, pmcw_ena)
+ __field(u8, pmcw_st)
+ __field(u8, pmcw_dnv)
+ __field(u16, pmcw_dev)
+ __field(u8, pmcw_lpm)
+ __field(u8, pmcw_pnom)
+ __field(u8, pmcw_lpum)
+ __field(u8, pmcw_pim)
+ __field(u8, pmcw_pam)
+ __field(u8, pmcw_pom)
+ __field(u64, pmcw_chpid)
+ __field(int, cc)
+ ),
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->schno = schid.sch_no;
+ __entry->devno = schib->pmcw.dev;
+ __entry->schib = *schib;
+ __entry->pmcw_ena = schib->pmcw.ena;
+		__entry->pmcw_st = schib->pmcw.st;
+ __entry->pmcw_dnv = schib->pmcw.dnv;
+ __entry->pmcw_dev = schib->pmcw.dev;
+ __entry->pmcw_lpm = schib->pmcw.lpm;
+ __entry->pmcw_pnom = schib->pmcw.pnom;
+ __entry->pmcw_lpum = schib->pmcw.lpum;
+ __entry->pmcw_pim = schib->pmcw.pim;
+ __entry->pmcw_pam = schib->pmcw.pam;
+ __entry->pmcw_pom = schib->pmcw.pom;
+ memcpy(&__entry->pmcw_chpid, &schib->pmcw.chpid, 8);
+ __entry->cc = cc;
+ ),
+ TP_printk("schid=%x.%x.%04x cc=%d ena=%d st=%d dnv=%d dev=%04x "
+ "lpm=0x%02x pnom=0x%02x lpum=0x%02x pim=0x%02x pam=0x%02x "
+ "pom=0x%02x chpids=%016llx",
+ __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
+ __entry->pmcw_ena, __entry->pmcw_st,
+ __entry->pmcw_dnv, __entry->pmcw_dev,
+ __entry->pmcw_lpm, __entry->pmcw_pnom,
+ __entry->pmcw_lpum, __entry->pmcw_pim,
+ __entry->pmcw_pam, __entry->pmcw_pom,
+ __entry->pmcw_chpid
+ )
+);
+
+/**
+ * s390_cio_stsch - Store Subchannel instruction (STSCH) was performed
+ * @schid: Subchannel ID
+ * @schib: Subchannel-Information block
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schib, s390_cio_stsch,
+ TP_PROTO(struct subchannel_id schid, struct schib *schib, int cc),
+ TP_ARGS(schid, schib, cc)
+);
+
+/**
+ * s390_cio_msch - Modify Subchannel instruction (MSCH) was performed
+ * @schid: Subchannel ID
+ * @schib: Subchannel-Information block
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schib, s390_cio_msch,
+ TP_PROTO(struct subchannel_id schid, struct schib *schib, int cc),
+ TP_ARGS(schid, schib, cc)
+);
+
+/**
+ * s390_cio_tsch - Test Subchannel instruction (TSCH) was performed
+ * @schid: Subchannel ID
+ * @irb: Interruption-Response Block
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_tsch,
+ TP_PROTO(struct subchannel_id schid, struct irb *irb, int cc),
+ TP_ARGS(schid, irb, cc),
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, schno)
+ __field_struct(struct irb, irb)
+ __field(u8, scsw_dcc)
+ __field(u8, scsw_pno)
+ __field(u8, scsw_fctl)
+ __field(u8, scsw_actl)
+ __field(u8, scsw_stctl)
+ __field(u8, scsw_dstat)
+ __field(u8, scsw_cstat)
+ __field(int, cc)
+ ),
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->schno = schid.sch_no;
+ __entry->irb = *irb;
+ __entry->scsw_dcc = scsw_cc(&irb->scsw);
+ __entry->scsw_pno = scsw_pno(&irb->scsw);
+ __entry->scsw_fctl = scsw_fctl(&irb->scsw);
+ __entry->scsw_actl = scsw_actl(&irb->scsw);
+ __entry->scsw_stctl = scsw_stctl(&irb->scsw);
+ __entry->scsw_dstat = scsw_dstat(&irb->scsw);
+ __entry->scsw_cstat = scsw_cstat(&irb->scsw);
+ __entry->cc = cc;
+ ),
+ TP_printk("schid=%x.%x.%04x cc=%d dcc=%d pno=%d fctl=0x%x actl=0x%x "
+ "stctl=0x%x dstat=0x%x cstat=0x%x",
+ __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
+ __entry->scsw_dcc, __entry->scsw_pno,
+ __entry->scsw_fctl, __entry->scsw_actl,
+ __entry->scsw_stctl,
+ __entry->scsw_dstat, __entry->scsw_cstat
+ )
+);
+
+/**
+ * s390_cio_tpi - Test Pending Interruption instruction (TPI) was performed
+ * @addr: Address of the I/O interruption code or %NULL
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_tpi,
+ TP_PROTO(struct tpi_info *addr, int cc),
+ TP_ARGS(addr, cc),
+ TP_STRUCT__entry(
+ __field(int, cc)
+ __field_struct(struct tpi_info, tpi_info)
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, schno)
+ __field(u8, adapter_IO)
+ __field(u8, isc)
+ __field(u8, type)
+ ),
+ TP_fast_assign(
+ __entry->cc = cc;
+ if (cc != 0)
+ memset(&__entry->tpi_info, 0, sizeof(struct tpi_info));
+ else if (addr)
+ __entry->tpi_info = *addr;
+ else {
+ memcpy(&__entry->tpi_info, &S390_lowcore.subchannel_id,
+ sizeof(struct tpi_info));
+ }
+ __entry->cssid = __entry->tpi_info.schid.cssid;
+ __entry->ssid = __entry->tpi_info.schid.ssid;
+ __entry->schno = __entry->tpi_info.schid.sch_no;
+ __entry->adapter_IO = __entry->tpi_info.adapter_IO;
+ __entry->isc = __entry->tpi_info.isc;
+ __entry->type = __entry->tpi_info.type;
+ ),
+ TP_printk("schid=%x.%x.%04x cc=%d a=%d isc=%d type=%d",
+ __entry->cssid, __entry->ssid, __entry->schno, __entry->cc,
+ __entry->adapter_IO, __entry->isc,
+ __entry->type
+ )
+);
+
+/**
+ * s390_cio_ssch - Start Subchannel instruction (SSCH) was performed
+ * @schid: Subchannel ID
+ * @orb: Operation-Request Block
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_ssch,
+ TP_PROTO(struct subchannel_id schid, union orb *orb, int cc),
+ TP_ARGS(schid, orb, cc),
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, schno)
+ __field_struct(union orb, orb)
+ __field(int, cc)
+ ),
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->schno = schid.sch_no;
+ __entry->orb = *orb;
+ __entry->cc = cc;
+ ),
+ TP_printk("schid=%x.%x.%04x cc=%d", __entry->cssid, __entry->ssid,
+ __entry->schno, __entry->cc
+ )
+);
+
+DECLARE_EVENT_CLASS(s390_class_schid,
+ TP_PROTO(struct subchannel_id schid, int cc),
+ TP_ARGS(schid, cc),
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, schno)
+ __field(int, cc)
+ ),
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->schno = schid.sch_no;
+ __entry->cc = cc;
+ ),
+ TP_printk("schid=%x.%x.%04x cc=%d", __entry->cssid, __entry->ssid,
+ __entry->schno, __entry->cc
+ )
+);
+
+/**
+ * s390_cio_csch - Clear Subchannel instruction (CSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_csch,
+ TP_PROTO(struct subchannel_id schid, int cc),
+ TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_hsch - Halt Subchannel instruction (HSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_hsch,
+ TP_PROTO(struct subchannel_id schid, int cc),
+ TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_xsch - Cancel Subchannel instruction (XSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_xsch,
+ TP_PROTO(struct subchannel_id schid, int cc),
+ TP_ARGS(schid, cc)
+);
+
+/**
+ * s390_cio_rsch - Resume Subchannel instruction (RSCH) was performed
+ * @schid: Subchannel ID
+ * @cc: Condition code
+ */
+DEFINE_EVENT(s390_class_schid, s390_cio_rsch,
+ TP_PROTO(struct subchannel_id schid, int cc),
+ TP_ARGS(schid, cc)
+);
+
+#define CHSC_MAX_REQUEST_LEN 64
+#define CHSC_MAX_RESPONSE_LEN 64
+
+/**
+ * s390_cio_chsc - Channel Subsystem Call (CHSC) instruction was performed
+ * @chsc: CHSC block
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_chsc,
+ TP_PROTO(struct chsc_header *chsc, int cc),
+ TP_ARGS(chsc, cc),
+ TP_STRUCT__entry(
+ __field(int, cc)
+ __field(u16, code)
+ __field(u16, rcode)
+ __array(u8, request, CHSC_MAX_REQUEST_LEN)
+ __array(u8, response, CHSC_MAX_RESPONSE_LEN)
+ ),
+ TP_fast_assign(
+ __entry->cc = cc;
+ __entry->code = chsc->code;
+ memcpy(&entry->request, chsc,
+ min_t(u16, chsc->length, CHSC_MAX_REQUEST_LEN));
+ chsc = (struct chsc_header *) ((char *) chsc + chsc->length);
+ __entry->rcode = chsc->code;
+ memcpy(&entry->response, chsc,
+ min_t(u16, chsc->length, CHSC_MAX_RESPONSE_LEN));
+ ),
+ TP_printk("code=0x%04x cc=%d rcode=0x%04x", __entry->code,
+ __entry->cc, __entry->rcode)
+);
+
+/**
+ * s390_cio_interrupt - An I/O interrupt occurred
+ * @tpi_info: Address of the I/O interruption code
+ */
+TRACE_EVENT(s390_cio_interrupt,
+ TP_PROTO(struct tpi_info *tpi_info),
+ TP_ARGS(tpi_info),
+ TP_STRUCT__entry(
+ __field_struct(struct tpi_info, tpi_info)
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, schno)
+ __field(u8, isc)
+ __field(u8, type)
+ ),
+ TP_fast_assign(
+ __entry->tpi_info = *tpi_info;
+ __entry->cssid = tpi_info->schid.cssid;
+ __entry->ssid = tpi_info->schid.ssid;
+ __entry->schno = tpi_info->schid.sch_no;
+ __entry->isc = tpi_info->isc;
+ __entry->type = tpi_info->type;
+ ),
+ TP_printk("schid=%x.%x.%04x isc=%d type=%d",
+ __entry->cssid, __entry->ssid, __entry->schno,
+ __entry->isc, __entry->type
+ )
+);
+
+/**
+ * s390_cio_adapter_int - An adapter interrupt occurred
+ * @tpi_info: Address of the I/O interruption code
+ */
+TRACE_EVENT(s390_cio_adapter_int,
+ TP_PROTO(struct tpi_info *tpi_info),
+ TP_ARGS(tpi_info),
+ TP_STRUCT__entry(
+ __field_struct(struct tpi_info, tpi_info)
+ __field(u8, isc)
+ ),
+ TP_fast_assign(
+ __entry->tpi_info = *tpi_info;
+ __entry->isc = tpi_info->isc;
+ ),
+ TP_printk("isc=%d", __entry->isc)
+);
+
+/**
+ * s390_cio_stcrw - Store Channel Report Word (STCRW) was performed
+ * @crw: Channel Report Word
+ * @cc: Condition code
+ */
+TRACE_EVENT(s390_cio_stcrw,
+ TP_PROTO(struct crw *crw, int cc),
+ TP_ARGS(crw, cc),
+ TP_STRUCT__entry(
+ __field_struct(struct crw, crw)
+ __field(int, cc)
+ __field(u8, slct)
+ __field(u8, oflw)
+ __field(u8, chn)
+ __field(u8, rsc)
+ __field(u8, anc)
+ __field(u8, erc)
+ __field(u16, rsid)
+ ),
+ TP_fast_assign(
+ __entry->crw = *crw;
+ __entry->cc = cc;
+ __entry->slct = crw->slct;
+ __entry->oflw = crw->oflw;
+ __entry->chn = crw->chn;
+ __entry->rsc = crw->rsc;
+ __entry->anc = crw->anc;
+ __entry->erc = crw->erc;
+ __entry->rsid = crw->rsid;
+ ),
+ TP_printk("cc=%d slct=%d oflw=%d chn=%d rsc=%d anc=%d erc=0x%x "
+ "rsid=0x%x",
+ __entry->cc, __entry->slct, __entry->oflw,
+ __entry->chn, __entry->rsc, __entry->anc,
+ __entry->erc, __entry->rsid
+ )
+);
+
+#endif /* _TRACE_S390_CIO_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
new file mode 100644
index 000000000..7a838e3d7
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Async I/O region for vfio_ccw
+ *
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Cornelia Huck <cohuck@redhat.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "vfio_ccw_private.h"
+
+static ssize_t vfio_ccw_async_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_cmd_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static ssize_t vfio_ccw_async_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_cmd_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ if (!mutex_trylock(&private->io_mutex))
+ return -EAGAIN;
+
+ region = private->region[i].data;
+ if (copy_from_user((void *)region + pos, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_ASYNC_REQ);
+
+ ret = region->ret_code ? region->ret_code : count;
+
+out_unlock:
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+static const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
+ .read = vfio_ccw_async_region_read,
+ .write = vfio_ccw_async_region_write,
+ .release = vfio_ccw_async_region_release,
+};
+
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD,
+ &vfio_ccw_async_region_ops,
+ sizeof(struct ccw_cmd_region),
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE,
+ private->cmd_region);
+}
diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c
new file mode 100644
index 000000000..13b26a1c7
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_chp.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Channel path related status regions for vfio_ccw
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ * Eric Farman <farman@linux.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/vfio.h>
+#include "vfio_ccw_private.h"
+
+static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_schib_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+
+ if (cio_update_schib(private->sch)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ memcpy(region, &private->sch->schib, sizeof(*region));
+
+ if (copy_to_user(buf, (void *)region + pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = count;
+
+out:
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+
+static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = {
+ .read = vfio_ccw_schib_region_read,
+ .write = vfio_ccw_schib_region_write,
+ .release = vfio_ccw_schib_region_release,
+};
+
+int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_SCHIB,
+ &vfio_ccw_schib_region_ops,
+ sizeof(struct ccw_schib_region),
+ VFIO_REGION_INFO_FLAG_READ,
+ private->schib_region);
+}
+
+static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_crw_region *region;
+ struct vfio_ccw_crw *crw;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ crw = list_first_entry_or_null(&private->crw,
+ struct vfio_ccw_crw, next);
+
+ if (crw)
+ list_del(&crw->next);
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+
+ if (crw)
+ memcpy(&region->crw, &crw->crw, sizeof(region->crw));
+
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+
+ region->crw = 0;
+
+ mutex_unlock(&private->io_mutex);
+
+ kfree(crw);
+
+ /* Notify the guest if more CRWs are on our queue */
+ if (!list_empty(&private->crw) && private->crw_trigger)
+ eventfd_signal(private->crw_trigger, 1);
+
+ return ret;
+}
+
+static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = {
+ .read = vfio_ccw_crw_region_read,
+ .write = vfio_ccw_crw_region_write,
+ .release = vfio_ccw_crw_region_release,
+};
+
+int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_CRW,
+ &vfio_ccw_crw_region_ops,
+ sizeof(struct ccw_crw_region),
+ VFIO_REGION_INFO_FLAG_READ,
+ private->crw_region);
+}
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
new file mode 100644
index 000000000..8d1b2771c
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -0,0 +1,875 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * channel program interfaces
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ */
+
+#include <linux/ratelimit.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vfio.h>
+#include <asm/idals.h>
+
+#include "vfio_ccw_cp.h"
+
+struct pfn_array {
+ /* Starting guest physical I/O address. */
+ unsigned long pa_iova;
+ /* Array that stores PFNs of the pages need to pin. */
+ unsigned long *pa_iova_pfn;
+ /* Array that receives PFNs of the pages pinned. */
+ unsigned long *pa_pfn;
+ /* Number of pages pinned from @pa_iova. */
+ int pa_nr;
+};
+
+struct ccwchain {
+ struct list_head next;
+ struct ccw1 *ch_ccw;
+ /* Guest physical address of the current chain. */
+ u64 ch_iova;
+ /* Count of the valid ccws in chain. */
+ int ch_len;
+ /* Pinned PAGEs for the original data. */
+ struct pfn_array *ch_pa;
+};
+
+/*
+ * pfn_array_alloc() - alloc memory for PFNs
+ * @pa: pfn_array on which to perform the operation
+ * @iova: target guest physical address
+ * @len: number of bytes that should be pinned from @iova
+ *
+ * Attempt to allocate memory for PFNs.
+ *
+ * Usage of pfn_array:
+ * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
+ * this structure will be filled in by this function.
+ *
+ * Returns:
+ * 0 if PFNs are allocated
+ * -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ * -ENOMEM if alloc failed
+ */
+static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
+{
+ int i;
+
+ if (pa->pa_nr || pa->pa_iova_pfn)
+ return -EINVAL;
+
+ pa->pa_iova = iova;
+
+ pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (!pa->pa_nr)
+ return -EINVAL;
+
+ pa->pa_iova_pfn = kcalloc(pa->pa_nr,
+ sizeof(*pa->pa_iova_pfn) +
+ sizeof(*pa->pa_pfn),
+ GFP_KERNEL);
+ if (unlikely(!pa->pa_iova_pfn)) {
+ pa->pa_nr = 0;
+ return -ENOMEM;
+ }
+ pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+
+ pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+ pa->pa_pfn[0] = -1ULL;
+ for (i = 1; i < pa->pa_nr; i++) {
+ pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
+ pa->pa_pfn[i] = -1ULL;
+ }
+
+ return 0;
+}
+
+/*
+ * pfn_array_pin() - Pin user pages in memory
+ * @pa: pfn_array on which to perform the operation
+ * @mdev: the mediated device to perform pin operations
+ *
+ * Returns number of pages pinned upon success.
+ * If the pin request partially succeeds, or fails completely,
+ * all pages are left unpinned and a negative error value is returned.
+ */
+static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
+{
+ int ret = 0;
+
+ ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+ IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+
+ if (ret < 0) {
+ goto err_out;
+ } else if (ret > 0 && ret != pa->pa_nr) {
+ vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ return ret;
+
+err_out:
+ pa->pa_nr = 0;
+
+ return ret;
+}
+
+/* Unpin the pages before releasing the memory. */
+static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+{
+ /* Only unpin if any pages were pinned to begin with */
+ if (pa->pa_nr)
+ vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+ pa->pa_nr = 0;
+ kfree(pa->pa_iova_pfn);
+}
+
+static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
+{
+ unsigned long iova_pfn = iova >> PAGE_SHIFT;
+ int i;
+
+ for (i = 0; i < pa->pa_nr; i++)
+ if (pa->pa_iova_pfn[i] == iova_pfn)
+ return true;
+
+ return false;
+}
+/* Create the list of IDAL words for a pfn_array. */
+static inline void pfn_array_idal_create_words(
+ struct pfn_array *pa,
+ unsigned long *idaws)
+{
+ int i;
+
+ /*
+	 * IDAL words (except the first one) rely on the memory being 4K
+	 * aligned. If a user virtual address is 4K aligned, then its
+	 * corresponding kernel physical address will also be 4K aligned. Thus
+	 * it is safe to simply use the physical address to create an IDAW.
+ */
+
+ for (i = 0; i < pa->pa_nr; i++)
+ idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+
+ /* Adjust the first IDAW, since it may not start on a page boundary */
+ idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+}
+
+static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
+{
+ struct ccw0 ccw0;
+ struct ccw1 *pccw1 = source;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ ccw0 = *(struct ccw0 *)pccw1;
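+		/* TIC CCWs ignore flags and count, so zero them when converting */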
+ if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
+ pccw1->cmd_code = CCW_CMD_TIC;
+ pccw1->flags = 0;
+ pccw1->count = 0;
+ } else {
+ pccw1->cmd_code = ccw0.cmd_code;
+ pccw1->flags = ccw0.flags;
+ pccw1->count = ccw0.count;
+ }
+ pccw1->cda = ccw0.cda;
+ pccw1++;
+ }
+}
+
+/*
+ * Within the domain (@mdev), copy @n bytes from a guest physical
+ * address (@iova) to a host physical address (@to).
+ *
+ * Returns the number of bytes that could not be copied (0 on success),
+ * or a negative error value if the guest pages could not be pinned.
+ */
+static long copy_from_iova(struct device *mdev,
+ void *to, u64 iova,
+ unsigned long n)
+{
+ struct pfn_array pa = {0};
+ u64 from;
+ int i, ret;
+ unsigned long l, m;
+
+ ret = pfn_array_alloc(&pa, iova, n);
+ if (ret < 0)
+ return ret;
+
+ ret = pfn_array_pin(&pa, mdev);
+ if (ret < 0) {
+ pfn_array_unpin_free(&pa, mdev);
+ return ret;
+ }
+
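+	/* l tracks the bytes still to copy while walking the pinned pages in order */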
+ l = n;
+ for (i = 0; i < pa.pa_nr; i++) {
+ from = pa.pa_pfn[i] << PAGE_SHIFT;
+ m = PAGE_SIZE;
+ if (i == 0) {
+ from += iova & (PAGE_SIZE - 1);
+ m -= iova & (PAGE_SIZE - 1);
+ }
+
+ m = min(l, m);
+ memcpy(to + (n - l), (void *)from, m);
+
+ l -= m;
+ if (l == 0)
+ break;
+ }
+
+ pfn_array_unpin_free(&pa, mdev);
+
+ return l;
+}
+
+/*
+ * Helpers to operate ccwchain.
+ */
+#define ccw_is_read(_ccw) (((_ccw)->cmd_code & 0x03) == 0x02)
+#define ccw_is_read_backward(_ccw) (((_ccw)->cmd_code & 0x0F) == 0x0C)
+#define ccw_is_sense(_ccw) (((_ccw)->cmd_code & 0x0F) == CCW_CMD_BASIC_SENSE)
+
+#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)
+
+#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)
+
+#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
+#define ccw_is_skip(_ccw) ((_ccw)->flags & CCW_FLAG_SKIP)
+
+#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
+
+/*
+ * ccw_does_data_transfer()
+ *
+ * Determine whether a CCW will move any data, such that the guest pages
+ * would need to be pinned before performing the I/O.
+ *
+ * Returns 1 if yes, 0 if no.
+ */
+static inline int ccw_does_data_transfer(struct ccw1 *ccw)
+{
+ /* If the count field is zero, then no data will be transferred */
+ if (ccw->count == 0)
+ return 0;
+
+ /* If the command is a NOP, then no data will be transferred */
+ if (ccw_is_noop(ccw))
+ return 0;
+
+ /* If the skip flag is off, then data will be transferred */
+ if (!ccw_is_skip(ccw))
+ return 1;
+
+ /*
+ * If the skip flag is on, it is only meaningful if the command
+ * code is a read, read backward, sense, or sense ID. In those
+ * cases, no data will be transferred.
+ */
+ if (ccw_is_read(ccw) || ccw_is_read_backward(ccw))
+ return 0;
+
+ if (ccw_is_sense(ccw))
+ return 0;
+
+ /* The skip flag is on, but it is ignored for this command code. */
+ return 1;
+}
+
+/*
+ * is_cpa_within_range()
+ *
+ * @cpa: channel program address being questioned
+ * @head: address of the beginning of a CCW chain
+ * @len: number of CCWs within the chain
+ *
+ * Determine whether the address of a CCW (whether a new chain,
+ * or the target of a TIC) falls within a range (including the end points).
+ *
+ * Returns 1 if yes, 0 if no.
+ */
+static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
+{
+ u32 tail = head + (len - 1) * sizeof(struct ccw1);
+
+ return (head <= cpa && cpa <= tail);
+}
+
+static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
+{
+ if (!ccw_is_tic(ccw))
+ return 0;
+
+ return is_cpa_within_range(ccw->cda, head, len);
+}
+
+static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
+{
+ struct ccwchain *chain;
+ void *data;
+ size_t size;
+
+	/*
+	 * One allocation holds the chain header, the CCW array and the
+	 * pfn_array array; round the header up so the CCWs are 8-byte aligned.
+	 */
+ size = ((sizeof(*chain) + 7L) & -8L) +
+ sizeof(*chain->ch_ccw) * len +
+ sizeof(*chain->ch_pa) * len;
+ chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
+ if (!chain)
+ return NULL;
+
+ data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
+ chain->ch_ccw = (struct ccw1 *)data;
+
+ data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
+ chain->ch_pa = (struct pfn_array *)data;
+
+ chain->ch_len = len;
+
+ list_add_tail(&chain->next, &cp->ccwchain_list);
+
+ return chain;
+}
+
+static void ccwchain_free(struct ccwchain *chain)
+{
+ list_del(&chain->next);
+ kfree(chain);
+}
+
+/* Free the resources for a CCW that allocated memory for its CDA. */
+static void ccwchain_cda_free(struct ccwchain *chain, int idx)
+{
+ struct ccw1 *ccw = chain->ch_ccw + idx;
+
+ if (ccw_is_tic(ccw))
+ return;
+
+ kfree((void *)(u64)ccw->cda);
+}
+
+/**
+ * ccwchain_calc_length - calculate the length of the ccw chain.
+ * @iova: guest physical address of the target ccw chain
+ * @cp: channel_program on which to perform the operation
+ *
+ * This is the chain length not considering any TICs.
+ * You need to do a new round for each TIC target.
+ *
+ * The program is also checked for indirect-data-addressing scenarios
+ * that are not yet supported.
+ *
+ * Returns: the length of the ccw chain or -errno.
+ */
+static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
+{
+ struct ccw1 *ccw = cp->guest_cp;
+ int cnt = 0;
+
+ do {
+ cnt++;
+
+ /*
+ * As we don't want to fail direct addressing even if the
+ * orb specified one of the unsupported formats, we defer
+ * checking for IDAWs in unsupported formats to here.
+ */
+ if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+ return -EOPNOTSUPP;
+
+ /*
+ * We want to keep counting if the current CCW has the
+ * command-chaining flag enabled, or if it is a TIC CCW
+ * that loops back into the current chain. The latter
+ * is used for device orientation, where the CCW PRIOR to
+ * the TIC can either jump to the TIC or a CCW immediately
+ * after the TIC, depending on the results of its operation.
+ */
+ if (!ccw_is_chain(ccw) && !is_tic_within_range(ccw, iova, cnt))
+ break;
+
+ ccw++;
+ } while (cnt < CCWCHAIN_LEN_MAX + 1);
+
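+	/* A chain longer than CCWCHAIN_LEN_MAX CCWs is not supported */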
+ if (cnt == CCWCHAIN_LEN_MAX + 1)
+ cnt = -EINVAL;
+
+ return cnt;
+}
+
+static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
+{
+ struct ccwchain *chain;
+ u32 ccw_head;
+
+ list_for_each_entry(chain, &cp->ccwchain_list, next) {
+ ccw_head = chain->ch_iova;
+ if (is_cpa_within_range(tic->cda, ccw_head, chain->ch_len))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int ccwchain_loop_tic(struct ccwchain *chain,
+ struct channel_program *cp);
+
+static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
+{
+ struct ccwchain *chain;
+ int len, ret;
+
+ /* Copy 2K (the most we support today) of possible CCWs */
+ len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
+ CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
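+	/* copy_from_iova() returns the number of bytes it could not copy */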
+ if (len)
+ return len;
+
+ /* Convert any Format-0 CCWs to Format-1 */
+ if (!cp->orb.cmd.fmt)
+ convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
+
+ /* Count the CCWs in the current chain */
+ len = ccwchain_calc_length(cda, cp);
+ if (len < 0)
+ return len;
+
+	/* Need to allocate a new chain for this one. */
+ chain = ccwchain_alloc(cp, len);
+ if (!chain)
+ return -ENOMEM;
+ chain->ch_iova = cda;
+
+ /* Copy the actual CCWs into the new chain */
+ memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
+
+ /* Loop for tics on this new chain. */
+ ret = ccwchain_loop_tic(chain, cp);
+
+ if (ret)
+ ccwchain_free(chain);
+
+ return ret;
+}
+
+/* Loop for TICs. */
+static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
+{
+ struct ccw1 *tic;
+ int i, ret;
+
+ for (i = 0; i < chain->ch_len; i++) {
+ tic = chain->ch_ccw + i;
+
+ if (!ccw_is_tic(tic))
+ continue;
+
+ /* May transfer to an existing chain. */
+ if (tic_target_chain_exists(tic, cp))
+ continue;
+
+ /* Build a ccwchain for the next segment */
+ ret = ccwchain_handle_ccw(tic->cda, cp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ccwchain_fetch_tic(struct ccwchain *chain,
+ int idx,
+ struct channel_program *cp)
+{
+ struct ccw1 *ccw = chain->ch_ccw + idx;
+ struct ccwchain *iter;
+ u32 ccw_head;
+
+ list_for_each_entry(iter, &cp->ccwchain_list, next) {
+ ccw_head = iter->ch_iova;
+ if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
+ ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
+ (ccw->cda - ccw_head));
+ return 0;
+ }
+ }
+
+ return -EFAULT;
+}
+
+static int ccwchain_fetch_direct(struct ccwchain *chain,
+ int idx,
+ struct channel_program *cp)
+{
+ struct ccw1 *ccw;
+ struct pfn_array *pa;
+ u64 iova;
+ unsigned long *idaws;
+ int ret;
+ int bytes = 1;
+ int idaw_nr, idal_len;
+ int i;
+
+ ccw = chain->ch_ccw + idx;
+
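+	/* Treat a zero count as one byte so the IDAL sizing below stays valid */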
+ if (ccw->count)
+ bytes = ccw->count;
+
+ /* Calculate size of IDAL */
+ if (ccw_is_idal(ccw)) {
+		/*
+		 * Read the first IDAW to see if it's 4K-aligned or not;
+		 * all subsequent IDAWs will be 4K-aligned.
+		 */
+ ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
+ if (ret)
+ return ret;
+ } else {
+ iova = ccw->cda;
+ }
+ idaw_nr = idal_nr_words((void *)iova, bytes);
+ idal_len = idaw_nr * sizeof(*idaws);
+
+ /* Allocate an IDAL from host storage */
+ idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+ if (!idaws) {
+ ret = -ENOMEM;
+ goto out_init;
+ }
+
+	/*
+	 * Allocate an array of pfns for the pages to pin/translate.
+	 * The number of pages is actually the count of the idaws
+	 * required for the data transfer, since we only support
+	 * 4K IDAWs today.
+	 */
+ pa = chain->ch_pa + idx;
+ ret = pfn_array_alloc(pa, iova, bytes);
+ if (ret < 0)
+ goto out_free_idaws;
+
+ if (ccw_is_idal(ccw)) {
+ /* Copy guest IDAL into host IDAL */
+ ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
+ if (ret)
+ goto out_unpin;
+
+ /*
+ * Copy guest IDAWs into pfn_array, in case the memory they
+ * occupy is not contiguous.
+ */
+ for (i = 0; i < idaw_nr; i++)
+ pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+ } else {
+ /*
+ * No action is required here; the iova addresses in pfn_array
+ * were initialized sequentially in pfn_array_alloc() beginning
+ * with the contents of ccw->cda.
+ */
+ }
+
+ if (ccw_does_data_transfer(ccw)) {
+ ret = pfn_array_pin(pa, cp->mdev);
+ if (ret < 0)
+ goto out_unpin;
+ } else {
+ pa->pa_nr = 0;
+ }
+
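+	/* Point the CCW at the host IDAL and flag it as indirect */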
+ ccw->cda = (__u32) virt_to_phys(idaws);
+ ccw->flags |= CCW_FLAG_IDA;
+
+ /* Populate the IDAL with pinned/translated addresses from pfn */
+ pfn_array_idal_create_words(pa, idaws);
+
+ return 0;
+
+out_unpin:
+ pfn_array_unpin_free(pa, cp->mdev);
+out_free_idaws:
+ kfree(idaws);
+out_init:
+ ccw->cda = 0;
+ return ret;
+}
+
+/*
+ * Fetch one ccw.
+ * To reduce memory copy, we'll pin the cda page in memory,
+ * and to get rid of the cda 2G limitation of ccw1, we'll translate
+ * direct ccws to idal ccws.
+ */
+static int ccwchain_fetch_one(struct ccwchain *chain,
+ int idx,
+ struct channel_program *cp)
+{
+ struct ccw1 *ccw = chain->ch_ccw + idx;
+
+ if (ccw_is_tic(ccw))
+ return ccwchain_fetch_tic(chain, idx, cp);
+
+ return ccwchain_fetch_direct(chain, idx, cp);
+}
+
+/**
+ * cp_init() - allocate ccwchains for a channel program.
+ * @cp: channel_program on which to perform the operation
+ * @mdev: the mediated device to perform pin/unpin operations
+ * @orb: control block for the channel program from the guest
+ *
+ * This creates one or more ccwchain(s), and copies the raw data of
+ * the target channel program from @orb->cmd.iova to the new ccwchain(s).
+ *
+ * Limitations:
+ * 1. Supports idal(c64) ccw chaining.
+ * 2. Supports 4k idaw.
+ *
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
+{
+ /* custom ratelimit used to avoid flood during guest IPL */
+ static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
+ int ret;
+
+ /* this is an error in the caller */
+ if (cp->initialized)
+ return -EBUSY;
+
+ /*
+ * We only support prefetching the channel program. We assume all channel
+ * programs executed by supported guests likewise support prefetching.
+ * Executing a channel program that does not specify prefetching will
+ * typically not cause an error, but a warning is issued to help identify
+ * the problem if something does break.
+ */
+ if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
+ dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB");
+
+ INIT_LIST_HEAD(&cp->ccwchain_list);
+ memcpy(&cp->orb, orb, sizeof(*orb));
+ cp->mdev = mdev;
+
+ /* Build a ccwchain for the first CCW segment */
+ ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
+
+ if (!ret) {
+ cp->initialized = true;
+
+		/*
+		 * It is safe to force: if c64 was not set but IDALs were
+		 * used, ccwchain_calc_length() would have returned an error.
+		 */
+ cp->orb.cmd.c64 = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * cp_free() - free resources for channel program.
+ * @cp: channel_program on which to perform the operation
+ *
+ * This unpins the memory pages and frees the memory space occupied by
+ * @cp, which must have been returned by a previous call to cp_init().
+ * Otherwise, undefined behavior occurs.
+ */
+void cp_free(struct channel_program *cp)
+{
+ struct ccwchain *chain, *temp;
+ int i;
+
+ if (!cp->initialized)
+ return;
+
+ cp->initialized = false;
+ list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
+ for (i = 0; i < chain->ch_len; i++) {
+ pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
+ ccwchain_cda_free(chain, i);
+ }
+ ccwchain_free(chain);
+ }
+}
+
+/**
+ * cp_prefetch() - translate a guest physical address channel program to
+ * a real-device runnable channel program.
+ * @cp: channel_program on which to perform the operation
+ *
+ * This function translates the guest-physical-address channel program
+ * and stores the result to ccwchain list. @cp must have been
+ * initialized by a previous call with cp_init(). Otherwise, undefined
+ * behavior occurs.
+ * For each chain composing the channel program:
+ * - On entry ch_len holds the count of CCWs to be translated.
+ * - On exit ch_len is adjusted to the count of successfully translated CCWs.
+ * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
+ *
+ * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced
+ * as helpers to do ccw chain translation inside the kernel. Basically
+ * they accept a channel program issued by a virtual machine, and
+ * translate the channel program to a real-device runnable channel
+ * program.
+ *
+ * These APIs copy the ccws into kernel-space buffers, and update
+ * the guest physical addresses with their corresponding host physical
+ * addresses. Then channel I/O device drivers can issue the
+ * translated channel program to real devices to perform an I/O
+ * operation.
+ *
+ * These interfaces are designed to support translation only for
+ * channel programs, which are generated and formatted by a
+ * guest. This makes it possible for things like VFIO to
+ * leverage the interfaces to pass through a channel I/O mediated
+ * device in QEMU.
+ *
+ * We support direct ccw chaining by translating them to idal ccws.
+ *
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int cp_prefetch(struct channel_program *cp)
+{
+ struct ccwchain *chain;
+ int len, idx, ret;
+
+ /* this is an error in the caller */
+ if (!cp->initialized)
+ return -EINVAL;
+
+ list_for_each_entry(chain, &cp->ccwchain_list, next) {
+ len = chain->ch_len;
+ for (idx = 0; idx < len; idx++) {
+ ret = ccwchain_fetch_one(chain, idx, cp);
+ if (ret)
+ goto out_err;
+ }
+ }
+
+ return 0;
+out_err:
+ /* Only cleanup the chain elements that were actually translated. */
+ chain->ch_len = idx;
+ list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
+ chain->ch_len = 0;
+ }
+ return ret;
+}
+
+/**
+ * cp_get_orb() - get the orb of the channel program
+ * @cp: channel_program on which to perform the operation
+ * @intparm: new intparm for the returned orb
+ * @lpm: candidate value of the logical-path mask for the returned orb
+ *
+ * This function returns the address of the updated orb of the channel
+ * program. Channel I/O device drivers could use this orb to issue a
+ * ssch.
+ */
+union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
+{
+ union orb *orb;
+ struct ccwchain *chain;
+ struct ccw1 *cpa;
+
+ /* this is an error in the caller */
+ if (!cp->initialized)
+ return NULL;
+
+ orb = &cp->orb;
+
+ orb->cmd.intparm = intparm;
+ orb->cmd.fmt = 1;
+ orb->cmd.key = PAGE_DEFAULT_KEY >> 4;
+
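+	/* Fall back to the caller's path mask only if the guest did not supply one */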
+ if (orb->cmd.lpm == 0)
+ orb->cmd.lpm = lpm;
+
+ chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
+ cpa = chain->ch_ccw;
+ orb->cmd.cpa = (__u32) __pa(cpa);
+
+ return orb;
+}
+
+/**
+ * cp_update_scsw() - update scsw for a channel program.
+ * @cp: channel_program on which to perform the operation
+ * @scsw: I/O results of the channel program and also the target to be
+ * updated
+ *
+ * @scsw contains the I/O results of the channel program pointed
+ * to by @cp. However, what @scsw->cpa stores is a host physical
+ * address, which is meaningless for the guest, which is waiting for
+ * the I/O results.
+ *
+ * This function updates @scsw->cpa to its corresponding guest physical
+ * address.
+ */
+void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
+{
+ struct ccwchain *chain;
+ u32 cpa = scsw->cmd.cpa;
+ u32 ccw_head;
+
+ if (!cp->initialized)
+ return;
+
+ /*
+ * LATER:
+ * For now, only update the cmd.cpa part. We may need to deal with
+ * other portions of the schib as well, even if we don't return them
+ * in the ioctl directly. Path status changes etc.
+ */
+ list_for_each_entry(chain, &cp->ccwchain_list, next) {
+ ccw_head = (u32)(u64)chain->ch_ccw;
+ /*
+ * On successful execution, cpa points just beyond the end
+ * of the chain.
+ */
+ if (is_cpa_within_range(cpa, ccw_head, chain->ch_len + 1)) {
+ /*
+ * (cpa - ccw_head) is the offset value of the host
+ * physical ccw to its chain head.
+ * Adding this value to the guest physical ccw chain
+ * head gets us the guest cpa.
+ */
+ cpa = chain->ch_iova + (cpa - ccw_head);
+ break;
+ }
+ }
+
+ scsw->cmd.cpa = cpa;
+}
+
+/**
+ * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
+ * @cp: channel_program on which to perform the operation
+ * @iova: the iova to check
+ *
+ * If the @iova is currently pinned for the ccw chain, return true;
+ * else return false.
+ */
+bool cp_iova_pinned(struct channel_program *cp, u64 iova)
+{
+ struct ccwchain *chain;
+ int i;
+
+ if (!cp->initialized)
+ return false;
+
+ list_for_each_entry(chain, &cp->ccwchain_list, next) {
+ for (i = 0; i < chain->ch_len; i++)
+ if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
new file mode 100644
index 000000000..ba31240ce
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * channel program interfaces
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ */
+
+#ifndef _VFIO_CCW_CP_H_
+#define _VFIO_CCW_CP_H_
+
+#include <asm/cio.h>
+#include <asm/scsw.h>
+
+#include "orb.h"
+#include "vfio_ccw_trace.h"
+
+/*
+ * Max length for ccw chain.
+ * XXX: Limit to 256, need to check more?
+ */
+#define CCWCHAIN_LEN_MAX 256
+
+/**
+ * struct channel_program - manage information for channel program
+ * @ccwchain_list: list head of ccwchains
+ * @orb: orb for the currently processed ssch request
+ * @mdev: the mediated device to perform page pinning/unpinning
+ * @initialized: whether this instance is actually initialized
+ *
+ * @ccwchain_list is the head of a ccwchain list, which contains the
+ * translated result of the guest channel program pointed to by the
+ * iova parameter when calling cp_init.
+ */
+struct channel_program {
+ struct list_head ccwchain_list;
+ union orb orb;
+ struct device *mdev;
+ bool initialized;
+ struct ccw1 *guest_cp;
+};
+
+extern int cp_init(struct channel_program *cp, struct device *mdev,
+ union orb *orb);
+extern void cp_free(struct channel_program *cp);
+extern int cp_prefetch(struct channel_program *cp);
+extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+extern void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
+extern bool cp_iova_pinned(struct channel_program *cp, u64 iova);
+
+#endif
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
new file mode 100644
index 000000000..e3c1060b6
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VFIO based Physical Subchannel device driver
+ *
+ * Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/mdev.h>
+
+#include <asm/isc.h>
+
+#include "chp.h"
+#include "ioasm.h"
+#include "css.h"
+#include "vfio_ccw_private.h"
+
+struct workqueue_struct *vfio_ccw_work_q;
+static struct kmem_cache *vfio_ccw_io_region;
+static struct kmem_cache *vfio_ccw_cmd_region;
+static struct kmem_cache *vfio_ccw_schib_region;
+static struct kmem_cache *vfio_ccw_crw_region;
+
+debug_info_t *vfio_ccw_debug_msg_id;
+debug_info_t *vfio_ccw_debug_trace_id;
+
+/*
+ * Helpers
+ */
+int vfio_ccw_sch_quiesce(struct subchannel *sch)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+ DECLARE_COMPLETION_ONSTACK(completion);
+ int iretry, ret = 0;
+
+ spin_lock_irq(sch->lock);
+ if (!sch->schib.pmcw.ena)
+ goto out_unlock;
+ ret = cio_disable_subchannel(sch);
+ if (ret != -EBUSY)
+ goto out_unlock;
+
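+	/* cancel/halt/clear may need retries; cio_cancel_halt_clear() consumes iretry */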
+ iretry = 255;
+ do {
+
+ ret = cio_cancel_halt_clear(sch, &iretry);
+
+ if (ret == -EIO) {
+ pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
+ sch->schid.ssid, sch->schid.sch_no);
+ break;
+ }
+
+ /*
+ * Flush all I/O and wait for
+ * cancel/halt/clear completion.
+ */
+ private->completion = &completion;
+ spin_unlock_irq(sch->lock);
+
+ if (ret == -EBUSY)
+ wait_for_completion_timeout(&completion, 3*HZ);
+
+ private->completion = NULL;
+ flush_workqueue(vfio_ccw_work_q);
+ spin_lock_irq(sch->lock);
+ ret = cio_disable_subchannel(sch);
+ } while (ret == -EBUSY);
+out_unlock:
+ private->state = VFIO_CCW_STATE_NOT_OPER;
+ spin_unlock_irq(sch->lock);
+ return ret;
+}
+
+static void vfio_ccw_sch_io_todo(struct work_struct *work)
+{
+ struct vfio_ccw_private *private;
+ struct irb *irb;
+ bool is_final;
+ bool cp_is_finished = false;
+
+ private = container_of(work, struct vfio_ccw_private, io_work);
+ irb = &private->irb;
+
+ is_final = !(scsw_actl(&irb->scsw) &
+ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
+ if (scsw_is_solicited(&irb->scsw)) {
+ cp_update_scsw(&private->cp, &irb->scsw);
+ if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
+ cp_free(&private->cp);
+ cp_is_finished = true;
+ }
+ }
+ mutex_lock(&private->io_mutex);
+ memcpy(private->io_region->irb_area, irb, sizeof(*irb));
+ mutex_unlock(&private->io_mutex);
+
+ /*
+ * Reset to IDLE only if processing of a channel program
+ * has finished. Do not overwrite a possible processing
+ * state if the final interrupt was for HSCH or CSCH.
+ */
+ if (private->mdev && cp_is_finished)
+ private->state = VFIO_CCW_STATE_IDLE;
+
+ if (private->io_trigger)
+ eventfd_signal(private->io_trigger, 1);
+}
+
+static void vfio_ccw_crw_todo(struct work_struct *work)
+{
+ struct vfio_ccw_private *private;
+
+ private = container_of(work, struct vfio_ccw_private, crw_work);
+
+ if (!list_empty(&private->crw) && private->crw_trigger)
+ eventfd_signal(private->crw_trigger, 1);
+}
+
+/*
+ * Css driver callbacks
+ */
+static void vfio_ccw_sch_irq(struct subchannel *sch)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+
+ inc_irq_stat(IRQIO_CIO);
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
+}
+
+static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
+{
+ if (private->crw_region)
+ kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
+ if (private->schib_region)
+ kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
+ if (private->cmd_region)
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+ if (private->io_region)
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
+}
+
+static int vfio_ccw_sch_probe(struct subchannel *sch)
+{
+ struct pmcw *pmcw = &sch->schib.pmcw;
+ struct vfio_ccw_private *private;
+ int ret = -ENOMEM;
+
+ if (pmcw->qf) {
+ dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
+ dev_name(&sch->dev));
+ return -ENODEV;
+ }
+
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private)
+ return -ENOMEM;
+
+ private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
+ GFP_KERNEL);
+ if (!private->cp.guest_cp)
+ goto out_free;
+
+ private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
+ GFP_KERNEL | GFP_DMA);
+ if (!private->io_region)
+ goto out_free;
+
+ private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
+ GFP_KERNEL | GFP_DMA);
+ if (!private->cmd_region)
+ goto out_free;
+
+ private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!private->schib_region)
+ goto out_free;
+
+ private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!private->crw_region)
+ goto out_free;
+
+ private->sch = sch;
+ dev_set_drvdata(&sch->dev, private);
+ mutex_init(&private->io_mutex);
+
+ spin_lock_irq(sch->lock);
+ private->state = VFIO_CCW_STATE_NOT_OPER;
+ sch->isc = VFIO_CCW_ISC;
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ spin_unlock_irq(sch->lock);
+ if (ret)
+ goto out_free;
+
+ INIT_LIST_HEAD(&private->crw);
+ INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+ INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
+ atomic_set(&private->avail, 1);
+ private->state = VFIO_CCW_STATE_STANDBY;
+
+ ret = vfio_ccw_mdev_reg(sch);
+ if (ret)
+ goto out_disable;
+
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+
+ VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
+ sch->schid.cssid, sch->schid.ssid,
+ sch->schid.sch_no);
+ return 0;
+
+out_disable:
+ cio_disable_subchannel(sch);
+out_free:
+ dev_set_drvdata(&sch->dev, NULL);
+ vfio_ccw_free_regions(private);
+ kfree(private->cp.guest_cp);
+ kfree(private);
+ return ret;
+}
+
+static int vfio_ccw_sch_remove(struct subchannel *sch)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+ struct vfio_ccw_crw *crw, *temp;
+
+ vfio_ccw_sch_quiesce(sch);
+
+ list_for_each_entry_safe(crw, temp, &private->crw, next) {
+ list_del(&crw->next);
+ kfree(crw);
+ }
+
+ vfio_ccw_mdev_unreg(sch);
+
+ dev_set_drvdata(&sch->dev, NULL);
+
+ vfio_ccw_free_regions(private);
+ kfree(private->cp.guest_cp);
+ kfree(private);
+
+ VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
+ sch->schid.cssid, sch->schid.ssid,
+ sch->schid.sch_no);
+ return 0;
+}
+
+static void vfio_ccw_sch_shutdown(struct subchannel *sch)
+{
+ vfio_ccw_sch_quiesce(sch);
+}
+
+/**
+ * vfio_ccw_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel. Return zero when the
+ * event has been handled sufficiently or -EAGAIN when this function should
+ * be called again in process context.
+ */
+static int vfio_ccw_sch_event(struct subchannel *sch, int process)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+ unsigned long flags;
+ int rc = -EAGAIN;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+ goto out_unlock;
+
+ if (work_pending(&sch->todo_work))
+ goto out_unlock;
+
+ rc = 0;
+
+ if (cio_update_schib(sch))
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+
+out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+ return rc;
+}
+
+static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
+ unsigned int rsc,
+ unsigned int erc,
+ unsigned int rsid)
+{
+ struct vfio_ccw_crw *crw;
+
+ /*
+ * If unable to allocate a CRW, just drop the event and
+ * carry on. The guest will either see a later one or
+ * learn when it issues its own store subchannel.
+ */
+ crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
+ if (!crw)
+ return;
+
+ /*
+ * Build the CRW based on the inputs given to us.
+ */
+ crw->crw.rsc = rsc;
+ crw->crw.erc = erc;
+ crw->crw.rsid = rsid;
+
+ list_add_tail(&crw->next, &private->crw);
+ queue_work(vfio_ccw_work_q, &private->crw_work);
+}
+
+static int vfio_ccw_chp_event(struct subchannel *sch,
+ struct chp_link *link, int event)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+ int mask = chp_ssd_get_mask(&sch->ssd_info, link);
+ int retry = 255;
+
+ if (!private || !mask)
+ return 0;
+
+ trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
+ VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
+ mdev_uuid(private->mdev), sch->schid.cssid,
+ sch->schid.ssid, sch->schid.sch_no,
+ mask, event);
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ switch (event) {
+ case CHP_VARY_OFF:
+ /* Path logically turned off */
+ sch->opm &= ~mask;
+ sch->lpm &= ~mask;
+ if (sch->schib.pmcw.lpum & mask)
+ cio_cancel_halt_clear(sch, &retry);
+ break;
+ case CHP_OFFLINE:
+ /* Path is gone */
+ if (sch->schib.pmcw.lpum & mask)
+ cio_cancel_halt_clear(sch, &retry);
+ vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
+ link->chpid.id);
+ break;
+ case CHP_VARY_ON:
+ /* Path logically turned on */
+ sch->opm |= mask;
+ sch->lpm |= mask;
+ break;
+ case CHP_ONLINE:
+ /* Path became available */
+ sch->lpm |= mask & sch->opm;
+ vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
+ link->chpid.id);
+ break;
+ }
+
+ return 0;
+}
+
+static struct css_device_id vfio_ccw_sch_ids[] = {
+ { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
+
+static struct css_driver vfio_ccw_sch_driver = {
+ .drv = {
+ .name = "vfio_ccw",
+ .owner = THIS_MODULE,
+ },
+ .subchannel_type = vfio_ccw_sch_ids,
+ .irq = vfio_ccw_sch_irq,
+ .probe = vfio_ccw_sch_probe,
+ .remove = vfio_ccw_sch_remove,
+ .shutdown = vfio_ccw_sch_shutdown,
+ .sch_event = vfio_ccw_sch_event,
+ .chp_event = vfio_ccw_chp_event,
+};
+
+static int __init vfio_ccw_debug_init(void)
+{
+ vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
+ 11 * sizeof(long));
+ if (!vfio_ccw_debug_msg_id)
+ goto out_unregister;
+ debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
+ debug_set_level(vfio_ccw_debug_msg_id, 2);
+ vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
+ if (!vfio_ccw_debug_trace_id)
+ goto out_unregister;
+ debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
+ debug_set_level(vfio_ccw_debug_trace_id, 2);
+ return 0;
+
+out_unregister:
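+	/* debug_unregister() tolerates a NULL id, so this is safe on either failure path */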
+ debug_unregister(vfio_ccw_debug_msg_id);
+ debug_unregister(vfio_ccw_debug_trace_id);
+ return -1;
+}
+
+static void vfio_ccw_debug_exit(void)
+{
+ debug_unregister(vfio_ccw_debug_msg_id);
+ debug_unregister(vfio_ccw_debug_trace_id);
+}
+
+static void vfio_ccw_destroy_regions(void)
+{
+ kmem_cache_destroy(vfio_ccw_crw_region);
+ kmem_cache_destroy(vfio_ccw_schib_region);
+ kmem_cache_destroy(vfio_ccw_cmd_region);
+ kmem_cache_destroy(vfio_ccw_io_region);
+}
+
+static int __init vfio_ccw_sch_init(void)
+{
+ int ret;
+
+ ret = vfio_ccw_debug_init();
+ if (ret)
+ return ret;
+
+ vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
+ if (!vfio_ccw_work_q) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
+ sizeof(struct ccw_io_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_io_region), NULL);
+ if (!vfio_ccw_io_region) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
+ sizeof(struct ccw_cmd_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_cmd_region), NULL);
+ if (!vfio_ccw_cmd_region) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
+ sizeof(struct ccw_schib_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_schib_region), NULL);
+
+ if (!vfio_ccw_schib_region) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
+ sizeof(struct ccw_crw_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_crw_region), NULL);
+
+ if (!vfio_ccw_crw_region) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ isc_register(VFIO_CCW_ISC);
+ ret = css_driver_register(&vfio_ccw_sch_driver);
+ if (ret) {
+ isc_unregister(VFIO_CCW_ISC);
+ goto out_err;
+ }
+
+ return ret;
+
+out_err:
+ vfio_ccw_destroy_regions();
+ destroy_workqueue(vfio_ccw_work_q);
+ vfio_ccw_debug_exit();
+ return ret;
+}
+
+static void __exit vfio_ccw_sch_exit(void)
+{
+ css_driver_unregister(&vfio_ccw_sch_driver);
+ isc_unregister(VFIO_CCW_ISC);
+ vfio_ccw_destroy_regions();
+ destroy_workqueue(vfio_ccw_work_q);
+ vfio_ccw_debug_exit();
+}
+module_init(vfio_ccw_sch_init);
+module_exit(vfio_ccw_sch_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
new file mode 100644
index 000000000..e435a9cd9
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Finite state machine for vfio-ccw device handling
+ *
+ * Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "ioasm.h"
+#include "vfio_ccw_private.h"
+
+static int fsm_io_helper(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ union orb *orb;
+ int ccode;
+ __u8 lpm;
+ unsigned long flags;
+ int ret;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+
+ orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
+ if (!orb) {
+ ret = -EIO;
+ goto out;
+ }
+
+ VFIO_CCW_TRACE_EVENT(5, "stIO");
+ VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));
+
+ /* Issue "Start Subchannel" */
+ ccode = ssch(sch->schid, orb);
+
+ VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
+ ret = 0;
+ private->state = VFIO_CCW_STATE_CP_PENDING;
+ break;
+ case 1: /* Status pending */
+ case 2: /* Busy */
+ ret = -EBUSY;
+ break;
+ case 3: /* Device/path not operational */
+ {
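+		/* Remove the attempted path(s) from the mask; with no mask given, all paths are gone */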
+ lpm = orb->cmd.lpm;
+ if (lpm != 0)
+ sch->lpm &= ~lpm;
+ else
+ sch->lpm = 0;
+
+ if (cio_update_schib(sch))
+ ret = -ENODEV;
+ else
+ ret = sch->lpm ? -EACCES : -ENODEV;
+ break;
+ }
+ default:
+ ret = ccode;
+ }
+out:
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
+}
+
+static int fsm_do_halt(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ unsigned long flags;
+ int ccode;
+ int ret;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+
+ VFIO_CCW_TRACE_EVENT(2, "haltIO");
+ VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ /* Issue "Halt Subchannel" */
+ ccode = hsch(sch->schid);
+
+ VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+ ret = 0;
+ break;
+ case 1: /* Status pending */
+ case 2: /* Busy */
+ ret = -EBUSY;
+ break;
+ case 3: /* Device not operational */
+ ret = -ENODEV;
+ break;
+ default:
+ ret = ccode;
+ }
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
+}
+
+static int fsm_do_clear(struct vfio_ccw_private *private)
+{
+ struct subchannel *sch;
+ unsigned long flags;
+ int ccode;
+ int ret;
+
+ sch = private->sch;
+
+ spin_lock_irqsave(sch->lock, flags);
+
+ VFIO_CCW_TRACE_EVENT(2, "clearIO");
+ VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ /* Issue "Clear Subchannel" */
+ ccode = csch(sch->schid);
+
+ VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
+
+ switch (ccode) {
+ case 0:
+ /*
+ * Initialize device status information
+ */
+ sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
+ /* TODO: check what else we might need to clear */
+ ret = 0;
+ break;
+ case 3: /* Device not operational */
+ ret = -ENODEV;
+ break;
+ default:
+ ret = ccode;
+ }
+ spin_unlock_irqrestore(sch->lock, flags);
+ return ret;
+}
+
+static void fsm_notoper(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+
+ VFIO_CCW_TRACE_EVENT(2, "notoper");
+ VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
+
+ /*
+ * TODO:
+ * Probably we should send the machine check to the guest.
+ */
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ private->state = VFIO_CCW_STATE_NOT_OPER;
+}
+
+/*
+ * No operation action.
+ */
+static void fsm_nop(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+}
+
+static void fsm_io_error(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
+ private->io_region->ret_code = -EIO;
+}
+
+static void fsm_io_busy(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->io_region->ret_code = -EBUSY;
+}
+
+static void fsm_io_retry(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->io_region->ret_code = -EAGAIN;
+}
+
+static void fsm_async_error(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+ pr_err("vfio-ccw: FSM: %s request from state:%d\n",
+ cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
+ cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
+ "<unknown>", private->state);
+ cmd_region->ret_code = -EIO;
+}
+
+static void fsm_async_retry(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ private->cmd_region->ret_code = -EAGAIN;
+}
+
+static void fsm_disabled_irq(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct subchannel *sch = private->sch;
+
+ /*
+ * An interrupt in a disabled state means a previous disable was not
+ * successful - should not happen, but we try to disable again.
+ */
+ cio_disable_subchannel(sch);
+}
+
+inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
+{
+ return p->sch->schid;
+}
+
+/*
+ * Deal with the ccw command request from the userspace.
+ */
+static void fsm_io_request(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ union orb *orb;
+ union scsw *scsw = &private->scsw;
+ struct ccw_io_region *io_region = private->io_region;
+ struct mdev_device *mdev = private->mdev;
+ char *errstr = "request";
+ struct subchannel_id schid = get_schid(private);
+
+ private->state = VFIO_CCW_STATE_CP_PROCESSING;
+ memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
+
+ if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
+ orb = (union orb *)io_region->orb_area;
+
+ /* Don't try to build a cp if transport mode is specified. */
+ if (orb->tm.b) {
+ io_region->ret_code = -EOPNOTSUPP;
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): transport mode\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no);
+ errstr = "transport mode";
+ goto err_out;
+ }
+ io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
+ orb);
+ if (io_region->ret_code) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): cp_init=%d\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no,
+ io_region->ret_code);
+ errstr = "cp init";
+ goto err_out;
+ }
+
+ io_region->ret_code = cp_prefetch(&private->cp);
+ if (io_region->ret_code) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): cp_prefetch=%d\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no,
+ io_region->ret_code);
+ errstr = "cp prefetch";
+ cp_free(&private->cp);
+ goto err_out;
+ }
+
+ /* Start channel program and wait for I/O interrupt. */
+ io_region->ret_code = fsm_io_helper(private);
+ if (io_region->ret_code) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): fsm_io_helper=%d\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no,
+ io_region->ret_code);
+ errstr = "cp fsm_io_helper";
+ cp_free(&private->cp);
+ goto err_out;
+ }
+ return;
+ } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): halt on io_region\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no);
+ /* halt is handled via the async cmd region */
+ io_region->ret_code = -EOPNOTSUPP;
+ goto err_out;
+ } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
+ VFIO_CCW_MSG_EVENT(2,
+ "%pUl (%x.%x.%04x): clear on io_region\n",
+ mdev_uuid(mdev), schid.cssid,
+ schid.ssid, schid.sch_no);
+ /* clear is handled via the async cmd region */
+ io_region->ret_code = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+err_out:
+ private->state = VFIO_CCW_STATE_IDLE;
+ trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
+ io_region->ret_code, errstr);
+}
+
+/*
+ * Deal with an async request from userspace.
+ */
+static void fsm_async_request(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct ccw_cmd_region *cmd_region = private->cmd_region;
+
+ switch (cmd_region->command) {
+ case VFIO_CCW_ASYNC_CMD_HSCH:
+ cmd_region->ret_code = fsm_do_halt(private);
+ break;
+ case VFIO_CCW_ASYNC_CMD_CSCH:
+ cmd_region->ret_code = fsm_do_clear(private);
+ break;
+ default:
+ /* should not happen? */
+ cmd_region->ret_code = -EINVAL;
+ }
+
+ trace_vfio_ccw_fsm_async_request(get_schid(private),
+ cmd_region->command,
+ cmd_region->ret_code);
+}
+
+/*
+ * Got an interrupt for a normal io (state busy).
+ */
+static void fsm_irq(struct vfio_ccw_private *private,
+ enum vfio_ccw_event event)
+{
+ struct irb *irb = this_cpu_ptr(&cio_irb);
+
+ VFIO_CCW_TRACE_EVENT(6, "IRQ");
+ VFIO_CCW_TRACE_EVENT(6, dev_name(&private->sch->dev));
+
+ memcpy(&private->irb, irb, sizeof(*irb));
+
+ queue_work(vfio_ccw_work_q, &private->io_work);
+
+ if (private->completion)
+ complete(private->completion);
+}
+
+/*
+ * Device statemachine
+ */
+fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
+ [VFIO_CCW_STATE_NOT_OPER] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
+ },
+ [VFIO_CCW_STATE_STANDBY] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_IDLE] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_CP_PROCESSING] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+ [VFIO_CCW_STATE_CP_PENDING] = {
+ [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
+ [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
+ [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
+ [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
+ },
+};
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
new file mode 100644
index 000000000..2280f51dd
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Physical device callbacks for vfio_ccw
+ *
+ * Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+#include <linux/nospec.h>
+#include <linux/slab.h>
+
+#include "vfio_ccw_private.h"
+
+static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
+{
+ struct vfio_ccw_private *private;
+ struct subchannel *sch;
+ int ret;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
+ sch = private->sch;
+ /*
+ * TODO:
+	 * In the current stage, some things like "no I/O running" and "no
+	 * interrupt pending" are clear, but we are not sure what other state
+	 * we need to care about.
+	 * There are still a lot more instructions that need to be handled. We
+ * should come back here later.
+ */
+ ret = vfio_ccw_sch_quiesce(sch);
+ if (ret)
+ return ret;
+
+ ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ if (!ret)
+ private->state = VFIO_CCW_STATE_IDLE;
+
+ return ret;
+}
+
+static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct vfio_ccw_private *private =
+ container_of(nb, struct vfio_ccw_private, nb);
+
+ /*
+ * Vendor drivers MUST unpin pages in response to an
+ * invalidation.
+ */
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+
+ if (!cp_iova_pinned(&private->cp, unmap->iova))
+ return NOTIFY_OK;
+
+ if (vfio_ccw_mdev_reset(private->mdev))
+ return NOTIFY_BAD;
+
+ cp_free(&private->cp);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
+{
+ return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
+}
+static MDEV_TYPE_ATTR_RO(name);
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
+}
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", atomic_read(&private->avail));
+}
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static struct attribute *mdev_types_attrs[] = {
+ &mdev_type_attr_name.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_available_instances.attr,
+ NULL,
+};
+
+static struct attribute_group mdev_type_group = {
+ .name = "io",
+ .attrs = mdev_types_attrs,
+};
+
+static struct attribute_group *mdev_type_groups[] = {
+ &mdev_type_group,
+ NULL,
+};
+
+static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+ struct vfio_ccw_private *private =
+ dev_get_drvdata(mdev_parent_dev(mdev));
+
+ if (private->state == VFIO_CCW_STATE_NOT_OPER)
+ return -ENODEV;
+
+ if (atomic_dec_if_positive(&private->avail) < 0)
+ return -EPERM;
+
+ private->mdev = mdev;
+ private->state = VFIO_CCW_STATE_IDLE;
+
+ VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
+ mdev_uuid(mdev), private->sch->schid.cssid,
+ private->sch->schid.ssid,
+ private->sch->schid.sch_no);
+
+ return 0;
+}
+
+static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
+{
+ struct vfio_ccw_private *private =
+ dev_get_drvdata(mdev_parent_dev(mdev));
+
+ VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
+ mdev_uuid(mdev), private->sch->schid.cssid,
+ private->sch->schid.ssid,
+ private->sch->schid.sch_no);
+
+ if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
+ (private->state != VFIO_CCW_STATE_STANDBY)) {
+ if (!vfio_ccw_sch_quiesce(private->sch))
+ private->state = VFIO_CCW_STATE_STANDBY;
+ /* The state will be NOT_OPER on error. */
+ }
+
+ cp_free(&private->cp);
+ private->mdev = NULL;
+ atomic_inc(&private->avail);
+
+ return 0;
+}
+
+static int vfio_ccw_mdev_open(struct mdev_device *mdev)
+{
+ struct vfio_ccw_private *private =
+ dev_get_drvdata(mdev_parent_dev(mdev));
+ unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ int ret;
+
+ private->nb.notifier_call = vfio_ccw_mdev_notifier;
+
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &events, &private->nb);
+ if (ret)
+ return ret;
+
+ ret = vfio_ccw_register_async_dev_regions(private);
+ if (ret)
+ goto out_unregister;
+
+ ret = vfio_ccw_register_schib_dev_regions(private);
+ if (ret)
+ goto out_unregister;
+
+ ret = vfio_ccw_register_crw_dev_regions(private);
+ if (ret)
+ goto out_unregister;
+
+ return ret;
+
+out_unregister:
+ vfio_ccw_unregister_dev_regions(private);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &private->nb);
+ return ret;
+}
+
+static void vfio_ccw_mdev_release(struct mdev_device *mdev)
+{
+ struct vfio_ccw_private *private =
+ dev_get_drvdata(mdev_parent_dev(mdev));
+
+ if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
+ (private->state != VFIO_CCW_STATE_STANDBY)) {
+ if (!vfio_ccw_mdev_reset(mdev))
+ private->state = VFIO_CCW_STATE_STANDBY;
+ /* The state will be NOT_OPER on error. */
+ }
+
+ cp_free(&private->cp);
+ vfio_ccw_unregister_dev_regions(private);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &private->nb);
+}
+
+static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_io_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->io_region;
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
+ struct vfio_ccw_private *private;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
+
+ if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
+
+ switch (index) {
+ case VFIO_CCW_CONFIG_REGION_INDEX:
+ return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
+ default:
+ index -= VFIO_CCW_NUM_REGIONS;
+ return private->region[index].ops->read(private, buf, count,
+ ppos);
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_io_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
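+	/* Do not block if an I/O request is already being processed; let userspace retry */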
+ if (!mutex_trylock(&private->io_mutex))
+ return -EAGAIN;
+
+ region = private->io_region;
+ if (copy_from_user((void *)region + pos, buf, count)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
+ ret = (region->ret_code != 0) ? region->ret_code : count;
+
+out_unlock:
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
+ struct vfio_ccw_private *private;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
+
+ if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
+
+ switch (index) {
+ case VFIO_CCW_CONFIG_REGION_INDEX:
+ return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
+ default:
+ index -= VFIO_CCW_NUM_REGIONS;
+ return private->region[index].ops->write(private, buf, count,
+ ppos);
+ }
+
+ return -EINVAL;
+}
+
+static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
+ struct mdev_device *mdev)
+{
+ struct vfio_ccw_private *private;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
+ info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
+ info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
+ info->num_irqs = VFIO_CCW_NUM_IRQS;
+
+ return 0;
+}
+
+static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
+ struct mdev_device *mdev,
+ unsigned long arg)
+{
+ struct vfio_ccw_private *private;
+ int i;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
+ switch (info->index) {
+ case VFIO_CCW_CONFIG_REGION_INDEX:
+ info->offset = 0;
+ info->size = sizeof(struct ccw_io_region);
+ info->flags = VFIO_REGION_INFO_FLAG_READ
+ | VFIO_REGION_INFO_FLAG_WRITE;
+ return 0;
+ default: /* all other regions are handled via capability chain */
+ {
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1 };
+ int ret;
+
+ if (info->index >=
+ VFIO_CCW_NUM_REGIONS + private->num_regions)
+ return -EINVAL;
+
+ info->index = array_index_nospec(info->index,
+ VFIO_CCW_NUM_REGIONS +
+ private->num_regions);
+
+ i = info->index - VFIO_CCW_NUM_REGIONS;
+
+ info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
+ info->size = private->region[i].size;
+ info->flags = private->region[i].flags;
+
+ cap_type.type = private->region[i].type;
+ cap_type.subtype = private->region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
+
+ info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info->argsz < sizeof(*info) + caps.size) {
+ info->argsz = sizeof(*info) + caps.size;
+ info->cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(*info));
+ if (copy_to_user((void __user *)arg + sizeof(*info),
+ caps.buf, caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info->cap_offset = sizeof(*info);
+ }
+
+ kfree(caps.buf);
+
+ }
+ }
+ return 0;
+}
+
+static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
+{
+ switch (info->index) {
+ case VFIO_CCW_IO_IRQ_INDEX:
+ case VFIO_CCW_CRW_IRQ_INDEX:
+ info->count = 1;
+ info->flags = VFIO_IRQ_INFO_EVENTFD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
+ uint32_t flags,
+ uint32_t index,
+ void __user *data)
+{
+ struct vfio_ccw_private *private;
+ struct eventfd_ctx **ctx;
+
+ if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
+ return -EINVAL;
+
+ private = dev_get_drvdata(mdev_parent_dev(mdev));
+
+ switch (index) {
+ case VFIO_CCW_IO_IRQ_INDEX:
+ ctx = &private->io_trigger;
+ break;
+ case VFIO_CCW_CRW_IRQ_INDEX:
+ ctx = &private->crw_trigger;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_NONE:
+ {
+ if (*ctx)
+ eventfd_signal(*ctx, 1);
+ return 0;
+ }
+ case VFIO_IRQ_SET_DATA_BOOL:
+ {
+ uint8_t trigger;
+
+ if (get_user(trigger, (uint8_t __user *)data))
+ return -EFAULT;
+
+ if (trigger && *ctx)
+ eventfd_signal(*ctx, 1);
+ return 0;
+ }
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ {
+ int32_t fd;
+
+ if (get_user(fd, (int32_t __user *)data))
+ return -EFAULT;
+
+ if (fd == -1) {
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ } else if (fd >= 0) {
+ struct eventfd_ctx *efdctx;
+
+ efdctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+
+ *ctx = efdctx;
+ } else
+ return -EINVAL;
+
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
+ unsigned int subtype,
+ const struct vfio_ccw_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_ccw_region *region;
+
+ region = krealloc(private->region,
+ (private->num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ private->region = region;
+ private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
+ private->region[private->num_regions].subtype = subtype;
+ private->region[private->num_regions].ops = ops;
+ private->region[private->num_regions].size = size;
+ private->region[private->num_regions].flags = flags;
+ private->region[private->num_regions].data = data;
+
+ private->num_regions++;
+
+ return 0;
+}
+
+void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
+{
+ int i;
+
+ for (i = 0; i < private->num_regions; i++)
+ private->region[i].ops->release(private, &private->region[i]);
+ private->num_regions = 0;
+ kfree(private->region);
+ private->region = NULL;
+}
+
+static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ unsigned long minsz;
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ ret = vfio_ccw_mdev_get_device_info(&info, mdev);
+ if (ret)
+ return ret;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+ }
+ case VFIO_DEVICE_GET_REGION_INFO:
+ {
+ struct vfio_region_info info;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
+ if (ret)
+ return ret;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+ }
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
+ return -EINVAL;
+
+ ret = vfio_ccw_mdev_get_irq_info(&info);
+ if (ret)
+ return ret;
+
+ if (info.count == -1)
+ return -EINVAL;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+ }
+ case VFIO_DEVICE_SET_IRQS:
+ {
+ struct vfio_irq_set hdr;
+ size_t data_size;
+ void __user *data;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
+ VFIO_CCW_NUM_IRQS,
+ &data_size);
+ if (ret)
+ return ret;
+
+ data = (void __user *)(arg + minsz);
+ return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
+ }
+ case VFIO_DEVICE_RESET:
+ return vfio_ccw_mdev_reset(mdev);
+ default:
+ return -ENOTTY;
+ }
+}
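+
+/*
+ * Illustrative sketch, not part of the original patch: how userspace
+ * might probe the main I/O region through the ioctl above.  device_fd
+ * is an assumed, already-opened vfio device file descriptor.
+ *
+ *	struct vfio_region_info info = {
+ *		.argsz = sizeof(info),
+ *		.index = VFIO_CCW_CONFIG_REGION_INDEX,
+ *	};
+ *
+ *	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
+ *		return -errno;
+ *
+ * On success, info.offset and info.size describe where pread()/pwrite()
+ * on the device fd reach the ccw_io_region.
+ */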
+
+static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
+ .owner = THIS_MODULE,
+ .supported_type_groups = mdev_type_groups,
+ .create = vfio_ccw_mdev_create,
+ .remove = vfio_ccw_mdev_remove,
+ .open = vfio_ccw_mdev_open,
+ .release = vfio_ccw_mdev_release,
+ .read = vfio_ccw_mdev_read,
+ .write = vfio_ccw_mdev_write,
+ .ioctl = vfio_ccw_mdev_ioctl,
+};
+
+int vfio_ccw_mdev_reg(struct subchannel *sch)
+{
+ return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
+}
+
+void vfio_ccw_mdev_unreg(struct subchannel *sch)
+{
+ mdev_unregister_device(&sch->dev);
+}
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
new file mode 100644
index 000000000..8723156b2
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Private definitions for the vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
+ * Cornelia Huck <cohuck@redhat.com>
+ */
+
+#ifndef _VFIO_CCW_PRIVATE_H_
+#define _VFIO_CCW_PRIVATE_H_
+
+#include <linux/completion.h>
+#include <linux/eventfd.h>
+#include <linux/workqueue.h>
+#include <linux/vfio_ccw.h>
+#include <asm/crw.h>
+#include <asm/debug.h>
+
+#include "css.h"
+#include "vfio_ccw_cp.h"
+
+#define VFIO_CCW_OFFSET_SHIFT 10
+#define VFIO_CCW_OFFSET_TO_INDEX(off) (off >> VFIO_CCW_OFFSET_SHIFT)
+#define VFIO_CCW_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_CCW_OFFSET_SHIFT)
+#define VFIO_CCW_OFFSET_MASK (((u64)(1) << VFIO_CCW_OFFSET_SHIFT) - 1)
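+
+/*
+ * Worked example (annotation, not part of the original patch): with the
+ * 10-bit shift above, region index 0 covers device-file offsets
+ * 0x000-0x3ff, index 1 starts at 0x400 and index 2 at 0x800.  So
+ * VFIO_CCW_OFFSET_TO_INDEX(0x801) is 2, and (0x801 & VFIO_CCW_OFFSET_MASK)
+ * recovers the byte offset 0x001 within that region.
+ */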
+
+/* capability chain handling similar to vfio-pci */
+struct vfio_ccw_private;
+struct vfio_ccw_region;
+
+struct vfio_ccw_regops {
+ ssize_t (*read)(struct vfio_ccw_private *private, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count, loff_t *ppos);
+ void (*release)(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region);
+};
+
+struct vfio_ccw_region {
+ u32 type;
+ u32 subtype;
+ const struct vfio_ccw_regops *ops;
+ void *data;
+ size_t size;
+ u32 flags;
+};
+
+int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
+ unsigned int subtype,
+ const struct vfio_ccw_regops *ops,
+ size_t size, u32 flags, void *data);
+void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private);
+
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
+int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private);
+int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private);
+
+struct vfio_ccw_crw {
+ struct list_head next;
+ struct crw crw;
+};
+
+/**
+ * struct vfio_ccw_private
+ * @sch: pointer to the subchannel
+ * @state: internal state of the device
+ * @completion: synchronization helper of the I/O completion
+ * @avail: available for creating a mediated device
+ * @mdev: pointer to the mediated device
+ * @nb: notifier for vfio events
+ * @io_region: MMIO region for passing I/O arguments and results
+ * @io_mutex: protect against concurrent update of I/O regions
+ * @region: additional regions for other subchannel operations
+ * @cmd_region: MMIO region for asynchronous I/O commands other than START
+ * @schib_region: MMIO region for SCHIB information
+ * @crw_region: MMIO region for getting channel report words
+ * @num_regions: number of additional regions
+ * @cp: channel program for the current I/O operation
+ * @irb: irb (interruption response block) info received from interrupt
+ * @scsw: scsw (subchannel status word) info
+ * @crw: list of pending channel report words to be forwarded to userspace
+ * @io_trigger: eventfd ctx for signaling userspace I/O results
+ * @crw_trigger: eventfd ctx for signaling userspace CRW availability
+ * @io_work: work for deferred processing of I/O handling
+ * @crw_work: work for deferred processing of channel report words
+ */
+struct vfio_ccw_private {
+ struct subchannel *sch;
+ int state;
+ struct completion *completion;
+ atomic_t avail;
+ struct mdev_device *mdev;
+ struct notifier_block nb;
+ struct ccw_io_region *io_region;
+ struct mutex io_mutex;
+ struct vfio_ccw_region *region;
+ struct ccw_cmd_region *cmd_region;
+ struct ccw_schib_region *schib_region;
+ struct ccw_crw_region *crw_region;
+ int num_regions;
+
+ struct channel_program cp;
+ struct irb irb;
+ union scsw scsw;
+ struct list_head crw;
+
+ struct eventfd_ctx *io_trigger;
+ struct eventfd_ctx *crw_trigger;
+ struct work_struct io_work;
+ struct work_struct crw_work;
+} __aligned(8);
+
+extern int vfio_ccw_mdev_reg(struct subchannel *sch);
+extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
+
+extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
+
+/*
+ * States of the device state machine.
+ */
+enum vfio_ccw_state {
+ VFIO_CCW_STATE_NOT_OPER,
+ VFIO_CCW_STATE_STANDBY,
+ VFIO_CCW_STATE_IDLE,
+ VFIO_CCW_STATE_CP_PROCESSING,
+ VFIO_CCW_STATE_CP_PENDING,
+ /* last element! */
+ NR_VFIO_CCW_STATES
+};
+
+/*
+ * Asynchronous events of the device state machine.
+ */
+enum vfio_ccw_event {
+ VFIO_CCW_EVENT_NOT_OPER,
+ VFIO_CCW_EVENT_IO_REQ,
+ VFIO_CCW_EVENT_INTERRUPT,
+ VFIO_CCW_EVENT_ASYNC_REQ,
+ /* last element! */
+ NR_VFIO_CCW_EVENTS
+};
+
+/*
+ * Actions called through the jumptable.
+ */
+typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event);
+extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS];
+
+static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
+ int event)
+{
+ trace_vfio_ccw_fsm_event(private->sch->schid, private->state, event);
+ vfio_ccw_jumptable[private->state][event](private, event);
+}
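+
+/*
+ * Illustrative sketch, not part of the original patch: the jumptable is
+ * indexed by [state][event], so an interrupt arriving while the device
+ * is idle is dispatched as
+ *
+ *	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
+ *
+ * which ends up calling
+ * vfio_ccw_jumptable[VFIO_CCW_STATE_IDLE][VFIO_CCW_EVENT_INTERRUPT];
+ * the handlers behind the table live in vfio_ccw_fsm.c.
+ */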
+
+extern struct workqueue_struct *vfio_ccw_work_q;
+
+/* s390 debug feature, similar to base cio */
+extern debug_info_t *vfio_ccw_debug_msg_id;
+extern debug_info_t *vfio_ccw_debug_trace_id;
+
+#define VFIO_CCW_TRACE_EVENT(imp, txt) \
+ debug_text_event(vfio_ccw_debug_trace_id, imp, txt)
+
+#define VFIO_CCW_MSG_EVENT(imp, args...) \
+ debug_sprintf_event(vfio_ccw_debug_msg_id, imp, ##args)
+
+static inline void VFIO_CCW_HEX_EVENT(int level, void *data, int length)
+{
+ debug_event(vfio_ccw_debug_trace_id, level, data, length);
+}
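+
+/*
+ * Illustrative sketch, not part of the original patch: typical use of
+ * the helpers above when logging per-subchannel activity.  The message
+ * text is made up, and sch/ret are assumed local variables.
+ *
+ *	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: quiesce ret=%d\n",
+ *			   sch->schid.cssid, sch->schid.ssid,
+ *			   sch->schid.sch_no, ret);
+ *	VFIO_CCW_TRACE_EVENT(2, "quiesce");
+ *	VFIO_CCW_HEX_EVENT(2, &sch->schib, sizeof(sch->schib));
+ */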
+
+#endif
diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c
new file mode 100644
index 000000000..4a0205905
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoint definitions for vfio_ccw
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Eric Farman <farman@linux.ibm.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_chp_event);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event);
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request);
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644
index 000000000..62fb30598
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Tracepoints for vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Halil Pasic <pasic@linux.vnet.ibm.com>
+ */
+
+#include "cio.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_ccw
+
+#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VFIO_CCW_TRACE_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_ccw_chp_event,
+ TP_PROTO(struct subchannel_id schid,
+ int mask,
+ int event),
+ TP_ARGS(schid, mask, event),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, sch_no)
+ __field(int, mask)
+ __field(int, event)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->sch_no = schid.sch_no;
+ __entry->mask = mask;
+ __entry->event = event;
+ ),
+
+ TP_printk("schid=%x.%x.%04x mask=0x%x event=%d",
+ __entry->cssid,
+ __entry->ssid,
+ __entry->sch_no,
+ __entry->mask,
+ __entry->event)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_async_request,
+ TP_PROTO(struct subchannel_id schid,
+ int command,
+ int errno),
+ TP_ARGS(schid, command, errno),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, sch_no)
+ __field(int, command)
+ __field(int, errno)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->sch_no = schid.sch_no;
+ __entry->command = command;
+ __entry->errno = errno;
+ ),
+
+ TP_printk("schid=%x.%x.%04x command=0x%x errno=%d",
+ __entry->cssid,
+ __entry->ssid,
+ __entry->sch_no,
+ __entry->command,
+ __entry->errno)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_event,
+ TP_PROTO(struct subchannel_id schid, int state, int event),
+ TP_ARGS(schid, state, event),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, schno)
+ __field(int, state)
+ __field(int, event)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->schno = schid.sch_no;
+ __entry->state = state;
+ __entry->event = event;
+ ),
+
+ TP_printk("schid=%x.%x.%04x state=%d event=%d",
+ __entry->cssid, __entry->ssid, __entry->schno,
+ __entry->state,
+ __entry->event)
+);
+
+TRACE_EVENT(vfio_ccw_fsm_io_request,
+ TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
+ TP_ARGS(fctl, schid, errno, errstr),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, sch_no)
+ __field(int, fctl)
+ __field(int, errno)
+ __field(char*, errstr)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->sch_no = schid.sch_no;
+ __entry->fctl = fctl;
+ __entry->errno = errno;
+ __entry->errstr = errstr;
+ ),
+
+ TP_printk("schid=%x.%x.%04x fctl=0x%x errno=%d info=%s",
+ __entry->cssid,
+ __entry->ssid,
+ __entry->sch_no,
+ __entry->fctl,
+ __entry->errno,
+ __entry->errstr)
+);
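+
+/*
+ * Annotation, not part of the original patch: with tracing enabled these
+ * events appear under the "vfio_ccw" trace system, e.g.
+ *
+ *	echo 1 > /sys/kernel/tracing/events/vfio_ccw/vfio_ccw_fsm_event/enable
+ *	cat /sys/kernel/tracing/trace_pipe
+ *
+ * and are printed in the TP_printk() formats defined above.
+ */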
+
+#endif /* _VFIO_CCW_TRACE_ */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vfio_ccw_trace
+
+#include <trace/define_trace.h>