authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:27:49 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:27:49 +0000
commitace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
treeb2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/s390/block
parentInitial commit. (diff)
downloadlinux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/s390/block')
-rw-r--r--  drivers/s390/block/Kconfig            74
-rw-r--r--  drivers/s390/block/Makefile           22
-rw-r--r--  drivers/s390/block/dasd.c           4124
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c  2859
-rw-r--r--  drivers/s390/block/dasd_alias.c      981
-rw-r--r--  drivers/s390/block/dasd_devmap.c    2623
-rw-r--r--  drivers/s390/block/dasd_diag.c       695
-rw-r--r--  drivers/s390/block/dasd_diag.h        81
-rw-r--r--  drivers/s390/block/dasd_eckd.c      7027
-rw-r--r--  drivers/s390/block/dasd_eckd.h       711
-rw-r--r--  drivers/s390/block/dasd_eer.c        725
-rw-r--r--  drivers/s390/block/dasd_erp.c        203
-rw-r--r--  drivers/s390/block/dasd_fba.c        849
-rw-r--r--  drivers/s390/block/dasd_fba.h         77
-rw-r--r--  drivers/s390/block/dasd_genhd.c      203
-rw-r--r--  drivers/s390/block/dasd_int.h       1496
-rw-r--r--  drivers/s390/block/dasd_ioctl.c      735
-rw-r--r--  drivers/s390/block/dasd_proc.c       368
-rw-r--r--  drivers/s390/block/dcssblk.c        1032
-rw-r--r--  drivers/s390/block/scm_blk.c         588
-rw-r--r--  drivers/s390/block/scm_blk.h          76
-rw-r--r--  drivers/s390/block/scm_drv.c          91
22 files changed, 25640 insertions, 0 deletions
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
new file mode 100644
index 0000000000..e3710a762a
--- /dev/null
+++ b/drivers/s390/block/Kconfig
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: GPL-2.0
+comment "S/390 block device drivers"
+ depends on S390 && BLOCK
+
+config DCSSBLK
+ def_tristate m
+ select FS_DAX_LIMITED
+ select DAX
+ prompt "DCSSBLK support"
+ depends on S390 && BLOCK
+ help
+ Support for DCSS (discontiguous saved segments) block devices.
+
+config DASD
+ def_tristate y
+ prompt "Support for DASD devices"
+ depends on CCW && BLOCK
+ help
+ Enable this option if you want to access DASDs directly using the
+ S/390 channel subsystem commands. This is necessary for running
+ natively on a single image or an LPAR.
+
+config DASD_PROFILE
+ def_bool y
+ prompt "Profiling support for dasd devices"
+ depends on DASD
+ help
+ Enable this option if you want to see profiling information
+ in /proc/dasd/statistics.
+
+config DASD_ECKD
+ def_tristate y
+ prompt "Support for ECKD Disks"
+ depends on DASD
+ help
+ ECKD devices are the most commonly used devices. You should enable
+ this option unless you are sure that you have no ECKD devices.
+
+config DASD_FBA
+ def_tristate y
+ prompt "Support for FBA Disks"
+ depends on DASD
+ help
+ Select this option to be able to access FBA devices. It is safe to
+ say "Y".
+
+config DASD_DIAG
+ def_tristate y
+ prompt "Support for DIAG access to Disks"
+ depends on DASD
+ help
+ Select this option if you want to use the Diagnose 250 command to
+ access disks under z/VM. If you are not running under z/VM or are
+ unsure what it is, say "N".
+
+config DASD_EER
+ def_bool y
+ prompt "Extended error reporting (EER)"
+ depends on DASD
+ help
+ This driver provides a character device interface to the DASD
+ extended error reporting (EER) facility. It is only needed if you
+ want to use applications written for the EER facility.
+
+config SCM_BLOCK
+ def_tristate m
+ prompt "Support for Storage Class Memory"
+ depends on S390 && BLOCK && EADM_SCH && SCM_BUS
+ help
+ Block device driver for Storage Class Memory (SCM). This driver
+ provides a block device interface for each available SCM increment.
+
+ To compile this driver as a module, choose M here: the
+ module will be called scm_block.
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
new file mode 100644
index 0000000000..a0a54d2f06
--- /dev/null
+++ b/drivers/s390/block/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# S/390 block devices
+#
+
+dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o
+dasd_fba_mod-objs := dasd_fba.o
+dasd_diag_mod-objs := dasd_diag.o
+dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
+ dasd_genhd.o dasd_erp.o
+ifdef CONFIG_DASD_EER
+dasd_mod-objs += dasd_eer.o
+endif
+
+obj-$(CONFIG_DASD) += dasd_mod.o
+obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
+obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
+obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
+obj-$(CONFIG_DCSSBLK) += dcssblk.o
+
+scm_block-objs := scm_drv.o scm_blk.o
+obj-$(CONFIG_SCM_BLOCK) += scm_block.o
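
In kbuild terms, each <name>-objs list above enumerates the object files linked into the composite module <name>.ko (dasd_eckd_mod.ko, for instance, bundles the ECKD discipline with its 3990 error recovery and alias handling), while the obj-$(CONFIG_...) assignments select built-in (y) or modular (m) linkage according to the Kconfig options from the previous file.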
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
new file mode 100644
index 0000000000..5b11ee9234
--- /dev/null
+++ b/drivers/s390/block/dasd.c
@@ -0,0 +1,4124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+#define KMSG_COMPONENT "dasd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/async.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
+#include <asm/ccwdev.h>
+#include <asm/ebcdic.h>
+#include <asm/idals.h>
+#include <asm/itcw.h>
+#include <asm/diag.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd:"
+
+#include "dasd_int.h"
+/*
+ * SECTION: Constant definitions to be used within this file
+ */
+#define DASD_CHANQ_MAX_SIZE 4
+
+#define DASD_DIAG_MOD "dasd_diag_mod"
+
+/*
+ * SECTION: exported variables of dasd.c
+ */
+debug_info_t *dasd_debug_area;
+EXPORT_SYMBOL(dasd_debug_area);
+static struct dentry *dasd_debugfs_root_entry;
+struct dasd_discipline *dasd_diag_discipline_pointer;
+EXPORT_SYMBOL(dasd_diag_discipline_pointer);
+void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
+
+MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
+MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
+ " Copyright IBM Corp. 2000");
+MODULE_LICENSE("GPL");
+
+/*
+ * SECTION: prototypes for static functions of dasd.c
+ */
+static int dasd_flush_block_queue(struct dasd_block *);
+static void dasd_device_tasklet(unsigned long);
+static void dasd_block_tasklet(unsigned long);
+static void do_kick_device(struct work_struct *);
+static void do_reload_device(struct work_struct *);
+static void do_requeue_requests(struct work_struct *);
+static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
+static void dasd_device_timeout(struct timer_list *);
+static void dasd_block_timeout(struct timer_list *);
+static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
+static void dasd_profile_init(struct dasd_profile *, struct dentry *);
+static void dasd_profile_exit(struct dasd_profile *);
+static void dasd_hosts_init(struct dentry *, struct dasd_device *);
+static void dasd_hosts_exit(struct dasd_device *);
+static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
+ unsigned int);
+/*
+ * SECTION: Operations on the device structure.
+ */
+static wait_queue_head_t dasd_init_waitq;
+static wait_queue_head_t dasd_flush_wq;
+static wait_queue_head_t generic_waitq;
+static wait_queue_head_t shutdown_waitq;
+
+/*
+ * Allocate memory for a new device structure.
+ */
+struct dasd_device *dasd_alloc_device(void)
+{
+ struct dasd_device *device;
+
+ device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
+ if (!device)
+ return ERR_PTR(-ENOMEM);
+
+ /* Get two pages for normal block device operations. */
+ device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
+ if (!device->ccw_mem) {
+ kfree(device);
+ return ERR_PTR(-ENOMEM);
+ }
+ /* Get one page for error recovery. */
+ device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
+ if (!device->erp_mem) {
+ free_pages((unsigned long) device->ccw_mem, 1);
+ kfree(device);
+ return ERR_PTR(-ENOMEM);
+ }
+ /* Get two pages for ese format. */
+ device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
+ if (!device->ese_mem) {
+ free_page((unsigned long) device->erp_mem);
+ free_pages((unsigned long) device->ccw_mem, 1);
+ kfree(device);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
+ dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
+ dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
+ spin_lock_init(&device->mem_lock);
+ atomic_set(&device->tasklet_scheduled, 0);
+ tasklet_init(&device->tasklet, dasd_device_tasklet,
+ (unsigned long) device);
+ INIT_LIST_HEAD(&device->ccw_queue);
+ timer_setup(&device->timer, dasd_device_timeout, 0);
+ INIT_WORK(&device->kick_work, do_kick_device);
+ INIT_WORK(&device->reload_device, do_reload_device);
+ INIT_WORK(&device->requeue_requests, do_requeue_requests);
+ device->state = DASD_STATE_NEW;
+ device->target = DASD_STATE_NEW;
+ mutex_init(&device->state_mutex);
+ spin_lock_init(&device->profile.lock);
+ return device;
+}
+
+/*
+ * Free memory of a device structure.
+ */
+void dasd_free_device(struct dasd_device *device)
+{
+ kfree(device->private);
+ free_pages((unsigned long) device->ese_mem, 1);
+ free_page((unsigned long) device->erp_mem);
+ free_pages((unsigned long) device->ccw_mem, 1);
+ kfree(device);
+}
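
dasd_alloc_device() follows the kernel's ERR_PTR() convention rather than returning NULL on failure. A minimal caller sketch, assuming the usual discipline probe context around it (only the two functions above are real; the surrounding flow is illustrative):

	struct dasd_device *device;

	device = dasd_alloc_device();
	if (IS_ERR(device))
		return PTR_ERR(device);	/* -ENOMEM if any allocation failed */
	/* ... attach discipline data, drive the state machine ... */
	dasd_free_device(device);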
+
+/*
+ * Allocate memory for a new dasd_block structure.
+ */
+struct dasd_block *dasd_alloc_block(void)
+{
+ struct dasd_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_ATOMIC);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+ /* open_count = 0 means device online but not in use */
+ atomic_set(&block->open_count, -1);
+
+ atomic_set(&block->tasklet_scheduled, 0);
+ tasklet_init(&block->tasklet, dasd_block_tasklet,
+ (unsigned long) block);
+ INIT_LIST_HEAD(&block->ccw_queue);
+ spin_lock_init(&block->queue_lock);
+ INIT_LIST_HEAD(&block->format_list);
+ spin_lock_init(&block->format_lock);
+ timer_setup(&block->timer, dasd_block_timeout, 0);
+ spin_lock_init(&block->profile.lock);
+
+ return block;
+}
+EXPORT_SYMBOL_GPL(dasd_alloc_block);
+
+/*
+ * Free memory of a dasd_block structure.
+ */
+void dasd_free_block(struct dasd_block *block)
+{
+ kfree(block);
+}
+EXPORT_SYMBOL_GPL(dasd_free_block);
+
+/*
+ * Make a new device known to the system.
+ */
+static int dasd_state_new_to_known(struct dasd_device *device)
+{
+ /*
+ * As long as the device is not in state DASD_STATE_NEW we want to
+ * keep the reference count > 0.
+ */
+ dasd_get_device(device);
+ device->state = DASD_STATE_KNOWN;
+ return 0;
+}
+
+/*
+ * Let the system forget about a device.
+ */
+static int dasd_state_known_to_new(struct dasd_device *device)
+{
+ /* Disable extended error reporting for this device. */
+ dasd_eer_disable(device);
+ device->state = DASD_STATE_NEW;
+
+ /* Give up reference we took in dasd_state_new_to_known. */
+ dasd_put_device(device);
+ return 0;
+}
+
+static struct dentry *dasd_debugfs_setup(const char *name,
+ struct dentry *base_dentry)
+{
+ struct dentry *pde;
+
+ if (!base_dentry)
+ return NULL;
+ pde = debugfs_create_dir(name, base_dentry);
+ if (!pde || IS_ERR(pde))
+ return NULL;
+ return pde;
+}
+
+/*
+ * Set up gendisk, debugfs entries and the debug area for the device.
+ */
+static int dasd_state_known_to_basic(struct dasd_device *device)
+{
+ struct dasd_block *block = device->block;
+ int rc = 0;
+
+ /* Allocate and register gendisk structure. */
+ if (block) {
+ rc = dasd_gendisk_alloc(block);
+ if (rc)
+ return rc;
+ block->debugfs_dentry =
+ dasd_debugfs_setup(block->gdp->disk_name,
+ dasd_debugfs_root_entry);
+ dasd_profile_init(&block->profile, block->debugfs_dentry);
+ if (dasd_global_profile_level == DASD_PROFILE_ON)
+ dasd_profile_on(&device->block->profile);
+ }
+ device->debugfs_dentry =
+ dasd_debugfs_setup(dev_name(&device->cdev->dev),
+ dasd_debugfs_root_entry);
+ dasd_profile_init(&device->profile, device->debugfs_dentry);
+ dasd_hosts_init(device->debugfs_dentry, device);
+
+ /* register 'device' debug area, used for all DBF_DEV_XXX calls */
+ device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
+ 8 * sizeof(long));
+ debug_register_view(device->debug_area, &debug_sprintf_view);
+ debug_set_level(device->debug_area, DBF_WARNING);
+ DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
+
+ device->state = DASD_STATE_BASIC;
+
+ return rc;
+}
+
+/*
+ * Tear down gendisk, debugfs entries and the debug area. Terminate any running i/o.
+ */
+static int dasd_state_basic_to_known(struct dasd_device *device)
+{
+ int rc;
+
+ if (device->discipline->basic_to_known) {
+ rc = device->discipline->basic_to_known(device);
+ if (rc)
+ return rc;
+ }
+
+ if (device->block) {
+ dasd_profile_exit(&device->block->profile);
+ debugfs_remove(device->block->debugfs_dentry);
+ dasd_gendisk_free(device->block);
+ dasd_block_clear_timer(device->block);
+ }
+ rc = dasd_flush_device_queue(device);
+ if (rc)
+ return rc;
+ dasd_device_clear_timer(device);
+ dasd_profile_exit(&device->profile);
+ dasd_hosts_exit(device);
+ debugfs_remove(device->debugfs_dentry);
+ DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
+ if (device->debug_area != NULL) {
+ debug_unregister(device->debug_area);
+ device->debug_area = NULL;
+ }
+ device->state = DASD_STATE_KNOWN;
+ return 0;
+}
+
+/*
+ * Do the initial analysis. The do_analysis function may return
+ * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
+ * until the discipline decides to continue the startup sequence
+ * by calling the function dasd_change_state. The ECKD discipline
+ * uses this to start a ccw that detects the format. The completion
+ * interrupt for this detection ccw uses the kernel event daemon to
+ * trigger the call to dasd_change_state. All this is done in the
+ * discipline code, see dasd_eckd.c.
+ * After the analysis ccw is done (do_analysis returned 0) the block
+ * device is set up.
+ * In case the analysis returns an error, the device setup is stopped
+ * (a fake disk was already added to allow formatting).
+ */
+static int dasd_state_basic_to_ready(struct dasd_device *device)
+{
+ int rc;
+ struct dasd_block *block;
+ struct gendisk *disk;
+
+ rc = 0;
+ block = device->block;
+ /* make disk known with correct capacity */
+ if (block) {
+ if (block->base->discipline->do_analysis != NULL)
+ rc = block->base->discipline->do_analysis(block);
+ if (rc) {
+ if (rc != -EAGAIN) {
+ device->state = DASD_STATE_UNFMT;
+ disk = device->block->gdp;
+ kobject_uevent(&disk_to_dev(disk)->kobj,
+ KOBJ_CHANGE);
+ goto out;
+ }
+ return rc;
+ }
+ if (device->discipline->setup_blk_queue)
+ device->discipline->setup_blk_queue(block);
+ set_capacity(block->gdp,
+ block->blocks << block->s2b_shift);
+ device->state = DASD_STATE_READY;
+ rc = dasd_scan_partitions(block);
+ if (rc) {
+ device->state = DASD_STATE_BASIC;
+ return rc;
+ }
+ } else {
+ device->state = DASD_STATE_READY;
+ }
+out:
+ if (device->discipline->basic_to_ready)
+ rc = device->discipline->basic_to_ready(device);
+ return rc;
+}
+
+static inline
+int _wait_for_empty_queues(struct dasd_device *device)
+{
+ if (device->block)
+ return list_empty(&device->ccw_queue) &&
+ list_empty(&device->block->ccw_queue);
+ else
+ return list_empty(&device->ccw_queue);
+}
+
+/*
+ * Remove device from block device layer. Destroy dirty buffers.
+ * Forget format information. Check if the target level is basic
+ * and if it is, create a fake disk for formatting.
+ */
+static int dasd_state_ready_to_basic(struct dasd_device *device)
+{
+ int rc;
+
+ device->state = DASD_STATE_BASIC;
+ if (device->block) {
+ struct dasd_block *block = device->block;
+ rc = dasd_flush_block_queue(block);
+ if (rc) {
+ device->state = DASD_STATE_READY;
+ return rc;
+ }
+ dasd_destroy_partitions(block);
+ block->blocks = 0;
+ block->bp_block = 0;
+ block->s2b_shift = 0;
+ }
+ return 0;
+}
+
+/*
+ * Back to basic.
+ */
+static int dasd_state_unfmt_to_basic(struct dasd_device *device)
+{
+ device->state = DASD_STATE_BASIC;
+ return 0;
+}
+
+/*
+ * Make the device online and schedule the bottom half to start
+ * the requeueing of requests from the linux request queue to the
+ * ccw queue.
+ */
+static int
+dasd_state_ready_to_online(struct dasd_device * device)
+{
+ device->state = DASD_STATE_ONLINE;
+ if (device->block) {
+ dasd_schedule_block_bh(device->block);
+ if ((device->features & DASD_FEATURE_USERAW)) {
+ kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
+ KOBJ_CHANGE);
+ return 0;
+ }
+ disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
+ }
+ return 0;
+}
+
+/*
+ * Stop the requeueing of requests (reverse of ready_to_online).
+ */
+static int dasd_state_online_to_ready(struct dasd_device *device)
+{
+ int rc;
+
+ if (device->discipline->online_to_ready) {
+ rc = device->discipline->online_to_ready(device);
+ if (rc)
+ return rc;
+ }
+
+ device->state = DASD_STATE_READY;
+ if (device->block && !(device->features & DASD_FEATURE_USERAW))
+ disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
+ return 0;
+}
+
+/*
+ * Device startup state changes.
+ */
+static int dasd_increase_state(struct dasd_device *device)
+{
+ int rc;
+
+ rc = 0;
+ if (device->state == DASD_STATE_NEW &&
+ device->target >= DASD_STATE_KNOWN)
+ rc = dasd_state_new_to_known(device);
+
+ if (!rc &&
+ device->state == DASD_STATE_KNOWN &&
+ device->target >= DASD_STATE_BASIC)
+ rc = dasd_state_known_to_basic(device);
+
+ if (!rc &&
+ device->state == DASD_STATE_BASIC &&
+ device->target >= DASD_STATE_READY)
+ rc = dasd_state_basic_to_ready(device);
+
+ if (!rc &&
+ device->state == DASD_STATE_UNFMT &&
+ device->target > DASD_STATE_UNFMT)
+ rc = -EPERM;
+
+ if (!rc &&
+ device->state == DASD_STATE_READY &&
+ device->target >= DASD_STATE_ONLINE)
+ rc = dasd_state_ready_to_online(device);
+
+ return rc;
+}
+
+/*
+ * Device shutdown state changes.
+ */
+static int dasd_decrease_state(struct dasd_device *device)
+{
+ int rc;
+
+ rc = 0;
+ if (device->state == DASD_STATE_ONLINE &&
+ device->target <= DASD_STATE_READY)
+ rc = dasd_state_online_to_ready(device);
+
+ if (!rc &&
+ device->state == DASD_STATE_READY &&
+ device->target <= DASD_STATE_BASIC)
+ rc = dasd_state_ready_to_basic(device);
+
+ if (!rc &&
+ device->state == DASD_STATE_UNFMT &&
+ device->target <= DASD_STATE_BASIC)
+ rc = dasd_state_unfmt_to_basic(device);
+
+ if (!rc &&
+ device->state == DASD_STATE_BASIC &&
+ device->target <= DASD_STATE_KNOWN)
+ rc = dasd_state_basic_to_known(device);
+
+ if (!rc &&
+ device->state == DASD_STATE_KNOWN &&
+ device->target <= DASD_STATE_NEW)
+ rc = dasd_state_known_to_new(device);
+
+ return rc;
+}
+
+/*
+ * This is the main startup/shutdown routine.
+ */
+static void dasd_change_state(struct dasd_device *device)
+{
+ int rc;
+
+ if (device->state == device->target)
+ /* Already where we want to go today... */
+ return;
+ if (device->state < device->target)
+ rc = dasd_increase_state(device);
+ else
+ rc = dasd_decrease_state(device);
+ if (rc == -EAGAIN)
+ return;
+ if (rc)
+ device->target = device->state;
+
+ /* let user-space know that the device status changed */
+ kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
+
+ if (device->state == device->target)
+ wake_up(&dasd_init_waitq);
+}
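+
+/*
+ * Together, dasd_increase_state() and dasd_decrease_state() move a device
+ * along the ladder NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE. UNFMT is
+ * a dead end entered from BASIC when the analysis fails: stepping up from
+ * UNFMT yields -EPERM, and the only way out is back down to BASIC.
+ */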
+
+/*
+ * Kick starter for devices that did not complete the startup/shutdown
+ * procedure or were sleeping because of a pending state.
+ * dasd_kick_device will schedule a call to do_kick_device via the kernel
+ * event daemon.
+ */
+static void do_kick_device(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
+ mutex_lock(&device->state_mutex);
+ dasd_change_state(device);
+ mutex_unlock(&device->state_mutex);
+ dasd_schedule_device_bh(device);
+ dasd_put_device(device);
+}
+
+void dasd_kick_device(struct dasd_device *device)
+{
+ dasd_get_device(device);
+ /* queue call to dasd_kick_device to the kernel event daemon. */
+ if (!schedule_work(&device->kick_work))
+ dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_kick_device);
+
+/*
+ * dasd_reload_device will schedule a call to do_reload_device via the kernel
+ * event daemon.
+ */
+static void do_reload_device(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ reload_device);
+ device->discipline->reload(device);
+ dasd_put_device(device);
+}
+
+void dasd_reload_device(struct dasd_device *device)
+{
+ dasd_get_device(device);
+ /* queue call to dasd_reload_device to the kernel event daemon. */
+ if (!schedule_work(&device->reload_device))
+ dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_reload_device);
+
+/*
+ * Set the target state for a device and start the state change.
+ */
+void dasd_set_target_state(struct dasd_device *device, int target)
+{
+ dasd_get_device(device);
+ mutex_lock(&device->state_mutex);
+ /* If we are in probeonly mode stop at DASD_STATE_READY. */
+ if (dasd_probeonly && target > DASD_STATE_READY)
+ target = DASD_STATE_READY;
+ if (device->target != target) {
+ if (device->state == target)
+ wake_up(&dasd_init_waitq);
+ device->target = target;
+ }
+ if (device->state != device->target)
+ dasd_change_state(device);
+ mutex_unlock(&device->state_mutex);
+ dasd_put_device(device);
+}
+
+/*
+ * Wait for the device to reach its target state.
+ */
+static inline int _wait_for_device(struct dasd_device *device)
+{
+ return (device->state == device->target);
+}
+
+void dasd_enable_device(struct dasd_device *device)
+{
+ dasd_set_target_state(device, DASD_STATE_ONLINE);
+ if (device->state <= DASD_STATE_KNOWN)
+ /* No discipline for device found. */
+ dasd_set_target_state(device, DASD_STATE_NEW);
+ /* Now wait for the devices to come up. */
+ wait_event(dasd_init_waitq, _wait_for_device(device));
+
+ dasd_reload_device(device);
+ if (device->discipline->kick_validate)
+ device->discipline->kick_validate(device);
+}
+EXPORT_SYMBOL(dasd_enable_device);
+
+/*
+ * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
+ */
+
+unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
+
+#ifdef CONFIG_DASD_PROFILE
+struct dasd_profile dasd_global_profile = {
+ .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
+};
+static struct dentry *dasd_debugfs_global_entry;
+
+/*
+ * Add profiling information for cqr before execution.
+ */
+static void dasd_profile_start(struct dasd_block *block,
+ struct dasd_ccw_req *cqr,
+ struct request *req)
+{
+ struct list_head *l;
+ unsigned int counter;
+ struct dasd_device *device;
+
+ /* count the length of the chanq for statistics */
+ counter = 0;
+ if (dasd_global_profile_level || block->profile.data)
+ list_for_each(l, &block->ccw_queue)
+ if (++counter >= 31)
+ break;
+
+ spin_lock(&dasd_global_profile.lock);
+ if (dasd_global_profile.data) {
+ dasd_global_profile.data->dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ dasd_global_profile.data->dasd_read_nr_req[counter]++;
+ }
+ spin_unlock(&dasd_global_profile.lock);
+
+ spin_lock(&block->profile.lock);
+ if (block->profile.data) {
+ block->profile.data->dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ block->profile.data->dasd_read_nr_req[counter]++;
+ }
+ spin_unlock(&block->profile.lock);
+
+ /*
+ * We count the request for the start device, even though it may run on
+ * some other device due to error recovery. This way we make sure that
+ * we count each request only once.
+ */
+ device = cqr->startdev;
+ if (!device->profile.data)
+ return;
+
+ spin_lock(get_ccwdev_lock(device->cdev));
+ counter = 1; /* request is not yet queued on the start device */
+ list_for_each(l, &device->ccw_queue)
+ if (++counter >= 31)
+ break;
+ spin_unlock(get_ccwdev_lock(device->cdev));
+
+ spin_lock(&device->profile.lock);
+ device->profile.data->dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ device->profile.data->dasd_read_nr_req[counter]++;
+ spin_unlock(&device->profile.lock);
+}
+
+/*
+ * Add profiling information for cqr after execution.
+ */
+
+#define dasd_profile_counter(value, index) \
+{ \
+ for (index = 0; index < 31 && value >> (2+index); index++) \
+ ; \
+}
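
The macro above files a value into a coarse power-of-two histogram bucket: index 0 collects values 0-3, and each index i >= 1 collects values in [2^(i+1), 2^(i+2)). A standalone userspace sketch of the same rule (dasd_bucket is a hypothetical name used only for this illustration):

	#include <stdio.h>

	/* Same loop as dasd_profile_counter(), wrapped in a function. */
	static int dasd_bucket(unsigned long value)
	{
		int index;

		for (index = 0; index < 31 && value >> (2 + index); index++)
			;
		return index;
	}

	int main(void)
	{
		/* prints "0 1 5": 3 -> bucket 0, 4 -> bucket 1, 100 -> bucket 5 */
		printf("%d %d %d\n", dasd_bucket(3), dasd_bucket(4), dasd_bucket(100));
		return 0;
	}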
+
+static void dasd_profile_end_add_data(struct dasd_profile_info *data,
+ int is_alias,
+ int is_tpm,
+ int is_read,
+ long sectors,
+ int sectors_ind,
+ int tottime_ind,
+ int tottimeps_ind,
+ int strtime_ind,
+ int irqtime_ind,
+ int irqtimeps_ind,
+ int endtime_ind)
+{
+ /* in case of an overflow, reset the whole profile */
+ if (data->dasd_io_reqs == UINT_MAX) {
+ memset(data, 0, sizeof(*data));
+ ktime_get_real_ts64(&data->starttod);
+ }
+ data->dasd_io_reqs++;
+ data->dasd_io_sects += sectors;
+ if (is_alias)
+ data->dasd_io_alias++;
+ if (is_tpm)
+ data->dasd_io_tpm++;
+
+ data->dasd_io_secs[sectors_ind]++;
+ data->dasd_io_times[tottime_ind]++;
+ data->dasd_io_timps[tottimeps_ind]++;
+ data->dasd_io_time1[strtime_ind]++;
+ data->dasd_io_time2[irqtime_ind]++;
+ data->dasd_io_time2ps[irqtimeps_ind]++;
+ data->dasd_io_time3[endtime_ind]++;
+
+ if (is_read) {
+ data->dasd_read_reqs++;
+ data->dasd_read_sects += sectors;
+ if (is_alias)
+ data->dasd_read_alias++;
+ if (is_tpm)
+ data->dasd_read_tpm++;
+ data->dasd_read_secs[sectors_ind]++;
+ data->dasd_read_times[tottime_ind]++;
+ data->dasd_read_time1[strtime_ind]++;
+ data->dasd_read_time2[irqtime_ind]++;
+ data->dasd_read_time3[endtime_ind]++;
+ }
+}
+
+static void dasd_profile_end(struct dasd_block *block,
+ struct dasd_ccw_req *cqr,
+ struct request *req)
+{
+ unsigned long strtime, irqtime, endtime, tottime;
+ unsigned long tottimeps, sectors;
+ struct dasd_device *device;
+ int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
+ int irqtime_ind, irqtimeps_ind, endtime_ind;
+ struct dasd_profile_info *data;
+
+ device = cqr->startdev;
+ if (!(dasd_global_profile_level ||
+ block->profile.data ||
+ device->profile.data))
+ return;
+
+ sectors = blk_rq_sectors(req);
+ if (!cqr->buildclk || !cqr->startclk ||
+ !cqr->stopclk || !cqr->endclk ||
+ !sectors)
+ return;
+
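+ /*
+  * The TOD clock advances bit 51 once per microsecond, so shifting
+  * the clock deltas right by 12 converts them to microseconds.
+  */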
+ strtime = ((cqr->startclk - cqr->buildclk) >> 12);
+ irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
+ endtime = ((cqr->endclk - cqr->stopclk) >> 12);
+ tottime = ((cqr->endclk - cqr->buildclk) >> 12);
+ tottimeps = tottime / sectors;
+
+ dasd_profile_counter(sectors, sectors_ind);
+ dasd_profile_counter(tottime, tottime_ind);
+ dasd_profile_counter(tottimeps, tottimeps_ind);
+ dasd_profile_counter(strtime, strtime_ind);
+ dasd_profile_counter(irqtime, irqtime_ind);
+ dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
+ dasd_profile_counter(endtime, endtime_ind);
+
+ spin_lock(&dasd_global_profile.lock);
+ if (dasd_global_profile.data) {
+ data = dasd_global_profile.data;
+ data->dasd_sum_times += tottime;
+ data->dasd_sum_time_str += strtime;
+ data->dasd_sum_time_irq += irqtime;
+ data->dasd_sum_time_end += endtime;
+ dasd_profile_end_add_data(dasd_global_profile.data,
+ cqr->startdev != block->base,
+ cqr->cpmode == 1,
+ rq_data_dir(req) == READ,
+ sectors, sectors_ind, tottime_ind,
+ tottimeps_ind, strtime_ind,
+ irqtime_ind, irqtimeps_ind,
+ endtime_ind);
+ }
+ spin_unlock(&dasd_global_profile.lock);
+
+ spin_lock(&block->profile.lock);
+ if (block->profile.data) {
+ data = block->profile.data;
+ data->dasd_sum_times += tottime;
+ data->dasd_sum_time_str += strtime;
+ data->dasd_sum_time_irq += irqtime;
+ data->dasd_sum_time_end += endtime;
+ dasd_profile_end_add_data(block->profile.data,
+ cqr->startdev != block->base,
+ cqr->cpmode == 1,
+ rq_data_dir(req) == READ,
+ sectors, sectors_ind, tottime_ind,
+ tottimeps_ind, strtime_ind,
+ irqtime_ind, irqtimeps_ind,
+ endtime_ind);
+ }
+ spin_unlock(&block->profile.lock);
+
+ spin_lock(&device->profile.lock);
+ if (device->profile.data) {
+ data = device->profile.data;
+ data->dasd_sum_times += tottime;
+ data->dasd_sum_time_str += strtime;
+ data->dasd_sum_time_irq += irqtime;
+ data->dasd_sum_time_end += endtime;
+ dasd_profile_end_add_data(device->profile.data,
+ cqr->startdev != block->base,
+ cqr->cpmode == 1,
+ rq_data_dir(req) == READ,
+ sectors, sectors_ind, tottime_ind,
+ tottimeps_ind, strtime_ind,
+ irqtime_ind, irqtimeps_ind,
+ endtime_ind);
+ }
+ spin_unlock(&device->profile.lock);
+}
+
+void dasd_profile_reset(struct dasd_profile *profile)
+{
+ struct dasd_profile_info *data;
+
+ spin_lock_bh(&profile->lock);
+ data = profile->data;
+ if (!data) {
+ spin_unlock_bh(&profile->lock);
+ return;
+ }
+ memset(data, 0, sizeof(*data));
+ ktime_get_real_ts64(&data->starttod);
+ spin_unlock_bh(&profile->lock);
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+ struct dasd_profile_info *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ spin_lock_bh(&profile->lock);
+ if (profile->data) {
+ spin_unlock_bh(&profile->lock);
+ kfree(data);
+ return 0;
+ }
+ ktime_get_real_ts64(&data->starttod);
+ profile->data = data;
+ spin_unlock_bh(&profile->lock);
+ return 0;
+}
+
+void dasd_profile_off(struct dasd_profile *profile)
+{
+ spin_lock_bh(&profile->lock);
+ kfree(profile->data);
+ profile->data = NULL;
+ spin_unlock_bh(&profile->lock);
+}
+
+char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
+{
+ char *buffer;
+
+ buffer = vmalloc(user_len + 1);
+ if (buffer == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_user(buffer, user_buf, user_len) != 0) {
+ vfree(buffer);
+ return ERR_PTR(-EFAULT);
+ }
+ /* got the string, now strip linefeed. */
+ if (buffer[user_len - 1] == '\n')
+ buffer[user_len - 1] = 0;
+ else
+ buffer[user_len] = 0;
+ return buffer;
+}
+
+static ssize_t dasd_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t user_len, loff_t *pos)
+{
+ char *buffer, *str;
+ int rc;
+ struct seq_file *m = (struct seq_file *)file->private_data;
+ struct dasd_profile *prof = m->private;
+
+ if (user_len > 65536)
+ user_len = 65536;
+ buffer = dasd_get_user_string(user_buf, user_len);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ str = skip_spaces(buffer);
+ rc = user_len;
+ if (strncmp(str, "reset", 5) == 0) {
+ dasd_profile_reset(prof);
+ } else if (strncmp(str, "on", 2) == 0) {
+ rc = dasd_profile_on(prof);
+ if (rc)
+ goto out;
+ rc = user_len;
+ if (prof == &dasd_global_profile) {
+ dasd_profile_reset(prof);
+ dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
+ }
+ } else if (strncmp(str, "off", 3) == 0) {
+ if (prof == &dasd_global_profile)
+ dasd_global_profile_level = DASD_PROFILE_OFF;
+ dasd_profile_off(prof);
+ } else
+ rc = -EINVAL;
+out:
+ vfree(buffer);
+ return rc;
+}
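
With CONFIG_DASD_PROFILE set, this handler backs the "statistics" debugfs files created by dasd_profile_init() below. Assuming debugfs is mounted at the usual /sys/kernel/debug, writing "on", "off", or "reset" to /sys/kernel/debug/dasd/global/statistics enables, disables, or clears global profiling, and reading the file returns either "disabled" or the counters emitted by dasd_stats_seq_print(); the per-device and per-block files under the same root behave the same way apart from the global-level toggling.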
+
+static void dasd_stats_array(struct seq_file *m, unsigned int *array)
+{
+ int i;
+
+ for (i = 0; i < 32; i++)
+ seq_printf(m, "%u ", array[i]);
+ seq_putc(m, '\n');
+}
+
+static void dasd_stats_seq_print(struct seq_file *m,
+ struct dasd_profile_info *data)
+{
+ seq_printf(m, "start_time %lld.%09ld\n",
+ (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
+ seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
+ seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
+ seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
+ seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
+ seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
+ data->dasd_sum_times / data->dasd_io_reqs : 0UL);
+ seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
+ data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
+ seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
+ data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
+ seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
+ data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
+ seq_puts(m, "histogram_sectors ");
+ dasd_stats_array(m, data->dasd_io_secs);
+ seq_puts(m, "histogram_io_times ");
+ dasd_stats_array(m, data->dasd_io_times);
+ seq_puts(m, "histogram_io_times_weighted ");
+ dasd_stats_array(m, data->dasd_io_timps);
+ seq_puts(m, "histogram_time_build_to_ssch ");
+ dasd_stats_array(m, data->dasd_io_time1);
+ seq_puts(m, "histogram_time_ssch_to_irq ");
+ dasd_stats_array(m, data->dasd_io_time2);
+ seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
+ dasd_stats_array(m, data->dasd_io_time2ps);
+ seq_puts(m, "histogram_time_irq_to_end ");
+ dasd_stats_array(m, data->dasd_io_time3);
+ seq_puts(m, "histogram_ccw_queue_length ");
+ dasd_stats_array(m, data->dasd_io_nr_req);
+ seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
+ seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
+ seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
+ seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
+ seq_puts(m, "histogram_read_sectors ");
+ dasd_stats_array(m, data->dasd_read_secs);
+ seq_puts(m, "histogram_read_times ");
+ dasd_stats_array(m, data->dasd_read_times);
+ seq_puts(m, "histogram_read_time_build_to_ssch ");
+ dasd_stats_array(m, data->dasd_read_time1);
+ seq_puts(m, "histogram_read_time_ssch_to_irq ");
+ dasd_stats_array(m, data->dasd_read_time2);
+ seq_puts(m, "histogram_read_time_irq_to_end ");
+ dasd_stats_array(m, data->dasd_read_time3);
+ seq_puts(m, "histogram_read_ccw_queue_length ");
+ dasd_stats_array(m, data->dasd_read_nr_req);
+}
+
+static int dasd_stats_show(struct seq_file *m, void *v)
+{
+ struct dasd_profile *profile;
+ struct dasd_profile_info *data;
+
+ profile = m->private;
+ spin_lock_bh(&profile->lock);
+ data = profile->data;
+ if (!data) {
+ spin_unlock_bh(&profile->lock);
+ seq_puts(m, "disabled\n");
+ return 0;
+ }
+ dasd_stats_seq_print(m, data);
+ spin_unlock_bh(&profile->lock);
+ return 0;
+}
+
+static int dasd_stats_open(struct inode *inode, struct file *file)
+{
+ struct dasd_profile *profile = inode->i_private;
+ return single_open(file, dasd_stats_show, profile);
+}
+
+static const struct file_operations dasd_stats_raw_fops = {
+ .owner = THIS_MODULE,
+ .open = dasd_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = dasd_stats_write,
+};
+
+static void dasd_profile_init(struct dasd_profile *profile,
+ struct dentry *base_dentry)
+{
+ umode_t mode;
+ struct dentry *pde;
+
+ if (!base_dentry)
+ return;
+ profile->dentry = NULL;
+ profile->data = NULL;
+ mode = (S_IRUSR | S_IWUSR | S_IFREG);
+ pde = debugfs_create_file("statistics", mode, base_dentry,
+ profile, &dasd_stats_raw_fops);
+ if (pde && !IS_ERR(pde))
+ profile->dentry = pde;
+ return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+ dasd_profile_off(profile);
+ debugfs_remove(profile->dentry);
+ profile->dentry = NULL;
+}
+
+static void dasd_statistics_removeroot(void)
+{
+ dasd_global_profile_level = DASD_PROFILE_OFF;
+ dasd_profile_exit(&dasd_global_profile);
+ debugfs_remove(dasd_debugfs_global_entry);
+ debugfs_remove(dasd_debugfs_root_entry);
+}
+
+static void dasd_statistics_createroot(void)
+{
+ struct dentry *pde;
+
+ dasd_debugfs_root_entry = NULL;
+ pde = debugfs_create_dir("dasd", NULL);
+ if (!pde || IS_ERR(pde))
+ goto error;
+ dasd_debugfs_root_entry = pde;
+ pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
+ if (!pde || IS_ERR(pde))
+ goto error;
+ dasd_debugfs_global_entry = pde;
+ dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
+ return;
+
+error:
+ DBF_EVENT(DBF_ERR, "%s",
+ "Creation of the dasd debugfs interface failed");
+ dasd_statistics_removeroot();
+ return;
+}
+
+#else
+#define dasd_profile_start(block, cqr, req) do {} while (0)
+#define dasd_profile_end(block, cqr, req) do {} while (0)
+
+static void dasd_statistics_createroot(void)
+{
+ return;
+}
+
+static void dasd_statistics_removeroot(void)
+{
+ return;
+}
+
+int dasd_stats_generic_show(struct seq_file *m, void *v)
+{
+ seq_puts(m, "Statistics are not activated in this kernel\n");
+ return 0;
+}
+
+static void dasd_profile_init(struct dasd_profile *profile,
+ struct dentry *base_dentry)
+{
+ return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+ return;
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+ return 0;
+}
+
+#endif /* CONFIG_DASD_PROFILE */
+
+static int dasd_hosts_show(struct seq_file *m, void *v)
+{
+ struct dasd_device *device;
+ int rc = -EOPNOTSUPP;
+
+ device = m->private;
+ dasd_get_device(device);
+
+ if (device->discipline->hosts_print)
+ rc = device->discipline->hosts_print(device, m);
+
+ dasd_put_device(device);
+ return rc;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
+
+static void dasd_hosts_exit(struct dasd_device *device)
+{
+ debugfs_remove(device->hosts_dentry);
+ device->hosts_dentry = NULL;
+}
+
+static void dasd_hosts_init(struct dentry *base_dentry,
+ struct dasd_device *device)
+{
+ struct dentry *pde;
+ umode_t mode;
+
+ if (!base_dentry)
+ return;
+
+ mode = S_IRUSR | S_IFREG;
+ pde = debugfs_create_file("host_access_list", mode, base_dentry,
+ device, &dasd_hosts_fops);
+ if (pde && !IS_ERR(pde))
+ device->hosts_dentry = pde;
+}
+
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
+ struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ unsigned long flags;
+ char *data, *chunk;
+ int size = 0;
+
+ if (cplength > 0)
+ size += cplength * sizeof(struct ccw1);
+ if (datasize > 0)
+ size += datasize;
+ if (!cqr)
+ size += (sizeof(*cqr) + 7L) & -8L;
+
+ spin_lock_irqsave(&device->mem_lock, flags);
+ data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
+ spin_unlock_irqrestore(&device->mem_lock, flags);
+ if (!chunk)
+ return ERR_PTR(-ENOMEM);
+ if (!cqr) {
+ cqr = (void *) data;
+ data += (sizeof(*cqr) + 7L) & -8L;
+ }
+ memset(cqr, 0, sizeof(*cqr));
+ cqr->mem_chunk = chunk;
+ if (cplength > 0) {
+ cqr->cpaddr = data;
+ data += cplength * sizeof(struct ccw1);
+ memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
+ }
+ if (datasize > 0) {
+ cqr->data = data;
+ memset(cqr->data, 0, datasize);
+ }
+ cqr->magic = magic;
+ set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ dasd_get_device(device);
+ return cqr;
+}
+EXPORT_SYMBOL(dasd_smalloc_request);
+
+struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
+ int datasize,
+ struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ unsigned long flags;
+ int size, cqr_size;
+ char *data;
+
+ cqr_size = (sizeof(*cqr) + 7L) & -8L;
+ size = cqr_size;
+ if (cplength > 0)
+ size += cplength * sizeof(struct ccw1);
+ if (datasize > 0)
+ size += datasize;
+
+ spin_lock_irqsave(&device->mem_lock, flags);
+ cqr = dasd_alloc_chunk(&device->ese_chunks, size);
+ spin_unlock_irqrestore(&device->mem_lock, flags);
+ if (!cqr)
+ return ERR_PTR(-ENOMEM);
+ memset(cqr, 0, sizeof(*cqr));
+ data = (char *)cqr + cqr_size;
+ cqr->cpaddr = NULL;
+ if (cplength > 0) {
+ cqr->cpaddr = data;
+ data += cplength * sizeof(struct ccw1);
+ memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
+ }
+ cqr->data = NULL;
+ if (datasize > 0) {
+ cqr->data = data;
+ memset(cqr->data, 0, datasize);
+ }
+
+ cqr->magic = magic;
+ set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ dasd_get_device(device);
+
+ return cqr;
+}
+EXPORT_SYMBOL(dasd_fmalloc_request);
+
+void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&device->mem_lock, flags);
+ dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
+ spin_unlock_irqrestore(&device->mem_lock, flags);
+ dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_sfree_request);
+
+void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&device->mem_lock, flags);
+ dasd_free_chunk(&device->ese_chunks, cqr);
+ spin_unlock_irqrestore(&device->mem_lock, flags);
+ dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_ffree_request);
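
A hedged sketch of the allocate/build/free pairing for the chunk-based pool above (the magic constant and sizes are illustrative, and error handling around the channel program build is elided):

	struct dasd_ccw_req *cqr;

	/* room for one CCW plus 64 bytes of payload, carved from ccw_chunks */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 64, device, NULL);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	/* ... fill cqr->cpaddr with the channel program, cqr->data with payload ... */
	dasd_sfree_request(cqr, device);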
+
+/*
+ * Check discipline magic in cqr.
+ */
+static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device;
+
+ if (cqr == NULL)
+ return -EINVAL;
+ device = cqr->startdev;
+ if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ " dasd_ccw_req 0x%08x magic doesn't match"
+ " discipline 0x%08x",
+ cqr->magic,
+ *(unsigned int *) device->discipline->name);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Terminate the current i/o and set the request to clear_pending.
+ * Timer keeps device running.
+ * ccw_device_clear can fail if the i/o subsystem
+ * is in a bad mood.
+ */
+int dasd_term_IO(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device;
+ int retries, rc;
+ char errorstring[ERRORLENGTH];
+
+ /* Check the cqr */
+ rc = dasd_check_cqr(cqr);
+ if (rc)
+ return rc;
+ retries = 0;
+ device = (struct dasd_device *) cqr->startdev;
+ while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
+ rc = ccw_device_clear(device->cdev, (long) cqr);
+ switch (rc) {
+ case 0: /* termination successful */
+ cqr->status = DASD_CQR_CLEAR_PENDING;
+ cqr->stopclk = get_tod_clock();
+ cqr->starttime = 0;
+ DBF_DEV_EVENT(DBF_DEBUG, device,
+ "terminate cqr %p successful",
+ cqr);
+ break;
+ case -ENODEV:
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ "device gone, retry");
+ break;
+ case -EINVAL:
+ /*
+ * device not valid so no I/O could be running
+ * handle CQR as termination successful
+ */
+ cqr->status = DASD_CQR_CLEARED;
+ cqr->stopclk = get_tod_clock();
+ cqr->starttime = 0;
+ /* no retries for invalid devices */
+ cqr->retries = -1;
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ "EINVAL, handle as terminated");
+ /* fake rc to success */
+ rc = 0;
+ break;
+ default:
+ /* internal error 10 - unknown rc */
+ snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
+ dev_err(&device->cdev->dev, "An error occurred in the "
+ "DASD device driver, reason=%s\n", errorstring);
+ BUG();
+ break;
+ }
+ retries++;
+ }
+ dasd_schedule_device_bh(device);
+ return rc;
+}
+EXPORT_SYMBOL(dasd_term_IO);
+
+/*
+ * Start the i/o. This start_IO can fail if the channel is really busy.
+ * In that case set up a timer to start the request later.
+ */
+int dasd_start_IO(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device;
+ int rc;
+ char errorstring[ERRORLENGTH];
+
+ /* Check the cqr */
+ rc = dasd_check_cqr(cqr);
+ if (rc) {
+ cqr->intrc = rc;
+ return rc;
+ }
+ device = (struct dasd_device *) cqr->startdev;
+ if (((cqr->block &&
+ test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
+ test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
+ !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+ DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
+ "because of stolen lock", cqr);
+ cqr->status = DASD_CQR_ERROR;
+ cqr->intrc = -EPERM;
+ return -EPERM;
+ }
+ if (cqr->retries < 0) {
+ /* internal error 14 - start_IO ran out of retries */
+ sprintf(errorstring, "14 %p", cqr);
+ dev_err(&device->cdev->dev, "An error occurred in the DASD "
+ "device driver, reason=%s\n", errorstring);
+ cqr->status = DASD_CQR_ERROR;
+ return -EIO;
+ }
+ cqr->startclk = get_tod_clock();
+ cqr->starttime = jiffies;
+ cqr->retries--;
+ if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+ cqr->lpm &= dasd_path_get_opm(device);
+ if (!cqr->lpm)
+ cqr->lpm = dasd_path_get_opm(device);
+ }
+ /*
+ * remember the number of formatted tracks to prevent double format on
+ * ESE devices
+ */
+ if (cqr->block)
+ cqr->trkcount = atomic_read(&cqr->block->trkcount);
+
+ if (cqr->cpmode == 1) {
+ rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
+ (long) cqr, cqr->lpm);
+ } else {
+ rc = ccw_device_start(device->cdev, cqr->cpaddr,
+ (long) cqr, cqr->lpm, 0);
+ }
+ switch (rc) {
+ case 0:
+ cqr->status = DASD_CQR_IN_IO;
+ break;
+ case -EBUSY:
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "start_IO: device busy, retry later");
+ break;
+ case -EACCES:
+ /* -EACCES indicates that the request used only a subset of the
+ * available paths and all these paths are gone. If the lpm of
+ * this request was only a subset of the opm (e.g. the ppm) then
+ * we just do a retry with all available paths.
+ * If we already use the full opm, something is amiss, and we
+ * need a full path verification.
+ */
+ if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "start_IO: selected paths gone (%x)",
+ cqr->lpm);
+ } else if (cqr->lpm != dasd_path_get_opm(device)) {
+ cqr->lpm = dasd_path_get_opm(device);
+ DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+ "start_IO: selected paths gone,"
+ " retry on all paths");
+ } else {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "start_IO: all paths in opm gone,"
+ " do path verification");
+ dasd_generic_last_path_gone(device);
+ dasd_path_no_path(device);
+ dasd_path_set_tbvpm(device,
+ ccw_device_get_path_mask(
+ device->cdev));
+ }
+ break;
+ case -ENODEV:
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "start_IO: -ENODEV device gone, retry");
+ /* this is equivalent to CC=3 for SSCH; report this to EER */
+ dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
+ break;
+ case -EIO:
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "start_IO: -EIO device gone, retry");
+ break;
+ case -EINVAL:
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "start_IO: -EINVAL device currently "
+ "not accessible");
+ break;
+ default:
+ /* internal error 11 - unknown rc */
+ snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", errorstring);
+ BUG();
+ break;
+ }
+ cqr->intrc = rc;
+ return rc;
+}
+EXPORT_SYMBOL(dasd_start_IO);
+
+/*
+ * Timeout function for dasd devices. This is used for different purposes
+ * 1) missing interrupt handler for normal operation
+ * 2) delayed start of request where start_IO failed with -EBUSY
+ * 3) timeout for missing state change interrupts
+ * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
+ * DASD_CQR_QUEUED for 2) and 3).
+ */
+static void dasd_device_timeout(struct timer_list *t)
+{
+ unsigned long flags;
+ struct dasd_device *device;
+
+ device = from_timer(device, t, timer);
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ /* re-activate request queue */
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ dasd_schedule_device_bh(device);
+}
+
+/*
+ * Setup timeout for a device in jiffies.
+ */
+void dasd_device_set_timer(struct dasd_device *device, int expires)
+{
+ if (expires == 0)
+ del_timer(&device->timer);
+ else
+ mod_timer(&device->timer, jiffies + expires);
+}
+EXPORT_SYMBOL(dasd_device_set_timer);
+
+/*
+ * Clear timeout for a device.
+ */
+void dasd_device_clear_timer(struct dasd_device *device)
+{
+ del_timer(&device->timer);
+}
+EXPORT_SYMBOL(dasd_device_clear_timer);
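
A typical use of this pair is a delayed retry: dasd_device_set_timer(device, 5 * HZ) re-drives the device in five seconds (exactly what __dasd_device_check_expire() below does when termination fails), and passing an expires value of 0 is equivalent to dasd_device_clear_timer().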
+
+static void dasd_handle_killed_request(struct ccw_device *cdev,
+ unsigned long intparm)
+{
+ struct dasd_ccw_req *cqr;
+ struct dasd_device *device;
+
+ if (!intparm)
+ return;
+ cqr = (struct dasd_ccw_req *) intparm;
+ if (cqr->status != DASD_CQR_IN_IO) {
+ DBF_EVENT_DEVID(DBF_DEBUG, cdev,
+ "invalid status in handle_killed_request: "
+ "%02x", cqr->status);
+ return;
+ }
+
+ device = dasd_device_from_cdev_locked(cdev);
+ if (IS_ERR(device)) {
+ DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+ "unable to get device from cdev");
+ return;
+ }
+
+ if (!cqr->startdev ||
+ device != cqr->startdev ||
+ strncmp(cqr->startdev->discipline->ebcname,
+ (char *) &cqr->magic, 4)) {
+ DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+ "invalid device in request");
+ dasd_put_device(device);
+ return;
+ }
+
+ /* Schedule request to be retried. */
+ cqr->status = DASD_CQR_QUEUED;
+
+ dasd_device_clear_timer(device);
+ dasd_schedule_device_bh(device);
+ dasd_put_device(device);
+}
+
+void dasd_generic_handle_state_change(struct dasd_device *device)
+{
+ /* First of all start sense subsystem status request. */
+ dasd_eer_snss(device);
+
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
+ dasd_schedule_device_bh(device);
+ if (device->block) {
+ dasd_schedule_block_bh(device->block);
+ if (device->block->gdp)
+ blk_mq_run_hw_queues(device->block->gdp->queue, true);
+ }
+}
+EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
+
+static int dasd_check_hpf_error(struct irb *irb)
+{
+ return (scsw_tm_is_valid_schxs(&irb->scsw) &&
+ (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
+ irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
+}
+
+static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
+{
+ struct dasd_device *device = NULL;
+ u8 *sense = NULL;
+
+ if (!block)
+ return 0;
+ device = block->base;
+ if (!device || !device->discipline->is_ese)
+ return 0;
+ if (!device->discipline->is_ese(device))
+ return 0;
+
+ sense = dasd_get_sense(irb);
+ if (!sense)
+ return 0;
+
+ return !!(sense[1] & SNS1_NO_REC_FOUND) ||
+ !!(sense[1] & SNS1_FILE_PROTECTED) ||
+ scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
+}
+
+static int dasd_ese_oos_cond(u8 *sense)
+{
+ return sense[0] & SNS0_EQUIPMENT_CHECK &&
+ sense[1] & SNS1_PERM_ERR &&
+ sense[1] & SNS1_WRITE_INHIBITED &&
+ sense[25] == 0x01;
+}
+
+/*
+ * Interrupt handler for "normal" ssch-io based dasd devices.
+ */
+void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb)
+{
+ struct dasd_ccw_req *cqr, *next, *fcqr;
+ struct dasd_device *device;
+ unsigned long now;
+ int nrf_suppressed = 0;
+ int fp_suppressed = 0;
+ struct request *req;
+ u8 *sense = NULL;
+ int expires;
+
+ cqr = (struct dasd_ccw_req *) intparm;
+ if (IS_ERR(irb)) {
+ switch (PTR_ERR(irb)) {
+ case -EIO:
+ if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
+ device = cqr->startdev;
+ cqr->status = DASD_CQR_CLEARED;
+ dasd_device_clear_timer(device);
+ wake_up(&dasd_flush_wq);
+ dasd_schedule_device_bh(device);
+ return;
+ }
+ break;
+ case -ETIMEDOUT:
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+ "request timed out\n", __func__);
+ break;
+ default:
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+ "unknown error %ld\n", __func__,
+ PTR_ERR(irb));
+ }
+ dasd_handle_killed_request(cdev, intparm);
+ return;
+ }
+
+ now = get_tod_clock();
+ /* check for conditions that should be handled immediately */
+ if (!cqr ||
+ !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+ scsw_cstat(&irb->scsw) == 0)) {
+ if (cqr)
+ memcpy(&cqr->irb, irb, sizeof(*irb));
+ device = dasd_device_from_cdev_locked(cdev);
+ if (IS_ERR(device))
+ return;
+ /* ignore unsolicited interrupts for DIAG discipline */
+ if (device->discipline == dasd_diag_discipline_pointer) {
+ dasd_put_device(device);
+ return;
+ }
+
+ /*
+ * In some cases 'File Protected' or 'No Record Found' errors
+ * might be expected and debug log messages for the
+ * corresponding interrupts shouldn't be written then.
+ * Check if either of the according suppress bits is set.
+ */
+ sense = dasd_get_sense(irb);
+ if (sense) {
+ fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
+ test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
+ test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+ /*
+ * Extent pool probably out-of-space.
+ * Stop device and check exhaust level.
+ */
+ if (dasd_ese_oos_cond(sense)) {
+ dasd_generic_space_exhaust(device, cqr);
+ device->discipline->ext_pool_exhaust(device, cqr);
+ dasd_put_device(device);
+ return;
+ }
+ }
+ if (!(fp_suppressed || nrf_suppressed))
+ device->discipline->dump_sense_dbf(device, irb, "int");
+
+ if (device->features & DASD_FEATURE_ERPLOG)
+ device->discipline->dump_sense(device, cqr, irb);
+ device->discipline->check_for_device_change(device, cqr, irb);
+ dasd_put_device(device);
+ }
+
+ /* check for attention message */
+ if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
+ device = dasd_device_from_cdev_locked(cdev);
+ if (!IS_ERR(device)) {
+ device->discipline->check_attention(device,
+ irb->esw.esw1.lpum);
+ dasd_put_device(device);
+ }
+ }
+
+ if (!cqr)
+ return;
+
+ device = (struct dasd_device *) cqr->startdev;
+ if (!device ||
+ strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+ DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+ "invalid device in request");
+ return;
+ }
+
+ if (dasd_ese_needs_format(cqr->block, irb)) {
+ req = dasd_get_callback_data(cqr);
+ if (!req) {
+ cqr->status = DASD_CQR_ERROR;
+ return;
+ }
+ if (rq_data_dir(req) == READ) {
+ device->discipline->ese_read(cqr, irb);
+ cqr->status = DASD_CQR_SUCCESS;
+ cqr->stopclk = now;
+ dasd_device_clear_timer(device);
+ dasd_schedule_device_bh(device);
+ return;
+ }
+ fcqr = device->discipline->ese_format(device, cqr, irb);
+ if (IS_ERR(fcqr)) {
+ if (PTR_ERR(fcqr) == -EINVAL) {
+ cqr->status = DASD_CQR_ERROR;
+ return;
+ }
+ /*
+ * If we can't format now, let the request go
+ * one extra round. Maybe we can format later.
+ */
+ cqr->status = DASD_CQR_QUEUED;
+ dasd_schedule_device_bh(device);
+ return;
+ } else {
+ fcqr->status = DASD_CQR_QUEUED;
+ cqr->status = DASD_CQR_QUEUED;
+ list_add(&fcqr->devlist, &device->ccw_queue);
+ dasd_schedule_device_bh(device);
+ return;
+ }
+ }
+
+ /* Check for clear pending */
+ if (cqr->status == DASD_CQR_CLEAR_PENDING &&
+ scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
+ cqr->status = DASD_CQR_CLEARED;
+ dasd_device_clear_timer(device);
+ wake_up(&dasd_flush_wq);
+ dasd_schedule_device_bh(device);
+ return;
+ }
+
+ /* check status - the request might have been killed by dyn detach */
+ if (cqr->status != DASD_CQR_IN_IO) {
+ DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
+ "status %02x", dev_name(&cdev->dev), cqr->status);
+ return;
+ }
+
+ next = NULL;
+ expires = 0;
+ if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+ scsw_cstat(&irb->scsw) == 0) {
+ /* request was completed successfully */
+ cqr->status = DASD_CQR_SUCCESS;
+ cqr->stopclk = now;
+ /* Start first request on queue if possible -> fast_io. */
+ if (cqr->devlist.next != &device->ccw_queue) {
+ next = list_entry(cqr->devlist.next,
+ struct dasd_ccw_req, devlist);
+ }
+ } else { /* error */
+ /* check for HPF error
+ * call discipline function to requeue all requests
+ * and disable HPF accordingly
+ */
+ if (cqr->cpmode && dasd_check_hpf_error(irb) &&
+ device->discipline->handle_hpf_error)
+ device->discipline->handle_hpf_error(device, irb);
+ /*
+ * If we don't want complex ERP for this request, then just
+ * reset this and retry it in the fastpath
+ */
+ if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
+ cqr->retries > 0) {
+ if (cqr->lpm == dasd_path_get_opm(device))
+ DBF_DEV_EVENT(DBF_DEBUG, device,
+ "default ERP in fastpath "
+ "(%i retries left)",
+ cqr->retries);
+ if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+ cqr->lpm = dasd_path_get_opm(device);
+ cqr->status = DASD_CQR_QUEUED;
+ next = cqr;
+ } else
+ cqr->status = DASD_CQR_ERROR;
+ }
+ if (next && (next->status == DASD_CQR_QUEUED) &&
+ (!device->stopped)) {
+ if (device->discipline->start_IO(next) == 0)
+ expires = next->expires;
+ }
+ if (expires != 0)
+ dasd_device_set_timer(device, expires);
+ else
+ dasd_device_clear_timer(device);
+ dasd_schedule_device_bh(device);
+}
+EXPORT_SYMBOL(dasd_int_handler);
+
+enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
+{
+ struct dasd_device *device;
+
+ device = dasd_device_from_cdev_locked(cdev);
+
+ if (IS_ERR(device))
+ goto out;
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+ device->state != device->target ||
+ !device->discipline->check_for_device_change){
+ dasd_put_device(device);
+ goto out;
+ }
+ if (device->discipline->dump_sense_dbf)
+ device->discipline->dump_sense_dbf(device, irb, "uc");
+ device->discipline->check_for_device_change(device, NULL, irb);
+ dasd_put_device(device);
+out:
+ return UC_TODO_RETRY;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
+
+/*
+ * If we have an error on a dasd_block layer request then we cancel
+ * and return all further requests from the same dasd_block as well.
+ */
+static void __dasd_device_recovery(struct dasd_device *device,
+ struct dasd_ccw_req *ref_cqr)
+{
+ struct list_head *l, *n;
+ struct dasd_ccw_req *cqr;
+
+ /*
+ * only requeue requests that came from the dasd_block layer
+ */
+ if (!ref_cqr->block)
+ return;
+
+ list_for_each_safe(l, n, &device->ccw_queue) {
+ cqr = list_entry(l, struct dasd_ccw_req, devlist);
+ if (cqr->status == DASD_CQR_QUEUED &&
+ ref_cqr->block == cqr->block) {
+ cqr->status = DASD_CQR_CLEARED;
+ }
+ }
+}
+
+/*
+ * Remove those ccw requests from the queue that need to be returned
+ * to the upper layer.
+ */
+static void __dasd_device_process_ccw_queue(struct dasd_device *device,
+ struct list_head *final_queue)
+{
+ struct list_head *l, *n;
+ struct dasd_ccw_req *cqr;
+
+ /* Process request with final status. */
+ list_for_each_safe(l, n, &device->ccw_queue) {
+ cqr = list_entry(l, struct dasd_ccw_req, devlist);
+
+ /* Skip any non-final request. */
+ if (cqr->status == DASD_CQR_QUEUED ||
+ cqr->status == DASD_CQR_IN_IO ||
+ cqr->status == DASD_CQR_CLEAR_PENDING)
+ continue;
+ if (cqr->status == DASD_CQR_ERROR) {
+ __dasd_device_recovery(device, cqr);
+ }
+ /* Rechain finished requests to final queue */
+ list_move_tail(&cqr->devlist, final_queue);
+ }
+}
+
+static void __dasd_process_cqr(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ char errorstring[ERRORLENGTH];
+
+ switch (cqr->status) {
+ case DASD_CQR_SUCCESS:
+ cqr->status = DASD_CQR_DONE;
+ break;
+ case DASD_CQR_ERROR:
+ cqr->status = DASD_CQR_NEED_ERP;
+ break;
+ case DASD_CQR_CLEARED:
+ cqr->status = DASD_CQR_TERMINATED;
+ break;
+ default:
+		/* internal error 12 - wrong cqr status */
+		snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", errorstring);
+ BUG();
+ }
+ if (cqr->callback)
+ cqr->callback(cqr, cqr->callback_data);
+}
+
+/*
+ * The cqrs from the final queue are returned to the upper layer
+ * by setting a dasd_block state and calling the callback function.
+ */
+static void __dasd_device_process_final_queue(struct dasd_device *device,
+ struct list_head *final_queue)
+{
+ struct list_head *l, *n;
+ struct dasd_ccw_req *cqr;
+ struct dasd_block *block;
+
+ list_for_each_safe(l, n, final_queue) {
+ cqr = list_entry(l, struct dasd_ccw_req, devlist);
+ list_del_init(&cqr->devlist);
+ block = cqr->block;
+ if (!block) {
+ __dasd_process_cqr(device, cqr);
+ } else {
+ spin_lock_bh(&block->queue_lock);
+ __dasd_process_cqr(device, cqr);
+ spin_unlock_bh(&block->queue_lock);
+ }
+ }
+}
+
+/*
+ * check if device should be autoquiesced due to too many timeouts
+ */
+static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ if ((device->default_retries - cqr->retries) >= device->aq_timeouts)
+ dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS);
+}
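+
+/*
+ * Illustration of the check above (hypothetical values, assuming each
+ * expiration decrements the retries counter): with default_retries == 5
+ * and aq_timeouts == 3, a cqr whose retries counter has dropped to 2 has
+ * already timed out three times, so autoquiesce handling is triggered
+ * with reason DASD_EER_TIMEOUTS.
+ */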
+
+/*
+ * Take a look at the first request on the ccw queue and check
+ * if it reached its expire time. If so, terminate the IO.
+ */
+static void __dasd_device_check_expire(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+
+ if (list_empty(&device->ccw_queue))
+ return;
+ cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+ if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
+ (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+ if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /*
+ * IO in safe offline processing should not
+ * run out of retries
+ */
+ cqr->retries++;
+ }
+ if (device->discipline->term_IO(cqr) != 0) {
+ /* Hmpf, try again in 5 sec */
+ dev_err(&device->cdev->dev,
+ "cqr %p timed out (%lus) but cannot be "
+ "ended, retrying in 5 s\n",
+ cqr, (cqr->expires/HZ));
+ cqr->expires += 5*HZ;
+ dasd_device_set_timer(device, 5*HZ);
+ } else {
+ dev_err(&device->cdev->dev,
+ "cqr %p timed out (%lus), %i retries "
+ "remaining\n", cqr, (cqr->expires/HZ),
+ cqr->retries);
+ }
+ __dasd_device_check_autoquiesce_timeout(device, cqr);
+ }
+}
+
+/*
+ * return 1 when device is not eligible for IO
+ */
+static int __dasd_device_is_unusable(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);
+
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/*
+		 * The DASD is being set offline, but this is not a safe
+		 * offline during which we would have to allow I/O.
+		 */
+ return 1;
+ }
+ if (device->stopped) {
+ if (device->stopped & mask) {
+ /* stopped and CQR will not change that. */
+ return 1;
+ }
+ if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			/* CQR is not able to bring the device back
+			 * to an operational state. */
+ return 1;
+ }
+ /* CQR required to get device operational. */
+ }
+ return 0;
+}
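+
+/*
+ * Illustration of the mask logic above: if the device is stopped only by
+ * DASD_STOPPED_DC_WAIT and/or DASD_STOPPED_NOSPC, (device->stopped & mask)
+ * is 0, so a cqr with DASD_CQR_VERIFY_PATH set may still be started;
+ * path verification may be exactly what brings the device back into an
+ * operational state.
+ */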
+
+/*
+ * Take a look at the first request on the ccw queue and check
+ * if it needs to be started.
+ */
+static void __dasd_device_start_head(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+
+ if (list_empty(&device->ccw_queue))
+ return;
+ cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+ if (cqr->status != DASD_CQR_QUEUED)
+ return;
+ /* if device is not usable return request to upper layer */
+ if (__dasd_device_is_unusable(device, cqr)) {
+ cqr->intrc = -EAGAIN;
+ cqr->status = DASD_CQR_CLEARED;
+ dasd_schedule_device_bh(device);
+ return;
+ }
+
+ rc = device->discipline->start_IO(cqr);
+ if (rc == 0)
+ dasd_device_set_timer(device, cqr->expires);
+ else if (rc == -EACCES) {
+ dasd_schedule_device_bh(device);
+ } else
+ /* Hmpf, try again in 1/2 sec */
+ dasd_device_set_timer(device, 50);
+}
+
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+ __u8 tbvpm, fcsecpm;
+ int rc;
+
+ tbvpm = dasd_path_get_tbvpm(device);
+ fcsecpm = dasd_path_get_fcsecpm(device);
+
+ if (!tbvpm && !fcsecpm)
+ return;
+
+ if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
+ return;
+
+ dasd_path_clear_all_verify(device);
+ dasd_path_clear_all_fcsec(device);
+
+ rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
+ if (rc) {
+ dasd_path_add_tbvpm(device, tbvpm);
+ dasd_path_add_fcsecpm(device, fcsecpm);
+ dasd_device_set_timer(device, 50);
+ }
+}
+
+/*
+ * Go through all requests on the dasd_device request queue,
+ * terminate them on the cdev if necessary, and return them to the
+ * submitting layer via callback.
+ * Note:
+ * Make sure that all 'submitting layers' still exist when
+ * this function is called! In other words, when 'device' is a base
+ * device, then all block layer requests must have been removed
+ * beforehand via dasd_flush_block_queue.
+ */
+int dasd_flush_device_queue(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr, *n;
+ int rc;
+ struct list_head flush_queue;
+
+ INIT_LIST_HEAD(&flush_queue);
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
+ rc = 0;
+ list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+ /* Check status and move request to flush_queue */
+ switch (cqr->status) {
+ case DASD_CQR_IN_IO:
+ rc = device->discipline->term_IO(cqr);
+ if (rc) {
+				/* unable to terminate request */
+ dev_err(&device->cdev->dev,
+ "Flushing the DASD request queue "
+ "failed for request %p\n", cqr);
+ /* stop flush processing */
+ goto finished;
+ }
+ break;
+ case DASD_CQR_QUEUED:
+ cqr->stopclk = get_tod_clock();
+ cqr->status = DASD_CQR_CLEARED;
+ break;
+ default: /* no need to modify the others */
+ break;
+ }
+ list_move_tail(&cqr->devlist, &flush_queue);
+ }
+finished:
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ /*
+ * After this point all requests must be in state CLEAR_PENDING,
+ * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
+ * one of the others.
+ */
+ list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
+ wait_event(dasd_flush_wq,
+ (cqr->status != DASD_CQR_CLEAR_PENDING));
+ /*
+ * Now set each request back to TERMINATED, DONE or NEED_ERP
+ * and call the callback function of flushed requests
+ */
+ __dasd_device_process_final_queue(device, &flush_queue);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
+
+/*
+ * Acquire the device lock and process queues for the device.
+ */
+static void dasd_device_tasklet(unsigned long data)
+{
+ struct dasd_device *device = (struct dasd_device *) data;
+ struct list_head final_queue;
+
+	atomic_set(&device->tasklet_scheduled, 0);
+ INIT_LIST_HEAD(&final_queue);
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
+ /* Check expire time of first request on the ccw queue. */
+ __dasd_device_check_expire(device);
+ /* find final requests on ccw queue */
+ __dasd_device_process_ccw_queue(device, &final_queue);
+ __dasd_device_check_path_events(device);
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ /* Now call the callback function of requests with final status */
+ __dasd_device_process_final_queue(device, &final_queue);
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
+ /* Now check if the head of the ccw queue needs to be started. */
+ __dasd_device_start_head(device);
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ if (waitqueue_active(&shutdown_waitq))
+ wake_up(&shutdown_waitq);
+ dasd_put_device(device);
+}
+
+/*
+ * Schedule a run of dasd_device_tasklet on the device tasklet.
+ */
+void dasd_schedule_device_bh(struct dasd_device *device)
+{
+ /* Protect against rescheduling. */
+	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
+ return;
+ dasd_get_device(device);
+ tasklet_hi_schedule(&device->tasklet);
+}
+EXPORT_SYMBOL(dasd_schedule_device_bh);
+
+void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
+{
+ device->stopped |= bits;
+}
+EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
+
+void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
+{
+ device->stopped &= ~bits;
+ if (!device->stopped)
+ wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
+
+/*
+ * Queue a request to the head of the device ccw_queue.
+ * Start the I/O if possible.
+ */
+void dasd_add_request_head(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+
+ device = cqr->startdev;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ cqr->status = DASD_CQR_QUEUED;
+ list_add(&cqr->devlist, &device->ccw_queue);
+ /* let the bh start the request to keep them in order */
+ dasd_schedule_device_bh(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+EXPORT_SYMBOL(dasd_add_request_head);
+
+/*
+ * Queue a request to the tail of the device ccw_queue.
+ * Start the I/O if possible.
+ */
+void dasd_add_request_tail(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+
+ device = cqr->startdev;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ cqr->status = DASD_CQR_QUEUED;
+ list_add_tail(&cqr->devlist, &device->ccw_queue);
+ /* let the bh start the request to keep them in order */
+ dasd_schedule_device_bh(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+EXPORT_SYMBOL(dasd_add_request_tail);
+
+/*
+ * Wakeup helper for the 'sleep_on' functions.
+ */
+void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
+{
+ spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+ cqr->callback_data = DASD_SLEEPON_END_TAG;
+ spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
+ wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
+
+static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device;
+ int rc;
+
+ device = cqr->startdev;
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
+ rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ return rc;
+}
+
+/*
+ * Check if error recovery is necessary; return 1 if yes, 0 otherwise.
+ */
+static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device;
+ dasd_erp_fn_t erp_fn;
+
+ if (cqr->status == DASD_CQR_FILLED)
+ return 0;
+ device = cqr->startdev;
+ if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+ if (cqr->status == DASD_CQR_TERMINATED) {
+ device->discipline->handle_terminated_request(cqr);
+ return 1;
+ }
+ if (cqr->status == DASD_CQR_NEED_ERP) {
+ erp_fn = device->discipline->erp_action(cqr);
+ erp_fn(cqr);
+ return 1;
+ }
+ if (cqr->status == DASD_CQR_FAILED)
+ dasd_log_sense(cqr, &cqr->irb);
+ if (cqr->refers) {
+ __dasd_process_erp(device, cqr);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
+{
+ if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+ if (cqr->refers) /* erp is not done yet */
+ return 1;
+ return ((cqr->status != DASD_CQR_DONE) &&
+ (cqr->status != DASD_CQR_FAILED));
+ } else
+ return (cqr->status == DASD_CQR_FILLED);
+}
+
+static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
+{
+ struct dasd_device *device;
+ int rc;
+ struct list_head ccw_queue;
+ struct dasd_ccw_req *cqr;
+
+ INIT_LIST_HEAD(&ccw_queue);
+ maincqr->status = DASD_CQR_FILLED;
+ device = maincqr->startdev;
+ list_add(&maincqr->blocklist, &ccw_queue);
+ for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
+ cqr = list_first_entry(&ccw_queue,
+ struct dasd_ccw_req, blocklist)) {
+
+ if (__dasd_sleep_on_erp(cqr))
+ continue;
+		if (cqr->status != DASD_CQR_FILLED) /* could have failed */
+ continue;
+ if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+ !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -EPERM;
+ continue;
+ }
+ /* Non-temporary stop condition will trigger fail fast */
+ if (device->stopped & ~DASD_STOPPED_PENDING &&
+ test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+ !dasd_eer_enabled(device) && device->aq_mask == 0) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -ENOLINK;
+ continue;
+ }
+		/*
+		 * Don't try to start requests if the device is in
+		 * offline processing; it might wait forever.
+		 */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -ENODEV;
+ continue;
+ }
+		/*
+		 * Don't try to start requests if the device is stopped,
+		 * except for path verification requests.
+		 */
+ if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+ if (interruptible) {
+ rc = wait_event_interruptible(
+ generic_waitq, !(device->stopped));
+ if (rc == -ERESTARTSYS) {
+ cqr->status = DASD_CQR_FAILED;
+ maincqr->intrc = rc;
+ continue;
+ }
+ } else
+ wait_event(generic_waitq, !(device->stopped));
+ }
+ if (!cqr->callback)
+ cqr->callback = dasd_wakeup_cb;
+
+ cqr->callback_data = DASD_SLEEPON_START_TAG;
+ dasd_add_request_tail(cqr);
+ if (interruptible) {
+ rc = wait_event_interruptible(
+ generic_waitq, _wait_for_wakeup(cqr));
+ if (rc == -ERESTARTSYS) {
+ dasd_cancel_req(cqr);
+ /* wait (non-interruptible) for final status */
+ wait_event(generic_waitq,
+ _wait_for_wakeup(cqr));
+ cqr->status = DASD_CQR_FAILED;
+ maincqr->intrc = rc;
+ continue;
+ }
+ } else
+ wait_event(generic_waitq, _wait_for_wakeup(cqr));
+ }
+
+ maincqr->endclk = get_tod_clock();
+ if ((maincqr->status != DASD_CQR_DONE) &&
+ (maincqr->intrc != -ERESTARTSYS))
+ dasd_log_sense(maincqr, &maincqr->irb);
+ if (maincqr->status == DASD_CQR_DONE)
+ rc = 0;
+ else if (maincqr->intrc)
+ rc = maincqr->intrc;
+ else
+ rc = -EIO;
+ return rc;
+}
+
+static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
+{
+ struct dasd_ccw_req *cqr;
+
+ list_for_each_entry(cqr, ccw_queue, blocklist) {
+ if (cqr->callback_data != DASD_SLEEPON_END_TAG)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
+{
+ struct dasd_device *device;
+ struct dasd_ccw_req *cqr, *n;
+ u8 *sense = NULL;
+ int rc;
+
+retry:
+ list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+ device = cqr->startdev;
+		if (cqr->status != DASD_CQR_FILLED) /* could have failed */
+ continue;
+
+ if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+ !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -EPERM;
+ continue;
+ }
+		/* Non-temporary stop condition will trigger fail fast */
+ if (device->stopped & ~DASD_STOPPED_PENDING &&
+ test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+ !dasd_eer_enabled(device)) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -EAGAIN;
+ continue;
+ }
+
+		/* Don't try to start requests if device is stopped */
+ if (interruptible) {
+ rc = wait_event_interruptible(
+ generic_waitq, !device->stopped);
+ if (rc == -ERESTARTSYS) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = rc;
+ continue;
+ }
+ } else
+ wait_event(generic_waitq, !(device->stopped));
+
+ if (!cqr->callback)
+ cqr->callback = dasd_wakeup_cb;
+ cqr->callback_data = DASD_SLEEPON_START_TAG;
+ dasd_add_request_tail(cqr);
+ }
+
+ wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
+
+ rc = 0;
+ list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+ /*
+ * In some cases the 'File Protected' or 'Incorrect Length'
+ * error might be expected and error recovery would be
+		 * unnecessary in these cases. Check if the corresponding
+		 * suppress bit is set.
+ */
+ sense = dasd_get_sense(&cqr->irb);
+ if (sense && sense[1] & SNS1_FILE_PROTECTED &&
+ test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
+ continue;
+ if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
+ test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
+ continue;
+
+		/*
+		 * For alias devices, simplify error recovery and return
+		 * to the upper layer, but do not skip ERP requests.
+		 */
+ if (cqr->startdev != cqr->basedev && !cqr->refers &&
+ (cqr->status == DASD_CQR_TERMINATED ||
+ cqr->status == DASD_CQR_NEED_ERP))
+ return -EAGAIN;
+
+ /* normal recovery for basedev IO */
+ if (__dasd_sleep_on_erp(cqr))
+ /* handle erp first */
+ goto retry;
+ }
+
+ return 0;
+}
+
+/*
+ * Queue a request to the tail of the device ccw_queue and wait for
+ * its completion.
+ */
+int dasd_sleep_on(struct dasd_ccw_req *cqr)
+{
+ return _dasd_sleep_on(cqr, 0);
+}
+EXPORT_SYMBOL(dasd_sleep_on);
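+
+/*
+ * Minimal usage sketch for dasd_sleep_on(); this mirrors what
+ * dasd_generic_read_dev_chars() does further down in this file:
+ *
+ *	cqr = dasd_smalloc_request(magic, cplength, datasize, device, NULL);
+ *	if (IS_ERR(cqr))
+ *		return PTR_ERR(cqr);
+ *	... set up channel program, startdev, expires, retries ...
+ *	cqr->status = DASD_CQR_FILLED;
+ *	rc = dasd_sleep_on(cqr);
+ *	dasd_sfree_request(cqr, cqr->memdev);
+ */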
+
+/*
+ * Start requests from a ccw_queue and wait for their completion.
+ */
+int dasd_sleep_on_queue(struct list_head *ccw_queue)
+{
+ return _dasd_sleep_on_queue(ccw_queue, 0);
+}
+EXPORT_SYMBOL(dasd_sleep_on_queue);
+
+/*
+ * Start requests from a ccw_queue and wait interruptible for their completion.
+ */
+int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
+{
+ return _dasd_sleep_on_queue(ccw_queue, 1);
+}
+EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
+
+/*
+ * Queue a request to the tail of the device ccw_queue and wait
+ * interruptibly for its completion.
+ */
+int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
+{
+ return _dasd_sleep_on(cqr, 1);
+}
+EXPORT_SYMBOL(dasd_sleep_on_interruptible);
+
+/*
+ * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
+ * for ECKD devices) the currently running request has to be terminated
+ * and put back to status queued before the special request is added
+ * to the head of the queue. Then the special request is waited on normally.
+ */
+static inline int _dasd_term_running_cqr(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+
+ if (list_empty(&device->ccw_queue))
+ return 0;
+ cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+ rc = device->discipline->term_IO(cqr);
+ if (!rc)
+ /*
+ * CQR terminated because a more important request is pending.
+ * Undo decreasing of retry counter because this is
+ * not an error case.
+ */
+ cqr->retries++;
+ return rc;
+}
+
+int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device;
+ int rc;
+
+ device = cqr->startdev;
+ if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+ !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -EPERM;
+ return -EIO;
+ }
+ spin_lock_irq(get_ccwdev_lock(device->cdev));
+ rc = _dasd_term_running_cqr(device);
+ if (rc) {
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ return rc;
+ }
+ cqr->callback = dasd_wakeup_cb;
+ cqr->callback_data = DASD_SLEEPON_START_TAG;
+ cqr->status = DASD_CQR_QUEUED;
+	/*
+	 * Add the new request as second on the queue;
+	 * the terminated cqr needs to be finished first.
+	 */
+ list_add(&cqr->devlist, device->ccw_queue.next);
+
+ /* let the bh start the request to keep them in order */
+ dasd_schedule_device_bh(device);
+
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
+
+ wait_event(generic_waitq, _wait_for_wakeup(cqr));
+
+ if (cqr->status == DASD_CQR_DONE)
+ rc = 0;
+ else if (cqr->intrc)
+ rc = cqr->intrc;
+ else
+ rc = -EIO;
+
+ /* kick tasklets */
+ dasd_schedule_device_bh(device);
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+
+ return rc;
+}
+EXPORT_SYMBOL(dasd_sleep_on_immediatly);
+
+/*
+ * Cancels a request that was started with dasd_sleep_on_req.
+ * This is useful to time out requests. The request will be
+ * terminated if it is currently in I/O.
+ * Returns 0 if the request termination was successful, or a
+ * negative error code if the termination failed.
+ * Cancellation of a request is an asynchronous operation! The calling
+ * function has to wait until the request is properly returned via callback.
+ */
+static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device = cqr->startdev;
+ int rc = 0;
+
+ switch (cqr->status) {
+ case DASD_CQR_QUEUED:
+ /* request was not started - just set to cleared */
+ cqr->status = DASD_CQR_CLEARED;
+ break;
+ case DASD_CQR_IN_IO:
+ /* request in IO - terminate IO and release again */
+ rc = device->discipline->term_IO(cqr);
+ if (rc) {
+ dev_err(&device->cdev->dev,
+ "Cancelling request %p failed with rc=%d\n",
+ cqr, rc);
+ } else {
+ cqr->stopclk = get_tod_clock();
+ }
+ break;
+ default: /* already finished or clear pending - do nothing */
+ break;
+ }
+ dasd_schedule_device_bh(device);
+ return rc;
+}
+
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device = cqr->startdev;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ rc = __dasd_cancel_req(cqr);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return rc;
+}
+
+/*
+ * SECTION: Operations of the dasd_block layer.
+ */
+
+/*
+ * Timeout function for dasd_block. This is used when the block layer
+ * is waiting for something that may not come reliably (e.g. a state
+ * change interrupt).
+ */
+static void dasd_block_timeout(struct timer_list *t)
+{
+ unsigned long flags;
+ struct dasd_block *block;
+
+ block = from_timer(block, t, timer);
+ spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
+ /* re-activate request queue */
+ dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
+ spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
+ dasd_schedule_block_bh(block);
+ blk_mq_run_hw_queues(block->gdp->queue, true);
+}
+
+/*
+ * Set up a timeout for a dasd_block, in jiffies.
+ */
+void dasd_block_set_timer(struct dasd_block *block, int expires)
+{
+ if (expires == 0)
+ del_timer(&block->timer);
+ else
+ mod_timer(&block->timer, jiffies + expires);
+}
+EXPORT_SYMBOL(dasd_block_set_timer);
+
+/*
+ * Clear timeout for a dasd_block.
+ */
+void dasd_block_clear_timer(struct dasd_block *block)
+{
+ del_timer(&block->timer);
+}
+EXPORT_SYMBOL(dasd_block_clear_timer);
+
+/*
+ * Process finished error recovery ccw.
+ */
+static void __dasd_process_erp(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ dasd_erp_fn_t erp_fn;
+
+ if (cqr->status == DASD_CQR_DONE)
+ DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
+ else
+ dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
+ erp_fn = device->discipline->erp_postaction(cqr);
+ erp_fn(cqr);
+}
+
+static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
+{
+ struct request *req;
+ blk_status_t error = BLK_STS_OK;
+ unsigned int proc_bytes;
+ int status;
+
+ req = (struct request *) cqr->callback_data;
+ dasd_profile_end(cqr->block, cqr, req);
+
+ proc_bytes = cqr->proc_bytes;
+ status = cqr->block->base->discipline->free_cp(cqr, req);
+ if (status < 0)
+ error = errno_to_blk_status(status);
+ else if (status == 0) {
+ switch (cqr->intrc) {
+ case -EPERM:
+ /*
+ * DASD doesn't implement SCSI/NVMe reservations, but it
+ * implements a locking scheme similar to them. We
+ * return this error when we no longer have the lock.
+ */
+ error = BLK_STS_RESV_CONFLICT;
+ break;
+ case -ENOLINK:
+ error = BLK_STS_TRANSPORT;
+ break;
+ case -ETIMEDOUT:
+ error = BLK_STS_TIMEOUT;
+ break;
+ default:
+ error = BLK_STS_IOERR;
+ break;
+ }
+ }
+
+ /*
+	 * We need to take care of ETIMEDOUT errors here since the
+ * complete callback does not get called in this case.
+ * Take care of all errors here and avoid additional code to
+ * transfer the error value to the complete callback.
+ */
+ if (error) {
+ blk_mq_end_request(req, error);
+ blk_mq_run_hw_queues(req->q, true);
+ } else {
+ /*
+		 * Partially completed requests can happen with ESE devices.
+ * During read we might have gotten a NRF error and have to
+ * complete a request partially.
+ */
+ if (proc_bytes) {
+ blk_update_request(req, BLK_STS_OK, proc_bytes);
+ blk_mq_requeue_request(req, true);
+ } else if (likely(!blk_should_fake_timeout(req->q))) {
+ blk_mq_complete_request(req);
+ }
+ }
+}
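+
+/*
+ * Partial-completion sketch: on an ESE volume a read may hit an
+ * unformatted track and return with proc_bytes > 0; the code above then
+ * acknowledges the completed part via blk_update_request() and requeues
+ * the remainder instead of completing the whole request.
+ */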
+
+/*
+ * Process ccw request queue.
+ */
+static void __dasd_process_block_ccw_queue(struct dasd_block *block,
+ struct list_head *final_queue)
+{
+ struct list_head *l, *n;
+ struct dasd_ccw_req *cqr;
+ dasd_erp_fn_t erp_fn;
+ unsigned long flags;
+ struct dasd_device *base = block->base;
+
+restart:
+	/* Process requests with final status. */
+ list_for_each_safe(l, n, &block->ccw_queue) {
+ cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+ if (cqr->status != DASD_CQR_DONE &&
+ cqr->status != DASD_CQR_FAILED &&
+ cqr->status != DASD_CQR_NEED_ERP &&
+ cqr->status != DASD_CQR_TERMINATED)
+ continue;
+
+ if (cqr->status == DASD_CQR_TERMINATED) {
+ base->discipline->handle_terminated_request(cqr);
+ goto restart;
+ }
+
+ /* Process requests that may be recovered */
+ if (cqr->status == DASD_CQR_NEED_ERP) {
+ erp_fn = base->discipline->erp_action(cqr);
+ if (IS_ERR(erp_fn(cqr)))
+ continue;
+ goto restart;
+ }
+
+ /* log sense for fatal error */
+ if (cqr->status == DASD_CQR_FAILED) {
+ dasd_log_sense(cqr, &cqr->irb);
+ }
+
+ /*
+ * First call extended error reporting and check for autoquiesce
+ */
+ spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+ if (cqr->status == DASD_CQR_FAILED &&
+ dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) {
+ cqr->status = DASD_CQR_FILLED;
+ cqr->retries = 255;
+ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ goto restart;
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+
+ /* Process finished ERP request. */
+ if (cqr->refers) {
+ __dasd_process_erp(base, cqr);
+ goto restart;
+ }
+
+ /* Rechain finished requests to final queue */
+ cqr->endclk = get_tod_clock();
+ list_move_tail(&cqr->blocklist, final_queue);
+ }
+}
+
+static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
+{
+ dasd_schedule_block_bh(cqr->block);
+}
+
+static void __dasd_block_start_head(struct dasd_block *block)
+{
+ struct dasd_ccw_req *cqr;
+
+ if (list_empty(&block->ccw_queue))
+ return;
+	/* We always begin with the first request on the queue, as some
+	 * of the previously started requests have to be enqueued on a
+ * dasd_device again for error recovery.
+ */
+ list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
+ if (cqr->status != DASD_CQR_FILLED)
+ continue;
+ if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
+ !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -EPERM;
+ dasd_schedule_block_bh(block);
+ continue;
+ }
+ /* Non-temporary stop condition will trigger fail fast */
+ if (block->base->stopped & ~DASD_STOPPED_PENDING &&
+ test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+ !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->intrc = -ENOLINK;
+ dasd_schedule_block_bh(block);
+ continue;
+ }
+ /* Don't try to start requests if device is stopped */
+ if (block->base->stopped)
+ return;
+
+		/* just a fail-safe check, should not happen */
+ if (!cqr->startdev)
+ cqr->startdev = block->base;
+
+ /* make sure that the requests we submit find their way back */
+ cqr->callback = dasd_return_cqr_cb;
+
+ dasd_add_request_tail(cqr);
+ }
+}
+
+/*
+ * Central dasd_block layer routine. Takes requests from the generic
+ * block layer request queue, creates ccw requests, enqueues them on
+ * a dasd_device and processes ccw requests that have been returned.
+ */
+static void dasd_block_tasklet(unsigned long data)
+{
+ struct dasd_block *block = (struct dasd_block *) data;
+ struct list_head final_queue;
+ struct list_head *l, *n;
+ struct dasd_ccw_req *cqr;
+ struct dasd_queue *dq;
+
+ atomic_set(&block->tasklet_scheduled, 0);
+ INIT_LIST_HEAD(&final_queue);
+ spin_lock_irq(&block->queue_lock);
+ /* Finish off requests on ccw queue */
+ __dasd_process_block_ccw_queue(block, &final_queue);
+ spin_unlock_irq(&block->queue_lock);
+
+ /* Now call the callback function of requests with final status */
+ list_for_each_safe(l, n, &final_queue) {
+ cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+ dq = cqr->dq;
+ spin_lock_irq(&dq->lock);
+ list_del_init(&cqr->blocklist);
+ __dasd_cleanup_cqr(cqr);
+ spin_unlock_irq(&dq->lock);
+ }
+
+ spin_lock_irq(&block->queue_lock);
+ /* Now check if the head of the ccw queue needs to be started. */
+ __dasd_block_start_head(block);
+ spin_unlock_irq(&block->queue_lock);
+
+ if (waitqueue_active(&shutdown_waitq))
+ wake_up(&shutdown_waitq);
+ dasd_put_device(block->base);
+}
+
+static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
+{
+ wake_up(&dasd_flush_wq);
+}
+
+/*
+ * Requeue a request back to the block request queue.
+ * This only works for block requests.
+ */
+static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
+{
+ struct request *req;
+
+ /*
+ * If the request is an ERP request there is nothing to requeue.
+ * This will be done with the remaining original request.
+ */
+ if (cqr->refers)
+ return;
+ spin_lock_irq(&cqr->dq->lock);
+ req = (struct request *) cqr->callback_data;
+ blk_mq_requeue_request(req, true);
+ spin_unlock_irq(&cqr->dq->lock);
+}
+
+static int _dasd_requests_to_flushqueue(struct dasd_block *block,
+ struct list_head *flush_queue)
+{
+ struct dasd_ccw_req *cqr, *n;
+ unsigned long flags;
+ int rc, i;
+
+ spin_lock_irqsave(&block->queue_lock, flags);
+ rc = 0;
+restart:
+ list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
+		/* if this request is currently owned by a dasd_device, cancel it */
+ if (cqr->status >= DASD_CQR_QUEUED)
+ rc = dasd_cancel_req(cqr);
+ if (rc < 0)
+ break;
+ /* Rechain request (including erp chain) so it won't be
+ * touched by the dasd_block_tasklet anymore.
+ * Replace the callback so we notice when the request
+ * is returned from the dasd_device layer.
+ */
+ cqr->callback = _dasd_wake_block_flush_cb;
+ for (i = 0; cqr; cqr = cqr->refers, i++)
+ list_move_tail(&cqr->blocklist, flush_queue);
+ if (i > 1)
+ /* moved more than one request - need to restart */
+ goto restart;
+ }
+ spin_unlock_irqrestore(&block->queue_lock, flags);
+
+ return rc;
+}
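+
+/*
+ * Illustration of the chain walk above: if the list cursor points at an
+ * ERP request that refers to one original request, the inner for loop
+ * moves both entries to the flush queue and ends with i == 2, which
+ * forces a restart because entries other than the cursor were moved.
+ */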
+
+/*
+ * Go through all requests on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+ struct dasd_ccw_req *cqr, *n;
+ struct list_head flush_queue;
+ unsigned long flags;
+ int rc;
+
+ INIT_LIST_HEAD(&flush_queue);
+ rc = _dasd_requests_to_flushqueue(block, &flush_queue);
+
+ /* Now call the callback function of flushed requests */
+restart_cb:
+ list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
+ wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+ /* Process finished ERP request. */
+ if (cqr->refers) {
+ spin_lock_bh(&block->queue_lock);
+ __dasd_process_erp(block->base, cqr);
+ spin_unlock_bh(&block->queue_lock);
+ /* restart list_for_xx loop since dasd_process_erp
+ * might remove multiple elements */
+ goto restart_cb;
+ }
+ /* call the callback function */
+ spin_lock_irqsave(&cqr->dq->lock, flags);
+ cqr->endclk = get_tod_clock();
+ list_del_init(&cqr->blocklist);
+ __dasd_cleanup_cqr(cqr);
+ spin_unlock_irqrestore(&cqr->dq->lock, flags);
+ }
+ return rc;
+}
+
+/*
+ * Schedule a run of dasd_block_tasklet on the block tasklet.
+ */
+void dasd_schedule_block_bh(struct dasd_block *block)
+{
+ /* Protect against rescheduling. */
+ if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
+ return;
+	/* life cycle of block is bound to its base device */
+ dasd_get_device(block->base);
+ tasklet_hi_schedule(&block->tasklet);
+}
+EXPORT_SYMBOL(dasd_schedule_block_bh);
+
+
+/*
+ * SECTION: external block device operations
+ * (request queue handling, open, release, etc.)
+ */
+
+/*
+ * DASD request queue function. Called from the block layer (blk-mq).
+ */
+static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *qd)
+{
+ struct dasd_block *block = hctx->queue->queuedata;
+ struct dasd_queue *dq = hctx->driver_data;
+ struct request *req = qd->rq;
+ struct dasd_device *basedev;
+ struct dasd_ccw_req *cqr;
+ blk_status_t rc = BLK_STS_OK;
+
+ basedev = block->base;
+ spin_lock_irq(&dq->lock);
+ if (basedev->state < DASD_STATE_READY ||
+ test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "device not ready for request %p", req);
+ rc = BLK_STS_IOERR;
+ goto out;
+ }
+
+	/*
+	 * If the device is stopped, do not fetch new requests,
+	 * unless failfast is active, which will let requests fail
+	 * immediately in __dasd_block_start_head().
+	 */
+ if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "device stopped request %p", req);
+ rc = BLK_STS_RESOURCE;
+ goto out;
+ }
+
+ if (basedev->features & DASD_FEATURE_READONLY &&
+ rq_data_dir(req) == WRITE) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "Rejecting write request %p", req);
+ rc = BLK_STS_IOERR;
+ goto out;
+ }
+
+ if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
+ (basedev->features & DASD_FEATURE_FAILFAST ||
+ blk_noretry_request(req))) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "Rejecting failfast request %p", req);
+ rc = BLK_STS_IOERR;
+ goto out;
+ }
+
+ cqr = basedev->discipline->build_cp(basedev, block, req);
+ if (IS_ERR(cqr)) {
+ if (PTR_ERR(cqr) == -EBUSY ||
+ PTR_ERR(cqr) == -ENOMEM ||
+ PTR_ERR(cqr) == -EAGAIN) {
+ rc = BLK_STS_RESOURCE;
+ goto out;
+ }
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "CCW creation failed (rc=%ld) on request %p",
+ PTR_ERR(cqr), req);
+ rc = BLK_STS_IOERR;
+ goto out;
+ }
+ /*
+ * Note: callback is set to dasd_return_cqr_cb in
+ * __dasd_block_start_head to cover erp requests as well
+ */
+ cqr->callback_data = req;
+ cqr->status = DASD_CQR_FILLED;
+ cqr->dq = dq;
+
+ blk_mq_start_request(req);
+ spin_lock(&block->queue_lock);
+ list_add_tail(&cqr->blocklist, &block->ccw_queue);
+ INIT_LIST_HEAD(&cqr->devlist);
+ dasd_profile_start(block, cqr, req);
+ dasd_schedule_block_bh(block);
+ spin_unlock(&block->queue_lock);
+
+out:
+ spin_unlock_irq(&dq->lock);
+ return rc;
+}
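+
+/*
+ * Note on the return values above: BLK_STS_RESOURCE tells blk-mq to hold
+ * the request and dispatch it again later (e.g. when blk_mq_run_hw_queues()
+ * is called once the stop condition clears), while BLK_STS_IOERR fails the
+ * request immediately.
+ */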
+
+/*
+ * Block timeout callback, called from the block layer
+ *
+ * Return values:
+ * BLK_EH_RESET_TIMER if the request should be left running
+ * BLK_EH_DONE if the request is handled or terminated
+ * by the driver.
+ */
+enum blk_eh_timer_return dasd_times_out(struct request *req)
+{
+ struct dasd_block *block = req->q->queuedata;
+ struct dasd_device *device;
+ struct dasd_ccw_req *cqr;
+ unsigned long flags;
+ int rc = 0;
+
+ cqr = blk_mq_rq_to_pdu(req);
+ if (!cqr)
+ return BLK_EH_DONE;
+
+ spin_lock_irqsave(&cqr->dq->lock, flags);
+ device = cqr->startdev ? cqr->startdev : block->base;
+ if (!device->blk_timeout) {
+ spin_unlock_irqrestore(&cqr->dq->lock, flags);
+ return BLK_EH_RESET_TIMER;
+ }
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ " dasd_times_out cqr %p status %x",
+ cqr, cqr->status);
+
+ spin_lock(&block->queue_lock);
+ spin_lock(get_ccwdev_lock(device->cdev));
+ cqr->retries = -1;
+ cqr->intrc = -ETIMEDOUT;
+ if (cqr->status >= DASD_CQR_QUEUED) {
+ rc = __dasd_cancel_req(cqr);
+ } else if (cqr->status == DASD_CQR_FILLED ||
+ cqr->status == DASD_CQR_NEED_ERP) {
+ cqr->status = DASD_CQR_TERMINATED;
+ } else if (cqr->status == DASD_CQR_IN_ERP) {
+ struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
+
+ list_for_each_entry_safe(searchcqr, nextcqr,
+ &block->ccw_queue, blocklist) {
+ tmpcqr = searchcqr;
+ while (tmpcqr->refers)
+ tmpcqr = tmpcqr->refers;
+ if (tmpcqr != cqr)
+ continue;
+ /* searchcqr is an ERP request for cqr */
+ searchcqr->retries = -1;
+ searchcqr->intrc = -ETIMEDOUT;
+ if (searchcqr->status >= DASD_CQR_QUEUED) {
+ rc = __dasd_cancel_req(searchcqr);
+ } else if ((searchcqr->status == DASD_CQR_FILLED) ||
+ (searchcqr->status == DASD_CQR_NEED_ERP)) {
+ searchcqr->status = DASD_CQR_TERMINATED;
+ rc = 0;
+ } else if (searchcqr->status == DASD_CQR_IN_ERP) {
+ /*
+ * Shouldn't happen; most recent ERP
+ * request is at the front of queue
+ */
+ continue;
+ }
+ break;
+ }
+ }
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ dasd_schedule_block_bh(block);
+ spin_unlock(&block->queue_lock);
+ spin_unlock_irqrestore(&cqr->dq->lock, flags);
+
+ return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
+}
+
+static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int idx)
+{
+ struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
+
+ if (!dq)
+ return -ENOMEM;
+
+ spin_lock_init(&dq->lock);
+ hctx->driver_data = dq;
+
+ return 0;
+}
+
+static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
+{
+ kfree(hctx->driver_data);
+ hctx->driver_data = NULL;
+}
+
+static void dasd_request_done(struct request *req)
+{
+ blk_mq_end_request(req, 0);
+ blk_mq_run_hw_queues(req->q, true);
+}
+
+struct blk_mq_ops dasd_mq_ops = {
+ .queue_rq = do_dasd_request,
+ .complete = dasd_request_done,
+ .timeout = dasd_times_out,
+ .init_hctx = dasd_init_hctx,
+ .exit_hctx = dasd_exit_hctx,
+};
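+
+/*
+ * This ops table is plugged into a blk_mq_tag_set when the gendisk for a
+ * dasd_block is set up (presumably in dasd_genhd.c; that code is not part
+ * of this file, so treat the exact location as an assumption).
+ */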
+
+static int dasd_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct dasd_device *base;
+ int rc;
+
+ base = dasd_device_from_gendisk(disk);
+ if (!base)
+ return -ENODEV;
+
+ atomic_inc(&base->block->open_count);
+ if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+
+ if (!try_module_get(base->discipline->owner)) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+
+ if (dasd_probeonly) {
+ dev_info(&base->cdev->dev,
+ "Accessing the DASD failed because it is in "
+ "probeonly mode\n");
+ rc = -EPERM;
+ goto out;
+ }
+
+ if (base->state <= DASD_STATE_BASIC) {
+ DBF_DEV_EVENT(DBF_ERR, base, " %s",
+ " Cannot open unrecognized device");
+ rc = -ENODEV;
+ goto out;
+ }
+ if ((mode & BLK_OPEN_WRITE) &&
+ (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
+ (base->features & DASD_FEATURE_READONLY))) {
+ rc = -EROFS;
+ goto out;
+ }
+ dasd_put_device(base);
+ return 0;
+
+out:
+ module_put(base->discipline->owner);
+unlock:
+ atomic_dec(&base->block->open_count);
+ dasd_put_device(base);
+ return rc;
+}
+
+static void dasd_release(struct gendisk *disk)
+{
+ struct dasd_device *base = dasd_device_from_gendisk(disk);
+ if (base) {
+ atomic_dec(&base->block->open_count);
+ module_put(base->discipline->owner);
+ dasd_put_device(base);
+ }
+}
+
+/*
+ * Return disk geometry.
+ */
+static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct dasd_device *base;
+
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+
+ if (!base->discipline ||
+ !base->discipline->fill_geometry) {
+ dasd_put_device(base);
+ return -EINVAL;
+ }
+ base->discipline->fill_geometry(base->block, geo);
+ geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
+ dasd_put_device(base);
+ return 0;
+}
+
+const struct block_device_operations
+dasd_device_operations = {
+ .owner = THIS_MODULE,
+ .open = dasd_open,
+ .release = dasd_release,
+ .ioctl = dasd_ioctl,
+ .compat_ioctl = dasd_ioctl,
+ .getgeo = dasd_getgeo,
+ .set_read_only = dasd_set_read_only,
+};
+
+/*
+ * end of block device operations
+ */
+
+static void
+dasd_exit(void)
+{
+#ifdef CONFIG_PROC_FS
+ dasd_proc_exit();
+#endif
+ dasd_eer_exit();
+ kmem_cache_destroy(dasd_page_cache);
+ dasd_page_cache = NULL;
+ dasd_gendisk_exit();
+ dasd_devmap_exit();
+ if (dasd_debug_area != NULL) {
+ debug_unregister(dasd_debug_area);
+ dasd_debug_area = NULL;
+ }
+ dasd_statistics_removeroot();
+}
+
+/*
+ * SECTION: common functions for ccw_driver use
+ */
+
+/*
+ * Is the device read-only?
+ * Note that this function does not report the setting of the
+ * readonly device attribute, but how it is configured in z/VM.
+ */
+int dasd_device_is_ro(struct dasd_device *device)
+{
+ struct ccw_dev_id dev_id;
+ struct diag210 diag_data;
+ int rc;
+
+ if (!MACHINE_IS_VM)
+ return 0;
+ ccw_device_get_id(device->cdev, &dev_id);
+ memset(&diag_data, 0, sizeof(diag_data));
+ diag_data.vrdcdvno = dev_id.devno;
+ diag_data.vrdclen = sizeof(diag_data);
+ rc = diag210(&diag_data);
+ if (rc == 0 || rc == 2) {
+ return diag_data.vrdcvfla & 0x80;
+ } else {
+ DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
+ dev_id.devno, rc);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(dasd_device_is_ro);
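+
+/*
+ * Sketch of the DIAG 210 use above: under z/VM, diag210() fills the
+ * passed structure with virtual device information for the device number
+ * in vrdcdvno; judging from the check above, bit 0x80 of vrdcvfla marks
+ * a read-only device.
+ */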
+
+static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
+{
+ struct ccw_device *cdev = data;
+ int ret;
+
+ ret = ccw_device_set_online(cdev);
+ if (ret)
+ pr_warn("%s: Setting the DASD online failed with rc=%d\n",
+ dev_name(&cdev->dev), ret);
+}
+
+/*
+ * Initial attempt at a probe function. This can be simplified once
+ * the other detection code is gone.
+ */
+int dasd_generic_probe(struct ccw_device *cdev)
+{
+ cdev->handler = &dasd_int_handler;
+
+ /*
+ * Automatically online either all dasd devices (dasd_autodetect)
+ * or all devices specified with dasd= parameters during
+ * initial probe.
+ */
+	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
+ (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
+ async_schedule(dasd_generic_auto_online, cdev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_probe);
+
+void dasd_generic_free_discipline(struct dasd_device *device)
+{
+ /* Forget the discipline information. */
+ if (device->discipline) {
+ if (device->discipline->uncheck_device)
+ device->discipline->uncheck_device(device);
+ module_put(device->discipline->owner);
+ device->discipline = NULL;
+ }
+ if (device->base_discipline) {
+ module_put(device->base_discipline->owner);
+ device->base_discipline = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
+
+/*
+ * This will one day be called from a global not_oper handler.
+ * It is also used by driver_unregister during module unload.
+ */
+void dasd_generic_remove(struct ccw_device *cdev)
+{
+ struct dasd_device *device;
+ struct dasd_block *block;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device))
+ return;
+
+ if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /* Already doing offline processing */
+ dasd_put_device(device);
+ return;
+ }
+ /*
+ * This device is removed unconditionally. Set offline
+ * flag to prevent dasd_open from opening it while it is
+	 * not quite down yet.
+ */
+ dasd_set_target_state(device, DASD_STATE_NEW);
+ cdev->handler = NULL;
+ /* dasd_delete_device destroys the device reference. */
+ block = device->block;
+ dasd_delete_device(device);
+ /*
+ * life cycle of block is bound to device, so delete it after
+ * device was safely removed
+ */
+ if (block)
+ dasd_free_block(block);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_remove);
+
+/*
+ * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
+ * the device is detected for the first time and is supposed to be used
+ * or the user has started activation through sysfs.
+ */
+int dasd_generic_set_online(struct ccw_device *cdev,
+ struct dasd_discipline *base_discipline)
+{
+ struct dasd_discipline *discipline;
+ struct dasd_device *device;
+ int rc;
+
+ /* first online clears initial online feature flag */
+ dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
+ device = dasd_create_device(cdev);
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+
+ discipline = base_discipline;
+ if (device->features & DASD_FEATURE_USEDIAG) {
+ if (!dasd_diag_discipline_pointer) {
+ /* Try to load the required module. */
+ rc = request_module(DASD_DIAG_MOD);
+ if (rc) {
+ pr_warn("%s Setting the DASD online failed "
+ "because the required module %s "
+ "could not be loaded (rc=%d)\n",
+ dev_name(&cdev->dev), DASD_DIAG_MOD,
+ rc);
+ dasd_delete_device(device);
+ return -ENODEV;
+ }
+ }
+ /* Module init could have failed, so check again here after
+ * request_module(). */
+ if (!dasd_diag_discipline_pointer) {
+ pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
+ dev_name(&cdev->dev));
+ dasd_delete_device(device);
+ return -ENODEV;
+ }
+ discipline = dasd_diag_discipline_pointer;
+ }
+ if (!try_module_get(base_discipline->owner)) {
+ dasd_delete_device(device);
+ return -EINVAL;
+ }
+ if (!try_module_get(discipline->owner)) {
+ module_put(base_discipline->owner);
+ dasd_delete_device(device);
+ return -EINVAL;
+ }
+ device->base_discipline = base_discipline;
+ device->discipline = discipline;
+
+ /* check_device will allocate block device if necessary */
+ rc = discipline->check_device(device);
+ if (rc) {
+ pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
+ dev_name(&cdev->dev), discipline->name, rc);
+ module_put(discipline->owner);
+ module_put(base_discipline->owner);
+ dasd_delete_device(device);
+ return rc;
+ }
+
+ dasd_set_target_state(device, DASD_STATE_ONLINE);
+ if (device->state <= DASD_STATE_KNOWN) {
+ pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
+ dev_name(&cdev->dev));
+ rc = -ENODEV;
+ dasd_set_target_state(device, DASD_STATE_NEW);
+ if (device->block)
+ dasd_free_block(device->block);
+ dasd_delete_device(device);
+ } else
+ pr_debug("dasd_generic device %s found\n",
+ dev_name(&cdev->dev));
+
+ wait_event(dasd_init_waitq, _wait_for_device(device));
+
+ dasd_put_device(device);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_set_online);
+
+int dasd_generic_set_offline(struct ccw_device *cdev)
+{
+ struct dasd_device *device;
+ struct dasd_block *block;
+ int max_count, open_count, rc;
+ unsigned long flags;
+
+ rc = 0;
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ device = dasd_device_from_cdev_locked(cdev);
+ if (IS_ERR(device)) {
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ return PTR_ERR(device);
+ }
+
+ /*
+ * We must make sure that this device is currently not in use.
+	 * The open_count is increased for every opener, which includes
+	 * the blkdev_get in dasd_scan_partitions. We are only interested
+ * in the other openers.
+ */
+ if (device->block) {
+ max_count = device->block->bdev ? 0 : -1;
+ open_count = atomic_read(&device->block->open_count);
+ if (open_count > max_count) {
+ if (open_count > 0)
+ pr_warn("%s: The DASD cannot be set offline with open count %i\n",
+ dev_name(&cdev->dev), open_count);
+ else
+ pr_warn("%s: The DASD cannot be set offline while it is in use\n",
+ dev_name(&cdev->dev));
+ rc = -EBUSY;
+ goto out_err;
+ }
+ }
+
+ /*
+ * Test if the offline processing is already running and exit if so.
+	 * If a safe offline is being processed, this can only be a normal
+	 * offline that should be able to overtake the safe offline and
+	 * cancel any I/O we do not want to wait for any longer.
+ */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
+ &device->flags);
+ } else {
+ rc = -EBUSY;
+ goto out_err;
+ }
+ }
+ set_bit(DASD_FLAG_OFFLINE, &device->flags);
+
+	/*
+	 * If safe_offline was requested, set the safe_offline_running flag
+	 * and clear safe_offline, so that a subsequent call to normal
+	 * offline can overrun the safe_offline processing.
+	 */
+ if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
+ !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /* need to unlock here to wait for outstanding I/O */
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		/*
+		 * If we want to set the device safe offline, all I/O
+		 * operations should be finished before continuing the
+		 * offline process, so sync the bdev first and then wait
+		 * for our queues to become empty.
+		 */
+ if (device->block)
+ bdev_mark_dead(device->block->bdev, false);
+ dasd_schedule_device_bh(device);
+ rc = wait_event_interruptible(shutdown_waitq,
+ _wait_for_empty_queues(device));
+ if (rc != 0)
+ goto interrupted;
+
+		/*
+		 * Check if a normal offline process overtook the safe
+		 * offline processing. In this case simply do nothing besides
+		 * returning that we got interrupted; otherwise mark safe
+		 * offline as not running any longer and continue with the
+		 * normal offline.
+		 */
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ rc = -ERESTARTSYS;
+ goto out_err;
+ }
+ clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ dasd_set_target_state(device, DASD_STATE_NEW);
+ /* dasd_delete_device destroys the device reference. */
+ block = device->block;
+ dasd_delete_device(device);
+ /*
+ * life cycle of block is bound to device, so delete it after
+ * device was safely removed
+ */
+ if (block)
+ dasd_free_block(block);
+
+ return 0;
+
+interrupted:
+ /* interrupted by signal */
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
+ clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+out_err:
+ dasd_put_device(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
+
+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+
+ dev_warn(&device->cdev->dev, "No operational channel path is left "
+ "for the device\n");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+ /* First call extended error reporting and check for autoquiesce. */
+ dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
+
+ if (device->state < DASD_STATE_BASIC)
+ return 0;
+ /* Device is active. We want to keep it. */
+ list_for_each_entry(cqr, &device->ccw_queue, devlist)
+ if ((cqr->status == DASD_CQR_IN_IO) ||
+ (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+ cqr->status = DASD_CQR_QUEUED;
+ cqr->retries++;
+ }
+ dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+ dasd_device_clear_timer(device);
+ dasd_schedule_device_bh(device);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+ dev_info(&device->cdev->dev, "A channel path to the device has become "
+ "operational\n");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+ dasd_schedule_device_bh(device);
+ if (device->block) {
+ dasd_schedule_block_bh(device->block);
+ if (device->block->gdp)
+ blk_mq_run_hw_queues(device->block->gdp->queue, true);
+ }
+
+ if (!device->stopped)
+ wake_up(&generic_waitq);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
+int dasd_generic_notify(struct ccw_device *cdev, int event)
+{
+ struct dasd_device *device;
+ int ret;
+
+ device = dasd_device_from_cdev_locked(cdev);
+ if (IS_ERR(device))
+ return 0;
+ ret = 0;
+ switch (event) {
+ case CIO_GONE:
+ case CIO_BOXED:
+ case CIO_NO_PATH:
+ dasd_path_no_path(device);
+ ret = dasd_generic_last_path_gone(device);
+ break;
+ case CIO_OPER:
+ ret = 1;
+ if (dasd_path_get_opm(device))
+ ret = dasd_generic_path_operational(device);
+ break;
+ }
+ dasd_put_device(device);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_notify);
+
+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+ struct dasd_device *device;
+ int chp, oldopm, hpfpm, ifccpm;
+
+ device = dasd_device_from_cdev_locked(cdev);
+ if (IS_ERR(device))
+ return;
+
+ oldopm = dasd_path_get_opm(device);
+ for (chp = 0; chp < 8; chp++) {
+ if (path_event[chp] & PE_PATH_GONE) {
+ dasd_path_notoper(device, chp);
+ }
+ if (path_event[chp] & PE_PATH_AVAILABLE) {
+ dasd_path_available(device, chp);
+ dasd_schedule_device_bh(device);
+ }
+ if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
+ if (!dasd_path_is_operational(device, chp) &&
+ !dasd_path_need_verify(device, chp)) {
+ /*
+				 * we cannot establish a pathgroup on an
+ * unavailable path, so trigger a path
+ * verification first
+ */
+ dasd_path_available(device, chp);
+ dasd_schedule_device_bh(device);
+ }
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Pathgroup re-established\n");
+ if (device->discipline->kick_validate)
+ device->discipline->kick_validate(device);
+ }
+ if (path_event[chp] & PE_PATH_FCES_EVENT) {
+ dasd_path_fcsec_update(device, chp);
+ dasd_schedule_device_bh(device);
+ }
+ }
+ hpfpm = dasd_path_get_hpfpm(device);
+ ifccpm = dasd_path_get_ifccpm(device);
+ if (!dasd_path_get_opm(device) && hpfpm) {
+		/*
+		 * The device has no operational paths, but at least one path
+		 * is disabled due to HPF errors:
+		 * disable HPF entirely and use the path(s) again.
+		 */
+ if (device->discipline->disable_hpf)
+ device->discipline->disable_hpf(device);
+ dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+ dasd_path_set_tbvpm(device, hpfpm);
+ dasd_schedule_device_bh(device);
+ dasd_schedule_requeue(device);
+ } else if (!dasd_path_get_opm(device) && ifccpm) {
+		/*
+		 * The device has no operational paths, but at least one path
+		 * is disabled due to IFCC errors:
+		 * trigger path verification on the paths with IFCC errors.
+		 */
+ dasd_path_set_tbvpm(device, ifccpm);
+ dasd_schedule_device_bh(device);
+ }
+ if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
+ dev_warn(&device->cdev->dev,
+ "No verified channel paths remain for the device\n");
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "%s", "last verified path gone");
+ /* First call extended error reporting and check for autoquiesce. */
+ dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
+ dasd_device_set_stop_bits(device,
+ DASD_STOPPED_DC_WAIT);
+ }
+ dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+ if (!dasd_path_get_opm(device) && lpm) {
+ dasd_path_set_opm(device, lpm);
+ dasd_generic_path_operational(device);
+ } else
+ dasd_path_add_opm(device, lpm);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+
+void dasd_generic_space_exhaust(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ /* First call extended error reporting and check for autoquiesce. */
+ dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC);
+
+ if (device->state < DASD_STATE_BASIC)
+ return;
+
+ if (cqr->status == DASD_CQR_IN_IO ||
+ cqr->status == DASD_CQR_CLEAR_PENDING) {
+ cqr->status = DASD_CQR_QUEUED;
+ cqr->retries++;
+ }
+ dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
+ dasd_device_clear_timer(device);
+ dasd_schedule_device_bh(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
+
+void dasd_generic_space_avail(struct dasd_device *device)
+{
+ dev_info(&device->cdev->dev, "Extent pool space is available\n");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
+
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
+ dasd_schedule_device_bh(device);
+
+ if (device->block) {
+ dasd_schedule_block_bh(device->block);
+ if (device->block->gdp)
+ blk_mq_run_hw_queues(device->block->gdp->queue, true);
+ }
+ if (!device->stopped)
+ wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
+
+/*
+ * Clear active requests and requeue them to the block layer if possible.
+ */
+int dasd_generic_requeue_all_requests(struct dasd_device *device)
+{
+ struct dasd_block *block = device->block;
+ struct list_head requeue_queue;
+ struct dasd_ccw_req *cqr, *n;
+ int rc;
+
+ if (!block)
+ return 0;
+
+ INIT_LIST_HEAD(&requeue_queue);
+ rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
+
+ /* Now call the callback function of flushed requests */
+restart_cb:
+ list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
+ wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+ /* Process finished ERP request. */
+ if (cqr->refers) {
+ spin_lock_bh(&block->queue_lock);
+ __dasd_process_erp(block->base, cqr);
+ spin_unlock_bh(&block->queue_lock);
+ /* restart list_for_xx loop since dasd_process_erp
+ * might remove multiple elements
+ */
+ goto restart_cb;
+ }
+ _dasd_requeue_request(cqr);
+ list_del_init(&cqr->blocklist);
+ cqr->block->base->discipline->free_cp(
+ cqr, (struct request *) cqr->callback_data);
+ }
+ dasd_schedule_device_bh(device);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);
+
+static void do_requeue_requests(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ requeue_requests);
+ dasd_generic_requeue_all_requests(device);
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+ dasd_put_device(device);
+}
+
+void dasd_schedule_requeue(struct dasd_device *device)
+{
+ dasd_get_device(device);
+	/* queue the requeue_requests work to the kernel event daemon. */
+ if (!schedule_work(&device->requeue_requests))
+ dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_schedule_requeue);
+
+static int dasd_handle_autoquiesce(struct dasd_device *device,
+ struct dasd_ccw_req *cqr,
+ unsigned int reason)
+{
+	/* in any case, write an EER message with the reason */
+ if (dasd_eer_enabled(device))
+ dasd_eer_write(device, cqr, reason);
+
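+	/* autoquiesce is only triggered for reasons enabled in aq_mask */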
+ if (!test_bit(reason, &device->aq_mask))
+ return 0;
+
+ /* notify eer about autoquiesce */
+ if (dasd_eer_enabled(device))
+ dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
+
+ pr_info("%s: The DASD has been put in the quiesce state\n",
+ dev_name(&device->cdev->dev));
+ dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
+
+ if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
+ dasd_schedule_requeue(device);
+
+ return 1;
+}
+
+static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
+ int rdc_buffer_size,
+ int magic)
+{
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+
+ cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
+ NULL);
+
+ if (IS_ERR(cqr)) {
+		/* internal error 13 - Allocating the RDC request failed */
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", "13");
+ return cqr;
+ }
+
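+	/* set up the single Read Device Characteristics CCW */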
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = CCW_CMD_RDC;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->flags = 0;
+ ccw->count = rdc_buffer_size;
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->expires = 10*HZ;
+ cqr->retries = 256;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ return cqr;
+}
+
+
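+/*
+ * A typical caller (sketch only; the actual call sites live in the
+ * discipline code, e.g. dasd_eckd.c) reads the characteristics into a
+ * discipline private buffer during device setup:
+ *
+ *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+ *					 &private->rdc_data,
+ *					 sizeof(private->rdc_data));
+ */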
+int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
+ void *rdc_buffer, int rdc_buffer_size)
+{
+ int ret;
+ struct dasd_ccw_req *cqr;
+
+ cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
+ if (IS_ERR(cqr))
+ return PTR_ERR(cqr);
+
+ ret = dasd_sleep_on(cqr);
+ if (ret == 0)
+ memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
+ dasd_sfree_request(cqr, cqr->memdev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
+
+/*
+ * In command mode and transport mode we need to look for sense
+ * data in different places. The sense data itself is always
+ * an array of 32 bytes, so we can unify the sense data access
+ * for both modes.
+ */
+char *dasd_get_sense(struct irb *irb)
+{
+ struct tsb *tsb = NULL;
+ char *sense = NULL;
+
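+	/* transport mode: the sense data is found via the TSB, whose flags
+	 * select which transport-status area (TSA) layout applies */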
+ if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
+ if (irb->scsw.tm.tcw)
+ tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
+ if (tsb && tsb->length == 64 && tsb->flags)
+ switch (tsb->flags & 0x07) {
+ case 1: /* tsa_iostat */
+ sense = tsb->tsa.iostat.sense;
+ break;
+ case 2: /* tsa_ddpc */
+ sense = tsb->tsa.ddpc.sense;
+ break;
+ default:
+ /* currently we don't use interrogate data */
+ break;
+ }
+ } else if (irb->esw.esw0.erw.cons) {
+ sense = irb->ecw;
+ }
+ return sense;
+}
+EXPORT_SYMBOL_GPL(dasd_get_sense);
+
+void dasd_generic_shutdown(struct ccw_device *cdev)
+{
+ struct dasd_device *device;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device))
+ return;
+
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+
+ dasd_schedule_device_bh(device);
+
+ wait_event(shutdown_waitq, _wait_for_empty_queues(device));
+}
+EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
+
+static int __init dasd_init(void)
+{
+ int rc;
+
+ init_waitqueue_head(&dasd_init_waitq);
+ init_waitqueue_head(&dasd_flush_wq);
+ init_waitqueue_head(&generic_waitq);
+ init_waitqueue_head(&shutdown_waitq);
+
+ /* register 'common' DASD debug area, used for all DBF_XXX calls */
+ dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
+ if (dasd_debug_area == NULL) {
+ rc = -ENOMEM;
+ goto failed;
+ }
+ debug_register_view(dasd_debug_area, &debug_sprintf_view);
+ debug_set_level(dasd_debug_area, DBF_WARNING);
+
+ DBF_EVENT(DBF_EMERG, "%s", "debug area created");
+
+ dasd_diag_discipline_pointer = NULL;
+
+ dasd_statistics_createroot();
+
+ rc = dasd_devmap_init();
+ if (rc)
+ goto failed;
+ rc = dasd_gendisk_init();
+ if (rc)
+ goto failed;
+ rc = dasd_parse();
+ if (rc)
+ goto failed;
+ rc = dasd_eer_init();
+ if (rc)
+ goto failed;
+#ifdef CONFIG_PROC_FS
+ rc = dasd_proc_init();
+ if (rc)
+ goto failed;
+#endif
+
+ return 0;
+failed:
+ pr_info("The DASD device driver could not be initialized\n");
+ dasd_exit();
+ return rc;
+}
+
+module_init(dasd_init);
+module_exit(dasd_exit);
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
new file mode 100644
index 0000000000..89957bb724
--- /dev/null
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -0,0 +1,2859 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 2000, 2001
+ *
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/timer.h>
+#include <asm/idals.h>
+
+#define PRINTK_HEADER "dasd_erp(3990): "
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+
+struct DCTL_data {
+	unsigned char subcommand;  /* e.g. Inhibit Write, Enable Write,... */
+ unsigned char modifier; /* Subcommand modifier */
+ unsigned short res; /* reserved */
+} __attribute__ ((packed));
+
+/*
+ *****************************************************************************
+ * SECTION ERP HANDLING
+ *****************************************************************************
+ */
+/*
+ *****************************************************************************
+ * 24 and 32 byte sense ERP functions
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_CLEANUP
+ *
+ * DESCRIPTION
+ *   Removes the already built but no longer necessary ERP request and sets
+ *   the status of the original cqr / erp to the given (final) status.
+ *
+ * PARAMETER
+ * erp request to be blocked
+ * final_status either DASD_CQR_DONE or DASD_CQR_FAILED
+ *
+ * RETURN VALUES
+ * cqr original cqr
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
+{
+ struct dasd_ccw_req *cqr = erp->refers;
+
+ dasd_free_erp_request(erp, erp->memdev);
+ cqr->status = final_status;
+ return cqr;
+
+} /* end dasd_3990_erp_cleanup */
+
+/*
+ * DASD_3990_ERP_BLOCK_QUEUE
+ *
+ * DESCRIPTION
+ *   Block the given device request queue to prevent further
+ *   processing until the started timer has expired or a related
+ *   interrupt has been received.
+ */
+static void dasd_3990_erp_block_queue(struct dasd_ccw_req *erp, int expires)
+{
+
+ struct dasd_device *device = erp->startdev;
+ unsigned long flags;
+
+ DBF_DEV_EVENT(DBF_INFO, device,
+ "blocking request queue for %is", expires/HZ);
+
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ erp->status = DASD_CQR_FILLED;
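+	/* redrive the request once the timer expires, using the block
+	 * timer if the request belongs to a block device */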
+ if (erp->block)
+ dasd_block_set_timer(erp->block, expires);
+ else
+ dasd_device_set_timer(device, expires);
+}
+
+/*
+ * DASD_3990_ERP_INT_REQ
+ *
+ * DESCRIPTION
+ * Handles 'Intervention Required' error.
+ * This means either device offline or not installed.
+ *
+ * PARAMETER
+ * erp current erp
+ * RETURN VALUES
+ * erp modified erp
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ /* first time set initial retry counter and erp_function */
+ /* and retry once without blocking queue */
+	/* (this enables easier enqueueing of the cqr) */
+ if (erp->function != dasd_3990_erp_int_req) {
+
+ erp->retries = 256;
+ erp->function = dasd_3990_erp_int_req;
+
+ } else {
+
+ /* issue a message and wait for 'device ready' interrupt */
+ dev_err(&device->cdev->dev,
+ "is offline or not installed - "
+ "INTERVENTION REQUIRED!!\n");
+
+ dasd_3990_erp_block_queue(erp, 60*HZ);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_int_req */
+
+/*
+ * DASD_3990_ERP_ALTERNATE_PATH
+ *
+ * DESCRIPTION
+ * Repeat the operation on a different channel path.
+ * If all alternate paths have been tried, the request is posted with a
+ * permanent error.
+ *
+ * PARAMETER
+ * erp pointer to the current ERP
+ *
+ * RETURN VALUES
+ * erp modified pointer to the ERP
+ */
+static void
+dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
+{
+ struct dasd_device *device = erp->startdev;
+ __u8 opm;
+ unsigned long flags;
+
+ /* try alternate valid path */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ opm = ccw_device_get_path_mask(device->cdev);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
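+	/* mask out the path the error came in on (lpum) so that only the
+	 * remaining paths are retried */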
+ if (erp->lpm == 0)
+ erp->lpm = dasd_path_get_opm(device) &
+ ~(erp->irb.esw.esw0.sublog.lpum);
+ else
+ erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
+
+ if ((erp->lpm & opm) != 0x00) {
+
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "try alternate lpm=%x (lpum=%x / opm=%x)",
+ erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
+
+ /* reset status to submit the request again... */
+ erp->status = DASD_CQR_FILLED;
+ erp->retries = 10;
+ } else {
+ dev_err(&device->cdev->dev,
+ "The DASD cannot be reached on any path (lpum=%x"
+ "/opm=%x)\n", erp->irb.esw.esw0.sublog.lpum, opm);
+
+ /* post request with permanent error */
+ erp->status = DASD_CQR_FAILED;
+ }
+} /* end dasd_3990_erp_alternate_path */
+
+/*
+ * DASD_3990_ERP_DCTL
+ *
+ * DESCRIPTION
+ * Setup cqr to do the Diagnostic Control (DCTL) command with an
+ * Inhibit Write subcommand (0x20) and the given modifier.
+ *
+ * PARAMETER
+ * erp pointer to the current (failed) ERP
+ * modifier subcommand modifier
+ *
+ * RETURN VALUES
+ * dctl_cqr pointer to NEW dctl_cqr
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
+{
+
+ struct dasd_device *device = erp->startdev;
+ struct DCTL_data *DCTL_data;
+ struct ccw1 *ccw;
+ struct dasd_ccw_req *dctl_cqr;
+
+ dctl_cqr = dasd_alloc_erp_request(erp->magic, 1,
+ sizeof(struct DCTL_data),
+ device);
+ if (IS_ERR(dctl_cqr)) {
+ dev_err(&device->cdev->dev,
+ "Unable to allocate DCTL-CQR\n");
+ erp->status = DASD_CQR_FAILED;
+ return erp;
+ }
+
+ DCTL_data = dctl_cqr->data;
+
+ DCTL_data->subcommand = 0x02; /* Inhibit Write */
+ DCTL_data->modifier = modifier;
+
+ ccw = dctl_cqr->cpaddr;
+ memset(ccw, 0, sizeof(struct ccw1));
+ ccw->cmd_code = CCW_CMD_DCTL;
+ ccw->count = 4;
+ ccw->cda = (__u32)virt_to_phys(DCTL_data);
+ dctl_cqr->flags = erp->flags;
+ dctl_cqr->function = dasd_3990_erp_DCTL;
+ dctl_cqr->refers = erp;
+ dctl_cqr->startdev = device;
+ dctl_cqr->memdev = device;
+ dctl_cqr->magic = erp->magic;
+ dctl_cqr->expires = 5 * 60 * HZ;
+ dctl_cqr->retries = 2;
+
+ dctl_cqr->buildclk = get_tod_clock();
+
+ dctl_cqr->status = DASD_CQR_FILLED;
+
+ return dctl_cqr;
+
+} /* end dasd_3990_erp_DCTL */
+
+/*
+ * DASD_3990_ERP_ACTION_1
+ *
+ * DESCRIPTION
+ * Setup ERP to do the ERP action 1 (see Reference manual).
+ * Repeat the operation on a different channel path.
+ * As deviation from the recommended recovery action, we reset the path mask
+ * after we have tried each path and go through all paths a second time.
+ * This will cover situations where only one path at a time is actually down,
+ * but all paths fail and recover just with the same sequence and timing as
+ * we try to use them (flapping links).
+ * If all alternate paths have been tried twice, the request is posted with
+ * a permanent error.
+ *
+ * PARAMETER
+ * erp pointer to the current ERP
+ *
+ * RETURN VALUES
+ * erp pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *dasd_3990_erp_action_1_sec(struct dasd_ccw_req *erp)
+{
+ erp->function = dasd_3990_erp_action_1_sec;
+ dasd_3990_erp_alternate_path(erp);
+ return erp;
+}
+
+static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
+{
+ erp->function = dasd_3990_erp_action_1;
+ dasd_3990_erp_alternate_path(erp);
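+	/* if the first pass over all paths failed, reset the path mask and
+	 * run a second pass (see description above) */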
+ if (erp->status == DASD_CQR_FAILED &&
+ !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
+ erp->status = DASD_CQR_FILLED;
+ erp->retries = 10;
+ erp->lpm = dasd_path_get_opm(erp->startdev);
+ erp->function = dasd_3990_erp_action_1_sec;
+ }
+ return erp;
+} /* end dasd_3990_erp_action_1(b) */
+
+/*
+ * DASD_3990_ERP_ACTION_4
+ *
+ * DESCRIPTION
+ * Setup ERP to do the ERP action 4 (see Reference manual).
+ * Set the current request to PENDING to block the CQR queue for that device
+ * until the state change interrupt appears.
+ * Use a timer (20 seconds) to retry the cqr if the interrupt is still
+ * missing.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the current ERP
+ *
+ * RETURN VALUES
+ * erp pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ /* first time set initial retry counter and erp_function */
+ /* and retry once without waiting for state change pending */
+	/* interrupt (this enables easier enqueueing of the cqr) */
+ if (erp->function != dasd_3990_erp_action_4) {
+
+ DBF_DEV_EVENT(DBF_INFO, device, "%s",
+ "dasd_3990_erp_action_4: first time retry");
+
+ erp->retries = 256;
+ erp->function = dasd_3990_erp_action_4;
+
+ } else {
+ if (sense && (sense[25] == 0x1D)) { /* state change pending */
+
+ DBF_DEV_EVENT(DBF_INFO, device,
+ "waiting for state change pending "
+ "interrupt, %d retries left",
+ erp->retries);
+
+ dasd_3990_erp_block_queue(erp, 30*HZ);
+
+ } else if (sense && (sense[25] == 0x1E)) { /* busy */
+ DBF_DEV_EVENT(DBF_INFO, device,
+ "busy - redriving request later, "
+ "%d retries left",
+ erp->retries);
+ dasd_3990_erp_block_queue(erp, HZ);
+ } else {
+ /* no state change pending - retry */
+ DBF_DEV_EVENT(DBF_INFO, device,
+ "redriving request immediately, "
+ "%d retries left",
+ erp->retries);
+ erp->status = DASD_CQR_FILLED;
+ }
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_action_4 */
+
+/*
+ *****************************************************************************
+ * 24 byte sense ERP functions (only)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_ACTION_5
+ *
+ * DESCRIPTION
+ * Setup ERP to do the ERP action 5 (see Reference manual).
+ * NOTE: Further handling is done in xxx_further_erp after the retries.
+ *
+ * PARAMETER
+ * erp pointer to the current ERP
+ *
+ * RETURN VALUES
+ * erp pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_5(struct dasd_ccw_req * erp)
+{
+
+ /* first of all retry */
+ erp->retries = 10;
+ erp->function = dasd_3990_erp_action_5;
+
+ return erp;
+
+} /* end dasd_3990_erp_action_5 */
+
+/*
+ * DASD_3990_HANDLE_ENV_DATA
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Environmental data present'.
+ *   Does an analysis of the sense data (message format)
+ * and prints the error messages.
+ *
+ * PARAMETER
+ * sense current sense data
+ *
+ * RETURN VALUES
+ * void
+ */
+static void
+dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+ char msg_format = (sense[7] & 0xF0);
+ char msg_no = (sense[7] & 0x0F);
+ char errorstring[ERRORLENGTH];
+
+ switch (msg_format) {
+ case 0x00: /* Format 0 - Program or System Checks */
+
+ if (sense[1] & 0x10) { /* check message to operator bit */
+
+ switch (msg_no) {
+ case 0x00: /* No Message */
+ break;
+ case 0x01:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Invalid Command\n");
+ break;
+ case 0x02:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Invalid Command "
+ "Sequence\n");
+ break;
+ case 0x03:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - CCW Count less than "
+ "required\n");
+ break;
+ case 0x04:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Invalid Parameter\n");
+ break;
+ case 0x05:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Diagnostic of Special"
+ " Command Violates File Mask\n");
+ break;
+ case 0x07:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Channel Returned with "
+ "Incorrect retry CCW\n");
+ break;
+ case 0x08:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Reset Notification\n");
+ break;
+ case 0x09:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Storage Path Restart\n");
+ break;
+ case 0x0A:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Channel requested "
+ "... %02x\n", sense[8]);
+ break;
+ case 0x0B:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Invalid Defective/"
+ "Alternate Track Pointer\n");
+ break;
+ case 0x0C:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - DPS Installation "
+ "Check\n");
+ break;
+ case 0x0E:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Command Invalid on "
+ "Secondary Address\n");
+ break;
+ case 0x0F:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Status Not As "
+ "Required: reason %02x\n",
+ sense[8]);
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Reserved\n");
+ }
+ } else {
+ switch (msg_no) {
+ case 0x00: /* No Message */
+ break;
+ case 0x01:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Device Error "
+ "Source\n");
+ break;
+ case 0x02:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Reserved\n");
+ break;
+ case 0x03:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Device Fenced - "
+ "device = %02x\n", sense[4]);
+ break;
+ case 0x04:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Data Pinned for "
+ "Device\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 0 - Reserved\n");
+ }
+ }
+ break;
+
+ case 0x10: /* Format 1 - Device Equipment Checks */
+ switch (msg_no) {
+ case 0x00: /* No Message */
+ break;
+ case 0x01:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Device Status 1 not as "
+ "expected\n");
+ break;
+ case 0x03:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Index missing\n");
+ break;
+ case 0x04:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Interruption cannot be "
+ "reset\n");
+ break;
+ case 0x05:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Device did not respond to "
+ "selection\n");
+ break;
+ case 0x06:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Device check-2 error or Set "
+ "Sector is not complete\n");
+ break;
+ case 0x07:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Head address does not "
+ "compare\n");
+ break;
+ case 0x08:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Device status 1 not valid\n");
+ break;
+ case 0x09:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Device not ready\n");
+ break;
+ case 0x0A:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Track physical address did "
+ "not compare\n");
+ break;
+ case 0x0B:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Missing device address bit\n");
+ break;
+ case 0x0C:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Drive motor switch is off\n");
+ break;
+ case 0x0D:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Seek incomplete\n");
+ break;
+ case 0x0E:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Cylinder address did not "
+ "compare\n");
+ break;
+ case 0x0F:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Offset active cannot be "
+ "reset\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 1 - Reserved\n");
+ }
+ break;
+
+ case 0x20: /* Format 2 - 3990 Equipment Checks */
+ switch (msg_no) {
+ case 0x08:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 2 - 3990 check-2 error\n");
+ break;
+ case 0x0E:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 2 - Support facility errors\n");
+ break;
+ case 0x0F:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 2 - Microcode detected error "
+ "%02x\n",
+ sense[8]);
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 2 - Reserved\n");
+ }
+ break;
+
+ case 0x30: /* Format 3 - 3990 Control Checks */
+ switch (msg_no) {
+ case 0x0F:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 3 - Allegiance terminated\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 3 - Reserved\n");
+ }
+ break;
+
+ case 0x40: /* Format 4 - Data Checks */
+ switch (msg_no) {
+ case 0x00:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Home address area error\n");
+ break;
+ case 0x01:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Count area error\n");
+ break;
+ case 0x02:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Key area error\n");
+ break;
+ case 0x03:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Data area error\n");
+ break;
+ case 0x04:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in home address "
+ "area\n");
+ break;
+ case 0x05:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in count address "
+ "area\n");
+ break;
+ case 0x06:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in key area\n");
+ break;
+ case 0x07:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in data area\n");
+ break;
+ case 0x08:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Home address area error; "
+ "offset active\n");
+ break;
+ case 0x09:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Count area error; offset "
+ "active\n");
+ break;
+ case 0x0A:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Key area error; offset "
+ "active\n");
+ break;
+ case 0x0B:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Data area error; "
+ "offset active\n");
+ break;
+ case 0x0C:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in home "
+ "address area; offset active\n");
+ break;
+ case 0x0D:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in count "
+ "address area; offset active\n");
+ break;
+ case 0x0E:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in key area; "
+ "offset active\n");
+ break;
+ case 0x0F:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - No sync byte in data area; "
+ "offset active\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 4 - Reserved\n");
+ }
+ break;
+
+ case 0x50: /* Format 5 - Data Check with displacement information */
+ switch (msg_no) {
+ case 0x00:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the "
+ "home address area\n");
+ break;
+ case 0x01:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the count "
+ "area\n");
+ break;
+ case 0x02:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the key area\n");
+ break;
+ case 0x03:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the data "
+ "area\n");
+ break;
+ case 0x08:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the "
+ "home address area; offset active\n");
+ break;
+ case 0x09:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the count area; "
+ "offset active\n");
+ break;
+ case 0x0A:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the key area; "
+ "offset active\n");
+ break;
+ case 0x0B:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Data Check in the data area; "
+ "offset active\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 5 - Reserved\n");
+ }
+ break;
+
+ case 0x60: /* Format 6 - Usage Statistics/Overrun Errors */
+ switch (msg_no) {
+ case 0x00:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel A\n");
+ break;
+ case 0x01:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel B\n");
+ break;
+ case 0x02:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel C\n");
+ break;
+ case 0x03:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel D\n");
+ break;
+ case 0x04:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel E\n");
+ break;
+ case 0x05:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel F\n");
+ break;
+ case 0x06:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel G\n");
+ break;
+ case 0x07:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Overrun on channel H\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 6 - Reserved\n");
+ }
+ break;
+
+ case 0x70: /* Format 7 - Device Connection Control Checks */
+ switch (msg_no) {
+ case 0x00:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - RCC initiated by a connection "
+ "check alert\n");
+ break;
+ case 0x01:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - RCC 1 sequence not "
+ "successful\n");
+ break;
+ case 0x02:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - RCC 1 and RCC 2 sequences not "
+ "successful\n");
+ break;
+ case 0x03:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - Invalid tag-in during "
+ "selection sequence\n");
+ break;
+ case 0x04:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - extra RCC required\n");
+ break;
+ case 0x05:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - Invalid DCC selection "
+ "response or timeout\n");
+ break;
+ case 0x06:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - Missing end operation; device "
+ "transfer complete\n");
+ break;
+ case 0x07:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - Missing end operation; device "
+ "transfer incomplete\n");
+ break;
+ case 0x08:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - Invalid tag-in for an "
+ "immediate command sequence\n");
+ break;
+ case 0x09:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - Invalid tag-in for an "
+ "extended command sequence\n");
+ break;
+ case 0x0A:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - 3990 microcode time out when "
+ "stopping selection\n");
+ break;
+ case 0x0B:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - No response to selection "
+ "after a poll interruption\n");
+ break;
+ case 0x0C:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - Permanent path error (DASD "
+ "controller not available)\n");
+ break;
+ case 0x0D:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - DASD controller not available"
+ " on disconnected command chain\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 7 - Reserved\n");
+ }
+ break;
+
+ case 0x80: /* Format 8 - Additional Device Equipment Checks */
+ switch (msg_no) {
+ case 0x00: /* No Message */
+ case 0x01:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - Error correction code "
+ "hardware fault\n");
+ break;
+ case 0x03:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - Unexpected end operation "
+ "response code\n");
+ break;
+ case 0x04:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - End operation with transfer "
+ "count not zero\n");
+ break;
+ case 0x05:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - End operation with transfer "
+ "count zero\n");
+ break;
+ case 0x06:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - DPS checks after a system "
+ "reset or selective reset\n");
+ break;
+ case 0x07:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - DPS cannot be filled\n");
+ break;
+ case 0x08:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - Short busy time-out during "
+ "device selection\n");
+ break;
+ case 0x09:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - DASD controller failed to "
+ "set or reset the long busy latch\n");
+ break;
+ case 0x0A:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - No interruption from device "
+ "during a command chain\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 8 - Reserved\n");
+ }
+ break;
+
+ case 0x90: /* Format 9 - Device Read, Write, and Seek Checks */
+ switch (msg_no) {
+ case 0x00:
+ break; /* No Message */
+ case 0x06:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 9 - Device check-2 error\n");
+ break;
+ case 0x07:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 9 - Head address did not "
+ "compare\n");
+ break;
+ case 0x0A:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 9 - Track physical address did "
+ "not compare while oriented\n");
+ break;
+ case 0x0E:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 9 - Cylinder address did not "
+ "compare\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT 9 - Reserved\n");
+ }
+ break;
+
+ case 0xF0: /* Format F - Cache Storage Checks */
+ switch (msg_no) {
+ case 0x00:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Operation Terminated\n");
+ break;
+ case 0x01:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Subsystem Processing Error\n");
+ break;
+ case 0x02:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Cache or nonvolatile storage "
+ "equipment failure\n");
+ break;
+ case 0x04:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Caching terminated\n");
+ break;
+ case 0x06:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Cache fast write access not "
+ "authorized\n");
+ break;
+ case 0x07:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Track format incorrect\n");
+ break;
+ case 0x09:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Caching reinitiated\n");
+ break;
+ case 0x0A:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Nonvolatile storage "
+ "terminated\n");
+ break;
+ case 0x0B:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Volume is suspended duplex\n");
+ /* call extended error reporting (EER) */
+ dasd_eer_write(device, erp->refers,
+ DASD_EER_PPRCSUSPEND);
+ break;
+ case 0x0C:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Subsystem status cannot be "
+ "determined\n");
+ break;
+ case 0x0D:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Caching status reset to "
+ "default\n");
+ break;
+ case 0x0E:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - DASD Fast Write inhibited\n");
+ break;
+ default:
+ dev_warn(&device->cdev->dev,
+ "FORMAT F - Reserved\n");
+ }
+ break;
+
+ default: /* unknown message format - should not happen
+ internal error 03 - unknown message format */
+		snprintf(errorstring, ERRORLENGTH, "03 %02x", msg_format);
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", errorstring);
+ break;
+ } /* end switch message format */
+
+} /* end dasd_3990_handle_env_data */
+
+/*
+ * DASD_3990_ERP_COM_REJ
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Command Reject' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * sense current sense data
+ *
+ * RETURN VALUES
+ * erp 'new' erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ erp->function = dasd_3990_erp_com_rej;
+
+ /* env data present (ACTION 10 - retry should work) */
+ if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Command Reject - environmental data present");
+
+ dasd_3990_handle_env_data(erp, sense);
+
+ erp->retries = 5;
+
+ } else if (sense[1] & SNS1_WRITE_INHIBITED) {
+ dev_err(&device->cdev->dev, "An I/O request was rejected"
+ " because writing is inhibited\n");
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ } else if (sense[7] == SNS7_INVALID_ON_SEC) {
+ dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n");
+ /* suppress dump of sense data for this error */
+ set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ } else {
+ /* fatal error - set status to FAILED
+ internal error 09 - Command Reject */
+ if (!test_bit(DASD_CQR_SUPPRESS_CR, &erp->flags))
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, reason=09\n");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_com_rej */
+
+/*
+ * DASD_3990_ERP_BUS_OUT
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Bus Out Parity Check' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ /* first time set initial retry counter and erp_function */
+ /* and retry once without blocking queue */
+	/* (this enables easier enqueueing of the cqr) */
+ if (erp->function != dasd_3990_erp_bus_out) {
+ erp->retries = 256;
+ erp->function = dasd_3990_erp_bus_out;
+
+ } else {
+
+ /* issue a message and wait for 'device ready' interrupt */
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "bus out parity error or BOPC requested by "
+ "channel");
+
+ dasd_3990_erp_block_queue(erp, 60*HZ);
+
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_bus_out */
+
+/*
+ * DASD_3990_ERP_EQUIP_CHECK
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Equipment Check' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ erp->function = dasd_3990_erp_equip_check;
+
+ if (sense[1] & SNS1_WRITE_INHIBITED) {
+ dev_info(&device->cdev->dev,
+ "Write inhibited path encountered\n");
+
+ /* vary path offline
+		   internal error 04 - Path should be varied off-line. */
+ dev_err(&device->cdev->dev, "An error occurred in the DASD "
+ "device driver, reason=%s\n", "04");
+
+ erp = dasd_3990_erp_action_1(erp);
+
+ } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Equipment Check - " "environmental data present");
+
+ dasd_3990_handle_env_data(erp, sense);
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+
+ } else if (sense[1] & SNS1_PERM_ERR) {
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Equipment Check - retry exhausted or "
+ "undesirable");
+
+ erp = dasd_3990_erp_action_1(erp);
+
+ } else {
+ /* all other equipment checks - Action 5 */
+ /* rest is done when retries == 0 */
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Equipment check or processing error");
+
+ erp = dasd_3990_erp_action_5(erp);
+ }
+ return erp;
+
+} /* end dasd_3990_erp_equip_check */
+
+/*
+ * DASD_3990_ERP_DATA_CHECK
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Data Check' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ erp->function = dasd_3990_erp_data_check;
+
+ if (sense[2] & SNS2_CORRECTABLE) { /* correctable data check */
+
+ /* issue message that the data has been corrected */
+ dev_emerg(&device->cdev->dev,
+ "Data recovered during retry with PCI "
+ "fetch mode active\n");
+
+ /* not possible to handle this situation in Linux */
+ panic("No way to inform application about the possibly "
+ "incorrect data");
+
+ } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Uncorrectable data check recovered secondary "
+ "addr of duplex pair");
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+
+ } else if (sense[1] & SNS1_PERM_ERR) {
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Uncorrectable data check with internal "
+ "retry exhausted");
+
+ erp = dasd_3990_erp_action_1(erp);
+
+ } else {
+ /* all other data checks */
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Uncorrectable data check with retry count "
+ "exhausted...");
+
+ erp = dasd_3990_erp_action_5(erp);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_data_check */
+
+/*
+ * DASD_3990_ERP_OVERRUN
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Overrun' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ erp->function = dasd_3990_erp_overrun;
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Overrun - service overrun or overrun"
+ " error requested by channel");
+
+ erp = dasd_3990_erp_action_5(erp);
+
+ return erp;
+
+} /* end dasd_3990_erp_overrun */
+
+/*
+ * DASD_3990_ERP_INV_FORMAT
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Invalid Track Format' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ erp->function = dasd_3990_erp_inv_format;
+
+ if (sense[2] & SNS2_ENV_DATA_PRESENT) {
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Track format error when destaging or "
+ "staging data");
+
+ dasd_3990_handle_env_data(erp, sense);
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+
+ } else {
+ /* internal error 06 - The track format is not valid*/
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", "06");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_inv_format */
+
+/*
+ * DASD_3990_ERP_EOC
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'End-of-Cylinder' error.
+ *
+ * PARAMETER
+ * erp already added default erp
+ * RETURN VALUES
+ * erp pointer to original (failed) cqr.
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+ struct dasd_device *device = default_erp->startdev;
+
+ dev_err(&device->cdev->dev,
+ "The cylinder data for accessing the DASD is inconsistent\n");
+
+ /* implement action 7 - BUG */
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+
+} /* end dasd_3990_erp_EOC */
+
+/*
+ * DASD_3990_ERP_ENV_DATA
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'Environmental-Data Present' error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ erp->function = dasd_3990_erp_env_data;
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Environmental data present");
+
+ dasd_3990_handle_env_data(erp, sense);
+
+ /* don't retry on disabled interface */
+ if (sense[7] != 0x0F) {
+ erp = dasd_3990_erp_action_4(erp, sense);
+ } else {
+ erp->status = DASD_CQR_FILLED;
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_env_data */
+
+/*
+ * DASD_3990_ERP_NO_REC
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'No Record Found' error.
+ *
+ * PARAMETER
+ * erp already added default ERP
+ *
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+ struct dasd_device *device = default_erp->startdev;
+
+ /*
+ * In some cases the 'No Record Found' error might be expected and
+ * log messages shouldn't be written then.
+	 * Check if the corresponding suppress bit is set.
+ */
+ if (!test_bit(DASD_CQR_SUPPRESS_NRF, &default_erp->flags))
+ dev_err(&device->cdev->dev,
+ "The specified record was not found\n");
+
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+
+} /* end dasd_3990_erp_no_rec */
+
+/*
+ * DASD_3990_ERP_FILE_PROT
+ *
+ * DESCRIPTION
+ * Handles 24 byte 'File Protected' error.
+ * Note: Seek related recovery is not implemented because
+ *	we don't use the seek command yet.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * RETURN VALUES
+ * erp new erp_head - pointer to new ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ /*
+ * In some cases the 'File Protected' error might be expected and
+ * log messages shouldn't be written then.
+	 * Check if the corresponding suppress bit is set.
+ */
+ if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
+ dev_err(&device->cdev->dev,
+ "Accessing the DASD failed because of a hardware error\n");
+
+ return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+
+} /* end dasd_3990_erp_file_prot */
+
+/*
+ * DASD_3990_ERP_INSPECT_ALIAS
+ *
+ * DESCRIPTION
+ * Checks if the original request was started on an alias device.
+ * If yes, it modifies the original and the erp request so that
+ * the erp request can be started on a base device.
+ *
+ * PARAMETER
+ * erp pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ * erp pointer to the modified ERP, or NULL
+ */
+
+static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
+ struct dasd_ccw_req *erp)
+{
+ struct dasd_ccw_req *cqr = erp->refers;
+ char *sense;
+
+ if (cqr->block &&
+ (cqr->block->base != cqr->startdev)) {
+
+ sense = dasd_get_sense(&erp->refers->irb);
+ /*
+ * dynamic pav may have changed base alias mapping
+ */
+ if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
+ && (sense[0] == 0x10) && (sense[7] == 0x0F)
+ && (sense[8] == 0x67)) {
+ /*
+ * remove device from alias handling to prevent new
+ * requests from being scheduled on the
+ * wrong alias device
+ */
+ dasd_alias_remove_device(cqr->startdev);
+
+ /* schedule worker to reload device */
+ dasd_reload_device(cqr->startdev);
+ }
+
+ if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
+ DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
+ "ERP on alias device for request %p,"
+ " recover on base device %s", cqr,
+ dev_name(&cqr->block->base->cdev->dev));
+ }
+ dasd_eckd_reset_ccw_to_base_io(cqr);
+ erp->startdev = cqr->block->base;
+ erp->function = dasd_3990_erp_inspect_alias;
+ return erp;
+ } else
+ return NULL;
+}
+
+
+/*
+ * DASD_3990_ERP_INSPECT_24
+ *
+ * DESCRIPTION
+ * Does a detailed inspection of the 24 byte sense data
+ * and sets up a related error recovery action.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ *   erp		pointer to the (additional) ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_ccw_req *erp_filled = NULL;
+
+ /* Check sense for .... */
+ /* 'Command Reject' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_CMD_REJECT)) {
+ erp_filled = dasd_3990_erp_com_rej(erp, sense);
+ }
+ /* 'Intervention Required' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_INTERVENTION_REQ)) {
+ erp_filled = dasd_3990_erp_int_req(erp);
+ }
+ /* 'Bus Out Parity Check' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_BUS_OUT_CHECK)) {
+ erp_filled = dasd_3990_erp_bus_out(erp);
+ }
+ /* 'Equipment Check' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_EQUIPMENT_CHECK)) {
+ erp_filled = dasd_3990_erp_equip_check(erp, sense);
+ }
+ /* 'Data Check' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_DATA_CHECK)) {
+ erp_filled = dasd_3990_erp_data_check(erp, sense);
+ }
+ /* 'Overrun' */
+ if ((erp_filled == NULL) && (sense[0] & SNS0_OVERRUN)) {
+ erp_filled = dasd_3990_erp_overrun(erp, sense);
+ }
+ /* 'Invalid Track Format' */
+ if ((erp_filled == NULL) && (sense[1] & SNS1_INV_TRACK_FORMAT)) {
+ erp_filled = dasd_3990_erp_inv_format(erp, sense);
+ }
+ /* 'End-of-Cylinder' */
+ if ((erp_filled == NULL) && (sense[1] & SNS1_EOC)) {
+ erp_filled = dasd_3990_erp_EOC(erp, sense);
+ }
+ /* 'Environmental Data' */
+ if ((erp_filled == NULL) && (sense[2] & SNS2_ENV_DATA_PRESENT)) {
+ erp_filled = dasd_3990_erp_env_data(erp, sense);
+ }
+ /* 'No Record Found' */
+ if ((erp_filled == NULL) && (sense[1] & SNS1_NO_REC_FOUND)) {
+ erp_filled = dasd_3990_erp_no_rec(erp, sense);
+ }
+ /* 'File Protected' */
+ if ((erp_filled == NULL) && (sense[1] & SNS1_FILE_PROTECTED)) {
+ erp_filled = dasd_3990_erp_file_prot(erp);
+ }
+ /* other (unknown) error - do default ERP */
+ if (erp_filled == NULL) {
+
+ erp_filled = erp;
+ }
+
+ return erp_filled;
+
+} /* END dasd_3990_erp_inspect_24 */
+
+/*
+ *****************************************************************************
+ * 32 byte sense ERP functions (only)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERPACTION_10_32
+ *
+ * DESCRIPTION
+ * Handles 32 byte 'Action 10' of Single Program Action Codes.
+ * Just retry and if retry doesn't work, return with error.
+ *
+ * PARAMETER
+ * erp current erp_head
+ * sense current sense data
+ * RETURN VALUES
+ * erp modified erp_head
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ erp->retries = 256;
+ erp->function = dasd_3990_erp_action_10_32;
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Perform logging requested");
+
+ return erp;
+
+} /* end dasd_3990_erp_action_10_32 */
+
+/*
+ * DASD_3990_ERP_ACTION_1B_32
+ *
+ * DESCRIPTION
+ * Handles 32 byte 'Action 1B' of Single Program Action Codes.
+ * A write operation could not be finished because of an unexpected
+ * condition.
+ * The already created 'default erp' is used to get the link to
+ *	the erp chain, but it cannot be used for this recovery
+ * action because it contains no DE/LO data space.
+ *
+ * PARAMETER
+ * default_erp already added default erp.
+ * sense current sense data
+ *
+ * RETURN VALUES
+ * erp new erp or
+ * default_erp in case of imprecise ending or error
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+{
+
+ struct dasd_device *device = default_erp->startdev;
+ __u32 cpa = 0;
+ struct dasd_ccw_req *cqr;
+ struct dasd_ccw_req *erp;
+ struct DE_eckd_data *DE_data;
+ struct PFX_eckd_data *PFX_data;
+ char *LO_data; /* LO_eckd_data_t */
+ struct ccw1 *ccw, *oldccw;
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Write not finished because of unexpected condition");
+
+ default_erp->function = dasd_3990_erp_action_1B_32;
+
+ /* determine the original cqr */
+ cqr = default_erp;
+
+ while (cqr->refers != NULL) {
+ cqr = cqr->refers;
+ }
+
+ if (scsw_is_tm(&cqr->irb.scsw)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "32 bit sense, action 1B is not defined"
+ " in transport mode - just retry");
+ return default_erp;
+ }
+
+ /* for imprecise ending just do default erp */
+ if (sense[1] & 0x01) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Imprecise ending is set - just retry");
+
+ return default_erp;
+ }
+
+ /* determine the address of the CCW to be restarted */
+ /* Imprecise ending is not set -> addr from IRB-SCSW */
+ cpa = default_erp->refers->irb.scsw.cmd.cpa;
+
+ if (cpa == 0) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Unable to determine address of the CCW "
+ "to be restarted");
+
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+ }
+
+ /* Build new ERP request including DE/LO */
+ erp = dasd_alloc_erp_request(cqr->magic,
+ 2 + 1,/* DE/LO + TIC */
+ sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data), device);
+
+ if (IS_ERR(erp)) {
+ /* internal error 01 - Unable to allocate ERP */
+ dev_err(&device->cdev->dev, "An error occurred in the DASD "
+ "device driver, reason=%s\n", "01");
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+ }
+
+ /* use original DE */
+ DE_data = erp->data;
+ oldccw = cqr->cpaddr;
+ if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) {
+ PFX_data = cqr->data;
+ memcpy(DE_data, &PFX_data->define_extent,
+ sizeof(struct DE_eckd_data));
+ } else
+ memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data));
+
+ /* create LO */
+ LO_data = erp->data + sizeof(struct DE_eckd_data);
+
+ if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+		/* should not happen */
+ return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+ }
+
+ if ((sense[7] & 0x3F) == 0x01) {
+ /* operation code is WRITE DATA -> data area orientation */
+ LO_data[0] = 0x81;
+
+ } else if ((sense[7] & 0x3F) == 0x03) {
+ /* operation code is FORMAT WRITE -> index orientation */
+ LO_data[0] = 0xC3;
+
+ } else {
+ LO_data[0] = sense[7]; /* operation */
+ }
+
+ LO_data[1] = sense[8]; /* auxiliary */
+ LO_data[2] = sense[9];
+ LO_data[3] = sense[3]; /* count */
+ LO_data[4] = sense[29]; /* seek_addr.cyl */
+ LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
+ LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
+
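+	/* take the remaining locate record bytes (8-15) verbatim from the
+	 * sense data */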
+ memcpy(&(LO_data[8]), &(sense[11]), 8);
+
+ /* create DE ccw */
+ ccw = erp->cpaddr;
+ memset(ccw, 0, sizeof(struct ccw1));
+ ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+ ccw->flags = CCW_FLAG_CC;
+ ccw->count = 16;
+ ccw->cda = (__u32)virt_to_phys(DE_data);
+
+ /* create LO ccw */
+ ccw++;
+ memset(ccw, 0, sizeof(struct ccw1));
+ ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+ ccw->flags = CCW_FLAG_CC;
+ ccw->count = 16;
+ ccw->cda = (__u32)virt_to_phys(LO_data);
+
+ /* TIC to the failed ccw */
+ ccw++;
+ ccw->cmd_code = CCW_CMD_TIC;
+ ccw->cda = cpa;
+
+ /* fill erp related fields */
+ erp->flags = default_erp->flags;
+ erp->function = dasd_3990_erp_action_1B_32;
+ erp->refers = default_erp->refers;
+ erp->startdev = device;
+ erp->memdev = device;
+ erp->magic = default_erp->magic;
+ erp->expires = default_erp->expires;
+ erp->retries = 256;
+ erp->buildclk = get_tod_clock();
+ erp->status = DASD_CQR_FILLED;
+
+ /* remove the default erp */
+ dasd_free_erp_request(default_erp, device);
+
+ return erp;
+
+} /* end dasd_3990_erp_action_1B_32 */
+
+/*
+ * DASD_3990_UPDATE_1B
+ *
+ * DESCRIPTION
+ * Handles the update to the 32 byte 'Action 1B' of Single Program
+ * Action Codes in case the first action was not successful.
+ * The already created 'previous_erp' is the currently not successful
+ * ERP.
+ *
+ * PARAMETER
+ * previous_erp already created previous erp.
+ * sense current sense data
+ * RETURN VALUES
+ * erp modified erp
+ */
+static struct dasd_ccw_req *
+dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
+{
+
+ struct dasd_device *device = previous_erp->startdev;
+ __u32 cpa = 0;
+ struct dasd_ccw_req *cqr;
+ struct dasd_ccw_req *erp;
+ char *LO_data; /* struct LO_eckd_data */
+ struct ccw1 *ccw;
+
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Write not finished because of unexpected condition"
+ " - follow on");
+
+ /* determine the original cqr */
+ cqr = previous_erp;
+
+ while (cqr->refers != NULL) {
+ cqr = cqr->refers;
+ }
+
+ if (scsw_is_tm(&cqr->irb.scsw)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "32 bit sense, action 1B, update,"
+ " in transport mode - just retry");
+ return previous_erp;
+ }
+
+ /* for imprecise ending just do default erp */
+ if (sense[1] & 0x01) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Imprecise ending is set - just retry");
+
+ previous_erp->status = DASD_CQR_FILLED;
+
+ return previous_erp;
+ }
+
+ /* determine the address of the CCW to be restarted */
+ /* Imprecise ending is not set -> addr from IRB-SCSW */
+ cpa = previous_erp->irb.scsw.cmd.cpa;
+
+ if (cpa == 0) {
+ /* internal error 02 -
+ Unable to determine address of the CCW to be restarted */
+ dev_err(&device->cdev->dev, "An error occurred in the DASD "
+ "device driver, reason=%s\n", "02");
+
+ previous_erp->status = DASD_CQR_FAILED;
+
+ return previous_erp;
+ }
+
+ erp = previous_erp;
+
+ /* update the LO with the new returned sense data */
+ LO_data = erp->data + sizeof(struct DE_eckd_data);
+
+ if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+ /* should not happen */
+ previous_erp->status = DASD_CQR_FAILED;
+
+ return previous_erp;
+ }
+
+ if ((sense[7] & 0x3F) == 0x01) {
+ /* operation code is WRITE DATA -> data area orientation */
+ LO_data[0] = 0x81;
+
+ } else if ((sense[7] & 0x3F) == 0x03) {
+ /* operation code is FORMAT WRITE -> index orientation */
+ LO_data[0] = 0xC3;
+
+ } else {
+ LO_data[0] = sense[7]; /* operation */
+ }
+
+ LO_data[1] = sense[8]; /* auxiliary */
+ LO_data[2] = sense[9];
+ LO_data[3] = sense[3]; /* count */
+ LO_data[4] = sense[29]; /* seek_addr.cyl */
+ LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
+ LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
+
+ memcpy(&(LO_data[8]), &(sense[11]), 8);
+
+ /* TIC to the failed ccw */
+ ccw = erp->cpaddr; /* addr of DE ccw */
+	ccw++;			/* addr of LO ccw */
+ ccw++; /* addr of TIC ccw */
+ ccw->cda = cpa;
+
+ erp->status = DASD_CQR_FILLED;
+
+ return erp;
+
+} /* end dasd_3990_update_1B */
+
+/*
+ * DASD_3990_ERP_COMPOUND_RETRY
+ *
+ * DESCRIPTION
+ * Handles the compound ERP action retry code.
+ * NOTE: At least one retry is done even if zero is specified
+ * by the sense data. This makes enqueueing of the request
+ * easier.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ * erp modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
+{
+
+ switch (sense[25] & 0x03) {
+	case 0x00:		/* do not retry */
+ erp->retries = 1;
+ break;
+
+ case 0x01: /* retry 2 times */
+ erp->retries = 2;
+ break;
+
+ case 0x02: /* retry 10 times */
+ erp->retries = 10;
+ break;
+
+ case 0x03: /* retry 256 times */
+ erp->retries = 256;
+ break;
+
+ default:
+ BUG();
+ }
+
+ erp->function = dasd_3990_erp_compound_retry;
+
+} /* end dasd_3990_erp_compound_retry */
+
+/*
+ * DASD_3990_ERP_COMPOUND_PATH
+ *
+ * DESCRIPTION
+ * Handles the compound ERP action for retry on alternate
+ * channel path.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ * erp modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
+{
+ if (sense[25] & DASD_SENSE_BIT_3) {
+ dasd_3990_erp_alternate_path(erp);
+
+ if (erp->status == DASD_CQR_FAILED &&
+ !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
+ /* reset the lpm and the status to be able to
+ * try further actions. */
+ erp->lpm = dasd_path_get_opm(erp->startdev);
+ erp->status = DASD_CQR_NEED_ERP;
+ }
+ }
+
+ erp->function = dasd_3990_erp_compound_path;
+
+} /* end dasd_3990_erp_compound_path */
+
+/*
+ * DASD_3990_ERP_COMPOUND_CODE
+ *
+ * DESCRIPTION
+ * Handles the compound ERP action for retry code.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ * erp NEW ERP pointer
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
+{
+
+ if (sense[25] & DASD_SENSE_BIT_2) {
+
+ switch (sense[28]) {
+ case 0x17:
+ /* issue a Diagnostic Control command with an
+ * Inhibit Write subcommand and controller modifier */
+ erp = dasd_3990_erp_DCTL(erp, 0x20);
+ break;
+
+ case 0x25:
+ /* wait for 5 seconds and retry again */
+ erp->retries = 1;
+
+ dasd_3990_erp_block_queue (erp, 5*HZ);
+ break;
+
+ default:
+ /* should not happen - continue */
+ break;
+ }
+ }
+
+ erp->function = dasd_3990_erp_compound_code;
+
+ return erp;
+
+} /* end dasd_3990_erp_compound_code */
+
+/*
+ * DASD_3990_ERP_COMPOUND_CONFIG
+ *
+ * DESCRIPTION
+ * Handles the compound ERP action for configuration
+ * dependent error.
+ * Note: duplex handling is not implemented (yet).
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created ERP
+ *
+ * RETURN VALUES
+ * erp modified ERP pointer
+ *
+ */
+static void
+dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
+{
+
+ if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
+
+ /* set to suspended duplex state then restart
+ internal error 05 - Set device to suspended duplex state
+ should be done */
+ struct dasd_device *device = erp->startdev;
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", "05");
+
+ }
+
+ erp->function = dasd_3990_erp_compound_config;
+
+} /* end dasd_3990_erp_compound_config */
+
+/*
+ * DASD_3990_ERP_COMPOUND
+ *
+ * DESCRIPTION
+ * Does the further compound program action if
+ * compound retry was not successful.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the current (failed) ERP
+ *
+ * RETURN VALUES
+ * erp (additional) ERP pointer
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
+{
+
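+	/*
+	 * Step through the compound recovery actions in order: retry,
+	 * alternate path, retry code, configuration dependent error.
+	 * erp->function records the stage that ran last; a status of
+	 * DASD_CQR_NEED_ERP means that stage was not sufficient.
+	 */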
+ if ((erp->function == dasd_3990_erp_compound_retry) &&
+ (erp->status == DASD_CQR_NEED_ERP)) {
+
+ dasd_3990_erp_compound_path(erp, sense);
+ }
+
+ if ((erp->function == dasd_3990_erp_compound_path) &&
+ (erp->status == DASD_CQR_NEED_ERP)) {
+
+ erp = dasd_3990_erp_compound_code(erp, sense);
+ }
+
+ if ((erp->function == dasd_3990_erp_compound_code) &&
+ (erp->status == DASD_CQR_NEED_ERP)) {
+
+ dasd_3990_erp_compound_config(erp, sense);
+ }
+
+ /* if no compound action ERP specified, the request failed */
+ if (erp->status == DASD_CQR_NEED_ERP)
+ erp->status = DASD_CQR_FAILED;
+
+ return erp;
+
+} /* end dasd_3990_erp_compound */
+
+/*
+ * DASD_3990_ERP_HANDLE_SIM
+ *
+ * DESCRIPTION
+ *   Inspects the SIM SENSE data and starts an appropriate action.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ *
+ * RETURN VALUES
+ * none
+ */
+void
+dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
+{
+ /* print message according to log or message to operator mode */
+ if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
+ /* print SIM SRC from RefCode */
+ dev_err(&device->cdev->dev, "SIM - SRC: "
+ "%02x%02x%02x%02x\n", sense[22],
+ sense[23], sense[11], sense[12]);
+ } else if (sense[24] & DASD_SIM_LOG) {
+ /* print SIM SRC Refcode */
+ dev_warn(&device->cdev->dev, "log SIM - SRC: "
+ "%02x%02x%02x%02x\n", sense[22],
+ sense[23], sense[11], sense[12]);
+ }
+}
+
+/*
+ * DASD_3990_ERP_INSPECT_32
+ *
+ * DESCRIPTION
+ * Does a detailed inspection of the 32 byte sense data
+ * and sets up a related error recovery action.
+ *
+ * PARAMETER
+ * sense sense data of the actual error
+ * erp pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ * erp_filled pointer to the ERP
+ *
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
+{
+
+ struct dasd_device *device = erp->startdev;
+
+ erp->function = dasd_3990_erp_inspect_32;
+
+ /* check for SIM sense data */
+ if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
+ dasd_3990_erp_handle_sim(device, sense);
+
+ if (sense[25] & DASD_SENSE_BIT_0) {
+
+ /* compound program action codes (byte25 bit 0 == '1') */
+ dasd_3990_erp_compound_retry(erp, sense);
+
+ } else {
+
+ /* single program action codes (byte25 bit 0 == '0') */
+ switch (sense[25]) {
+
+ case 0x00: /* success - use default ERP for retries */
+ DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+ "ERP called for successful request"
+ " - just retry");
+ break;
+
+ case 0x01: /* fatal error */
+ dev_err(&device->cdev->dev,
+ "ERP failed for the DASD\n");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ break;
+
+ case 0x02: /* intervention required */
+ case 0x03: /* intervention required during dual copy */
+ erp = dasd_3990_erp_int_req(erp);
+ break;
+
+ case 0x0F: /* length mismatch during update write command
+ internal error 08 - update write command error*/
+ dev_err(&device->cdev->dev, "An error occurred in the "
+ "DASD device driver, reason=%s\n", "08");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ break;
+
+ case 0x10: /* logging required for other channel program */
+ erp = dasd_3990_erp_action_10_32(erp, sense);
+ break;
+
+		case 0x15: /* next track outside defined extent
+ internal error 07 - The next track is not
+ within the defined storage extent */
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", "07");
+
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ break;
+
+ case 0x1B: /* unexpected condition during write */
+
+ erp = dasd_3990_erp_action_1B_32(erp, sense);
+ break;
+
+ case 0x1C: /* invalid data */
+ dev_emerg(&device->cdev->dev,
+ "Data recovered during retry with PCI "
+ "fetch mode active\n");
+
+ /* not possible to handle this situation in Linux */
+			panic("Invalid data - No way to inform application "
+			      "about the possibly incorrect data");
+ break;
+
+ case 0x1D: /* state-change pending */
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "A State change pending condition exists "
+ "for the subsystem or device");
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+ break;
+
+ case 0x1E: /* busy */
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Busy condition exists "
+ "for the subsystem or device");
+ erp = dasd_3990_erp_action_4(erp, sense);
+ break;
+
+ default: /* all others errors - default erp */
+ break;
+ }
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_inspect_32 */
+
+static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
+{
+ int pos = pathmask_to_pos(lpum);
+
+ if (!(device->features & DASD_FEATURE_PATH_AUTODISABLE)) {
+ dev_err(&device->cdev->dev,
+ "Path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
+ device->path[pos].cssid, device->path[pos].chpid, lpum);
+ goto out;
+ }
+
+ /* no remaining path, cannot disable */
+ if (!(dasd_path_get_opm(device) & ~lpum)) {
+ dev_err(&device->cdev->dev,
+ "Last path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
+ device->path[pos].cssid, device->path[pos].chpid, lpum);
+ goto out;
+ }
+
+ dev_err(&device->cdev->dev,
+ "Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
+ device->path[pos].cssid, device->path[pos].chpid, lpum);
+ dasd_path_remove_opm(device, lpum);
+ dasd_path_add_ifccpm(device, lpum);
+
+out:
+ device->path[pos].errorclk = 0;
+ atomic_set(&device->path[pos].error_count, 0);
+}
+
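+/*
+ * Account an IFCC on the path the failed request used. If the error
+ * count on that path reaches device->path_thrhld within
+ * device->path_interval seconds, try to take the path out of service.
+ */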
+static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
+{
+ struct dasd_device *device = erp->startdev;
+ __u8 lpum = erp->refers->irb.esw.esw1.lpum;
+ int pos = pathmask_to_pos(lpum);
+ unsigned long clk;
+
+ if (!device->path_thrhld)
+ return;
+
+ clk = get_tod_clock();
+ /*
+ * check if the last error is longer ago than the timeout,
+ * if so reset error state
+ */
+ if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC)
+ >= device->path_interval) {
+ atomic_set(&device->path[pos].error_count, 0);
+ device->path[pos].errorclk = 0;
+ }
+ atomic_inc(&device->path[pos].error_count);
+ device->path[pos].errorclk = clk;
+	/* threshold exceeded, disable path if possible */
+ if (atomic_read(&device->path[pos].error_count) >=
+ device->path_thrhld)
+ dasd_3990_erp_disable_path(device, lpum);
+}
+
+/*
+ *****************************************************************************
+ * main ERP control functions (24 and 32 byte sense)
+ *****************************************************************************
+ */
+
+/*
+ * DASD_3990_ERP_CONTROL_CHECK
+ *
+ * DESCRIPTION
+ * Does a generic inspection of whether a control check occurred and
+ * sets up the related error recovery procedure
+ *
+ * PARAMETER
+ * erp pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ * erp_filled pointer to the erp
+ */
+
+static struct dasd_ccw_req *
+dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
+{
+ struct dasd_device *device = erp->startdev;
+
+ if (scsw_cstat(&erp->refers->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK
+ | SCHN_STAT_CHN_CTRL_CHK)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "channel or interface control check");
+ dasd_3990_erp_account_error(erp);
+ erp = dasd_3990_erp_action_4(erp, NULL);
+ }
+ return erp;
+}
+
+/*
+ * DASD_3990_ERP_INSPECT
+ *
+ * DESCRIPTION
+ * Does a detailed inspection for sense data by calling either
+ * the 24-byte or the 32-byte inspection routine.
+ *
+ * PARAMETER
+ * erp pointer to the currently created default ERP
+ * RETURN VALUES
+ * erp_new	contents were possibly modified
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_inspect(struct dasd_ccw_req *erp)
+{
+
+ struct dasd_ccw_req *erp_new = NULL;
+ char *sense;
+
+ /* if this problem occurred on an alias retry on base */
+ erp_new = dasd_3990_erp_inspect_alias(erp);
+ if (erp_new)
+ return erp_new;
+
+ /* sense data are located in the refers record of the
+	 * already set up new ERP!
+	 * check if concurrent sense is available
+ */
+ sense = dasd_get_sense(&erp->refers->irb);
+ if (!sense)
+ erp_new = dasd_3990_erp_control_check(erp);
+ /* distinguish between 24 and 32 byte sense data */
+ else if (sense[27] & DASD_SENSE_BIT_0) {
+
+ /* inspect the 24 byte sense data */
+ erp_new = dasd_3990_erp_inspect_24(erp, sense);
+
+ } else {
+
+ /* inspect the 32 byte sense data */
+ erp_new = dasd_3990_erp_inspect_32(erp, sense);
+
+ } /* end distinguish between 24 and 32 byte sense data */
+
+ return erp_new;
+}
+
+/*
+ * DASD_3990_ERP_ADD_ERP
+ *
+ * DESCRIPTION
+ * This function adds an additional request block (ERP) to the head of
+ * the given cqr (or erp).
+ * For a command mode cqr the erp is initialized as a default erp
+ * (retry TIC).
+ * For transport mode we make a copy of the original TCW (points to
+ * the original TCCB, TIDALs, etc.) but give it a fresh
+ * TSB so the original sense data will not be changed.
+ *
+ * PARAMETER
+ * cqr head of the current ERP-chain (or single cqr if
+ * first error)
+ * RETURN VALUES
+ * erp pointer to new ERP-chain head
+ */
+static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
+{
+
+ struct dasd_device *device = cqr->startdev;
+ struct ccw1 *ccw;
+ struct dasd_ccw_req *erp;
+ int cplength, datasize;
+ struct tcw *tcw;
+ struct tsb *tsb;
+
+ if (cqr->cpmode == 1) {
+ cplength = 0;
+ /* TCW needs to be 64 byte aligned, so leave enough room */
+ datasize = 64 + sizeof(struct tcw) + sizeof(struct tsb);
+ } else {
+ cplength = 2;
+ datasize = 0;
+ }
+
+ /* allocate additional request block */
+ erp = dasd_alloc_erp_request(cqr->magic,
+ cplength, datasize, device);
+ if (IS_ERR(erp)) {
+ if (cqr->retries <= 0) {
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ "Unable to allocate ERP request");
+ cqr->status = DASD_CQR_FAILED;
+ cqr->stopclk = get_tod_clock();
+ } else {
+ DBF_DEV_EVENT(DBF_ERR, device,
+ "Unable to allocate ERP request "
+ "(%i retries left)",
+ cqr->retries);
+ dasd_block_set_timer(device->block, (HZ << 3));
+ }
+ return erp;
+ }
+
+ ccw = cqr->cpaddr;
+ if (cqr->cpmode == 1) {
+ /* make a shallow copy of the original tcw but set new tsb */
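+		/*
+		 * resulting layout in erp->data: up to 64 bytes of
+		 * alignment padding, the copied TCW, then the fresh TSB
+		 */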
+ erp->cpmode = 1;
+ erp->cpaddr = PTR_ALIGN(erp->data, 64);
+ tcw = erp->cpaddr;
+ tsb = (struct tsb *) &tcw[1];
+ *tcw = *((struct tcw *)cqr->cpaddr);
+ tcw->tsb = virt_to_phys(tsb);
+ } else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
+ /* PSF cannot be chained from NOOP/TIC */
+ erp->cpaddr = cqr->cpaddr;
+ } else {
+ /* initialize request with default TIC to current ERP/CQR */
+ ccw = erp->cpaddr;
+ ccw->cmd_code = CCW_CMD_NOOP;
+ ccw->flags = CCW_FLAG_CC;
+ ccw++;
+ ccw->cmd_code = CCW_CMD_TIC;
+ ccw->cda = (__u32)virt_to_phys(cqr->cpaddr);
+ }
+
+ erp->flags = cqr->flags;
+ erp->function = dasd_3990_erp_add_erp;
+ erp->refers = cqr;
+ erp->startdev = device;
+ erp->memdev = device;
+ erp->block = cqr->block;
+ erp->magic = cqr->magic;
+ erp->expires = cqr->expires;
+ erp->retries = device->default_retries;
+ erp->buildclk = get_tod_clock();
+ erp->status = DASD_CQR_FILLED;
+
+ return erp;
+}
+
+/*
+ * DASD_3990_ERP_ADDITIONAL_ERP
+ *
+ * DESCRIPTION
+ * An additional ERP is needed to handle the current error.
+ * Add ERP to the head of the ERP-chain containing the ERP processing
+ * determined based on the sense data.
+ *
+ * PARAMETER
+ * cqr head of the current ERP-chain (or single cqr if
+ * first error)
+ *
+ * RETURN VALUES
+ * erp pointer to new ERP-chain head
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
+{
+
+ struct dasd_ccw_req *erp = NULL;
+
+ /* add erp and initialize with default TIC */
+ erp = dasd_3990_erp_add_erp(cqr);
+
+ if (IS_ERR(erp))
+ return erp;
+
+ /* inspect sense, determine specific ERP if possible */
+ if (erp != cqr) {
+
+ erp = dasd_3990_erp_inspect(erp);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_additional_erp */
+
+/*
+ * DASD_3990_ERP_ERROR_MATCH
+ *
+ * DESCRIPTION
+ * Check if the device status of the given cqr is the same.
+ * This means that the failed CCW and the relevant sense data
+ * must match.
+ * I don't distinguish between 24 and 32 byte sense because in case of
+ * 24 byte sense bytes 25 and 27 are set as well.
+ *
+ * PARAMETER
+ * cqr1 first cqr, which will be compared with the
+ * cqr2 second cqr.
+ *
+ * RETURN VALUES
+ * match 'boolean' for match found
+ * returns 1 if match found, otherwise 0.
+ */
+static int dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1,
+ struct dasd_ccw_req *cqr2)
+{
+ char *sense1, *sense2;
+
+ if (cqr1->startdev != cqr2->startdev)
+ return 0;
+
+ sense1 = dasd_get_sense(&cqr1->irb);
+ sense2 = dasd_get_sense(&cqr2->irb);
+
+ /* one request has sense data, the other not -> no match, return 0 */
+ if (!sense1 != !sense2)
+ return 0;
+ /* no sense data in both cases -> check cstat for IFCC */
+ if (!sense1 && !sense2) {
+ if ((scsw_cstat(&cqr1->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_CTRL_CHK)) ==
+ (scsw_cstat(&cqr2->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_CTRL_CHK)))
+ return 1; /* match with ifcc*/
+ }
+ /* check sense data; byte 0-2,25,27 */
+ if (!(sense1 && sense2 &&
+ (memcmp(sense1, sense2, 3) == 0) &&
+ (sense1[27] == sense2[27]) &&
+ (sense1[25] == sense2[25]))) {
+
+ return 0; /* sense doesn't match */
+ }
+
+ return 1; /* match */
+
+} /* end dasd_3990_erp_error_match */
+
+/*
+ * DASD_3990_ERP_IN_ERP
+ *
+ * DESCRIPTION
+ * check if the current error already happened before.
+ * quick exit if current cqr is not an ERP (cqr->refers=NULL)
+ *
+ * PARAMETER
+ * cqr failed cqr (either original cqr or already an erp)
+ *
+ * RETURN VALUES
+ * erp erp-pointer to the already defined error
+ * recovery procedure OR
+ * NULL if a 'new' error occurred.
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_in_erp(struct dasd_ccw_req *cqr)
+{
+
+ struct dasd_ccw_req *erp_head = cqr, /* save erp chain head */
+		*erp_match = NULL;	/* save possible matching erp */
+ int match = 0; /* 'boolean' for matching error found */
+
+ if (cqr->refers == NULL) { /* return if not in erp */
+ return NULL;
+ }
+
+ /* check the erp/cqr chain for current error */
+ do {
+ match = dasd_3990_erp_error_match(erp_head, cqr->refers);
+ erp_match = cqr; /* save possible matching erp */
+ cqr = cqr->refers; /* check next erp/cqr in queue */
+
+ } while ((cqr->refers != NULL) && (!match));
+
+ if (!match) {
+ return NULL; /* no match was found */
+ }
+
+ return erp_match; /* return address of matching erp */
+
+} /* END dasd_3990_erp_in_erp */
+
+/*
+ * DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense)
+ *
+ * DESCRIPTION
+ * No retry is left for the current ERP. Check what has to be done
+ * with the ERP.
+ * - do further defined ERP action or
+ * - wait for interrupt or
+ * - exit with permanent error
+ *
+ * PARAMETER
+ * erp ERP which is in progress with no retry left
+ *
+ * RETURN VALUES
+ * erp modified/additional ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
+{
+
+ struct dasd_device *device = erp->startdev;
+ char *sense = dasd_get_sense(&erp->irb);
+
+ /* check for 24 byte sense ERP */
+ if ((erp->function == dasd_3990_erp_bus_out) ||
+ (erp->function == dasd_3990_erp_action_1) ||
+ (erp->function == dasd_3990_erp_action_4)) {
+
+ erp = dasd_3990_erp_action_1(erp);
+
+ } else if (erp->function == dasd_3990_erp_action_1_sec) {
+ erp = dasd_3990_erp_action_1_sec(erp);
+ } else if (erp->function == dasd_3990_erp_action_5) {
+
+ /* retries have not been successful */
+ /* prepare erp for retry on different channel path */
+ erp = dasd_3990_erp_action_1(erp);
+
+ if (sense && !(sense[2] & DASD_SENSE_BIT_0)) {
+
+ /* issue a Diagnostic Control command with an
+ * Inhibit Write subcommand */
+
+ switch (sense[25]) {
+ case 0x17:
+ case 0x57:{ /* controller */
+ erp = dasd_3990_erp_DCTL(erp, 0x20);
+ break;
+ }
+ case 0x18:
+ case 0x58:{ /* channel path */
+ erp = dasd_3990_erp_DCTL(erp, 0x40);
+ break;
+ }
+ case 0x19:
+ case 0x59:{ /* storage director */
+ erp = dasd_3990_erp_DCTL(erp, 0x80);
+ break;
+ }
+ default:
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "invalid subcommand modifier 0x%x "
+ "for Diagnostic Control Command",
+ sense[25]);
+ }
+ }
+
+ /* check for 32 byte sense ERP */
+ } else if (sense &&
+ ((erp->function == dasd_3990_erp_compound_retry) ||
+ (erp->function == dasd_3990_erp_compound_path) ||
+ (erp->function == dasd_3990_erp_compound_code) ||
+ (erp->function == dasd_3990_erp_compound_config))) {
+
+ erp = dasd_3990_erp_compound(erp, sense);
+
+ } else {
+ /*
+ * No retry left and no additional special handling
+ * necessary
+ */
+ dev_err(&device->cdev->dev,
+ "ERP %p has run out of retries and failed\n", erp);
+
+ erp->status = DASD_CQR_FAILED;
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_further_erp */
+
+/*
+ * DASD_3990_ERP_HANDLE_MATCH_ERP
+ *
+ * DESCRIPTION
+ * An error occurred again and an ERP has been detected which is already
+ * used to handle this error (e.g. retries).
+ * All prior ERPs are assumed to be successful and are therefore removed
+ * from queue.
+ * If retry counter of matching erp is already 0, it is checked if further
+ * action is needed (besides retry) or if the ERP has failed.
+ *
+ * PARAMETER
+ * erp_head first ERP in ERP-chain
+ * erp ERP that handles the actual error.
+ * (matching erp)
+ *
+ * RETURN VALUES
+ * erp modified/additional ERP
+ */
+static struct dasd_ccw_req *
+dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
+ struct dasd_ccw_req *erp)
+{
+
+ struct dasd_device *device = erp_head->startdev;
+ struct dasd_ccw_req *erp_done = erp_head; /* finished req */
+ struct dasd_ccw_req *erp_free = NULL; /* req to be freed */
+
+ /* loop over successful ERPs and remove them from chanq */
+ while (erp_done != erp) {
+
+ if (erp_done == NULL) /* end of chain reached */
+ panic(PRINTK_HEADER "Programming error in ERP! The "
+ "original request was lost\n");
+
+ /* remove the request from the device queue */
+ list_del(&erp_done->blocklist);
+
+ erp_free = erp_done;
+ erp_done = erp_done->refers;
+
+ /* free the finished erp request */
+ dasd_free_erp_request(erp_free, erp_free->memdev);
+
+ } /* end while */
+
+ if (erp->retries > 0) {
+
+ char *sense = dasd_get_sense(&erp->refers->irb);
+
+ /* check for special retries */
+ if (sense && erp->function == dasd_3990_erp_action_4) {
+
+ erp = dasd_3990_erp_action_4(erp, sense);
+
+ } else if (sense &&
+ erp->function == dasd_3990_erp_action_1B_32) {
+
+ erp = dasd_3990_update_1B(erp, sense);
+
+ } else if (sense && erp->function == dasd_3990_erp_int_req) {
+
+ erp = dasd_3990_erp_int_req(erp);
+
+ } else {
+ /* simple retry */
+ DBF_DEV_EVENT(DBF_DEBUG, device,
+ "%i retries left for erp %p",
+ erp->retries, erp);
+
+ /* handle the request again... */
+ erp->status = DASD_CQR_FILLED;
+ }
+
+ } else {
+ /* no retry left - check for further necessary action */
+ /* if no further actions, handle rest as permanent error */
+ erp = dasd_3990_erp_further_erp(erp);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_handle_match_erp */
+
+/*
+ * DASD_3990_ERP_ACTION
+ *
+ * DESCRIPTION
+ * control routine for 3990 erp actions.
+ * Has to be called with the queue lock (namely the s390_irq_lock) acquired.
+ *
+ * PARAMETER
+ * cqr failed cqr (either original cqr or already an erp)
+ *
+ * RETURN VALUES
+ * erp erp-pointer to the head of the ERP action chain.
+ * This means:
+ * - either a ptr to an additional ERP cqr or
+ *		  - the originally given cqr (whose status might
+ * be modified)
+ */
+struct dasd_ccw_req *
+dasd_3990_erp_action(struct dasd_ccw_req * cqr)
+{
+ struct dasd_ccw_req *erp = NULL;
+ struct dasd_device *device = cqr->startdev;
+ struct dasd_ccw_req *temp_erp = NULL;
+
+ if (device->features & DASD_FEATURE_ERPLOG) {
+ /* print current erp_chain */
+ dev_err(&device->cdev->dev,
+ "ERP chain at BEGINNING of ERP-ACTION\n");
+ for (temp_erp = cqr;
+ temp_erp != NULL; temp_erp = temp_erp->refers) {
+
+ dev_err(&device->cdev->dev,
+ "ERP %p (%02x) refers to %p\n",
+ temp_erp, temp_erp->status,
+ temp_erp->refers);
+ }
+ }
+
+ /* double-check if current erp/cqr was successful */
+ if ((scsw_cstat(&cqr->irb.scsw) == 0x00) &&
+ (scsw_dstat(&cqr->irb.scsw) ==
+ (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
+
+ DBF_DEV_EVENT(DBF_DEBUG, device,
+ "ERP called for successful request %p"
+ " - NO ERP necessary", cqr);
+
+ cqr->status = DASD_CQR_DONE;
+
+ return cqr;
+ }
+
+ /* check if error happened before */
+ erp = dasd_3990_erp_in_erp(cqr);
+
+ if (erp == NULL) {
+ /* no matching erp found - set up erp */
+ erp = dasd_3990_erp_additional_erp(cqr);
+ if (IS_ERR(erp))
+ return erp;
+ } else {
+ /* matching erp found - set all leading erp's to DONE */
+ erp = dasd_3990_erp_handle_match_erp(cqr, erp);
+ }
+
+ /*
+ * For path verification work we need to stick with the path that was
+ * originally chosen so that the per path configuration data is
+ * assigned correctly.
+ */
+	if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm)
+		erp->lpm = cqr->lpm;
+
+ if (device->features & DASD_FEATURE_ERPLOG) {
+ /* print current erp_chain */
+ dev_err(&device->cdev->dev,
+ "ERP chain at END of ERP-ACTION\n");
+ for (temp_erp = erp;
+ temp_erp != NULL; temp_erp = temp_erp->refers) {
+
+ dev_err(&device->cdev->dev,
+ "ERP %p (%02x) refers to %p\n",
+ temp_erp, temp_erp->status,
+ temp_erp->refers);
+ }
+ }
+
+ /* enqueue ERP request if it's a new one */
+ if (list_empty(&erp->blocklist)) {
+ cqr->status = DASD_CQR_IN_ERP;
+ /* add erp request before the cqr */
+ list_add_tail(&erp->blocklist, &cqr->blocklist);
+ }
+
+ return erp;
+
+} /* end dasd_3990_erp_action */
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
new file mode 100644
index 0000000000..c9740ae88d
--- /dev/null
+++ b/drivers/s390/block/dasd_alias.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PAV alias management for the DASD ECKD discipline
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Stefan Weinhuber <wein@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <asm/ebcdic.h>
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif /* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eckd):"
+
+
+/*
+ * General concept of alias management:
+ * - PAV and DASD alias management is specific to the eckd discipline.
+ * - A device is connected to an lcu as long as the device exists.
+ *   dasd_alias_make_device_known_to_lcu will be called when the
+ * device is checked by the eckd discipline and
+ * dasd_alias_disconnect_device_from_lcu will be called
+ * before the device is deleted.
+ * - The dasd_alias_add_device / dasd_alias_remove_device
+ * functions mark the point when a device is 'ready for service'.
+ * - A summary unit check is a rare occasion, but it is mandatory to
+ * support it. It requires some complex recovery actions before the
+ * devices can be used again (see dasd_alias_handle_summary_unit_check).
+ * - dasd_alias_get_start_dev will find an alias device that can be used
+ * instead of the base device and does some (very simple) load balancing.
+ * This is the function that gets called for each I/O, so when improving
+ * something, this function should get faster or better, the rest has just
+ * to be correct.
+ */
+
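+/*
+ * Typical call sequence (sketch): dasd_alias_make_device_known_to_lcu
+ * when the device is checked by the eckd discipline,
+ * dasd_alias_add_device once the device is ready for service,
+ * dasd_alias_get_start_dev for each I/O to pick an alias device, and
+ * dasd_alias_remove_device / dasd_alias_disconnect_device_from_lcu on
+ * the way out.
+ */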
+
+static void summary_unit_check_handling_work(struct work_struct *);
+static void lcu_update_work(struct work_struct *);
+static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
+
+static struct alias_root aliastree = {
+ .serverlist = LIST_HEAD_INIT(aliastree.serverlist),
+ .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
+};
+
+static struct alias_server *_find_server(struct dasd_uid *uid)
+{
+ struct alias_server *pos;
+ list_for_each_entry(pos, &aliastree.serverlist, server) {
+ if (!strncmp(pos->uid.vendor, uid->vendor,
+ sizeof(uid->vendor))
+ && !strncmp(pos->uid.serial, uid->serial,
+ sizeof(uid->serial)))
+ return pos;
+ }
+ return NULL;
+}
+
+static struct alias_lcu *_find_lcu(struct alias_server *server,
+ struct dasd_uid *uid)
+{
+ struct alias_lcu *pos;
+ list_for_each_entry(pos, &server->lculist, lcu) {
+ if (pos->uid.ssid == uid->ssid)
+ return pos;
+ }
+ return NULL;
+}
+
+static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
+ struct dasd_uid *uid)
+{
+ struct alias_pav_group *pos;
+ __u8 search_unit_addr;
+
+ /* for hyper pav there is only one group */
+ if (lcu->pav == HYPER_PAV) {
+ if (list_empty(&lcu->grouplist))
+ return NULL;
+ else
+ return list_first_entry(&lcu->grouplist,
+ struct alias_pav_group, group);
+ }
+
+ /* for base pav we have to find the group that matches the base */
+ if (uid->type == UA_BASE_DEVICE)
+ search_unit_addr = uid->real_unit_addr;
+ else
+ search_unit_addr = uid->base_unit_addr;
+ list_for_each_entry(pos, &lcu->grouplist, group) {
+ if (pos->uid.base_unit_addr == search_unit_addr &&
+ !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
+ return pos;
+ }
+ return NULL;
+}
+
+static struct alias_server *_allocate_server(struct dasd_uid *uid)
+{
+ struct alias_server *server;
+
+ server = kzalloc(sizeof(*server), GFP_KERNEL);
+ if (!server)
+ return ERR_PTR(-ENOMEM);
+ memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
+ memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
+ INIT_LIST_HEAD(&server->server);
+ INIT_LIST_HEAD(&server->lculist);
+ return server;
+}
+
+static void _free_server(struct alias_server *server)
+{
+ kfree(server);
+}
+
+static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
+{
+ struct alias_lcu *lcu;
+
+ lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
+ if (!lcu)
+ return ERR_PTR(-ENOMEM);
+ lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
+ if (!lcu->uac)
+ goto out_err1;
+ lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
+ if (!lcu->rsu_cqr)
+ goto out_err2;
+ lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
+ GFP_KERNEL | GFP_DMA);
+ if (!lcu->rsu_cqr->cpaddr)
+ goto out_err3;
+ lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
+ if (!lcu->rsu_cqr->data)
+ goto out_err4;
+
+ memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
+ memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
+ lcu->uid.ssid = uid->ssid;
+ lcu->pav = NO_PAV;
+ lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
+ INIT_LIST_HEAD(&lcu->lcu);
+ INIT_LIST_HEAD(&lcu->inactive_devices);
+ INIT_LIST_HEAD(&lcu->active_devices);
+ INIT_LIST_HEAD(&lcu->grouplist);
+ INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
+ INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
+ spin_lock_init(&lcu->lock);
+ init_completion(&lcu->lcu_setup);
+ return lcu;
+
+out_err4:
+ kfree(lcu->rsu_cqr->cpaddr);
+out_err3:
+ kfree(lcu->rsu_cqr);
+out_err2:
+ kfree(lcu->uac);
+out_err1:
+ kfree(lcu);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void _free_lcu(struct alias_lcu *lcu)
+{
+ kfree(lcu->rsu_cqr->data);
+ kfree(lcu->rsu_cqr->cpaddr);
+ kfree(lcu->rsu_cqr);
+ kfree(lcu->uac);
+ kfree(lcu);
+}
+
+/*
+ * This is the function that will allocate all the server and lcu data,
+ * so this function must be called first for a new device.
+ * The return value is 0 if the device could be connected to its lcu,
+ * whether the lcu was already known before or is a new one.
+ * Negative return code indicates that something went wrong (e.g. -ENOMEM)
+ */
+int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned long flags;
+ struct alias_server *server, *newserver;
+ struct alias_lcu *lcu, *newlcu;
+ struct dasd_uid uid;
+
+ device->discipline->get_uid(device, &uid);
+ spin_lock_irqsave(&aliastree.lock, flags);
+ server = _find_server(&uid);
+ if (!server) {
+ spin_unlock_irqrestore(&aliastree.lock, flags);
+ newserver = _allocate_server(&uid);
+ if (IS_ERR(newserver))
+ return PTR_ERR(newserver);
+ spin_lock_irqsave(&aliastree.lock, flags);
+ server = _find_server(&uid);
+ if (!server) {
+ list_add(&newserver->server, &aliastree.serverlist);
+ server = newserver;
+ } else {
+ /* someone was faster */
+ _free_server(newserver);
+ }
+ }
+
+ lcu = _find_lcu(server, &uid);
+ if (!lcu) {
+ spin_unlock_irqrestore(&aliastree.lock, flags);
+ newlcu = _allocate_lcu(&uid);
+ if (IS_ERR(newlcu))
+ return PTR_ERR(newlcu);
+ spin_lock_irqsave(&aliastree.lock, flags);
+ lcu = _find_lcu(server, &uid);
+ if (!lcu) {
+ list_add(&newlcu->lcu, &server->lculist);
+ lcu = newlcu;
+ } else {
+ /* someone was faster */
+ _free_lcu(newlcu);
+ }
+ }
+ spin_lock(&lcu->lock);
+ list_add(&device->alias_list, &lcu->inactive_devices);
+ private->lcu = lcu;
+ spin_unlock(&lcu->lock);
+ spin_unlock_irqrestore(&aliastree.lock, flags);
+
+ return 0;
+}
+
+/*
+ * This function removes a device from the scope of alias management.
+ * The complicated part is to make sure that it is not in use by
+ * any of the workers. If necessary cancel the work.
+ */
+void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned long flags;
+ struct alias_lcu *lcu;
+ struct alias_server *server;
+ int was_pending;
+ struct dasd_uid uid;
+
+ lcu = private->lcu;
+ /* nothing to do if already disconnected */
+ if (!lcu)
+ return;
+ device->discipline->get_uid(device, &uid);
+ spin_lock_irqsave(&lcu->lock, flags);
+ /* make sure that the workers don't use this device */
+ if (device == lcu->suc_data.device) {
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ cancel_work_sync(&lcu->suc_data.worker);
+ spin_lock_irqsave(&lcu->lock, flags);
+ if (device == lcu->suc_data.device) {
+ dasd_put_device(device);
+ lcu->suc_data.device = NULL;
+ }
+ }
+ was_pending = 0;
+ if (device == lcu->ruac_data.device) {
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ was_pending = 1;
+ cancel_delayed_work_sync(&lcu->ruac_data.dwork);
+ spin_lock_irqsave(&lcu->lock, flags);
+ if (device == lcu->ruac_data.device) {
+ dasd_put_device(device);
+ lcu->ruac_data.device = NULL;
+ }
+ }
+ private->lcu = NULL;
+ spin_unlock_irqrestore(&lcu->lock, flags);
+
+ spin_lock_irqsave(&aliastree.lock, flags);
+ spin_lock(&lcu->lock);
+ list_del_init(&device->alias_list);
+ if (list_empty(&lcu->grouplist) &&
+ list_empty(&lcu->active_devices) &&
+ list_empty(&lcu->inactive_devices)) {
+ list_del(&lcu->lcu);
+ spin_unlock(&lcu->lock);
+ _free_lcu(lcu);
+ lcu = NULL;
+ } else {
+ if (was_pending)
+ _schedule_lcu_update(lcu, NULL);
+ spin_unlock(&lcu->lock);
+ }
+ server = _find_server(&uid);
+ if (server && list_empty(&server->lculist)) {
+ list_del(&server->server);
+ _free_server(server);
+ }
+ spin_unlock_irqrestore(&aliastree.lock, flags);
+}
+
+/*
+ * This function assumes that the unit address configuration stored
+ * in the lcu is up to date and will update the device uid before
+ * adding it to a pav group.
+ */
+
+static int _add_device_to_lcu(struct alias_lcu *lcu,
+ struct dasd_device *device,
+ struct dasd_device *pos)
+{
+
+ struct dasd_eckd_private *private = device->private;
+ struct alias_pav_group *group;
+ struct dasd_uid uid;
+
+ spin_lock(get_ccwdev_lock(device->cdev));
+ private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
+ private->uid.base_unit_addr =
+ lcu->uac->unit[private->uid.real_unit_addr].base_ua;
+ uid = private->uid;
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ /* if we have no PAV anyway, we don't need to bother with PAV groups */
+ if (lcu->pav == NO_PAV) {
+ list_move(&device->alias_list, &lcu->active_devices);
+ return 0;
+ }
+ group = _find_group(lcu, &uid);
+ if (!group) {
+ group = kzalloc(sizeof(*group), GFP_ATOMIC);
+ if (!group)
+ return -ENOMEM;
+ memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
+ memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
+ group->uid.ssid = uid.ssid;
+ if (uid.type == UA_BASE_DEVICE)
+ group->uid.base_unit_addr = uid.real_unit_addr;
+ else
+ group->uid.base_unit_addr = uid.base_unit_addr;
+ memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
+ INIT_LIST_HEAD(&group->group);
+ INIT_LIST_HEAD(&group->baselist);
+ INIT_LIST_HEAD(&group->aliaslist);
+ list_add(&group->group, &lcu->grouplist);
+ }
+ if (uid.type == UA_BASE_DEVICE)
+ list_move(&device->alias_list, &group->baselist);
+ else
+ list_move(&device->alias_list, &group->aliaslist);
+ private->pavgroup = group;
+ return 0;
+}
+
+static void _remove_device_from_lcu(struct alias_lcu *lcu,
+ struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct alias_pav_group *group;
+
+ list_move(&device->alias_list, &lcu->inactive_devices);
+ group = private->pavgroup;
+ if (!group)
+ return;
+ private->pavgroup = NULL;
+ if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
+ list_del(&group->group);
+ kfree(group);
+ return;
+ }
+ if (group->next == device)
+ group->next = NULL;
+}
+
+static int
+suborder_not_supported(struct dasd_ccw_req *cqr)
+{
+ char *sense;
+ char reason;
+ char msg_format;
+ char msg_no;
+
+ /*
+ * intrc values ENODEV, ENOLINK and EPERM
+	 * will be obtained from sleep_on to indicate that no
+ * IO operation can be started
+ */
+ if (cqr->intrc == -ENODEV)
+ return 1;
+
+ if (cqr->intrc == -ENOLINK)
+ return 1;
+
+ if (cqr->intrc == -EPERM)
+ return 1;
+
+ sense = dasd_get_sense(&cqr->irb);
+ if (!sense)
+ return 0;
+
+ reason = sense[0];
+ msg_format = (sense[7] & 0xF0);
+ msg_no = (sense[7] & 0x0F);
+
+ /* command reject, Format 0 MSG 4 - invalid parameter */
+ if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
+ return 1;
+
+ return 0;
+}
+
+static int read_unit_address_configuration(struct dasd_device *device,
+ struct alias_lcu *lcu)
+{
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+ unsigned long flags;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ (sizeof(struct dasd_psf_prssd_data)),
+ device, NULL);
+ if (IS_ERR(cqr))
+ return PTR_ERR(cqr);
+ cqr->startdev = device;
+ cqr->memdev = device;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ cqr->retries = 10;
+ cqr->expires = 20 * HZ;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = 0x0e; /* Read unit address configuration */
+ /* all other bytes of prssdp must be zero */
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
+
+	/* Read Subsystem Data - unit address configuration */
+ memset(lcu->uac, 0, sizeof(*(lcu->uac)));
+
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(*(lcu->uac));
+ ccw->cda = (__u32)virt_to_phys(lcu->uac);
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ /* need to unset flag here to detect race with summary unit check */
+ spin_lock_irqsave(&lcu->lock, flags);
+ lcu->flags &= ~NEED_UAC_UPDATE;
+ spin_unlock_irqrestore(&lcu->lock, flags);
+
+ rc = dasd_sleep_on(cqr);
+ if (!rc)
+ goto out;
+
+ if (suborder_not_supported(cqr)) {
+ /* suborder not supported or device unusable for IO */
+ rc = -EOPNOTSUPP;
+ } else {
+ /* IO failed but should be retried */
+ spin_lock_irqsave(&lcu->lock, flags);
+ lcu->flags |= NEED_UAC_UPDATE;
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ }
+out:
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
+{
+ unsigned long flags;
+ struct alias_pav_group *pavgroup, *tempgroup;
+ struct dasd_device *device, *tempdev;
+ int i, rc;
+ struct dasd_eckd_private *private;
+
+ spin_lock_irqsave(&lcu->lock, flags);
+ list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
+ list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
+ alias_list) {
+ list_move(&device->alias_list, &lcu->active_devices);
+ private = device->private;
+ private->pavgroup = NULL;
+ }
+ list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
+ alias_list) {
+ list_move(&device->alias_list, &lcu->active_devices);
+ private = device->private;
+ private->pavgroup = NULL;
+ }
+ list_del(&pavgroup->group);
+ kfree(pavgroup);
+ }
+ spin_unlock_irqrestore(&lcu->lock, flags);
+
+ rc = read_unit_address_configuration(refdev, lcu);
+ if (rc)
+ return rc;
+
+ spin_lock_irqsave(&lcu->lock, flags);
+ /*
+	 * if there is another update needed, skip the remaining handling;
+	 * the data might already be outdated,
+	 * but especially do not add the device to an LCU with a pending
+	 * update
+ */
+ if (lcu->flags & NEED_UAC_UPDATE)
+ goto out;
+ lcu->pav = NO_PAV;
+ for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
+ switch (lcu->uac->unit[i].ua_type) {
+ case UA_BASE_PAV_ALIAS:
+ lcu->pav = BASE_PAV;
+ break;
+ case UA_HYPER_PAV_ALIAS:
+ lcu->pav = HYPER_PAV;
+ break;
+ }
+ if (lcu->pav != NO_PAV)
+ break;
+ }
+
+ list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
+ alias_list) {
+ _add_device_to_lcu(lcu, device, refdev);
+ }
+out:
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return 0;
+}
+
+static void lcu_update_work(struct work_struct *work)
+{
+ struct alias_lcu *lcu;
+ struct read_uac_work_data *ruac_data;
+ struct dasd_device *device;
+ unsigned long flags;
+ int rc;
+
+ ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
+ lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
+ device = ruac_data->device;
+ rc = _lcu_update(device, lcu);
+ /*
+ * Need to check flags again, as there could have been another
+	 * prepare_update or a new device while we were still
+ * processing the data
+ */
+ spin_lock_irqsave(&lcu->lock, flags);
+ if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
+ " alias data in lcu (rc = %d), retry later", rc);
+ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
+ dasd_put_device(device);
+ } else {
+ dasd_put_device(device);
+ lcu->ruac_data.device = NULL;
+ lcu->flags &= ~UPDATE_PENDING;
+ }
+ spin_unlock_irqrestore(&lcu->lock, flags);
+}
+
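+/*
+ * Pick a device that can carry out the unit address configuration
+ * read: prefer the given device, then a base or alias device from the
+ * first PAV group, then any active device. If none is available, the
+ * next device set active will trigger the lcu update instead.
+ */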
+static int _schedule_lcu_update(struct alias_lcu *lcu,
+ struct dasd_device *device)
+{
+ struct dasd_device *usedev = NULL;
+ struct alias_pav_group *group;
+
+ lcu->flags |= NEED_UAC_UPDATE;
+ if (lcu->ruac_data.device) {
+ /* already scheduled or running */
+ return 0;
+ }
+ if (device && !list_empty(&device->alias_list))
+ usedev = device;
+
+ if (!usedev && !list_empty(&lcu->grouplist)) {
+ group = list_first_entry(&lcu->grouplist,
+ struct alias_pav_group, group);
+ if (!list_empty(&group->baselist))
+ usedev = list_first_entry(&group->baselist,
+ struct dasd_device,
+ alias_list);
+ else if (!list_empty(&group->aliaslist))
+ usedev = list_first_entry(&group->aliaslist,
+ struct dasd_device,
+ alias_list);
+ }
+ if (!usedev && !list_empty(&lcu->active_devices)) {
+ usedev = list_first_entry(&lcu->active_devices,
+ struct dasd_device, alias_list);
+ }
+ /*
+ * if we haven't found a proper device yet, give up for now, the next
+ * device that will be set active will trigger an lcu update
+ */
+ if (!usedev)
+ return -EINVAL;
+ dasd_get_device(usedev);
+ lcu->ruac_data.device = usedev;
+ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
+ dasd_put_device(usedev);
+ return 0;
+}
+
+int dasd_alias_add_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ __u8 uaddr = private->uid.real_unit_addr;
+ struct alias_lcu *lcu = private->lcu;
+ unsigned long flags;
+ int rc;
+
+ rc = 0;
+ spin_lock_irqsave(&lcu->lock, flags);
+ /*
+ * Check if device and lcu type differ. If so, the uac data may be
+ * outdated and needs to be updated.
+ */
+ if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
+ lcu->flags |= UPDATE_PENDING;
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "uid type mismatch - trigger rescan");
+ }
+ if (!(lcu->flags & UPDATE_PENDING)) {
+ rc = _add_device_to_lcu(lcu, device, device);
+ if (rc)
+ lcu->flags |= UPDATE_PENDING;
+ }
+ if (lcu->flags & UPDATE_PENDING) {
+ list_move(&device->alias_list, &lcu->active_devices);
+ private->pavgroup = NULL;
+ _schedule_lcu_update(lcu, device);
+ }
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return rc;
+}
+
+int dasd_alias_update_add_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ private->lcu->flags |= UPDATE_PENDING;
+ return dasd_alias_add_device(device);
+}
+
+int dasd_alias_remove_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct alias_lcu *lcu = private->lcu;
+ unsigned long flags;
+
+ /* nothing to do if already removed */
+ if (!lcu)
+ return 0;
+ spin_lock_irqsave(&lcu->lock, flags);
+ _remove_device_from_lcu(lcu, device);
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return 0;
+}
+
+struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
+{
+ struct dasd_eckd_private *alias_priv, *private = base_device->private;
+ struct alias_lcu *lcu = private->lcu;
+ struct dasd_device *alias_device;
+ struct alias_pav_group *group;
+ unsigned long flags;
+
+ if (!lcu)
+ return NULL;
+ if (lcu->pav == NO_PAV ||
+ lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
+ return NULL;
+ if (unlikely(!(private->features.feature[8] & 0x01))) {
+ /*
+ * PAV enabled but prefix not, very unlikely
+ * seems to be a lost pathgroup
+ * use base device to do IO
+ */
+ DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
+ "Prefix not enabled with PAV enabled\n");
+ return NULL;
+ }
+
+ spin_lock_irqsave(&lcu->lock, flags);
+ group = private->pavgroup;
+ if (!group) {
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return NULL;
+ }
+ alias_device = group->next;
+ if (!alias_device) {
+ if (list_empty(&group->aliaslist)) {
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return NULL;
+ } else {
+ alias_device = list_first_entry(&group->aliaslist,
+ struct dasd_device,
+ alias_list);
+ }
+ }
+ if (list_is_last(&alias_device->alias_list, &group->aliaslist))
+ group->next = list_first_entry(&group->aliaslist,
+ struct dasd_device, alias_list);
+ else
+ group->next = list_first_entry(&alias_device->alias_list,
+ struct dasd_device, alias_list);
+ spin_unlock_irqrestore(&lcu->lock, flags);
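+	/*
+	 * very simple load balancing: use the alias only if it currently
+	 * has fewer requests outstanding than the base device and is
+	 * neither stopped nor in offline processing
+	 */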
+ alias_priv = alias_device->private;
+ if ((alias_priv->count < private->count) && !alias_device->stopped &&
+ !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
+ return alias_device;
+ else
+ return NULL;
+}
+
+/*
+ * Summary unit check handling depends on the way alias devices
+ * are handled, so it is done here rather than in dasd_eckd.c
+ */
+static int reset_summary_unit_check(struct alias_lcu *lcu,
+ struct dasd_device *device,
+ char reason)
+{
+ struct dasd_ccw_req *cqr;
+ int rc = 0;
+ struct ccw1 *ccw;
+
+ cqr = lcu->rsu_cqr;
+ memcpy((char *) &cqr->magic, "ECKD", 4);
+ ASCEBC((char *) &cqr->magic, 4);
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 16;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ((char *)cqr->data)[0] = reason;
+
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ cqr->retries = 255; /* set retry counter to enable basic ERP */
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->expires = 5 * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_immediatly(cqr);
+ return rc;
+}
+
+static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
+{
+ struct alias_pav_group *pavgroup;
+ struct dasd_device *device;
+ struct dasd_eckd_private *private;
+
+ /* active and inactive list can contain alias as well as base devices */
+ list_for_each_entry(device, &lcu->active_devices, alias_list) {
+ private = device->private;
+ if (private->uid.type != UA_BASE_DEVICE)
+ continue;
+ dasd_schedule_block_bh(device->block);
+ dasd_schedule_device_bh(device);
+ }
+ list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+ private = device->private;
+ if (private->uid.type != UA_BASE_DEVICE)
+ continue;
+ dasd_schedule_block_bh(device->block);
+ dasd_schedule_device_bh(device);
+ }
+ list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+ list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+ dasd_schedule_block_bh(device->block);
+ dasd_schedule_device_bh(device);
+ }
+ }
+}
+
+static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
+{
+ struct alias_pav_group *pavgroup;
+ struct dasd_device *device, *temp;
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+ LIST_HEAD(active);
+
+ /*
+	 * The problem here is that dasd_flush_device_queue may wait
+ * for termination of a request to complete. We can't keep
+ * the lcu lock during that time, so we must assume that
+ * the lists may have changed.
+ * Idea: first gather all active alias devices in a separate list,
+ * then flush the first element of this list unlocked, and afterwards
+ * check if it is still on the list before moving it to the
+ * active_devices list.
+ */
+
+ spin_lock_irqsave(&lcu->lock, flags);
+ list_for_each_entry_safe(device, temp, &lcu->active_devices,
+ alias_list) {
+ private = device->private;
+ if (private->uid.type == UA_BASE_DEVICE)
+ continue;
+ list_move(&device->alias_list, &active);
+ }
+
+ list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+ list_splice_init(&pavgroup->aliaslist, &active);
+ }
+ while (!list_empty(&active)) {
+ device = list_first_entry(&active, struct dasd_device,
+ alias_list);
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ dasd_flush_device_queue(device);
+ spin_lock_irqsave(&lcu->lock, flags);
+ /*
+ * only move device around if it wasn't moved away while we
+ * were waiting for the flush
+ */
+ if (device == list_first_entry(&active,
+ struct dasd_device, alias_list)) {
+ list_move(&device->alias_list, &lcu->active_devices);
+ private = device->private;
+ private->pavgroup = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&lcu->lock, flags);
+}
+
+static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
+{
+ struct alias_pav_group *pavgroup;
+ struct dasd_device *device;
+
+ list_for_each_entry(device, &lcu->active_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+ list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ }
+}
+
+static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
+{
+ struct alias_pav_group *pavgroup;
+ struct dasd_device *device;
+
+ list_for_each_entry(device, &lcu->active_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(pavgroup, &lcu->grouplist, group) {
+ list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ }
+}
+
+static void summary_unit_check_handling_work(struct work_struct *work)
+{
+ struct alias_lcu *lcu;
+ struct summary_unit_check_work_data *suc_data;
+ unsigned long flags;
+ struct dasd_device *device;
+
+ suc_data = container_of(work, struct summary_unit_check_work_data,
+ worker);
+ lcu = container_of(suc_data, struct alias_lcu, suc_data);
+ device = suc_data->device;
+
+ /* 1. flush alias devices */
+ flush_all_alias_devices_on_lcu(lcu);
+
+ /* 2. reset summary unit check */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ dasd_device_remove_stop_bits(device,
+ (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ reset_summary_unit_check(lcu, device, suc_data->reason);
+
+ spin_lock_irqsave(&lcu->lock, flags);
+ _unstop_all_devices_on_lcu(lcu);
+ _restart_all_base_devices_on_lcu(lcu);
+ /* 3. read new alias configuration */
+ _schedule_lcu_update(lcu, device);
+ lcu->suc_data.device = NULL;
+ dasd_put_device(device);
+ spin_unlock_irqrestore(&lcu->lock, flags);
+}
+
+void dasd_alias_handle_summary_unit_check(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ suc_work);
+ struct dasd_eckd_private *private = device->private;
+ struct alias_lcu *lcu;
+ unsigned long flags;
+
+ lcu = private->lcu;
+ if (!lcu) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "device not ready to handle summary"
+ " unit check (no lcu structure)");
+ goto out;
+ }
+ spin_lock_irqsave(&lcu->lock, flags);
+ /* If this device is about to be removed just return and wait for
+ * the next interrupt on a different device
+ */
+ if (list_empty(&device->alias_list)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "device is in offline processing,"
+ " don't do summary unit check handling");
+ goto out_unlock;
+ }
+ if (lcu->suc_data.device) {
+ /* already scheduled or running */
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "previous instance of summary unit check worker"
+ " still pending");
+ goto out_unlock;
+ }
+ _stop_all_devices_on_lcu(lcu);
+ /* prepare for lcu_update */
+ lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+ lcu->suc_data.reason = private->suc_reason;
+ lcu->suc_data.device = device;
+ dasd_get_device(device);
+ if (!schedule_work(&lcu->suc_data.worker))
+ dasd_put_device(device);
+out_unlock:
+ spin_unlock_irqrestore(&lcu->lock, flags);
+out:
+ clear_bit(DASD_FLAG_SUC, &device->flags);
+ dasd_put_device(device);
+}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
new file mode 100644
index 0000000000..c4e36650c4
--- /dev/null
+++ b/drivers/s390/block/dasd_devmap.c
@@ -0,0 +1,2623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999,2001
+ *
+ * Device mapping and dasd= parameter parsing functions. All devmap
+ * functions must not be called from interrupt context. In particular
+ * dasd_get_device is a no-no from interrupt context.
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/debug.h>
+#include <linux/uaccess.h>
+#include <asm/ipl.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_devmap:"
+#define DASD_MAX_PARAMS 256
+
+#include "dasd_int.h"
+
+struct kmem_cache *dasd_page_cache;
+EXPORT_SYMBOL_GPL(dasd_page_cache);
+
+/*
+ * dasd_devmap_t is used to store the features and the relation
+ * between device number and device index. To find a dasd_devmap_t
+ * that corresponds to a device number or a device index, each
+ * dasd_devmap_t is added to two linked lists, one to search by
+ * the device number and one to search by the device index. As
+ * soon as big minor numbers are available the device index list
+ * can be removed since the device number will then be identical
+ * to the device index.
+ */
+struct dasd_devmap {
+ struct list_head list;
+ char bus_id[DASD_BUS_ID_SIZE];
+ unsigned int devindex;
+ unsigned short features;
+ struct dasd_device *device;
+ struct dasd_copy_relation *copy;
+ unsigned int aq_mask;
+};
+
+/*
+ * Parameter parsing functions for dasd= parameter. The syntax is:
+ * <devno> : (0x)?[0-9a-fA-F]+
+ * <busid> : [0-9a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
+ * <feature> : ro
+ * <feature_list> : \(<feature>(:<feature>)*\)
+ * <devno-range> : <devno>(-<devno>)?<feature_list>?
+ * <busid-range> : <busid>(-<busid>)?<feature_list>?
+ * <devices> : <devno-range>|<busid-range>
+ * <dasd_module> : dasd_diag_mod|dasd_eckd_mod|dasd_fba_mod
+ *
+ * <dasd> : autodetect|probeonly|<devices>(,<devices>)*
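+ *
+ * An illustrative (made-up) example combining these rules:
+ * dasd=0.0.1234-0.0.1237(ro:erplog),0x4321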
+ */
+
+int dasd_probeonly = 0; /* is true when probeonly mode is active */
+int dasd_autodetect = 0; /* is true when autodetection is active */
+int dasd_nopav = 0; /* is true when PAV is disabled */
+EXPORT_SYMBOL_GPL(dasd_nopav);
+int dasd_nofcx; /* disable High Performance Ficon */
+EXPORT_SYMBOL_GPL(dasd_nofcx);
+
+/*
+ * char *dasd[] is intended to hold the ranges supplied by the dasd= statement;
+ * it is named 'dasd' so that insmod can fill it directly with the comma
+ * separated strings when running as a module.
+ */
+static char *dasd[DASD_MAX_PARAMS];
+module_param_array(dasd, charp, NULL, S_IRUGO);
+
+/*
+ * Single spinlock to protect devmap and servermap structures and lists.
+ */
+static DEFINE_SPINLOCK(dasd_devmap_lock);
+
+/*
+ * Hash lists for devmap structures.
+ */
+static struct list_head dasd_hashlists[256];
+int dasd_max_devindex;
+
+static struct dasd_devmap *dasd_add_busid(const char *, int);
+
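+/*
+ * Simple additive hash over the bus_id string: the byte values of up
+ * to DASD_BUS_ID_SIZE characters are summed up and reduced modulo 256.
+ */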
+static inline int
+dasd_hash_busid(const char *bus_id)
+{
+ int hash, i;
+
+ hash = 0;
+ for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++)
+ hash += *bus_id;
+ return hash & 0xff;
+}
+
+#ifndef MODULE
+static int __init dasd_call_setup(char *opt)
+{
+ static int i __initdata;
+ char *tmp;
+
+ while (i < DASD_MAX_PARAMS) {
+ tmp = strsep(&opt, ",");
+ if (!tmp)
+ break;
+
+ dasd[i++] = tmp;
+ }
+
+ return 1;
+}
+
+__setup ("dasd=", dasd_call_setup);
+#endif /* #ifndef MODULE */
+
+#define DASD_IPLDEV "ipldev"
+
+/*
+ * Read a device busid/devno from a string.
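+ * Accepted forms are "ipldev", an old style device number such as
+ * "0x1234" or "1234", and a new style busid such as "0.0.1234".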
+ */
+static int dasd_busid(char *str, int *id0, int *id1, int *devno)
+{
+ unsigned int val;
+ char *tok;
+
+ /* Interpret ipldev busid */
+ if (strncmp(DASD_IPLDEV, str, strlen(DASD_IPLDEV)) == 0) {
+ if (ipl_info.type != IPL_TYPE_CCW) {
+ pr_err("The IPL device is not a CCW device\n");
+ return -EINVAL;
+ }
+ *id0 = 0;
+ *id1 = ipl_info.data.ccw.dev_id.ssid;
+ *devno = ipl_info.data.ccw.dev_id.devno;
+
+ return 0;
+ }
+
+ /* Old style 0xXXXX or XXXX */
+ if (!kstrtouint(str, 16, &val)) {
+ *id0 = *id1 = 0;
+ if (val > 0xffff)
+ return -EINVAL;
+ *devno = val;
+ return 0;
+ }
+
+ /* New style x.y.z busid */
+ tok = strsep(&str, ".");
+ if (kstrtouint(tok, 16, &val) || val > 0xff)
+ return -EINVAL;
+ *id0 = val;
+
+ tok = strsep(&str, ".");
+ if (kstrtouint(tok, 16, &val) || val > 0xff)
+ return -EINVAL;
+ *id1 = val;
+
+ tok = strsep(&str, ".");
+ if (kstrtouint(tok, 16, &val) || val > 0xffff)
+ return -EINVAL;
+ *devno = val;
+
+ return 0;
+}
+
+/*
+ * Read colon separated list of dasd features.
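+ * e.g. "ro:failfast" yields DASD_FEATURE_READONLY | DASD_FEATURE_FAILFAST.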
+ */
+static int __init dasd_feature_list(char *str)
+{
+ int features, len, rc;
+
+ features = 0;
+ rc = 0;
+
+ if (!str)
+ return DASD_FEATURE_DEFAULT;
+
+ while (1) {
+ for (len = 0;
+ str[len] && str[len] != ':' && str[len] != ')'; len++);
+ if (len == 2 && !strncmp(str, "ro", 2))
+ features |= DASD_FEATURE_READONLY;
+ else if (len == 4 && !strncmp(str, "diag", 4))
+ features |= DASD_FEATURE_USEDIAG;
+ else if (len == 3 && !strncmp(str, "raw", 3))
+ features |= DASD_FEATURE_USERAW;
+ else if (len == 6 && !strncmp(str, "erplog", 6))
+ features |= DASD_FEATURE_ERPLOG;
+ else if (len == 8 && !strncmp(str, "failfast", 8))
+ features |= DASD_FEATURE_FAILFAST;
+ else {
+ pr_warn("%.*s is not a supported device option\n",
+ len, str);
+ rc = -EINVAL;
+ }
+ str += len;
+ if (*str != ':')
+ break;
+ str++;
+ }
+
+ return rc ? : features;
+}
+
+/*
+ * Try to match the first element of the comma separated parse string
+ * with one of the known keywords. If a keyword is found, take the
+ * appropriate action and return 0. If the first element could not be
+ * matched to any keyword then return an error code.
+ */
+static int __init dasd_parse_keyword(char *keyword)
+{
+ int length = strlen(keyword);
+
+ if (strncmp("autodetect", keyword, length) == 0) {
+ dasd_autodetect = 1;
+ pr_info("The autodetection mode has been activated\n");
+ return 0;
+ }
+ if (strncmp("probeonly", keyword, length) == 0) {
+ dasd_probeonly = 1;
+ pr_info("The probeonly mode has been activated\n");
+ return 0;
+ }
+ if (strncmp("nopav", keyword, length) == 0) {
+ if (MACHINE_IS_VM)
+ pr_info("'nopav' is not supported on z/VM\n");
+ else {
+ dasd_nopav = 1;
+			pr_info("PAV support has been deactivated\n");
+ }
+ return 0;
+ }
+ if (strncmp("nofcx", keyword, length) == 0) {
+ dasd_nofcx = 1;
+ pr_info("High Performance FICON support has been "
+ "deactivated\n");
+ return 0;
+ }
+ if (strncmp("fixedbuffers", keyword, length) == 0) {
+ if (dasd_page_cache)
+ return 0;
+ dasd_page_cache =
+ kmem_cache_create("dasd_page_cache", PAGE_SIZE,
+ PAGE_SIZE, SLAB_CACHE_DMA,
+ NULL);
+ if (!dasd_page_cache)
+ DBF_EVENT(DBF_WARNING, "%s", "Failed to create slab, "
+ "fixed buffer mode disabled.");
+ else
+ DBF_EVENT(DBF_INFO, "%s",
+ "turning on fixed buffer mode");
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Split a string of a device range into its pieces and return the from, to, and
+ * feature parts separately.
+ * e.g.:
+ * 0.0.1234-0.0.5678(ro:erplog) -> from: 0.0.1234 to: 0.0.5678 features: ro:erplog
+ * 0.0.8765(raw) -> from: 0.0.8765 to: null features: raw
+ * 0x4321 -> from: 0x4321 to: null features: null
+ */
+static int __init dasd_evaluate_range_param(char *range, char **from_str,
+ char **to_str, char **features_str)
+{
+ int rc = 0;
+
+ /* Do we have a range or a single device? */
+ if (strchr(range, '-')) {
+ *from_str = strsep(&range, "-");
+ *to_str = strsep(&range, "(");
+ *features_str = strsep(&range, ")");
+ } else {
+ *from_str = strsep(&range, "(");
+ *features_str = strsep(&range, ")");
+ }
+
+ if (*features_str && !range) {
+ pr_warn("A closing parenthesis ')' is missing in the dasd= parameter\n");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * Try to interpret the range string as a device number or a range of devices.
+ * If the interpretation is successful, create the matching dasd_devmap entries.
+ * If interpretation fails or in case of an error, return an error code.
+ */
+static int __init dasd_parse_range(const char *range)
+{
+ struct dasd_devmap *devmap;
+ int from, from_id0, from_id1;
+ int to, to_id0, to_id1;
+ int features;
+ char bus_id[DASD_BUS_ID_SIZE + 1];
+ char *features_str = NULL;
+ char *from_str = NULL;
+ char *to_str = NULL;
+ int rc = 0;
+ char *tmp;
+
+ tmp = kstrdup(range, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (dasd_busid(from_str, &from_id0, &from_id1, &from)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ to = from;
+ to_id0 = from_id0;
+ to_id1 = from_id1;
+ if (to_str) {
+ if (dasd_busid(to_str, &to_id0, &to_id1, &to)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) {
+ pr_err("%s is not a valid device range\n", range);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ features = dasd_feature_list(features_str);
+ if (features < 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ /* each device in dasd= parameter should be set initially online */
+ features |= DASD_FEATURE_INITIAL_ONLINE;
+ while (from <= to) {
+ sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++);
+ devmap = dasd_add_busid(bus_id, features);
+ if (IS_ERR(devmap)) {
+ rc = PTR_ERR(devmap);
+ goto out;
+ }
+ }
+
+out:
+ kfree(tmp);
+
+ return rc;
+}
+
+/*
+ * Parse the parameters stored in dasd[].
+ * The 'dasd=...' parameter allows specifying a comma-separated list of
+ * keywords and device ranges. The parameters in that list are stored as
+ * separate elements in dasd[].
+ */
+int __init dasd_parse(void)
+{
+ int rc, i;
+ char *cur;
+
+ rc = 0;
+ for (i = 0; i < DASD_MAX_PARAMS; i++) {
+ cur = dasd[i];
+ if (!cur)
+ break;
+ if (*cur == '\0')
+ continue;
+
+ rc = dasd_parse_keyword(cur);
+ if (rc)
+ rc = dasd_parse_range(cur);
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
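+/*
+ * Example (a sketch, the bus IDs are made up): booting with
+ *
+ *	dasd=autodetect,0.0.1234-0.0.1237(ro:erplog),0.0.4000(diag)
+ *
+ * stores three elements in dasd[]. "autodetect" is consumed by
+ * dasd_parse_keyword(); the two remaining elements fall through to
+ * dasd_parse_range(), which creates devmap entries carrying the given
+ * feature bits plus DASD_FEATURE_INITIAL_ONLINE.
+ */
+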
+/*
+ * Add a devmap for the device specified by busid. It is possible that
+ * the devmap already exists (dasd= parameter). The order of the devices
+ * added through this function will define the kdevs for the individual
+ * devices.
+ */
+static struct dasd_devmap *
+dasd_add_busid(const char *bus_id, int features)
+{
+ struct dasd_devmap *devmap, *new, *tmp;
+ int hash;
+
+ new = kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+ spin_lock(&dasd_devmap_lock);
+ devmap = NULL;
+ hash = dasd_hash_busid(bus_id);
+ list_for_each_entry(tmp, &dasd_hashlists[hash], list)
+ if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
+ devmap = tmp;
+ break;
+ }
+ if (!devmap) {
+ /* This bus_id is new. */
+ new->devindex = dasd_max_devindex++;
+ strscpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+ new->features = features;
+ new->device = NULL;
+ list_add(&new->list, &dasd_hashlists[hash]);
+ devmap = new;
+ new = NULL;
+ }
+ spin_unlock(&dasd_devmap_lock);
+ kfree(new);
+ return devmap;
+}
+
+static struct dasd_devmap *
+dasd_find_busid_locked(const char *bus_id)
+{
+ struct dasd_devmap *devmap, *tmp;
+ int hash;
+
+ devmap = ERR_PTR(-ENODEV);
+ hash = dasd_hash_busid(bus_id);
+ list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
+ if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
+ devmap = tmp;
+ break;
+ }
+ }
+ return devmap;
+}
+
+/*
+ * Find devmap for device with given bus_id.
+ */
+static struct dasd_devmap *
+dasd_find_busid(const char *bus_id)
+{
+ struct dasd_devmap *devmap;
+
+ spin_lock(&dasd_devmap_lock);
+ devmap = dasd_find_busid_locked(bus_id);
+ spin_unlock(&dasd_devmap_lock);
+ return devmap;
+}
+
+/*
+ * Check if busid has been added to the list of dasd ranges.
+ */
+int
+dasd_busid_known(const char *bus_id)
+{
+ return IS_ERR(dasd_find_busid(bus_id)) ? -ENOENT : 0;
+}
+
+/*
+ * Forget all about the device numbers added so far.
+ * This may only be called at module unload or system shutdown.
+ */
+static void
+dasd_forget_ranges(void)
+{
+ struct dasd_devmap *devmap, *n;
+ int i;
+
+ spin_lock(&dasd_devmap_lock);
+ for (i = 0; i < 256; i++) {
+ list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) {
+ BUG_ON(devmap->device != NULL);
+ list_del(&devmap->list);
+ kfree(devmap);
+ }
+ }
+ spin_unlock(&dasd_devmap_lock);
+}
+
+/*
+ * Find the device struct by its device index.
+ */
+struct dasd_device *
+dasd_device_from_devindex(int devindex)
+{
+ struct dasd_devmap *devmap, *tmp;
+ struct dasd_device *device;
+ int i;
+
+ spin_lock(&dasd_devmap_lock);
+ devmap = NULL;
+ for (i = 0; (i < 256) && !devmap; i++)
+ list_for_each_entry(tmp, &dasd_hashlists[i], list)
+ if (tmp->devindex == devindex) {
+ /* Found the devmap for the device. */
+ devmap = tmp;
+ break;
+ }
+ if (devmap && devmap->device) {
+ device = devmap->device;
+ dasd_get_device(device);
+ } else
+ device = ERR_PTR(-ENODEV);
+ spin_unlock(&dasd_devmap_lock);
+ return device;
+}
+
+/*
+ * Return devmap for cdev. If no devmap exists yet, create one and
+ * connect it to the cdev.
+ */
+static struct dasd_devmap *
+dasd_devmap_from_cdev(struct ccw_device *cdev)
+{
+ struct dasd_devmap *devmap;
+
+ devmap = dasd_find_busid(dev_name(&cdev->dev));
+ if (IS_ERR(devmap))
+ devmap = dasd_add_busid(dev_name(&cdev->dev),
+ DASD_FEATURE_DEFAULT);
+ return devmap;
+}
+
+/*
+ * Create a dasd device structure for cdev.
+ */
+struct dasd_device *
+dasd_create_device(struct ccw_device *cdev)
+{
+ struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ unsigned long flags;
+ int rc;
+
+ devmap = dasd_devmap_from_cdev(cdev);
+ if (IS_ERR(devmap))
+ return (void *) devmap;
+
+ device = dasd_alloc_device();
+ if (IS_ERR(device))
+ return device;
+ atomic_set(&device->ref_count, 3);
+
+ spin_lock(&dasd_devmap_lock);
+ if (!devmap->device) {
+ devmap->device = device;
+ device->devindex = devmap->devindex;
+ device->features = devmap->features;
+ get_device(&cdev->dev);
+ device->cdev = cdev;
+ rc = 0;
+ } else
+ /* Someone else was faster. */
+ rc = -EBUSY;
+ spin_unlock(&dasd_devmap_lock);
+
+ if (rc) {
+ dasd_free_device(device);
+ return ERR_PTR(rc);
+ }
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ dev_set_drvdata(&cdev->dev, device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ device->paths_info = kset_create_and_add("paths_info", NULL,
+ &device->cdev->dev.kobj);
+ if (!device->paths_info)
+ dev_warn(&cdev->dev, "Could not create paths_info kset\n");
+
+ return device;
+}
+
+/*
+ * Allocate a PPRC data structure and call the discipline function to fill it.
+ */
+static int dasd_devmap_get_pprc_status(struct dasd_device *device,
+ struct dasd_pprc_data_sc4 **data)
+{
+ struct dasd_pprc_data_sc4 *temp;
+
+ if (!device->discipline || !device->discipline->pprc_status) {
+ dev_warn(&device->cdev->dev, "Unable to query copy relation status\n");
+ return -EOPNOTSUPP;
+ }
+ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ /* get PPRC information from storage */
+ if (device->discipline->pprc_status(device, temp)) {
+ dev_warn(&device->cdev->dev, "Error during copy relation status query\n");
+ kfree(temp);
+ return -EINVAL;
+ }
+ *data = temp;
+
+ return 0;
+}
+
+/*
+ * Find an entry in a PPRC device_info array by a given UID.
+ * Depending on the primary/secondary state of the device, the UID has to be
+ * matched against the respective fields.
+ */
+static int dasd_devmap_entry_from_pprc_data(struct dasd_pprc_data_sc4 *data,
+ struct dasd_uid uid,
+ bool primary)
+{
+ int i;
+
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (primary) {
+ if (data->dev_info[i].prim_cu_ssid == uid.ssid &&
+ data->dev_info[i].primary == uid.real_unit_addr)
+ return i;
+ } else {
+ if (data->dev_info[i].sec_cu_ssid == uid.ssid &&
+ data->dev_info[i].secondary == uid.real_unit_addr)
+ return i;
+ }
+ }
+ return -1;
+}
+
+/*
+ * check the consistency of a specified copy relation by checking
+ * the following things:
+ *
+ * - is the given device part of a copy pair setup
+ * - does the state of the device match the state in the PPRC status data
+ * - does the device UID match with the UID in the PPRC status data
+ * - to prevent misrouted IO check if the given device is present in all
+ * related PPRC status data
+ */
+static int dasd_devmap_check_copy_relation(struct dasd_device *device,
+ struct dasd_copy_entry *entry,
+ struct dasd_pprc_data_sc4 *data,
+ struct dasd_copy_relation *copy)
+{
+ struct dasd_pprc_data_sc4 *tmp_dat;
+ struct dasd_device *tmp_dev;
+ struct dasd_uid uid;
+ int i, j;
+
+ if (!device->discipline || !device->discipline->get_uid ||
+ device->discipline->get_uid(device, &uid))
+ return 1;
+
+ i = dasd_devmap_entry_from_pprc_data(data, uid, entry->primary);
+ if (i < 0) {
+ dev_warn(&device->cdev->dev, "Device not part of a copy relation\n");
+ return 1;
+ }
+
+ /* double check which role the current device has */
+ if (entry->primary) {
+ if (data->dev_info[i].flags & 0x80) {
+ dev_warn(&device->cdev->dev, "Copy pair secondary is setup as primary\n");
+ return 1;
+ }
+ if (data->dev_info[i].prim_cu_ssid != uid.ssid ||
+ data->dev_info[i].primary != uid.real_unit_addr) {
+ dev_warn(&device->cdev->dev,
+ "Primary device %s does not match copy pair status primary device %04x\n",
+ dev_name(&device->cdev->dev),
+ data->dev_info[i].prim_cu_ssid |
+ data->dev_info[i].primary);
+ return 1;
+ }
+ } else {
+ if (!(data->dev_info[i].flags & 0x80)) {
+ dev_warn(&device->cdev->dev, "Copy pair primary is setup as secondary\n");
+ return 1;
+ }
+ if (data->dev_info[i].sec_cu_ssid != uid.ssid ||
+ data->dev_info[i].secondary != uid.real_unit_addr) {
+ dev_warn(&device->cdev->dev,
+ "Secondary device %s does not match copy pair status secondary device %04x\n",
+ dev_name(&device->cdev->dev),
+ data->dev_info[i].sec_cu_ssid |
+ data->dev_info[i].secondary);
+ return 1;
+ }
+ }
+
+ /*
+ * the current device has to be part of the copy relation of all
+ * entries to prevent misrouted IO to another copy pair
+ */
+ for (j = 0; j < DASD_CP_ENTRIES; j++) {
+ if (entry == &copy->entry[j])
+ tmp_dev = device;
+ else
+ tmp_dev = copy->entry[j].device;
+
+ if (!tmp_dev)
+ continue;
+
+ if (dasd_devmap_get_pprc_status(tmp_dev, &tmp_dat))
+ return 1;
+
+ if (dasd_devmap_entry_from_pprc_data(tmp_dat, uid, entry->primary) < 0) {
+ dev_warn(&tmp_dev->cdev->dev,
+ "Copy pair relation does not contain device: %s\n",
+ dev_name(&device->cdev->dev));
+ kfree(tmp_dat);
+ return 1;
+ }
+ kfree(tmp_dat);
+ }
+ return 0;
+}
+
+/* delete device from copy relation entry */
+static void dasd_devmap_delete_copy_relation_device(struct dasd_device *device)
+{
+ struct dasd_copy_relation *copy;
+ int i;
+
+ if (!device->copy)
+ return;
+
+ copy = device->copy;
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].device == device)
+ copy->entry[i].device = NULL;
+ }
+ dasd_put_device(device);
+ device->copy = NULL;
+}
+
+/*
+ * read all required information for a copy relation setup and setup the device
+ * accordingly
+ */
+int dasd_devmap_set_device_copy_relation(struct ccw_device *cdev,
+ bool pprc_enabled)
+{
+ struct dasd_pprc_data_sc4 *data = NULL;
+ struct dasd_copy_entry *entry = NULL;
+ struct dasd_copy_relation *copy;
+ struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ int i, rc = 0;
+
+ devmap = dasd_devmap_from_cdev(cdev);
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+
+ device = devmap->device;
+ if (!device)
+ return -ENODEV;
+
+ copy = devmap->copy;
+ /* no copy pair setup for this device */
+ if (!copy)
+ goto out;
+
+ rc = dasd_devmap_get_pprc_status(device, &data);
+ if (rc)
+ return rc;
+
+ /* print error if PPRC is requested but not enabled on storage server */
+ if (!pprc_enabled) {
+ dev_err(&cdev->dev, "Copy relation not enabled on storage server\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!data->dev_info[0].state) {
+ dev_warn(&device->cdev->dev, "Copy pair setup requested for device not in copy relation\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* find entry */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(dev_name(&cdev->dev),
+ copy->entry[i].busid, DASD_BUS_ID_SIZE) == 0) {
+ entry = &copy->entry[i];
+ break;
+ }
+ }
+ if (!entry) {
+ dev_warn(&device->cdev->dev, "Copy relation entry not found\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* check if the copy relation is valid */
+ if (dasd_devmap_check_copy_relation(device, entry, data, copy)) {
+ dev_warn(&device->cdev->dev, "Copy relation faulty\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dasd_get_device(device);
+ copy->entry[i].device = device;
+ device->copy = copy;
+out:
+ kfree(data);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_devmap_set_device_copy_relation);
+
+/*
+ * Wait queue for dasd_delete_device waits.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq);
+
+/*
+ * Remove a dasd device structure. The passed reference is
+ * dropped in the process.
+ */
+void
+dasd_delete_device(struct dasd_device *device)
+{
+ struct ccw_device *cdev;
+ struct dasd_devmap *devmap;
+ unsigned long flags;
+
+ /* First remove device pointer from devmap. */
+ devmap = dasd_find_busid(dev_name(&device->cdev->dev));
+ BUG_ON(IS_ERR(devmap));
+ spin_lock(&dasd_devmap_lock);
+ if (devmap->device != device) {
+ spin_unlock(&dasd_devmap_lock);
+ dasd_put_device(device);
+ return;
+ }
+ devmap->device = NULL;
+ spin_unlock(&dasd_devmap_lock);
+
+ /* Disconnect dasd_device structure from ccw_device structure. */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ dev_set_drvdata(&device->cdev->dev, NULL);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	/* Remove the copy relation */
+ dasd_devmap_delete_copy_relation_device(device);
+ /*
+ * Drop ref_count by 3, one for the devmap reference, one for
+ * the cdev reference and one for the passed reference.
+ */
+ atomic_sub(3, &device->ref_count);
+
+ /* Wait for reference counter to drop to zero. */
+ wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
+
+ dasd_generic_free_discipline(device);
+
+ kset_unregister(device->paths_info);
+
+ /* Disconnect dasd_device structure from ccw_device structure. */
+ cdev = device->cdev;
+ device->cdev = NULL;
+
+ /* Put ccw_device structure. */
+ put_device(&cdev->dev);
+
+ /* Now the device structure can be freed. */
+ dasd_free_device(device);
+}
+
+/*
+ * Reference counter dropped to zero. Wake up waiter
+ * in dasd_delete_device.
+ */
+void
+dasd_put_device_wake(struct dasd_device *device)
+{
+ wake_up(&dasd_delete_wq);
+}
+EXPORT_SYMBOL_GPL(dasd_put_device_wake);
+
+/*
+ * Return dasd_device structure associated with cdev.
+ * This function needs to be called with the ccw device
+ * lock held. It can be used from interrupt context.
+ */
+struct dasd_device *
+dasd_device_from_cdev_locked(struct ccw_device *cdev)
+{
+ struct dasd_device *device = dev_get_drvdata(&cdev->dev);
+
+ if (!device)
+ return ERR_PTR(-ENODEV);
+ dasd_get_device(device);
+ return device;
+}
+
+/*
+ * Return dasd_device structure associated with cdev.
+ */
+struct dasd_device *
+dasd_device_from_cdev(struct ccw_device *cdev)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ device = dasd_device_from_cdev_locked(cdev);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ return device;
+}
+
+void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device)
+{
+ struct dasd_devmap *devmap;
+
+ devmap = dasd_find_busid(dev_name(&device->cdev->dev));
+ if (IS_ERR(devmap))
+ return;
+ spin_lock(&dasd_devmap_lock);
+ gdp->private_data = devmap;
+ spin_unlock(&dasd_devmap_lock);
+}
+EXPORT_SYMBOL(dasd_add_link_to_gendisk);
+
+struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp)
+{
+ struct dasd_device *device;
+ struct dasd_devmap *devmap;
+
+ if (!gdp->private_data)
+ return NULL;
+ device = NULL;
+ spin_lock(&dasd_devmap_lock);
+ devmap = gdp->private_data;
+ if (devmap && devmap->device) {
+ device = devmap->device;
+ dasd_get_device(device);
+ }
+ spin_unlock(&dasd_devmap_lock);
+ return device;
+}
+
+/*
+ * SECTION: files in sysfs
+ */
+
+/*
+ * failfast controls the behaviour if no path to the device is available
+ */
+static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_devmap *devmap;
+ int ff_flag;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (!IS_ERR(devmap))
+ ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
+ else
+ ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
+ return sysfs_emit(buf, ff_flag ? "1\n" : "0\n");
+}
+
+static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int rc;
+
+ if (kstrtouint(buf, 0, &val) || val > 1)
+ return -EINVAL;
+
+ rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val);
+
+ return rc ? : count;
+}
+
+static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
+
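+/*
+ * Illustrative usage from user space (the sysfs path is an assumption
+ * based on the usual ccw device location):
+ *
+ *	echo 1 > /sys/bus/ccw/devices/0.0.1234/failfast
+ *
+ * dasd_ff_store() only accepts "0" or "1"; any other input is rejected
+ * with -EINVAL by the kstrtouint() check above.
+ */
+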
+/*
+ * readonly controls the readonly status of a dasd
+ */
+static ssize_t
+dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ int ro_flag = 0;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (IS_ERR(devmap))
+ goto out;
+
+ ro_flag = !!(devmap->features & DASD_FEATURE_READONLY);
+
+ spin_lock(&dasd_devmap_lock);
+ device = devmap->device;
+ if (device)
+ ro_flag |= test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+ spin_unlock(&dasd_devmap_lock);
+
+out:
+ return sysfs_emit(buf, ro_flag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct dasd_device *device;
+ unsigned long flags;
+ unsigned int val;
+ int rc;
+
+ if (kstrtouint(buf, 0, &val) || val > 1)
+ return -EINVAL;
+
+ rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val);
+ if (rc)
+ return rc;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device))
+ return count;
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+ if (!device->block || !device->block->gdp ||
+ test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ goto out;
+ }
+ /* Increase open_count to avoid losing the block device */
+ atomic_inc(&device->block->open_count);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ set_disk_ro(device->block->gdp, val);
+ atomic_dec(&device->block->open_count);
+
+out:
+ dasd_put_device(device);
+
+ return count;
+}
+
+static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
+/*
+ * erplog controls the logging of ERP related data
+ * (e.g. failing channel programs).
+ */
+static ssize_t
+dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_devmap *devmap;
+ int erplog;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (!IS_ERR(devmap))
+ erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
+ else
+ erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
+ return sysfs_emit(buf, erplog ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_erplog_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int rc;
+
+ if (kstrtouint(buf, 0, &val) || val > 1)
+ return -EINVAL;
+
+ rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val);
+
+ return rc ? : count;
+}
+
+static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
+
+/*
+ * use_diag controls whether the driver should use diag rather than ssch
+ * to talk to the device
+ */
+static ssize_t
+dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_devmap *devmap;
+ int use_diag;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (!IS_ERR(devmap))
+ use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
+ else
+ use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
+	return sysfs_emit(buf, use_diag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_devmap *devmap;
+ unsigned int val;
+ ssize_t rc;
+
+ devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+
+ if (kstrtouint(buf, 0, &val) || val > 1)
+ return -EINVAL;
+
+ spin_lock(&dasd_devmap_lock);
+ /* Changing diag discipline flag is only allowed in offline state. */
+ rc = count;
+ if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
+ if (val)
+ devmap->features |= DASD_FEATURE_USEDIAG;
+ else
+ devmap->features &= ~DASD_FEATURE_USEDIAG;
+ } else
+ rc = -EPERM;
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+}
+
+static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
+
+/*
+ * use_raw controls whether the driver should give access to raw eckd data or
+ * operate in standard mode
+ */
+static ssize_t
+dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_devmap *devmap;
+ int use_raw;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (!IS_ERR(devmap))
+ use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
+ else
+ use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
+	return sysfs_emit(buf, use_raw ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_devmap *devmap;
+ ssize_t rc;
+ unsigned long val;
+
+ devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+
+ if ((kstrtoul(buf, 10, &val) != 0) || val > 1)
+ return -EINVAL;
+
+ spin_lock(&dasd_devmap_lock);
+	/* Changing the raw track access flag is only allowed in offline state. */
+ rc = count;
+ if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
+ if (val)
+ devmap->features |= DASD_FEATURE_USERAW;
+ else
+ devmap->features &= ~DASD_FEATURE_USERAW;
+ } else
+ rc = -EPERM;
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+}
+
+static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
+ dasd_use_raw_store);
+
+static ssize_t
+dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct dasd_device *device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ device = dasd_device_from_cdev_locked(cdev);
+ if (IS_ERR(device)) {
+ rc = PTR_ERR(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ goto out;
+ }
+
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+ test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+ /* Already doing offline processing */
+ dasd_put_device(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ rc = -EBUSY;
+ goto out;
+ }
+
+ set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+ dasd_put_device(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+ rc = ccw_device_set_offline(cdev);
+
+out:
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
+
+static ssize_t
+dasd_access_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct dasd_device *device;
+ int count;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+
+ if (!device->discipline)
+ count = -ENODEV;
+ else if (!device->discipline->host_access_count)
+ count = -EOPNOTSUPP;
+ else
+ count = device->discipline->host_access_count(device);
+
+ dasd_put_device(device);
+ if (count < 0)
+ return count;
+
+	return sysfs_emit(buf, "%d\n", count);
+}
+
+static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
+
+static ssize_t
+dasd_discipline_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ ssize_t len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ goto out;
+ else if (!device->discipline) {
+ dasd_put_device(device);
+ goto out;
+ } else {
+ len = sysfs_emit(buf, "%s\n",
+ device->discipline->name);
+ dasd_put_device(device);
+ return len;
+ }
+out:
+ len = sysfs_emit(buf, "none\n");
+ return len;
+}
+
+static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
+
+static ssize_t
+dasd_device_status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ ssize_t len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (!IS_ERR(device)) {
+ switch (device->state) {
+ case DASD_STATE_NEW:
+ len = sysfs_emit(buf, "new\n");
+ break;
+ case DASD_STATE_KNOWN:
+ len = sysfs_emit(buf, "detected\n");
+ break;
+ case DASD_STATE_BASIC:
+ len = sysfs_emit(buf, "basic\n");
+ break;
+ case DASD_STATE_UNFMT:
+ len = sysfs_emit(buf, "unformatted\n");
+ break;
+ case DASD_STATE_READY:
+ len = sysfs_emit(buf, "ready\n");
+ break;
+ case DASD_STATE_ONLINE:
+ len = sysfs_emit(buf, "online\n");
+ break;
+ default:
+ len = sysfs_emit(buf, "no stat\n");
+ break;
+ }
+ dasd_put_device(device);
+ } else
+ len = sysfs_emit(buf, "unknown\n");
+ return len;
+}
+
+static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
+
+static ssize_t dasd_alias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ struct dasd_uid uid;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+		return sysfs_emit(buf, "0\n");
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid)) {
+ if (uid.type == UA_BASE_PAV_ALIAS ||
+ uid.type == UA_HYPER_PAV_ALIAS) {
+ dasd_put_device(device);
+			return sysfs_emit(buf, "1\n");
+ }
+ }
+ dasd_put_device(device);
+
+	return sysfs_emit(buf, "0\n");
+}
+
+static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
+
+static ssize_t dasd_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ struct dasd_uid uid;
+ char *vendor;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ vendor = "";
+ if (IS_ERR(device))
+ return sysfs_emit(buf, "%s\n", vendor);
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid))
+ vendor = uid.vendor;
+
+ dasd_put_device(device);
+
+ return sysfs_emit(buf, "%s\n", vendor);
+}
+
+static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
+
+static ssize_t
+dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ char uid_string[DASD_UID_STRLEN];
+ struct dasd_device *device;
+ struct dasd_uid uid;
+ char ua_string[3];
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ uid_string[0] = 0;
+ if (IS_ERR(device))
+ return sysfs_emit(buf, "%s\n", uid_string);
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid)) {
+ switch (uid.type) {
+ case UA_BASE_DEVICE:
+ snprintf(ua_string, sizeof(ua_string), "%02x",
+ uid.real_unit_addr);
+ break;
+ case UA_BASE_PAV_ALIAS:
+ snprintf(ua_string, sizeof(ua_string), "%02x",
+ uid.base_unit_addr);
+ break;
+ case UA_HYPER_PAV_ALIAS:
+ snprintf(ua_string, sizeof(ua_string), "xx");
+ break;
+ default:
+ /* should not happen, treat like base device */
+ snprintf(ua_string, sizeof(ua_string), "%02x",
+ uid.real_unit_addr);
+ break;
+ }
+
+ if (strlen(uid.vduit) > 0)
+ snprintf(uid_string, sizeof(uid_string),
+ "%s.%s.%04x.%s.%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string,
+ uid.vduit);
+ else
+ snprintf(uid_string, sizeof(uid_string),
+ "%s.%s.%04x.%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string);
+ }
+ dasd_put_device(device);
+
+ return sysfs_emit(buf, "%s\n", uid_string);
+}
+static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
+
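+/*
+ * The emitted UID has the form "vendor.serial.ssid.ua[.vduit]"; a
+ * hypothetical base device could therefore read as
+ *
+ *	IBM.750000000ABCD1.5600.0d
+ *
+ * while a hyper PAV alias reports "xx" in the unit address field.
+ */
+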
+/*
+ * extended error-reporting
+ */
+static ssize_t
+dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_devmap *devmap;
+ int eer_flag;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (!IS_ERR(devmap) && devmap->device)
+ eer_flag = dasd_eer_enabled(devmap->device);
+ else
+ eer_flag = 0;
+ return sysfs_emit(buf, eer_flag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_eer_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned int val;
+	int rc = 0;
+
+	/*
+	 * Parse the input before taking a device reference; otherwise an
+	 * invalid value would leak the reference.
+	 */
+	if (kstrtouint(buf, 0, &val) || val > 1)
+		return -EINVAL;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+ if (val)
+ rc = dasd_eer_enable(device);
+ else
+ dasd_eer_disable(device);
+
+ dasd_put_device(device);
+
+ return rc ? : count;
+}
+
+static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
+
+/*
+ * aq_mask controls if the DASD should be quiesced on certain triggers.
+ * The aq_mask attribute is interpreted as a bitmap of the DASD_EER_* triggers.
+ */
+static ssize_t dasd_aq_mask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_devmap *devmap;
+ unsigned int aq_mask = 0;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (!IS_ERR(devmap))
+ aq_mask = devmap->aq_mask;
+
+ return sysfs_emit(buf, "%d\n", aq_mask);
+}
+
+static ssize_t dasd_aq_mask_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_devmap *devmap;
+ unsigned int val;
+
+ if (kstrtouint(buf, 0, &val) || val > DASD_EER_VALID)
+ return -EINVAL;
+
+ devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+
+ spin_lock(&dasd_devmap_lock);
+ devmap->aq_mask = val;
+ if (devmap->device)
+ devmap->device->aq_mask = devmap->aq_mask;
+ spin_unlock(&dasd_devmap_lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(aq_mask, 0644, dasd_aq_mask_show, dasd_aq_mask_store);
+
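+/*
+ * Sketch of the intended use (the trigger bit assignment follows the
+ * DASD_EER_* definitions and is not spelled out here):
+ *
+ *	echo 0x6 > /sys/bus/ccw/devices/0.0.1234/aq_mask
+ *
+ * would select the triggers behind bits 1 and 2; values above
+ * DASD_EER_VALID are rejected with -EINVAL.
+ */
+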
+/*
+ * aq_requeue controls whether requests are returned to the block layer on
+ * quiesce or are merely left unstarted
+ */
+static ssize_t dasd_aqr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_devmap *devmap;
+ int flag;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (!IS_ERR(devmap))
+ flag = (devmap->features & DASD_FEATURE_REQUEUEQUIESCE) != 0;
+ else
+ flag = (DASD_FEATURE_DEFAULT &
+ DASD_FEATURE_REQUEUEQUIESCE) != 0;
+ return sysfs_emit(buf, "%d\n", flag);
+}
+
+static ssize_t dasd_aqr_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool val;
+ int rc;
+
+ if (kstrtobool(buf, &val))
+ return -EINVAL;
+
+ rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_REQUEUEQUIESCE, val);
+
+ return rc ? : count;
+}
+
+static DEVICE_ATTR(aq_requeue, 0644, dasd_aqr_show, dasd_aqr_store);
+
+/*
+ * aq_timeouts controls how many retries have to time out before a
+ * device gets autoquiesced
+ */
+static ssize_t
+dasd_aq_timeouts_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = sysfs_emit(buf, "%u\n", device->aq_timeouts);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_aq_timeouts_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned int val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+	if ((kstrtouint(buf, 10, &val) != 0) ||
+	    val > DASD_RETRIES_MAX || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+
+	device->aq_timeouts = val;
+
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(aq_timeouts, 0644, dasd_aq_timeouts_show,
+ dasd_aq_timeouts_store);
+
+/*
+ * expiration time for default requests
+ */
+static ssize_t
+dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = sysfs_emit(buf, "%lu\n", device->default_expires);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_expires_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_EXPIRES_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+
+	device->default_expires = val;
+
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
+
+static ssize_t
+dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = sysfs_emit(buf, "%lu\n", device->default_retries);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_retries_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((kstrtoul(buf, 10, &val) != 0) ||
+ (val > DASD_RETRIES_MAX)) {
+ dasd_put_device(device);
+ return -EINVAL;
+ }
+
+ if (val)
+ device->default_retries = val;
+
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(retries, 0644, dasd_retries_show, dasd_retries_store);
+
+static ssize_t
+dasd_timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = sysfs_emit(buf, "%lu\n", device->blk_timeout);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_timeout_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device) || !device->block)
+ return -ENODEV;
+
+ if ((kstrtoul(buf, 10, &val) != 0) ||
+ val > UINT_MAX / HZ) {
+ dasd_put_device(device);
+ return -EINVAL;
+ }
+ if (!device->block->gdp) {
+ dasd_put_device(device);
+ return -ENODEV;
+ }
+
+ device->blk_timeout = val;
+ blk_queue_rq_timeout(device->block->gdp->queue, val * HZ);
+
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(timeout, 0644,
+ dasd_timeout_show, dasd_timeout_store);
+
+
+static ssize_t
+dasd_path_reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned int val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff)
+ val = 0;
+
+ if (device->discipline && device->discipline->reset_path)
+ device->discipline->reset_path(device, (__u8) val);
+
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store);
+
+static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ int hpf;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ if (!device->discipline || !device->discipline->hpf_enabled) {
+ dasd_put_device(device);
+ return sysfs_emit(buf, "%d\n", dasd_nofcx);
+ }
+ hpf = device->discipline->hpf_enabled(device);
+ dasd_put_device(device);
+ return sysfs_emit(buf, "%d\n", hpf);
+}
+
+static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
+
+static ssize_t dasd_reservation_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_devmap *devmap;
+ int rc = 0;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (IS_ERR(devmap)) {
+ rc = sysfs_emit(buf, "ignore\n");
+ } else {
+ spin_lock(&dasd_devmap_lock);
+ if (devmap->features & DASD_FEATURE_FAILONSLCK)
+ rc = sysfs_emit(buf, "fail\n");
+ else
+ rc = sysfs_emit(buf, "ignore\n");
+ spin_unlock(&dasd_devmap_lock);
+ }
+ return rc;
+}
+
+static ssize_t dasd_reservation_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ int rc;
+
+ if (sysfs_streq("ignore", buf))
+ rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0);
+ else if (sysfs_streq("fail", buf))
+ rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1);
+ else
+ rc = -EINVAL;
+
+ return rc ? : count;
+}
+
+static DEVICE_ATTR(reservation_policy, 0644,
+ dasd_reservation_policy_show, dasd_reservation_policy_store);
+
+static ssize_t dasd_reservation_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ int rc = 0;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return sysfs_emit(buf, "none\n");
+
+ if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
+ rc = sysfs_emit(buf, "reserved\n");
+ else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
+ rc = sysfs_emit(buf, "lost\n");
+ else
+ rc = sysfs_emit(buf, "none\n");
+ dasd_put_device(device);
+ return rc;
+}
+
+static ssize_t dasd_reservation_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ int rc = 0;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ if (sysfs_streq("reset", buf))
+ clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
+ else
+ rc = -EINVAL;
+ dasd_put_device(device);
+
+ if (rc)
+ return rc;
+ else
+ return count;
+}
+
+static DEVICE_ATTR(last_known_reservation_state, 0644,
+ dasd_reservation_state_show, dasd_reservation_state_store);
+
+static ssize_t dasd_pm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+		return sysfs_emit(buf, "0\n");
+
+ opm = dasd_path_get_opm(device);
+ nppm = dasd_path_get_nppm(device);
+ cablepm = dasd_path_get_cablepm(device);
+ cuirpm = dasd_path_get_cuirpm(device);
+ hpfpm = dasd_path_get_hpfpm(device);
+ ifccpm = dasd_path_get_ifccpm(device);
+ dasd_put_device(device);
+
+	return sysfs_emit(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+			  cablepm, cuirpm, hpfpm, ifccpm);
+}
+
+static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
+
+/*
+ * threshold value for IFCC/CCC errors
+ */
+static ssize_t
+dasd_path_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = sysfs_emit(buf, "%lu\n", device->path_thrhld);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if (kstrtoul(buf, 10, &val) != 0 || val > DASD_THRHLD_MAX) {
+ dasd_put_device(device);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ device->path_thrhld = val;
+ spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ dasd_put_device(device);
+ return count;
+}
+static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
+ dasd_path_threshold_store);
+
+/*
+ * configure if path is disabled after IFCC/CCC error threshold is
+ * exceeded
+ */
+static ssize_t
+dasd_path_autodisable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_devmap *devmap;
+ int flag;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (!IS_ERR(devmap))
+ flag = (devmap->features & DASD_FEATURE_PATH_AUTODISABLE) != 0;
+ else
+ flag = (DASD_FEATURE_DEFAULT &
+ DASD_FEATURE_PATH_AUTODISABLE) != 0;
+ return sysfs_emit(buf, flag ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_path_autodisable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int rc;
+
+ if (kstrtouint(buf, 0, &val) || val > 1)
+ return -EINVAL;
+
+ rc = dasd_set_feature(to_ccwdev(dev),
+ DASD_FEATURE_PATH_AUTODISABLE, val);
+
+ return rc ? : count;
+}
+
+static DEVICE_ATTR(path_autodisable, 0644,
+ dasd_path_autodisable_show,
+ dasd_path_autodisable_store);
+/*
+ * interval for IFCC/CCC checks, i.e. the time without an IFCC/CCC error
+ * before the error counter gets reset
+ */
+static ssize_t
+dasd_path_interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ int len;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ len = sysfs_emit(buf, "%lu\n", device->path_interval);
+ dasd_put_device(device);
+ return len;
+}
+
+static ssize_t
+dasd_path_interval_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ unsigned long flags;
+ unsigned long val;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_INTERVAL_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	device->path_interval = val;
+ spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+ dasd_put_device(device);
+ return count;
+}
+
+static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
+ dasd_path_interval_store);
+
+static ssize_t
+dasd_device_fcs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dasd_device *device;
+ int fc_sec;
+ int rc;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+ fc_sec = dasd_path_get_fcs_device(device);
+ if (fc_sec == -EINVAL)
+ rc = sysfs_emit(buf, "Inconsistent\n");
+ else
+ rc = sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
+ dasd_put_device(device);
+
+ return rc;
+}
+static DEVICE_ATTR(fc_security, 0444, dasd_device_fcs_show, NULL);
+
+static ssize_t
+dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct dasd_path *path = to_dasd_path(kobj);
+ unsigned int fc_sec = path->fc_security;
+
+ return sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
+}
+
+static struct kobj_attribute path_fcs_attribute =
+ __ATTR(fc_security, 0444, dasd_path_fcs_show, NULL);
+
+/*
+ * print copy relation in the form
+ * primary,secondary[1] primary,secondary[2], ...
+ */
+static ssize_t
+dasd_copy_pair_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ char prim_busid[DASD_BUS_ID_SIZE];
+ struct dasd_copy_relation *copy;
+ struct dasd_devmap *devmap;
+ int len = 0;
+ int i;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (IS_ERR(devmap))
+ return -ENODEV;
+
+ if (!devmap->copy)
+ return -ENODEV;
+
+ copy = devmap->copy;
+ /* find primary */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured && copy->entry[i].primary) {
+ strscpy(prim_busid, copy->entry[i].busid,
+ DASD_BUS_ID_SIZE);
+ break;
+ }
+ }
+ if (i == DASD_CP_ENTRIES)
+ goto out;
+
+ /* print all secondary */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured && !copy->entry[i].primary)
+ len += sysfs_emit_at(buf, len, "%s,%s ", prim_busid,
+ copy->entry[i].busid);
+ }
+
+ len += sysfs_emit_at(buf, len, "\n");
+out:
+ return len;
+}
+
+static int dasd_devmap_set_copy_relation(struct dasd_devmap *devmap,
+ struct dasd_copy_relation *copy,
+ char *busid, bool primary)
+{
+ int i;
+
+ /* find free entry */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ /* current bus_id already included, nothing to do */
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
+ return 0;
+
+ if (!copy->entry[i].configured)
+ break;
+ }
+ if (i == DASD_CP_ENTRIES)
+ return -EINVAL;
+
+ copy->entry[i].configured = true;
+ strscpy(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE);
+ if (primary) {
+ copy->active = &copy->entry[i];
+ copy->entry[i].primary = true;
+ }
+ if (!devmap->copy)
+ devmap->copy = copy;
+
+ return 0;
+}
+
+static void dasd_devmap_del_copy_relation(struct dasd_copy_relation *copy,
+ char *busid)
+{
+ int i;
+
+ spin_lock(&dasd_devmap_lock);
+ /* find entry */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
+ break;
+ }
+ if (i == DASD_CP_ENTRIES || !copy->entry[i].configured) {
+ spin_unlock(&dasd_devmap_lock);
+ return;
+ }
+
+ copy->entry[i].configured = false;
+ memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE);
+ if (copy->active == &copy->entry[i]) {
+ copy->active = NULL;
+ copy->entry[i].primary = false;
+ }
+ spin_unlock(&dasd_devmap_lock);
+}
+
+static int dasd_devmap_clear_copy_relation(struct device *dev)
+{
+ struct dasd_copy_relation *copy;
+ struct dasd_devmap *devmap;
+ int i, rc = 1;
+
+ devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(devmap))
+ return 1;
+
+ spin_lock(&dasd_devmap_lock);
+ if (!devmap->copy)
+ goto out;
+
+ copy = devmap->copy;
+	/* first check if all secondary devices are offline */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (!copy->entry[i].configured)
+ continue;
+
+ if (copy->entry[i].device == copy->active->device)
+ continue;
+
+ if (copy->entry[i].device)
+ goto out;
+ }
+ /* clear all devmap entries */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (strlen(copy->entry[i].busid) == 0)
+ continue;
+		if (copy->entry[i].device) {
+			copy->entry[i].device->copy = NULL;
+			dasd_put_device(copy->entry[i].device);
+			copy->entry[i].device = NULL;
+		}
+ }
+ devmap = dasd_find_busid_locked(copy->entry[i].busid);
+ devmap->copy = NULL;
+ memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE);
+ }
+ kfree(copy);
+ rc = 0;
+out:
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+}
+
+/*
+ * parse BUSIDs from a copy pair
+ */
+static int dasd_devmap_parse_busid(const char *buf, char *prim_busid,
+ char *sec_busid)
+{
+ char *primary, *secondary, *tmp, *pt;
+ int id0, id1, id2;
+
+ pt = kstrdup(buf, GFP_KERNEL);
+ tmp = pt;
+ if (!tmp)
+ return -ENOMEM;
+
+ primary = strsep(&tmp, ",");
+ if (!primary) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ secondary = strsep(&tmp, ",");
+ if (!secondary) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ if (dasd_busid(primary, &id0, &id1, &id2)) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ sprintf(prim_busid, "%01x.%01x.%04x", id0, id1, id2);
+ if (dasd_busid(secondary, &id0, &id1, &id2)) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ sprintf(sec_busid, "%01x.%01x.%04x", id0, id1, id2);
+ kfree(pt);
+
+ return 0;
+}
+
+static ssize_t dasd_copy_pair_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_devmap *prim_devmap, *sec_devmap;
+ char prim_busid[DASD_BUS_ID_SIZE];
+ char sec_busid[DASD_BUS_ID_SIZE];
+ struct dasd_copy_relation *copy;
+ struct dasd_device *device;
+ bool pprc_enabled;
+ int rc;
+
+ if (strncmp(buf, "clear", strlen("clear")) == 0) {
+ if (dasd_devmap_clear_copy_relation(dev))
+ return -EINVAL;
+ return count;
+ }
+
+ rc = dasd_devmap_parse_busid(buf, prim_busid, sec_busid);
+ if (rc)
+ return rc;
+
+ if (strncmp(dev_name(dev), prim_busid, DASD_BUS_ID_SIZE) != 0 &&
+ strncmp(dev_name(dev), sec_busid, DASD_BUS_ID_SIZE) != 0)
+ return -EINVAL;
+
+	/* allocate primary devmap if needed */
+	prim_devmap = dasd_find_busid(prim_busid);
+	if (IS_ERR(prim_devmap))
+		prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT);
+	if (IS_ERR(prim_devmap))
+		return PTR_ERR(prim_devmap);
+
+	/* allocate secondary devmap if needed */
+	sec_devmap = dasd_find_busid(sec_busid);
+	if (IS_ERR(sec_devmap))
+		sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT);
+	if (IS_ERR(sec_devmap))
+		return PTR_ERR(sec_devmap);
+
+	/* setting copy relation is only allowed for offline secondary */
+	if (sec_devmap->device)
+		return -EINVAL;
+
+ if (prim_devmap->copy) {
+ copy = prim_devmap->copy;
+ } else if (sec_devmap->copy) {
+ copy = sec_devmap->copy;
+ } else {
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ }
+ spin_lock(&dasd_devmap_lock);
+ rc = dasd_devmap_set_copy_relation(prim_devmap, copy, prim_busid, true);
+ if (rc) {
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+ }
+ rc = dasd_devmap_set_copy_relation(sec_devmap, copy, sec_busid, false);
+ if (rc) {
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+ }
+ spin_unlock(&dasd_devmap_lock);
+
+	/* if the primary device is already online, call device setup directly */
+ if (prim_devmap->device && !prim_devmap->device->copy) {
+ device = prim_devmap->device;
+ if (device->discipline->pprc_enabled) {
+ pprc_enabled = device->discipline->pprc_enabled(device);
+ rc = dasd_devmap_set_device_copy_relation(device->cdev,
+ pprc_enabled);
+ } else {
+ rc = -EOPNOTSUPP;
+ }
+ }
+ if (rc) {
+ dasd_devmap_del_copy_relation(copy, prim_busid);
+ dasd_devmap_del_copy_relation(copy, sec_busid);
+ count = rc;
+ }
+
+ return count;
+}
+static DEVICE_ATTR(copy_pair, 0644, dasd_copy_pair_show,
+ dasd_copy_pair_store);
+
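+/*
+ * Illustrative usage (the bus IDs are made up): a copy pair is
+ * configured by writing "primary,secondary" to either device of the
+ * pair and removed again with the literal keyword "clear":
+ *
+ *	echo 0.0.1234,0.0.5678 > /sys/bus/ccw/devices/0.0.1234/copy_pair
+ *	echo clear > /sys/bus/ccw/devices/0.0.1234/copy_pair
+ *
+ * As checked in dasd_copy_pair_store(), the secondary has to be offline
+ * while the relation is being set up.
+ */
+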
+static ssize_t
+dasd_copy_role_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_copy_relation *copy;
+ struct dasd_device *device;
+ int len, i;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if (!device->copy) {
+ len = sysfs_emit(buf, "none\n");
+ goto out;
+ }
+ copy = device->copy;
+ /* only the active device is primary */
+ if (copy->active->device == device) {
+ len = sysfs_emit(buf, "primary\n");
+ goto out;
+ }
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].device == device) {
+ len = sysfs_emit(buf, "secondary\n");
+ goto out;
+ }
+ }
+ /* not in the list, no COPY role */
+ len = sysfs_emit(buf, "none\n");
+out:
+ dasd_put_device(device);
+ return len;
+}
+static DEVICE_ATTR(copy_role, 0444, dasd_copy_role_show, NULL);
+
+static ssize_t dasd_device_ping(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+	ssize_t rc;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ /*
+	 * Do not try during offline processing; this is an early check
+	 * only. The sleep_on function itself checks for offline
+	 * processing again.
+ */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ rc = -EBUSY;
+ goto out;
+ }
+ if (!device->discipline || !device->discipline->device_ping) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ rc = device->discipline->device_ping(device);
+ if (!rc)
+ rc = count;
+out:
+ dasd_put_device(device);
+ return rc;
+}
+static DEVICE_ATTR(ping, 0200, NULL, dasd_device_ping);
+
+#define DASD_DEFINE_ATTR(_name, _func) \
+static ssize_t dasd_##_name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ccw_device *cdev = to_ccwdev(dev); \
+ struct dasd_device *device = dasd_device_from_cdev(cdev); \
+ int val = 0; \
+ \
+ if (IS_ERR(device)) \
+ return -ENODEV; \
+ if (device->discipline && _func) \
+ val = _func(device); \
+ dasd_put_device(device); \
+ \
+ return sysfs_emit(buf, "%d\n", val); \
+} \
+static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL); \
+
+DASD_DEFINE_ATTR(ese, device->discipline->is_ese);
+DASD_DEFINE_ATTR(extent_size, device->discipline->ext_size);
+DASD_DEFINE_ATTR(pool_id, device->discipline->ext_pool_id);
+DASD_DEFINE_ATTR(space_configured, device->discipline->space_configured);
+DASD_DEFINE_ATTR(space_allocated, device->discipline->space_allocated);
+DASD_DEFINE_ATTR(logical_capacity, device->discipline->logical_capacity);
+DASD_DEFINE_ATTR(warn_threshold, device->discipline->ext_pool_warn_thrshld);
+DASD_DEFINE_ATTR(cap_at_warnlevel, device->discipline->ext_pool_cap_at_warnlevel);
+DASD_DEFINE_ATTR(pool_oos, device->discipline->ext_pool_oos);
+
+static struct attribute * dasd_attrs[] = {
+ &dev_attr_readonly.attr,
+ &dev_attr_discipline.attr,
+ &dev_attr_status.attr,
+ &dev_attr_alias.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_uid.attr,
+ &dev_attr_use_diag.attr,
+ &dev_attr_raw_track_access.attr,
+ &dev_attr_eer_enabled.attr,
+ &dev_attr_erplog.attr,
+ &dev_attr_failfast.attr,
+ &dev_attr_expires.attr,
+ &dev_attr_retries.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_reservation_policy.attr,
+ &dev_attr_last_known_reservation_state.attr,
+ &dev_attr_safe_offline.attr,
+ &dev_attr_host_access_count.attr,
+ &dev_attr_path_masks.attr,
+ &dev_attr_path_threshold.attr,
+ &dev_attr_path_autodisable.attr,
+ &dev_attr_path_interval.attr,
+ &dev_attr_path_reset.attr,
+ &dev_attr_hpf.attr,
+ &dev_attr_ese.attr,
+ &dev_attr_fc_security.attr,
+ &dev_attr_copy_pair.attr,
+ &dev_attr_copy_role.attr,
+ &dev_attr_ping.attr,
+ &dev_attr_aq_mask.attr,
+ &dev_attr_aq_requeue.attr,
+ &dev_attr_aq_timeouts.attr,
+ NULL,
+};
+
+static const struct attribute_group dasd_attr_group = {
+ .attrs = dasd_attrs,
+};
+
+static struct attribute *capacity_attrs[] = {
+ &dev_attr_space_configured.attr,
+ &dev_attr_space_allocated.attr,
+ &dev_attr_logical_capacity.attr,
+ NULL,
+};
+
+static const struct attribute_group capacity_attr_group = {
+ .name = "capacity",
+ .attrs = capacity_attrs,
+};
+
+static struct attribute *ext_pool_attrs[] = {
+ &dev_attr_pool_id.attr,
+ &dev_attr_extent_size.attr,
+ &dev_attr_warn_threshold.attr,
+ &dev_attr_cap_at_warnlevel.attr,
+ &dev_attr_pool_oos.attr,
+ NULL,
+};
+
+static const struct attribute_group ext_pool_attr_group = {
+ .name = "extent_pool",
+ .attrs = ext_pool_attrs,
+};
+
+const struct attribute_group *dasd_dev_groups[] = {
+ &dasd_attr_group,
+ &capacity_attr_group,
+ &ext_pool_attr_group,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(dasd_dev_groups);
+
+/*
+ * Return value of the specified feature.
+ */
+int
+dasd_get_feature(struct ccw_device *cdev, int feature)
+{
+ struct dasd_devmap *devmap;
+
+ devmap = dasd_find_busid(dev_name(&cdev->dev));
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+
+ return ((devmap->features & feature) != 0);
+}
+
+/*
+ * Set / reset given feature.
+ * Flag indicates whether to set (!=0) or the reset (=0) the feature.
+ */
+int
+dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
+{
+ struct dasd_devmap *devmap;
+
+ devmap = dasd_devmap_from_cdev(cdev);
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+
+ spin_lock(&dasd_devmap_lock);
+ if (flag)
+ devmap->features |= feature;
+ else
+ devmap->features &= ~feature;
+ if (devmap->device)
+ devmap->device->features = devmap->features;
+ spin_unlock(&dasd_devmap_lock);
+ return 0;
+}
+EXPORT_SYMBOL(dasd_set_feature);
+
+static struct attribute *paths_info_attrs[] = {
+ &path_fcs_attribute.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(paths_info);
+
+static struct kobj_type path_attr_type = {
+ .release = dasd_path_release,
+ .default_groups = paths_info_groups,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void dasd_path_init_kobj(struct dasd_device *device, int chp)
+{
+ device->path[chp].kobj.kset = device->paths_info;
+ kobject_init(&device->path[chp].kobj, &path_attr_type);
+}
+
+void dasd_path_create_kobj(struct dasd_device *device, int chp)
+{
+ int rc;
+
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
+ return;
+ if (!device->paths_info) {
+ dev_warn(&device->cdev->dev, "Unable to create paths objects\n");
+ return;
+ }
+ if (device->path[chp].in_sysfs)
+ return;
+ if (!device->path[chp].conf_data)
+ return;
+
+ dasd_path_init_kobj(device, chp);
+
+	rc = kobject_add(&device->path[chp].kobj, NULL, "%x.%02x",
+			 device->path[chp].cssid, device->path[chp].chpid);
+	if (rc) {
+		/* do not mark the path as present in sysfs on failure */
+		kobject_put(&device->path[chp].kobj);
+		return;
+	}
+	device->path[chp].in_sysfs = true;
+}
+EXPORT_SYMBOL(dasd_path_create_kobj);
+
+void dasd_path_create_kobjects(struct dasd_device *device)
+{
+ u8 lpm, opm;
+
+ opm = dasd_path_get_opm(device);
+ for (lpm = 0x80; lpm; lpm >>= 1) {
+ if (!(lpm & opm))
+ continue;
+ dasd_path_create_kobj(device, pathmask_to_pos(lpm));
+ }
+}
+EXPORT_SYMBOL(dasd_path_create_kobjects);
+
+static void dasd_path_remove_kobj(struct dasd_device *device, int chp)
+{
+ if (device->path[chp].in_sysfs) {
+ kobject_put(&device->path[chp].kobj);
+ device->path[chp].in_sysfs = false;
+ }
+}
+
+/*
+ * As we keep kobjects for the lifetime of a device, this function must not be
+ * called anywhere but in the context of offlining a device.
+ */
+void dasd_path_remove_kobjects(struct dasd_device *device)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ dasd_path_remove_kobj(device, i);
+}
+EXPORT_SYMBOL(dasd_path_remove_kobjects);
+
+int
+dasd_devmap_init(void)
+{
+ int i;
+
+ /* Initialize devmap structures. */
+ dasd_max_devindex = 0;
+ for (i = 0; i < 256; i++)
+ INIT_LIST_HEAD(&dasd_hashlists[i]);
+ return 0;
+}
+
+void
+dasd_devmap_exit(void)
+{
+ dasd_forget_ranges();
+}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
new file mode 100644
index 0000000000..2e4e555b37
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Based on.......: linux/drivers/s390/block/mdisk.c
+ * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2000
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/kernel_stat.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <asm/asm-extable.h>
+#include <asm/dasd.h>
+#include <asm/debug.h>
+#include <asm/diag.h>
+#include <asm/ebcdic.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/vtoc.h>
+
+#include "dasd_int.h"
+#include "dasd_diag.h"
+
+#define PRINTK_HEADER "dasd(diag):"
+
+MODULE_LICENSE("GPL");
+
+/* The maximum number of blocks per request (max_blocks) is dependent on the
+ * amount of storage that is available in the static I/O buffer for each
+ * device. Currently each device gets 2 pages. We want to fit two requests
+ * into the available memory so that we can immediately start the next if one
+ * finishes. */
+#define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
+ sizeof(struct dasd_diag_req)) / \
+ sizeof(struct dasd_diag_bio)) / 2)
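+/* Illustrative only: with 4 KiB pages and typical (config-dependent)
+ * sizes of struct dasd_ccw_req and struct dasd_diag_req, the two pages
+ * hold roughly 300 of the 24-byte struct dasd_diag_bio entries, so
+ * DIAG_MAX_BLOCKS ends up in the order of 150 blocks per request. */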
+#define DIAG_MAX_RETRIES 32
+#define DIAG_TIMEOUT 50
+
+static struct dasd_discipline dasd_diag_discipline;
+
+struct dasd_diag_private {
+ struct dasd_diag_characteristics rdc_data;
+ struct dasd_diag_rw_io iob;
+ struct dasd_diag_init_io iib;
+ blocknum_t pt_block;
+ struct ccw_dev_id dev_id;
+};
+
+struct dasd_diag_req {
+ unsigned int block_count;
+ struct dasd_diag_bio bio[];
+};
+
+static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
+
+/* Perform DIAG250 call with block I/O parameter list iob (input and output)
+ * and function code cmd.
+ * In case of an exception return 3. Otherwise return result of bitwise OR of
+ * resulting condition code and DIAG return code. */
+static inline int __dia250(void *iob, int cmd)
+{
+ union register_pair rx = { .even = (unsigned long)iob, };
+ typedef union {
+ struct dasd_diag_init_io init_io;
+ struct dasd_diag_rw_io rw_io;
+ } addr_type;
+ int cc;
+
+ cc = 3;
+ asm volatile(
+ " diag %[rx],%[cmd],0x250\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob)
+ : [cmd] "d" (cmd)
+ : "cc");
+ return cc | rx.odd;
+}
+
+static inline int dia250(void *iob, int cmd)
+{
+ diag_stat_inc(DIAG_STAT_X250);
+ return __dia250(iob, cmd);
+}
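+
+/*
+ * Return codes of dia250() as interpreted by this driver (see the
+ * callers below): 0 - synchronous I/O completed successfully,
+ * 4 - device is read-only, 8 - asynchronous I/O started, 3 - an
+ * exception occurred during the DIAG instruction. Other values
+ * indicate error conditions.
+ */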
+
+/* Initialize block I/O to DIAG device using the specified blocksize and
+ * block offset. On success, return zero and set end_block to contain the
+ * number of blocks on the device minus the specified offset. Return non-zero
+ * otherwise. */
+static inline int
+mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
+ blocknum_t offset, blocknum_t *end_block)
+{
+ struct dasd_diag_private *private = device->private;
+ struct dasd_diag_init_io *iib = &private->iib;
+ int rc;
+
+ memset(iib, 0, sizeof (struct dasd_diag_init_io));
+
+ iib->dev_nr = private->dev_id.devno;
+ iib->block_size = blocksize;
+ iib->offset = offset;
+ iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
+
+ rc = dia250(iib, INIT_BIO);
+
+ if ((rc & 3) == 0 && end_block)
+ *end_block = iib->end_block;
+
+ return rc;
+}
+
+/* Remove block I/O environment for device. Return zero on success, non-zero
+ * otherwise. */
+static inline int
+mdsk_term_io(struct dasd_device * device)
+{
+ struct dasd_diag_private *private = device->private;
+ struct dasd_diag_init_io *iib = &private->iib;
+ int rc;
+
+ memset(iib, 0, sizeof (struct dasd_diag_init_io));
+ iib->dev_nr = private->dev_id.devno;
+ rc = dia250(iib, TERM_BIO);
+ return rc;
+}
+
+/* Error recovery for failed DIAG requests - try to reestablish the DIAG
+ * environment. */
+static void
+dasd_diag_erp(struct dasd_device *device)
+{
+ int rc;
+
+ mdsk_term_io(device);
+ rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
+ if (rc == 4) {
+ if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
+ pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
+ dev_name(&device->cdev->dev));
+ rc = 0;
+ }
+ if (rc)
+ pr_warn("%s: DIAG ERP failed with rc=%d\n",
+ dev_name(&device->cdev->dev), rc);
+}
+
+/* Start a given request at the device. Return zero on success, non-zero
+ * otherwise. */
+static int
+dasd_start_diag(struct dasd_ccw_req * cqr)
+{
+ struct dasd_device *device;
+ struct dasd_diag_private *private;
+ struct dasd_diag_req *dreq;
+ int rc;
+
+ device = cqr->startdev;
+ if (cqr->retries < 0) {
+ DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
+ "- no retry left)", cqr);
+ cqr->status = DASD_CQR_ERROR;
+ return -EIO;
+ }
+ private = device->private;
+ dreq = cqr->data;
+
+ private->iob.dev_nr = private->dev_id.devno;
+ private->iob.key = 0;
+ private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
+ private->iob.block_count = dreq->block_count;
+ private->iob.interrupt_params = (addr_t) cqr;
+ private->iob.bio_list = dreq->bio;
+ private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
+
+ cqr->startclk = get_tod_clock();
+ cqr->starttime = jiffies;
+ cqr->retries--;
+
+ rc = dia250(&private->iob, RW_BIO);
+ switch (rc) {
+ case 0: /* Synchronous I/O finished successfully */
+ cqr->stopclk = get_tod_clock();
+ cqr->status = DASD_CQR_SUCCESS;
+		/* Indicate to the calling function that only a
+		   dasd_schedule_device_bh() call and no timer is needed */
+ rc = -EACCES;
+ break;
+ case 8: /* Asynchronous I/O was started */
+ cqr->status = DASD_CQR_IN_IO;
+ rc = 0;
+ break;
+ default: /* Error condition */
+ cqr->status = DASD_CQR_QUEUED;
+ DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
+ dasd_diag_erp(device);
+ rc = -EIO;
+ break;
+ }
+ cqr->intrc = rc;
+ return rc;
+}
+
+/* Terminate given request at the device. */
+static int
+dasd_diag_term_IO(struct dasd_ccw_req * cqr)
+{
+ struct dasd_device *device;
+
+ device = cqr->startdev;
+ mdsk_term_io(device);
+ mdsk_init_io(device, device->block->bp_block, 0, NULL);
+ cqr->status = DASD_CQR_CLEAR_PENDING;
+ cqr->stopclk = get_tod_clock();
+ dasd_schedule_device_bh(device);
+ return 0;
+}
+
+/* Handle external interruption. */
+static void dasd_ext_handler(struct ext_code ext_code,
+ unsigned int param32, unsigned long param64)
+{
+ struct dasd_ccw_req *cqr, *next;
+ struct dasd_device *device;
+ unsigned long expires;
+ unsigned long flags;
+ addr_t ip;
+ int rc;
+
+ switch (ext_code.subcode >> 8) {
+ case DASD_DIAG_CODE_31BIT:
+ ip = (addr_t) param32;
+ break;
+ case DASD_DIAG_CODE_64BIT:
+ ip = (addr_t) param64;
+ break;
+ default:
+ return;
+ }
+ inc_irq_stat(IRQEXT_DSD);
+ if (!ip) { /* no intparm: unsolicited interrupt */
+ DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
+ "interrupt");
+ return;
+ }
+ cqr = (struct dasd_ccw_req *) ip;
+ device = (struct dasd_device *) cqr->startdev;
+ if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ " magic number of dasd_ccw_req 0x%08X doesn't"
+ " match discipline 0x%08X",
+ cqr->magic, *(int *) (&device->discipline->name));
+ return;
+ }
+
+ /* get irq lock to modify request queue */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+
+ /* Check for a pending clear operation */
+ if (cqr->status == DASD_CQR_CLEAR_PENDING) {
+ cqr->status = DASD_CQR_CLEARED;
+ dasd_device_clear_timer(device);
+ dasd_schedule_device_bh(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return;
+ }
+
+ cqr->stopclk = get_tod_clock();
+
+ expires = 0;
+ if ((ext_code.subcode & 0xff) == 0) {
+ cqr->status = DASD_CQR_SUCCESS;
+ /* Start first request on queue if possible -> fast_io. */
+ if (!list_empty(&device->ccw_queue)) {
+ next = list_entry(device->ccw_queue.next,
+ struct dasd_ccw_req, devlist);
+ if (next->status == DASD_CQR_QUEUED) {
+ rc = dasd_start_diag(next);
+ if (rc == 0)
+ expires = next->expires;
+ }
+ }
+ } else {
+ cqr->status = DASD_CQR_QUEUED;
+ DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
+ "request %p was %d (%d retries left)", cqr,
+ ext_code.subcode & 0xff, cqr->retries);
+ dasd_diag_erp(device);
+ }
+
+ if (expires != 0)
+ dasd_device_set_timer(device, expires);
+ else
+ dasd_device_clear_timer(device);
+ dasd_schedule_device_bh(device);
+
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
+/* Check whether device can be controlled by DIAG discipline. Return zero on
+ * success, non-zero otherwise. */
+static int
+dasd_diag_check_device(struct dasd_device *device)
+{
+ struct dasd_diag_private *private = device->private;
+ struct dasd_diag_characteristics *rdc_data;
+ struct vtoc_cms_label *label;
+ struct dasd_block *block;
+ struct dasd_diag_bio *bio;
+ unsigned int sb, bsize;
+ blocknum_t end_block;
+ int rc;
+
+ if (private == NULL) {
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
+ if (private == NULL) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Allocating memory for private DASD data "
+ "failed\n");
+ return -ENOMEM;
+ }
+ ccw_device_get_id(device->cdev, &private->dev_id);
+ device->private = private;
+ }
+ block = dasd_alloc_block();
+ if (IS_ERR(block)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "could not allocate dasd block structure");
+ device->private = NULL;
+ kfree(private);
+ return PTR_ERR(block);
+ }
+ device->block = block;
+ block->base = device;
+
+ /* Read Device Characteristics */
+ rdc_data = &private->rdc_data;
+ rdc_data->dev_nr = private->dev_id.devno;
+ rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
+
+ rc = diag210((struct diag210 *) rdc_data);
+ if (rc) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
+ "information (rc=%d)", rc);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ device->default_expires = DIAG_TIMEOUT;
+ device->default_retries = DIAG_MAX_RETRIES;
+
+ /* Figure out position of label block */
+ switch (private->rdc_data.vdev_class) {
+ case DEV_CLASS_FBA:
+ private->pt_block = 1;
+ break;
+ case DEV_CLASS_ECKD:
+ private->pt_block = 2;
+ break;
+ default:
+ pr_warn("%s: Device type %d is not supported in DIAG mode\n",
+ dev_name(&device->cdev->dev),
+ private->rdc_data.vdev_class);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ DBF_DEV_EVENT(DBF_INFO, device,
+ "%04X: %04X on real %04X/%02X",
+ rdc_data->dev_nr,
+ rdc_data->vdev_type,
+ rdc_data->rdev_type, rdc_data->rdev_model);
+
+ /* terminate all outstanding operations */
+ mdsk_term_io(device);
+
+ /* figure out blocksize of device */
+ label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
+ if (label == NULL) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "No memory to allocate initialization request");
+ rc = -ENOMEM;
+ goto out;
+ }
+ bio = kzalloc(sizeof(*bio), GFP_KERNEL);
+ if (bio == NULL) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "No memory to allocate initialization bio");
+ rc = -ENOMEM;
+ goto out_label;
+ }
+ rc = 0;
+ end_block = 0;
+ /* try all sizes - needed for ECKD devices */
+ for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
+ mdsk_init_io(device, bsize, 0, &end_block);
+ memset(bio, 0, sizeof(*bio));
+ bio->type = MDSK_READ_REQ;
+ bio->block_number = private->pt_block + 1;
+ bio->buffer = label;
+ memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
+ private->iob.dev_nr = rdc_data->dev_nr;
+ private->iob.key = 0;
+ private->iob.flags = 0; /* do synchronous io */
+ private->iob.block_count = 1;
+ private->iob.interrupt_params = 0;
+ private->iob.bio_list = bio;
+ private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
+ rc = dia250(&private->iob, RW_BIO);
+ if (rc == 3) {
+ pr_warn("%s: A 64-bit DIAG call failed\n",
+ dev_name(&device->cdev->dev));
+ rc = -EOPNOTSUPP;
+ goto out_bio;
+ }
+ mdsk_term_io(device);
+ if (rc == 0)
+ break;
+ }
+ if (bsize > PAGE_SIZE) {
+ pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
+ dev_name(&device->cdev->dev), rc);
+ rc = -EIO;
+ goto out_bio;
+ }
+ /* check for label block */
+ if (memcmp(label->label_id, DASD_DIAG_CMS1,
+ sizeof(DASD_DIAG_CMS1)) == 0) {
+ /* get formatted blocksize from label block */
+ bsize = (unsigned int) label->block_size;
+ block->blocks = (unsigned long) label->block_count;
+ } else
+ block->blocks = end_block;
+ block->bp_block = bsize;
+ block->s2b_shift = 0; /* bits to shift 512 to get a block */
+ for (sb = 512; sb < bsize; sb = sb << 1)
+ block->s2b_shift++;
+ rc = mdsk_init_io(device, block->bp_block, 0, NULL);
+ if (rc && (rc != 4)) {
+ pr_warn("%s: DIAG initialization failed with rc=%d\n",
+ dev_name(&device->cdev->dev), rc);
+ rc = -EIO;
+ } else {
+ if (rc == 4)
+ set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+ pr_info("%s: New DASD with %ld byte/block, total size %ld "
+ "KB%s\n", dev_name(&device->cdev->dev),
+ (unsigned long) block->bp_block,
+ (unsigned long) (block->blocks <<
+ block->s2b_shift) >> 1,
+ (rc == 4) ? ", read-only device" : "");
+ rc = 0;
+ }
+out_bio:
+ kfree(bio);
+out_label:
+ free_page((long) label);
+out:
+ if (rc) {
+ device->block = NULL;
+ dasd_free_block(block);
+ device->private = NULL;
+ kfree(private);
+ }
+ return rc;
+}
+
+/* Fill in virtual disk geometry for device. Return zero on success, non-zero
+ * otherwise. */
+static int
+dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
+{
+ if (dasd_check_blocksize(block->bp_block) != 0)
+ return -EINVAL;
+ geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
+ geo->heads = 16;
+ geo->sectors = 128 >> block->s2b_shift;
+ return 0;
+}
+
+static dasd_erp_fn_t
+dasd_diag_erp_action(struct dasd_ccw_req * cqr)
+{
+ return dasd_default_erp_action;
+}
+
+static dasd_erp_fn_t
+dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
+{
+ return dasd_default_erp_postaction;
+}
+
+/* Create DASD request from block device request. Return pointer to new
+ * request on success, ERR_PTR otherwise. */
+static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
+ struct dasd_block *block,
+ struct request *req)
+{
+ struct dasd_ccw_req *cqr;
+ struct dasd_diag_req *dreq;
+ struct dasd_diag_bio *dbio;
+ struct req_iterator iter;
+ struct bio_vec bv;
+ char *dst;
+ unsigned int count;
+ sector_t recid, first_rec, last_rec;
+ unsigned int blksize, off;
+ unsigned char rw_cmd;
+
+ if (rq_data_dir(req) == READ)
+ rw_cmd = MDSK_READ_REQ;
+ else if (rq_data_dir(req) == WRITE)
+ rw_cmd = MDSK_WRITE_REQ;
+ else
+ return ERR_PTR(-EINVAL);
+ blksize = block->bp_block;
+ /* Calculate record id of first and last block. */
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+ /* Check struct bio and count the number of blocks for the request. */
+ count = 0;
+ rq_for_each_segment(bv, req, iter) {
+ if (bv.bv_len & (blksize - 1))
+			/* DIAG I/O works on full blocks only. */
+ return ERR_PTR(-EINVAL);
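+		/* convert bytes to blocks: a block is 512 << s2b_shift
+		 * bytes, i.e. divide by 1 << (s2b_shift + 9) */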
+ count += bv.bv_len >> (block->s2b_shift + 9);
+ }
+ /* Paranoia. */
+ if (count != last_rec - first_rec + 1)
+ return ERR_PTR(-EINVAL);
+ /* Build the request */
+ cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),
+ memdev, blk_mq_rq_to_pdu(req));
+ if (IS_ERR(cqr))
+ return cqr;
+
+ dreq = (struct dasd_diag_req *) cqr->data;
+ dreq->block_count = count;
+ dbio = dreq->bio;
+ recid = first_rec;
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ for (off = 0; off < bv.bv_len; off += blksize) {
+ memset(dbio, 0, sizeof (struct dasd_diag_bio));
+ dbio->type = rw_cmd;
+ dbio->block_number = recid + 1;
+ dbio->buffer = dst;
+ dbio++;
+ dst += blksize;
+ recid++;
+ }
+ }
+ cqr->retries = memdev->default_retries;
+ cqr->buildclk = get_tod_clock();
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->startdev = memdev;
+ cqr->memdev = memdev;
+ cqr->block = block;
+ cqr->expires = memdev->default_expires * HZ;
+ cqr->status = DASD_CQR_FILLED;
+ return cqr;
+}
+
+/* Release DASD request. Return non-zero if request was successful, zero
+ * otherwise. */
+static int
+dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+ int status;
+
+ status = cqr->status == DASD_CQR_DONE;
+ dasd_sfree_request(cqr, cqr->memdev);
+ return status;
+}
+
+static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+ if (cqr->retries < 0)
+ cqr->status = DASD_CQR_FAILED;
+ else
+ cqr->status = DASD_CQR_FILLED;
+}
+
+/* Fill in IOCTL data for device. */
+static int
+dasd_diag_fill_info(struct dasd_device * device,
+ struct dasd_information2_t * info)
+{
+ struct dasd_diag_private *private = device->private;
+
+ info->label_block = (unsigned int) private->pt_block;
+ info->FBA_layout = 1;
+ info->format = DASD_FORMAT_LDL;
+ info->characteristics_size = sizeof(private->rdc_data);
+ memcpy(info->characteristics, &private->rdc_data,
+ sizeof(private->rdc_data));
+ info->confdata_size = 0;
+ return 0;
+}
+
+static void
+dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
+ struct irb *stat)
+{
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "dump sense not available for DIAG data");
+}
+
+/*
+ * Initialize block layer request queue.
+ */
+static void dasd_diag_setup_blk_queue(struct dasd_block *block)
+{
+ unsigned int logical_block_size = block->bp_block;
+ struct request_queue *q = block->gdp->queue;
+ int max;
+
+ max = DIAG_MAX_BLOCKS << block->s2b_shift;
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ q->limits.max_dev_sectors = max;
+ blk_queue_logical_block_size(q, logical_block_size);
+ blk_queue_max_hw_sectors(q, max);
+ blk_queue_max_segments(q, USHRT_MAX);
+ /* With page sized segments each segment can be translated into one idaw/tidaw */
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+}
+
+static int dasd_diag_pe_handler(struct dasd_device *device,
+ __u8 tbvpm, __u8 fcsecpm)
+{
+ return dasd_generic_verify_path(device, tbvpm);
+}
+
+static struct dasd_discipline dasd_diag_discipline = {
+ .owner = THIS_MODULE,
+ .name = "DIAG",
+ .ebcname = "DIAG",
+ .check_device = dasd_diag_check_device,
+ .pe_handler = dasd_diag_pe_handler,
+ .fill_geometry = dasd_diag_fill_geometry,
+ .setup_blk_queue = dasd_diag_setup_blk_queue,
+ .start_IO = dasd_start_diag,
+ .term_IO = dasd_diag_term_IO,
+ .handle_terminated_request = dasd_diag_handle_terminated_request,
+ .erp_action = dasd_diag_erp_action,
+ .erp_postaction = dasd_diag_erp_postaction,
+ .build_cp = dasd_diag_build_cp,
+ .free_cp = dasd_diag_free_cp,
+ .dump_sense = dasd_diag_dump_sense,
+ .fill_info = dasd_diag_fill_info,
+};
+
+static int __init
+dasd_diag_init(void)
+{
+ if (!MACHINE_IS_VM) {
+ pr_info("Discipline %s cannot be used without z/VM\n",
+ dasd_diag_discipline.name);
+ return -ENODEV;
+ }
+ ASCEBC(dasd_diag_discipline.ebcname, 4);
+
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+ register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
+ dasd_diag_discipline_pointer = &dasd_diag_discipline;
+ return 0;
+}
+
+static void __exit
+dasd_diag_cleanup(void)
+{
+ unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
+ dasd_diag_discipline_pointer = NULL;
+}
+
+module_init(dasd_diag_init);
+module_exit(dasd_diag_cleanup);
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
new file mode 100644
index 0000000000..405b6feed4
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Based on.......: linux/drivers/s390/block/mdisk.h
+ * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2000
+ *
+ */
+
+#define MDSK_WRITE_REQ 0x01
+#define MDSK_READ_REQ 0x02
+
+#define INIT_BIO 0x00
+#define RW_BIO 0x01
+#define TERM_BIO 0x02
+
+#define DEV_CLASS_FBA 0x01
+#define DEV_CLASS_ECKD 0x04
+
+#define DASD_DIAG_CODE_31BIT 0x03
+#define DASD_DIAG_CODE_64BIT 0x07
+
+#define DASD_DIAG_RWFLAG_ASYNC 0x02
+#define DASD_DIAG_RWFLAG_NOCACHE 0x01
+
+#define DASD_DIAG_FLAGA_FORMAT_64BIT 0x80
+
+struct dasd_diag_characteristics {
+ u16 dev_nr;
+ u16 rdc_len;
+ u8 vdev_class;
+ u8 vdev_type;
+ u8 vdev_status;
+ u8 vdev_flags;
+ u8 rdev_class;
+ u8 rdev_type;
+ u8 rdev_model;
+ u8 rdev_features;
+} __attribute__ ((packed, aligned(4)));
+
+#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT
+
+typedef u64 blocknum_t;
+typedef s64 sblocknum_t;
+
+struct dasd_diag_bio {
+ u8 type;
+ u8 status;
+ u8 spare1[2];
+ u32 alet;
+ blocknum_t block_number;
+ void *buffer;
+} __attribute__ ((packed, aligned(8)));
+
+struct dasd_diag_init_io {
+ u16 dev_nr;
+ u8 flaga;
+ u8 spare1[21];
+ u32 block_size;
+ u8 spare2[4];
+ blocknum_t offset;
+ sblocknum_t start_block;
+ blocknum_t end_block;
+ u8 spare3[8];
+} __attribute__ ((packed, aligned(8)));
+
+struct dasd_diag_rw_io {
+ u16 dev_nr;
+ u8 flaga;
+ u8 spare1[21];
+ u8 key;
+ u8 flags;
+ u8 spare2[2];
+ u32 block_count;
+ u32 alet;
+ u8 spare3[4];
+ u64 interrupt_params;
+ struct dasd_diag_bio *bio_list;
+ u8 spare4[8];
+} __attribute__ ((packed, aligned(8)));
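+
+/*
+ * Note: struct dasd_diag_init_io and struct dasd_diag_rw_io map the
+ * z/VM DIAGNOSE X'250' parameter lists byte for byte, and struct
+ * dasd_diag_bio is the corresponding block I/O list entry; hence the
+ * packed and aligned(8) attributes.
+ */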
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
new file mode 100644
index 0000000000..bd89b03296
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.c
@@ -0,0 +1,7027 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
+ * Author.........: Nigel Hislop <hislop_nigel@emc.com>
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h> /* HDIO_GETGEO */
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/compat.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/css_chars.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/itcw.h>
+#include <asm/schid.h>
+#include <asm/chpid.h>
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif /* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eckd):"
+
+/*
+ * raw track access always map to 64k in memory
+ * so it maps to 16 blocks of 4k per track
+ */
+#define DASD_RAW_BLOCK_PER_TRACK 16
+#define DASD_RAW_BLOCKSIZE 4096
+/* 64k are 128 x 512 byte sectors */
+#define DASD_RAW_SECTORS_PER_TRACK 128
+
+MODULE_LICENSE("GPL");
+
+static struct dasd_discipline dasd_eckd_discipline;
+
+/* The ccw bus type uses this table to find devices that it sends to
+ * dasd_eckd_probe */
+static struct ccw_device_id dasd_eckd_ids[] = {
+ { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
+ { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
+ { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
+ { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
+ { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
+ { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
+ { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
+ { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
+ { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
+ { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
+
+static struct ccw_driver dasd_eckd_driver; /* see below */
+
+static void *rawpadpage;
+
+#define INIT_CQR_OK 0
+#define INIT_CQR_UNFORMATTED 1
+#define INIT_CQR_ERROR 2
+
+/* emergency request for reserve/release */
+static struct {
+ struct dasd_ccw_req cqr;
+ struct ccw1 ccw;
+ char data[32];
+} *dasd_reserve_req;
+static DEFINE_MUTEX(dasd_reserve_mutex);
+
+static struct {
+ struct dasd_ccw_req cqr;
+ struct ccw1 ccw[2];
+ char data[40];
+} *dasd_vol_info_req;
+static DEFINE_MUTEX(dasd_vol_info_mutex);
+
+struct ext_pool_exhaust_work_data {
+ struct work_struct worker;
+ struct dasd_device *device;
+ struct dasd_device *base;
+};
+
+/* definitions for the path verification worker */
+struct pe_handler_work_data {
+ struct work_struct worker;
+ struct dasd_device *device;
+ struct dasd_ccw_req cqr;
+ struct ccw1 ccw;
+ __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
+ int isglobal;
+ __u8 tbvpm;
+ __u8 fcsecpm;
+};
+static struct pe_handler_work_data *pe_handler_worker;
+static DEFINE_MUTEX(dasd_pe_handler_mutex);
+
+struct check_attention_work_data {
+ struct work_struct worker;
+ struct dasd_device *device;
+ __u8 lpum;
+};
+
+static int dasd_eckd_ext_pool_id(struct dasd_device *);
+static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
+ struct dasd_device *, struct dasd_device *,
+ unsigned int, int, unsigned int, unsigned int,
+ unsigned int, unsigned int);
+static int dasd_eckd_query_pprc_status(struct dasd_device *,
+ struct dasd_pprc_data_sc4 *);
+
+/* initial attempt at a probe function. this can be simplified once
+ * the other detection code is gone */
+static int
+dasd_eckd_probe (struct ccw_device *cdev)
+{
+ int ret;
+
+ /* set ECKD specific ccw-device options */
+ ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
+ CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
+ if (ret) {
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+ "dasd_eckd_probe: could not set "
+ "ccw-device options");
+ return ret;
+ }
+ ret = dasd_generic_probe(cdev);
+ return ret;
+}
+
+static int
+dasd_eckd_set_online(struct ccw_device *cdev)
+{
+ return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
+}
+
+static const int sizes_trk0[] = { 28, 148, 84 };
+#define LABEL_SIZE 140
+
+/* head and record addresses of count_area read in analysis ccw */
+static const int count_area_head[] = { 0, 0, 0, 0, 1 };
+static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
+
+static inline unsigned int
+ceil_quot(unsigned int d1, unsigned int d2)
+{
+ return (d1 + (d2 - 1)) / d2;
+}
+
+static unsigned int
+recs_per_track(struct dasd_eckd_characteristics * rdc,
+ unsigned int kl, unsigned int dl)
+{
+ int dn, kn;
+
+ switch (rdc->dev_type) {
+ case 0x3380:
+ if (kl)
+ return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
+ ceil_quot(dl + 12, 32));
+ else
+ return 1499 / (15 + ceil_quot(dl + 12, 32));
+ case 0x3390:
+ dn = ceil_quot(dl + 6, 232) + 1;
+ if (kl) {
+ kn = ceil_quot(kl + 6, 232) + 1;
+ return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
+ 9 + ceil_quot(dl + 6 * dn, 34));
+ } else
+ return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
+ case 0x9345:
+ dn = ceil_quot(dl + 6, 232) + 1;
+ if (kl) {
+ kn = ceil_quot(kl + 6, 232) + 1;
+ return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
+ ceil_quot(dl + 6 * dn, 34));
+ } else
+ return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
+ }
+ return 0;
+}
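+
+/*
+ * The constants above follow the IBM track capacity formulas for the
+ * respective device types: each record consumes a fixed per-record
+ * overhead plus its key and data lengths rounded up to 32-byte (3380)
+ * or 34-byte (3390/9345) units, out of a fixed number of units per
+ * track, e.g. 1729 for a 3390.
+ */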
+
+static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
+{
+ geo->cyl = (__u16) cyl;
+ geo->head = cyl >> 16;
+ geo->head <<= 4;
+ geo->head |= head;
+}
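+
+/*
+ * set_ch_t() stores addresses in the 28-bit cylinder format used for
+ * large volumes: the low 16 cylinder bits go into geo->cyl, cylinder
+ * bits 16-27 are kept above the 4-bit head number in geo->head.
+ * Example: cylinder 0x12345, head 2 yields cyl = 0x2345, head = 0x12.
+ */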
+
+/*
+ * calculate failing track from sense data depending if
+ * it is an EAV device or not
+ */
+static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
+ sector_t *track)
+{
+ struct dasd_eckd_private *private = device->private;
+ u8 *sense = NULL;
+ u32 cyl;
+ u8 head;
+
+ sense = dasd_get_sense(irb);
+ if (!sense) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "ESE error no sense data\n");
+ return -EINVAL;
+ }
+ if (!(sense[27] & DASD_SENSE_BIT_2)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "ESE error no valid track data\n");
+ return -EINVAL;
+ }
+
+ if (sense[27] & DASD_SENSE_BIT_3) {
+ /* enhanced addressing */
+ cyl = sense[30] << 20;
+ cyl |= (sense[31] & 0xF0) << 12;
+ cyl |= sense[28] << 8;
+ cyl |= sense[29];
+ } else {
+ cyl = sense[29] << 8;
+ cyl |= sense[30];
+ }
+ head = sense[31] & 0x0F;
+ *track = cyl * private->rdc_data.trk_per_cyl + head;
+ return 0;
+}
+
+static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
+ struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int rc;
+
+ rc = get_phys_clock(&data->ep_sys_time);
+ /*
+ * Ignore return code if XRC is not supported or
+ * sync clock is switched off
+ */
+ if ((rc && !private->rdc_data.facilities.XRC_supported) ||
+ rc == -EOPNOTSUPP || rc == -EACCES)
+ return 0;
+
+ /* switch on System Time Stamp - needed for XRC Support */
+ data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
+ data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
+
+ if (ccw) {
+ ccw->count = sizeof(struct DE_eckd_data);
+ ccw->flags |= CCW_FLAG_SLI;
+ }
+
+ return rc;
+}
+
+static int
+define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
+ unsigned int totrk, int cmd, struct dasd_device *device,
+ int blksize)
+{
+ struct dasd_eckd_private *private = device->private;
+ u16 heads, beghead, endhead;
+ u32 begcyl, endcyl;
+ int rc = 0;
+
+ if (ccw) {
+ ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+ ccw->flags = 0;
+ ccw->count = 16;
+ ccw->cda = (__u32)virt_to_phys(data);
+ }
+
+ memset(data, 0, sizeof(struct DE_eckd_data));
+ switch (cmd) {
+ case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+ case DASD_ECKD_CCW_READ_RECORD_ZERO:
+ case DASD_ECKD_CCW_READ:
+ case DASD_ECKD_CCW_READ_MT:
+ case DASD_ECKD_CCW_READ_CKD:
+ case DASD_ECKD_CCW_READ_CKD_MT:
+ case DASD_ECKD_CCW_READ_KD:
+ case DASD_ECKD_CCW_READ_KD_MT:
+ data->mask.perm = 0x1;
+ data->attributes.operation = private->attrib.operation;
+ break;
+ case DASD_ECKD_CCW_READ_COUNT:
+ data->mask.perm = 0x1;
+ data->attributes.operation = DASD_BYPASS_CACHE;
+ break;
+ case DASD_ECKD_CCW_READ_TRACK:
+ case DASD_ECKD_CCW_READ_TRACK_DATA:
+ data->mask.perm = 0x1;
+ data->attributes.operation = private->attrib.operation;
+ data->blk_size = 0;
+ break;
+ case DASD_ECKD_CCW_WRITE:
+ case DASD_ECKD_CCW_WRITE_MT:
+ case DASD_ECKD_CCW_WRITE_KD:
+ case DASD_ECKD_CCW_WRITE_KD_MT:
+ data->mask.perm = 0x02;
+ data->attributes.operation = private->attrib.operation;
+ rc = set_timestamp(ccw, data, device);
+ break;
+ case DASD_ECKD_CCW_WRITE_CKD:
+ case DASD_ECKD_CCW_WRITE_CKD_MT:
+ data->attributes.operation = DASD_BYPASS_CACHE;
+ rc = set_timestamp(ccw, data, device);
+ break;
+ case DASD_ECKD_CCW_ERASE:
+ case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+ case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+ data->mask.perm = 0x3;
+ data->mask.auth = 0x1;
+ data->attributes.operation = DASD_BYPASS_CACHE;
+ rc = set_timestamp(ccw, data, device);
+ break;
+ case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+ data->mask.perm = 0x03;
+ data->attributes.operation = private->attrib.operation;
+ data->blk_size = 0;
+ break;
+ case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+ data->mask.perm = 0x02;
+ data->attributes.operation = private->attrib.operation;
+ data->blk_size = blksize;
+ rc = set_timestamp(ccw, data, device);
+ break;
+ default:
+ dev_err(&device->cdev->dev,
+ "0x%x is not a known command\n", cmd);
+ break;
+ }
+
+ data->attributes.mode = 0x3; /* ECKD */
+
+ if ((private->rdc_data.cu_type == 0x2105 ||
+ private->rdc_data.cu_type == 0x2107 ||
+ private->rdc_data.cu_type == 0x1750)
+ && !(private->uses_cdl && trk < 2))
+ data->ga_extended |= 0x40; /* Regular Data Format Mode */
+
+ heads = private->rdc_data.trk_per_cyl;
+ begcyl = trk / heads;
+ beghead = trk % heads;
+ endcyl = totrk / heads;
+ endhead = totrk % heads;
+
+ /* check for sequential prestage - enhance cylinder range */
+ if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
+ data->attributes.operation == DASD_SEQ_ACCESS) {
+
+ if (endcyl + private->attrib.nr_cyl < private->real_cyl)
+ endcyl += private->attrib.nr_cyl;
+ else
+ endcyl = (private->real_cyl - 1);
+ }
+
+ set_ch_t(&data->beg_ext, begcyl, beghead);
+ set_ch_t(&data->end_ext, endcyl, endhead);
+ return rc;
+}
+
+
+static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
+ unsigned int trk, unsigned int rec_on_trk,
+ int count, int cmd, struct dasd_device *device,
+ unsigned int reclen, unsigned int tlf)
+{
+ struct dasd_eckd_private *private = device->private;
+ int sector;
+ int dn, d;
+
+ if (ccw) {
+ ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
+ ccw->flags = 0;
+ if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
+ ccw->count = 22;
+ else
+ ccw->count = 20;
+ ccw->cda = (__u32)virt_to_phys(data);
+ }
+
+ memset(data, 0, sizeof(*data));
+ sector = 0;
+ if (rec_on_trk) {
+ switch (private->rdc_data.dev_type) {
+ case 0x3390:
+ dn = ceil_quot(reclen + 6, 232);
+ d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
+ sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+ break;
+ case 0x3380:
+ d = 7 + ceil_quot(reclen + 12, 32);
+ sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+ break;
+ }
+ }
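+	/* the computed sector approximates the angular position of the
+	 * record on the track and is used for rotational position sensing */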
+ data->sector = sector;
+ /* note: meaning of count depends on the operation
+ * for record based I/O it's the number of records, but for
+ * track based I/O it's the number of tracks
+ */
+ data->count = count;
+ switch (cmd) {
+ case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+ data->operation.orientation = 0x3;
+ data->operation.operation = 0x03;
+ break;
+ case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+ data->operation.orientation = 0x3;
+ data->operation.operation = 0x16;
+ break;
+ case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+ data->operation.orientation = 0x1;
+ data->operation.operation = 0x03;
+ data->count++;
+ break;
+ case DASD_ECKD_CCW_READ_RECORD_ZERO:
+ data->operation.orientation = 0x3;
+ data->operation.operation = 0x16;
+ data->count++;
+ break;
+ case DASD_ECKD_CCW_WRITE:
+ case DASD_ECKD_CCW_WRITE_MT:
+ case DASD_ECKD_CCW_WRITE_KD:
+ case DASD_ECKD_CCW_WRITE_KD_MT:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x01;
+ break;
+ case DASD_ECKD_CCW_WRITE_CKD:
+ case DASD_ECKD_CCW_WRITE_CKD_MT:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x03;
+ break;
+ case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+ data->operation.orientation = 0x0;
+ data->operation.operation = 0x3F;
+ data->extended_operation = 0x11;
+ data->length = 0;
+ data->extended_parameter_length = 0x02;
+ if (data->count > 8) {
+ data->extended_parameter[0] = 0xFF;
+ data->extended_parameter[1] = 0xFF;
+ data->extended_parameter[1] <<= (16 - count);
+ } else {
+ data->extended_parameter[0] = 0xFF;
+ data->extended_parameter[0] <<= (8 - count);
+ data->extended_parameter[1] = 0x00;
+ }
+ data->sector = 0xFF;
+ break;
+ case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen; /* not tlf, as one might think */
+ data->operation.operation = 0x3F;
+ data->extended_operation = 0x23;
+ break;
+ case DASD_ECKD_CCW_READ:
+ case DASD_ECKD_CCW_READ_MT:
+ case DASD_ECKD_CCW_READ_KD:
+ case DASD_ECKD_CCW_READ_KD_MT:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x06;
+ break;
+ case DASD_ECKD_CCW_READ_CKD:
+ case DASD_ECKD_CCW_READ_CKD_MT:
+ data->auxiliary.length_valid = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x16;
+ break;
+ case DASD_ECKD_CCW_READ_COUNT:
+ data->operation.operation = 0x06;
+ break;
+ case DASD_ECKD_CCW_READ_TRACK:
+ data->operation.orientation = 0x1;
+ data->operation.operation = 0x0C;
+ data->extended_parameter_length = 0;
+ data->sector = 0xFF;
+ break;
+ case DASD_ECKD_CCW_READ_TRACK_DATA:
+ data->auxiliary.length_valid = 0x1;
+ data->length = tlf;
+ data->operation.operation = 0x0C;
+ break;
+ case DASD_ECKD_CCW_ERASE:
+ data->length = reclen;
+ data->auxiliary.length_valid = 0x1;
+ data->operation.operation = 0x0b;
+ break;
+ default:
+ DBF_DEV_EVENT(DBF_ERR, device,
+ "fill LRE unknown opcode 0x%x", cmd);
+ BUG();
+ }
+ set_ch_t(&data->seek_addr,
+ trk / private->rdc_data.trk_per_cyl,
+ trk % private->rdc_data.trk_per_cyl);
+ data->search_arg.cyl = data->seek_addr.cyl;
+ data->search_arg.head = data->seek_addr.head;
+ data->search_arg.record = rec_on_trk;
+}
+
+static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
+ unsigned int trk, unsigned int totrk, int cmd,
+ struct dasd_device *basedev, struct dasd_device *startdev,
+ unsigned int format, unsigned int rec_on_trk, int count,
+ unsigned int blksize, unsigned int tlf)
+{
+ struct dasd_eckd_private *basepriv, *startpriv;
+ struct LRE_eckd_data *lredata;
+ struct DE_eckd_data *dedata;
+ int rc = 0;
+
+ basepriv = basedev->private;
+ startpriv = startdev->private;
+ dedata = &pfxdata->define_extent;
+ lredata = &pfxdata->locate_record;
+
+ ccw->cmd_code = DASD_ECKD_CCW_PFX;
+ ccw->flags = 0;
+ if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
+ ccw->count = sizeof(*pfxdata) + 2;
+ ccw->cda = (__u32)virt_to_phys(pfxdata);
+ memset(pfxdata, 0, sizeof(*pfxdata) + 2);
+ } else {
+ ccw->count = sizeof(*pfxdata);
+ ccw->cda = (__u32)virt_to_phys(pfxdata);
+ memset(pfxdata, 0, sizeof(*pfxdata));
+ }
+
+ /* prefix data */
+ if (format > 1) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "PFX LRE unknown format 0x%x", format);
+ BUG();
+ return -EINVAL;
+ }
+ pfxdata->format = format;
+ pfxdata->base_address = basepriv->conf.ned->unit_addr;
+ pfxdata->base_lss = basepriv->conf.ned->ID;
+ pfxdata->validity.define_extent = 1;
+
+ /* private uid is kept up to date, conf_data may be outdated */
+ if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
+ pfxdata->validity.verify_base = 1;
+
+ if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
+ pfxdata->validity.verify_base = 1;
+ pfxdata->validity.hyper_pav = 1;
+ }
+
+ rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
+
+ /*
+ * For some commands the System Time Stamp is set in the define extent
+ * data when XRC is supported. The validity of the time stamp must be
+ * reflected in the prefix data as well.
+ */
+ if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
+ pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
+
+ if (format == 1) {
+ locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
+ basedev, blksize, tlf);
+ }
+
+ return rc;
+}
+
+static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
+ unsigned int trk, unsigned int totrk, int cmd,
+ struct dasd_device *basedev, struct dasd_device *startdev)
+{
+ return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
+ 0, 0, 0, 0, 0);
+}
+
+static void
+locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
+ unsigned int rec_on_trk, int no_rec, int cmd,
+ struct dasd_device * device, int reclen)
+{
+ struct dasd_eckd_private *private = device->private;
+ int sector;
+ int dn, d;
+
+ DBF_DEV_EVENT(DBF_INFO, device,
+ "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
+ trk, rec_on_trk, no_rec, cmd, reclen);
+
+ ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+ ccw->flags = 0;
+ ccw->count = 16;
+ ccw->cda = (__u32)virt_to_phys(data);
+
+ memset(data, 0, sizeof(struct LO_eckd_data));
+ sector = 0;
+ if (rec_on_trk) {
+ switch (private->rdc_data.dev_type) {
+ case 0x3390:
+ dn = ceil_quot(reclen + 6, 232);
+ d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
+ sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+ break;
+ case 0x3380:
+ d = 7 + ceil_quot(reclen + 12, 32);
+ sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+ break;
+ }
+ }
+ data->sector = sector;
+ data->count = no_rec;
+ switch (cmd) {
+ case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+ data->operation.orientation = 0x3;
+ data->operation.operation = 0x03;
+ break;
+ case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+ data->operation.orientation = 0x3;
+ data->operation.operation = 0x16;
+ break;
+ case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+ data->operation.orientation = 0x1;
+ data->operation.operation = 0x03;
+ data->count++;
+ break;
+ case DASD_ECKD_CCW_READ_RECORD_ZERO:
+ data->operation.orientation = 0x3;
+ data->operation.operation = 0x16;
+ data->count++;
+ break;
+ case DASD_ECKD_CCW_WRITE:
+ case DASD_ECKD_CCW_WRITE_MT:
+ case DASD_ECKD_CCW_WRITE_KD:
+ case DASD_ECKD_CCW_WRITE_KD_MT:
+ data->auxiliary.last_bytes_used = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x01;
+ break;
+ case DASD_ECKD_CCW_WRITE_CKD:
+ case DASD_ECKD_CCW_WRITE_CKD_MT:
+ data->auxiliary.last_bytes_used = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x03;
+ break;
+ case DASD_ECKD_CCW_READ:
+ case DASD_ECKD_CCW_READ_MT:
+ case DASD_ECKD_CCW_READ_KD:
+ case DASD_ECKD_CCW_READ_KD_MT:
+ data->auxiliary.last_bytes_used = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x06;
+ break;
+ case DASD_ECKD_CCW_READ_CKD:
+ case DASD_ECKD_CCW_READ_CKD_MT:
+ data->auxiliary.last_bytes_used = 0x1;
+ data->length = reclen;
+ data->operation.operation = 0x16;
+ break;
+ case DASD_ECKD_CCW_READ_COUNT:
+ data->operation.operation = 0x06;
+ break;
+ case DASD_ECKD_CCW_ERASE:
+ data->length = reclen;
+ data->auxiliary.last_bytes_used = 0x1;
+ data->operation.operation = 0x0b;
+ break;
+ default:
+ DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
+ "opcode 0x%x", cmd);
+ }
+ set_ch_t(&data->seek_addr,
+ trk / private->rdc_data.trk_per_cyl,
+ trk % private->rdc_data.trk_per_cyl);
+ data->search_arg.cyl = data->seek_addr.cyl;
+ data->search_arg.head = data->seek_addr.head;
+ data->search_arg.record = rec_on_trk;
+}
+
+/*
+ * Returns 1 if the block is one of the special blocks that needs
+ * to get read/written with the KD variant of the command.
+ * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
+ * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
+ * Luckily the KD variants differ only by one bit (0x08) from the
+ * normal variant. So don't wonder about code like:
+ * if (dasd_eckd_cdl_special(blk_per_trk, recid))
+ * ccw->cmd_code |= 0x8;
+ */
+static inline int
+dasd_eckd_cdl_special(int blk_per_trk, int recid)
+{
+ if (recid < 3)
+ return 1;
+ if (recid < blk_per_trk)
+ return 0;
+ if (recid < 2 * blk_per_trk)
+ return 1;
+ return 0;
+}
+
+/*
+ * Returns the record size for the special blocks of the cdl format.
+ * Only returns something useful if dasd_eckd_cdl_special is true
+ * for the recid.
+ */
+static inline int
+dasd_eckd_cdl_reclen(int recid)
+{
+ if (recid < 3)
+ return sizes_trk0[recid];
+ return LABEL_SIZE;
+}
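+
+/*
+ * Illustrative CDL layout: with 4 KiB blocks a 3390 track holds 12
+ * records, so records 0-2 on track 0 use the special sizes_trk0 lengths
+ * (28, 148 and 84 bytes), records 12-23 (all of track 1) use LABEL_SIZE
+ * (140 bytes), and all other records have the full block size.
+ */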
+
+/* Create unique id from the private structure. */
+static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
+{
+ int count;
+
+ memset(uid, 0, sizeof(struct dasd_uid));
+ memcpy(uid->vendor, conf->ned->HDA_manufacturer,
+ sizeof(uid->vendor) - 1);
+ EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
+ memcpy(uid->serial, &conf->ned->serial,
+ sizeof(uid->serial) - 1);
+ EBCASC(uid->serial, sizeof(uid->serial) - 1);
+ uid->ssid = conf->gneq->subsystemID;
+ uid->real_unit_addr = conf->ned->unit_addr;
+ if (conf->sneq) {
+ uid->type = conf->sneq->sua_flags;
+ if (uid->type == UA_BASE_PAV_ALIAS)
+ uid->base_unit_addr = conf->sneq->base_unit_addr;
+ } else {
+ uid->type = UA_BASE_DEVICE;
+ }
+ if (conf->vdsneq) {
+ for (count = 0; count < 16; count++) {
+ sprintf(uid->vduit+2*count, "%02x",
+ conf->vdsneq->uit[count]);
+ }
+ }
+}
+
+/*
+ * Generate device unique id that specifies the physical device.
+ */
+static int dasd_eckd_generate_uid(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned long flags;
+
+ if (!private)
+ return -ENODEV;
+ if (!private->conf.ned || !private->conf.gneq)
+ return -ENODEV;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ create_uid(&private->conf, &private->uid);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return 0;
+}
+
+static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned long flags;
+
+ if (private) {
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ *uid = private->uid;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * compare device UID with data of a given dasd_eckd_private structure
+ * return 0 for match
+ */
+static int dasd_eckd_compare_path_uid(struct dasd_device *device,
+ struct dasd_conf *path_conf)
+{
+ struct dasd_uid device_uid;
+ struct dasd_uid path_uid;
+
+ create_uid(path_conf, &path_uid);
+ dasd_eckd_get_uid(device, &device_uid);
+
+ return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
+}
+
+static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
+ struct dasd_ccw_req *cqr,
+ __u8 *rcd_buffer,
+ __u8 lpm)
+{
+ struct ccw1 *ccw;
+ /*
+ * buffer has to start with EBCDIC "V1.0" to show
+ * support for virtual device SNEQ
+ */
+ rcd_buffer[0] = 0xE5;
+ rcd_buffer[1] = 0xF1;
+ rcd_buffer[2] = 0x4B;
+ rcd_buffer[3] = 0xF0;
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_RCD;
+ ccw->flags = 0;
+ ccw->cda = (__u32)virt_to_phys(rcd_buffer);
+ ccw->count = DASD_ECKD_RCD_DATA_SIZE;
+ cqr->magic = DASD_ECKD_MAGIC;
+
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->expires = 10*HZ;
+ cqr->lpm = lpm;
+ cqr->retries = 256;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+}
+
+/*
+ * Wakeup helper for read_conf. If the cqr is not done and needs some
+ * error recovery, the buffer has to be re-initialized with the EBCDIC
+ * "V1.0" to show support for virtual device SNEQ.
+ */
+static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
+{
+ struct ccw1 *ccw;
+ __u8 *rcd_buffer;
+
+ if (cqr->status != DASD_CQR_DONE) {
+ ccw = cqr->cpaddr;
+ rcd_buffer = phys_to_virt(ccw->cda);
+		memset(rcd_buffer, 0, DASD_ECKD_RCD_DATA_SIZE);
+
+ rcd_buffer[0] = 0xE5;
+ rcd_buffer[1] = 0xF1;
+ rcd_buffer[2] = 0x4B;
+ rcd_buffer[3] = 0xF0;
+ }
+ dasd_wakeup_cb(cqr, data);
+}
+
+static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
+ struct dasd_ccw_req *cqr,
+ __u8 *rcd_buffer,
+ __u8 lpm)
+{
+ struct ciw *ciw;
+ int rc;
+ /*
+ * sanity check: scan for RCD command in extended SenseID data
+ * some devices do not support RCD
+ */
+ ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+ if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
+ return -EOPNOTSUPP;
+
+ dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+ cqr->retries = 5;
+ cqr->callback = read_conf_cb;
+ rc = dasd_sleep_on_immediatly(cqr);
+ return rc;
+}
+
+static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
+ void **rcd_buffer,
+ int *rcd_buffer_size, __u8 lpm)
+{
+ struct ciw *ciw;
+ char *rcd_buf = NULL;
+ int ret;
+ struct dasd_ccw_req *cqr;
+
+ /*
+ * sanity check: scan for RCD command in extended SenseID data
+ * some devices do not support RCD
+ */
+ ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+ if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
+ ret = -EOPNOTSUPP;
+ goto out_error;
+ }
+ rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!rcd_buf) {
+ ret = -ENOMEM;
+ goto out_error;
+ }
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
+				   0, /* use rcd_buf as data area */
+ device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Could not allocate RCD request");
+ ret = -ENOMEM;
+ goto out_error;
+ }
+ dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
+ cqr->callback = read_conf_cb;
+ ret = dasd_sleep_on(cqr);
+ /*
+ * on success we update the user input parms
+ */
+ dasd_sfree_request(cqr, cqr->memdev);
+ if (ret)
+ goto out_error;
+
+ *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
+ *rcd_buffer = rcd_buf;
+ return 0;
+out_error:
+ kfree(rcd_buf);
+ *rcd_buffer = NULL;
+ *rcd_buffer_size = 0;
+ return ret;
+}
+
+static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
+{
+ struct dasd_sneq *sneq;
+ int i, count;
+
+ conf->ned = NULL;
+ conf->sneq = NULL;
+ conf->vdsneq = NULL;
+ conf->gneq = NULL;
+ count = conf->len / sizeof(struct dasd_sneq);
+ sneq = (struct dasd_sneq *)conf->data;
+ for (i = 0; i < count; ++i) {
+ if (sneq->flags.identifier == 1 && sneq->format == 1)
+ conf->sneq = sneq;
+ else if (sneq->flags.identifier == 1 && sneq->format == 4)
+ conf->vdsneq = (struct vd_sneq *)sneq;
+ else if (sneq->flags.identifier == 2)
+ conf->gneq = (struct dasd_gneq *)sneq;
+ else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
+ conf->ned = (struct dasd_ned *)sneq;
+ sneq++;
+ }
+ if (!conf->ned || !conf->gneq) {
+ conf->ned = NULL;
+ conf->sneq = NULL;
+ conf->vdsneq = NULL;
+ conf->gneq = NULL;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
+{
+ struct dasd_gneq *gneq;
+ int i, count, found;
+
+ count = conf_len / sizeof(*gneq);
+ gneq = (struct dasd_gneq *)conf_data;
+ found = 0;
+ for (i = 0; i < count; ++i) {
+ if (gneq->flags.identifier == 2) {
+ found = 1;
+ break;
+ }
+ gneq++;
+ }
+ if (found)
+ return ((char *)gneq)[18] & 0x07;
+ else
+ return 0;
+}
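+
+/*
+ * The path access value determined above is evaluated by the callers:
+ * 0x02 marks a path as non-preferred and 0x03 as preferred, see
+ * dasd_eckd_read_conf() and dasd_eckd_path_available_action().
+ */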
+
+static void dasd_eckd_store_conf_data(struct dasd_device *device,
+ struct dasd_conf_data *conf_data, int chp)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct channel_path_desc_fmt0 *chp_desc;
+ struct subchannel_id sch_id;
+ void *cdp;
+
+ /*
+ * path handling and read_conf allocate data
+ * free it before replacing the pointer
+ * also replace the old private->conf_data pointer
+ * with the new one if this points to the same data
+ */
+ cdp = device->path[chp].conf_data;
+ if (private->conf.data == cdp) {
+ private->conf.data = (void *)conf_data;
+ dasd_eckd_identify_conf_parts(&private->conf);
+ }
+ ccw_device_get_schid(device->cdev, &sch_id);
+ device->path[chp].conf_data = conf_data;
+ device->path[chp].cssid = sch_id.cssid;
+ device->path[chp].ssid = sch_id.ssid;
+ chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
+ if (chp_desc)
+ device->path[chp].chpid = chp_desc->chpid;
+ kfree(chp_desc);
+ kfree(cdp);
+}
+
+static void dasd_eckd_clear_conf_data(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int i;
+
+ private->conf.data = NULL;
+ private->conf.len = 0;
+ for (i = 0; i < 8; i++) {
+ kfree(device->path[i].conf_data);
+ device->path[i].conf_data = NULL;
+ device->path[i].cssid = 0;
+ device->path[i].ssid = 0;
+ device->path[i].chpid = 0;
+ dasd_path_notoper(device, i);
+ }
+}
+
+static void dasd_eckd_read_fc_security(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ u8 esm_valid;
+ u8 esm[8];
+ int chp;
+ int rc;
+
+ rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
+ if (rc) {
+ for (chp = 0; chp < 8; chp++)
+ device->path[chp].fc_security = 0;
+ return;
+ }
+
+ for (chp = 0; chp < 8; chp++) {
+ if (esm_valid & (0x80 >> chp))
+ device->path[chp].fc_security = esm[chp];
+ else
+ device->path[chp].fc_security = 0;
+ }
+}
+
+static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
+ char *print_uid)
+{
+ struct dasd_uid uid;
+
+ create_uid(conf, &uid);
+ if (strlen(uid.vduit) > 0)
+ snprintf(print_uid, DASD_UID_STRLEN,
+ "%s.%s.%04x.%02x.%s",
+ uid.vendor, uid.serial, uid.ssid,
+ uid.real_unit_addr, uid.vduit);
+ else
+ snprintf(print_uid, DASD_UID_STRLEN,
+ "%s.%s.%04x.%02x",
+ uid.vendor, uid.serial, uid.ssid,
+ uid.real_unit_addr);
+}
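+
+/*
+ * An illustrative UID string for a base device would be
+ * "IBM.000000012345.0123.0f"; for z/VM minidisks the vduit string is
+ * appended as a fifth component.
+ */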
+
+static int dasd_eckd_check_cabling(struct dasd_device *device,
+ void *conf_data, __u8 lpm)
+{
+ char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_conf path_conf;
+
+ path_conf.data = conf_data;
+ path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(&path_conf))
+ return 1;
+
+ if (dasd_eckd_compare_path_uid(device, &path_conf)) {
+ dasd_eckd_get_uid_string(&path_conf, print_path_uid);
+ dasd_eckd_get_uid_string(&private->conf, print_device_uid);
+ dev_err(&device->cdev->dev,
+ "Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
+ lpm, print_path_uid, print_device_uid);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int dasd_eckd_read_conf(struct dasd_device *device)
+{
+ void *conf_data;
+ int conf_len, conf_data_saved;
+ int rc, path_err, pos;
+ __u8 lpm, opm;
+ struct dasd_eckd_private *private;
+
+ private = device->private;
+ opm = ccw_device_get_path_mask(device->cdev);
+ conf_data_saved = 0;
+ path_err = 0;
+ /* get configuration data per operational path */
+ for (lpm = 0x80; lpm; lpm>>= 1) {
+ if (!(lpm & opm))
+ continue;
+ rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+ &conf_len, lpm);
+ if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read configuration data returned "
+ "error %d", rc);
+ return rc;
+ }
+ if (conf_data == NULL) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "No configuration data "
+ "retrieved");
+ /* no further analysis possible */
+ dasd_path_add_opm(device, opm);
+ continue; /* no error */
+ }
+ /* save first valid configuration data */
+ if (!conf_data_saved) {
+ /* initially clear previously stored conf_data */
+ dasd_eckd_clear_conf_data(device);
+ private->conf.data = conf_data;
+ private->conf.len = conf_len;
+ if (dasd_eckd_identify_conf_parts(&private->conf)) {
+ private->conf.data = NULL;
+ private->conf.len = 0;
+ kfree(conf_data);
+ continue;
+ }
+ /*
+			 * build the device UID so that the data of
+			 * other paths can be compared to it
+ */
+ dasd_eckd_generate_uid(device);
+ conf_data_saved++;
+ } else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
+ dasd_path_add_cablepm(device, lpm);
+ path_err = -EINVAL;
+ kfree(conf_data);
+ continue;
+ }
+
+ pos = pathmask_to_pos(lpm);
+ dasd_eckd_store_conf_data(device, conf_data, pos);
+
+ switch (dasd_eckd_path_access(conf_data, conf_len)) {
+ case 0x02:
+ dasd_path_add_nppm(device, lpm);
+ break;
+ case 0x03:
+ dasd_path_add_ppm(device, lpm);
+ break;
+ }
+ if (!dasd_path_get_opm(device)) {
+ dasd_path_set_opm(device, lpm);
+ dasd_generic_path_operational(device);
+ } else {
+ dasd_path_add_opm(device, lpm);
+ }
+ }
+
+ return path_err;
+}
+
+static u32 get_fcx_max_data(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int fcx_in_css, fcx_in_gneq, fcx_in_features;
+ unsigned int mdc;
+ int tpm;
+
+ if (dasd_nofcx)
+ return 0;
+ /* is transport mode supported? */
+ fcx_in_css = css_general_characteristics.fcx;
+ fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
+ fcx_in_features = private->features.feature[40] & 0x80;
+ tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+
+ if (!tpm)
+ return 0;
+
+ mdc = ccw_device_get_mdc(device->cdev, 0);
+ if (mdc == 0) {
+ dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
+ return 0;
+ } else {
+ return (u32)mdc * FCX_MAX_DATA_FACTOR;
+ }
+}
+
+static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned int mdc;
+ u32 fcx_max_data;
+
+ if (private->fcx_max_data) {
+ mdc = ccw_device_get_mdc(device->cdev, lpm);
+ if (mdc == 0) {
+ dev_warn(&device->cdev->dev,
+ "Detecting the maximum data size for zHPF "
+ "requests failed (rc=%d) for a new path %x\n",
+ mdc, lpm);
+ return mdc;
+ }
+ fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
+ if (fcx_max_data < private->fcx_max_data) {
+ dev_warn(&device->cdev->dev,
+ "The maximum data size for zHPF requests %u "
+ "on a new path %x is below the active maximum "
+ "%u\n", fcx_max_data, lpm,
+ private->fcx_max_data);
+ return -EACCES;
+ }
+ }
+ return 0;
+}
+
+static int rebuild_device_uid(struct dasd_device *device,
+ struct pe_handler_work_data *data)
+{
+ struct dasd_eckd_private *private = device->private;
+ __u8 lpm, opm = dasd_path_get_opm(device);
+ int rc = -ENODEV;
+
+ for (lpm = 0x80; lpm; lpm >>= 1) {
+ if (!(lpm & opm))
+ continue;
+ memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+ memset(&data->cqr, 0, sizeof(data->cqr));
+ data->cqr.cpaddr = &data->ccw;
+ rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+ data->rcd_buffer,
+ lpm);
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
+ continue;
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read configuration data "
+ "returned error %d", rc);
+ break;
+ }
+ memcpy(private->conf.data, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ if (dasd_eckd_identify_conf_parts(&private->conf)) {
+ rc = -ENODEV;
+ } else /* first valid path is enough */
+ break;
+ }
+
+ if (!rc)
+ rc = dasd_eckd_generate_uid(device);
+
+ return rc;
+}
+
+static void dasd_eckd_path_available_action(struct dasd_device *device,
+ struct pe_handler_work_data *data)
+{
+ __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
+ __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
+ struct dasd_conf_data *conf_data;
+ char print_uid[DASD_UID_STRLEN];
+ struct dasd_conf path_conf;
+ unsigned long flags;
+ int rc, pos;
+
+ opm = 0;
+ npm = 0;
+ ppm = 0;
+ epm = 0;
+ hpfpm = 0;
+ cablepm = 0;
+
+ for (lpm = 0x80; lpm; lpm >>= 1) {
+ if (!(lpm & data->tbvpm))
+ continue;
+ memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+ memset(&data->cqr, 0, sizeof(data->cqr));
+ data->cqr.cpaddr = &data->ccw;
+ rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+ data->rcd_buffer,
+ lpm);
+ if (!rc) {
+ switch (dasd_eckd_path_access(data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE)
+ ) {
+ case 0x02:
+ npm |= lpm;
+ break;
+ case 0x03:
+ ppm |= lpm;
+ break;
+ }
+ opm |= lpm;
+ } else if (rc == -EOPNOTSUPP) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "path verification: No configuration "
+ "data retrieved");
+ opm |= lpm;
+ } else if (rc == -EAGAIN) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "path verification: device is stopped,"
+ " try again later");
+ epm |= lpm;
+ } else {
+ dev_warn(&device->cdev->dev,
+ "Reading device feature codes failed "
+ "(rc=%d) for new path %x\n", rc, lpm);
+ continue;
+ }
+ if (verify_fcx_max_data(device, lpm)) {
+ opm &= ~lpm;
+ npm &= ~lpm;
+ ppm &= ~lpm;
+ hpfpm |= lpm;
+ continue;
+ }
+
+ /*
+		 * save conf_data for later comparison, since
+		 * rebuild_device_uid may change the original data
+ */
+ memcpy(&path_rcd_buf, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ path_conf.data = (void *)&path_rcd_buf;
+ path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(&path_conf)) {
+ path_conf.data = NULL;
+ path_conf.len = 0;
+ continue;
+ }
+
+ /*
+ * compare path UID with device UID only if at least
+ * one valid path is left
+		 * otherwise the device UID may have changed and
+ * the first working path UID will be used as device UID
+ */
+ if (dasd_path_get_opm(device) &&
+ dasd_eckd_compare_path_uid(device, &path_conf)) {
+ /*
+ * the comparison was not successful
+ * rebuild the device UID with at least one
+ * known path in case a z/VM hyperswap command
+ * has changed the device
+ *
+ * after this compare again
+ *
+ * if either the rebuild or the recompare fails
+			 * the path cannot be used
+ */
+ if (rebuild_device_uid(device, data) ||
+ dasd_eckd_compare_path_uid(
+ device, &path_conf)) {
+ dasd_eckd_get_uid_string(&path_conf, print_uid);
+ dev_err(&device->cdev->dev,
+ "The newly added channel path %02X "
+ "will not be used because it leads "
+ "to a different device %s\n",
+ lpm, print_uid);
+ opm &= ~lpm;
+ npm &= ~lpm;
+ ppm &= ~lpm;
+ cablepm |= lpm;
+ continue;
+ }
+ }
+
+ conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
+ if (conf_data) {
+ memcpy(conf_data, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ } else {
+ /*
+			 * path is operational but path config data could not
+			 * be stored due to a low memory condition
+			 * add it to the error path mask and schedule a path
+			 * verification later so that it can be added again
+ */
+ epm |= lpm;
+ }
+ pos = pathmask_to_pos(lpm);
+ dasd_eckd_store_conf_data(device, conf_data, pos);
+
+ /*
+ * There is a small chance that a path is lost again between
+ * above path verification and the following modification of
+ * the device opm mask. We could avoid that race here by using
+ * yet another path mask, but we rather deal with this unlikely
+ * situation in dasd_start_IO.
+ */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (!dasd_path_get_opm(device) && opm) {
+ dasd_path_set_opm(device, opm);
+ dasd_generic_path_operational(device);
+ } else {
+ dasd_path_add_opm(device, opm);
+ }
+ dasd_path_add_nppm(device, npm);
+ dasd_path_add_ppm(device, ppm);
+ if (epm) {
+ dasd_path_add_tbvpm(device, epm);
+ dasd_device_set_timer(device, 50);
+ }
+ dasd_path_add_cablepm(device, cablepm);
+ dasd_path_add_nohpfpm(device, hpfpm);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+ dasd_path_create_kobj(device, pos);
+ }
+}
+
+static void do_pe_handler_work(struct work_struct *work)
+{
+ struct pe_handler_work_data *data;
+ struct dasd_device *device;
+
+ data = container_of(work, struct pe_handler_work_data, worker);
+ device = data->device;
+
+	/* delay path verification until the device has been resumed */
+ if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
+ schedule_work(work);
+ return;
+ }
+	/* check if path verification is already running and delay if so */
+ if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
+ schedule_work(work);
+ return;
+ }
+
+ if (data->tbvpm)
+ dasd_eckd_path_available_action(device, data);
+ if (data->fcsecpm)
+ dasd_eckd_read_fc_security(device);
+
+ clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
+ dasd_put_device(device);
+ if (data->isglobal)
+ mutex_unlock(&dasd_pe_handler_mutex);
+ else
+ kfree(data);
+}
+
+static int dasd_eckd_pe_handler(struct dasd_device *device,
+ __u8 tbvpm, __u8 fcsecpm)
+{
+ struct pe_handler_work_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
+ if (!data) {
+ if (mutex_trylock(&dasd_pe_handler_mutex)) {
+ data = pe_handler_worker;
+ data->isglobal = 1;
+ } else {
+ return -ENOMEM;
+ }
+ }
+ INIT_WORK(&data->worker, do_pe_handler_work);
+ dasd_get_device(device);
+ data->device = device;
+ data->tbvpm = tbvpm;
+ data->fcsecpm = fcsecpm;
+ schedule_work(&data->worker);
+ return 0;
+}
+
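+/*
+ * Illustrative sketch (not compiled): dasd_eckd_pe_handler() above uses a
+ * common fallback pattern - if the atomic allocation fails, it borrows a
+ * preallocated global work buffer, serialized by a trylock so that only
+ * one emergency user exists at a time. A minimal version of the pattern:
+ */
+#if 0
+static struct pe_handler_work_data *example_get_work_data(void)
+{
+	struct pe_handler_work_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
+	if (data)
+		return data;
+	/* fall back to the static buffer if nobody else holds it */
+	if (mutex_trylock(&dasd_pe_handler_mutex)) {
+		pe_handler_worker->isglobal = 1;
+		return pe_handler_worker;
+	}
+	return NULL;	/* caller reports -ENOMEM */
+}
+#endif
+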
+static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned long flags;
+
+ if (!private->fcx_max_data)
+ private->fcx_max_data = get_fcx_max_data(device);
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
+ dasd_schedule_device_bh(device);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
+static int dasd_eckd_read_features(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_rssd_features *features;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ memset(&private->features, 0, sizeof(struct dasd_rssd_features));
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ (sizeof(struct dasd_psf_prssd_data) +
+ sizeof(struct dasd_rssd_features)),
+ device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
+ "allocate initialization request");
+ return PTR_ERR(cqr);
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10 * HZ;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = 0x41; /* Read Feature Codes */
+ /* all other bytes of prssdp must be zero */
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
+
+ /* Read Subsystem Data - feature codes */
+ features = (struct dasd_rssd_features *) (prssdp + 1);
+ memset(features, 0, sizeof(struct dasd_rssd_features));
+
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(struct dasd_rssd_features);
+ ccw->cda = (__u32)virt_to_phys(features);
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ rc = dasd_sleep_on(cqr);
+ if (rc == 0) {
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ features = (struct dasd_rssd_features *) (prssdp + 1);
+ memcpy(&private->features, features,
+ sizeof(struct dasd_rssd_features));
+ } else
+ dev_warn(&device->cdev->dev, "Reading device feature codes"
+ " failed with rc=%d\n", rc);
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
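+/*
+ * Illustrative sketch (not compiled): dasd_eckd_read_features() and the
+ * query functions below share one CCW pattern - a Perform Subsystem
+ * Function CCW selecting the suborder, command-chained (CCW_FLAG_CC) into
+ * a Read Subsystem Data CCW that fetches the result buffer placed
+ * directly behind the PSF data.
+ */
+#if 0
+static void example_psf_rssd_chain(struct ccw1 *ccw,
+				   struct dasd_psf_prssd_data *prssdp,
+				   void *result, int result_len)
+{
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(*prssdp);
+	ccw->flags |= CCW_FLAG_CC;		/* chain to the RSSD CCW */
+	ccw->cda = (__u32)virt_to_phys(prssdp);
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = result_len;
+	ccw->cda = (__u32)virt_to_phys(result);
+}
+#endif
+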
+/* Read Volume Information - Volume Storage Query */
+static int dasd_eckd_read_vol_info(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_rssd_vsq *vsq;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int useglobal;
+ int rc;
+
+ /* This command cannot be executed on an alias device */
+ if (private->uid.type == UA_BASE_PAV_ALIAS ||
+ private->uid.type == UA_HYPER_PAV_ALIAS)
+ return 0;
+
+ useglobal = 0;
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
+ sizeof(*prssdp) + sizeof(*vsq), device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate initialization request");
+ mutex_lock(&dasd_vol_info_mutex);
+ useglobal = 1;
+ cqr = &dasd_vol_info_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
+ cqr->cpaddr = &dasd_vol_info_req->ccw;
+ cqr->data = &dasd_vol_info_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
+ }
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = cqr->data;
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */
+ prssdp->lss = private->conf.ned->ID;
+ prssdp->volume = private->conf.ned->unit_addr;
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(*prssdp);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
+
+ /* Read Subsystem Data - Volume Storage Query */
+ vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
+ memset(vsq, 0, sizeof(*vsq));
+
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(*vsq);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)virt_to_phys(vsq);
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = device->default_expires * HZ;
+ /* The command might not be supported. Suppress the error output */
+ __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc == 0) {
+ memcpy(&private->vsq, vsq, sizeof(*vsq));
+ } else {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Reading the volume storage information failed with rc=%d", rc);
+ }
+
+ if (useglobal)
+ mutex_unlock(&dasd_vol_info_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
+
+ return rc;
+}
+
+static int dasd_eckd_is_ese(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->vsq.vol_info.ese;
+}
+
+static int dasd_eckd_ext_pool_id(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->vsq.extent_pool_id;
+}
+
+/*
+ * This value represents the total amount of available space. As more space is
+ * allocated by ESE volumes, this value will decrease.
+ * The data for this value is therefore updated on any call.
+ */
+static int dasd_eckd_space_configured(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int rc;
+
+ rc = dasd_eckd_read_vol_info(device);
+
+ return rc ? : private->vsq.space_configured;
+}
+
+/*
+ * The value of space allocated by an ESE volume may have changed and is
+ * therefore updated on any call.
+ */
+static int dasd_eckd_space_allocated(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int rc;
+
+ rc = dasd_eckd_read_vol_info(device);
+
+ return rc ? : private->vsq.space_allocated;
+}
+
+static int dasd_eckd_logical_capacity(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->vsq.logical_capacity;
+}
+
+static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
+{
+ struct ext_pool_exhaust_work_data *data;
+ struct dasd_device *device;
+ struct dasd_device *base;
+
+ data = container_of(work, struct ext_pool_exhaust_work_data, worker);
+ device = data->device;
+ base = data->base;
+
+ if (!base)
+ base = device;
+ if (dasd_eckd_space_configured(base) != 0) {
+ dasd_generic_space_avail(device);
+ } else {
+ dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
+ }
+
+ dasd_put_device(device);
+ kfree(data);
+}
+
+static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ struct ext_pool_exhaust_work_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+ INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
+ dasd_get_device(device);
+ data->device = device;
+
+ if (cqr->block)
+ data->base = cqr->block->base;
+ else if (cqr->basedev)
+ data->base = cqr->basedev;
+ else
+ data->base = NULL;
+
+ schedule_work(&data->worker);
+
+ return 0;
+}
+
+static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
+ struct dasd_rssd_lcq *lcq)
+{
+ struct dasd_eckd_private *private = device->private;
+ int pool_id = dasd_eckd_ext_pool_id(device);
+ struct dasd_ext_pool_sum eps;
+ int i;
+
+ for (i = 0; i < lcq->pool_count; i++) {
+ eps = lcq->ext_pool_sum[i];
+ if (eps.pool_id == pool_id) {
+ memcpy(&private->eps, &eps,
+ sizeof(struct dasd_ext_pool_sum));
+ }
+ }
+}
+
+/* Read Extent Pool Information - Logical Configuration Query */
+static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_rssd_lcq *lcq;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ /* This command cannot be executed on an alias device */
+ if (private->uid.type == UA_BASE_PAV_ALIAS ||
+ private->uid.type == UA_HYPER_PAV_ALIAS)
+ return 0;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
+ sizeof(*prssdp) + sizeof(*lcq), device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate initialization request");
+ return PTR_ERR(cqr);
+ }
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = cqr->data;
+ memset(prssdp, 0, sizeof(*prssdp));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = PSF_SUBORDER_LCQ; /* Logical Configuration Query */
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(*prssdp);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
+
+ lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
+ memset(lcq, 0, sizeof(*lcq));
+
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(*lcq);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)virt_to_phys(lcq);
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = device->default_expires * HZ;
+ /* The command might not be supported. Suppress the error output */
+ __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc == 0) {
+ dasd_eckd_cpy_ext_pool_data(device, lcq);
+ } else {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Reading the logical configuration failed with rc=%d", rc);
+ }
+
+ dasd_sfree_request(cqr, cqr->memdev);
+
+ return rc;
+}
+
+/*
+ * Depending on the device type, the extent size is specified either as
+ * cylinders per extent (CKD) or size per extent (FBA).
+ * A 1 GB size corresponds to 1113 cyl, and 16 MB to 21 cyl.
+ */
+static int dasd_eckd_ext_size(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_ext_pool_sum eps = private->eps;
+
+ if (!eps.flags.extent_size_valid)
+ return 0;
+ if (eps.extent_size.size_1G)
+ return 1113;
+ if (eps.extent_size.size_16M)
+ return 21;
+
+ return 0;
+}
+
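+/*
+ * Illustrative sketch (not compiled): a worked example of the mapping
+ * above - with the usual 15 tracks per cylinder on ECKD devices, a 1 GiB
+ * extent (1113 cyl) spans 16695 tracks and a 16 MiB extent (21 cyl)
+ * spans 315 tracks.
+ */
+#if 0
+static int example_ext_size_in_tracks(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	return dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
+}
+#endif
+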
+static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->eps.warn_thrshld;
+}
+
+static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->eps.flags.capacity_at_warnlevel;
+}
+
+/*
+ * Extent Pool out of space
+ */
+static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->eps.flags.pool_oos;
+}
+
+/*
+ * Build CP for Perform Subsystem Function - SSC.
+ */
+static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
+ int enable_pav)
+{
+ struct dasd_ccw_req *cqr;
+ struct dasd_psf_ssc_data *psf_ssc_data;
+ struct ccw1 *ccw;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
+ sizeof(struct dasd_psf_ssc_data),
+ device, NULL);
+
+ if (IS_ERR(cqr)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Could not allocate PSF-SSC request");
+ return cqr;
+ }
+ psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
+ psf_ssc_data->order = PSF_ORDER_SSC;
+ psf_ssc_data->suborder = 0xc0;
+ if (enable_pav) {
+ psf_ssc_data->suborder |= 0x08;
+ psf_ssc_data->reserved[0] = 0x88;
+ }
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->cda = (__u32)virt_to_phys(psf_ssc_data);
+ ccw->count = 66;
+
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10*HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ return cqr;
+}
+
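+/*
+ * Illustrative sketch (not compiled): the resulting SSC suborder is 0xc0
+ * without PAV and 0xc8 (0xc0 | 0x08) with PAV enabled; the meaning of
+ * the 0x88 value written to reserved[0] is taken from the code above,
+ * not from documentation.
+ */
+#if 0
+static u8 example_ssc_suborder(int enable_pav)
+{
+	return enable_pav ? (0xc0 | 0x08) : 0xc0;
+}
+#endif
+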
+/*
+ * Perform Subsystem Function.
+ * It is necessary to trigger CIO for channel revalidation since this
+ * call might change behaviour of DASD devices.
+ */
+static int
+dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
+ unsigned long flags)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+
+ cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
+ if (IS_ERR(cqr))
+ return PTR_ERR(cqr);
+
+ /*
+	 * set flags, e.g. turn on failfast, to prevent blocking;
+	 * the calling function should handle failed requests
+ */
+ cqr->flags |= flags;
+
+ rc = dasd_sleep_on(cqr);
+ if (!rc)
+ /* trigger CIO to reprobe devices */
+ css_schedule_reprobe();
+ else if (cqr->intrc == -EAGAIN)
+ rc = -EAGAIN;
+
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * Validate the storage server of the current device.
+ */
+static int dasd_eckd_validate_server(struct dasd_device *device,
+ unsigned long flags)
+{
+ struct dasd_eckd_private *private = device->private;
+ int enable_pav, rc;
+
+ if (private->uid.type == UA_BASE_PAV_ALIAS ||
+ private->uid.type == UA_HYPER_PAV_ALIAS)
+ return 0;
+ if (dasd_nopav || MACHINE_IS_VM)
+ enable_pav = 0;
+ else
+ enable_pav = 1;
+ rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
+
+	/* the requested feature may not be available on the server,
+	 * therefore just report the error and go ahead */
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
+ "returned rc=%d", private->uid.ssid, rc);
+ return rc;
+}
+
+/*
+ * worker to run validate server in case of a lost path group
+ */
+static void dasd_eckd_do_validate_server(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ kick_validate);
+ unsigned long flags = 0;
+
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
+ if (dasd_eckd_validate_server(device, flags)
+ == -EAGAIN) {
+ /* schedule worker again if failed */
+ schedule_work(&device->kick_validate);
+ return;
+ }
+
+ dasd_put_device(device);
+}
+
+static void dasd_eckd_kick_validate_server(struct dasd_device *device)
+{
+ dasd_get_device(device);
+ /* exit if device not online or in offline processing */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+ device->state < DASD_STATE_ONLINE) {
+ dasd_put_device(device);
+ return;
+ }
+ /* queue call to do_validate_server to the kernel event daemon. */
+ if (!schedule_work(&device->kick_validate))
+ dasd_put_device(device);
+}
+
+/*
+ * return 1 if the device is the copy relation primary or if no copy
+ * relation is active, 0 otherwise
+ */
+static int dasd_device_is_primary(struct dasd_device *device)
+{
+ if (!device->copy)
+ return 1;
+
+ if (device->copy->active->device == device)
+ return 1;
+
+ return 0;
+}
+
+static int dasd_eckd_alloc_block(struct dasd_device *device)
+{
+ struct dasd_block *block;
+ struct dasd_uid temp_uid;
+
+ if (!dasd_device_is_primary(device))
+ return 0;
+
+ dasd_eckd_get_uid(device, &temp_uid);
+ if (temp_uid.type == UA_BASE_DEVICE) {
+ block = dasd_alloc_block();
+ if (IS_ERR(block)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "could not allocate dasd block structure");
+ return PTR_ERR(block);
+ }
+ device->block = block;
+ block->base = device;
+ }
+ return 0;
+}
+
+static bool dasd_eckd_pprc_enabled(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->rdc_data.facilities.PPRC_enabled;
+}
+
+/*
+ * Check device characteristics.
+ * If the device is accessible using ECKD discipline, the device is enabled.
+ */
+static int
+dasd_eckd_check_characteristics(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ int rc, i;
+ int readonly;
+ unsigned long value;
+
+	/* setup work queue for validate server */
+ INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
+ /* setup work queue for summary unit check */
+ INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
+
+ if (!ccw_device_is_pathgroup(device->cdev)) {
+ dev_warn(&device->cdev->dev,
+ "A channel path group could not be established\n");
+ return -EIO;
+ }
+ if (!ccw_device_is_multipath(device->cdev)) {
+ dev_info(&device->cdev->dev,
+ "The DASD is not operating in multipath mode\n");
+ }
+ if (!private) {
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private) {
+ dev_warn(&device->cdev->dev,
+ "Allocating memory for private DASD data "
+ "failed\n");
+ return -ENOMEM;
+ }
+ device->private = private;
+ } else {
+ memset(private, 0, sizeof(*private));
+ }
+ /* Invalidate status of initial analysis. */
+ private->init_cqr_status = -1;
+ /* Set default cache operations. */
+ private->attrib.operation = DASD_NORMAL_CACHE;
+ private->attrib.nr_cyl = 0;
+
+ /* Read Configuration Data */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err1;
+
+ /* set some default values */
+ device->default_expires = DASD_EXPIRES;
+ device->default_retries = DASD_RETRIES;
+ device->path_thrhld = DASD_ECKD_PATH_THRHLD;
+ device->path_interval = DASD_ECKD_PATH_INTERVAL;
+ device->aq_timeouts = DASD_RETRIES_MAX;
+
+ if (private->conf.gneq) {
+ value = 1;
+ for (i = 0; i < private->conf.gneq->timeout.value; i++)
+ value = 10 * value;
+ value = value * private->conf.gneq->timeout.number;
+ /* do not accept useless values */
+ if (value != 0 && value <= DASD_EXPIRES_MAX)
+ device->default_expires = value;
+ }
+
+ /* Read Device Characteristics */
+ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+ &private->rdc_data, 64);
+ if (rc) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read device characteristic failed, rc=%d", rc);
+ goto out_err1;
+ }
+
+ /* setup PPRC for device from devmap */
+ rc = dasd_devmap_set_device_copy_relation(device->cdev,
+ dasd_eckd_pprc_enabled(device));
+ if (rc) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "copy relation setup failed, rc=%d", rc);
+ goto out_err1;
+ }
+
+	/* check if a block device is needed and allocate one if so */
+ rc = dasd_eckd_alloc_block(device);
+ if (rc)
+ goto out_err1;
+
+ /* register lcu with alias handling, enable PAV */
+ rc = dasd_alias_make_device_known_to_lcu(device);
+ if (rc)
+ goto out_err2;
+
+ dasd_eckd_validate_server(device, 0);
+
+ /* device may report different configuration data after LCU setup */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err3;
+
+ dasd_eckd_read_fc_security(device);
+ dasd_path_create_kobjects(device);
+
+ /* Read Feature Codes */
+ dasd_eckd_read_features(device);
+
+ /* Read Volume Information */
+ dasd_eckd_read_vol_info(device);
+
+ /* Read Extent Pool Information */
+ dasd_eckd_read_ext_pool_info(device);
+
+ if ((device->features & DASD_FEATURE_USERAW) &&
+ !(private->rdc_data.facilities.RT_in_LR)) {
+ dev_err(&device->cdev->dev, "The storage server does not "
+ "support raw-track access\n");
+ rc = -EINVAL;
+ goto out_err3;
+ }
+
+ /* find the valid cylinder size */
+ if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
+ private->rdc_data.long_no_cyl)
+ private->real_cyl = private->rdc_data.long_no_cyl;
+ else
+ private->real_cyl = private->rdc_data.no_cyl;
+
+ private->fcx_max_data = get_fcx_max_data(device);
+
+ readonly = dasd_device_is_ro(device);
+ if (readonly)
+ set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+ dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
+ "with %d cylinders, %d heads, %d sectors%s\n",
+ private->rdc_data.dev_type,
+ private->rdc_data.dev_model,
+ private->rdc_data.cu_type,
+ private->rdc_data.cu_model.model,
+ private->real_cyl,
+ private->rdc_data.trk_per_cyl,
+ private->rdc_data.sec_per_trk,
+ readonly ? ", read-only device" : "");
+ return 0;
+
+out_err3:
+ dasd_alias_disconnect_device_from_lcu(device);
+out_err2:
+ dasd_free_block(device->block);
+ device->block = NULL;
+out_err1:
+ dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
+ kfree(device->private);
+ device->private = NULL;
+ return rc;
+}
+
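+/*
+ * Illustrative sketch (not compiled): the default_expires computation in
+ * dasd_eckd_check_characteristics() decodes the GNEQ timeout as
+ * number * 10^value, e.g. number = 3 with value = 2 yields 300; results
+ * of 0 or above DASD_EXPIRES_MAX are rejected.
+ */
+#if 0
+static unsigned long example_gneq_timeout(unsigned int number,
+					  unsigned int exponent)
+{
+	unsigned long value = 1;
+
+	while (exponent--)
+		value *= 10;
+	return value * number;	/* e.g. 3 * 10^2 = 300 */
+}
+#endif
+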
+static void dasd_eckd_uncheck_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ if (!private)
+ return;
+
+ dasd_alias_disconnect_device_from_lcu(device);
+ private->conf.ned = NULL;
+ private->conf.sneq = NULL;
+ private->conf.vdsneq = NULL;
+ private->conf.gneq = NULL;
+ dasd_eckd_clear_conf_data(device);
+ dasd_path_remove_kobjects(device);
+}
+
+static struct dasd_ccw_req *
+dasd_eckd_analysis_ccw(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct eckd_count *count_data;
+ struct LO_eckd_data *LO_data;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int cplength, datasize;
+ int i;
+
+ cplength = 8;
+ datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
+ NULL);
+ if (IS_ERR(cqr))
+ return cqr;
+ ccw = cqr->cpaddr;
+ /* Define extent for the first 2 tracks. */
+ define_extent(ccw++, cqr->data, 0, 1,
+ DASD_ECKD_CCW_READ_COUNT, device, 0);
+ LO_data = cqr->data + sizeof(struct DE_eckd_data);
+ /* Locate record for the first 4 records on track 0. */
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, 0, 0, 4,
+ DASD_ECKD_CCW_READ_COUNT, device, 0);
+
+ count_data = private->count_area;
+ for (i = 0; i < 4; i++) {
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+ ccw->flags = 0;
+ ccw->count = 8;
+ ccw->cda = (__u32)virt_to_phys(count_data);
+ ccw++;
+ count_data++;
+ }
+
+ /* Locate record for the first record on track 1. */
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, 1, 0, 1,
+ DASD_ECKD_CCW_READ_COUNT, device, 0);
+ /* Read count ccw. */
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+ ccw->flags = 0;
+ ccw->count = 8;
+ ccw->cda = (__u32)virt_to_phys(count_data);
+
+ cqr->block = NULL;
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->retries = 255;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ /* Set flags to suppress output for expected errors */
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+ return cqr;
+}
+
+/* differentiate between 'no record found' and any other error */
+static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
+{
+ char *sense;
+ if (init_cqr->status == DASD_CQR_DONE)
+ return INIT_CQR_OK;
+ else if (init_cqr->status == DASD_CQR_NEED_ERP ||
+ init_cqr->status == DASD_CQR_FAILED) {
+ sense = dasd_get_sense(&init_cqr->irb);
+ if (sense && (sense[1] & SNS1_NO_REC_FOUND))
+ return INIT_CQR_UNFORMATTED;
+ else
+ return INIT_CQR_ERROR;
+ } else
+ return INIT_CQR_ERROR;
+}
+
+/*
+ * This is the callback function for the init_analysis cqr. It saves
+ * the status of the initial analysis ccw before it frees it and kicks
+ * the device to continue the startup sequence. This will call
+ * dasd_eckd_do_analysis again (if the device has not been marked
+ * for deletion in the meantime).
+ */
+static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
+ void *data)
+{
+ struct dasd_device *device = init_cqr->startdev;
+ struct dasd_eckd_private *private = device->private;
+
+ private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
+ dasd_sfree_request(init_cqr, device);
+ dasd_kick_device(device);
+}
+
+static int dasd_eckd_start_analysis(struct dasd_block *block)
+{
+ struct dasd_ccw_req *init_cqr;
+
+ init_cqr = dasd_eckd_analysis_ccw(block->base);
+ if (IS_ERR(init_cqr))
+ return PTR_ERR(init_cqr);
+ init_cqr->callback = dasd_eckd_analysis_callback;
+ init_cqr->callback_data = NULL;
+ init_cqr->expires = 5*HZ;
+ /* first try without ERP, so we can later handle unformatted
+	 * devices as a special case
+ */
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
+ init_cqr->retries = 0;
+ dasd_add_request_head(init_cqr);
+ return -EAGAIN;
+}
+
+static int dasd_eckd_end_analysis(struct dasd_block *block)
+{
+ struct dasd_device *device = block->base;
+ struct dasd_eckd_private *private = device->private;
+ struct eckd_count *count_area;
+ unsigned int sb, blk_per_trk;
+ int status, i;
+ struct dasd_ccw_req *init_cqr;
+
+ status = private->init_cqr_status;
+ private->init_cqr_status = -1;
+ if (status == INIT_CQR_ERROR) {
+ /* try again, this time with full ERP */
+ init_cqr = dasd_eckd_analysis_ccw(device);
+ dasd_sleep_on(init_cqr);
+ status = dasd_eckd_analysis_evaluation(init_cqr);
+ dasd_sfree_request(init_cqr, device);
+ }
+
+ if (device->features & DASD_FEATURE_USERAW) {
+ block->bp_block = DASD_RAW_BLOCKSIZE;
+ blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
+ block->s2b_shift = 3;
+ goto raw;
+ }
+
+ if (status == INIT_CQR_UNFORMATTED) {
+ dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
+ return -EMEDIUMTYPE;
+ } else if (status == INIT_CQR_ERROR) {
+ dev_err(&device->cdev->dev,
+ "Detecting the DASD disk layout failed because "
+ "of an I/O error\n");
+ return -EIO;
+ }
+
+ private->uses_cdl = 1;
+ /* Check Track 0 for Compatible Disk Layout */
+ count_area = NULL;
+ for (i = 0; i < 3; i++) {
+ if (private->count_area[i].kl != 4 ||
+ private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
+ private->count_area[i].cyl != 0 ||
+ private->count_area[i].head != count_area_head[i] ||
+ private->count_area[i].record != count_area_rec[i]) {
+ private->uses_cdl = 0;
+ break;
+ }
+ }
+ if (i == 3)
+ count_area = &private->count_area[3];
+
+ if (private->uses_cdl == 0) {
+ for (i = 0; i < 5; i++) {
+ if ((private->count_area[i].kl != 0) ||
+ (private->count_area[i].dl !=
+ private->count_area[0].dl) ||
+ private->count_area[i].cyl != 0 ||
+ private->count_area[i].head != count_area_head[i] ||
+ private->count_area[i].record != count_area_rec[i])
+ break;
+ }
+ if (i == 5)
+ count_area = &private->count_area[0];
+ } else {
+ if (private->count_area[3].record == 1)
+ dev_warn(&device->cdev->dev,
+ "Track 0 has no records following the VTOC\n");
+ }
+
+ if (count_area != NULL && count_area->kl == 0) {
+		/* we found nothing violating our disk layout */
+ if (dasd_check_blocksize(count_area->dl) == 0)
+ block->bp_block = count_area->dl;
+ }
+ if (block->bp_block == 0) {
+ dev_warn(&device->cdev->dev,
+ "The disk layout of the DASD is not supported\n");
+ return -EMEDIUMTYPE;
+ }
+ block->s2b_shift = 0; /* bits to shift 512 to get a block */
+ for (sb = 512; sb < block->bp_block; sb = sb << 1)
+ block->s2b_shift++;
+
+ blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
+
+raw:
+ block->blocks = ((unsigned long) private->real_cyl *
+ private->rdc_data.trk_per_cyl *
+ blk_per_trk);
+
+ dev_info(&device->cdev->dev,
+ "DASD with %u KB/block, %lu KB total size, %u KB/track, "
+ "%s\n", (block->bp_block >> 10),
+ (((unsigned long) private->real_cyl *
+ private->rdc_data.trk_per_cyl *
+ blk_per_trk * (block->bp_block >> 9)) >> 1),
+ ((blk_per_trk * block->bp_block) >> 10),
+ private->uses_cdl ?
+ "compatible disk layout" : "linux disk layout");
+
+ return 0;
+}
+
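+/*
+ * Illustrative sketch (not compiled): s2b_shift as computed above
+ * converts 512-byte sectors into device blocks, e.g. bp_block = 4096
+ * gives a shift of 3 because 512 << 3 == 4096.
+ */
+#if 0
+static unsigned int example_s2b_shift(unsigned int bp_block)
+{
+	unsigned int sb, shift = 0;
+
+	for (sb = 512; sb < bp_block; sb <<= 1)
+		shift++;
+	return shift;
+}
+#endif
+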
+static int dasd_eckd_do_analysis(struct dasd_block *block)
+{
+ struct dasd_eckd_private *private = block->base->private;
+
+ if (private->init_cqr_status < 0)
+ return dasd_eckd_start_analysis(block);
+ else
+ return dasd_eckd_end_analysis(block);
+}
+
+static int dasd_eckd_basic_to_ready(struct dasd_device *device)
+{
+ return dasd_alias_add_device(device);
+};
+
+static int dasd_eckd_online_to_ready(struct dasd_device *device)
+{
+ if (cancel_work_sync(&device->reload_device))
+ dasd_put_device(device);
+ if (cancel_work_sync(&device->kick_validate))
+ dasd_put_device(device);
+
+ return 0;
+};
+
+static int dasd_eckd_basic_to_known(struct dasd_device *device)
+{
+ return dasd_alias_remove_device(device);
+};
+
+static int
+dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
+{
+ struct dasd_eckd_private *private = block->base->private;
+
+ if (dasd_check_blocksize(block->bp_block) == 0) {
+ geo->sectors = recs_per_track(&private->rdc_data,
+ 0, block->bp_block);
+ }
+ geo->cylinders = private->rdc_data.no_cyl;
+ geo->heads = private->rdc_data.trk_per_cyl;
+ return 0;
+}
+
+/*
+ * Build the TCW request for the format check
+ */
+static struct dasd_ccw_req *
+dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
+ int enable_pav, struct eckd_count *fmt_buffer,
+ int rpt)
+{
+ struct dasd_eckd_private *start_priv;
+ struct dasd_device *startdev = NULL;
+ struct tidaw *last_tidaw = NULL;
+ struct dasd_ccw_req *cqr;
+ struct itcw *itcw;
+ int itcw_size;
+ int count;
+ int rc;
+ int i;
+
+ if (enable_pav)
+ startdev = dasd_alias_get_start_dev(base);
+
+ if (!startdev)
+ startdev = base;
+
+ start_priv = startdev->private;
+
+ count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
+
+ /*
+	 * we're adding 'count' TIDAWs to the ITCW;
+ * calculate the corresponding itcw_size
+ */
+ itcw_size = itcw_calc_size(0, count, 0);
+
+ cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+ if (IS_ERR(cqr))
+ return cqr;
+
+ start_priv->count++;
+
+ itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
+ if (IS_ERR(itcw)) {
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ cqr->cpaddr = itcw_get_tcw(itcw);
+ rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
+ sizeof(struct eckd_count),
+ count * sizeof(struct eckd_count), 0, rpt);
+ if (rc)
+ goto out_err;
+
+ for (i = 0; i < count; i++) {
+ last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
+ sizeof(struct eckd_count));
+ if (IS_ERR(last_tidaw)) {
+ rc = -EINVAL;
+ goto out_err;
+ }
+ }
+
+ last_tidaw->flags |= TIDAW_FLAGS_LAST;
+ itcw_finalize(itcw);
+
+ cqr->cpmode = 1;
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->basedev = base;
+ cqr->retries = startdev->default_retries;
+ cqr->expires = startdev->default_expires * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ /* Set flags to suppress output for expected errors */
+ set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+
+ return cqr;
+
+out_err:
+ dasd_sfree_request(cqr, startdev);
+
+ return ERR_PTR(rc);
+}
+
+/*
+ * Build the CCW request for the format check
+ */
+static struct dasd_ccw_req *
+dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
+ int enable_pav, struct eckd_count *fmt_buffer, int rpt)
+{
+ struct dasd_eckd_private *start_priv;
+ struct dasd_eckd_private *base_priv;
+ struct dasd_device *startdev = NULL;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ void *data;
+ int cplength, datasize;
+ int use_prefix;
+ int count;
+ int i;
+
+ if (enable_pav)
+ startdev = dasd_alias_get_start_dev(base);
+
+ if (!startdev)
+ startdev = base;
+
+ start_priv = startdev->private;
+ base_priv = base->private;
+
+ count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
+
+ use_prefix = base_priv->features.feature[8] & 0x01;
+
+ if (use_prefix) {
+ cplength = 1;
+ datasize = sizeof(struct PFX_eckd_data);
+ } else {
+ cplength = 2;
+ datasize = sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data);
+ }
+ cplength += count;
+
+ cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
+ if (IS_ERR(cqr))
+ return cqr;
+
+ start_priv->count++;
+ data = cqr->data;
+ ccw = cqr->cpaddr;
+
+ if (use_prefix) {
+ prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
+ count, 0, 0);
+ } else {
+ define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_READ_COUNT, startdev, 0);
+
+ data += sizeof(struct DE_eckd_data);
+ ccw[-1].flags |= CCW_FLAG_CC;
+
+ locate_record(ccw++, data, fdata->start_unit, 0, count,
+ DASD_ECKD_CCW_READ_COUNT, base, 0);
+ }
+
+ for (i = 0; i < count; i++) {
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 8;
+ ccw->cda = (__u32)virt_to_phys(fmt_buffer);
+ ccw++;
+ fmt_buffer++;
+ }
+
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->basedev = base;
+ cqr->retries = DASD_RETRIES;
+ cqr->expires = startdev->default_expires * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ /* Set flags to suppress output for expected errors */
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+ return cqr;
+}
+
+static struct dasd_ccw_req *
+dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
+ struct format_data_t *fdata, int enable_pav)
+{
+ struct dasd_eckd_private *base_priv;
+ struct dasd_eckd_private *start_priv;
+ struct dasd_ccw_req *fcp;
+ struct eckd_count *ect;
+ struct ch_t address;
+ struct ccw1 *ccw;
+ void *data;
+ int rpt;
+ int cplength, datasize;
+ int i, j;
+ int intensity = 0;
+ int r0_perm;
+ int nr_tracks;
+ int use_prefix;
+
+ if (enable_pav)
+ startdev = dasd_alias_get_start_dev(base);
+
+ if (!startdev)
+ startdev = base;
+
+ start_priv = startdev->private;
+ base_priv = base->private;
+
+ rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
+
+ nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
+
+ /*
+ * fdata->intensity is a bit string that tells us what to do:
+ * Bit 0: write record zero
+ * Bit 1: write home address, currently not supported
+ * Bit 2: invalidate tracks
+ * Bit 3: use OS/390 compatible disk layout (cdl)
+ * Bit 4: do not allow storage subsystem to modify record zero
+ * Only some bit combinations do make sense.
+ */
+ if (fdata->intensity & 0x10) {
+ r0_perm = 0;
+ intensity = fdata->intensity & ~0x10;
+ } else {
+ r0_perm = 1;
+ intensity = fdata->intensity;
+ }
+
+ use_prefix = base_priv->features.feature[8] & 0x01;
+
+ switch (intensity) {
+ case 0x00: /* Normal format */
+ case 0x08: /* Normal format, use cdl. */
+ cplength = 2 + (rpt*nr_tracks);
+ if (use_prefix)
+ datasize = sizeof(struct PFX_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ rpt * nr_tracks * sizeof(struct eckd_count);
+ else
+ datasize = sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ rpt * nr_tracks * sizeof(struct eckd_count);
+ break;
+ case 0x01: /* Write record zero and format track. */
+ case 0x09: /* Write record zero and format track, use cdl. */
+ cplength = 2 + rpt * nr_tracks;
+ if (use_prefix)
+ datasize = sizeof(struct PFX_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ sizeof(struct eckd_count) +
+ rpt * nr_tracks * sizeof(struct eckd_count);
+ else
+ datasize = sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ sizeof(struct eckd_count) +
+ rpt * nr_tracks * sizeof(struct eckd_count);
+ break;
+ case 0x04: /* Invalidate track. */
+ case 0x0c: /* Invalidate track, use cdl. */
+ cplength = 3;
+ if (use_prefix)
+ datasize = sizeof(struct PFX_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ sizeof(struct eckd_count);
+ else
+ datasize = sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ sizeof(struct eckd_count);
+ break;
+ default:
+ dev_warn(&startdev->cdev->dev,
+ "An I/O control call used incorrect flags 0x%x\n",
+ fdata->intensity);
+ return ERR_PTR(-EINVAL);
+ }
+
+ fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
+ if (IS_ERR(fcp))
+ return fcp;
+
+ start_priv->count++;
+ data = fcp->data;
+ ccw = fcp->cpaddr;
+
+ switch (intensity & ~0x08) {
+ case 0x00: /* Normal format. */
+ if (use_prefix) {
+ prefix(ccw++, (struct PFX_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+ /* grant subsystem permission to format R0 */
+ if (r0_perm)
+ ((struct PFX_eckd_data *)data)
+ ->define_extent.ga_extended |= 0x04;
+ data += sizeof(struct PFX_eckd_data);
+ } else {
+ define_extent(ccw++, (struct DE_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
+ /* grant subsystem permission to format R0 */
+ if (r0_perm)
+ ((struct DE_eckd_data *) data)
+ ->ga_extended |= 0x04;
+ data += sizeof(struct DE_eckd_data);
+ }
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, (struct LO_eckd_data *) data,
+ fdata->start_unit, 0, rpt*nr_tracks,
+ DASD_ECKD_CCW_WRITE_CKD, base,
+ fdata->blksize);
+ data += sizeof(struct LO_eckd_data);
+ break;
+ case 0x01: /* Write record zero + format track. */
+ if (use_prefix) {
+ prefix(ccw++, (struct PFX_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+ base, startdev);
+ data += sizeof(struct PFX_eckd_data);
+ } else {
+ define_extent(ccw++, (struct DE_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
+ data += sizeof(struct DE_eckd_data);
+ }
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, (struct LO_eckd_data *) data,
+ fdata->start_unit, 0, rpt * nr_tracks + 1,
+ DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
+ base->block->bp_block);
+ data += sizeof(struct LO_eckd_data);
+ break;
+ case 0x04: /* Invalidate track. */
+ if (use_prefix) {
+ prefix(ccw++, (struct PFX_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+ data += sizeof(struct PFX_eckd_data);
+ } else {
+ define_extent(ccw++, (struct DE_eckd_data *) data,
+ fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
+ data += sizeof(struct DE_eckd_data);
+ }
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, (struct LO_eckd_data *) data,
+ fdata->start_unit, 0, 1,
+ DASD_ECKD_CCW_WRITE_CKD, base, 8);
+ data += sizeof(struct LO_eckd_data);
+ break;
+ }
+
+ for (j = 0; j < nr_tracks; j++) {
+ /* calculate cylinder and head for the current track */
+ set_ch_t(&address,
+ (fdata->start_unit + j) /
+ base_priv->rdc_data.trk_per_cyl,
+ (fdata->start_unit + j) %
+ base_priv->rdc_data.trk_per_cyl);
+ if (intensity & 0x01) { /* write record zero */
+ ect = (struct eckd_count *) data;
+ data += sizeof(struct eckd_count);
+ ect->cyl = address.cyl;
+ ect->head = address.head;
+ ect->record = 0;
+ ect->kl = 0;
+ ect->dl = 8;
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 8;
+ ccw->cda = (__u32)virt_to_phys(ect);
+ ccw++;
+ }
+ if ((intensity & ~0x08) & 0x04) { /* erase track */
+ ect = (struct eckd_count *) data;
+ data += sizeof(struct eckd_count);
+ ect->cyl = address.cyl;
+ ect->head = address.head;
+ ect->record = 1;
+ ect->kl = 0;
+ ect->dl = 0;
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 8;
+ ccw->cda = (__u32)virt_to_phys(ect);
+ } else { /* write remaining records */
+ for (i = 0; i < rpt; i++) {
+ ect = (struct eckd_count *) data;
+ data += sizeof(struct eckd_count);
+ ect->cyl = address.cyl;
+ ect->head = address.head;
+ ect->record = i + 1;
+ ect->kl = 0;
+ ect->dl = fdata->blksize;
+ /*
+ * Check for special tracks 0-1
+ * when formatting CDL
+ */
+ if ((intensity & 0x08) &&
+ address.cyl == 0 && address.head == 0) {
+ if (i < 3) {
+ ect->kl = 4;
+ ect->dl = sizes_trk0[i] - 4;
+ }
+ }
+ if ((intensity & 0x08) &&
+ address.cyl == 0 && address.head == 1) {
+ ect->kl = 44;
+ ect->dl = LABEL_SIZE - 44;
+ }
+ ccw[-1].flags |= CCW_FLAG_CC;
+ if (i != 0 || j == 0)
+ ccw->cmd_code =
+ DASD_ECKD_CCW_WRITE_CKD;
+ else
+ ccw->cmd_code =
+ DASD_ECKD_CCW_WRITE_CKD_MT;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 8;
+ ccw->cda = (__u32)virt_to_phys(ect);
+ ccw++;
+ }
+ }
+ }
+
+ fcp->startdev = startdev;
+ fcp->memdev = startdev;
+ fcp->basedev = base;
+ fcp->retries = 256;
+ fcp->expires = startdev->default_expires * HZ;
+ fcp->buildclk = get_tod_clock();
+ fcp->status = DASD_CQR_FILLED;
+
+ return fcp;
+}
+
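+/*
+ * Illustrative sketch (not compiled): decoding the fdata->intensity bit
+ * string documented in dasd_eckd_build_format() above, e.g. 0x09 means
+ * "write record zero and format track using the compatible disk layout".
+ */
+#if 0
+static void example_decode_intensity(int intensity)
+{
+	pr_info("r0=%d ha=%d invalidate=%d cdl=%d keep_r0=%d\n",
+		!!(intensity & 0x01), !!(intensity & 0x02),
+		!!(intensity & 0x04), !!(intensity & 0x08),
+		!!(intensity & 0x10));
+}
+#endif
+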
+/*
+ * Wrapper function to build a CCW request depending on input data
+ */
+static struct dasd_ccw_req *
+dasd_eckd_format_build_ccw_req(struct dasd_device *base,
+ struct format_data_t *fdata, int enable_pav,
+ int tpm, struct eckd_count *fmt_buffer, int rpt)
+{
+ struct dasd_ccw_req *ccw_req;
+
+ if (!fmt_buffer) {
+ ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
+ } else {
+ if (tpm)
+ ccw_req = dasd_eckd_build_check_tcw(base, fdata,
+ enable_pav,
+ fmt_buffer, rpt);
+ else
+ ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
+ fmt_buffer, rpt);
+ }
+
+ return ccw_req;
+}
+
+/*
+ * Sanity checks on format_data
+ */
+static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
+ struct format_data_t *fdata)
+{
+ struct dasd_eckd_private *private = base->private;
+
+ if (fdata->start_unit >=
+ (private->real_cyl * private->rdc_data.trk_per_cyl)) {
+ dev_warn(&base->cdev->dev,
+ "Start track number %u used in formatting is too big\n",
+ fdata->start_unit);
+ return -EINVAL;
+ }
+ if (fdata->stop_unit >=
+ (private->real_cyl * private->rdc_data.trk_per_cyl)) {
+ dev_warn(&base->cdev->dev,
+ "Stop track number %u used in formatting is too big\n",
+ fdata->stop_unit);
+ return -EINVAL;
+ }
+ if (fdata->start_unit > fdata->stop_unit) {
+ dev_warn(&base->cdev->dev,
+ "Start track %u used in formatting exceeds end track\n",
+ fdata->start_unit);
+ return -EINVAL;
+ }
+ if (dasd_check_blocksize(fdata->blksize) != 0) {
+ dev_warn(&base->cdev->dev,
+ "The DASD cannot be formatted with block size %u\n",
+ fdata->blksize);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * This function will process format_data originally coming from an IOCTL
+ */
+static int dasd_eckd_format_process_data(struct dasd_device *base,
+ struct format_data_t *fdata,
+ int enable_pav, int tpm,
+ struct eckd_count *fmt_buffer, int rpt,
+ struct irb *irb)
+{
+ struct dasd_eckd_private *private = base->private;
+ struct dasd_ccw_req *cqr, *n;
+ struct list_head format_queue;
+ struct dasd_device *device;
+ char *sense = NULL;
+ int old_start, old_stop, format_step;
+ int step, retry;
+ int rc;
+
+ rc = dasd_eckd_format_sanity_checks(base, fdata);
+ if (rc)
+ return rc;
+
+ INIT_LIST_HEAD(&format_queue);
+
+ old_start = fdata->start_unit;
+ old_stop = fdata->stop_unit;
+
+ if (!tpm && fmt_buffer != NULL) {
+ /* Command Mode / Format Check */
+ format_step = 1;
+ } else if (tpm && fmt_buffer != NULL) {
+ /* Transport Mode / Format Check */
+ format_step = DASD_CQR_MAX_CCW / rpt;
+ } else {
+ /* Normal Formatting */
+ format_step = DASD_CQR_MAX_CCW /
+ recs_per_track(&private->rdc_data, 0, fdata->blksize);
+ }
+
+ do {
+ retry = 0;
+ while (fdata->start_unit <= old_stop) {
+ step = fdata->stop_unit - fdata->start_unit + 1;
+ if (step > format_step) {
+ fdata->stop_unit =
+ fdata->start_unit + format_step - 1;
+ }
+
+ cqr = dasd_eckd_format_build_ccw_req(base, fdata,
+ enable_pav, tpm,
+ fmt_buffer, rpt);
+ if (IS_ERR(cqr)) {
+ rc = PTR_ERR(cqr);
+ if (rc == -ENOMEM) {
+ if (list_empty(&format_queue))
+ goto out;
+ /*
+				 * not enough memory available, start
+				 * the queued requests and retry after
+				 * the first requests have finished
+ */
+ retry = 1;
+ break;
+ }
+ goto out_err;
+ }
+ list_add_tail(&cqr->blocklist, &format_queue);
+
+ if (fmt_buffer) {
+ step = fdata->stop_unit - fdata->start_unit + 1;
+ fmt_buffer += rpt * step;
+ }
+ fdata->start_unit = fdata->stop_unit + 1;
+ fdata->stop_unit = old_stop;
+ }
+
+ rc = dasd_sleep_on_queue(&format_queue);
+
+out_err:
+ list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
+ device = cqr->startdev;
+ private = device->private;
+
+ if (cqr->status == DASD_CQR_FAILED) {
+ /*
+ * Only get sense data if called by format
+ * check
+ */
+ if (fmt_buffer && irb) {
+ sense = dasd_get_sense(&cqr->irb);
+ memcpy(irb, &cqr->irb, sizeof(*irb));
+ }
+ rc = -EIO;
+ }
+ list_del_init(&cqr->blocklist);
+ dasd_ffree_request(cqr, device);
+ private->count--;
+ }
+
+ if (rc && rc != -EIO)
+ goto out;
+ if (rc == -EIO) {
+ /*
+ * In case fewer than the expected records are on the
+ * track, we will most likely get a 'No Record Found'
+ * error (in command mode) or a 'File Protected' error
+ * (in transport mode). Those particular cases shouldn't
+ * pass the -EIO to the IOCTL, therefore reset the rc
+ * and continue.
+ */
+ if (sense &&
+ (sense[1] & SNS1_NO_REC_FOUND ||
+ sense[1] & SNS1_FILE_PROTECTED))
+ retry = 1;
+ else
+ goto out;
+ }
+
+ } while (retry);
+
+out:
+ fdata->start_unit = old_start;
+ fdata->stop_unit = old_stop;
+
+ return rc;
+}
+
+static int dasd_eckd_format_device(struct dasd_device *base,
+ struct format_data_t *fdata, int enable_pav)
+{
+ return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
+ 0, NULL);
+}
+
+static bool test_and_set_format_track(struct dasd_format_entry *to_format,
+ struct dasd_ccw_req *cqr)
+{
+ struct dasd_block *block = cqr->block;
+ struct dasd_format_entry *format;
+ unsigned long flags;
+ bool rc = false;
+
+ spin_lock_irqsave(&block->format_lock, flags);
+ if (cqr->trkcount != atomic_read(&block->trkcount)) {
+ /*
+		 * The number of formatted tracks has changed after request
+		 * start and we cannot tell if the current track was involved.
+		 * To avoid data corruption, treat it as if the current track
+		 * is involved.
+ */
+ rc = true;
+ goto out;
+ }
+ list_for_each_entry(format, &block->format_list, list) {
+ if (format->track == to_format->track) {
+ rc = true;
+ goto out;
+ }
+ }
+ list_add_tail(&to_format->list, &block->format_list);
+
+out:
+ spin_unlock_irqrestore(&block->format_lock, flags);
+ return rc;
+}
+
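+/*
+ * Illustrative sketch (not compiled): test_and_set_format_track() relies
+ * on the caller snapshotting block->trkcount when the request is built;
+ * any mismatch later means a track was formatted concurrently and the
+ * request must be treated as affected.
+ */
+#if 0
+static void example_snapshot_trkcount(struct dasd_ccw_req *cqr,
+				      struct dasd_block *block)
+{
+	cqr->trkcount = atomic_read(&block->trkcount);
+}
+#endif
+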
+static void clear_format_track(struct dasd_format_entry *format,
+ struct dasd_block *block)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&block->format_lock, flags);
+ atomic_inc(&block->trkcount);
+ list_del_init(&format->list);
+ spin_unlock_irqrestore(&block->format_lock, flags);
+}
+
+/*
+ * Callback function to free ESE format requests.
+ */
+static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
+{
+ struct dasd_device *device = cqr->startdev;
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_format_entry *format = data;
+
+ clear_format_track(format, cqr->basedev->block);
+ private->count--;
+ dasd_ffree_request(cqr, device);
+}
+
+static struct dasd_ccw_req *
+dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
+ struct irb *irb)
+{
+ struct dasd_eckd_private *private;
+ struct dasd_format_entry *format;
+ struct format_data_t fdata;
+ unsigned int recs_per_trk;
+ struct dasd_ccw_req *fcqr;
+ struct dasd_device *base;
+ struct dasd_block *block;
+ unsigned int blksize;
+ struct request *req;
+ sector_t first_trk;
+ sector_t last_trk;
+ sector_t curr_trk;
+ int rc;
+
+ req = dasd_get_callback_data(cqr);
+ block = cqr->block;
+ base = block->base;
+ private = base->private;
+ blksize = block->bp_block;
+ recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+ format = &startdev->format_entry;
+
+ first_trk = blk_rq_pos(req) >> block->s2b_shift;
+ sector_div(first_trk, recs_per_trk);
+ last_trk =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+ sector_div(last_trk, recs_per_trk);
+ rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
+ if (rc)
+ return ERR_PTR(rc);
+
+ if (curr_trk < first_trk || curr_trk > last_trk) {
+ DBF_DEV_EVENT(DBF_WARNING, startdev,
+ "ESE error track %llu not within range %llu - %llu\n",
+ curr_trk, first_trk, last_trk);
+ return ERR_PTR(-EINVAL);
+ }
+ format->track = curr_trk;
+	/* test if the track is already being formatted by another thread */
+ if (test_and_set_format_track(format, cqr)) {
+ /* this is no real error so do not count down retries */
+ cqr->retries++;
+ return ERR_PTR(-EEXIST);
+ }
+
+ fdata.start_unit = curr_trk;
+ fdata.stop_unit = curr_trk;
+ fdata.blksize = blksize;
+ fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
+
+ rc = dasd_eckd_format_sanity_checks(base, &fdata);
+ if (rc)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * We're building the request with PAV disabled as we're reusing
+ * the former startdev.
+ */
+ fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
+ if (IS_ERR(fcqr))
+ return fcqr;
+
+ fcqr->callback = dasd_eckd_ese_format_cb;
+ fcqr->callback_data = (void *) format;
+
+ return fcqr;
+}
+
+/*
+ * When data is read from an unformatted area of an ESE volume, this function
+ * returns zeroed data and thereby mimics a read of zero data.
+ *
+ * The first unformatted track is the one that got the NRF error; its address
+ * is encoded in the sense data.
+ *
+ * All tracks before have returned valid data and should not be touched.
+ * All tracks after the unformatted track might be formatted or not. This is
+ * currently not known, remember the processed data and return the remainder of
+ * the request to the blocklayer in __dasd_cleanup_cqr().
+ */
+static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
+{
+ struct dasd_eckd_private *private;
+ sector_t first_trk, last_trk;
+ sector_t first_blk, last_blk;
+ unsigned int blksize, off;
+ unsigned int recs_per_trk;
+ struct dasd_device *base;
+ struct req_iterator iter;
+ struct dasd_block *block;
+ unsigned int skip_block;
+ unsigned int blk_count;
+ struct request *req;
+ struct bio_vec bv;
+ sector_t curr_trk;
+ sector_t end_blk;
+ char *dst;
+ int rc;
+
+ req = (struct request *) cqr->callback_data;
+ base = cqr->block->base;
+ blksize = base->block->bp_block;
+ block = cqr->block;
+ private = base->private;
+ skip_block = 0;
+ blk_count = 0;
+
+ recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+ first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
+ sector_div(first_trk, recs_per_trk);
+ last_trk = last_blk =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+ sector_div(last_trk, recs_per_trk);
+ rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
+ if (rc)
+ return rc;
+
+ /* sanity check if the current track from sense data is valid */
+ if (curr_trk < first_trk || curr_trk > last_trk) {
+ DBF_DEV_EVENT(DBF_WARNING, base,
+ "ESE error track %llu not within range %llu - %llu\n",
+ curr_trk, first_trk, last_trk);
+ return -EINVAL;
+ }
+
+ /*
+	 * if the NRF error did not occur on the first track, we have to
+	 * skip over valid blocks
+ */
+ if (curr_trk != first_trk)
+ skip_block = curr_trk * recs_per_trk - first_blk;
+
+ /* we have no information beyond the current track */
+ end_blk = (curr_trk + 1) * recs_per_trk;
+
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ for (off = 0; off < bv.bv_len; off += blksize) {
+ if (first_blk + blk_count >= end_blk) {
+ cqr->proc_bytes = blk_count * blksize;
+ return 0;
+ }
+ if (dst && !skip_block)
+ memset(dst, 0, blksize);
+ else
+ skip_block--;
+ dst += blksize;
+ blk_count++;
+ }
+ }
+ return 0;
+}
+
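+/*
+ * Illustrative sketch (not compiled): a worked example for the skip_block
+ * computation in dasd_eckd_ese_read() - with 12 records per track and a
+ * request starting at block 10, an NRF error on track 2 keeps
+ * 2 * 12 - 10 = 14 already-read blocks intact before zero-filling starts.
+ */
+#if 0
+static unsigned int example_skip_block(sector_t curr_trk,
+				       unsigned int recs_per_trk,
+				       sector_t first_blk)
+{
+	return curr_trk * recs_per_trk - first_blk;
+}
+#endif
+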
+/*
+ * Helper function to count consecutive records of a single track.
+ */
+static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
+ int max)
+{
+ int head;
+ int i;
+
+ head = fmt_buffer[start].head;
+
+ /*
+ * There are 3 conditions where we stop counting:
+ * - if data reoccurs (same head and record may reoccur), which may
+ * happen due to the way DASD_ECKD_CCW_READ_COUNT works
+ * - when the head changes, because we're iterating over several tracks
+ * then (DASD_ECKD_CCW_READ_COUNT_MT)
+ * - when we've reached the end of sensible data in the buffer (the
+ * record will be 0 then)
+ */
+ for (i = start; i < max; i++) {
+ if (i > start) {
+ if ((fmt_buffer[i].head == head &&
+ fmt_buffer[i].record == 1) ||
+ fmt_buffer[i].head != head ||
+ fmt_buffer[i].record == 0)
+ break;
+ }
+ }
+
+ return i - start;
+}
+
+/*
+ * Evaluate a given range of tracks. Data like number of records, blocksize,
+ * record ids, and key length are compared with expected data.
+ *
+ * If a mismatch occurs, the corresponding error bit is set, as well as
+ * additional information, depending on the error.
+ */
+static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
+ struct format_check_t *cdata,
+ int rpt_max, int rpt_exp,
+ int trk_per_cyl, int tpm)
+{
+ struct ch_t geo;
+ int max_entries;
+ int count = 0;
+ int trkcount;
+ int blksize;
+ int pos = 0;
+ int i, j;
+ int kl;
+
+ trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
+ max_entries = trkcount * rpt_max;
+
+ for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
+ /* Calculate the correct next starting position in the buffer */
+ if (tpm) {
+ while (fmt_buffer[pos].record == 0 &&
+ fmt_buffer[pos].dl == 0) {
+ if (pos++ > max_entries)
+ break;
+ }
+ } else {
+ if (i != cdata->expect.start_unit)
+ pos += rpt_max - count;
+ }
+
+ /* Calculate the expected geo values for the current track */
+ set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
+
+ /* Count and check number of records */
+ count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
+
+ if (count < rpt_exp) {
+ cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
+ break;
+ }
+ if (count > rpt_exp) {
+ cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
+ break;
+ }
+
+ for (j = 0; j < count; j++, pos++) {
+ blksize = cdata->expect.blksize;
+ kl = 0;
+
+ /*
+ * Set special values when checking CDL formatted
+ * devices.
+ */
+ if ((cdata->expect.intensity & 0x08) &&
+ geo.cyl == 0 && geo.head == 0) {
+ if (j < 3) {
+ blksize = sizes_trk0[j] - 4;
+ kl = 4;
+ }
+ }
+ if ((cdata->expect.intensity & 0x08) &&
+ geo.cyl == 0 && geo.head == 1) {
+ blksize = LABEL_SIZE - 44;
+ kl = 44;
+ }
+
+ /* Check blocksize */
+ if (fmt_buffer[pos].dl != blksize) {
+ cdata->result = DASD_FMT_ERR_BLKSIZE;
+ goto out;
+ }
+			/* Check key length */
+ if (fmt_buffer[pos].kl != kl) {
+ cdata->result = DASD_FMT_ERR_KEY_LENGTH;
+ goto out;
+ }
+ /* Check if record_id is correct */
+ if (fmt_buffer[pos].cyl != geo.cyl ||
+ fmt_buffer[pos].head != geo.head ||
+ fmt_buffer[pos].record != (j + 1)) {
+ cdata->result = DASD_FMT_ERR_RECORD_ID;
+ goto out;
+ }
+ }
+ }
+
+out:
+ /*
+ * In case of no errors, we need to decrease by one
+ * to get the correct positions.
+ */
+ if (!cdata->result) {
+ i--;
+ pos--;
+ }
+
+ cdata->unit = i;
+ cdata->num_records = count;
+ cdata->rec = fmt_buffer[pos].record;
+ cdata->blksize = fmt_buffer[pos].dl;
+ cdata->key_length = fmt_buffer[pos].kl;
+}
+
+/*
+ * Check the format of a range of tracks of a DASD.
+ */
+static int dasd_eckd_check_device_format(struct dasd_device *base,
+ struct format_check_t *cdata,
+ int enable_pav)
+{
+ struct dasd_eckd_private *private = base->private;
+ struct eckd_count *fmt_buffer;
+ struct irb irb;
+ int rpt_max, rpt_exp;
+ int fmt_buffer_size;
+ int trk_per_cyl;
+ int trkcount;
+ int tpm = 0;
+ int rc;
+
+ trk_per_cyl = private->rdc_data.trk_per_cyl;
+
+ /* Get maximum and expected amount of records per track */
+ rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
+ rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
+
+ trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
+ fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
+
+ fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
+ if (!fmt_buffer)
+ return -ENOMEM;
+
+ /*
+ * A certain FICON feature subset is needed to operate in transport
+ * mode. Additionally, the support for transport mode is implicitly
+ * checked by comparing the buffer size with fcx_max_data. As long as
+ * the buffer size is smaller we can operate in transport mode and
+ * process multiple tracks. If not, only one track at once is being
+ * processed using command mode.
+ */
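+	/*
+	 * Illustrative sizing: checking 10 tracks with rpt_max = 50 (about
+	 * the 3390 maximum for 512 byte records) needs 500 eckd_count
+	 * entries; only if that buffer fits into fcx_max_data can all
+	 * tracks be read with a single transport mode request.
+	 */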
+ if ((private->features.feature[40] & 0x04) &&
+ fmt_buffer_size <= private->fcx_max_data)
+ tpm = 1;
+
+ rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
+ tpm, fmt_buffer, rpt_max, &irb);
+ if (rc && rc != -EIO)
+ goto out;
+ if (rc == -EIO) {
+ /*
+ * If our first attempt with transport mode enabled comes back
+ * with an incorrect length error, we're going to retry the
+ * check with command mode.
+ */
+ if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
+ tpm = 0;
+ rc = dasd_eckd_format_process_data(base, &cdata->expect,
+ enable_pav, tpm,
+ fmt_buffer, rpt_max,
+ &irb);
+ if (rc)
+ goto out;
+ } else {
+ goto out;
+ }
+ }
+
+ dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
+ trk_per_cyl, tpm);
+
+out:
+ kfree(fmt_buffer);
+
+ return rc;
+}
+
+static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+ if (cqr->retries < 0) {
+ cqr->status = DASD_CQR_FAILED;
+ return;
+ }
+ cqr->status = DASD_CQR_FILLED;
+ if (cqr->block && (cqr->startdev != cqr->block->base)) {
+ dasd_eckd_reset_ccw_to_base_io(cqr);
+ cqr->startdev = cqr->block->base;
+ cqr->lpm = dasd_path_get_opm(cqr->block->base);
+ }
+}
+
+static dasd_erp_fn_t
+dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
+{
+ struct dasd_device *device = (struct dasd_device *) cqr->startdev;
+ struct ccw_device *cdev = device->cdev;
+
+ switch (cdev->id.cu_type) {
+ case 0x3990:
+ case 0x2105:
+ case 0x2107:
+ case 0x1750:
+ return dasd_3990_erp_action;
+ case 0x9343:
+ case 0x3880:
+ default:
+ return dasd_default_erp_action;
+ }
+}
+
+static dasd_erp_fn_t
+dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
+{
+ return dasd_default_erp_postaction;
+}
+
+static void dasd_eckd_check_for_device_change(struct dasd_device *device,
+ struct dasd_ccw_req *cqr,
+ struct irb *irb)
+{
+ char mask;
+ char *sense = NULL;
+ struct dasd_eckd_private *private = device->private;
+
+ /* first of all check for state change pending interrupt */
+ mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
+ if ((scsw_dstat(&irb->scsw) & mask) == mask) {
+ /*
+ * for alias only, not in offline processing
+ * and only if not suspended
+ */
+ if (!device->block && private->lcu &&
+ device->state == DASD_STATE_ONLINE &&
+ !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+ !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
+ /* schedule worker to reload device */
+ dasd_reload_device(device);
+ }
+ dasd_generic_handle_state_change(device);
+ return;
+ }
+
+ sense = dasd_get_sense(irb);
+ if (!sense)
+ return;
+
+ /* summary unit check */
+ if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
+ (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
+ if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "eckd suc: device already notified");
+ return;
+ }
+ sense = dasd_get_sense(irb);
+ if (!sense) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "eckd suc: no reason code available");
+ clear_bit(DASD_FLAG_SUC, &device->flags);
+ return;
+		}
+ private->suc_reason = sense[8];
+ DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
+ "eckd handle summary unit check: reason",
+ private->suc_reason);
+ dasd_get_device(device);
+ if (!schedule_work(&device->suc_work))
+ dasd_put_device(device);
+
+ return;
+ }
+
+ /* service information message SIM */
+ if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
+ ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
+ dasd_3990_erp_handle_sim(device, sense);
+ return;
+ }
+
+ /* loss of device reservation is handled via base devices only
+ * as alias devices may be used with several bases
+ */
+ if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
+ (sense[7] == 0x3F) &&
+ (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
+ test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
+ if (device->features & DASD_FEATURE_FAILONSLCK)
+ set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
+ clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+ dev_err(&device->cdev->dev,
+ "The device reservation was lost\n");
+ }
+}
+
+static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
+ unsigned int first_trk,
+ unsigned int last_trk)
+{
+ struct dasd_eckd_private *private = device->private;
+ unsigned int trks_per_vol;
+ int rc = 0;
+
+ trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
+
+ if (first_trk >= trks_per_vol) {
+ dev_warn(&device->cdev->dev,
+ "Start track number %u used in the space release command is too big\n",
+ first_trk);
+ rc = -EINVAL;
+ } else if (last_trk >= trks_per_vol) {
+ dev_warn(&device->cdev->dev,
+ "Stop track number %u used in the space release command is too big\n",
+ last_trk);
+ rc = -EINVAL;
+ } else if (first_trk > last_trk) {
+ dev_warn(&device->cdev->dev,
+ "Start track %u used in the space release command exceeds the end track\n",
+ first_trk);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+/*
+ * Helper function to count the number of extents involved in a given range
+ * with extent alignment in mind.
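+ *
+ * Illustrative example: with trks_per_ext = 16, the range 10..50 spans one
+ * leading partial extent (tracks 10-15), two full extents (16-31 and 32-47),
+ * and one trailing partial extent (48-50), so the result is 4.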
+ */
+static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
+{
+ int cur_pos = 0;
+ int count = 0;
+ int tmp;
+
+ if (from == to)
+ return 1;
+
+ /* Count first partial extent */
+ if (from % trks_per_ext != 0) {
+ tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
+ if (tmp > to)
+ tmp = to;
+ cur_pos = tmp - from + 1;
+ count++;
+ }
+ /* Count full extents */
+ if (to - (from + cur_pos) + 1 >= trks_per_ext) {
+ tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
+ count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
+ cur_pos = tmp;
+ }
+ /* Count last partial extent */
+ if (cur_pos < to)
+ count++;
+
+ return count;
+}
+
+static int dasd_in_copy_relation(struct dasd_device *device)
+{
+ struct dasd_pprc_data_sc4 *temp;
+ int rc;
+
+ if (!dasd_eckd_pprc_enabled(device))
+ return 0;
+
+ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ rc = dasd_eckd_query_pprc_status(device, temp);
+ if (!rc)
+ rc = temp->dev_info[0].state;
+
+ kfree(temp);
+ return rc;
+}
+
+/*
+ * Release allocated space for a given range or an entire volume.
+ */
+static struct dasd_ccw_req *
+dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
+ struct request *req, unsigned int first_trk,
+ unsigned int last_trk, int by_extent)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_dso_ras_ext_range *ras_range;
+ struct dasd_rssd_features *features;
+ struct dasd_dso_ras_data *ras_data;
+ u16 heads, beg_head, end_head;
+ int cur_to_trk, cur_from_trk;
+ struct dasd_ccw_req *cqr;
+ u32 beg_cyl, end_cyl;
+ int copy_relation;
+ struct ccw1 *ccw;
+ int trks_per_ext;
+ size_t ras_size;
+ size_t size;
+ int nr_exts;
+ void *rq;
+ int i;
+
+ if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
+ return ERR_PTR(-EINVAL);
+
+ copy_relation = dasd_in_copy_relation(device);
+ if (copy_relation < 0)
+ return ERR_PTR(copy_relation);
+
+ rq = req ? blk_mq_rq_to_pdu(req) : NULL;
+
+ features = &private->features;
+
+ trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
+ nr_exts = 0;
+ if (by_extent)
+ nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
+ ras_size = sizeof(*ras_data);
+ size = ras_size + (nr_exts * sizeof(*ras_range));
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate RAS request");
+ return cqr;
+ }
+
+ ras_data = cqr->data;
+ memset(ras_data, 0, size);
+
+ ras_data->order = DSO_ORDER_RAS;
+ ras_data->flags.vol_type = 0; /* CKD volume */
+ /* Release specified extents or entire volume */
+ ras_data->op_flags.by_extent = by_extent;
+ /*
+ * This bit guarantees initialisation of tracks within an extent that is
+ * not fully specified, but is only supported with a certain feature
+ * subset and for devices not in a copy relation.
+ */
+ if (features->feature[56] & 0x01 && !copy_relation)
+ ras_data->op_flags.guarantee_init = 1;
+
+ ras_data->lss = private->conf.ned->ID;
+ ras_data->dev_addr = private->conf.ned->unit_addr;
+ ras_data->nr_exts = nr_exts;
+
+ if (by_extent) {
+ heads = private->rdc_data.trk_per_cyl;
+ cur_from_trk = first_trk;
+ cur_to_trk = first_trk + trks_per_ext -
+ (first_trk % trks_per_ext) - 1;
+ if (cur_to_trk > last_trk)
+ cur_to_trk = last_trk;
+ ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
+
+ for (i = 0; i < nr_exts; i++) {
+ beg_cyl = cur_from_trk / heads;
+ beg_head = cur_from_trk % heads;
+ end_cyl = cur_to_trk / heads;
+ end_head = cur_to_trk % heads;
+
+ set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
+ set_ch_t(&ras_range->end_ext, end_cyl, end_head);
+
+ cur_from_trk = cur_to_trk + 1;
+ cur_to_trk = cur_from_trk + trks_per_ext - 1;
+ if (cur_to_trk > last_trk)
+ cur_to_trk = last_trk;
+ ras_range++;
+ }
+ }
+
+ ccw = cqr->cpaddr;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cmd_code = DASD_ECKD_CCW_DSO;
+ ccw->count = size;
+
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = block;
+ cqr->retries = 256;
+ cqr->expires = device->default_expires * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ return cqr;
+}
+
+static int dasd_eckd_release_space_full(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+
+ cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
+ if (IS_ERR(cqr))
+ return PTR_ERR(cqr);
+
+ rc = dasd_sleep_on_interruptible(cqr);
+
+ dasd_sfree_request(cqr, cqr->memdev);
+
+ return rc;
+}
+
+static int dasd_eckd_release_space_trks(struct dasd_device *device,
+ unsigned int from, unsigned int to)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_block *block = device->block;
+ struct dasd_ccw_req *cqr, *n;
+ struct list_head ras_queue;
+ unsigned int device_exts;
+ int trks_per_ext;
+ int stop, step;
+ int cur_pos;
+ int rc = 0;
+ int retry;
+
+ INIT_LIST_HEAD(&ras_queue);
+
+ device_exts = private->real_cyl / dasd_eckd_ext_size(device);
+ trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
+
+ /* Make sure device limits are not exceeded */
+ step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
+ cur_pos = from;
+
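+	/*
+	 * Illustrative example: with trks_per_ext = 315 and step = 630, a
+	 * release starting at track 100 builds its first request for tracks
+	 * 100 to 629, so all following requests start and end on extent
+	 * boundaries (the last one may be clipped at 'to').
+	 */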
+ do {
+ retry = 0;
+ while (cur_pos < to) {
+ stop = cur_pos + step -
+ ((cur_pos + step) % trks_per_ext) - 1;
+ if (stop > to)
+ stop = to;
+
+ cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
+ if (IS_ERR(cqr)) {
+ rc = PTR_ERR(cqr);
+ if (rc == -ENOMEM) {
+ if (list_empty(&ras_queue))
+ goto out;
+ retry = 1;
+ break;
+ }
+ goto err_out;
+ }
+
+ spin_lock_irq(&block->queue_lock);
+ list_add_tail(&cqr->blocklist, &ras_queue);
+ spin_unlock_irq(&block->queue_lock);
+ cur_pos = stop + 1;
+ }
+
+ rc = dasd_sleep_on_queue_interruptible(&ras_queue);
+
+err_out:
+ list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
+ device = cqr->startdev;
+ private = device->private;
+
+ spin_lock_irq(&block->queue_lock);
+ list_del_init(&cqr->blocklist);
+ spin_unlock_irq(&block->queue_lock);
+ dasd_sfree_request(cqr, device);
+ private->count--;
+ }
+ } while (retry);
+
+out:
+ return rc;
+}
+
+static int dasd_eckd_release_space(struct dasd_device *device,
+ struct format_data_t *rdata)
+{
+ if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
+ return dasd_eckd_release_space_full(device);
+ else if (rdata->intensity == 0)
+ return dasd_eckd_release_space_trks(device, rdata->start_unit,
+ rdata->stop_unit);
+ else
+ return -EINVAL;
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
+ struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req,
+ sector_t first_rec,
+ sector_t last_rec,
+ sector_t first_trk,
+ sector_t last_trk,
+ unsigned int first_offs,
+ unsigned int last_offs,
+ unsigned int blk_per_trk,
+ unsigned int blksize)
+{
+ struct dasd_eckd_private *private;
+ unsigned long *idaws;
+ struct LO_eckd_data *LO_data;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ struct req_iterator iter;
+ struct bio_vec bv;
+ char *dst;
+ unsigned int off;
+ int count, cidaw, cplength, datasize;
+ sector_t recid;
+ unsigned char cmd, rcmd;
+ int use_prefix;
+ struct dasd_device *basedev;
+
+ basedev = block->base;
+ private = basedev->private;
+ if (rq_data_dir(req) == READ)
+ cmd = DASD_ECKD_CCW_READ_MT;
+ else if (rq_data_dir(req) == WRITE)
+ cmd = DASD_ECKD_CCW_WRITE_MT;
+ else
+ return ERR_PTR(-EINVAL);
+
+ /* Check struct bio and count the number of blocks for the request. */
+ count = 0;
+ cidaw = 0;
+ rq_for_each_segment(bv, req, iter) {
+ if (bv.bv_len & (blksize - 1))
+			/* ECKD can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv.bv_len >> (block->s2b_shift + 9);
+		if (idal_is_needed(page_address(bv.bv_page), bv.bv_len))
+ cidaw += bv.bv_len >> (block->s2b_shift + 9);
+ }
+ /* Paranoia. */
+ if (count != last_rec - first_rec + 1)
+ return ERR_PTR(-EINVAL);
+
+ /* use the prefix command if available */
+ use_prefix = private->features.feature[8] & 0x01;
+ if (use_prefix) {
+ /* 1x prefix + number of blocks */
+ cplength = 2 + count;
+ /* 1x prefix + cidaws*sizeof(long) */
+ datasize = sizeof(struct PFX_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ cidaw * sizeof(unsigned long);
+ } else {
+ /* 1x define extent + 1x locate record + number of blocks */
+ cplength = 2 + count;
+ /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
+ datasize = sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data) +
+ cidaw * sizeof(unsigned long);
+ }
+ /* Find out the number of additional locate record ccws for cdl. */
+ if (private->uses_cdl && first_rec < 2*blk_per_trk) {
+ if (last_rec >= 2*blk_per_trk)
+ count = 2*blk_per_trk - first_rec;
+ cplength += count;
+ datasize += count*sizeof(struct LO_eckd_data);
+ }
+ /* Allocate the ccw request. */
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+ startdev, blk_mq_rq_to_pdu(req));
+ if (IS_ERR(cqr))
+ return cqr;
+ ccw = cqr->cpaddr;
+ /* First ccw is define extent or prefix. */
+ if (use_prefix) {
+ if (prefix(ccw++, cqr->data, first_trk,
+ last_trk, cmd, basedev, startdev) == -EAGAIN) {
+ /* Clock not in sync and XRC is enabled.
+ * Try again later.
+ */
+ dasd_sfree_request(cqr, startdev);
+ return ERR_PTR(-EAGAIN);
+ }
+ idaws = (unsigned long *) (cqr->data +
+ sizeof(struct PFX_eckd_data));
+ } else {
+ if (define_extent(ccw++, cqr->data, first_trk,
+ last_trk, cmd, basedev, 0) == -EAGAIN) {
+ /* Clock not in sync and XRC is enabled.
+ * Try again later.
+ */
+ dasd_sfree_request(cqr, startdev);
+ return ERR_PTR(-EAGAIN);
+ }
+ idaws = (unsigned long *) (cqr->data +
+ sizeof(struct DE_eckd_data));
+ }
+	/* Build locate_record + read/write ccws. */
+ LO_data = (struct LO_eckd_data *) (idaws + cidaw);
+ recid = first_rec;
+ if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
+ /* Only standard blocks so there is just one locate record. */
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
+ last_rec - recid + 1, cmd, basedev, blksize);
+ }
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ if (dasd_page_cache) {
+ char *copy = kmem_cache_alloc(dasd_page_cache,
+ GFP_DMA | __GFP_NOWARN);
+ if (copy && rq_data_dir(req) == WRITE)
+ memcpy(copy + bv.bv_offset, dst, bv.bv_len);
+ if (copy)
+ dst = copy + bv.bv_offset;
+ }
+ for (off = 0; off < bv.bv_len; off += blksize) {
+ sector_t trkid = recid;
+ unsigned int recoffs = sector_div(trkid, blk_per_trk);
+ rcmd = cmd;
+ count = blksize;
+ /* Locate record for cdl special block ? */
+ if (private->uses_cdl && recid < 2*blk_per_trk) {
+ if (dasd_eckd_cdl_special(blk_per_trk, recid)){
+ rcmd |= 0x8;
+ count = dasd_eckd_cdl_reclen(recid);
+ if (count < blksize &&
+ rq_data_dir(req) == READ)
+ memset(dst + count, 0xe5,
+ blksize - count);
+ }
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++,
+ trkid, recoffs + 1,
+ 1, rcmd, basedev, count);
+ }
+ /* Locate record for standard blocks ? */
+ if (private->uses_cdl && recid == 2*blk_per_trk) {
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++,
+ trkid, recoffs + 1,
+ last_rec - recid + 1,
+ cmd, basedev, count);
+ }
+ /* Read/write ccw. */
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = rcmd;
+ ccw->count = count;
+ if (idal_is_needed(dst, blksize)) {
+ ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->flags = CCW_FLAG_IDA;
+ idaws = idal_create_words(idaws, dst, blksize);
+ } else {
+ ccw->cda = (__u32)virt_to_phys(dst);
+ ccw->flags = 0;
+ }
+ ccw++;
+ dst += blksize;
+ recid++;
+ }
+ }
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->block = block;
+ cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
+ cqr->lpm = dasd_path_get_ppm(startdev);
+ cqr->retries = startdev->default_retries;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ /* Set flags to suppress output for expected errors */
+ if (dasd_eckd_is_ese(basedev)) {
+ set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ }
+
+ return cqr;
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
+ struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req,
+ sector_t first_rec,
+ sector_t last_rec,
+ sector_t first_trk,
+ sector_t last_trk,
+ unsigned int first_offs,
+ unsigned int last_offs,
+ unsigned int blk_per_trk,
+ unsigned int blksize)
+{
+ unsigned long *idaws;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ struct req_iterator iter;
+ struct bio_vec bv;
+ char *dst, *idaw_dst;
+ unsigned int cidaw, cplength, datasize;
+ unsigned int tlf;
+ sector_t recid;
+ unsigned char cmd;
+ struct dasd_device *basedev;
+ unsigned int trkcount, count, count_to_trk_end;
+ unsigned int idaw_len, seg_len, part_len, len_to_track_end;
+ unsigned char new_track, end_idaw;
+ sector_t trkid;
+ unsigned int recoffs;
+
+ basedev = block->base;
+ if (rq_data_dir(req) == READ)
+ cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
+ else if (rq_data_dir(req) == WRITE)
+ cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
+ else
+ return ERR_PTR(-EINVAL);
+
+ /* Track based I/O needs IDAWs for each page, and not just for
+	 * 64 bit addresses. We need additional IDAWs for pages
+ * that get filled from two tracks, so we use the number
+ * of records as upper limit.
+ */
+ cidaw = last_rec - first_rec + 1;
+ trkcount = last_trk - first_trk + 1;
+
+ /* 1x prefix + one read/write ccw per track */
+ cplength = 1 + trkcount;
+
+ datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
+
+ /* Allocate the ccw request. */
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+ startdev, blk_mq_rq_to_pdu(req));
+ if (IS_ERR(cqr))
+ return cqr;
+ ccw = cqr->cpaddr;
+ /* transfer length factor: how many bytes to read from the last track */
+ if (first_trk == last_trk)
+ tlf = last_offs - first_offs + 1;
+ else
+ tlf = last_offs + 1;
+ tlf *= blksize;
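+	/*
+	 * Illustrative example: a request ending at record offset 5 on its
+	 * last track with 4k blocks transfers (5 + 1) * 4096 = 24576 bytes
+	 * from that track.
+	 */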
+
+ if (prefix_LRE(ccw++, cqr->data, first_trk,
+ last_trk, cmd, basedev, startdev,
+ 1 /* format */, first_offs + 1,
+ trkcount, blksize,
+ tlf) == -EAGAIN) {
+ /* Clock not in sync and XRC is enabled.
+ * Try again later.
+ */
+ dasd_sfree_request(cqr, startdev);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ /*
+ * The translation of request into ccw programs must meet the
+ * following conditions:
+ * - all idaws but the first and the last must address full pages
+ * (or 2K blocks on 31-bit)
+	 * - the scope of a ccw and its idal ends at the track boundaries
+ */
+ idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
+ recid = first_rec;
+ new_track = 1;
+ end_idaw = 0;
+ len_to_track_end = 0;
+ idaw_dst = NULL;
+ idaw_len = 0;
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ seg_len = bv.bv_len;
+ while (seg_len) {
+ if (new_track) {
+ trkid = recid;
+ recoffs = sector_div(trkid, blk_per_trk);
+ count_to_trk_end = blk_per_trk - recoffs;
+ count = min((last_rec - recid + 1),
+ (sector_t)count_to_trk_end);
+ len_to_track_end = count * blksize;
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = cmd;
+ ccw->count = len_to_track_end;
+ ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->flags = CCW_FLAG_IDA;
+ ccw++;
+ recid += count;
+ new_track = 0;
+ /* first idaw for a ccw may start anywhere */
+ if (!idaw_dst)
+ idaw_dst = dst;
+ }
+ /* If we start a new idaw, we must make sure that it
+ * starts on an IDA_BLOCK_SIZE boundary.
+ * If we continue an idaw, we must make sure that the
+ * current segment begins where the so far accumulated
+ * idaw ends
+ */
+ if (!idaw_dst) {
+ if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) {
+ dasd_sfree_request(cqr, startdev);
+ return ERR_PTR(-ERANGE);
+ } else
+ idaw_dst = dst;
+ }
+ if ((idaw_dst + idaw_len) != dst) {
+ dasd_sfree_request(cqr, startdev);
+ return ERR_PTR(-ERANGE);
+ }
+ part_len = min(seg_len, len_to_track_end);
+ seg_len -= part_len;
+ dst += part_len;
+ idaw_len += part_len;
+ len_to_track_end -= part_len;
+ /* collected memory area ends on an IDA_BLOCK border,
+ * -> create an idaw
+ * idal_create_words will handle cases where idaw_len
+			 * is larger than IDA_BLOCK_SIZE
+ */
+ if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
+ end_idaw = 1;
+ /* We also need to end the idaw at track end */
+ if (!len_to_track_end) {
+ new_track = 1;
+ end_idaw = 1;
+ }
+ if (end_idaw) {
+ idaws = idal_create_words(idaws, idaw_dst,
+ idaw_len);
+ idaw_dst = NULL;
+ idaw_len = 0;
+ end_idaw = 0;
+ }
+ }
+ }
+
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->block = block;
+ cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
+ cqr->lpm = dasd_path_get_ppm(startdev);
+ cqr->retries = startdev->default_retries;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ /* Set flags to suppress output for expected errors */
+ if (dasd_eckd_is_ese(basedev))
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+ return cqr;
+}
+
+static int prepare_itcw(struct itcw *itcw,
+ unsigned int trk, unsigned int totrk, int cmd,
+ struct dasd_device *basedev,
+ struct dasd_device *startdev,
+ unsigned int rec_on_trk, int count,
+ unsigned int blksize,
+ unsigned int total_data_size,
+ unsigned int tlf,
+ unsigned int blk_per_trk)
+{
+ struct PFX_eckd_data pfxdata;
+ struct dasd_eckd_private *basepriv, *startpriv;
+ struct DE_eckd_data *dedata;
+ struct LRE_eckd_data *lredata;
+ struct dcw *dcw;
+
+ u32 begcyl, endcyl;
+ u16 heads, beghead, endhead;
+ u8 pfx_cmd;
+
+ int rc = 0;
+ int sector = 0;
+ int dn, d;
+
+ /* setup prefix data */
+ basepriv = basedev->private;
+ startpriv = startdev->private;
+ dedata = &pfxdata.define_extent;
+ lredata = &pfxdata.locate_record;
+
+ memset(&pfxdata, 0, sizeof(pfxdata));
+ pfxdata.format = 1; /* PFX with LRE */
+ pfxdata.base_address = basepriv->conf.ned->unit_addr;
+ pfxdata.base_lss = basepriv->conf.ned->ID;
+ pfxdata.validity.define_extent = 1;
+
+ /* private uid is kept up to date, conf_data may be outdated */
+ if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
+ pfxdata.validity.verify_base = 1;
+
+ if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
+ pfxdata.validity.verify_base = 1;
+ pfxdata.validity.hyper_pav = 1;
+ }
+
+ switch (cmd) {
+ case DASD_ECKD_CCW_READ_TRACK_DATA:
+ dedata->mask.perm = 0x1;
+ dedata->attributes.operation = basepriv->attrib.operation;
+ dedata->blk_size = blksize;
+ dedata->ga_extended |= 0x42;
+ lredata->operation.orientation = 0x0;
+ lredata->operation.operation = 0x0C;
+ lredata->auxiliary.check_bytes = 0x01;
+ pfx_cmd = DASD_ECKD_CCW_PFX_READ;
+ break;
+ case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+ dedata->mask.perm = 0x02;
+ dedata->attributes.operation = basepriv->attrib.operation;
+ dedata->blk_size = blksize;
+ rc = set_timestamp(NULL, dedata, basedev);
+ dedata->ga_extended |= 0x42;
+ lredata->operation.orientation = 0x0;
+ lredata->operation.operation = 0x3F;
+ lredata->extended_operation = 0x23;
+ lredata->auxiliary.check_bytes = 0x2;
+ /*
+ * If XRC is supported the System Time Stamp is set. The
+ * validity of the time stamp must be reflected in the prefix
+ * data as well.
+ */
+ if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
+ pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
+ pfx_cmd = DASD_ECKD_CCW_PFX;
+ break;
+ case DASD_ECKD_CCW_READ_COUNT_MT:
+ dedata->mask.perm = 0x1;
+ dedata->attributes.operation = DASD_BYPASS_CACHE;
+ dedata->ga_extended |= 0x42;
+ dedata->blk_size = blksize;
+ lredata->operation.orientation = 0x2;
+ lredata->operation.operation = 0x16;
+ lredata->auxiliary.check_bytes = 0x01;
+ pfx_cmd = DASD_ECKD_CCW_PFX_READ;
+ break;
+ default:
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "prepare itcw, unknown opcode 0x%x", cmd);
+ BUG();
+ break;
+ }
+ if (rc)
+ return rc;
+
+ dedata->attributes.mode = 0x3; /* ECKD */
+
+ heads = basepriv->rdc_data.trk_per_cyl;
+ begcyl = trk / heads;
+ beghead = trk % heads;
+ endcyl = totrk / heads;
+ endhead = totrk % heads;
+
+ /* check for sequential prestage - enhance cylinder range */
+ if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
+ dedata->attributes.operation == DASD_SEQ_ACCESS) {
+
+ if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
+ endcyl += basepriv->attrib.nr_cyl;
+ else
+ endcyl = (basepriv->real_cyl - 1);
+ }
+
+ set_ch_t(&dedata->beg_ext, begcyl, beghead);
+ set_ch_t(&dedata->end_ext, endcyl, endhead);
+
+ dedata->ep_format = 0x20; /* records per track is valid */
+ dedata->ep_rec_per_track = blk_per_trk;
+
+ if (rec_on_trk) {
+ switch (basepriv->rdc_data.dev_type) {
+ case 0x3390:
+ dn = ceil_quot(blksize + 6, 232);
+ d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
+ sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
+ break;
+ case 0x3380:
+ d = 7 + ceil_quot(blksize + 12, 32);
+ sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
+ break;
+ }
+ }
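+	/*
+	 * Illustrative example: for a 3390 with blksize = 4096 the formulas
+	 * above give dn = 18 and d = 133, so record 2 starts at hardware
+	 * sector (49 + 1 * (10 + 133)) / 8 = 24.
+	 */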
+
+ if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
+ lredata->auxiliary.length_valid = 0;
+ lredata->auxiliary.length_scope = 0;
+ lredata->sector = 0xff;
+ } else {
+ lredata->auxiliary.length_valid = 1;
+ lredata->auxiliary.length_scope = 1;
+ lredata->sector = sector;
+ }
+ lredata->auxiliary.imbedded_ccw_valid = 1;
+ lredata->length = tlf;
+ lredata->imbedded_ccw = cmd;
+ lredata->count = count;
+ set_ch_t(&lredata->seek_addr, begcyl, beghead);
+ lredata->search_arg.cyl = lredata->seek_addr.cyl;
+ lredata->search_arg.head = lredata->seek_addr.head;
+ lredata->search_arg.record = rec_on_trk;
+
+ dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
+ &pfxdata, sizeof(pfxdata), total_data_size);
+ return PTR_ERR_OR_ZERO(dcw);
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
+ struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req,
+ sector_t first_rec,
+ sector_t last_rec,
+ sector_t first_trk,
+ sector_t last_trk,
+ unsigned int first_offs,
+ unsigned int last_offs,
+ unsigned int blk_per_trk,
+ unsigned int blksize)
+{
+ struct dasd_ccw_req *cqr;
+ struct req_iterator iter;
+ struct bio_vec bv;
+ char *dst;
+ unsigned int trkcount, ctidaw;
+ unsigned char cmd;
+ struct dasd_device *basedev;
+ unsigned int tlf;
+ struct itcw *itcw;
+ struct tidaw *last_tidaw = NULL;
+ int itcw_op;
+ size_t itcw_size;
+ u8 tidaw_flags;
+ unsigned int seg_len, part_len, len_to_track_end;
+ unsigned char new_track;
+ sector_t recid, trkid;
+ unsigned int offs;
+ unsigned int count, count_to_trk_end;
+ int ret;
+
+ basedev = block->base;
+ if (rq_data_dir(req) == READ) {
+ cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
+ itcw_op = ITCW_OP_READ;
+ } else if (rq_data_dir(req) == WRITE) {
+ cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
+ itcw_op = ITCW_OP_WRITE;
+ } else
+ return ERR_PTR(-EINVAL);
+
+	/* Track-based I/O needs to address all memory via TIDAWs,
+	 * not just 64 bit addresses. This allows us to map
+ * each segment directly to one tidaw.
+ * In the case of write requests, additional tidaws may
+ * be needed when a segment crosses a track boundary.
+ */
+ trkcount = last_trk - first_trk + 1;
+ ctidaw = 0;
+ rq_for_each_segment(bv, req, iter) {
+ ++ctidaw;
+ }
+ if (rq_data_dir(req) == WRITE)
+ ctidaw += (last_trk - first_trk);
+
+ /* Allocate the ccw request. */
+ itcw_size = itcw_calc_size(0, ctidaw, 0);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+ blk_mq_rq_to_pdu(req));
+ if (IS_ERR(cqr))
+ return cqr;
+
+ /* transfer length factor: how many bytes to read from the last track */
+ if (first_trk == last_trk)
+ tlf = last_offs - first_offs + 1;
+ else
+ tlf = last_offs + 1;
+ tlf *= blksize;
+
+ itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
+ if (IS_ERR(itcw)) {
+ ret = -EINVAL;
+ goto out_error;
+ }
+ cqr->cpaddr = itcw_get_tcw(itcw);
+ if (prepare_itcw(itcw, first_trk, last_trk,
+ cmd, basedev, startdev,
+ first_offs + 1,
+ trkcount, blksize,
+ (last_rec - first_rec + 1) * blksize,
+ tlf, blk_per_trk) == -EAGAIN) {
+ /* Clock not in sync and XRC is enabled.
+ * Try again later.
+ */
+ ret = -EAGAIN;
+ goto out_error;
+ }
+ len_to_track_end = 0;
+ /*
+	 * A tidaw can address 4k of memory, but must not cross page boundaries.
+ * We can let the block layer handle this by setting
+ * blk_queue_segment_boundary to page boundaries and
+ * blk_max_segment_size to page size when setting up the request queue.
+ * For write requests, a TIDAW must not cross track boundaries, because
+ * we have to set the CBC flag on the last tidaw for each track.
+ */
+ if (rq_data_dir(req) == WRITE) {
+ new_track = 1;
+ recid = first_rec;
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ seg_len = bv.bv_len;
+ while (seg_len) {
+ if (new_track) {
+ trkid = recid;
+ offs = sector_div(trkid, blk_per_trk);
+ count_to_trk_end = blk_per_trk - offs;
+ count = min((last_rec - recid + 1),
+ (sector_t)count_to_trk_end);
+ len_to_track_end = count * blksize;
+ recid += count;
+ new_track = 0;
+ }
+ part_len = min(seg_len, len_to_track_end);
+ seg_len -= part_len;
+ len_to_track_end -= part_len;
+ /* We need to end the tidaw at track end */
+ if (!len_to_track_end) {
+ new_track = 1;
+ tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
+ } else
+ tidaw_flags = 0;
+ last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
+ dst, part_len);
+ if (IS_ERR(last_tidaw)) {
+ ret = -EINVAL;
+ goto out_error;
+ }
+ dst += part_len;
+ }
+ }
+ } else {
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ last_tidaw = itcw_add_tidaw(itcw, 0x00,
+ dst, bv.bv_len);
+ if (IS_ERR(last_tidaw)) {
+ ret = -EINVAL;
+ goto out_error;
+ }
+ }
+ }
+ last_tidaw->flags |= TIDAW_FLAGS_LAST;
+ last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
+ itcw_finalize(itcw);
+
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->cpmode = 1;
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->block = block;
+ cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
+ cqr->lpm = dasd_path_get_ppm(startdev);
+ cqr->retries = startdev->default_retries;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ /* Set flags to suppress output for expected errors */
+ if (dasd_eckd_is_ese(basedev)) {
+ set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ }
+
+ return cqr;
+out_error:
+ dasd_sfree_request(cqr, startdev);
+ return ERR_PTR(ret);
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req)
+{
+ int cmdrtd, cmdwtd;
+ int use_prefix;
+ int fcx_multitrack;
+ struct dasd_eckd_private *private;
+ struct dasd_device *basedev;
+ sector_t first_rec, last_rec;
+ sector_t first_trk, last_trk;
+ unsigned int first_offs, last_offs;
+ unsigned int blk_per_trk, blksize;
+ int cdlspecial;
+ unsigned int data_size;
+ struct dasd_ccw_req *cqr;
+
+ basedev = block->base;
+ private = basedev->private;
+
+ /* Calculate number of blocks/records per track. */
+ blksize = block->bp_block;
+ blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+ if (blk_per_trk == 0)
+ return ERR_PTR(-EINVAL);
+ /* Calculate record id of first and last block. */
+ first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
+ first_offs = sector_div(first_trk, blk_per_trk);
+ last_rec = last_trk =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+ last_offs = sector_div(last_trk, blk_per_trk);
+ cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
+
+ fcx_multitrack = private->features.feature[40] & 0x20;
+ data_size = blk_rq_bytes(req);
+ if (data_size % blksize)
+ return ERR_PTR(-EINVAL);
+	/* TPM write requests add 4 bytes of CBC data at each track boundary */
+ if (rq_data_dir(req) == WRITE)
+ data_size += (last_trk - first_trk) * 4;
+
+ /* is read track data and write track data in command mode supported? */
+ cmdrtd = private->features.feature[9] & 0x20;
+ cmdwtd = private->features.feature[12] & 0x40;
+ use_prefix = private->features.feature[8] & 0x01;
+
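+	/*
+	 * Choose the channel program flavor: transport mode when the data
+	 * fits into fcx_max_data and multitrack is possible (or only one
+	 * track is involved), else track based command mode if supported,
+	 * else the single-block command mode path as a fallback. CDL
+	 * special blocks and dasd_page_cache always use the fallback.
+	 */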
+ cqr = NULL;
+ if (cdlspecial || dasd_page_cache) {
+ /* do nothing, just fall through to the cmd mode single case */
+ } else if ((data_size <= private->fcx_max_data)
+ && (fcx_multitrack || (first_trk == last_trk))) {
+ cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
+ first_rec, last_rec,
+ first_trk, last_trk,
+ first_offs, last_offs,
+ blk_per_trk, blksize);
+ if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
+ (PTR_ERR(cqr) != -ENOMEM))
+ cqr = NULL;
+ } else if (use_prefix &&
+ (((rq_data_dir(req) == READ) && cmdrtd) ||
+ ((rq_data_dir(req) == WRITE) && cmdwtd))) {
+ cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
+ first_rec, last_rec,
+ first_trk, last_trk,
+ first_offs, last_offs,
+ blk_per_trk, blksize);
+ if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
+ (PTR_ERR(cqr) != -ENOMEM))
+ cqr = NULL;
+ }
+ if (!cqr)
+ cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
+ first_rec, last_rec,
+ first_trk, last_trk,
+ first_offs, last_offs,
+ blk_per_trk, blksize);
+ return cqr;
+}
+
+static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req)
+{
+ sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
+ unsigned int seg_len, len_to_track_end;
+ unsigned int cidaw, cplength, datasize;
+ sector_t first_trk, last_trk, sectors;
+ struct dasd_eckd_private *base_priv;
+ struct dasd_device *basedev;
+ struct req_iterator iter;
+ struct dasd_ccw_req *cqr;
+ unsigned int trkcount;
+ unsigned long *idaws;
+ unsigned int size;
+ unsigned char cmd;
+ struct bio_vec bv;
+ struct ccw1 *ccw;
+ int use_prefix;
+ void *data;
+ char *dst;
+
+ /*
+	 * Raw track access needs to be a multiple of 64k and on a 64k boundary.
+ * For read requests we can fix an incorrect alignment by padding
+ * the request with dummy pages.
+ */
+ start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
+ end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
+ DASD_RAW_SECTORS_PER_TRACK;
+ end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
+ DASD_RAW_SECTORS_PER_TRACK;
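+	/*
+	 * Illustrative example: one raw track image is 64k, i.e. 128
+	 * sectors of 512 bytes. A read starting at sector 130 has
+	 * start_padding_sectors = 2; the two missing leading sectors are
+	 * later covered by dummy pages (rawpadpage).
+	 */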
+ basedev = block->base;
+ if ((start_padding_sectors || end_padding_sectors) &&
+ (rq_data_dir(req) == WRITE)) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "raw write not track aligned (%llu,%llu) req %p",
+ start_padding_sectors, end_padding_sectors, req);
+ return ERR_PTR(-EINVAL);
+ }
+
+ first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
+ last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
+ DASD_RAW_SECTORS_PER_TRACK;
+ trkcount = last_trk - first_trk + 1;
+
+ if (rq_data_dir(req) == READ)
+ cmd = DASD_ECKD_CCW_READ_TRACK;
+ else if (rq_data_dir(req) == WRITE)
+ cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
+ else
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Raw track based I/O needs IDAWs for each page,
+ * and not just for 64 bit addresses.
+ */
+ cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
+
+ /*
+ * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
+ * of extended parameter. This is needed for write full track.
+ */
+ base_priv = basedev->private;
+ use_prefix = base_priv->features.feature[8] & 0x01;
+ if (use_prefix) {
+ cplength = 1 + trkcount;
+ size = sizeof(struct PFX_eckd_data) + 2;
+ } else {
+ cplength = 2 + trkcount;
+ size = sizeof(struct DE_eckd_data) +
+ sizeof(struct LRE_eckd_data) + 2;
+ }
+ size = ALIGN(size, 8);
+
+ datasize = size + cidaw * sizeof(unsigned long);
+
+ /* Allocate the ccw request. */
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
+ datasize, startdev, blk_mq_rq_to_pdu(req));
+ if (IS_ERR(cqr))
+ return cqr;
+
+ ccw = cqr->cpaddr;
+ data = cqr->data;
+
+ if (use_prefix) {
+ prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
+ startdev, 1, 0, trkcount, 0, 0);
+ } else {
+ define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
+ ccw[-1].flags |= CCW_FLAG_CC;
+
+ data += sizeof(struct DE_eckd_data);
+ locate_record_ext(ccw++, data, first_trk, 0,
+ trkcount, cmd, basedev, 0, 0);
+ }
+
+ idaws = (unsigned long *)(cqr->data + size);
+ len_to_track_end = 0;
+ if (start_padding_sectors) {
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = cmd;
+ /* maximum 3390 track size */
+ ccw->count = 57326;
+ /* 64k map to one track */
+ len_to_track_end = 65536 - start_padding_sectors * 512;
+ ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->flags |= CCW_FLAG_IDA;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw++;
+ for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
+ idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
+ }
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ seg_len = bv.bv_len;
+ if (cmd == DASD_ECKD_CCW_READ_TRACK)
+ memset(dst, 0, seg_len);
+ if (!len_to_track_end) {
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = cmd;
+ /* maximum 3390 track size */
+ ccw->count = 57326;
+ /* 64k map to one track */
+ len_to_track_end = 65536;
+ ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->flags |= CCW_FLAG_IDA;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw++;
+ }
+ len_to_track_end -= seg_len;
+ idaws = idal_create_words(idaws, dst, seg_len);
+ }
+ for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
+ idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->block = block;
+ cqr->expires = startdev->default_expires * HZ;
+ cqr->lpm = dasd_path_get_ppm(startdev);
+ cqr->retries = startdev->default_retries;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ return cqr;
+}
+
+static int
+dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+ struct dasd_eckd_private *private;
+ struct ccw1 *ccw;
+ struct req_iterator iter;
+ struct bio_vec bv;
+ char *dst, *cda;
+ unsigned int blksize, blk_per_trk, off;
+ sector_t recid;
+ int status;
+
+ if (!dasd_page_cache)
+ goto out;
+ private = cqr->block->base->private;
+ blksize = cqr->block->bp_block;
+ blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+ recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
+ ccw = cqr->cpaddr;
+ /* Skip over define extent & locate record. */
+ ccw++;
+ if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
+ ccw++;
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ for (off = 0; off < bv.bv_len; off += blksize) {
+ /* Skip locate record. */
+ if (private->uses_cdl && recid <= 2*blk_per_trk)
+ ccw++;
+ if (dst) {
+ if (ccw->flags & CCW_FLAG_IDA)
+ cda = *((char **)phys_to_virt(ccw->cda));
+ else
+ cda = phys_to_virt(ccw->cda);
+ if (dst != cda) {
+ if (rq_data_dir(req) == READ)
+ memcpy(dst, cda, bv.bv_len);
+ kmem_cache_free(dasd_page_cache,
+ (void *)((addr_t)cda & PAGE_MASK));
+ }
+ dst = NULL;
+ }
+ ccw++;
+ recid++;
+ }
+ }
+out:
+ status = cqr->status == DASD_CQR_DONE;
+ dasd_sfree_request(cqr, cqr->memdev);
+ return status;
+}
+
+/*
+ * Modify ccw/tcw in cqr so it can be started on a base device.
+ *
+ * Note that this is not enough to restart the cqr!
+ * Either reset cqr->startdev as well (summary unit check handling)
+ * or restart via separate cqr (as in ERP handling).
+ */
+void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
+{
+ struct ccw1 *ccw;
+ struct PFX_eckd_data *pfxdata;
+ struct tcw *tcw;
+ struct tccb *tccb;
+ struct dcw *dcw;
+
+ if (cqr->cpmode == 1) {
+ tcw = cqr->cpaddr;
+ tccb = tcw_get_tccb(tcw);
+ dcw = (struct dcw *)&tccb->tca[0];
+ pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
+ pfxdata->validity.verify_base = 0;
+ pfxdata->validity.hyper_pav = 0;
+ } else {
+ ccw = cqr->cpaddr;
+ pfxdata = cqr->data;
+ if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
+ pfxdata->validity.verify_base = 0;
+ pfxdata->validity.hyper_pav = 0;
+ }
+ }
+}
+
+#define DASD_ECKD_CHANQ_MAX_SIZE 4
+
+static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
+ struct dasd_block *block,
+ struct request *req)
+{
+ struct dasd_eckd_private *private;
+ struct dasd_device *startdev;
+ unsigned long flags;
+ struct dasd_ccw_req *cqr;
+
+ startdev = dasd_alias_get_start_dev(base);
+ if (!startdev)
+ startdev = base;
+ private = startdev->private;
+ if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
+ return ERR_PTR(-EBUSY);
+
+ spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
+ private->count++;
+ if ((base->features & DASD_FEATURE_USERAW))
+ cqr = dasd_eckd_build_cp_raw(startdev, block, req);
+ else
+ cqr = dasd_eckd_build_cp(startdev, block, req);
+ if (IS_ERR(cqr))
+ private->count--;
+ spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
+ return cqr;
+}
+
+static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
+ struct request *req)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+
+ spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
+ private = cqr->memdev->private;
+ private->count--;
+ spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
+ return dasd_eckd_free_cp(cqr, req);
+}
+
+static int
+dasd_eckd_fill_info(struct dasd_device * device,
+ struct dasd_information2_t * info)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ info->label_block = 2;
+ info->FBA_layout = private->uses_cdl ? 0 : 1;
+ info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
+ info->characteristics_size = sizeof(private->rdc_data);
+ memcpy(info->characteristics, &private->rdc_data,
+ sizeof(private->rdc_data));
+ info->confdata_size = min_t(unsigned long, private->conf.len,
+ sizeof(info->configuration_data));
+ memcpy(info->configuration_data, private->conf.data,
+ info->confdata_size);
+ return 0;
+}
+
+/*
+ * SECTION: ioctl functions for eckd devices.
+ */
+
+/*
+ * Release device ioctl.
+ * Builds a channel program to release a previously reserved
+ * device (see dasd_eckd_reserve).
+ */
+static int
+dasd_eckd_release(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+ struct ccw1 *ccw;
+ int useglobal;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ useglobal = 0;
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
+ if (IS_ERR(cqr)) {
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
+ }
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 32;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
+ cqr->startdev = device;
+ cqr->memdev = device;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->retries = 2; /* set retry counter to enable basic ERP */
+ cqr->expires = 2 * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_immediatly(cqr);
+ if (!rc)
+ clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * Reserve device ioctl.
+ * Options are set to 'synchronous wait for interrupt' and
+ * 'timeout the request'. This leads to a terminate IO if
+ * the interrupt is outstanding for a certain time.
+ */
+static int
+dasd_eckd_reserve(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+ struct ccw1 *ccw;
+ int useglobal;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ useglobal = 0;
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
+ if (IS_ERR(cqr)) {
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
+ }
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 32;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
+ cqr->startdev = device;
+ cqr->memdev = device;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->retries = 2; /* set retry counter to enable basic ERP */
+ cqr->expires = 2 * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_immediatly(cqr);
+ if (!rc)
+ set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * Steal lock ioctl - unconditional reserve device.
+ * Builds a channel program to break a device's reservation
+ * (unconditional reserve).
+ */
+static int
+dasd_eckd_steal_lock(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+ struct ccw1 *ccw;
+ int useglobal;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ useglobal = 0;
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
+ if (IS_ERR(cqr)) {
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
+ }
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_SLCK;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 32;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
+ cqr->startdev = device;
+ cqr->memdev = device;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->retries = 2; /* set retry counter to enable basic ERP */
+ cqr->expires = 2 * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_immediatly(cqr);
+ if (!rc)
+ set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * SNID - Sense Path Group ID
+ * This ioctl may be used in situations where I/O is stalled due to
+ * a reserve, so if the normal dasd_smalloc_request fails, we use the
+ * preallocated dasd_reserve_req.
+ */
+static int dasd_eckd_snid(struct dasd_device *device,
+ void __user *argp)
+{
+ struct dasd_ccw_req *cqr;
+ int rc;
+ struct ccw1 *ccw;
+ int useglobal;
+ struct dasd_snid_ioctl_data usrparm;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+ return -EFAULT;
+
+ useglobal = 0;
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
+ sizeof(struct dasd_snid_data), device,
+ NULL);
+ if (IS_ERR(cqr)) {
+ mutex_lock(&dasd_reserve_mutex);
+ useglobal = 1;
+ cqr = &dasd_reserve_req->cqr;
+ memset(cqr, 0, sizeof(*cqr));
+ memset(&dasd_reserve_req->ccw, 0,
+ sizeof(dasd_reserve_req->ccw));
+ cqr->cpaddr = &dasd_reserve_req->ccw;
+ cqr->data = &dasd_reserve_req->data;
+ cqr->magic = DASD_ECKD_MAGIC;
+ }
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_SNID;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 12;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
+ cqr->startdev = device;
+ cqr->memdev = device;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+ cqr->retries = 5;
+ cqr->expires = 10 * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ cqr->lpm = usrparm.path_mask;
+
+ rc = dasd_sleep_on_immediatly(cqr);
+ /* verify that I/O processing didn't modify the path mask */
+ if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
+ rc = -EIO;
+ if (!rc) {
+ usrparm.data = *((struct dasd_snid_data *)cqr->data);
+ if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
+ rc = -EFAULT;
+ }
+
+ if (useglobal)
+ mutex_unlock(&dasd_reserve_mutex);
+ else
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * Read performance statistics
+ */
+static int
+dasd_eckd_performance(struct dasd_device *device, void __user *argp)
+{
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_rssd_perf_stats_t *stats;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ (sizeof(struct dasd_psf_prssd_data) +
+ sizeof(struct dasd_rssd_perf_stats_t)),
+ device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Could not allocate initialization request");
+ return PTR_ERR(cqr);
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->retries = 0;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ cqr->expires = 10 * HZ;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = 0x01; /* Performance Statistics */
+ prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
+
+ /* Read Subsystem Data - Performance Statistics */
+ stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+ memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
+
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
+ ccw->cda = (__u32)virt_to_phys(stats);
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ rc = dasd_sleep_on(cqr);
+ if (rc == 0) {
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+ if (copy_to_user(argp, stats,
+ sizeof(struct dasd_rssd_perf_stats_t)))
+ rc = -EFAULT;
+ }
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * Get attributes (cache operations)
+ * Returns the cache attributes used in Define Extent (DE).
+ */
+static int
+dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct attrib_data_t attrib = private->attrib;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!argp)
+ return -EINVAL;
+
+ rc = 0;
+ if (copy_to_user(argp, (long *) &attrib,
+ sizeof(struct attrib_data_t)))
+ rc = -EFAULT;
+
+ return rc;
+}
+
+/*
+ * Set attributes (cache operations)
+ * Stores the attributes for cache operations to be used in Define Extent (DE).
+ */
+static int
+dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct attrib_data_t attrib;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!argp)
+ return -EINVAL;
+
+ if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
+ return -EFAULT;
+ private->attrib = attrib;
+
+ dev_info(&device->cdev->dev,
+ "The DASD cache mode was set to %x (%i cylinder prestage)\n",
+ private->attrib.operation, private->attrib.nr_cyl);
+ return 0;
+}
+
+/*
+ * Issue syscall I/O to EMC Symmetrix array.
+ * CCWs are PSF and RSSD
+ */
+static int dasd_symm_io(struct dasd_device *device, void __user *argp)
+{
+ struct dasd_symmio_parms usrparm;
+ char *psf_data, *rssd_result;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ char psf0, psf1;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ psf0 = psf1 = 0;
+
+ /* Copy parms from caller */
+ rc = -EFAULT;
+ if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+ goto out;
+ if (is_compat_task()) {
+ /* Make sure pointers are sane even on 31 bit. */
+ rc = -EINVAL;
+ if ((usrparm.psf_data >> 32) != 0)
+ goto out;
+ if ((usrparm.rssd_result >> 32) != 0)
+ goto out;
+ usrparm.psf_data &= 0x7fffffffULL;
+ usrparm.rssd_result &= 0x7fffffffULL;
+ }
+ /* at least 2 bytes are accessed and should be allocated */
+ if (usrparm.psf_data_len < 2) {
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "Symmetrix ioctl invalid data length %d",
+ usrparm.psf_data_len);
+ rc = -EINVAL;
+ goto out;
+ }
+ /* alloc I/O data area */
+ psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
+ rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
+ if (!psf_data || !rssd_result) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
+ /* get syscall header from user space */
+ rc = -EFAULT;
+ if (copy_from_user(psf_data,
+ (void __user *)(unsigned long) usrparm.psf_data,
+ usrparm.psf_data_len))
+ goto out_free;
+ psf0 = psf_data[0];
+ psf1 = psf_data[1];
+
+ /* setup CCWs for PSF + RSSD */
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Could not allocate initialization request");
+ rc = PTR_ERR(cqr);
+ goto out_free;
+ }
+
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->retries = 3;
+ cqr->expires = 10 * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ /* Build the ccws */
+ ccw = cqr->cpaddr;
+
+ /* PSF ccw */
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = usrparm.psf_data_len;
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->cda = (__u32)virt_to_phys(psf_data);
+
+ ccw++;
+
+ /* RSSD ccw */
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = usrparm.rssd_result_len;
+	ccw->flags = CCW_FLAG_SLI;
+ ccw->cda = (__u32)virt_to_phys(rssd_result);
+
+ rc = dasd_sleep_on(cqr);
+ if (rc)
+ goto out_sfree;
+
+ rc = -EFAULT;
+ if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
+ rssd_result, usrparm.rssd_result_len))
+ goto out_sfree;
+ rc = 0;
+
+out_sfree:
+ dasd_sfree_request(cqr, cqr->memdev);
+out_free:
+ kfree(rssd_result);
+ kfree(psf_data);
+out:
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
+ (int) psf0, (int) psf1, rc);
+ return rc;
+}
+
+static int
+dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
+{
+ struct dasd_device *device = block->base;
+
+ switch (cmd) {
+ case BIODASDGATTR:
+ return dasd_eckd_get_attrib(device, argp);
+ case BIODASDSATTR:
+ return dasd_eckd_set_attrib(device, argp);
+ case BIODASDPSRD:
+ return dasd_eckd_performance(device, argp);
+ case BIODASDRLSE:
+ return dasd_eckd_release(device);
+ case BIODASDRSRV:
+ return dasd_eckd_reserve(device);
+ case BIODASDSLCK:
+ return dasd_eckd_steal_lock(device);
+ case BIODASDSNID:
+ return dasd_eckd_snid(device, argp);
+ case BIODASDSYMMIO:
+ return dasd_symm_io(device, argp);
+ default:
+ return -ENOTTY;
+ }
+}
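+
+/*
+ * Illustrative user-space sketch (not part of this driver) of the ioctl
+ * path above. It assumes the s390 uapi header <asm/dasd.h>, which
+ * provides BIODASDGATTR and attrib_data_t, and an example device node
+ * /dev/dasda:
+ *
+ *	#include <fcntl.h>
+ *	#include <stdio.h>
+ *	#include <sys/ioctl.h>
+ *	#include <asm/dasd.h>
+ *
+ *	int main(void)
+ *	{
+ *		attrib_data_t attrib;
+ *		int fd = open("/dev/dasda", O_RDONLY);
+ *
+ *		if (fd < 0)
+ *			return 1;
+ *		if (ioctl(fd, BIODASDGATTR, &attrib) == 0)
+ *			printf("cache mode %x, %u cylinders prestage\n",
+ *			       attrib.operation, attrib.nr_cyl);
+ *		return 0;
+ *	}
+ */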
+
+/*
+ * Dump the range of CCWs into the 'page' buffer
+ * and print it via printk.
+ */
+static void
+dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
+{
+ int len, count;
+ char *datap;
+
+ len = 0;
+ while (from <= to) {
+ len += sprintf(page + len, PRINTK_HEADER
+ " CCW %p: %08X %08X DAT:",
+ from, ((int *) from)[0], ((int *) from)[1]);
+
+ /* get pointer to data (consider IDALs) */
+ if (from->flags & CCW_FLAG_IDA)
+ datap = (char *)*((addr_t *)phys_to_virt(from->cda));
+ else
+ datap = phys_to_virt(from->cda);
+
+ /* dump data (max 128 bytes) */
+ for (count = 0; count < from->count && count < 128; count++) {
+ if (count % 32 == 0)
+ len += sprintf(page + len, "\n");
+ if (count % 8 == 0)
+ len += sprintf(page + len, " ");
+ if (count % 4 == 0)
+ len += sprintf(page + len, " ");
+ len += sprintf(page + len, "%02x", datap[count]);
+ }
+ len += sprintf(page + len, "\n");
+ from++;
+ }
+ if (len > 0)
+ printk(KERN_ERR "%s", page);
+}
+
+static void
+dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
+ char *reason)
+{
+ u64 *sense;
+ u64 *stat;
+
+ sense = (u64 *) dasd_get_sense(irb);
+ stat = (u64 *) &irb->scsw;
+ if (sense) {
+ DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
+ "%016llx %016llx %016llx %016llx",
+ reason, *stat, *((u32 *) (stat + 1)),
+ sense[0], sense[1], sense[2], sense[3]);
+ } else {
+ DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
+ reason, *stat, *((u32 *) (stat + 1)),
+ "NO VALID SENSE");
+ }
+}
+
+/*
+ * Print sense data and related channel program.
+ * Output is split into parts because the printk buffer is only 1024 bytes.
+ */
+static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
+ struct dasd_ccw_req *req, struct irb *irb)
+{
+ char *page;
+ struct ccw1 *first, *last, *fail, *from, *to;
+ int len, sl, sct;
+
+ page = (char *) get_zeroed_page(GFP_ATOMIC);
+ if (page == NULL) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "No memory to dump sense data");
+ return;
+ }
+ /* dump the sense data */
+ len = sprintf(page, PRINTK_HEADER
+ " I/O status report for device %s:\n",
+ dev_name(&device->cdev->dev));
+ len += sprintf(page + len, PRINTK_HEADER
+ " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+ "CS:%02X RC:%d\n",
+ req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+ scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+ scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+ req ? req->intrc : 0);
+ len += sprintf(page + len, PRINTK_HEADER
+ " device %s: Failing CCW: %p\n",
+ dev_name(&device->cdev->dev),
+ phys_to_virt(irb->scsw.cmd.cpa));
+ if (irb->esw.esw0.erw.cons) {
+ for (sl = 0; sl < 4; sl++) {
+ len += sprintf(page + len, PRINTK_HEADER
+ " Sense(hex) %2d-%2d:",
+ (8 * sl), ((8 * sl) + 7));
+
+ for (sct = 0; sct < 8; sct++) {
+ len += sprintf(page + len, " %02x",
+ irb->ecw[8 * sl + sct]);
+ }
+ len += sprintf(page + len, "\n");
+ }
+
+ if (irb->ecw[27] & DASD_SENSE_BIT_0) {
+ /* 24 Byte Sense Data */
+ sprintf(page + len, PRINTK_HEADER
+ " 24 Byte: %x MSG %x, "
+ "%s MSGb to SYSOP\n",
+ irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
+ irb->ecw[1] & 0x10 ? "" : "no");
+ } else {
+ /* 32 Byte Sense Data */
+ sprintf(page + len, PRINTK_HEADER
+ " 32 Byte: Format: %x "
+ "Exception class %x\n",
+ irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
+ }
+ } else {
+ sprintf(page + len, PRINTK_HEADER
+ " SORRY - NO VALID SENSE AVAILABLE\n");
+ }
+ printk(KERN_ERR "%s", page);
+
+ if (req) {
+ /* req == NULL for unsolicited interrupts */
+ /* dump the Channel Program (max 140 Bytes per line) */
+ /* Count CCW and print first CCWs (maximum 7) */
+ first = req->cpaddr;
+ for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+ to = min(first + 6, last);
+ printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req);
+ dasd_eckd_dump_ccw_range(first, to, page);
+
+ /* print failing CCW area (maximum 4) */
+ /* scsw->cda is either valid or zero */
+ from = ++to;
+ fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
+ if (from < fail - 2) {
+ from = fail - 2; /* there is a gap - print header */
+ printk(KERN_ERR PRINTK_HEADER "......\n");
+ }
+ to = min(fail + 1, last);
+ dasd_eckd_dump_ccw_range(from, to, page + len);
+
+ /* print last CCWs (maximum 2) */
+ len = 0;
+ from = max(from, ++to);
+ if (from < last - 1) {
+ from = last - 1; /* there is a gap - print header */
+ printk(KERN_ERR PRINTK_HEADER "......\n");
+ }
+ dasd_eckd_dump_ccw_range(from, last, page + len);
+ }
+ free_page((unsigned long) page);
+}
+
+
+/*
+ * Print sense data from a tcw.
+ */
+static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
+ struct dasd_ccw_req *req, struct irb *irb)
+{
+ char *page;
+ int len, sl, sct, residual;
+ struct tsb *tsb;
+ u8 *sense, *rcq;
+
+ page = (char *) get_zeroed_page(GFP_ATOMIC);
+ if (page == NULL) {
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "No memory to dump sense data");
+ return;
+ }
+ /* dump the sense data */
+ len = sprintf(page, PRINTK_HEADER
+ " I/O status report for device %s:\n",
+ dev_name(&device->cdev->dev));
+ len += sprintf(page + len, PRINTK_HEADER
+ " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+ "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
+ req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
+ scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
+ scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
+ irb->scsw.tm.fcxs,
+ (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
+ req ? req->intrc : 0);
+ len += sprintf(page + len, PRINTK_HEADER
+ " device %s: Failing TCW: %p\n",
+ dev_name(&device->cdev->dev),
+ phys_to_virt(irb->scsw.tm.tcw));
+
+ tsb = NULL;
+ sense = NULL;
+ if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
+ tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
+
+ if (tsb) {
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->length %d\n", tsb->length);
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->flags %x\n", tsb->flags);
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->dcw_offset %d\n", tsb->dcw_offset);
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->count %d\n", tsb->count);
+ residual = tsb->count - 28;
+ len += sprintf(page + len, PRINTK_HEADER
+ " residual %d\n", residual);
+
+ switch (tsb->flags & 0x07) {
+ case 1: /* tsa_iostat */
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->tsa.iostat.dev_time %d\n",
+ tsb->tsa.iostat.dev_time);
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->tsa.iostat.def_time %d\n",
+ tsb->tsa.iostat.def_time);
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->tsa.iostat.queue_time %d\n",
+ tsb->tsa.iostat.queue_time);
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->tsa.iostat.dev_busy_time %d\n",
+ tsb->tsa.iostat.dev_busy_time);
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->tsa.iostat.dev_act_time %d\n",
+ tsb->tsa.iostat.dev_act_time);
+ sense = tsb->tsa.iostat.sense;
+ break;
+		case 2: /* tsa_ddpc */
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
+ for (sl = 0; sl < 2; sl++) {
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->tsa.ddpc.rcq %2d-%2d: ",
+ (8 * sl), ((8 * sl) + 7));
+ rcq = tsb->tsa.ddpc.rcq;
+ for (sct = 0; sct < 8; sct++) {
+ len += sprintf(page + len, " %02x",
+ rcq[8 * sl + sct]);
+ }
+ len += sprintf(page + len, "\n");
+ }
+ sense = tsb->tsa.ddpc.sense;
+ break;
+ case 3: /* tsa_intrg */
+ len += sprintf(page + len, PRINTK_HEADER
+ " tsb->tsa.intrg.: not supported yet\n");
+ break;
+ }
+
+ if (sense) {
+ for (sl = 0; sl < 4; sl++) {
+ len += sprintf(page + len, PRINTK_HEADER
+ " Sense(hex) %2d-%2d:",
+ (8 * sl), ((8 * sl) + 7));
+ for (sct = 0; sct < 8; sct++) {
+ len += sprintf(page + len, " %02x",
+ sense[8 * sl + sct]);
+ }
+ len += sprintf(page + len, "\n");
+ }
+
+ if (sense[27] & DASD_SENSE_BIT_0) {
+ /* 24 Byte Sense Data */
+ sprintf(page + len, PRINTK_HEADER
+ " 24 Byte: %x MSG %x, "
+ "%s MSGb to SYSOP\n",
+ sense[7] >> 4, sense[7] & 0x0f,
+ sense[1] & 0x10 ? "" : "no");
+ } else {
+ /* 32 Byte Sense Data */
+ sprintf(page + len, PRINTK_HEADER
+ " 32 Byte: Format: %x "
+ "Exception class %x\n",
+ sense[6] & 0x0f, sense[22] >> 4);
+ }
+ } else {
+ sprintf(page + len, PRINTK_HEADER
+ " SORRY - NO VALID SENSE AVAILABLE\n");
+ }
+ } else {
+ sprintf(page + len, PRINTK_HEADER
+ " SORRY - NO TSB DATA AVAILABLE\n");
+ }
+ printk(KERN_ERR "%s", page);
+ free_page((unsigned long) page);
+}
+
+static void dasd_eckd_dump_sense(struct dasd_device *device,
+ struct dasd_ccw_req *req, struct irb *irb)
+{
+ u8 *sense = dasd_get_sense(irb);
+
+ if (scsw_is_tm(&irb->scsw)) {
+ /*
+ * In some cases the 'File Protected' or 'Incorrect Length'
+ * error might be expected and log messages shouldn't be written
+ * then. Check if the according suppress bit is set.
+ */
+ if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
+ test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
+ return;
+ if (scsw_cstat(&irb->scsw) == 0x40 &&
+ test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
+ return;
+
+ dasd_eckd_dump_sense_tcw(device, req, irb);
+ } else {
+ /*
+ * In some cases the 'Command Reject' or 'No Record Found'
+ * error might be expected and log messages shouldn't be
+ * written then. Check if the according suppress bit is set.
+ */
+ if (sense && sense[0] & SNS0_CMD_REJECT &&
+ test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
+ return;
+
+ if (sense && sense[1] & SNS1_NO_REC_FOUND &&
+ test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
+ return;
+
+ dasd_eckd_dump_sense_ccw(device, req, irb);
+ }
+}
+
+static int dasd_eckd_reload_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ char print_uid[DASD_UID_STRLEN];
+ int rc, old_base;
+ struct dasd_uid uid;
+ unsigned long flags;
+
+ /*
+ * remove device from alias handling to prevent new requests
+ * from being scheduled on the wrong alias device
+ */
+ dasd_alias_remove_device(device);
+
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ old_base = private->uid.base_unit_addr;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+ /* Read Configuration Data */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err;
+
+ dasd_eckd_read_fc_security(device);
+
+ rc = dasd_eckd_generate_uid(device);
+ if (rc)
+ goto out_err;
+ /*
+ * update unit address configuration and
+ * add device to alias management
+ */
+ dasd_alias_update_add_device(device);
+
+ dasd_eckd_get_uid(device, &uid);
+
+ if (old_base != uid.base_unit_addr) {
+ dasd_eckd_get_uid_string(&private->conf, print_uid);
+ dev_info(&device->cdev->dev,
+ "An Alias device was reassigned to a new base device "
+ "with UID: %s\n", print_uid);
+ }
+ return 0;
+
+out_err:
+ return -1;
+}
+
+static int dasd_eckd_read_message_buffer(struct dasd_device *device,
+ struct dasd_rssd_messages *messages,
+ __u8 lpum)
+{
+ struct dasd_rssd_messages *message_buf;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ (sizeof(struct dasd_psf_prssd_data) +
+ sizeof(struct dasd_rssd_messages)),
+ device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate read message buffer request");
+ return PTR_ERR(cqr);
+ }
+
+ cqr->lpm = lpum;
+retry:
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->expires = 10 * HZ;
+ set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+	/* dasd_sleep_on_immediatly does not do complex error
+	 * recovery, so clear the ERP flag and set the retry
+	 * counter to do basic ERP */
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ cqr->retries = 256;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = 0x03; /* Message Buffer */
+ /* all other bytes of prssdp must be zero */
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
+
+ /* Read Subsystem Data - message buffer */
+ message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
+ memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
+
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(struct dasd_rssd_messages);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)virt_to_phys(message_buf);
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ rc = dasd_sleep_on_immediatly(cqr);
+ if (rc == 0) {
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ message_buf = (struct dasd_rssd_messages *)
+ (prssdp + 1);
+ memcpy(messages, message_buf,
+ sizeof(struct dasd_rssd_messages));
+ } else if (cqr->lpm) {
+ /*
+ * on z/VM we might not be able to do I/O on the requested path
+ * but instead we get the required information on any path
+ * so retry with open path mask
+ */
+ cqr->lpm = 0;
+ goto retry;
+ } else
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Reading messages failed with rc=%d\n",
+				rc);
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+static int dasd_eckd_query_host_access(struct dasd_device *device,
+ struct dasd_psf_query_host_access *data)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_psf_query_host_access *host_access;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ /* not available for HYPER PAV alias devices */
+ if (!device->block && private->lcu->pav == HYPER_PAV)
+ return -EOPNOTSUPP;
+
+ /* may not be supported by the storage server */
+ if (!(private->features.feature[14] & 0x80))
+ return -EOPNOTSUPP;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ sizeof(struct dasd_psf_prssd_data) + 1,
+ device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate read message buffer request");
+ return PTR_ERR(cqr);
+ }
+ host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
+ if (!host_access) {
+ dasd_sfree_request(cqr, device);
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate host_access buffer");
+ return -ENOMEM;
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10 * HZ;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
+ /* LSS and Volume that will be queried */
+ prssdp->lss = private->conf.ned->ID;
+ prssdp->volume = private->conf.ned->unit_addr;
+ /* all other bytes of prssdp must be zero */
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
+
+ /* Read Subsystem Data - query host access */
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(struct dasd_psf_query_host_access);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)virt_to_phys(host_access);
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ /* the command might not be supported, suppress error message */
+ __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc == 0) {
+ *data = *host_access;
+ } else {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Reading host access data failed with rc=%d\n",
+ rc);
+ rc = -EOPNOTSUPP;
+ }
+
+ dasd_sfree_request(cqr, cqr->memdev);
+ kfree(host_access);
+ return rc;
+}
+
+/*
+ * return number of grouped devices
+ */
+static int dasd_eckd_host_access_count(struct dasd_device *device)
+{
+ struct dasd_psf_query_host_access *access;
+ struct dasd_ckd_path_group_entry *entry;
+ struct dasd_ckd_host_information *info;
+ int count = 0;
+ int rc, i;
+
+ access = kzalloc(sizeof(*access), GFP_NOIO);
+ if (!access) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate access buffer");
+ return -ENOMEM;
+ }
+ rc = dasd_eckd_query_host_access(device, access);
+ if (rc) {
+ kfree(access);
+ return rc;
+ }
+
+ info = (struct dasd_ckd_host_information *)
+ access->host_access_information;
+ for (i = 0; i < info->entry_count; i++) {
+ entry = (struct dasd_ckd_path_group_entry *)
+ (info->entry + i * info->entry_size);
+ if (entry->status_flags & DASD_ECKD_PG_GROUPED)
+ count++;
+ }
+
+ kfree(access);
+ return count;
+}
+
+/*
+ * write host access information to a sequential file
+ */
+static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
+{
+ struct dasd_psf_query_host_access *access;
+ struct dasd_ckd_path_group_entry *entry;
+ struct dasd_ckd_host_information *info;
+ char sysplex[9] = "";
+ int rc, i;
+
+ access = kzalloc(sizeof(*access), GFP_NOIO);
+ if (!access) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate access buffer");
+ return -ENOMEM;
+ }
+ rc = dasd_eckd_query_host_access(device, access);
+ if (rc) {
+ kfree(access);
+ return rc;
+ }
+
+ info = (struct dasd_ckd_host_information *)
+ access->host_access_information;
+ for (i = 0; i < info->entry_count; i++) {
+ entry = (struct dasd_ckd_path_group_entry *)
+ (info->entry + i * info->entry_size);
+ /* PGID */
+ seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
+ /* FLAGS */
+ seq_printf(m, "status_flags %02x\n", entry->status_flags);
+ /* SYSPLEX NAME */
+ memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
+ EBCASC(sysplex, sizeof(sysplex));
+ seq_printf(m, "sysplex_name %8s\n", sysplex);
+ /* SUPPORTED CYLINDER */
+ seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
+ /* TIMESTAMP */
+ seq_printf(m, "timestamp %lu\n", (unsigned long)
+ entry->timestamp);
+ }
+ kfree(access);
+
+ return 0;
+}
+
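+/*
+ * Find the configured device with the given bus ID in a copy relation.
+ */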
+static struct dasd_device *
+copy_relation_find_device(struct dasd_copy_relation *copy, char *busid)
+{
+ int i;
+
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
+ return copy->entry[i].device;
+ }
+ return NULL;
+}
+
+/*
+ * set the new active/primary device
+ */
+static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
+ char *old_busid)
+{
+ int i;
+
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, new_busid,
+ DASD_BUS_ID_SIZE) == 0) {
+ copy->active = &copy->entry[i];
+ copy->entry[i].primary = true;
+ } else if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, old_busid,
+ DASD_BUS_ID_SIZE) == 0) {
+ copy->entry[i].primary = false;
+ }
+ }
+}
+
+/*
+ * The function will swap the roles of a given copy pair.
+ * During the swap operation the relation of the block device is disconnected
+ * from the old primary and connected to the new one.
+ *
+ * I/O is paused on the block queue before the swap and resumed afterwards.
+ */
+static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
+ char *sec_busid)
+{
+ struct dasd_device *primary, *secondary;
+ struct dasd_copy_relation *copy;
+ struct dasd_block *block;
+ struct gendisk *gdp;
+
+ copy = device->copy;
+ if (!copy)
+ return DASD_COPYPAIRSWAP_INVALID;
+ primary = copy->active->device;
+ if (!primary)
+ return DASD_COPYPAIRSWAP_INVALID;
+ /* double check if swap has correct primary */
+ if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
+ return DASD_COPYPAIRSWAP_PRIMARY;
+
+ secondary = copy_relation_find_device(copy, sec_busid);
+ if (!secondary)
+ return DASD_COPYPAIRSWAP_SECONDARY;
+
+ /*
+	 * usually the device should be quiesced for a swap;
+	 * out of paranoia, stop the device and requeue requests again
+ */
+ dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
+ dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
+ dasd_generic_requeue_all_requests(primary);
+
+ /* swap DASD internal device <> block assignment */
+ block = primary->block;
+ primary->block = NULL;
+ secondary->block = block;
+ block->base = secondary;
+ /* set new primary device in COPY relation */
+ copy_pair_set_active(copy, sec_busid, prim_busid);
+
+ /* swap blocklayer device link */
+ gdp = block->gdp;
+ dasd_add_link_to_gendisk(gdp, secondary);
+
+ /* re-enable device */
+ dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
+ dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
+ dasd_schedule_device_bh(secondary);
+
+ return DASD_COPYPAIRSWAP_SUCCESS;
+}
+
+/*
+ * Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
+ */
+static int dasd_eckd_query_pprc_status(struct dasd_device *device,
+ struct dasd_pprc_data_sc4 *data)
+{
+ struct dasd_pprc_data_sc4 *pprc_data;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ sizeof(*prssdp) + sizeof(*pprc_data) + 1,
+ device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate query PPRC status request");
+ return PTR_ERR(cqr);
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10 * HZ;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *)cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = PSF_SUBORDER_PPRCEQ;
+ prssdp->varies[0] = PPRCEQ_SCOPE_4;
+ pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t)prssdp;
+
+	/* Read Subsystem Data - PPRC Extended Query */
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(*pprc_data);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t)pprc_data;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc == 0) {
+ *data = *pprc_data;
+ } else {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "PPRC Extended Query failed with rc=%d\n",
+ rc);
+ rc = -EOPNOTSUPP;
+ }
+
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * ECKD NOP - no operation
+ */
+static int dasd_eckd_nop(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate NOP request");
+ return PTR_ERR(cqr);
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 1;
+ cqr->expires = 10 * HZ;
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_NOP;
+ ccw->flags |= CCW_FLAG_SLI;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc != 0) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "NOP failed with rc=%d\n", rc);
+ rc = -EOPNOTSUPP;
+ }
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+static int dasd_eckd_device_ping(struct dasd_device *device)
+{
+ return dasd_eckd_nop(device);
+}
+
+/*
+ * Perform Subsystem Function - CUIR response
+ */
+static int
+dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
+ __u32 message_id, __u8 lpum)
+{
+ struct dasd_psf_cuir_response *psf_cuir;
+ int pos = pathmask_to_pos(lpum);
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
+ sizeof(struct dasd_psf_cuir_response),
+ device, NULL);
+
+ if (IS_ERR(cqr)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Could not allocate PSF-CUIR request");
+ return PTR_ERR(cqr);
+ }
+
+ psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
+ psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
+ psf_cuir->cc = response;
+ psf_cuir->chpid = device->path[pos].chpid;
+ psf_cuir->message_id = message_id;
+ psf_cuir->cssid = device->path[pos].cssid;
+ psf_cuir->ssid = device->path[pos].ssid;
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->cda = (__u32)virt_to_phys(psf_cuir);
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = sizeof(struct dasd_psf_cuir_response);
+
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10*HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+
+ rc = dasd_sleep_on(cqr);
+
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * return the configuration data referenced by the record selector
+ * if a record selector is specified; otherwise return the
+ * conf_data pointer for the path specified by lpum
+ */
+static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
+ __u8 lpum,
+ struct dasd_cuir_message *cuir)
+{
+ struct dasd_conf_data *conf_data;
+ int path, pos;
+
+ if (cuir->record_selector == 0)
+ goto out;
+ for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
+ conf_data = device->path[pos].conf_data;
+ if (conf_data->gneq.record_selector ==
+ cuir->record_selector)
+ return conf_data;
+ }
+out:
+ return device->path[pathmask_to_pos(lpum)].conf_data;
+}
+
+/*
+ * This function determines the scope of a reconfiguration request by
+ * analysing the path and device selection data provided in the CUIR request.
+ * Returns a path mask containing the CUIR-affected paths for the given device.
+ *
+ * If the CUIR request does not contain the required information, return the
+ * path mask of the path on which the attention message for the CUIR request
+ * was received.
+ */
+static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
+ struct dasd_cuir_message *cuir)
+{
+ struct dasd_conf_data *ref_conf_data;
+ unsigned long bitmask = 0, mask = 0;
+ struct dasd_conf_data *conf_data;
+ unsigned int pos, path;
+ char *ref_gneq, *gneq;
+ char *ref_ned, *ned;
+ int tbcpm = 0;
+
+	/* if the CUIR request does not specify the scope, use the path
+	 * the attention message was presented on */
+ if (!cuir->ned_map ||
+ !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
+ return lpum;
+
+ /* get reference conf data */
+ ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
+ /* reference ned is determined by ned_map field */
+ pos = 8 - ffs(cuir->ned_map);
+ ref_ned = (char *)&ref_conf_data->neds[pos];
+ ref_gneq = (char *)&ref_conf_data->gneq;
+ /* transfer 24 bit neq_map to mask */
+ mask = cuir->neq_map[2];
+ mask |= cuir->neq_map[1] << 8;
+ mask |= cuir->neq_map[0] << 16;
+
+ for (path = 0; path < 8; path++) {
+ /* initialise data per path */
+ bitmask = mask;
+ conf_data = device->path[path].conf_data;
+ pos = 8 - ffs(cuir->ned_map);
+ ned = (char *) &conf_data->neds[pos];
+ /* compare reference ned and per path ned */
+ if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
+ continue;
+ gneq = (char *)&conf_data->gneq;
+		/* compare reference gneq and per path gneq under the
+		 * 24 bit mask, where mask bit 0 corresponds to gneq
+		 * byte 31 and mask bit 23 to gneq byte 8 */
+ while (bitmask) {
+ pos = ffs(bitmask) - 1;
+ if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
+ != 0)
+ break;
+ clear_bit(pos, &bitmask);
+ }
+ if (bitmask)
+ continue;
+		/* device and path match the reference values,
+		 * add path to CUIR scope */
+ tbcpm |= 0x80 >> path;
+ }
+ return tbcpm;
+}
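+
+/*
+ * Worked example (illustrative only, not from the original source): for
+ * neq_map = { 0x00, 0x00, 0x05 } the assembled 24-bit mask is 0x000005,
+ * i.e. bits 0 and 2 are set, so the loop above compares gneq bytes
+ * 31 - 0 = 31 and 31 - 2 = 29 of each path's conf_data against the
+ * reference gneq before the path is added to the CUIR scope.
+ */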
+
+static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
+ unsigned long paths, int action)
+{
+ int pos;
+
+ while (paths) {
+ /* get position of bit in mask */
+ pos = 8 - ffs(paths);
+ /* get channel path descriptor from this position */
+ if (action == CUIR_QUIESCE)
+			pr_warn("Service on the storage server caused path %x.%02x to go offline\n",
+ device->path[pos].cssid,
+ device->path[pos].chpid);
+ else if (action == CUIR_RESUME)
+			pr_info("Path %x.%02x is back online after service on the storage server\n",
+ device->path[pos].cssid,
+ device->path[pos].chpid);
+ clear_bit(7 - pos, &paths);
+ }
+}
+
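+/*
+ * Remove the CUIR-affected paths from the operational path mask of a
+ * single device. Returns the removed path mask, 0 if none of the
+ * affected paths is in use, or -EINVAL if taking the CUIR action would
+ * remove the last remaining path.
+ */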
+static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
+ struct dasd_cuir_message *cuir)
+{
+ unsigned long tbcpm;
+
+ tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
+ /* nothing to do if path is not in use */
+ if (!(dasd_path_get_opm(device) & tbcpm))
+ return 0;
+ if (!(dasd_path_get_opm(device) & ~tbcpm)) {
+ /* no path would be left if the CUIR action is taken
+ return error */
+ return -EINVAL;
+ }
+ /* remove device from operational path mask */
+ dasd_path_remove_opm(device, tbcpm);
+ dasd_path_add_cuirpm(device, tbcpm);
+ return tbcpm;
+}
+
+/*
+ * walk through all devices and build a path mask to quiesce them;
+ * return an error if the last path to a device would be removed
+ *
+ * if only part of the devices are quiesced and an error occurs,
+ * no onlining is necessary; the storage server will notify the
+ * devices that were already set offline again
+ */
+static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
+ struct dasd_cuir_message *cuir)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct alias_pav_group *pavgroup, *tempgroup;
+ struct dasd_device *dev, *n;
+ unsigned long paths = 0;
+ unsigned long flags;
+ int tbcpm;
+
+ /* active devices */
+ list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
+ alias_list) {
+ spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+ tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+ spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
+ if (tbcpm < 0)
+ goto out_err;
+ paths |= tbcpm;
+ }
+ /* inactive devices */
+ list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
+ alias_list) {
+ spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+ tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+ spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
+ if (tbcpm < 0)
+ goto out_err;
+ paths |= tbcpm;
+ }
+ /* devices in PAV groups */
+ list_for_each_entry_safe(pavgroup, tempgroup,
+ &private->lcu->grouplist, group) {
+ list_for_each_entry_safe(dev, n, &pavgroup->baselist,
+ alias_list) {
+ spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+ tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(dev->cdev), flags);
+ if (tbcpm < 0)
+ goto out_err;
+ paths |= tbcpm;
+ }
+ list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
+ alias_list) {
+ spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+ tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(dev->cdev), flags);
+ if (tbcpm < 0)
+ goto out_err;
+ paths |= tbcpm;
+ }
+ }
+ /* notify user about all paths affected by CUIR action */
+ dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
+ return 0;
+out_err:
+ return tbcpm;
+}
+
+static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
+ struct dasd_cuir_message *cuir)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct alias_pav_group *pavgroup, *tempgroup;
+ struct dasd_device *dev, *n;
+ unsigned long paths = 0;
+ int tbcpm;
+
+ /*
+	 * the path may have been added through a generic path event before;
+	 * only trigger path verification if the path is not already in use
+ */
+ list_for_each_entry_safe(dev, n,
+ &private->lcu->active_devices,
+ alias_list) {
+ tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+ paths |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
+ dasd_schedule_device_bh(dev);
+ }
+ }
+ list_for_each_entry_safe(dev, n,
+ &private->lcu->inactive_devices,
+ alias_list) {
+ tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+ paths |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
+ dasd_schedule_device_bh(dev);
+ }
+ }
+ /* devices in PAV groups */
+ list_for_each_entry_safe(pavgroup, tempgroup,
+ &private->lcu->grouplist,
+ group) {
+ list_for_each_entry_safe(dev, n,
+ &pavgroup->baselist,
+ alias_list) {
+ tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+ paths |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
+ dasd_schedule_device_bh(dev);
+ }
+ }
+ list_for_each_entry_safe(dev, n,
+ &pavgroup->aliaslist,
+ alias_list) {
+ tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+ paths |= tbcpm;
+ if (!(dasd_path_get_opm(dev) & tbcpm)) {
+ dasd_path_add_tbvpm(dev, tbcpm);
+ dasd_schedule_device_bh(dev);
+ }
+ }
+ }
+ /* notify user about all paths affected by CUIR action */
+ dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
+ return 0;
+}
+
+static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
+ __u8 lpum)
+{
+ struct dasd_cuir_message *cuir = messages;
+ int response;
+
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "CUIR request: %016llx %016llx %016llx %08x",
+ ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
+ ((u32 *)cuir)[3]);
+
+ if (cuir->code == CUIR_QUIESCE) {
+ /* quiesce */
+ if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
+ response = PSF_CUIR_LAST_PATH;
+ else
+ response = PSF_CUIR_COMPLETED;
+ } else if (cuir->code == CUIR_RESUME) {
+ /* resume */
+ dasd_eckd_cuir_resume(device, lpum, cuir);
+ response = PSF_CUIR_COMPLETED;
+ } else
+ response = PSF_CUIR_NOT_SUPPORTED;
+
+ dasd_eckd_psf_cuir_response(device, response,
+ cuir->message_id, lpum);
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "CUIR response: %d on message ID %08x", response,
+ cuir->message_id);
+	/* to make sure there is no attention left, schedule work again */
+ device->discipline->check_attention(device, lpum);
+}
+
+static void dasd_eckd_oos_resume(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct alias_pav_group *pavgroup, *tempgroup;
+ struct dasd_device *dev, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&private->lcu->lock, flags);
+ list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
+ alias_list) {
+ if (dev->stopped & DASD_STOPPED_NOSPC)
+ dasd_generic_space_avail(dev);
+ }
+ list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
+ alias_list) {
+ if (dev->stopped & DASD_STOPPED_NOSPC)
+ dasd_generic_space_avail(dev);
+ }
+ /* devices in PAV groups */
+ list_for_each_entry_safe(pavgroup, tempgroup,
+ &private->lcu->grouplist,
+ group) {
+ list_for_each_entry_safe(dev, n, &pavgroup->baselist,
+ alias_list) {
+ if (dev->stopped & DASD_STOPPED_NOSPC)
+ dasd_generic_space_avail(dev);
+ }
+ list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
+ alias_list) {
+ if (dev->stopped & DASD_STOPPED_NOSPC)
+ dasd_generic_space_avail(dev);
+ }
+ }
+ spin_unlock_irqrestore(&private->lcu->lock, flags);
+}
+
+static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
+ __u8 lpum)
+{
+ struct dasd_oos_message *oos = messages;
+
+ switch (oos->code) {
+ case REPO_WARN:
+ case POOL_WARN:
+ dev_warn(&device->cdev->dev,
+ "Extent pool usage has reached a critical value\n");
+ dasd_eckd_oos_resume(device);
+ break;
+ case REPO_EXHAUST:
+ case POOL_EXHAUST:
+ dev_warn(&device->cdev->dev,
+ "Extent pool is exhausted\n");
+ break;
+ case REPO_RELIEVE:
+ case POOL_RELIEVE:
+ dev_info(&device->cdev->dev,
+ "Extent pool physical space constraint has been relieved\n");
+ break;
+ }
+
+ /* In any case, update related data */
+ dasd_eckd_read_ext_pool_info(device);
+
+	/* to make sure there is no attention left, schedule work again */
+ device->discipline->check_attention(device, lpum);
+}
+
+static void dasd_eckd_check_attention_work(struct work_struct *work)
+{
+ struct check_attention_work_data *data;
+ struct dasd_rssd_messages *messages;
+ struct dasd_device *device;
+ int rc;
+
+ data = container_of(work, struct check_attention_work_data, worker);
+ device = data->device;
+ messages = kzalloc(sizeof(*messages), GFP_KERNEL);
+ if (!messages) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Could not allocate attention message buffer");
+ goto out;
+ }
+ rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
+ if (rc)
+ goto out;
+
+ if (messages->length == ATTENTION_LENGTH_CUIR &&
+ messages->format == ATTENTION_FORMAT_CUIR)
+ dasd_eckd_handle_cuir(device, messages, data->lpum);
+ if (messages->length == ATTENTION_LENGTH_OOS &&
+ messages->format == ATTENTION_FORMAT_OOS)
+ dasd_eckd_handle_oos(device, messages, data->lpum);
+
+out:
+ dasd_put_device(device);
+ kfree(messages);
+ kfree(data);
+}
+
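+/*
+ * Entry point for attention handling. The GFP_ATOMIC allocation and the
+ * deferral to a workqueue suggest this may be called from a context that
+ * cannot sleep; the channel I/O needed to read the message buffer is
+ * therefore done later in dasd_eckd_check_attention_work().
+ */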
+static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
+{
+ struct check_attention_work_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+ INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
+ dasd_get_device(device);
+ data->device = device;
+ data->lpum = lpum;
+ schedule_work(&data->worker);
+ return 0;
+}
+
+static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
+{
+ if (~lpum & dasd_path_get_opm(device)) {
+ dasd_path_add_nohpfpm(device, lpum);
+ dasd_path_remove_opm(device, lpum);
+ dev_err(&device->cdev->dev,
+ "Channel path %02X lost HPF functionality and is disabled\n",
+ lpum);
+ return 1;
+ }
+ return 0;
+}
+
+static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ dev_err(&device->cdev->dev,
+ "High Performance FICON disabled\n");
+ private->fcx_max_data = 0;
+}
+
+static int dasd_eckd_hpf_enabled(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->fcx_max_data ? 1 : 0;
+}
+
+static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
+ struct irb *irb)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ if (!private->fcx_max_data) {
+ /* sanity check for no HPF, the error makes no sense */
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Trying to disable HPF for a non HPF device");
+ return;
+ }
+ if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
+ dasd_eckd_disable_hpf_device(device);
+ } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
+ if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
+ return;
+ dasd_eckd_disable_hpf_device(device);
+ dasd_path_set_tbvpm(device,
+ dasd_path_get_hpfpm(device));
+ }
+ /*
+	 * prevent any new I/O from being started on the device and schedule a
+ * requeue of existing requests
+ */
+ dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+ dasd_schedule_requeue(device);
+}
+
+/*
+ * Initialize block layer request queue.
+ */
+static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
+{
+ unsigned int logical_block_size = block->bp_block;
+ struct request_queue *q = block->gdp->queue;
+ struct dasd_device *device = block->base;
+ int max;
+
+ if (device->features & DASD_FEATURE_USERAW) {
+ /*
+		 * the max_blocks value for raw_track access is 256;
+		 * it is higher than the native ECKD value because we
+		 * only need one ccw per track, so the max_hw_sectors
+		 * are 2048 x 512B = 1024kB = 16 tracks
+ */
+ max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
+ } else {
+ max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
+ }
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ q->limits.max_dev_sectors = max;
+ blk_queue_logical_block_size(q, logical_block_size);
+ blk_queue_max_hw_sectors(q, max);
+ blk_queue_max_segments(q, USHRT_MAX);
+ /* With page sized segments each segment can be translated into one idaw/tidaw */
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+}
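+
+/*
+ * Worked example (illustrative only, not from the original source): with
+ * a 4KiB logical block size, s2b_shift is 3 (4096 / 512 = 8 sectors per
+ * block), so the raw-track case above yields max = 256 << 3 = 2048
+ * sectors x 512B = 1024kB, in line with the comment in the function,
+ * while the native ECKD case yields 190 << 3 = 1520 sectors (760kB)
+ * per request.
+ */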
+
+static struct ccw_driver dasd_eckd_driver = {
+ .driver = {
+ .name = "dasd-eckd",
+ .owner = THIS_MODULE,
+ .dev_groups = dasd_dev_groups,
+ },
+ .ids = dasd_eckd_ids,
+ .probe = dasd_eckd_probe,
+ .remove = dasd_generic_remove,
+ .set_offline = dasd_generic_set_offline,
+ .set_online = dasd_eckd_set_online,
+ .notify = dasd_generic_notify,
+ .path_event = dasd_generic_path_event,
+ .shutdown = dasd_generic_shutdown,
+ .uc_handler = dasd_generic_uc_handler,
+ .int_class = IRQIO_DAS,
+};
+
+static struct dasd_discipline dasd_eckd_discipline = {
+ .owner = THIS_MODULE,
+ .name = "ECKD",
+ .ebcname = "ECKD",
+ .check_device = dasd_eckd_check_characteristics,
+ .uncheck_device = dasd_eckd_uncheck_device,
+ .do_analysis = dasd_eckd_do_analysis,
+ .pe_handler = dasd_eckd_pe_handler,
+ .basic_to_ready = dasd_eckd_basic_to_ready,
+ .online_to_ready = dasd_eckd_online_to_ready,
+ .basic_to_known = dasd_eckd_basic_to_known,
+ .setup_blk_queue = dasd_eckd_setup_blk_queue,
+ .fill_geometry = dasd_eckd_fill_geometry,
+ .start_IO = dasd_start_IO,
+ .term_IO = dasd_term_IO,
+ .handle_terminated_request = dasd_eckd_handle_terminated_request,
+ .format_device = dasd_eckd_format_device,
+ .check_device_format = dasd_eckd_check_device_format,
+ .erp_action = dasd_eckd_erp_action,
+ .erp_postaction = dasd_eckd_erp_postaction,
+ .check_for_device_change = dasd_eckd_check_for_device_change,
+ .build_cp = dasd_eckd_build_alias_cp,
+ .free_cp = dasd_eckd_free_alias_cp,
+ .dump_sense = dasd_eckd_dump_sense,
+ .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
+ .fill_info = dasd_eckd_fill_info,
+ .ioctl = dasd_eckd_ioctl,
+ .reload = dasd_eckd_reload_device,
+ .get_uid = dasd_eckd_get_uid,
+ .kick_validate = dasd_eckd_kick_validate_server,
+ .check_attention = dasd_eckd_check_attention,
+ .host_access_count = dasd_eckd_host_access_count,
+ .hosts_print = dasd_hosts_print,
+ .handle_hpf_error = dasd_eckd_handle_hpf_error,
+ .disable_hpf = dasd_eckd_disable_hpf_device,
+ .hpf_enabled = dasd_eckd_hpf_enabled,
+ .reset_path = dasd_eckd_reset_path,
+ .is_ese = dasd_eckd_is_ese,
+ .space_allocated = dasd_eckd_space_allocated,
+ .space_configured = dasd_eckd_space_configured,
+ .logical_capacity = dasd_eckd_logical_capacity,
+ .release_space = dasd_eckd_release_space,
+ .ext_pool_id = dasd_eckd_ext_pool_id,
+ .ext_size = dasd_eckd_ext_size,
+ .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
+ .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
+ .ext_pool_oos = dasd_eckd_ext_pool_oos,
+ .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
+ .ese_format = dasd_eckd_ese_format,
+ .ese_read = dasd_eckd_ese_read,
+ .pprc_status = dasd_eckd_query_pprc_status,
+ .pprc_enabled = dasd_eckd_pprc_enabled,
+ .copy_pair_swap = dasd_eckd_copy_pair_swap,
+ .device_ping = dasd_eckd_device_ping,
+};
+
+static int __init
+dasd_eckd_init(void)
+{
+ int ret;
+
+ ASCEBC(dasd_eckd_discipline.ebcname, 4);
+ dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
+ GFP_KERNEL | GFP_DMA);
+ if (!dasd_reserve_req)
+ return -ENOMEM;
+ dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
+ GFP_KERNEL | GFP_DMA);
+ if (!dasd_vol_info_req) {
+ kfree(dasd_reserve_req);
+ return -ENOMEM;
+ }
+ pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
+ GFP_KERNEL | GFP_DMA);
+ if (!pe_handler_worker) {
+ kfree(dasd_reserve_req);
+ kfree(dasd_vol_info_req);
+ return -ENOMEM;
+ }
+ rawpadpage = (void *)__get_free_page(GFP_KERNEL);
+ if (!rawpadpage) {
+ kfree(pe_handler_worker);
+ kfree(dasd_reserve_req);
+ kfree(dasd_vol_info_req);
+ return -ENOMEM;
+ }
+ ret = ccw_driver_register(&dasd_eckd_driver);
+ if (!ret)
+ wait_for_device_probe();
+ else {
+ kfree(pe_handler_worker);
+ kfree(dasd_reserve_req);
+ kfree(dasd_vol_info_req);
+ free_page((unsigned long)rawpadpage);
+ }
+ return ret;
+}
+
+static void __exit
+dasd_eckd_cleanup(void)
+{
+ ccw_driver_unregister(&dasd_eckd_driver);
+ kfree(pe_handler_worker);
+	kfree(dasd_reserve_req);
+	kfree(dasd_vol_info_req);
+	free_page((unsigned long)rawpadpage);
+}
+
+module_init(dasd_eckd_init);
+module_exit(dasd_eckd_cleanup);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
new file mode 100644
index 0000000000..f9299bd184
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.h
@@ -0,0 +1,711 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2000
+ *
+ */
+
+#ifndef DASD_ECKD_H
+#define DASD_ECKD_H
+
+/*****************************************************************************
+ * SECTION: CCW Definitions
+ ****************************************************************************/
+#define DASD_ECKD_CCW_NOP 0x03
+#define DASD_ECKD_CCW_WRITE 0x05
+#define DASD_ECKD_CCW_READ 0x06
+#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
+#define DASD_ECKD_CCW_READ_HOME_ADDRESS 0x0a
+#define DASD_ECKD_CCW_WRITE_KD 0x0d
+#define DASD_ECKD_CCW_READ_KD 0x0e
+#define DASD_ECKD_CCW_ERASE 0x11
+#define DASD_ECKD_CCW_READ_COUNT 0x12
+#define DASD_ECKD_CCW_SLCK 0x14
+#define DASD_ECKD_CCW_WRITE_RECORD_ZERO 0x15
+#define DASD_ECKD_CCW_READ_RECORD_ZERO 0x16
+#define DASD_ECKD_CCW_WRITE_CKD 0x1d
+#define DASD_ECKD_CCW_READ_CKD 0x1e
+#define DASD_ECKD_CCW_PSF 0x27
+#define DASD_ECKD_CCW_SNID 0x34
+#define DASD_ECKD_CCW_RSSD 0x3e
+#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
+#define DASD_ECKD_CCW_LOCATE_RECORD_EXT 0x4b
+#define DASD_ECKD_CCW_SNSS 0x54
+#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
+#define DASD_ECKD_CCW_WRITE_MT 0x85
+#define DASD_ECKD_CCW_READ_MT 0x86
+#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
+#define DASD_ECKD_CCW_READ_KD_MT 0x8e
+#define DASD_ECKD_CCW_READ_COUNT_MT 0x92
+#define DASD_ECKD_CCW_RELEASE 0x94
+#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
+#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
+#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
+#define DASD_ECKD_CCW_WRITE_TRACK_DATA 0xA5
+#define DASD_ECKD_CCW_READ_TRACK_DATA 0xA6
+#define DASD_ECKD_CCW_RESERVE 0xB4
+#define DASD_ECKD_CCW_READ_TRACK 0xDE
+#define DASD_ECKD_CCW_PFX 0xE7
+#define DASD_ECKD_CCW_PFX_READ 0xEA
+#define DASD_ECKD_CCW_RSCK 0xF9
+#define DASD_ECKD_CCW_RCD 0xFA
+#define DASD_ECKD_CCW_DSO 0xF7
+
+/* Define Subsystem Function / Orders */
+#define DSO_ORDER_RAS 0x81
+
+/*
+ * Perform Subsystem Function / Orders
+ */
+#define PSF_ORDER_PRSSD 0x18
+#define PSF_ORDER_CUIR_RESPONSE 0x1A
+#define PSF_ORDER_SSC 0x1D
+
+/*
+ * Perform Subsystem Function / Sub-Orders
+ */
+#define PSF_SUBORDER_QHA 0x1C /* Query Host Access */
+#define PSF_SUBORDER_PPRCEQ 0x50 /* PPRC Extended Query */
+#define PSF_SUBORDER_VSQ 0x52 /* Volume Storage Query */
+#define PSF_SUBORDER_LCQ 0x53 /* Logical Configuration Query */
+
+/*
+ * PPRC Extended Query Scopes
+ */
+#define PPRCEQ_SCOPE_4 0x04 /* Scope 4 for PPRC Extended Query */
+
+/*
+ * CUIR response condition codes
+ */
+#define PSF_CUIR_INVALID 0x00
+#define PSF_CUIR_COMPLETED 0x01
+#define PSF_CUIR_NOT_SUPPORTED 0x02
+#define PSF_CUIR_ERROR_IN_REQ 0x03
+#define PSF_CUIR_DENIED 0x04
+#define PSF_CUIR_LAST_PATH 0x05
+#define PSF_CUIR_DEVICE_ONLINE 0x06
+#define PSF_CUIR_VARY_FAILURE 0x07
+#define PSF_CUIR_SOFTWARE_FAILURE 0x08
+#define PSF_CUIR_NOT_RECOGNIZED 0x09
+
+/*
+ * CUIR codes
+ */
+#define CUIR_QUIESCE 0x01
+#define CUIR_RESUME 0x02
+
+/*
+ * Out-of-space (OOS) Codes
+ */
+#define REPO_WARN 0x01
+#define REPO_EXHAUST 0x02
+#define POOL_WARN 0x03
+#define POOL_EXHAUST 0x04
+#define REPO_RELIEVE 0x05
+#define POOL_RELIEVE 0x06
+
+/*
+ * attention message definitions
+ */
+#define ATTENTION_LENGTH_CUIR 0x0e
+#define ATTENTION_FORMAT_CUIR 0x01
+#define ATTENTION_LENGTH_OOS 0x10
+#define ATTENTION_FORMAT_OOS 0x06
+
+#define DASD_ECKD_PG_GROUPED 0x10
+
+/*
+ * Size that is reported for large volumes in the old 16-bit no_cyl field
+ */
+#define LV_COMPAT_CYL 0xFFFE
+
+
+#define FCX_MAX_DATA_FACTOR 65536
+#define DASD_ECKD_RCD_DATA_SIZE 256
+
+#define DASD_ECKD_PATH_THRHLD 256
+#define DASD_ECKD_PATH_INTERVAL 300
+
+/*
+ * Maximum number of blocks to be chained
+ */
+#define DASD_ECKD_MAX_BLOCKS 190
+#define DASD_ECKD_MAX_BLOCKS_RAW 256
+
+/*****************************************************************************
+ * SECTION: Type Definitions
+ ****************************************************************************/
+
+struct eckd_count {
+ __u16 cyl;
+ __u16 head;
+ __u8 record;
+ __u8 kl;
+ __u16 dl;
+} __attribute__ ((packed));
+
+struct ch_t {
+ __u16 cyl;
+ __u16 head;
+} __attribute__ ((packed));
+
+struct chr_t {
+ __u16 cyl;
+ __u16 head;
+ __u8 record;
+} __attribute__ ((packed));
+
+struct DE_eckd_data {
+ struct {
+ unsigned char perm:2; /* Permissions on this extent */
+ unsigned char reserved:1;
+ unsigned char seek:2; /* Seek control */
+ unsigned char auth:2; /* Access authorization */
+ unsigned char pci:1; /* PCI Fetch mode */
+ } __attribute__ ((packed)) mask;
+ struct {
+ unsigned char mode:2; /* Architecture mode */
+ unsigned char ckd:1; /* CKD Conversion */
+ unsigned char operation:3; /* Operation mode */
+ unsigned char cfw:1; /* Cache fast write */
+ unsigned char dfw:1; /* DASD fast write */
+ } __attribute__ ((packed)) attributes;
+ __u16 blk_size; /* Blocksize */
+ __u16 fast_write_id;
+ __u8 ga_additional; /* Global Attributes Additional */
+ __u8 ga_extended; /* Global Attributes Extended */
+ struct ch_t beg_ext;
+ struct ch_t end_ext;
+ unsigned long ep_sys_time; /* Ext Parameter - System Time Stamp */
+ __u8 ep_format; /* Extended Parameter format byte */
+ __u8 ep_prio; /* Extended Parameter priority I/O byte */
+ __u8 ep_reserved1; /* Extended Parameter Reserved */
+ __u8 ep_rec_per_track; /* Number of records on a track */
+ __u8 ep_reserved[4]; /* Extended Parameter Reserved */
+} __attribute__ ((packed));
+
+struct LO_eckd_data {
+ struct {
+ unsigned char orientation:2;
+ unsigned char operation:6;
+ } __attribute__ ((packed)) operation;
+ struct {
+ unsigned char last_bytes_used:1;
+ unsigned char reserved:6;
+ unsigned char read_count_suffix:1;
+ } __attribute__ ((packed)) auxiliary;
+ __u8 unused;
+ __u8 count;
+ struct ch_t seek_addr;
+ struct chr_t search_arg;
+ __u8 sector;
+ __u16 length;
+} __attribute__ ((packed));
+
+struct LRE_eckd_data {
+ struct {
+ unsigned char orientation:2;
+ unsigned char operation:6;
+ } __attribute__ ((packed)) operation;
+ struct {
+ unsigned char length_valid:1;
+ unsigned char length_scope:1;
+ unsigned char imbedded_ccw_valid:1;
+ unsigned char check_bytes:2;
+ unsigned char imbedded_count_valid:1;
+ unsigned char reserved:1;
+ unsigned char read_count_suffix:1;
+ } __attribute__ ((packed)) auxiliary;
+ __u8 imbedded_ccw;
+ __u8 count;
+ struct ch_t seek_addr;
+ struct chr_t search_arg;
+ __u8 sector;
+ __u16 length;
+ __u8 imbedded_count;
+ __u8 extended_operation;
+ __u16 extended_parameter_length;
+ __u8 extended_parameter[];
+} __attribute__ ((packed));
+
+/* Prefix data for format 0x00 and 0x01 */
+struct PFX_eckd_data {
+ unsigned char format;
+ struct {
+ unsigned char define_extent:1;
+ unsigned char time_stamp:1;
+ unsigned char verify_base:1;
+ unsigned char hyper_pav:1;
+ unsigned char reserved:4;
+ } __attribute__ ((packed)) validity;
+ __u8 base_address;
+ __u8 aux;
+ __u8 base_lss;
+ __u8 reserved[7];
+ struct DE_eckd_data define_extent;
+ struct LRE_eckd_data locate_record;
+} __attribute__ ((packed));
+
+struct dasd_eckd_characteristics {
+ __u16 cu_type;
+ struct {
+ unsigned char support:2;
+ unsigned char async:1;
+ unsigned char reserved:1;
+ unsigned char cache_info:1;
+ unsigned char model:3;
+ } __attribute__ ((packed)) cu_model;
+ __u16 dev_type;
+ __u8 dev_model;
+ struct {
+ unsigned char mult_burst:1;
+ unsigned char RT_in_LR:1;
+ unsigned char reserved1:1;
+ unsigned char RD_IN_LR:1;
+ unsigned char reserved2:4;
+ unsigned char reserved3:8;
+ unsigned char defect_wr:1;
+ unsigned char XRC_supported:1;
+ unsigned char PPRC_enabled:1;
+ unsigned char striping:1;
+ unsigned char reserved5:4;
+ unsigned char cfw:1;
+ unsigned char reserved6:2;
+ unsigned char cache:1;
+ unsigned char dual_copy:1;
+ unsigned char dfw:1;
+ unsigned char reset_alleg:1;
+ unsigned char sense_down:1;
+ } __attribute__ ((packed)) facilities;
+ __u8 dev_class;
+ __u8 unit_type;
+ __u16 no_cyl;
+ __u16 trk_per_cyl;
+ __u8 sec_per_trk;
+ __u8 byte_per_track[3];
+ __u16 home_bytes;
+ __u8 formula;
+ union {
+ struct {
+ __u8 f1;
+ __u16 f2;
+ __u16 f3;
+ } __attribute__ ((packed)) f_0x01;
+ struct {
+ __u8 f1;
+ __u8 f2;
+ __u8 f3;
+ __u8 f4;
+ __u8 f5;
+ } __attribute__ ((packed)) f_0x02;
+ } __attribute__ ((packed)) factors;
+ __u16 first_alt_trk;
+ __u16 no_alt_trk;
+ __u16 first_dia_trk;
+ __u16 no_dia_trk;
+ __u16 first_sup_trk;
+ __u16 no_sup_trk;
+ __u8 MDR_ID;
+ __u8 OBR_ID;
+ __u8 director;
+ __u8 rd_trk_set;
+ __u16 max_rec_zero;
+ __u8 reserved1;
+ __u8 RWANY_in_LR;
+ __u8 factor6;
+ __u8 factor7;
+ __u8 factor8;
+ __u8 reserved2[3];
+ __u8 reserved3[6];
+ __u32 long_no_cyl;
+} __attribute__ ((packed));
+
+/* elements of the configuration data */
+struct dasd_ned {
+ struct {
+ __u8 identifier:2;
+ __u8 token_id:1;
+ __u8 sno_valid:1;
+ __u8 subst_sno:1;
+ __u8 recNED:1;
+ __u8 emuNED:1;
+ __u8 reserved:1;
+ } __attribute__ ((packed)) flags;
+ __u8 descriptor;
+ __u8 dev_class;
+ __u8 reserved;
+ __u8 dev_type[6];
+ __u8 dev_model[3];
+ __u8 HDA_manufacturer[3];
+ struct {
+ __u8 HDA_location[2];
+ __u8 HDA_seqno[12];
+ } serial;
+ __u8 ID;
+ __u8 unit_addr;
+} __attribute__ ((packed));
+
+struct dasd_sneq {
+ struct {
+ __u8 identifier:2;
+ __u8 reserved:6;
+ } __attribute__ ((packed)) flags;
+ __u8 res1;
+ __u16 format;
+ __u8 res2[4]; /* byte 4- 7 */
+ __u8 sua_flags; /* byte 8 */
+ __u8 base_unit_addr; /* byte 9 */
+ __u8 res3[22]; /* byte 10-31 */
+} __attribute__ ((packed));
+
+struct vd_sneq {
+ struct {
+ __u8 identifier:2;
+ __u8 reserved:6;
+ } __attribute__ ((packed)) flags;
+ __u8 res1;
+ __u16 format;
+ __u8 res2[4]; /* byte 4- 7 */
+ __u8 uit[16]; /* byte 8-23 */
+ __u8 res3[8]; /* byte 24-31 */
+} __attribute__ ((packed));
+
+struct dasd_gneq {
+ struct {
+ __u8 identifier:2;
+ __u8 reserved:6;
+ } __attribute__ ((packed)) flags;
+ __u8 record_selector;
+ __u8 reserved[4];
+ struct {
+ __u8 value:2;
+ __u8 number:6;
+ } __attribute__ ((packed)) timeout;
+ __u8 reserved3;
+ __u16 subsystemID;
+ __u8 reserved2[22];
+} __attribute__ ((packed));
+
+struct dasd_rssd_features {
+ char feature[256];
+} __attribute__((packed));
+
+struct dasd_rssd_messages {
+ __u16 length;
+ __u8 format;
+ __u8 code;
+ __u32 message_id;
+ __u8 flags;
+ char messages[4087];
+} __packed;
+
+/*
+ * Read Subsystem Data - Volume Storage Query
+ */
+struct dasd_rssd_vsq {
+ struct {
+ __u8 tse:1;
+ __u8 space_not_available:1;
+ __u8 ese:1;
+ __u8 unused:5;
+ } __packed vol_info;
+ __u8 unused1;
+ __u16 extent_pool_id;
+ __u8 warn_cap_limit;
+ __u8 warn_cap_guaranteed;
+ __u16 unused2;
+ __u32 limit_capacity;
+ __u32 guaranteed_capacity;
+ __u32 space_allocated;
+ __u32 space_configured;
+ __u32 logical_capacity;
+} __packed;
+
+/*
+ * Extent Pool Summary
+ */
+struct dasd_ext_pool_sum {
+ __u16 pool_id;
+ __u8 repo_warn_thrshld;
+ __u8 warn_thrshld;
+ struct {
+ __u8 type:1; /* 0 - CKD / 1 - FB */
+ __u8 track_space_efficient:1;
+ __u8 extent_space_efficient:1;
+ __u8 standard_volume:1;
+ __u8 extent_size_valid:1;
+ __u8 capacity_at_warnlevel:1;
+ __u8 pool_oos:1;
+ __u8 unused0:1;
+ __u8 unused1;
+ } __packed flags;
+ struct {
+ __u8 reserved0:1;
+ __u8 size_1G:1;
+ __u8 reserved1:5;
+ __u8 size_16M:1;
+ } __packed extent_size;
+ __u8 unused;
+} __packed;
+
+/*
+ * Read Subsystem Data-Response - Logical Configuration Query - Header
+ */
+struct dasd_rssd_lcq {
+ __u16 data_length; /* Length of data returned */
+ __u16 pool_count; /* Count of extent pools returned - Max: 448 */
+ struct {
+ __u8 pool_info_valid:1; /* Detailed Information valid */
+ __u8 pool_id_volume:1;
+ __u8 pool_id_cec:1;
+ __u8 unused0:5;
+ __u8 unused1;
+ } __packed header_flags;
+ char sfi_type[6]; /* Storage Facility Image Type (EBCDIC) */
+ char sfi_model[3]; /* Storage Facility Image Model (EBCDIC) */
+ __u8 sfi_seq_num[10]; /* Storage Facility Image Sequence Number */
+ __u8 reserved[7];
+ struct dasd_ext_pool_sum ext_pool_sum[448];
+} __packed;
+
+struct dasd_oos_message {
+ __u16 length;
+ __u8 format;
+ __u8 code;
+ __u8 percentage_empty;
+ __u8 reserved;
+ __u16 ext_pool_id;
+ __u16 token;
+ __u8 unused[6];
+} __packed;
+
+struct dasd_cuir_message {
+ __u16 length;
+ __u8 format;
+ __u8 code;
+ __u32 message_id;
+ __u8 flags;
+ __u8 neq_map[3];
+ __u8 ned_map;
+ __u8 record_selector;
+} __packed;
+
+struct dasd_psf_cuir_response {
+ __u8 order;
+ __u8 flags;
+ __u8 cc;
+ __u8 chpid;
+ __u16 device_nr;
+ __u16 reserved;
+ __u32 message_id;
+ __u64 system_id;
+ __u8 cssid;
+ __u8 ssid;
+} __packed;
+
+struct dasd_ckd_path_group_entry {
+ __u8 status_flags;
+ __u8 pgid[11];
+ __u8 sysplex_name[8];
+ __u32 timestamp;
+ __u32 cylinder;
+ __u8 reserved[4];
+} __packed;
+
+struct dasd_ckd_host_information {
+ __u8 access_flags;
+ __u8 entry_size;
+ __u16 entry_count;
+ __u8 entry[16390];
+} __packed;
+
+struct dasd_psf_query_host_access {
+ __u8 access_flag;
+ __u8 version;
+ __u16 CKD_length;
+ __u16 SCSI_length;
+ __u8 unused[10];
+ __u8 host_access_information[16394];
+} __packed;
+
+/*
+ * Perform Subsystem Function - Prepare for Read Subsystem Data
+ */
+struct dasd_psf_prssd_data {
+ unsigned char order;
+ unsigned char flags;
+ unsigned char reserved1;
+ unsigned char reserved2;
+ unsigned char lss;
+ unsigned char volume;
+ unsigned char suborder;
+ unsigned char varies[5];
+} __attribute__ ((packed));
+
+/*
+ * Perform Subsystem Function - Set Subsystem Characteristics
+ */
+struct dasd_psf_ssc_data {
+ unsigned char order;
+ unsigned char flags;
+ unsigned char cu_type[4];
+ unsigned char suborder;
+ unsigned char reserved[59];
+} __attribute__((packed));
+
+/* Maximum number of extents for a single Release Allocated Space command */
+#define DASD_ECKD_RAS_EXTS_MAX 110U
+
+struct dasd_dso_ras_ext_range {
+ struct ch_t beg_ext;
+ struct ch_t end_ext;
+} __packed;
+
+/*
+ * Define Subsystem Operation - Release Allocated Space
+ */
+struct dasd_dso_ras_data {
+ __u8 order;
+ struct {
+ __u8 message:1; /* Must be zero */
+ __u8 reserved1:2;
+ __u8 vol_type:1; /* 0 - CKD/FBA, 1 - FB */
+ __u8 reserved2:4;
+ } __packed flags;
+ /* Operation Flags to specify scope */
+ struct {
+ __u8 reserved1:2;
+ /* Release Space by Extent */
+ __u8 by_extent:1; /* 0 - entire volume, 1 - specified extents */
+ __u8 guarantee_init:1;
+ __u8 force_release:1; /* Internal - will be ignored */
+ __u16 reserved2:11;
+ } __packed op_flags;
+ __u8 lss;
+ __u8 dev_addr;
+ __u32 reserved1;
+ __u8 reserved2[10];
+ __u16 nr_exts; /* Defines number of ext_scope - max 110 */
+ __u16 reserved3;
+} __packed;
+
+
+/*
+ * some structures and definitions for alias handling
+ */
+struct dasd_unit_address_configuration {
+ struct {
+ char ua_type;
+ char base_ua;
+ } unit[256];
+} __attribute__((packed));
+
+
+#define MAX_DEVICES_PER_LCU 256
+
+/* flags on the LCU */
+#define NEED_UAC_UPDATE 0x01
+#define UPDATE_PENDING 0x02
+
+enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV};
+
+
+struct alias_root {
+ struct list_head serverlist;
+ spinlock_t lock;
+};
+
+struct alias_server {
+ struct list_head server;
+ struct dasd_uid uid;
+ struct list_head lculist;
+};
+
+struct summary_unit_check_work_data {
+ char reason;
+ struct dasd_device *device;
+ struct work_struct worker;
+};
+
+struct read_uac_work_data {
+ struct dasd_device *device;
+ struct delayed_work dwork;
+};
+
+struct alias_lcu {
+ struct list_head lcu;
+ struct dasd_uid uid;
+ enum pavtype pav;
+ char flags;
+ spinlock_t lock;
+ struct list_head grouplist;
+ struct list_head active_devices;
+ struct list_head inactive_devices;
+ struct dasd_unit_address_configuration *uac;
+ struct summary_unit_check_work_data suc_data;
+ struct read_uac_work_data ruac_data;
+ struct dasd_ccw_req *rsu_cqr;
+ struct completion lcu_setup;
+};
+
+struct alias_pav_group {
+ struct list_head group;
+ struct dasd_uid uid;
+ struct alias_lcu *lcu;
+ struct list_head baselist;
+ struct list_head aliaslist;
+ struct dasd_device *next;
+};
+
+struct dasd_conf_data {
+ struct dasd_ned neds[5];
+ u8 reserved[64];
+ struct dasd_gneq gneq;
+} __packed;
+
+struct dasd_conf {
+ u8 *data;
+ int len;
+ /* pointers to specific parts in the conf_data */
+ struct dasd_ned *ned;
+ struct dasd_sneq *sneq;
+ struct vd_sneq *vdsneq;
+ struct dasd_gneq *gneq;
+};
+
+struct dasd_eckd_private {
+ struct dasd_eckd_characteristics rdc_data;
+ struct dasd_conf conf;
+
+ struct eckd_count count_area[5];
+ int init_cqr_status;
+ int uses_cdl;
+ struct attrib_data_t attrib; /* e.g. cache operations */
+ struct dasd_rssd_features features;
+ struct dasd_rssd_vsq vsq;
+ struct dasd_ext_pool_sum eps;
+ u32 real_cyl;
+
+ /* alias management */
+ struct dasd_uid uid;
+ struct alias_pav_group *pavgroup;
+ struct alias_lcu *lcu;
+ int count;
+
+ u32 fcx_max_data;
+ char suc_reason;
+};
+
+
+
+int dasd_alias_make_device_known_to_lcu(struct dasd_device *);
+void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
+int dasd_alias_add_device(struct dasd_device *);
+int dasd_alias_remove_device(struct dasd_device *);
+struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
+void dasd_alias_handle_summary_unit_check(struct work_struct *);
+void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
+int dasd_alias_update_add_device(struct dasd_device *);
+#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
new file mode 100644
index 0000000000..c956de711c
--- /dev/null
+++ b/drivers/s390/block/dasd_eer.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Character device driver for extended error reporting.
+ *
+ * Copyright IBM Corp. 2005
+ * extended error reporting for DASD ECKD devices
+ * Author(s): Stefan Weinhuber <wein@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "dasd-eckd"
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <asm/ebcdic.h>
+
+#include "dasd_int.h"
+#include "dasd_eckd.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif /* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eer):"
+
+/*
+ * SECTION: the internal buffer
+ */
+
+/*
+ * The internal buffer is meant to store opaque blobs of data, so it does
+ * not know of higher level concepts like triggers.
+ * It consists of a number of pages that are used as a ringbuffer. Each data
+ * blob is stored in a simple record that consists of an integer, which
+ * contains the size of the following data, and the data bytes themselves.
+ *
+ * To allow for multiple independent readers we create one internal buffer
+ * each time the device is opened and destroy the buffer when the file is
+ * closed again. The number of pages used for this buffer is determined by
+ * the module parameter eer_pages.
+ *
+ * One record can be written to a buffer by using the functions
+ * - dasd_eer_start_record (one time per record to write the size to the
+ *   buffer and reserve the space for the data)
+ * - dasd_eer_write_buffer (one or more times per record to write the data)
+ * The data can be written in several steps but you will have to compute
+ * the total size up front for the invocation of dasd_eer_start_record.
+ * If the ringbuffer is full, dasd_eer_start_record will remove the required
+ * number of old records.
+ *
+ * A record is typically read in two steps, first read the integer that
+ * specifies the size of the following data, then read the data.
+ * Both can be done by
+ * - dasd_eer_read_buffer
+ *
+ * For all mentioned functions you need to get the bufferlock first and keep
+ * it until a complete record is written or read.
+ *
+ * All information necessary to keep track of an internal buffer is kept in
+ * a struct eerbuffer. The buffer specific to a file pointer is stored in
+ * the private_data field of that file. To be able to write data to all
+ * existing buffers, each buffer is also added to the bufferlist.
+ * If the user does not want to read a complete record in one go, we have to
+ * keep track of the rest of the record. residual stores the number of bytes
+ * that are still to be delivered. If the rest of the record is invalidated
+ * between two reads then residual will be set to -1 so that the next read
+ * will fail.
+ * All entries in the eerbuffer structure are protected with the bufferlock.
+ * To avoid races between writing to a buffer on the one side and creating
+ * and destroying buffers on the other side, the bufferlock must also be used
+ * to protect the bufferlist.
+ */
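+
+/*
+ * Illustrative layout of the ring buffer contents (a sketch, not
+ * normative):
+ *
+ *   | int count | count data bytes | int count | count data bytes | ...
+ *
+ * head points behind the most recently written byte, tail at the oldest
+ * byte not yet read; head == tail means the buffer is empty.
+ */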
+
+static int eer_pages = 5;
+module_param(eer_pages, int, S_IRUGO|S_IWUSR);
+
+struct eerbuffer {
+ struct list_head list;
+ char **buffer;
+ int buffersize;
+ int buffer_page_count;
+ int head;
+ int tail;
+ int residual;
+};
+
+static LIST_HEAD(bufferlist);
+static DEFINE_SPINLOCK(bufferlock);
+static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
+
+/*
+ * How many free bytes are available on the buffer.
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
+{
+ if (eerb->head < eerb->tail)
+ return eerb->tail - eerb->head - 1;
+ return eerb->buffersize - eerb->head + eerb->tail - 1;
+}
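+
+/*
+ * Example for the calculation above: with buffersize = 8192, head = 100
+ * and tail = 200 there are 99 free bytes. One byte always stays unused
+ * so that a completely full buffer (head one byte behind tail) can be
+ * told apart from an empty one (head == tail).
+ */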
+
+/*
+ * How many bytes of buffer space are used.
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
+{
+ if (eerb->head >= eerb->tail)
+ return eerb->head - eerb->tail;
+ return eerb->buffersize - eerb->tail + eerb->head;
+}
+
+/*
+ * The dasd_eer_write_buffer function just copies count bytes of data
+ * to the buffer. Call dasd_eer_start_record first to ensure that
+ * enough free space is available.
+ * Needs to be called with bufferlock held.
+ */
+static void dasd_eer_write_buffer(struct eerbuffer *eerb,
+ char *data, int count)
+{
+ unsigned long headindex, localhead;
+ unsigned long rest, len;
+ char *nextdata;
+
+ nextdata = data;
+ rest = count;
+ while (rest > 0) {
+ headindex = eerb->head / PAGE_SIZE;
+ localhead = eerb->head % PAGE_SIZE;
+ len = min(rest, PAGE_SIZE - localhead);
+ memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
+ nextdata += len;
+ rest -= len;
+ eerb->head += len;
+ if (eerb->head == eerb->buffersize)
+ eerb->head = 0; /* wrap around */
+ BUG_ON(eerb->head > eerb->buffersize);
+ }
+}
+
+/*
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
+{
+ unsigned long tailindex, localtail;
+ unsigned long rest, len, finalcount;
+ char *nextdata;
+
+ finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
+ nextdata = data;
+ rest = finalcount;
+ while (rest > 0) {
+ tailindex = eerb->tail / PAGE_SIZE;
+ localtail = eerb->tail % PAGE_SIZE;
+ len = min(rest, PAGE_SIZE - localtail);
+ memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
+ nextdata += len;
+ rest -= len;
+ eerb->tail += len;
+ if (eerb->tail == eerb->buffersize)
+ eerb->tail = 0; /* wrap around */
+ BUG_ON(eerb->tail > eerb->buffersize);
+ }
+ return finalcount;
+}
+
+/*
+ * Every blob of data written to the internal buffer must start with a
+ * call to this function. It writes the number of bytes that will follow
+ * to the buffer. If necessary it will remove
+ * old records to make room for the new one.
+ * Needs to be called with bufferlock held.
+ */
+static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
+{
+ int tailcount;
+
+ if (count + sizeof(count) > eerb->buffersize)
+ return -ENOMEM;
+ while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
+ if (eerb->residual > 0) {
+ eerb->tail += eerb->residual;
+ if (eerb->tail >= eerb->buffersize)
+ eerb->tail -= eerb->buffersize;
+ eerb->residual = -1;
+ }
+ dasd_eer_read_buffer(eerb, (char *) &tailcount,
+ sizeof(tailcount));
+ eerb->tail += tailcount;
+ if (eerb->tail >= eerb->buffersize)
+ eerb->tail -= eerb->buffersize;
+ }
+ dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));
+
+ return 0;
+}
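+
+/*
+ * Illustrative sketch (dasd_eer_write_record_example is a hypothetical
+ * helper, not part of the driver): a writer combines
+ * dasd_eer_start_record and dasd_eer_write_buffer under the bufferlock
+ * so that a record cannot be torn by concurrent writers.
+ */
+static void __maybe_unused dasd_eer_write_record_example(
+ struct eerbuffer *eerb, char *data, int count)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bufferlock, flags);
+ /* reserve space first; write the payload only if that succeeded */
+ if (!dasd_eer_start_record(eerb, count))
+ dasd_eer_write_buffer(eerb, data, count);
+ spin_unlock_irqrestore(&bufferlock, flags);
+}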
+
+/*
+ * Release pages that are not used anymore.
+ */
+static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
+{
+ int i;
+
+ for (i = 0; i < no_pages; i++)
+ free_page((unsigned long) buf[i]);
+}
+
+/*
+ * Allocate a new set of memory pages.
+ */
+static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
+{
+ int i;
+
+ for (i = 0; i < no_pages; i++) {
+ buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
+ if (!buf[i]) {
+ dasd_eer_free_buffer_pages(buf, i);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/*
+ * SECTION: The extended error reporting functionality
+ */
+
+/*
+ * When a DASD device driver wants to report an error, it calls the
+ * function dasd_eer_write and gives the respective trigger ID as
+ * parameter. Currently there are six kinds of triggers:
+ *
+ * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
+ * DASD_EER_PPRCSUSPEND: PPRC was suspended
+ * DASD_EER_NOPATH: There is no path to the device left.
+ * DASD_EER_NOSPC: The device has run out of space.
+ * DASD_EER_AUTOQUIESCE: The device was quiesced automatically.
+ * DASD_EER_STATECHANGE: The state of the device has changed.
+ *
+ * For all triggers except DASD_EER_STATECHANGE the required information
+ * can be supplied by the caller. For these triggers a record is written
+ * by the function dasd_eer_write_standard_trigger.
+ *
+ * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
+ * status ccw needs to be executed to gather the necessary sense data first.
+ * The dasd_eer_snss function will queue the SNSS request and the request
+ * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
+ * trigger.
+ *
+ * To avoid memory allocations at runtime, the necessary memory is allocated
+ * when extended error reporting is enabled for a device (by
+ * dasd_eer_enable). There is one sense subsystem status request for each
+ * eer enabled DASD device. The presence of the cqr in device->eer_cqr
+ * indicates that eer is enabled for the device. The use of the snss request
+ * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
+ * that the cqr is currently in use, dasd_eer_snss cannot start a second
+ * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
+ * the SNSS request will check the bit and call dasd_eer_snss again.
+ */
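+
+/*
+ * Illustrative call sites (a sketch, not taken from this file): a
+ * discipline that hits an unrecoverable I/O error on a request would
+ * report it with
+ *
+ *	dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
+ *
+ * while a path loss that has no associated request is reported with
+ *
+ *	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+ */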
+
+#define SNSS_DATA_SIZE 44
+
+#define DASD_EER_BUSID_SIZE 10
+struct dasd_eer_header {
+ __u32 total_size;
+ __u32 trigger;
+ __u64 tv_sec;
+ __u64 tv_usec;
+ char busid[DASD_EER_BUSID_SIZE];
+} __attribute__ ((packed));
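+
+/*
+ * Illustrative user-space consumer (a sketch; the two-read pattern is an
+ * assumption, not mandated by this file): every record delivered by
+ * read() on the misc device starts with this header. total_size covers
+ * the header, any sense data and the trailing "EOR" marker, so one
+ * record can be consumed as:
+ *
+ *	struct dasd_eer_header hdr;
+ *	read(fd, &hdr, sizeof(hdr));
+ *	read(fd, data, hdr.total_size - sizeof(hdr));
+ */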
+
+/*
+ * The following function can be used for those triggers that have
+ * all necessary data available when the function is called.
+ * If the parameter cqr is not NULL, the chain of requests will be searched
+ * for valid sense data, and all valid sense data sets will be added to
+ * the triggers data.
+ */
+static void dasd_eer_write_standard_trigger(struct dasd_device *device,
+ struct dasd_ccw_req *cqr,
+ int trigger)
+{
+ struct dasd_ccw_req *temp_cqr;
+ int data_size;
+ struct timespec64 ts;
+ struct dasd_eer_header header;
+ unsigned long flags;
+ struct eerbuffer *eerb;
+ char *sense;
+
+ /* go through cqr chain and count the valid sense data sets */
+ data_size = 0;
+ for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
+ if (dasd_get_sense(&temp_cqr->irb))
+ data_size += 32;
+
+ header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
+ header.trigger = trigger;
+ ktime_get_real_ts64(&ts);
+ header.tv_sec = ts.tv_sec;
+ header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ strscpy(header.busid, dev_name(&device->cdev->dev),
+ DASD_EER_BUSID_SIZE);
+
+ spin_lock_irqsave(&bufferlock, flags);
+ list_for_each_entry(eerb, &bufferlist, list) {
+ dasd_eer_start_record(eerb, header.total_size);
+ dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
+ for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
+ sense = dasd_get_sense(&temp_cqr->irb);
+ if (sense)
+ dasd_eer_write_buffer(eerb, sense, 32);
+ }
+ dasd_eer_write_buffer(eerb, "EOR", 4);
+ }
+ spin_unlock_irqrestore(&bufferlock, flags);
+ wake_up_interruptible(&dasd_eer_read_wait_queue);
+}
+
+/*
+ * This function writes a DASD_EER_STATECHANGE trigger.
+ */
+static void dasd_eer_write_snss_trigger(struct dasd_device *device,
+ struct dasd_ccw_req *cqr,
+ int trigger)
+{
+ int data_size;
+ int snss_rc;
+ struct timespec64 ts;
+ struct dasd_eer_header header;
+ unsigned long flags;
+ struct eerbuffer *eerb;
+
+ snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+ if (snss_rc)
+ data_size = 0;
+ else
+ data_size = SNSS_DATA_SIZE;
+
+ header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
+ header.trigger = DASD_EER_STATECHANGE;
+ ktime_get_real_ts64(&ts);
+ header.tv_sec = ts.tv_sec;
+ header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ strscpy(header.busid, dev_name(&device->cdev->dev),
+ DASD_EER_BUSID_SIZE);
+
+ spin_lock_irqsave(&bufferlock, flags);
+ list_for_each_entry(eerb, &bufferlist, list) {
+ dasd_eer_start_record(eerb, header.total_size);
+ dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
+ if (!snss_rc)
+ dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
+ dasd_eer_write_buffer(eerb, "EOR", 4);
+ }
+ spin_unlock_irqrestore(&bufferlock, flags);
+ wake_up_interruptible(&dasd_eer_read_wait_queue);
+}
+
+/*
+ * This function is called for all triggers. It calls the appropriate
+ * function that writes the actual trigger records.
+ */
+void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
+ unsigned int id)
+{
+ if (!device->eer_cqr)
+ return;
+ switch (id) {
+ case DASD_EER_FATALERROR:
+ case DASD_EER_PPRCSUSPEND:
+ dasd_eer_write_standard_trigger(device, cqr, id);
+ break;
+ case DASD_EER_NOPATH:
+ case DASD_EER_NOSPC:
+ case DASD_EER_AUTOQUIESCE:
+ dasd_eer_write_standard_trigger(device, NULL, id);
+ break;
+ case DASD_EER_STATECHANGE:
+ dasd_eer_write_snss_trigger(device, cqr, id);
+ break;
+ default: /* unknown trigger, so we write it without any sense data */
+ dasd_eer_write_standard_trigger(device, NULL, id);
+ break;
+ }
+}
+EXPORT_SYMBOL(dasd_eer_write);
+
+/*
+ * Start a sense subsystem status request.
+ * Needs to be called with the device held.
+ */
+void dasd_eer_snss(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+
+ cqr = device->eer_cqr;
+ if (!cqr) /* Device not eer enabled. */
+ return;
+ if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
+ /* Sense subsystem status request in use. */
+ set_bit(DASD_FLAG_EER_SNSS, &device->flags);
+ return;
+ }
+ /* cdev is already locked, can't use dasd_add_request_head */
+ clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
+ cqr->status = DASD_CQR_QUEUED;
+ list_add(&cqr->devlist, &device->ccw_queue);
+ dasd_schedule_device_bh(device);
+}
+
+/*
+ * Callback function for use with sense subsystem status request.
+ */
+static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
+{
+ struct dasd_device *device = cqr->startdev;
+ unsigned long flags;
+
+ dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (device->eer_cqr == cqr) {
+ clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
+ if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
+ /* Another SNSS has been requested in the meantime. */
+ dasd_eer_snss(device);
+ cqr = NULL;
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ if (cqr)
+ /*
+ * Extended error recovery has been switched off while
+ * the SNSS request was running. It could even have
+ * been switched off and on again in which case there
+ * is a new ccw in device->eer_cqr. Free the "old"
+ * snss request now.
+ */
+ dasd_sfree_request(cqr, device);
+}
+
+/*
+ * Enable error reporting on a given device.
+ */
+int dasd_eer_enable(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr = NULL;
+ unsigned long flags;
+ struct ccw1 *ccw;
+ int rc = 0;
+
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (device->eer_cqr)
+ goto out;
+ else if (!device->discipline ||
+ strcmp(device->discipline->name, "ECKD"))
+ rc = -EMEDIUMTYPE;
+ else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
+ rc = -EBUSY;
+
+ if (rc)
+ goto out;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+ SNSS_DATA_SIZE, device, NULL);
+ if (IS_ERR(cqr)) {
+ rc = -ENOMEM;
+ cqr = NULL;
+ goto out;
+ }
+
+ cqr->startdev = device;
+ cqr->retries = 255;
+ cqr->expires = 10 * HZ;
+ clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_SNSS;
+ ccw->count = SNSS_DATA_SIZE;
+ ccw->flags = 0;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ cqr->callback = dasd_eer_snss_cb;
+
+ if (!device->eer_cqr) {
+ device->eer_cqr = cqr;
+ cqr = NULL;
+ }
+
+out:
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+ if (cqr)
+ dasd_sfree_request(cqr, device);
+
+ return rc;
+}
+
+/*
+ * Disable error reporting on a given device.
+ */
+void dasd_eer_disable(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ unsigned long flags;
+ int in_use;
+
+ if (!device->eer_cqr)
+ return;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ cqr = device->eer_cqr;
+ device->eer_cqr = NULL;
+ clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
+ in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ if (cqr && !in_use)
+ dasd_sfree_request(cqr, device);
+}
+
+/*
+ * SECTION: the device operations
+ */
+
+/*
+ * On the one hand we need a lock to access our internal buffer, on the
+ * other hand a copy_to_user can sleep. So we need to copy the data we have
+ * to transfer into a readbuffer, which is protected by the readbuffer_mutex.
+ */
+static char readbuffer[PAGE_SIZE];
+static DEFINE_MUTEX(readbuffer_mutex);
+
+static int dasd_eer_open(struct inode *inp, struct file *filp)
+{
+ struct eerbuffer *eerb;
+ unsigned long flags;
+
+ eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
+ if (!eerb)
+ return -ENOMEM;
+ eerb->buffer_page_count = eer_pages;
+ if (eerb->buffer_page_count < 1 ||
+ eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
+ kfree(eerb);
+ DBF_EVENT(DBF_WARNING, "can't open device since module "
+ "parameter eer_pages is smaller than 1 or"
+ " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
+ return -EINVAL;
+ }
+ eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
+ eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
+ GFP_KERNEL);
+ if (!eerb->buffer) {
+ kfree(eerb);
+ return -ENOMEM;
+ }
+ if (dasd_eer_allocate_buffer_pages(eerb->buffer,
+ eerb->buffer_page_count)) {
+ kfree(eerb->buffer);
+ kfree(eerb);
+ return -ENOMEM;
+ }
+ filp->private_data = eerb;
+ spin_lock_irqsave(&bufferlock, flags);
+ list_add(&eerb->list, &bufferlist);
+ spin_unlock_irqrestore(&bufferlock, flags);
+
+ return nonseekable_open(inp, filp);
+}
+
+static int dasd_eer_close(struct inode *inp, struct file *filp)
+{
+ struct eerbuffer *eerb;
+ unsigned long flags;
+
+ eerb = (struct eerbuffer *) filp->private_data;
+ spin_lock_irqsave(&bufferlock, flags);
+ list_del(&eerb->list);
+ spin_unlock_irqrestore(&bufferlock, flags);
+ dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
+ kfree(eerb->buffer);
+ kfree(eerb);
+
+ return 0;
+}
+
+static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int tc, rc;
+ int tailcount, effective_count;
+ unsigned long flags;
+ struct eerbuffer *eerb;
+
+ eerb = (struct eerbuffer *) filp->private_data;
+ if (mutex_lock_interruptible(&readbuffer_mutex))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&bufferlock, flags);
+
+ if (eerb->residual < 0) {
+ /* the remainder of this record has been deleted */
+ eerb->residual = 0;
+ spin_unlock_irqrestore(&bufferlock, flags);
+ mutex_unlock(&readbuffer_mutex);
+ return -EIO;
+ } else if (eerb->residual > 0) {
+ /* OK we still have a second half of a record to deliver */
+ effective_count = min(eerb->residual, (int) count);
+ eerb->residual -= effective_count;
+ } else {
+ tc = 0;
+ while (!tc) {
+ tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
+ sizeof(tailcount));
+ if (!tc) {
+ /* no data available */
+ spin_unlock_irqrestore(&bufferlock, flags);
+ mutex_unlock(&readbuffer_mutex);
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ rc = wait_event_interruptible(
+ dasd_eer_read_wait_queue,
+ eerb->head != eerb->tail);
+ if (rc)
+ return rc;
+ if (mutex_lock_interruptible(&readbuffer_mutex))
+ return -ERESTARTSYS;
+ spin_lock_irqsave(&bufferlock, flags);
+ }
+ }
+ WARN_ON(tc != sizeof(tailcount));
+ effective_count = min(tailcount, (int)count);
+ eerb->residual = tailcount - effective_count;
+ }
+
+ tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
+ WARN_ON(tc != effective_count);
+
+ spin_unlock_irqrestore(&bufferlock, flags);
+
+ if (copy_to_user(buf, readbuffer, effective_count)) {
+ mutex_unlock(&readbuffer_mutex);
+ return -EFAULT;
+ }
+
+ mutex_unlock(&readbuffer_mutex);
+ return effective_count;
+}
+
+static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
+{
+ __poll_t mask;
+ unsigned long flags;
+ struct eerbuffer *eerb;
+
+ eerb = (struct eerbuffer *) filp->private_data;
+ poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
+ spin_lock_irqsave(&bufferlock, flags);
+ if (eerb->head != eerb->tail)
+ mask = EPOLLIN | EPOLLRDNORM;
+ else
+ mask = 0;
+ spin_unlock_irqrestore(&bufferlock, flags);
+ return mask;
+}
+
+static const struct file_operations dasd_eer_fops = {
+ .open = &dasd_eer_open,
+ .release = &dasd_eer_close,
+ .read = &dasd_eer_read,
+ .poll = &dasd_eer_poll,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice *dasd_eer_dev;
+
+int __init dasd_eer_init(void)
+{
+ int rc;
+
+ dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
+ if (!dasd_eer_dev)
+ return -ENOMEM;
+
+ dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
+ dasd_eer_dev->name = "dasd_eer";
+ dasd_eer_dev->fops = &dasd_eer_fops;
+
+ rc = misc_register(dasd_eer_dev);
+ if (rc) {
+ kfree(dasd_eer_dev);
+ dasd_eer_dev = NULL;
+ DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
+ "register misc device");
+ return rc;
+ }
+
+ return 0;
+}
+
+void dasd_eer_exit(void)
+{
+ if (dasd_eer_dev) {
+ misc_deregister(dasd_eer_dev);
+ kfree(dasd_eer_dev);
+ dasd_eer_dev = NULL;
+ }
+}
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
new file mode 100644
index 0000000000..c07e6e7135
--- /dev/null
+++ b/drivers/s390/block/dasd_erp.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2001
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <linux/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_erp:"
+
+#include "dasd_int.h"
+
+struct dasd_ccw_req *
+dasd_alloc_erp_request(unsigned int magic, int cplength, int datasize,
+ struct dasd_device *device)
+{
+ unsigned long flags;
+ struct dasd_ccw_req *cqr;
+ char *data;
+ int size;
+
+ /* Sanity checks */
+ BUG_ON(datasize > PAGE_SIZE ||
+ (cplength * sizeof(struct ccw1)) > PAGE_SIZE);
+
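+ /*
+ * Round the request size up to a multiple of 8 so that the channel
+ * program and data area appended below stay doubleword aligned,
+ * e.g. 52 -> 56 and 56 -> 56.
+ */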
+ size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
+ if (cplength > 0)
+ size += cplength * sizeof(struct ccw1);
+ if (datasize > 0)
+ size += datasize;
+ spin_lock_irqsave(&device->mem_lock, flags);
+ cqr = (struct dasd_ccw_req *)
+ dasd_alloc_chunk(&device->erp_chunks, size);
+ spin_unlock_irqrestore(&device->mem_lock, flags);
+ if (cqr == NULL)
+ return ERR_PTR(-ENOMEM);
+ memset(cqr, 0, sizeof(struct dasd_ccw_req));
+ INIT_LIST_HEAD(&cqr->devlist);
+ INIT_LIST_HEAD(&cqr->blocklist);
+ data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
+ cqr->cpaddr = NULL;
+ if (cplength > 0) {
+ cqr->cpaddr = (struct ccw1 *) data;
+ data += cplength * sizeof(struct ccw1);
+ memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
+ }
+ cqr->data = NULL;
+ if (datasize > 0) {
+ cqr->data = data;
+ memset(cqr->data, 0, datasize);
+ }
+ cqr->magic = magic;
+ ASCEBC((char *) &cqr->magic, 4);
+ set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ dasd_get_device(device);
+ return cqr;
+}
+
+void
+dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&device->mem_lock, flags);
+ dasd_free_chunk(&device->erp_chunks, cqr);
+ spin_unlock_irqrestore(&device->mem_lock, flags);
+ atomic_dec(&device->ref_count);
+}
+
+
+/*
+ * dasd_default_erp_action just retries the current cqr
+ */
+struct dasd_ccw_req *
+dasd_default_erp_action(struct dasd_ccw_req *cqr)
+{
+ struct dasd_device *device;
+
+ device = cqr->startdev;
+
+ /* just retry - there is nothing to save and no sense data to evaluate */
+ if (cqr->retries > 0) {
+ DBF_DEV_EVENT(DBF_DEBUG, device,
+ "default ERP called (%i retries left)",
+ cqr->retries);
+ if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+ cqr->lpm = dasd_path_get_opm(device);
+ cqr->status = DASD_CQR_FILLED;
+ } else {
+ pr_err("%s: default ERP has run out of retries and failed\n",
+ dev_name(&device->cdev->dev));
+ cqr->status = DASD_CQR_FAILED;
+ cqr->stopclk = get_tod_clock();
+ }
+ return cqr;
+} /* end dasd_default_erp_action */
+
+/*
+ * DESCRIPTION
+ * Frees all ERPs of the current ERP Chain and sets the status
+ * of the original CQR either to DASD_CQR_DONE if ERP was successful
+ * or to DASD_CQR_FAILED if ERP was NOT successful.
+ * NOTE: This function is only called if no discipline postaction
+ * is available
+ *
+ * PARAMETER
+ * erp current erp_head
+ *
+ * RETURN VALUES
+ * cqr pointer to the original CQR
+ */
+struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
+{
+ int success;
+ unsigned long startclk, stopclk;
+ struct dasd_device *startdev;
+
+ BUG_ON(cqr->refers == NULL || cqr->function == NULL);
+
+ success = cqr->status == DASD_CQR_DONE;
+ startclk = cqr->startclk;
+ stopclk = cqr->stopclk;
+ startdev = cqr->startdev;
+
+ /* free all ERPs - but NOT the original cqr */
+ while (cqr->refers != NULL) {
+ struct dasd_ccw_req *refers;
+
+ refers = cqr->refers;
+ /* remove the request from the block queue */
+ list_del(&cqr->blocklist);
+ /* free the finished erp request */
+ dasd_free_erp_request(cqr, cqr->memdev);
+ cqr = refers;
+ }
+
+ /* set corresponding status to original cqr */
+ cqr->startclk = startclk;
+ cqr->stopclk = stopclk;
+ cqr->startdev = startdev;
+ if (success)
+ cqr->status = DASD_CQR_DONE;
+ else {
+ cqr->status = DASD_CQR_FAILED;
+ cqr->stopclk = get_tod_clock();
+ }
+
+ return cqr;
+
+} /* end default_erp_postaction */
+
+void
+dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
+{
+ struct dasd_device *device;
+
+ device = cqr->startdev;
+ if (cqr->intrc == -ETIMEDOUT) {
+ dev_err(&device->cdev->dev,
+ "A timeout error occurred for cqr %p\n", cqr);
+ return;
+ }
+ if (cqr->intrc == -ENOLINK) {
+ dev_err(&device->cdev->dev,
+ "A transport error occurred for cqr %p\n", cqr);
+ return;
+ }
+ /* dump sense data */
+ if (device->discipline && device->discipline->dump_sense)
+ device->discipline->dump_sense(device, cqr, irb);
+}
+
+void
+dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
+{
+ struct dasd_device *device;
+
+ device = cqr->startdev;
+ /* dump sense data to s390 debug feature */
+ if (device->discipline && device->discipline->dump_sense_dbf)
+ device->discipline->dump_sense_dbf(device, irb, "log");
+}
+EXPORT_SYMBOL(dasd_log_sense_dbf);
+
+EXPORT_SYMBOL(dasd_default_erp_action);
+EXPORT_SYMBOL(dasd_default_erp_postaction);
+EXPORT_SYMBOL(dasd_alloc_erp_request);
+EXPORT_SYMBOL(dasd_free_erp_request);
+EXPORT_SYMBOL(dasd_log_sense);
+
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
new file mode 100644
index 0000000000..c06fa2b271
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+#define KMSG_COMPONENT "dasd-fba"
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <asm/debug.h>
+
+#include <linux/slab.h>
+#include <linux/hdreg.h> /* HDIO_GETGEO */
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/idals.h>
+#include <asm/ebcdic.h>
+#include <asm/ccwdev.h>
+
+#include "dasd_int.h"
+#include "dasd_fba.h"
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif /* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(fba):"
+
+#define FBA_DEFAULT_RETRIES 32
+
+#define DASD_FBA_CCW_WRITE 0x41
+#define DASD_FBA_CCW_READ 0x42
+#define DASD_FBA_CCW_LOCATE 0x43
+#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
+
+MODULE_LICENSE("GPL");
+
+static struct dasd_discipline dasd_fba_discipline;
+static void *dasd_fba_zero_page;
+
+struct dasd_fba_private {
+ struct dasd_fba_characteristics rdc_data;
+};
+
+static struct ccw_device_id dasd_fba_ids[] = {
+ { CCW_DEVICE_DEVTYPE(0x6310, 0, 0x9336, 0), .driver_info = 0x1},
+ { CCW_DEVICE_DEVTYPE(0x3880, 0, 0x3370, 0), .driver_info = 0x2},
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
+
+static int
+dasd_fba_set_online(struct ccw_device *cdev)
+{
+ return dasd_generic_set_online(cdev, &dasd_fba_discipline);
+}
+
+static struct ccw_driver dasd_fba_driver = {
+ .driver = {
+ .name = "dasd-fba",
+ .owner = THIS_MODULE,
+ .dev_groups = dasd_dev_groups,
+ },
+ .ids = dasd_fba_ids,
+ .probe = dasd_generic_probe,
+ .remove = dasd_generic_remove,
+ .set_offline = dasd_generic_set_offline,
+ .set_online = dasd_fba_set_online,
+ .notify = dasd_generic_notify,
+ .path_event = dasd_generic_path_event,
+ .int_class = IRQIO_DAS,
+};
+
+static void
+define_extent(struct ccw1 *ccw, struct DE_fba_data *data, int rw,
+ int blksize, int beg, int nr)
+{
+ ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
+ ccw->flags = 0;
+ ccw->count = 16;
+ ccw->cda = (__u32)virt_to_phys(data);
+ memset(data, 0, sizeof(struct DE_fba_data));
+ if (rw == WRITE)
+ data->mask.perm = 0x0;
+ else if (rw == READ)
+ data->mask.perm = 0x1;
+ else
+ data->mask.perm = 0x2;
+ data->blk_size = blksize;
+ data->ext_loc = beg;
+ data->ext_end = nr - 1;
+}
+
+static void
+locate_record(struct ccw1 *ccw, struct LO_fba_data *data, int rw,
+ int block_nr, int block_ct)
+{
+ ccw->cmd_code = DASD_FBA_CCW_LOCATE;
+ ccw->flags = 0;
+ ccw->count = 8;
+ ccw->cda = (__u32)virt_to_phys(data);
+ memset(data, 0, sizeof(struct LO_fba_data));
+ if (rw == WRITE)
+ data->operation.cmd = 0x5;
+ else if (rw == READ)
+ data->operation.cmd = 0x6;
+ else
+ data->operation.cmd = 0x8;
+ data->blk_nr = block_nr;
+ data->blk_ct = block_ct;
+}
+
+static int
+dasd_fba_check_characteristics(struct dasd_device *device)
+{
+ struct dasd_fba_private *private = device->private;
+ struct ccw_device *cdev = device->cdev;
+ struct dasd_block *block;
+ int readonly, rc;
+
+ if (!private) {
+ private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ if (!private) {
+ dev_warn(&device->cdev->dev,
+ "Allocating memory for private DASD "
+ "data failed\n");
+ return -ENOMEM;
+ }
+ device->private = private;
+ } else {
+ memset(private, 0, sizeof(*private));
+ }
+ block = dasd_alloc_block();
+ if (IS_ERR(block)) {
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
+ "dasd block structure");
+ device->private = NULL;
+ kfree(private);
+ return PTR_ERR(block);
+ }
+ device->block = block;
+ block->base = device;
+
+ /* Read Device Characteristics */
+ rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
+ &private->rdc_data, 32);
+ if (rc) {
+ DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
+ "characteristics returned error %d", rc);
+ device->block = NULL;
+ dasd_free_block(block);
+ device->private = NULL;
+ kfree(private);
+ return rc;
+ }
+
+ device->default_expires = DASD_EXPIRES;
+ device->default_retries = FBA_DEFAULT_RETRIES;
+ dasd_path_set_opm(device, LPM_ANYPATH);
+
+ readonly = dasd_device_is_ro(device);
+ if (readonly)
+ set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+ /* FBA supports discard, set the according feature bit */
+ dasd_set_feature(cdev, DASD_FEATURE_DISCARD, 1);
+
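+ /*
+ * The capacity printed below is the device size converted to
+ * 512 byte sectors and then shifted right by 11, i.e.
+ * sectors / 2048 = MB.
+ */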
+ dev_info(&device->cdev->dev,
+ "New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
+ "and %d B/blk%s\n",
+ cdev->id.dev_type,
+ cdev->id.dev_model,
+ cdev->id.cu_type,
+ cdev->id.cu_model,
+ ((private->rdc_data.blk_bdsa *
+ (private->rdc_data.blk_size >> 9)) >> 11),
+ private->rdc_data.blk_size,
+ readonly ? ", read-only device" : "");
+ return 0;
+}
+
+static int dasd_fba_do_analysis(struct dasd_block *block)
+{
+ struct dasd_fba_private *private = block->base->private;
+ int sb, rc;
+
+ rc = dasd_check_blocksize(private->rdc_data.blk_size);
+ if (rc) {
+ DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
+ private->rdc_data.blk_size);
+ return rc;
+ }
+ block->blocks = private->rdc_data.blk_bdsa;
+ block->bp_block = private->rdc_data.blk_size;
+ block->s2b_shift = 0; /* bits to shift 512 to get a block */
+ for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
+ block->s2b_shift++;
+ return 0;
+}
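+
+/*
+ * Example: a 4096 byte block size yields s2b_shift = 3 in the loop
+ * above, i.e. eight 512 byte sectors per block.
+ */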
+
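+/*
+ * FBA devices have no physical cylinder/head/sector geometry, so a
+ * plausible one is fabricated for partitioning tools: the heads value
+ * is fixed and the sector and cylinder counts are scaled by the block
+ * size.
+ */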
+static int dasd_fba_fill_geometry(struct dasd_block *block,
+ struct hd_geometry *geo)
+{
+ if (dasd_check_blocksize(block->bp_block) != 0)
+ return -EINVAL;
+ geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
+ geo->heads = 16;
+ geo->sectors = 128 >> block->s2b_shift;
+ return 0;
+}
+
+static dasd_erp_fn_t
+dasd_fba_erp_action(struct dasd_ccw_req *cqr)
+{
+ return dasd_default_erp_action;
+}
+
+static dasd_erp_fn_t
+dasd_fba_erp_postaction(struct dasd_ccw_req *cqr)
+{
+ if (cqr->function == dasd_default_erp_action)
+ return dasd_default_erp_postaction;
+
+ DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
+ cqr->function);
+ return NULL;
+}
+
+static void dasd_fba_check_for_device_change(struct dasd_device *device,
+ struct dasd_ccw_req *cqr,
+ struct irb *irb)
+{
+ char mask;
+
+ /* first of all check for state change pending interrupt */
+ mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
+ if ((irb->scsw.cmd.dstat & mask) == mask)
+ dasd_generic_handle_state_change(device);
+}
+
+
+/*
+ * Builds a CCW with no data payload
+ */
+static void ccw_write_no_data(struct ccw1 *ccw)
+{
+ ccw->cmd_code = DASD_FBA_CCW_WRITE;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = 0;
+}
+
+/*
+ * Builds a CCW that writes only zeroes.
+ */
+static void ccw_write_zero(struct ccw1 *ccw, int count)
+{
+ ccw->cmd_code = DASD_FBA_CCW_WRITE;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->count = count;
+ ccw->cda = (__u32)virt_to_phys(dasd_fba_zero_page);
+}
+
+/*
+ * Helper function to count the amount of necessary CCWs within a given range
+ * with 4k alignment and command chaining in mind.
+ */
+static int count_ccws(sector_t first_rec, sector_t last_rec,
+ unsigned int blocks_per_page)
+{
+ sector_t wz_stop = 0, d_stop = 0;
+ int cur_pos = 0;
+ int count = 0;
+
+ if (first_rec % blocks_per_page != 0) {
+ wz_stop = first_rec + blocks_per_page -
+ (first_rec % blocks_per_page) - 1;
+ if (wz_stop > last_rec)
+ wz_stop = last_rec;
+ cur_pos = wz_stop - first_rec + 1;
+ count++;
+ }
+
+ if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
+ if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
+ d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
+ blocks_per_page);
+ else
+ d_stop = last_rec;
+
+ cur_pos += d_stop - (first_rec + cur_pos) + 1;
+ count++;
+ }
+
+ if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec)
+ count++;
+
+ return count;
+}
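+
+/*
+ * Worked example (illustrative): with 4k pages and 512 byte blocks
+ * (blocks_per_page = 8), the range first_rec = 6 .. last_rec = 20 splits
+ * into an unaligned head (blocks 6-7), one aligned middle part (blocks
+ * 8-15) and an unaligned tail (blocks 16-20), so count_ccws() returns 3.
+ */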
+
+/*
+ * This function builds a CCW request for block layer discard requests.
+ * Each page in the z/VM hypervisor that represents certain records of an FBA
+ * device will be padded with zeros. This is a special behaviour of the WRITE
+ * command which is triggered when no data payload is added to the CCW.
+ *
+ * Note: Due to issues in some z/VM versions, we can't fully utilise this
+ * special behaviour. We have to keep a 4k (or 8 block) alignment in mind to
+ * work around those issues and write actual zeroes to the unaligned parts in
+ * the request. This workaround might be removed in the future.
+ */
+static struct dasd_ccw_req *dasd_fba_build_cp_discard(
+ struct dasd_device *memdev,
+ struct dasd_block *block,
+ struct request *req)
+{
+ struct LO_fba_data *LO_data;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+
+ sector_t wz_stop = 0, d_stop = 0;
+ sector_t first_rec, last_rec;
+
+ unsigned int blksize = block->bp_block;
+ unsigned int blocks_per_page;
+ int wz_count = 0;
+ int d_count = 0;
+ int cur_pos = 0; /* Current position within the extent */
+ int count = 0;
+ int cplength;
+ int datasize;
+ int nr_ccws;
+
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+ count = last_rec - first_rec + 1;
+
+ blocks_per_page = BLOCKS_PER_PAGE(blksize);
+ nr_ccws = count_ccws(first_rec, last_rec, blocks_per_page);
+
+ /* define extent + nr_ccws * locate record + nr_ccws * single CCW */
+ cplength = 1 + 2 * nr_ccws;
+ datasize = sizeof(struct DE_fba_data) +
+ nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
+
+ cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+ blk_mq_rq_to_pdu(req));
+ if (IS_ERR(cqr))
+ return cqr;
+
+ ccw = cqr->cpaddr;
+
+ define_extent(ccw++, cqr->data, WRITE, blksize, first_rec, count);
+ LO_data = cqr->data + sizeof(struct DE_fba_data);
+
+ /* First part is not aligned. Calculate range to write zeroes. */
+ if (first_rec % blocks_per_page != 0) {
+ wz_stop = first_rec + blocks_per_page -
+ (first_rec % blocks_per_page) - 1;
+ if (wz_stop > last_rec)
+ wz_stop = last_rec;
+ wz_count = wz_stop - first_rec + 1;
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw_write_zero(ccw++, wz_count * blksize);
+
+ cur_pos = wz_count;
+ }
+
+ /* We can do proper discard when we've got at least blocks_per_page blocks. */
+ if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
+ /* is last record at page boundary? */
+ if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
+ d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
+ blocks_per_page);
+ else
+ d_stop = last_rec;
+
+ d_count = d_stop - (first_rec + cur_pos) + 1;
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, WRITE, cur_pos, d_count);
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw_write_no_data(ccw++);
+
+ cur_pos += d_count;
+ }
+
+ /* We might still have some bits left which need to be zeroed. */
+ if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec) {
+ if (d_stop != 0)
+ wz_count = last_rec - d_stop;
+ else if (wz_stop != 0)
+ wz_count = last_rec - wz_stop;
+ else
+ wz_count = count;
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);
+
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw_write_zero(ccw++, wz_count * blksize);
+ }
+
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+
+ cqr->startdev = memdev;
+ cqr->memdev = memdev;
+ cqr->block = block;
+ cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
+ cqr->retries = memdev->default_retries;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ return cqr;
+}
+
+static struct dasd_ccw_req *dasd_fba_build_cp_regular(
+ struct dasd_device *memdev,
+ struct dasd_block *block,
+ struct request *req)
+{
+ struct dasd_fba_private *private = block->base->private;
+ unsigned long *idaws;
+ struct LO_fba_data *LO_data;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ struct req_iterator iter;
+ struct bio_vec bv;
+ char *dst;
+ int count, cidaw, cplength, datasize;
+ sector_t recid, first_rec, last_rec;
+ unsigned int blksize, off;
+ unsigned char cmd;
+
+ if (rq_data_dir(req) == READ) {
+ cmd = DASD_FBA_CCW_READ;
+ } else if (rq_data_dir(req) == WRITE) {
+ cmd = DASD_FBA_CCW_WRITE;
+ } else
+ return ERR_PTR(-EINVAL);
+ blksize = block->bp_block;
+ /* Calculate record id of first and last block. */
+ first_rec = blk_rq_pos(req) >> block->s2b_shift;
+ last_rec =
+ (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+ /* Check struct bio and count the number of blocks for the request. */
+ count = 0;
+ cidaw = 0;
+ rq_for_each_segment(bv, req, iter) {
+ if (bv.bv_len & (blksize - 1))
+ /* Fba can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv.bv_len >> (block->s2b_shift + 9);
+ if (idal_is_needed(page_address(bv.bv_page), bv.bv_len))
+ cidaw += bv.bv_len / blksize;
+ }
+ /* Paranoia. */
+ if (count != last_rec - first_rec + 1)
+ return ERR_PTR(-EINVAL);
+ /* 1x define extent + 1x locate record + number of blocks */
+ cplength = 2 + count;
+ /* 1x define extent + 1x locate record */
+ datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
+ cidaw * sizeof(unsigned long);
+ /*
+ * Find out number of additional locate record ccws if the device
+ * can't do data chaining.
+ */
+ if (private->rdc_data.mode.bits.data_chain == 0) {
+ cplength += count - 1;
+ datasize += (count - 1) * sizeof(struct LO_fba_data);
+ }
+ /* Allocate the ccw request. */
+ cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+ blk_mq_rq_to_pdu(req));
+ if (IS_ERR(cqr))
+ return cqr;
+ ccw = cqr->cpaddr;
+ /* First ccw is define extent. */
+ define_extent(ccw++, cqr->data, rq_data_dir(req),
+ block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
+ /* Build locate_record + read/write ccws. */
+ idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
+ LO_data = (struct LO_fba_data *) (idaws + cidaw);
+ /* Locate record for all blocks for smart devices. */
+ if (private->rdc_data.mode.bits.data_chain != 0) {
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
+ }
+ recid = first_rec;
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ if (dasd_page_cache) {
+ char *copy = kmem_cache_alloc(dasd_page_cache,
+ GFP_DMA | __GFP_NOWARN);
+ if (copy && rq_data_dir(req) == WRITE)
+ memcpy(copy + bv.bv_offset, dst, bv.bv_len);
+ if (copy)
+ dst = copy + bv.bv_offset;
+ }
+ for (off = 0; off < bv.bv_len; off += blksize) {
+ /* Locate record for stupid devices. */
+ if (private->rdc_data.mode.bits.data_chain == 0) {
+ ccw[-1].flags |= CCW_FLAG_CC;
+ locate_record(ccw, LO_data++,
+ rq_data_dir(req),
+ recid - first_rec, 1);
+ ccw->flags = CCW_FLAG_CC;
+ ccw++;
+ } else {
+ if (recid > first_rec)
+ ccw[-1].flags |= CCW_FLAG_DC;
+ else
+ ccw[-1].flags |= CCW_FLAG_CC;
+ }
+ ccw->cmd_code = cmd;
+ ccw->count = block->bp_block;
+ if (idal_is_needed(dst, blksize)) {
+ ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->flags = CCW_FLAG_IDA;
+ idaws = idal_create_words(idaws, dst, blksize);
+ } else {
+ ccw->cda = (__u32)virt_to_phys(dst);
+ ccw->flags = 0;
+ }
+ ccw++;
+ dst += blksize;
+ recid++;
+ }
+ }
+ if (blk_noretry_request(req) ||
+ block->base->features & DASD_FEATURE_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ cqr->startdev = memdev;
+ cqr->memdev = memdev;
+ cqr->block = block;
+ cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
+ cqr->retries = memdev->default_retries;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ return cqr;
+}
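+
+/*
+ * Shape of the channel program built above (illustrative). Without data
+ * chaining each block gets its own locate record:
+ *
+ *	DEFINE EXTENT -cc-> LOCATE RECORD -cc-> READ/WRITE (block 0)
+ *	              -cc-> LOCATE RECORD -cc-> READ/WRITE (block 1) ...
+ *
+ * With data chaining a single LOCATE RECORD covers all blocks and the
+ * READ/WRITE CCWs are chained to each other with CCW_FLAG_DC.
+ */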
+
+static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device *memdev,
+ struct dasd_block *block,
+ struct request *req)
+{
+ if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES)
+ return dasd_fba_build_cp_discard(memdev, block, req);
+ else
+ return dasd_fba_build_cp_regular(memdev, block, req);
+}
+
+static int
+dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+{
+ struct dasd_fba_private *private = cqr->block->base->private;
+ struct ccw1 *ccw;
+ struct req_iterator iter;
+ struct bio_vec bv;
+ char *dst, *cda;
+ unsigned int blksize, off;
+ int status;
+
+ if (!dasd_page_cache)
+ goto out;
+ blksize = cqr->block->bp_block;
+ ccw = cqr->cpaddr;
+ /* Skip over define extent & locate record. */
+ ccw++;
+ if (private->rdc_data.mode.bits.data_chain != 0)
+ ccw++;
+ rq_for_each_segment(bv, req, iter) {
+ dst = bvec_virt(&bv);
+ for (off = 0; off < bv.bv_len; off += blksize) {
+ /* Skip locate record. */
+ if (private->rdc_data.mode.bits.data_chain == 0)
+ ccw++;
+ if (dst) {
+ if (ccw->flags & CCW_FLAG_IDA)
+ cda = *((char **)phys_to_virt(ccw->cda));
+ else
+ cda = phys_to_virt(ccw->cda);
+ if (dst != cda) {
+ if (rq_data_dir(req) == READ)
+ memcpy(dst, cda, bv.bv_len);
+ kmem_cache_free(dasd_page_cache,
+ (void *)((addr_t)cda & PAGE_MASK));
+ }
+ dst = NULL;
+ }
+ ccw++;
+ }
+ }
+out:
+ status = cqr->status == DASD_CQR_DONE;
+ dasd_sfree_request(cqr, cqr->memdev);
+ return status;
+}
+
+static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+ if (cqr->retries < 0)
+ cqr->status = DASD_CQR_FAILED;
+ else
+ cqr->status = DASD_CQR_FILLED;
+}
+
+static int
+dasd_fba_fill_info(struct dasd_device *device,
+ struct dasd_information2_t *info)
+{
+ struct dasd_fba_private *private = device->private;
+
+ info->label_block = 1;
+ info->FBA_layout = 1;
+ info->format = DASD_FORMAT_LDL;
+ info->characteristics_size = sizeof(private->rdc_data);
+ memcpy(info->characteristics, &private->rdc_data,
+ sizeof(private->rdc_data));
+ info->confdata_size = 0;
+ return 0;
+}
+
+static void
+dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
+ char *reason)
+{
+ u64 *sense;
+
+ sense = (u64 *) dasd_get_sense(irb);
+ if (sense) {
+ DBF_DEV_EVENT(DBF_EMERG, device,
+ "%s: %s %02x%02x%02x %016llx %016llx %016llx "
+ "%016llx", reason,
+ scsw_is_tm(&irb->scsw) ? "t" : "c",
+ scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
+ scsw_dstat(&irb->scsw), sense[0], sense[1],
+ sense[2], sense[3]);
+ } else {
+ DBF_DEV_EVENT(DBF_EMERG, device, "%s",
+ "SORRY - NO VALID SENSE AVAILABLE\n");
+ }
+}
+
+
+static void
+dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req *req,
+ struct irb *irb)
+{
+ char *page;
+ struct ccw1 *act, *end, *last;
+ int len, sl, sct, count;
+
+ page = (char *) get_zeroed_page(GFP_ATOMIC);
+ if (page == NULL) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "No memory to dump sense data");
+ return;
+ }
+ len = sprintf(page, PRINTK_HEADER
+ " I/O status report for device %s:\n",
+ dev_name(&device->cdev->dev));
+ len += sprintf(page + len, PRINTK_HEADER
+ " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
+ irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
+ len += sprintf(page + len, PRINTK_HEADER
+ " device %s: Failing CCW: %p\n",
+ dev_name(&device->cdev->dev),
+ (void *) (addr_t) irb->scsw.cmd.cpa);
+ if (irb->esw.esw0.erw.cons) {
+ for (sl = 0; sl < 4; sl++) {
+ len += sprintf(page + len, PRINTK_HEADER
+ " Sense(hex) %2d-%2d:",
+ (8 * sl), ((8 * sl) + 7));
+
+ for (sct = 0; sct < 8; sct++) {
+ len += sprintf(page + len, " %02x",
+ irb->ecw[8 * sl + sct]);
+ }
+ len += sprintf(page + len, "\n");
+ }
+ } else {
+ len += sprintf(page + len, PRINTK_HEADER
+ " SORRY - NO VALID SENSE AVAILABLE\n");
+ }
+ printk(KERN_ERR "%s", page);
+
+ /* dump the Channel Program */
+ /* print first CCWs (maximum 8) */
+ act = req->cpaddr;
+ for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+ end = min(act + 8, last);
+ len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
+ while (act <= end) {
+ len += sprintf(page + len, PRINTK_HEADER
+ " CCW %p: %08X %08X DAT:",
+ act, ((int *) act)[0], ((int *) act)[1]);
+ for (count = 0; count < 32 && count < act->count;
+ count += sizeof(int))
+ len += sprintf(page + len, " %08X",
+ ((int *) (addr_t) act->cda)
+ [(count>>2)]);
+ len += sprintf(page + len, "\n");
+ act++;
+ }
+ printk(KERN_ERR "%s", page);
+
+
+ /* print failing CCW area */
+ len = 0;
+ if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
+ act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
+ len += sprintf(page + len, PRINTK_HEADER "......\n");
+ }
+ end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
+ while (act <= end) {
+ len += sprintf(page + len, PRINTK_HEADER
+ " CCW %p: %08X %08X DAT:",
+ act, ((int *) act)[0], ((int *) act)[1]);
+ for (count = 0; count < 32 && count < act->count;
+ count += sizeof(int))
+ len += sprintf(page + len, " %08X",
+ ((int *) (addr_t) act->cda)
+ [(count>>2)]);
+ len += sprintf(page + len, "\n");
+ act++;
+ }
+
+ /* print last CCWs */
+ if (act < last - 2) {
+ act = last - 2;
+ len += sprintf(page + len, PRINTK_HEADER "......\n");
+ }
+ while (act <= last) {
+ len += sprintf(page + len, PRINTK_HEADER
+ " CCW %p: %08X %08X DAT:",
+ act, ((int *) act)[0], ((int *) act)[1]);
+ for (count = 0; count < 32 && count < act->count;
+ count += sizeof(int))
+ len += sprintf(page + len, " %08X",
+ ((int *) (addr_t) act->cda)
+ [(count>>2)]);
+ len += sprintf(page + len, "\n");
+ act++;
+ }
+ if (len > 0)
+ printk(KERN_ERR "%s", page);
+ free_page((unsigned long) page);
+}
+
+/*
+ * Initialize block layer request queue.
+ */
+static void dasd_fba_setup_blk_queue(struct dasd_block *block)
+{
+ unsigned int logical_block_size = block->bp_block;
+ struct request_queue *q = block->gdp->queue;
+ unsigned int max_bytes, max_discard_sectors;
+ int max;
+
+ max = DASD_FBA_MAX_BLOCKS << block->s2b_shift;
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ q->limits.max_dev_sectors = max;
+ blk_queue_logical_block_size(q, logical_block_size);
+ blk_queue_max_hw_sectors(q, max);
+ blk_queue_max_segments(q, USHRT_MAX);
+ /* With page sized segments each segment can be translated into one idaw/tidaw */
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+
+ q->limits.discard_granularity = logical_block_size;
+
+ /* Calculate max_discard_sectors and make it PAGE aligned */
+ max_bytes = USHRT_MAX * logical_block_size;
+ max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
+ max_discard_sectors = max_bytes / logical_block_size;
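+ /*
+ * Example: with 512 byte blocks, 65535 * 512 = 33553920 bytes is
+ * rounded down to 33550336 bytes, i.e. 65528 discardable blocks
+ * per request.
+ */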
+
+ blk_queue_max_discard_sectors(q, max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+}
+
+static int dasd_fba_pe_handler(struct dasd_device *device,
+ __u8 tbvpm, __u8 fcsecpm)
+{
+ return dasd_generic_verify_path(device, tbvpm);
+}
+
+static struct dasd_discipline dasd_fba_discipline = {
+ .owner = THIS_MODULE,
+ .name = "FBA ",
+ .ebcname = "FBA ",
+ .check_device = dasd_fba_check_characteristics,
+ .do_analysis = dasd_fba_do_analysis,
+ .pe_handler = dasd_fba_pe_handler,
+ .setup_blk_queue = dasd_fba_setup_blk_queue,
+ .fill_geometry = dasd_fba_fill_geometry,
+ .start_IO = dasd_start_IO,
+ .term_IO = dasd_term_IO,
+ .handle_terminated_request = dasd_fba_handle_terminated_request,
+ .erp_action = dasd_fba_erp_action,
+ .erp_postaction = dasd_fba_erp_postaction,
+ .check_for_device_change = dasd_fba_check_for_device_change,
+ .build_cp = dasd_fba_build_cp,
+ .free_cp = dasd_fba_free_cp,
+ .dump_sense = dasd_fba_dump_sense,
+ .dump_sense_dbf = dasd_fba_dump_sense_dbf,
+ .fill_info = dasd_fba_fill_info,
+};
+
+static int __init
+dasd_fba_init(void)
+{
+ int ret;
+
+ ASCEBC(dasd_fba_discipline.ebcname, 4);
+
+ dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!dasd_fba_zero_page)
+ return -ENOMEM;
+
+ ret = ccw_driver_register(&dasd_fba_driver);
+ if (!ret)
+ wait_for_device_probe();
+
+ return ret;
+}
+
+static void __exit
+dasd_fba_cleanup(void)
+{
+ ccw_driver_unregister(&dasd_fba_driver);
+ free_page((unsigned long)dasd_fba_zero_page);
+}
+
+module_init(dasd_fba_init);
+module_exit(dasd_fba_cleanup);
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
new file mode 100644
index 0000000000..45ddabec40
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2000
+ *
+ */
+
+#ifndef DASD_FBA_H
+#define DASD_FBA_H
+
+/*
+ * Maximum number of blocks to be chained
+ */
+#define DASD_FBA_MAX_BLOCKS 96
+
+struct DE_fba_data {
+ struct {
+ unsigned char perm:2; /* Permissions on this extent */
+ unsigned char zero:2; /* Must be zero */
+ unsigned char da:1; /* usually zero */
+ unsigned char diag:1; /* allow diagnose */
+ unsigned char zero2:2; /* zero */
+ } __attribute__ ((packed)) mask;
+ __u8 zero; /* Must be zero */
+ __u16 blk_size; /* Blocksize */
+ __u32 ext_loc; /* Extent locator */
+ __u32 ext_beg; /* logical number of block 0 in extent */
+	__u32 ext_end;		/* logical number of last block in extent */
+} __attribute__ ((packed));
+
+struct LO_fba_data {
+ struct {
+ unsigned char zero:4;
+ unsigned char cmd:4;
+ } __attribute__ ((packed)) operation;
+ __u8 auxiliary;
+ __u16 blk_ct;
+ __u32 blk_nr;
+} __attribute__ ((packed));
+
+struct dasd_fba_characteristics {
+ union {
+ __u8 c;
+ struct {
+ unsigned char reserved:1;
+ unsigned char overrunnable:1;
+ unsigned char burst_byte:1;
+ unsigned char data_chain:1;
+ unsigned char zeros:4;
+ } __attribute__ ((packed)) bits;
+ } __attribute__ ((packed)) mode;
+ union {
+ __u8 c;
+ struct {
+ unsigned char zero0:1;
+ unsigned char removable:1;
+ unsigned char shared:1;
+ unsigned char zero1:1;
+ unsigned char mam:1;
+ unsigned char zeros:3;
+ } __attribute__ ((packed)) bits;
+ } __attribute__ ((packed)) features;
+ __u8 dev_class;
+ __u8 unit_type;
+ __u16 blk_size;
+ __u32 blk_per_cycl;
+ __u32 blk_per_bound;
+ __u32 blk_bdsa;
+ __u32 reserved0;
+ __u16 reserved1;
+ __u16 blk_ce;
+ __u32 reserved2;
+ __u16 reserved3;
+} __attribute__ ((packed));
+
+#endif /* DASD_FBA_H */
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
new file mode 100644
index 0000000000..fe5108a1b3
--- /dev/null
+++ b/drivers/s390/block/dasd_genhd.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2001
+ *
+ * gendisk related functions for the dasd driver.
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/blkpg.h>
+
+#include <linux/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_gendisk:"
+
+#include "dasd_int.h"
+
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
+
+/*
+ * Allocate and register gendisk structure for device.
+ */
+int dasd_gendisk_alloc(struct dasd_block *block)
+{
+ struct gendisk *gdp;
+ struct dasd_device *base;
+ int len, rc;
+
+ /* Make sure the minor for this device exists. */
+ base = block->base;
+ if (base->devindex >= DASD_PER_MAJOR)
+ return -EBUSY;
+
+ block->tag_set.ops = &dasd_mq_ops;
+ block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
+ block->tag_set.nr_hw_queues = nr_hw_queues;
+ block->tag_set.queue_depth = queue_depth;
+ block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ block->tag_set.numa_node = NUMA_NO_NODE;
+ rc = blk_mq_alloc_tag_set(&block->tag_set);
+ if (rc)
+ return rc;
+
+ gdp = blk_mq_alloc_disk(&block->tag_set, block);
+ if (IS_ERR(gdp)) {
+ blk_mq_free_tag_set(&block->tag_set);
+ return PTR_ERR(gdp);
+ }
+
+ /* Initialize gendisk structure. */
+ gdp->major = DASD_MAJOR;
+ gdp->first_minor = base->devindex << DASD_PARTN_BITS;
+ gdp->minors = 1 << DASD_PARTN_BITS;
+ gdp->fops = &dasd_device_operations;
+
+ /*
+ * Set device name.
+ * dasda - dasdz : 26 devices
+ * dasdaa - dasdzz : 676 devices, added up = 702
+ * dasdaaa - dasdzzz : 17576 devices, added up = 18278
+ * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
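+	 * (a standalone sketch of this scheme follows this function)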
+ */
+ len = sprintf(gdp->disk_name, "dasd");
+ if (base->devindex > 25) {
+ if (base->devindex > 701) {
+ if (base->devindex > 18277)
+ len += sprintf(gdp->disk_name + len, "%c",
+ 'a'+(((base->devindex-18278)
+ /17576)%26));
+ len += sprintf(gdp->disk_name + len, "%c",
+ 'a'+(((base->devindex-702)/676)%26));
+ }
+ len += sprintf(gdp->disk_name + len, "%c",
+ 'a'+(((base->devindex-26)/26)%26));
+ }
+ len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
+
+ if (base->features & DASD_FEATURE_READONLY ||
+ test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
+ set_disk_ro(gdp, 1);
+ dasd_add_link_to_gendisk(gdp, base);
+ block->gdp = gdp;
+ set_capacity(block->gdp, 0);
+
+ rc = device_add_disk(&base->cdev->dev, block->gdp, NULL);
+ if (rc) {
+ dasd_gendisk_free(block);
+ return rc;
+ }
+
+ return 0;
+}
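+
+/*
+ * Illustration only, not part of the driver: the device naming scheme
+ * used in dasd_gendisk_alloc() above as a standalone, userspace-
+ * compilable sketch. All names below are local to this example.
+ */
+#if 0
+#include <stdio.h>
+
+static void example_dasd_name(int devindex, char *name)
+{
+	int len = sprintf(name, "dasd");
+
+	if (devindex > 25) {
+		if (devindex > 701) {
+			if (devindex > 18277)
+				len += sprintf(name + len, "%c",
+					       'a' + (((devindex - 18278) / 17576) % 26));
+			len += sprintf(name + len, "%c",
+				       'a' + (((devindex - 702) / 676) % 26));
+		}
+		len += sprintf(name + len, "%c",
+			       'a' + (((devindex - 26) / 26) % 26));
+	}
+	sprintf(name + len, "%c", 'a' + (devindex % 26));
+}
+
+int main(void)
+{
+	char name[16];
+
+	example_dasd_name(0, name);	/* prints "dasda" */
+	printf("%s\n", name);
+	example_dasd_name(703, name);	/* prints "dasdaab" */
+	printf("%s\n", name);
+	return 0;
+}
+#endif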
+
+/*
+ * Unregister and free gendisk structure for device.
+ */
+void dasd_gendisk_free(struct dasd_block *block)
+{
+ if (block->gdp) {
+ del_gendisk(block->gdp);
+ block->gdp->private_data = NULL;
+ put_disk(block->gdp);
+ block->gdp = NULL;
+ blk_mq_free_tag_set(&block->tag_set);
+ }
+}
+
+/*
+ * Trigger a partition detection.
+ */
+int dasd_scan_partitions(struct dasd_block *block)
+{
+ struct block_device *bdev;
+ int rc;
+
+ bdev = blkdev_get_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, NULL,
+ NULL);
+ if (IS_ERR(bdev)) {
+ DBF_DEV_EVENT(DBF_ERR, block->base,
+ "scan partitions error, blkdev_get returned %ld",
+ PTR_ERR(bdev));
+ return -ENODEV;
+ }
+
+ mutex_lock(&block->gdp->open_mutex);
+ rc = bdev_disk_changed(block->gdp, false);
+ mutex_unlock(&block->gdp->open_mutex);
+ if (rc)
+ DBF_DEV_EVENT(DBF_ERR, block->base,
+ "scan partitions error, rc %d", rc);
+
+ /*
+ * Since the matching blkdev_put call to the blkdev_get in
+ * this function is not called before dasd_destroy_partitions
+ * the offline open_count limit needs to be increased from
+ * 0 to 1. This is done by setting device->bdev (see
+ * dasd_generic_set_offline). As long as the partition
+ * detection is running no offline should be allowed. That
+ * is why the assignment to device->bdev is done AFTER
+ * the BLKRRPART ioctl.
+ */
+ block->bdev = bdev;
+ return 0;
+}
+
+/*
+ * Remove all inodes in the system for a device, delete the
+ * partitions and make device unusable by setting its size to zero.
+ */
+void dasd_destroy_partitions(struct dasd_block *block)
+{
+ struct block_device *bdev;
+
+ /*
+ * Get the bdev pointer from the device structure and clear
+ * device->bdev to lower the offline open_count limit again.
+ */
+ bdev = block->bdev;
+ block->bdev = NULL;
+
+ mutex_lock(&bdev->bd_disk->open_mutex);
+ bdev_disk_changed(bdev->bd_disk, true);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
+
+ /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
+ blkdev_put(bdev, NULL);
+}
+
+int dasd_gendisk_init(void)
+{
+ int rc;
+
+ /* Register to static dasd major 94 */
+ rc = register_blkdev(DASD_MAJOR, "dasd");
+ if (rc != 0) {
+ pr_warn("Registering the device driver with major number %d failed\n",
+ DASD_MAJOR);
+ return rc;
+ }
+ return 0;
+}
+
+void dasd_gendisk_exit(void)
+{
+ unregister_blkdev(DASD_MAJOR, "dasd");
+}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
new file mode 100644
index 0000000000..8a4dbe9d77
--- /dev/null
+++ b/drivers/s390/block/dasd_int.h
@@ -0,0 +1,1496 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2009
+ */
+
+#ifndef DASD_INT_H
+#define DASD_INT_H
+
+/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
+#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
+#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
+
+/*
+ * States a dasd device can have:
+ * new: the dasd_device structure is allocated.
+ * known: the discipline for the device is identified.
+ * basic: the device can do basic i/o.
+ * unfmt: the device could not be analyzed (format is unknown).
+ * ready: partition detection is done and the device can do block i/o.
+ * online: the device accepts requests from the block device queue.
+ *
+ * Things to do for startup state transitions:
+ * new -> known: find discipline for the device and create devfs entries.
+ * known -> basic: request irq line for the device.
+ * basic -> ready: do the initial analysis, e.g. format detection,
+ * do block device setup and detect partitions.
+ * ready -> online: schedule the device tasklet.
+ * Things to do for shutdown state transitions:
+ * online -> ready: just set the new device state.
+ * ready -> basic: flush requests from the block device layer, clear
+ * partition information and reset format information.
+ * basic -> known: terminate all requests and free irq.
+ * known -> new: remove devfs entries and forget discipline.
+ */
+
+#define DASD_STATE_NEW 0
+#define DASD_STATE_KNOWN 1
+#define DASD_STATE_BASIC 2
+#define DASD_STATE_UNFMT 3
+#define DASD_STATE_READY 4
+#define DASD_STATE_ONLINE 5
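+
+/*
+ * Illustration only, not part of the driver: a hypothetical helper that
+ * maps the state numbers above to names, e.g. for debug output.
+ */
+#if 0
+static const char *example_dasd_state_name(int state)
+{
+	static const char * const names[] = {
+		"new", "known", "basic", "unfmt", "ready", "online",
+	};
+
+	if (state < DASD_STATE_NEW || state > DASD_STATE_ONLINE)
+		return "invalid";
+	return names[state];
+}
+#endif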
+
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <asm/ccwdev.h>
+#include <linux/workqueue.h>
+#include <asm/debug.h>
+#include <asm/dasd.h>
+#include <asm/idals.h>
+#include <linux/bitops.h>
+#include <linux/blk-mq.h>
+
+/* DASD discipline magic */
+#define DASD_ECKD_MAGIC 0xC5C3D2C4
+#define DASD_DIAG_MAGIC 0xC4C9C1C7
+#define DASD_FBA_MAGIC 0xC6C2C140
+
+/*
+ * SECTION: Type definitions
+ */
+struct dasd_device;
+struct dasd_block;
+
+/* BIT DEFINITIONS FOR SENSE DATA */
+#define DASD_SENSE_BIT_0 0x80
+#define DASD_SENSE_BIT_1 0x40
+#define DASD_SENSE_BIT_2 0x20
+#define DASD_SENSE_BIT_3 0x10
+
+/* BIT DEFINITIONS FOR SIM SENSE */
+#define DASD_SIM_SENSE 0x0F
+#define DASD_SIM_MSG_TO_OP 0x03
+#define DASD_SIM_LOG 0x0C
+
+/* lock class for nested cdev lock */
+#define CDEV_NESTED_FIRST 1
+#define CDEV_NESTED_SECOND 2
+
+/*
+ * SECTION: MACROs for klogd and s390 debug feature (dbf)
+ */
+#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
+do { \
+ debug_sprintf_event(d_device->debug_area, \
+ d_level, \
+ d_str "\n", \
+ d_data); \
+} while(0)
+
+#define DBF_EVENT(d_level, d_str, d_data...)\
+do { \
+ debug_sprintf_event(dasd_debug_area, \
+ d_level,\
+ d_str "\n", \
+ d_data); \
+} while(0)
+
+#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
+do { \
+ struct ccw_dev_id __dev_id; \
+ ccw_device_get_id(d_cdev, &__dev_id); \
+ debug_sprintf_event(dasd_debug_area, \
+ d_level, \
+ "0.%x.%04x " d_str "\n", \
+ __dev_id.ssid, __dev_id.devno, d_data); \
+} while (0)
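+
+/*
+ * Usage sketch (illustration only): the dbf macros take a printf-style
+ * format string plus at least one argument, e.g.
+ *
+ *	DBF_DEV_EVENT(DBF_ERR, device, "unexpected rc %d", rc);
+ *	DBF_EVENT(DBF_WARNING, "%s", "path event");
+ */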
+
+/* limit size for an errorstring */
+#define ERRORLENGTH 30
+
+/* definition of dbf debug levels */
+#define DBF_EMERG 0 /* system is unusable */
+#define DBF_ALERT 1 /* action must be taken immediately */
+#define DBF_CRIT 2 /* critical conditions */
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARNING 4 /* warning conditions */
+#define DBF_NOTICE 5 /* normal but significant condition */
+#define DBF_INFO 6 /* informational */
+#define DBF_DEBUG 6 /* debug-level messages */
+
+/* messages to be written via klogd and dbf */
+#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
+do { \
+ printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
+ dev_name(&d_device->cdev->dev), d_args); \
+ DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
+} while(0)
+
+#define MESSAGE(d_loglevel,d_string,d_args...)\
+do { \
+ printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
+ DBF_EVENT(DBF_ALERT, d_string, d_args); \
+} while(0)
+
+/* messages to be written via klogd only */
+#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
+do { \
+ printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
+ dev_name(&d_device->cdev->dev), d_args); \
+} while(0)
+
+#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
+do { \
+ printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
+} while(0)
+
+/* Macro to calculate number of blocks per page */
+#define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize)
+
+struct dasd_ccw_req {
+ unsigned int magic; /* Eye catcher */
+ int intrc; /* internal error, e.g. from start_IO */
+ struct list_head devlist; /* for dasd_device request queue */
+ struct list_head blocklist; /* for dasd_block request queue */
+ struct dasd_block *block; /* the originating block device */
+ struct dasd_device *memdev; /* the device used to allocate this */
+ struct dasd_device *startdev; /* device the request is started on */
+ struct dasd_device *basedev; /* base device if no block->base */
+ void *cpaddr; /* address of ccw or tcw */
+ short retries; /* A retry counter */
+ unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */
+ char status; /* status of this request */
+ char lpm; /* logical path mask */
+ unsigned long flags; /* flags of this request */
+ struct dasd_queue *dq;
+ unsigned long starttime; /* jiffies time of request start */
+ unsigned long expires; /* expiration period in jiffies */
+ void *data; /* pointer to data area */
+ struct irb irb; /* device status in case of an error */
+ struct dasd_ccw_req *refers; /* ERP-chain queueing. */
+ void *function; /* originating ERP action */
+ void *mem_chunk;
+
+ unsigned long buildclk; /* TOD-clock of request generation */
+ unsigned long startclk; /* TOD-clock of request start */
+ unsigned long stopclk; /* TOD-clock of request interrupt */
+ unsigned long endclk; /* TOD-clock of request termination */
+
+ void (*callback)(struct dasd_ccw_req *, void *data);
+ void *callback_data;
+ unsigned int proc_bytes; /* bytes for partial completion */
+ unsigned int trkcount; /* count formatted tracks */
+};
+
+/*
+ * dasd_ccw_req -> status can be:
+ */
+#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
+#define DASD_CQR_DONE 0x01 /* request is completed successfully */
+#define DASD_CQR_NEED_ERP 0x02 /* request needs recovery action */
+#define DASD_CQR_IN_ERP 0x03 /* request is in recovery */
+#define DASD_CQR_FAILED 0x04 /* request is finally failed */
+#define DASD_CQR_TERMINATED 0x05 /* request was stopped by driver */
+
+#define DASD_CQR_QUEUED 0x80 /* request is queued to be processed */
+#define DASD_CQR_IN_IO 0x81 /* request is currently in IO */
+#define DASD_CQR_ERROR 0x82 /* request is completed with error */
+#define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */
+#define DASD_CQR_CLEARED 0x84 /* request was cleared */
+#define DASD_CQR_SUCCESS 0x85 /* request was successful */
+
+/* default expiration time*/
+#define DASD_EXPIRES 300
+#define DASD_EXPIRES_MAX 40000000
+#define DASD_RETRIES 256
+#define DASD_RETRIES_MAX 32768
+
+/* per dasd_ccw_req flags */
+#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
+#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
+#define DASD_CQR_VERIFY_PATH 2 /* path verification request */
+#define DASD_CQR_ALLOW_SLOCK 3 /* Try this request even when lock was
+ * stolen. Should not be combined with
+ * DASD_CQR_FLAGS_USE_ERP
+ */
+/*
+ * The following flags are used to suppress output of certain errors.
+ */
+#define DASD_CQR_SUPPRESS_NRF 4 /* Suppress 'No Record Found' error */
+#define DASD_CQR_SUPPRESS_FP 5 /* Suppress 'File Protected' error*/
+#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
+#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */
+
+#define DASD_REQ_PER_DEV 4
+
+/* Signature for error recovery functions. */
+typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
+
+/*
+ * A single CQR can only contain a maximum of 255 CCWs. It is limited by
+ * the locate record and locate record extended count value, which can
+ * only hold one byte.
+ */
+#define DASD_CQR_MAX_CCW 255
+
+/*
+ * Unique identifier for dasd device.
+ */
+#define UA_NOT_CONFIGURED 0x00
+#define UA_BASE_DEVICE 0x01
+#define UA_BASE_PAV_ALIAS 0x02
+#define UA_HYPER_PAV_ALIAS 0x03
+
+struct dasd_uid {
+ __u8 type;
+ char vendor[4];
+ char serial[15];
+ __u16 ssid;
+ __u8 real_unit_addr;
+ __u8 base_unit_addr;
+ char vduit[33];
+};
+
+#define DASD_UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 + \
+ /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 + \
+ /* vduit */ 32 + 1)
+
+/*
+ * PPRC Status data
+ */
+struct dasd_pprc_header {
+ __u8 entries; /* 0 Number of device entries */
+ __u8 unused; /* 1 unused */
+ __u16 entry_length; /* 2-3 Length of device entry */
+ __u32 unused2; /* 4-7 unused */
+} __packed;
+
+struct dasd_pprc_dev_info {
+ __u8 state; /* 0 Copy State */
+ __u8 flags; /* 1 Flags */
+ __u8 reserved1[2]; /* 2-3 reserved */
+ __u8 prim_lss; /* 4 Primary device LSS */
+ __u8 primary; /* 5 Primary device address */
+ __u8 sec_lss; /* 6 Secondary device LSS */
+ __u8 secondary; /* 7 Secondary device address */
+ __u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */
+ __u8 reserved2[12]; /* 10-21 reserved */
+	__u16 prim_cu_ssid;	/* 22-23 Primary Control Unit SSID */
+ __u8 reserved3[12]; /* 24-35 reserved */
+ __u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */
+ __u8 reserved4[90]; /* 38-127 reserved */
+} __packed;
+
+struct dasd_pprc_data_sc4 {
+ struct dasd_pprc_header header;
+ struct dasd_pprc_dev_info dev_info[5];
+} __packed;
+
+#define DASD_BUS_ID_SIZE 20
+#define DASD_CP_ENTRIES 5
+
+struct dasd_copy_entry {
+ char busid[DASD_BUS_ID_SIZE];
+ struct dasd_device *device;
+ bool primary;
+ bool configured;
+};
+
+struct dasd_copy_relation {
+ struct dasd_copy_entry entry[DASD_CP_ENTRIES];
+ struct dasd_copy_entry *active;
+};
+
+int dasd_devmap_set_device_copy_relation(struct ccw_device *,
+ bool pprc_enabled);
+
+/*
+ * the struct dasd_discipline is
+ * something like a table of virtual functions, if you think of dasd_eckd
+ * inheriting dasd...
+ * no, currently we are not planning to reimplement the driver in C++
+ */
+struct dasd_discipline {
+ struct module *owner;
+ char ebcname[8]; /* a name used for tagging and printks */
+ char name[8]; /* a name used for tagging and printks */
+
+ struct list_head list; /* used for list of disciplines */
+
+ /*
+ * Device recognition functions. check_device is used to verify
+ * the sense data and the information returned by read device
+ * characteristics. It returns 0 if the discipline can be used
+ * for the device in question. uncheck_device is called during
+ * device shutdown to deregister a device from its discipline.
+ */
+ int (*check_device) (struct dasd_device *);
+ void (*uncheck_device) (struct dasd_device *);
+
+ /*
+ * do_analysis is used in the step from device state "basic" to
+ * state "accept". It returns 0 if the device can be made ready,
+ * it returns -EMEDIUMTYPE if the device can't be made ready or
+ * -EAGAIN if do_analysis started a ccw that needs to complete
+ * before the analysis may be repeated.
+ */
+ int (*do_analysis) (struct dasd_block *);
+
+ /*
+	 * This function is called when new paths become available.
+	 * Disciplines may use this callback to do necessary setup work,
+	 * e.g. verify that a new path is compatible with the current
+ * configuration.
+ */
+ int (*pe_handler)(struct dasd_device *, __u8, __u8);
+
+ /*
+ * Last things to do when a device is set online, and first things
+ * when it is set offline.
+ */
+ int (*basic_to_ready) (struct dasd_device *);
+ int (*online_to_ready) (struct dasd_device *);
+ int (*basic_to_known)(struct dasd_device *);
+
+ /*
+ * Initialize block layer request queue.
+ */
+ void (*setup_blk_queue)(struct dasd_block *);
+	/*
+ * Device operation functions. build_cp creates a ccw chain for
+ * a block device request, start_io starts the request and
+ * term_IO cancels it (e.g. in case of a timeout). format_device
+ * formats the device and check_device_format compares the format of
+ * a device with the expected format_data.
+ * handle_terminated_request allows to examine a cqr and prepare
+ * it for retry.
+ */
+ struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
+ struct dasd_block *,
+ struct request *);
+ int (*start_IO) (struct dasd_ccw_req *);
+ int (*term_IO) (struct dasd_ccw_req *);
+ void (*handle_terminated_request) (struct dasd_ccw_req *);
+ int (*format_device) (struct dasd_device *,
+ struct format_data_t *, int);
+ int (*check_device_format)(struct dasd_device *,
+ struct format_check_t *, int);
+ int (*free_cp) (struct dasd_ccw_req *, struct request *);
+
+ /*
+ * Error recovery functions. examine_error() returns a value that
+ * indicates what to do for an error condition. If examine_error()
+ * returns 'dasd_era_recover' erp_action() is called to create a
+ * special error recovery ccw. erp_postaction() is called after
+ * an error recovery ccw has finished its execution. dump_sense
+ * is called for every error condition to print the sense data
+ * to the console.
+ */
+ dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
+ dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
+ void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
+ struct irb *);
+ void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
+ void (*check_for_device_change) (struct dasd_device *,
+ struct dasd_ccw_req *,
+ struct irb *);
+
+ /* i/o control functions. */
+ int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
+ int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
+ int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
+
+ /* reload device after state change */
+ int (*reload) (struct dasd_device *);
+
+ int (*get_uid) (struct dasd_device *, struct dasd_uid *);
+ void (*kick_validate) (struct dasd_device *);
+ int (*check_attention)(struct dasd_device *, __u8);
+ int (*host_access_count)(struct dasd_device *);
+ int (*hosts_print)(struct dasd_device *, struct seq_file *);
+ void (*handle_hpf_error)(struct dasd_device *, struct irb *);
+ void (*disable_hpf)(struct dasd_device *);
+ int (*hpf_enabled)(struct dasd_device *);
+ void (*reset_path)(struct dasd_device *, __u8);
+
+ /*
+ * Extent Space Efficient (ESE) relevant functions
+ */
+ int (*is_ese)(struct dasd_device *);
+ /* Capacity */
+ int (*space_allocated)(struct dasd_device *);
+ int (*space_configured)(struct dasd_device *);
+ int (*logical_capacity)(struct dasd_device *);
+ int (*release_space)(struct dasd_device *, struct format_data_t *);
+ /* Extent Pool */
+ int (*ext_pool_id)(struct dasd_device *);
+ int (*ext_size)(struct dasd_device *);
+ int (*ext_pool_cap_at_warnlevel)(struct dasd_device *);
+ int (*ext_pool_warn_thrshld)(struct dasd_device *);
+ int (*ext_pool_oos)(struct dasd_device *);
+ int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *);
+ struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
+ struct dasd_ccw_req *, struct irb *);
+ int (*ese_read)(struct dasd_ccw_req *, struct irb *);
+ int (*pprc_status)(struct dasd_device *, struct dasd_pprc_data_sc4 *);
+ bool (*pprc_enabled)(struct dasd_device *);
+ int (*copy_pair_swap)(struct dasd_device *, char *, char *);
+ int (*device_ping)(struct dasd_device *);
+};
+
+extern struct dasd_discipline *dasd_diag_discipline_pointer;
+
+/* Trigger IDs for extended error reporting DASD EER and autoquiesce */
+enum eer_trigger {
+ DASD_EER_FATALERROR = 1,
+ DASD_EER_NOPATH,
+ DASD_EER_STATECHANGE,
+ DASD_EER_PPRCSUSPEND,
+ DASD_EER_NOSPC,
+ DASD_EER_TIMEOUTS,
+ DASD_EER_STARTIO,
+
+ /* enum end marker, only add new trigger above */
+ DASD_EER_MAX,
+ DASD_EER_AUTOQUIESCE = 31, /* internal only */
+};
+
+#define DASD_EER_VALID ((1U << DASD_EER_MAX) - 1)
+
+/* DASD path handling */
+
+#define DASD_PATH_OPERATIONAL 1
+#define DASD_PATH_TBV 2
+#define DASD_PATH_PP 3
+#define DASD_PATH_NPP 4
+#define DASD_PATH_MISCABLED 5
+#define DASD_PATH_NOHPF 6
+#define DASD_PATH_CUIR 7
+#define DASD_PATH_IFCC 8
+#define DASD_PATH_FCSEC 9
+
+#define DASD_THRHLD_MAX 4294967295U
+#define DASD_INTERVAL_MAX 4294967295U
+
+/* FC Endpoint Security Capabilities */
+#define DASD_FC_SECURITY_UNSUP 0
+#define DASD_FC_SECURITY_AUTH 1
+#define DASD_FC_SECURITY_ENC_FCSP2 2
+#define DASD_FC_SECURITY_ENC_ERAS 3
+
+#define DASD_FC_SECURITY_ENC_STR "Encryption"
+static const struct {
+ u8 value;
+ char *name;
+} dasd_path_fcs_mnemonics[] = {
+ { DASD_FC_SECURITY_UNSUP, "Unsupported" },
+ { DASD_FC_SECURITY_AUTH, "Authentication" },
+ { DASD_FC_SECURITY_ENC_FCSP2, DASD_FC_SECURITY_ENC_STR },
+ { DASD_FC_SECURITY_ENC_ERAS, DASD_FC_SECURITY_ENC_STR },
+};
+
+static inline char *dasd_path_get_fcs_str(int val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dasd_path_fcs_mnemonics); i++) {
+ if (dasd_path_fcs_mnemonics[i].value == val)
+ return dasd_path_fcs_mnemonics[i].name;
+ }
+
+ return dasd_path_fcs_mnemonics[0].name;
+}
+
+struct dasd_path {
+ unsigned long flags;
+ u8 cssid;
+ u8 ssid;
+ u8 chpid;
+ struct dasd_conf_data *conf_data;
+ atomic_t error_count;
+ unsigned long errorclk;
+ u8 fc_security;
+ struct kobject kobj;
+ bool in_sysfs;
+};
+
+#define to_dasd_path(path) container_of(path, struct dasd_path, kobj)
+
+static inline void dasd_path_release(struct kobject *kobj)
+{
+/* Memory for the dasd_path kobject is freed when dasd_free_device() is called */
+}
+
+
+struct dasd_profile_info {
+ /* legacy part of profile data, as in dasd_profile_info_t */
+ unsigned int dasd_io_reqs; /* number of requests processed */
+ unsigned int dasd_io_sects; /* number of sectors processed */
+	unsigned int dasd_io_secs[32];	/* histogram of request sizes */
+	unsigned int dasd_io_times[32];	/* histogram of request times */
+	unsigned int dasd_io_timps[32];	/* hist. of request times per sector */
+ unsigned int dasd_io_time1[32]; /* hist. of time from build to start */
+ unsigned int dasd_io_time2[32]; /* hist. of time from start to irq */
+	unsigned int dasd_io_time2ps[32]; /* hist. of time from start to irq per sector */
+ unsigned int dasd_io_time3[32]; /* hist. of time from irq to end */
+ unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
+
+ /* new data */
+ struct timespec64 starttod; /* time of start or last reset */
+ unsigned int dasd_io_alias; /* requests using an alias */
+ unsigned int dasd_io_tpm; /* requests using transport mode */
+ unsigned int dasd_read_reqs; /* total number of read requests */
+	unsigned int dasd_read_sects;	/* total number of read sectors */
+	unsigned int dasd_read_alias;	/* read requests using an alias */
+	unsigned int dasd_read_tpm;	/* read requests in transport mode */
+	unsigned int dasd_read_secs[32]; /* histogram of request sizes */
+	unsigned int dasd_read_times[32]; /* histogram of request times */
+ unsigned int dasd_read_time1[32]; /* hist. time from build to start */
+ unsigned int dasd_read_time2[32]; /* hist. of time from start to irq */
+ unsigned int dasd_read_time3[32]; /* hist. of time from irq to end */
+ unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */
+ unsigned long dasd_sum_times; /* sum of request times */
+ unsigned long dasd_sum_time_str; /* sum of time from build to start */
+ unsigned long dasd_sum_time_irq; /* sum of time from start to irq */
+ unsigned long dasd_sum_time_end; /* sum of time from irq to end */
+};
+
+struct dasd_profile {
+ struct dentry *dentry;
+ struct dasd_profile_info *data;
+ spinlock_t lock;
+};
+
+struct dasd_format_entry {
+ struct list_head list;
+ sector_t track;
+};
+
+struct dasd_device {
+ /* Block device stuff. */
+ struct dasd_block *block;
+
+ unsigned int devindex;
+ unsigned long flags; /* per device flags */
+ unsigned short features; /* copy of devmap-features (read-only!) */
+
+ /* extended error reporting stuff (eer) */
+ struct dasd_ccw_req *eer_cqr;
+
+ /* Device discipline stuff. */
+ struct dasd_discipline *discipline;
+ struct dasd_discipline *base_discipline;
+ void *private;
+ struct dasd_path path[8];
+ __u8 opm;
+
+ /* Device state and target state. */
+ int state, target;
+ struct mutex state_mutex;
+ int stopped; /* device (ccw_device_start) was stopped */
+
+ /* reference count. */
+ atomic_t ref_count;
+
+ /* ccw queue and memory for static ccw/erp buffers. */
+ struct list_head ccw_queue;
+ spinlock_t mem_lock;
+ void *ccw_mem;
+ void *erp_mem;
+ void *ese_mem;
+ struct list_head ccw_chunks;
+ struct list_head erp_chunks;
+ struct list_head ese_chunks;
+
+ atomic_t tasklet_scheduled;
+ struct tasklet_struct tasklet;
+ struct work_struct kick_work;
+ struct work_struct reload_device;
+ struct work_struct kick_validate;
+ struct work_struct suc_work;
+ struct work_struct requeue_requests;
+ struct timer_list timer;
+
+ debug_info_t *debug_area;
+
+ struct ccw_device *cdev;
+
+ /* hook for alias management */
+ struct list_head alias_list;
+
+ /* default expiration time in s */
+ unsigned long default_expires;
+ unsigned long default_retries;
+
+ unsigned long blk_timeout;
+
+ unsigned long path_thrhld;
+ unsigned long path_interval;
+
+ struct dentry *debugfs_dentry;
+ struct dentry *hosts_dentry;
+ struct dasd_profile profile;
+ struct dasd_format_entry format_entry;
+ struct kset *paths_info;
+ struct dasd_copy_relation *copy;
+ unsigned long aq_mask;
+ unsigned int aq_timeouts;
+};
+
+struct dasd_block {
+ /* Block device stuff. */
+ struct gendisk *gdp;
+ spinlock_t request_queue_lock;
+ struct blk_mq_tag_set tag_set;
+ struct block_device *bdev;
+ atomic_t open_count;
+
+ unsigned long blocks; /* size of volume in blocks */
+ unsigned int bp_block; /* bytes per block */
+ unsigned int s2b_shift; /* log2 (bp_block/512) */
+
+ struct dasd_device *base;
+ struct list_head ccw_queue;
+ spinlock_t queue_lock;
+
+ atomic_t tasklet_scheduled;
+ struct tasklet_struct tasklet;
+ struct timer_list timer;
+
+ struct dentry *debugfs_dentry;
+ struct dasd_profile profile;
+
+ struct list_head format_list;
+ spinlock_t format_lock;
+ atomic_t trkcount;
+};
+
+struct dasd_attention_data {
+ struct dasd_device *device;
+ __u8 lpum;
+};
+
+struct dasd_queue {
+ spinlock_t lock;
+};
+
+/* reasons why device (ccw_device_start) was stopped */
+#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
+#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
+#define DASD_STOPPED_PENDING 4 /* long busy */
+#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
+#define DASD_STOPPED_SU 16 /* summary unit check handling */
+#define DASD_STOPPED_PPRC 32 /* PPRC swap */
+#define DASD_STOPPED_NOSPC 128 /* no space left */
+
+/* per device flags */
+#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
+#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */
+#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */
+#define DASD_FLAG_DEVICE_RO 6 /* The device itself is read-only. Don't
+ * confuse this with the user specified
+ * read-only feature.
+ */
+#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
+#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
+#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
+#define DASD_FLAG_SAFE_OFFLINE 10 /* safe offline processing requested*/
+#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
+#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */
+#define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */
+#define DASD_FLAG_SUC 14 /* unhandled summary unit check */
+
+#define DASD_SLEEPON_START_TAG ((void *) 1)
+#define DASD_SLEEPON_END_TAG ((void *) 2)
+
+void dasd_put_device_wake(struct dasd_device *);
+
+/*
+ * return values to be returned from the copy pair swap function
+ * 0x00: swap successful
+ * 0x01: swap data invalid
+ * 0x02: no active device found
+ * 0x03: wrong primary specified
+ * 0x04: secondary device not found
+ * 0x05: swap already running
+ */
+#define DASD_COPYPAIRSWAP_SUCCESS 0
+#define DASD_COPYPAIRSWAP_INVALID 1
+#define DASD_COPYPAIRSWAP_NOACTIVE 2
+#define DASD_COPYPAIRSWAP_PRIMARY 3
+#define DASD_COPYPAIRSWAP_SECONDARY 4
+#define DASD_COPYPAIRSWAP_MULTIPLE 5
+
+/*
+ * Reference count inliners
+ */
+static inline void
+dasd_get_device(struct dasd_device *device)
+{
+ atomic_inc(&device->ref_count);
+}
+
+static inline void
+dasd_put_device(struct dasd_device *device)
+{
+ if (atomic_dec_return(&device->ref_count) == 0)
+ dasd_put_device_wake(device);
+}
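+
+/*
+ * Usage sketch (illustration only): every dasd_get_device() must be
+ * balanced by a dasd_put_device(); the final put triggers
+ * dasd_put_device_wake() so that the device structure may be freed:
+ *
+ *	dasd_get_device(device);
+ *	... use device ...
+ *	dasd_put_device(device);
+ */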
+
+/*
+ * The static memory in ccw_mem and erp_mem is managed by a sorted
+ * list of free memory chunks.
+ */
+struct dasd_mchunk
+{
+ struct list_head list;
+ unsigned long size;
+} __attribute__ ((aligned(8)));
+
+static inline void
+dasd_init_chunklist(struct list_head *chunk_list, void *mem,
+ unsigned long size)
+{
+ struct dasd_mchunk *chunk;
+
+ INIT_LIST_HEAD(chunk_list);
+ chunk = (struct dasd_mchunk *) mem;
+ chunk->size = size - sizeof(struct dasd_mchunk);
+ list_add(&chunk->list, chunk_list);
+}
+
+static inline void *
+dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
+{
+ struct dasd_mchunk *chunk, *tmp;
+
+ size = (size + 7L) & -8L;
+ list_for_each_entry(chunk, chunk_list, list) {
+ if (chunk->size < size)
+ continue;
+ if (chunk->size > size + sizeof(struct dasd_mchunk)) {
+ char *endaddr = (char *) (chunk + 1) + chunk->size;
+ tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
+ tmp->size = size;
+ chunk->size -= size + sizeof(struct dasd_mchunk);
+ chunk = tmp;
+ } else
+ list_del(&chunk->list);
+ return (void *) (chunk + 1);
+ }
+ return NULL;
+}
+
+static inline void
+dasd_free_chunk(struct list_head *chunk_list, void *mem)
+{
+ struct dasd_mchunk *chunk, *tmp;
+ struct list_head *p, *left;
+
+ chunk = (struct dasd_mchunk *)
+ ((char *) mem - sizeof(struct dasd_mchunk));
+ /* Find out the left neighbour in chunk_list. */
+ left = chunk_list;
+ list_for_each(p, chunk_list) {
+ if (list_entry(p, struct dasd_mchunk, list) > chunk)
+ break;
+ left = p;
+ }
+ /* Try to merge with right neighbour = next element from left. */
+ if (left->next != chunk_list) {
+ tmp = list_entry(left->next, struct dasd_mchunk, list);
+ if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
+ list_del(&tmp->list);
+ chunk->size += tmp->size + sizeof(struct dasd_mchunk);
+ }
+ }
+ /* Try to merge with left neighbour. */
+ if (left != chunk_list) {
+ tmp = list_entry(left, struct dasd_mchunk, list);
+ if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
+ tmp->size += chunk->size + sizeof(struct dasd_mchunk);
+ return;
+ }
+ }
+ __list_add(&chunk->list, left, left->next);
+}
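+
+/*
+ * Illustration only, not part of the driver: exercising the chunk
+ * allocator above on a private buffer. Request sizes are rounded up to
+ * a multiple of 8 and every chunk carries a struct dasd_mchunk header.
+ */
+#if 0
+static void example_chunk_usage(void)
+{
+	unsigned long buf[64];		/* 8-byte aligned backing store */
+	struct list_head chunks;
+	void *a, *b;
+
+	dasd_init_chunklist(&chunks, buf, sizeof(buf));
+	a = dasd_alloc_chunk(&chunks, 100);	/* rounded up to 104 bytes */
+	b = dasd_alloc_chunk(&chunks, 64);
+	dasd_free_chunk(&chunks, a);	/* merges with free neighbours */
+	dasd_free_chunk(&chunks, b);
+}
+#endif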
+
+/*
+ * Check if bsize is in { 512, 1024, 2048, 4096 }
+ */
+static inline int
+dasd_check_blocksize(int bsize)
+{
+ if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
+ return -EMEDIUMTYPE;
+ return 0;
+}
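+
+/*
+ * Illustration only: dasd_check_blocksize(2048) returns 0, while
+ * dasd_check_blocksize(768) returns -EMEDIUMTYPE (in range but not a
+ * power of two).
+ */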
+
+/*
+ * return the callback data of the original request in case there are
+ * ERP requests built on top of it
+ */
+static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
+{
+ while (cqr->refers)
+ cqr = cqr->refers;
+
+ return cqr->callback_data;
+}
+
+/* externals in dasd.c */
+#define DASD_PROFILE_OFF 0
+#define DASD_PROFILE_ON 1
+#define DASD_PROFILE_GLOBAL_ONLY 2
+
+extern debug_info_t *dasd_debug_area;
+extern struct dasd_profile dasd_global_profile;
+extern unsigned int dasd_global_profile_level;
+extern const struct block_device_operations dasd_device_operations;
+extern struct blk_mq_ops dasd_mq_ops;
+
+extern struct kmem_cache *dasd_page_cache;
+
+struct dasd_ccw_req *
+dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_fmalloc_request(int, int, int, struct dasd_device *);
+void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_ffree_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
+
+struct dasd_device *dasd_alloc_device(void);
+void dasd_free_device(struct dasd_device *);
+
+struct dasd_block *dasd_alloc_block(void);
+void dasd_free_block(struct dasd_block *);
+
+enum blk_eh_timer_return dasd_times_out(struct request *req);
+
+void dasd_enable_device(struct dasd_device *);
+void dasd_set_target_state(struct dasd_device *, int);
+void dasd_kick_device(struct dasd_device *);
+void dasd_reload_device(struct dasd_device *);
+void dasd_schedule_requeue(struct dasd_device *);
+
+void dasd_add_request_head(struct dasd_ccw_req *);
+void dasd_add_request_tail(struct dasd_ccw_req *);
+int dasd_start_IO(struct dasd_ccw_req *);
+int dasd_term_IO(struct dasd_ccw_req *);
+void dasd_schedule_device_bh(struct dasd_device *);
+void dasd_schedule_block_bh(struct dasd_block *);
+int dasd_sleep_on(struct dasd_ccw_req *);
+int dasd_sleep_on_queue(struct list_head *);
+int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
+int dasd_sleep_on_queue_interruptible(struct list_head *);
+int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
+void dasd_device_set_timer(struct dasd_device *, int);
+void dasd_device_clear_timer(struct dasd_device *);
+void dasd_block_set_timer(struct dasd_block *, int);
+void dasd_block_clear_timer(struct dasd_block *);
+int dasd_cancel_req(struct dasd_ccw_req *);
+int dasd_flush_device_queue(struct dasd_device *);
+int dasd_generic_probe(struct ccw_device *);
+void dasd_generic_free_discipline(struct dasd_device *);
+void dasd_generic_remove (struct ccw_device *cdev);
+int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
+int dasd_generic_set_offline (struct ccw_device *cdev);
+int dasd_generic_notify(struct ccw_device *, int);
+int dasd_generic_last_path_gone(struct dasd_device *);
+int dasd_generic_path_operational(struct dasd_device *);
+void dasd_generic_shutdown(struct ccw_device *);
+
+void dasd_generic_handle_state_change(struct dasd_device *);
+enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
+void dasd_generic_path_event(struct ccw_device *, int *);
+int dasd_generic_verify_path(struct dasd_device *, __u8);
+void dasd_generic_space_exhaust(struct dasd_device *, struct dasd_ccw_req *);
+void dasd_generic_space_avail(struct dasd_device *);
+
+int dasd_generic_requeue_all_requests(struct dasd_device *);
+
+int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
+char *dasd_get_sense(struct irb *);
+
+void dasd_device_set_stop_bits(struct dasd_device *, int);
+void dasd_device_remove_stop_bits(struct dasd_device *, int);
+
+int dasd_device_is_ro(struct dasd_device *);
+
+void dasd_profile_reset(struct dasd_profile *);
+int dasd_profile_on(struct dasd_profile *);
+void dasd_profile_off(struct dasd_profile *);
+char *dasd_get_user_string(const char __user *, size_t);
+
+/* externals in dasd_devmap.c */
+extern int dasd_max_devindex;
+extern int dasd_probeonly;
+extern int dasd_autodetect;
+extern int dasd_nopav;
+extern int dasd_nofcx;
+
+int dasd_devmap_init(void);
+void dasd_devmap_exit(void);
+
+struct dasd_device *dasd_create_device(struct ccw_device *);
+void dasd_delete_device(struct dasd_device *);
+
+int dasd_get_feature(struct ccw_device *, int);
+int dasd_set_feature(struct ccw_device *, int, int);
+
+extern const struct attribute_group *dasd_dev_groups[];
+void dasd_path_create_kobj(struct dasd_device *, int);
+void dasd_path_create_kobjects(struct dasd_device *);
+void dasd_path_remove_kobjects(struct dasd_device *);
+
+struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
+struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
+struct dasd_device *dasd_device_from_devindex(int);
+
+void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
+struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
+
+int dasd_parse(void) __init;
+int dasd_busid_known(const char *);
+
+/* externals in dasd_gendisk.c */
+int dasd_gendisk_init(void);
+void dasd_gendisk_exit(void);
+int dasd_gendisk_alloc(struct dasd_block *);
+void dasd_gendisk_free(struct dasd_block *);
+int dasd_scan_partitions(struct dasd_block *);
+void dasd_destroy_partitions(struct dasd_block *);
+
+/* externals in dasd_ioctl.c */
+int dasd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd,
+ unsigned long arg);
+int dasd_set_read_only(struct block_device *bdev, bool ro);
+
+/* externals in dasd_proc.c */
+int dasd_proc_init(void);
+void dasd_proc_exit(void);
+
+/* externals in dasd_erp.c */
+struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
+struct dasd_ccw_req *dasd_alloc_erp_request(unsigned int, int, int,
+ struct dasd_device *);
+void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
+void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
+void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
+
+/* externals in dasd_3990_erp.c */
+struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
+void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
+
+/* externals in dasd_eer.c */
+#ifdef CONFIG_DASD_EER
+int dasd_eer_init(void);
+void dasd_eer_exit(void);
+int dasd_eer_enable(struct dasd_device *);
+void dasd_eer_disable(struct dasd_device *);
+void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr,
+ unsigned int id);
+void dasd_eer_snss(struct dasd_device *);
+
+static inline int dasd_eer_enabled(struct dasd_device *device)
+{
+ return device->eer_cqr != NULL;
+}
+#else
+#define dasd_eer_init() (0)
+#define dasd_eer_exit() do { } while (0)
+#define dasd_eer_enable(d) (0)
+#define dasd_eer_disable(d) do { } while (0)
+#define dasd_eer_write(d,c,i) do { } while (0)
+#define dasd_eer_snss(d) do { } while (0)
+#define dasd_eer_enabled(d) (0)
+#endif /* CONFIG_DASD_EER */
+
+
+/* DASD path handling functions */
+
+/*
+ * helper functions to modify bit masks for a given channel path for a device
+ */
+static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+}
+
+static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_verify(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_all_verify(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_fcsec(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_FCSEC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_fcsec(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_FCSEC, &device->path[chp].flags);
+}
+
+static inline int dasd_path_need_fcsec(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_FCSEC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_all_fcsec(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ dasd_path_clear_fcsec(device, chp);
+}
+
+static inline void dasd_path_operational(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+ device->opm |= (0x80 >> chp);
+}
+
+static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
+ int chp)
+{
+ __clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_preferred(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_preferred(struct dasd_device *device,
+ int chp)
+{
+ __clear_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+ device->opm &= ~(0x80 >> chp);
+}
+
+static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_cuir(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_ifcc(struct dasd_device *device, int chp)
+{
+ set_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp)
+{
+ clear_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
+{
+ __clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
+{
+ __set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
+{
+ return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+/*
+ * get functions for path masks
+ * will return a path mask for the given device
+ */
+
+static inline __u8 dasd_path_get_opm(struct dasd_device *device)
+{
+ return device->opm;
+}
+
+static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 tbvpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_need_verify(device, chp))
+ tbvpm |= 0x80 >> chp;
+ return tbvpm;
+}
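+
+/*
+ * Usage sketch (illustration only): path masks are MSB-first, i.e.
+ * channel path 0 corresponds to bit 0x80 and channel path 7 to 0x01:
+ *
+ *	if (dasd_path_get_tbvpm(device) & (0x80 >> 3))
+ *		... channel path 3 still needs verification ...
+ */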
+
+static inline int dasd_path_get_fcsecpm(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_need_fcsec(device, chp))
+ return 1;
+
+ return 0;
+}
+
+static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
+{
+ int chp;
+ __u8 npm = 0x00;
+
+ for (chp = 0; chp < 8; chp++) {
+ if (dasd_path_is_nonpreferred(device, chp))
+ npm |= 0x80 >> chp;
+ }
+ return npm;
+}
+
+static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
+{
+ int chp;
+ __u8 ppm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_preferred(device, chp))
+ ppm |= 0x80 >> chp;
+ return ppm;
+}
+
+static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
+{
+ int chp;
+ __u8 cablepm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_miscabled(device, chp))
+ cablepm |= 0x80 >> chp;
+ return cablepm;
+}
+
+static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 cuirpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_cuir(device, chp))
+ cuirpm |= 0x80 >> chp;
+ return cuirpm;
+}
+
+static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 ifccpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_ifcc(device, chp))
+ ifccpm |= 0x80 >> chp;
+ return ifccpm;
+}
+
+static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 hpfpm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_nohpf(device, chp))
+ hpfpm |= 0x80 >> chp;
+ return hpfpm;
+}
+
+static inline u8 dasd_path_get_fcs_path(struct dasd_device *device, int chp)
+{
+ return device->path[chp].fc_security;
+}
+
+static inline int dasd_path_get_fcs_device(struct dasd_device *device)
+{
+ u8 fc_sec = 0;
+ int chp;
+
+ for (chp = 0; chp < 8; chp++) {
+ if (device->opm & (0x80 >> chp)) {
+ fc_sec = device->path[chp].fc_security;
+ break;
+ }
+ }
+ for (; chp < 8; chp++) {
+ if (device->opm & (0x80 >> chp))
+ if (device->path[chp].fc_security != fc_sec)
+ return -EINVAL;
+ }
+
+ return fc_sec;
+}
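+
+/*
+ * Usage sketch (illustration only): combining the two helpers above to
+ * report the device-wide FC security level, with a fallback when the
+ * operational paths disagree:
+ *
+ *	int fcs = dasd_path_get_fcs_device(device);
+ *
+ *	pr_info("%s\n", fcs < 0 ? "Inconsistent" : dasd_path_get_fcs_str(fcs));
+ */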
+
+/*
+ * add functions for path masks
+ * the existing path mask will be extended by the given path mask
+ */
+static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_verify(device, chp);
+}
+
+static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device)
+{
+ int chp;
+ __u8 nopm = 0x00;
+
+ for (chp = 0; chp < 8; chp++)
+ if (dasd_path_is_nohpf(device, chp) ||
+ dasd_path_is_ifcc(device, chp) ||
+ dasd_path_is_cuir(device, chp) ||
+ dasd_path_is_miscabled(device, chp))
+ nopm |= 0x80 >> chp;
+ return nopm;
+}
+
+static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp)) {
+ dasd_path_operational(device, chp);
+ /*
+ * if the path is used
+ * it should not be in one of the negative lists
+ */
+ dasd_path_clear_nohpf(device, chp);
+ dasd_path_clear_cuir(device, chp);
+ dasd_path_clear_cable(device, chp);
+ dasd_path_clear_ifcc(device, chp);
+ }
+}
+
+static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_miscabled(device, chp);
+}
+
+static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_cuir(device, chp);
+}
+
+static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_ifcc(device, chp);
+}
+
+static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_nonpreferred(device, chp);
+}
+
+static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_nohpf(device, chp);
+}
+
+static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_preferred(device, chp);
+}
+
+static inline void dasd_path_add_fcsecpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_fcsec(device, chp);
+}
+
+/*
+ * set functions for path masks
+ * the existing path mask will be replaced by the given path mask
+ */
+static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_verify(device, chp);
+ else
+ dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++) {
+ dasd_path_clear_oper(device, chp);
+ if (pm & (0x80 >> chp)) {
+ dasd_path_operational(device, chp);
+ /*
+ * if the path is used
+ * it should not be in one of the negative lists
+ */
+ dasd_path_clear_nohpf(device, chp);
+ dasd_path_clear_cuir(device, chp);
+ dasd_path_clear_cable(device, chp);
+ dasd_path_clear_ifcc(device, chp);
+ }
+ }
+}
+
+/*
+ * remove functions for path masks
+ * the existing path mask will be cleared with the given path mask
+ */
+static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++) {
+ if (pm & (0x80 >> chp))
+ dasd_path_clear_oper(device, chp);
+ }
+}
+
+/*
+ * add the newly available path to the to-be-verified path mask and remove
+ * it from normal operation until it is verified
+ */
+static inline void dasd_path_available(struct dasd_device *device, int chp)
+{
+ dasd_path_clear_oper(device, chp);
+ dasd_path_verify(device, chp);
+}
+
+static inline void dasd_path_notoper(struct dasd_device *device, int chp)
+{
+ dasd_path_clear_oper(device, chp);
+ dasd_path_clear_preferred(device, chp);
+ dasd_path_clear_nonpreferred(device, chp);
+}
+
+static inline void dasd_path_fcsec_update(struct dasd_device *device, int chp)
+{
+ dasd_path_fcsec(device, chp);
+}
+
+/*
+ * remove all paths from normal operation
+ */
+static inline void dasd_path_no_path(struct dasd_device *device)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ dasd_path_notoper(device, chp);
+
+ dasd_path_clear_all_verify(device);
+}
+
+/* end - path handling */
+
+#endif /* DASD_INT_H */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
new file mode 100644
index 0000000000..d55862605b
--- /dev/null
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2001
+ *
+ * i/o controls for the dasd driver.
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/blkpg.h>
+#include <linux/slab.h>
+#include <asm/ccwdev.h>
+#include <asm/schid.h>
+#include <asm/cmb.h>
+#include <linux/uaccess.h>
+#include <linux/dasd_mod.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_ioctl:"
+
+#include "dasd_int.h"
+
+
+static int
+dasd_ioctl_api_version(void __user *argp)
+{
+ int ver = DASD_API_VERSION;
+ return put_user(ver, (int __user *)argp);
+}
+
+/*
+ * Enable device.
+ * Used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection.
+ */
+static int
+dasd_ioctl_enable(struct block_device *bdev)
+{
+ struct dasd_device *base;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+
+ dasd_enable_device(base);
+ dasd_put_device(base);
+ return 0;
+}
+
+/*
+ * Disable device.
+ * Used by dasdfmt. Disable I/O operations but allow ioctls.
+ */
+static int
+dasd_ioctl_disable(struct block_device *bdev)
+{
+ struct dasd_device *base;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+ /*
+ * Man this is sick. We don't do a real disable but only downgrade
+ * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
+ * BIODASDDISABLE to disable accesses to the device via the block
+ * device layer but it still wants to do i/o on the device by
+ * using the BIODASDFMT ioctl. Therefore the correct state for the
+	 * device is DASD_STATE_BASIC, which allows basic i/o.
+ */
+ dasd_set_target_state(base, DASD_STATE_BASIC);
+ /*
+ * Set i_size to zero, since read, write, etc. check against this
+ * value.
+ */
+ set_capacity(bdev->bd_disk, 0);
+ dasd_put_device(base);
+ return 0;
+}
+
+/*
+ * Quiesce device.
+ */
+static int dasd_ioctl_quiesce(struct dasd_block *block)
+{
+ unsigned long flags;
+ struct dasd_device *base;
+
+ base = block->base;
+ if (!capable (CAP_SYS_ADMIN))
+ return -EACCES;
+
+ pr_info("%s: The DASD has been put in the quiesce "
+ "state\n", dev_name(&base->cdev->dev));
+ spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+ dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
+ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ return 0;
+}
+
+
+/*
+ * Resume device.
+ */
+static int dasd_ioctl_resume(struct dasd_block *block)
+{
+ unsigned long flags;
+ struct dasd_device *base;
+
+ base = block->base;
+ if (!capable (CAP_SYS_ADMIN))
+ return -EACCES;
+
+ pr_info("%s: I/O operations have been resumed "
+ "on the DASD\n", dev_name(&base->cdev->dev));
+ spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+ dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
+ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+
+ dasd_schedule_block_bh(block);
+ dasd_schedule_device_bh(base);
+ return 0;
+}
+
+/*
+ * Abort all failfast I/O on a device.
+ */
+static int dasd_ioctl_abortio(struct dasd_block *block)
+{
+ unsigned long flags;
+ struct dasd_device *base;
+ struct dasd_ccw_req *cqr, *n;
+
+ base = block->base;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (test_and_set_bit(DASD_FLAG_ABORTALL, &base->flags))
+ return 0;
+ DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag set");
+
+ spin_lock_irqsave(&block->request_queue_lock, flags);
+ spin_lock(&block->queue_lock);
+ list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
+ if (test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+ cqr->callback_data &&
+ cqr->callback_data != DASD_SLEEPON_START_TAG &&
+ cqr->callback_data != DASD_SLEEPON_END_TAG) {
+ spin_unlock(&block->queue_lock);
+ blk_abort_request(cqr->callback_data);
+ spin_lock(&block->queue_lock);
+ }
+ }
+ spin_unlock(&block->queue_lock);
+ spin_unlock_irqrestore(&block->request_queue_lock, flags);
+
+ dasd_schedule_block_bh(block);
+ return 0;
+}
+
+/*
+ * Allow I/O on a device
+ */
+static int dasd_ioctl_allowio(struct dasd_block *block)
+{
+ struct dasd_device *base;
+
+ base = block->base;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (test_and_clear_bit(DASD_FLAG_ABORTALL, &base->flags))
+ DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag unset");
+
+ return 0;
+}
+
+/*
+ * Performs formatting of _device_ according to _fdata_.
+ * Note: The discipline's format_function is assumed to deliver formatting
+ * commands to format multiple units of the device. For ECKD devices this
+ * means CCWs are generated to format multiple tracks.
+ */
+static int
+dasd_format(struct dasd_block *block, struct format_data_t *fdata)
+{
+ struct dasd_device *base;
+ int rc;
+
+ base = block->base;
+ if (base->discipline->format_device == NULL)
+ return -EPERM;
+
+ if (base->state != DASD_STATE_BASIC) {
+ pr_warn("%s: The DASD cannot be formatted while it is enabled\n",
+ dev_name(&base->cdev->dev));
+ return -EBUSY;
+ }
+
+ DBF_DEV_EVENT(DBF_NOTICE, base,
+ "formatting units %u to %u (%u B blocks) flags %u",
+ fdata->start_unit,
+ fdata->stop_unit, fdata->blksize, fdata->intensity);
+
+	/*
+	 * Since dasdfmt keeps the device open after it was disabled,
+	 * there still exists an inode for this device.
+	 * We must update i_blkbits, otherwise we might get errors when
+	 * enabling the device later.
+	 */
+ if (fdata->start_unit == 0) {
+ block->gdp->part0->bd_inode->i_blkbits =
+ blksize_bits(fdata->blksize);
+ }
+
+ rc = base->discipline->format_device(base, fdata, 1);
+ if (rc == -EAGAIN)
+ rc = base->discipline->format_device(base, fdata, 0);
+
+ return rc;
+}
+
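+/*
+ * Illustrative sketch (not driver code): the format_data_t passed to
+ * BIODASDFMT carries the unit range, blocksize and intensity used
+ * above; the concrete values below are hypothetical:
+ *
+ *	struct format_data_t fdata = {
+ *		.start_unit = 0,	// first track
+ *		.stop_unit  = 149,	// hypothetical last track
+ *		.blksize    = 4096,
+ *		.intensity  = 0,
+ *	};
+ *	ioctl(fd, BIODASDFMT, &fdata);	// device must be in BASIC state
+ */
+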
+static int dasd_check_format(struct dasd_block *block,
+ struct format_check_t *cdata)
+{
+ struct dasd_device *base;
+ int rc;
+
+ base = block->base;
+ if (!base->discipline->check_device_format)
+ return -ENOTTY;
+
+ rc = base->discipline->check_device_format(base, cdata, 1);
+ if (rc == -EAGAIN)
+ rc = base->discipline->check_device_format(base, cdata, 0);
+
+ return rc;
+}
+
+/*
+ * Format device.
+ */
+static int
+dasd_ioctl_format(struct block_device *bdev, void __user *argp)
+{
+ struct dasd_device *base;
+ struct format_data_t fdata;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!argp)
+ return -EINVAL;
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+ if (base->features & DASD_FEATURE_READONLY ||
+ test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
+ dasd_put_device(base);
+ return -EROFS;
+ }
+ if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) {
+ dasd_put_device(base);
+ return -EFAULT;
+ }
+ if (bdev_is_partition(bdev)) {
+ pr_warn("%s: The specified DASD is a partition and cannot be formatted\n",
+ dev_name(&base->cdev->dev));
+ dasd_put_device(base);
+ return -EINVAL;
+ }
+ rc = dasd_format(base->block, &fdata);
+ dasd_put_device(base);
+
+ return rc;
+}
+
+/*
+ * Check device format
+ */
+static int dasd_ioctl_check_format(struct block_device *bdev, void __user *argp)
+{
+ struct format_check_t cdata;
+ struct dasd_device *base;
+ int rc = 0;
+
+ if (!argp)
+ return -EINVAL;
+
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+ if (bdev_is_partition(bdev)) {
+ pr_warn("%s: The specified DASD is a partition and cannot be checked\n",
+ dev_name(&base->cdev->dev));
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ if (copy_from_user(&cdata, argp, sizeof(cdata))) {
+ rc = -EFAULT;
+ goto out_err;
+ }
+
+ rc = dasd_check_format(base->block, &cdata);
+ if (rc)
+ goto out_err;
+
+ if (copy_to_user(argp, &cdata, sizeof(cdata)))
+ rc = -EFAULT;
+
+out_err:
+ dasd_put_device(base);
+
+ return rc;
+}
+
+static int dasd_release_space(struct dasd_device *device,
+ struct format_data_t *rdata)
+{
+	if (!device->discipline->is_ese || !device->discipline->is_ese(device))
+ return -ENOTSUPP;
+ if (!device->discipline->release_space)
+ return -ENOTSUPP;
+
+ return device->discipline->release_space(device, rdata);
+}
+
+/*
+ * Release allocated space
+ */
+static int dasd_ioctl_release_space(struct block_device *bdev, void __user *argp)
+{
+ struct format_data_t rdata;
+ struct dasd_device *base;
+ int rc = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!argp)
+ return -EINVAL;
+
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+ if (base->features & DASD_FEATURE_READONLY ||
+ test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
+ rc = -EROFS;
+ goto out_err;
+ }
+ if (bdev_is_partition(bdev)) {
+ pr_warn("%s: The specified DASD is a partition and tracks cannot be released\n",
+ dev_name(&base->cdev->dev));
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ if (copy_from_user(&rdata, argp, sizeof(rdata))) {
+ rc = -EFAULT;
+ goto out_err;
+ }
+
+ rc = dasd_release_space(base, &rdata);
+
+out_err:
+ dasd_put_device(base);
+
+ return rc;
+}
+
+/*
+ * Swap the driver-internal copy pair relation.
+ */
+static int
+dasd_ioctl_copy_pair_swap(struct block_device *bdev, void __user *argp)
+{
+ struct dasd_copypair_swap_data_t data;
+ struct dasd_device *device;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ device = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!device)
+ return -ENODEV;
+
+ if (copy_from_user(&data, argp, sizeof(struct dasd_copypair_swap_data_t))) {
+ dasd_put_device(device);
+ return -EFAULT;
+ }
+ if (memchr_inv(data.reserved, 0, sizeof(data.reserved))) {
+ pr_warn("%s: Invalid swap data specified\n",
+ dev_name(&device->cdev->dev));
+ dasd_put_device(device);
+ return DASD_COPYPAIRSWAP_INVALID;
+ }
+ if (bdev_is_partition(bdev)) {
+ pr_warn("%s: The specified DASD is a partition and cannot be swapped\n",
+ dev_name(&device->cdev->dev));
+ dasd_put_device(device);
+ return DASD_COPYPAIRSWAP_INVALID;
+ }
+ if (!device->copy) {
+ pr_warn("%s: The specified DASD has no copy pair set up\n",
+ dev_name(&device->cdev->dev));
+ dasd_put_device(device);
+ return -ENODEV;
+ }
+ if (!device->discipline->copy_pair_swap) {
+ dasd_put_device(device);
+ return -EOPNOTSUPP;
+ }
+ rc = device->discipline->copy_pair_swap(device, data.primary,
+ data.secondary);
+ dasd_put_device(device);
+
+ return rc;
+}
+
+#ifdef CONFIG_DASD_PROFILE
+/*
+ * Reset device profile information
+ */
+static int dasd_ioctl_reset_profile(struct dasd_block *block)
+{
+ dasd_profile_reset(&block->profile);
+ return 0;
+}
+
+/*
+ * Return device profile information
+ */
+static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
+{
+ struct dasd_profile_info_t *data;
+ int rc = 0;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_bh(&block->profile.lock);
+ if (block->profile.data) {
+ data->dasd_io_reqs = block->profile.data->dasd_io_reqs;
+ data->dasd_io_sects = block->profile.data->dasd_io_sects;
+ memcpy(data->dasd_io_secs, block->profile.data->dasd_io_secs,
+ sizeof(data->dasd_io_secs));
+ memcpy(data->dasd_io_times, block->profile.data->dasd_io_times,
+ sizeof(data->dasd_io_times));
+ memcpy(data->dasd_io_timps, block->profile.data->dasd_io_timps,
+ sizeof(data->dasd_io_timps));
+ memcpy(data->dasd_io_time1, block->profile.data->dasd_io_time1,
+ sizeof(data->dasd_io_time1));
+ memcpy(data->dasd_io_time2, block->profile.data->dasd_io_time2,
+ sizeof(data->dasd_io_time2));
+ memcpy(data->dasd_io_time2ps,
+ block->profile.data->dasd_io_time2ps,
+ sizeof(data->dasd_io_time2ps));
+ memcpy(data->dasd_io_time3, block->profile.data->dasd_io_time3,
+ sizeof(data->dasd_io_time3));
+ memcpy(data->dasd_io_nr_req,
+ block->profile.data->dasd_io_nr_req,
+ sizeof(data->dasd_io_nr_req));
+ spin_unlock_bh(&block->profile.lock);
+ } else {
+ spin_unlock_bh(&block->profile.lock);
+ rc = -EIO;
+ goto out;
+ }
+ if (copy_to_user(argp, data, sizeof(*data)))
+ rc = -EFAULT;
+out:
+ kfree(data);
+ return rc;
+}
+#else
+static int dasd_ioctl_reset_profile(struct dasd_block *block)
+{
+ return -ENOTTY;
+}
+
+static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
+{
+ return -ENOTTY;
+}
+#endif
+
+/*
+ * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
+ */
+static int __dasd_ioctl_information(struct dasd_block *block,
+ struct dasd_information2_t *dasd_info)
+{
+ struct subchannel_id sch_id;
+ struct ccw_dev_id dev_id;
+ struct dasd_device *base;
+ struct ccw_device *cdev;
+ struct list_head *l;
+ unsigned long flags;
+ int rc;
+
+ base = block->base;
+ if (!base->discipline || !base->discipline->fill_info)
+ return -EINVAL;
+
+ rc = base->discipline->fill_info(base, dasd_info);
+ if (rc)
+ return rc;
+
+ cdev = base->cdev;
+ ccw_device_get_id(cdev, &dev_id);
+ ccw_device_get_schid(cdev, &sch_id);
+
+ dasd_info->devno = dev_id.devno;
+ dasd_info->schid = sch_id.sch_no;
+ dasd_info->cu_type = cdev->id.cu_type;
+ dasd_info->cu_model = cdev->id.cu_model;
+ dasd_info->dev_type = cdev->id.dev_type;
+ dasd_info->dev_model = cdev->id.dev_model;
+ dasd_info->status = base->state;
+ /*
+	 * The open_count is increased for every opener, which includes
+	 * the blkdev_get in dasd_scan_partitions.
+	 * This must be hidden from user-space.
+ */
+ dasd_info->open_count = atomic_read(&block->open_count);
+ if (!block->bdev)
+ dasd_info->open_count++;
+
+	/*
+	 * Check if the device is really formatted;
+	 * LDL / CDL was returned by 'fill_info'.
+	 */
+ if ((base->state < DASD_STATE_READY) ||
+ (dasd_check_blocksize(block->bp_block)))
+ dasd_info->format = DASD_FORMAT_NONE;
+
+ dasd_info->features |=
+ ((base->features & DASD_FEATURE_READONLY) != 0);
+
+ memcpy(dasd_info->type, base->discipline->name, 4);
+
+ spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+ list_for_each(l, &base->ccw_queue)
+ dasd_info->chanq_len++;
+ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ return 0;
+}
+
+static int dasd_ioctl_information(struct dasd_block *block, void __user *argp,
+ size_t copy_size)
+{
+ struct dasd_information2_t *dasd_info;
+ int error;
+
+ dasd_info = kzalloc(sizeof(*dasd_info), GFP_KERNEL);
+ if (!dasd_info)
+ return -ENOMEM;
+
+ error = __dasd_ioctl_information(block, dasd_info);
+ if (!error && copy_to_user(argp, dasd_info, copy_size))
+ error = -EFAULT;
+ kfree(dasd_info);
+ return error;
+}
+
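+/*
+ * Illustrative sketch (not driver code): BIODASDINFO2 fills a
+ * dasd_information2_t with the data gathered above (devno, schid,
+ * cu/dev type and model, state, open_count, format, features, ...):
+ *
+ *	struct dasd_information2_t info;
+ *
+ *	if (ioctl(fd, BIODASDINFO2, &info) == 0)
+ *		printf("devno %04x, chanq_len %d\n",	// hypothetical output
+ *		       info.devno, info.chanq_len);
+ */
+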
+/*
+ * Set read only
+ */
+int dasd_set_read_only(struct block_device *bdev, bool ro)
+{
+ struct dasd_device *base;
+ int rc;
+
+ /* do not manipulate hardware state for partitions */
+ if (bdev_is_partition(bdev))
+ return 0;
+
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+ if (!ro && test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
+ rc = -EROFS;
+ else
+ rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, ro);
+ dasd_put_device(base);
+ return rc;
+}
+
+static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
+ struct cmbdata __user *argp)
+{
+ size_t size = _IOC_SIZE(cmd);
+ struct cmbdata data;
+ int ret;
+
+ ret = cmf_readall(block->base->cdev, &data);
+ if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
+ return -EFAULT;
+ return ret;
+}
+
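+/*
+ * Illustrative sketch (not driver code): the channel-measurement ioctls
+ * dispatched in dasd_ioctl() below are expected to be used in
+ * enable/read/disable order; fd and error handling are hypothetical:
+ *
+ *	struct cmbdata data;
+ *
+ *	ioctl(fd, BIODASDCMFENABLE);
+ *	ioctl(fd, BIODASDREADALLCMB, &data);
+ *	ioctl(fd, BIODASDCMFDISABLE);
+ */
+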
+int dasd_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct dasd_block *block;
+ struct dasd_device *base;
+ void __user *argp;
+ int rc;
+
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
+
+ if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg)
+ return -EINVAL;
+
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+ block = base->block;
+ rc = 0;
+ switch (cmd) {
+ case BIODASDDISABLE:
+ rc = dasd_ioctl_disable(bdev);
+ break;
+ case BIODASDENABLE:
+ rc = dasd_ioctl_enable(bdev);
+ break;
+ case BIODASDQUIESCE:
+ rc = dasd_ioctl_quiesce(block);
+ break;
+ case BIODASDRESUME:
+ rc = dasd_ioctl_resume(block);
+ break;
+ case BIODASDABORTIO:
+ rc = dasd_ioctl_abortio(block);
+ break;
+ case BIODASDALLOWIO:
+ rc = dasd_ioctl_allowio(block);
+ break;
+ case BIODASDFMT:
+ rc = dasd_ioctl_format(bdev, argp);
+ break;
+ case BIODASDCHECKFMT:
+ rc = dasd_ioctl_check_format(bdev, argp);
+ break;
+ case BIODASDINFO:
+ rc = dasd_ioctl_information(block, argp,
+ sizeof(struct dasd_information_t));
+ break;
+ case BIODASDINFO2:
+ rc = dasd_ioctl_information(block, argp,
+ sizeof(struct dasd_information2_t));
+ break;
+ case BIODASDPRRD:
+ rc = dasd_ioctl_read_profile(block, argp);
+ break;
+ case BIODASDPRRST:
+ rc = dasd_ioctl_reset_profile(block);
+ break;
+ case DASDAPIVER:
+ rc = dasd_ioctl_api_version(argp);
+ break;
+ case BIODASDCMFENABLE:
+ rc = enable_cmf(base->cdev);
+ break;
+ case BIODASDCMFDISABLE:
+ rc = disable_cmf(base->cdev);
+ break;
+ case BIODASDREADALLCMB:
+ rc = dasd_ioctl_readall_cmb(block, cmd, argp);
+ break;
+ case BIODASDRAS:
+ rc = dasd_ioctl_release_space(bdev, argp);
+ break;
+ case BIODASDCOPYPAIRSWAP:
+ rc = dasd_ioctl_copy_pair_swap(bdev, argp);
+ break;
+ default:
+ /* if the discipline has an ioctl method try it. */
+ rc = -ENOTTY;
+ if (base->discipline->ioctl)
+ rc = base->discipline->ioctl(block, cmd, argp);
+ }
+ dasd_put_device(base);
+ return rc;
+}
+
+/**
+ * dasd_biodasdinfo() - fill out the dasd information structure
+ * @disk: [in] pointer to gendisk structure that references a DASD
+ * @info: [out] pointer to the dasd_information2_t structure
+ *
+ * Provide access to DASD specific information.
+ * The gendisk structure is checked if it belongs to the DASD driver by
+ * comparing the gendisk->fops pointer.
+ * If it does not belong to the DASD driver -EINVAL is returned.
+ * Otherwise the provided dasd_information2_t structure is filled out.
+ *
+ * Returns:
+ * %0 on success and a negative error value on failure.
+ */
+int dasd_biodasdinfo(struct gendisk *disk, struct dasd_information2_t *info)
+{
+ struct dasd_device *base;
+ int error;
+
+ if (disk->fops != &dasd_device_operations)
+ return -EINVAL;
+
+ base = dasd_device_from_gendisk(disk);
+ if (!base)
+ return -ENODEV;
+ error = __dasd_ioctl_information(base->block, info);
+ dasd_put_device(base);
+ return error;
+}
+/* export that symbol_get in partition detection is possible */
+EXPORT_SYMBOL_GPL(dasd_biodasdinfo);
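+
+/*
+ * Illustrative sketch (not driver code): a hypothetical in-kernel user,
+ * e.g. the partition detection code, would resolve the exported symbol
+ * at runtime so it works whether or not the dasd module is loaded:
+ *
+ *	int (*fn)(struct gendisk *disk, struct dasd_information2_t *info);
+ *
+ *	fn = symbol_get(dasd_biodasdinfo);
+ *	if (fn) {
+ *		rc = fn(disk, &info);
+ *		symbol_put(dasd_biodasdinfo);
+ *	}
+ */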
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
new file mode 100644
index 0000000000..62a859ea67
--- /dev/null
+++ b/drivers/s390/block/dasd_proc.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Horst Hummel <Horst.Hummel@de.ibm.com>
+ * Carsten Otte <Cotte@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * Copyright IBM Corp. 1999, 2002
+ *
+ * /proc interface for the dasd driver.
+ *
+ */
+
+#define KMSG_COMPONENT "dasd"
+
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+
+#include <asm/debug.h>
+#include <linux/uaccess.h>
+
+/* This is ugly... */
+#define PRINTK_HEADER "dasd_proc:"
+
+#include "dasd_int.h"
+
+static struct proc_dir_entry *dasd_proc_root_entry;
+static struct proc_dir_entry *dasd_devices_entry;
+static struct proc_dir_entry *dasd_statistics_entry;
+
+static int
+dasd_devices_show(struct seq_file *m, void *v)
+{
+ struct dasd_device *device;
+ struct dasd_block *block;
+ char *substr;
+
+ device = dasd_device_from_devindex((unsigned long) v - 1);
+ if (IS_ERR(device))
+ return 0;
+ if (device->block)
+ block = device->block;
+ else {
+ dasd_put_device(device);
+ return 0;
+ }
+ /* Print device number. */
+ seq_printf(m, "%s", dev_name(&device->cdev->dev));
+ /* Print discipline string. */
+ if (device->discipline != NULL)
+ seq_printf(m, "(%s)", device->discipline->name);
+ else
+ seq_printf(m, "(none)");
+ /* Print kdev. */
+ if (block->gdp)
+ seq_printf(m, " at (%3d:%6d)",
+ MAJOR(disk_devt(block->gdp)),
+ MINOR(disk_devt(block->gdp)));
+ else
+ seq_printf(m, " at (???:??????)");
+ /* Print device name. */
+ if (block->gdp)
+ seq_printf(m, " is %-8s", block->gdp->disk_name);
+ else
+ seq_printf(m, " is ????????");
+ /* Print devices features. */
+ substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
+ seq_printf(m, "%4s: ", substr);
+ /* Print device status information. */
+ switch (device->state) {
+ case DASD_STATE_NEW:
+ seq_printf(m, "new");
+ break;
+ case DASD_STATE_KNOWN:
+ seq_printf(m, "detected");
+ break;
+ case DASD_STATE_BASIC:
+ seq_printf(m, "basic");
+ break;
+ case DASD_STATE_UNFMT:
+ seq_printf(m, "unformatted");
+ break;
+ case DASD_STATE_READY:
+ case DASD_STATE_ONLINE:
+ seq_printf(m, "active ");
+ if (dasd_check_blocksize(block->bp_block))
+ seq_printf(m, "n/f ");
+ else
+ seq_printf(m,
+ "at blocksize: %u, %lu blocks, %lu MB",
+ block->bp_block, block->blocks,
+ ((block->bp_block >> 9) *
+ block->blocks) >> 11);
+ break;
+ default:
+ seq_printf(m, "no stat");
+ break;
+ }
+ dasd_put_device(device);
+ if (dasd_probeonly)
+ seq_printf(m, "(probeonly)");
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
+{
+ if (*pos >= dasd_max_devindex)
+ return NULL;
+ return (void *)((unsigned long) *pos + 1);
+}
+
+static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return dasd_devices_start(m, pos);
+}
+
+static void dasd_devices_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations dasd_devices_seq_ops = {
+ .start = dasd_devices_start,
+ .next = dasd_devices_next,
+ .stop = dasd_devices_stop,
+ .show = dasd_devices_show,
+};
+
+#ifdef CONFIG_DASD_PROFILE
+static int dasd_stats_all_block_on(void)
+{
+ int i, rc;
+ struct dasd_device *device;
+
+ rc = 0;
+ for (i = 0; i < dasd_max_devindex; ++i) {
+ device = dasd_device_from_devindex(i);
+ if (IS_ERR(device))
+ continue;
+ if (device->block)
+ rc = dasd_profile_on(&device->block->profile);
+ dasd_put_device(device);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void dasd_stats_all_block_off(void)
+{
+ int i;
+ struct dasd_device *device;
+
+ for (i = 0; i < dasd_max_devindex; ++i) {
+ device = dasd_device_from_devindex(i);
+ if (IS_ERR(device))
+ continue;
+ if (device->block)
+ dasd_profile_off(&device->block->profile);
+ dasd_put_device(device);
+ }
+}
+
+static void dasd_stats_all_block_reset(void)
+{
+ int i;
+ struct dasd_device *device;
+
+ for (i = 0; i < dasd_max_devindex; ++i) {
+ device = dasd_device_from_devindex(i);
+ if (IS_ERR(device))
+ continue;
+ if (device->block)
+ dasd_profile_reset(&device->block->profile);
+ dasd_put_device(device);
+ }
+}
+
+static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ seq_printf(m, "%7d ", array[i] / factor);
+ if (i == 15)
+ seq_putc(m, '\n');
+ }
+ seq_putc(m, '\n');
+}
+#endif /* CONFIG_DASD_PROFILE */
+
+static int dasd_stats_proc_show(struct seq_file *m, void *v)
+{
+#ifdef CONFIG_DASD_PROFILE
+ struct dasd_profile_info *prof;
+ int factor;
+
+ spin_lock_bh(&dasd_global_profile.lock);
+ prof = dasd_global_profile.data;
+ if (!prof) {
+ spin_unlock_bh(&dasd_global_profile.lock);
+ seq_printf(m, "Statistics are off - they might be "
+ "switched on using 'echo set on > "
+ "/proc/dasd/statistics'\n");
+ return 0;
+ }
+
+ /* prevent counter 'overflow' on output */
+ for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
+ factor *= 10);
+
+	seq_printf(m, "%u dasd I/O requests\n", prof->dasd_io_reqs);
+	seq_printf(m, "with %u sectors (512B each)\n",
+		   prof->dasd_io_sects);
+ seq_printf(m, "Scale Factor is %d\n", factor);
+ seq_printf(m,
+ " __<4 ___8 __16 __32 __64 _128 "
+ " _256 _512 __1k __2k __4k __8k "
+ " _16k _32k _64k 128k\n");
+ seq_printf(m,
+ " _256 _512 __1M __2M __4M __8M "
+ " _16M _32M _64M 128M 256M 512M "
+ " __1G __2G __4G " " _>4G\n");
+
+ seq_printf(m, "Histogram of sizes (512B secs)\n");
+ dasd_statistics_array(m, prof->dasd_io_secs, factor);
+ seq_printf(m, "Histogram of I/O times (microseconds)\n");
+ dasd_statistics_array(m, prof->dasd_io_times, factor);
+ seq_printf(m, "Histogram of I/O times per sector\n");
+ dasd_statistics_array(m, prof->dasd_io_timps, factor);
+ seq_printf(m, "Histogram of I/O time till ssch\n");
+ dasd_statistics_array(m, prof->dasd_io_time1, factor);
+ seq_printf(m, "Histogram of I/O time between ssch and irq\n");
+ dasd_statistics_array(m, prof->dasd_io_time2, factor);
+ seq_printf(m, "Histogram of I/O time between ssch "
+ "and irq per sector\n");
+ dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
+ seq_printf(m, "Histogram of I/O time between irq and end\n");
+ dasd_statistics_array(m, prof->dasd_io_time3, factor);
+	seq_printf(m, "# of req in chanq at enqueuing (1..32)\n");
+ dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
+ spin_unlock_bh(&dasd_global_profile.lock);
+#else
+ seq_printf(m, "Statistics are not activated in this kernel\n");
+#endif
+ return 0;
+}
+
+static int dasd_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dasd_stats_proc_show, NULL);
+}
+
+static ssize_t dasd_stats_proc_write(struct file *file,
+ const char __user *user_buf, size_t user_len, loff_t *pos)
+{
+#ifdef CONFIG_DASD_PROFILE
+ char *buffer, *str;
+ int rc;
+
+ if (user_len > 65536)
+ user_len = 65536;
+ buffer = dasd_get_user_string(user_buf, user_len);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ /* check for valid verbs */
+ str = skip_spaces(buffer);
+ if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
+ /* 'set xxx' was given */
+ str = skip_spaces(str + 4);
+ if (strcmp(str, "on") == 0) {
+ /* switch on statistics profiling */
+ rc = dasd_stats_all_block_on();
+ if (rc) {
+ dasd_stats_all_block_off();
+ goto out_error;
+ }
+ rc = dasd_profile_on(&dasd_global_profile);
+ if (rc) {
+ dasd_stats_all_block_off();
+ goto out_error;
+ }
+ dasd_profile_reset(&dasd_global_profile);
+ dasd_global_profile_level = DASD_PROFILE_ON;
+ pr_info("The statistics feature has been switched "
+ "on\n");
+ } else if (strcmp(str, "off") == 0) {
+ /* switch off statistics profiling */
+ dasd_global_profile_level = DASD_PROFILE_OFF;
+ dasd_profile_off(&dasd_global_profile);
+ dasd_stats_all_block_off();
+ pr_info("The statistics feature has been switched "
+ "off\n");
+ } else
+ goto out_parse_error;
+ } else if (strncmp(str, "reset", 5) == 0) {
+ /* reset the statistics */
+ dasd_profile_reset(&dasd_global_profile);
+ dasd_stats_all_block_reset();
+ pr_info("The statistics have been reset\n");
+ } else
+ goto out_parse_error;
+ vfree(buffer);
+ return user_len;
+out_parse_error:
+ rc = -EINVAL;
+ pr_warn("%s is not a supported value for /proc/dasd/statistics\n", str);
+out_error:
+ vfree(buffer);
+ return rc;
+#else
+	pr_warn("/proc/dasd/statistics is not activated in this kernel\n");
+ return user_len;
+#endif /* CONFIG_DASD_PROFILE */
+}
+
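+/*
+ * Illustrative usage (not driver code): the write handler above accepts
+ * the verbs "set on", "set off" and "reset", e.g. from a shell:
+ *
+ *	echo set on > /proc/dasd/statistics
+ *	cat /proc/dasd/statistics
+ *	echo reset > /proc/dasd/statistics
+ *	echo set off > /proc/dasd/statistics
+ */
+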
+static const struct proc_ops dasd_stats_proc_ops = {
+ .proc_open = dasd_stats_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = dasd_stats_proc_write,
+};
+
+/*
+ * Create dasd proc-fs entries.
+ * In case creation failed, cleanup and return -ENOENT.
+ */
+int
+dasd_proc_init(void)
+{
+ dasd_proc_root_entry = proc_mkdir("dasd", NULL);
+ if (!dasd_proc_root_entry)
+ goto out_nodasd;
+ dasd_devices_entry = proc_create_seq("devices", 0444,
+ dasd_proc_root_entry,
+ &dasd_devices_seq_ops);
+ if (!dasd_devices_entry)
+ goto out_nodevices;
+ dasd_statistics_entry = proc_create("statistics",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ dasd_proc_root_entry,
+ &dasd_stats_proc_ops);
+ if (!dasd_statistics_entry)
+ goto out_nostatistics;
+ return 0;
+
+ out_nostatistics:
+ remove_proc_entry("devices", dasd_proc_root_entry);
+ out_nodevices:
+ remove_proc_entry("dasd", NULL);
+ out_nodasd:
+ return -ENOENT;
+}
+
+void
+dasd_proc_exit(void)
+{
+ remove_proc_entry("devices", dasd_proc_root_entry);
+ remove_proc_entry("statistics", dasd_proc_root_entry);
+ remove_proc_entry("dasd", NULL);
+}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
new file mode 100644
index 0000000000..4b7ecd4fd4
--- /dev/null
+++ b/drivers/s390/block/dcssblk.c
@@ -0,0 +1,1032 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dcssblk.c -- the S/390 block driver for dcss memory
+ *
+ * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
+ */
+
+#define KMSG_COMPONENT "dcssblk"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/pfn_t.h>
+#include <linux/uio.h>
+#include <linux/dax.h>
+#include <linux/io.h>
+#include <asm/extmem.h>
+
+#define DCSSBLK_NAME "dcssblk"
+#define DCSSBLK_MINORS_PER_DISK 1
+#define DCSSBLK_PARM_LEN 400
+#define DCSS_BUS_ID_SIZE 20
+
+static int dcssblk_open(struct gendisk *disk, blk_mode_t mode);
+static void dcssblk_release(struct gendisk *disk);
+static void dcssblk_submit_bio(struct bio *bio);
+static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn);
+
+static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
+
+static int dcssblk_major;
+static const struct block_device_operations dcssblk_devops = {
+ .owner = THIS_MODULE,
+ .submit_bio = dcssblk_submit_bio,
+ .open = dcssblk_open,
+ .release = dcssblk_release,
+};
+
+static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
+ pgoff_t pgoff, size_t nr_pages)
+{
+ long rc;
+ void *kaddr;
+
+ rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
+ &kaddr, NULL);
+ if (rc < 0)
+ return dax_mem2blk_err(rc);
+
+ memset(kaddr, 0, nr_pages << PAGE_SHIFT);
+ dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
+ return 0;
+}
+
+static const struct dax_operations dcssblk_dax_ops = {
+ .direct_access = dcssblk_dax_direct_access,
+ .zero_page_range = dcssblk_dax_zero_page_range,
+};
+
+struct dcssblk_dev_info {
+ struct list_head lh;
+ struct device dev;
+ char segment_name[DCSS_BUS_ID_SIZE];
+ atomic_t use_count;
+ struct gendisk *gd;
+ unsigned long start;
+ unsigned long end;
+ int segment_type;
+ unsigned char save_pending;
+ unsigned char is_shared;
+ int num_of_segments;
+ struct list_head seg_list;
+ struct dax_device *dax_dev;
+};
+
+struct segment_info {
+ struct list_head lh;
+ char segment_name[DCSS_BUS_ID_SIZE];
+ unsigned long start;
+ unsigned long end;
+ int segment_type;
+};
+
+static ssize_t dcssblk_add_store(struct device *dev, struct device_attribute *attr,
+				 const char *buf, size_t count);
+static ssize_t dcssblk_remove_store(struct device *dev, struct device_attribute *attr,
+				    const char *buf, size_t count);
+
+static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
+static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
+
+static struct device *dcssblk_root_dev;
+
+static LIST_HEAD(dcssblk_devices);
+static struct rw_semaphore dcssblk_devices_sem;
+
+/*
+ * release function for segment device.
+ */
+static void
+dcssblk_release_segment(struct device *dev)
+{
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry, *temp;
+
+ dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+ list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
+ list_del(&entry->lh);
+ kfree(entry);
+ }
+ kfree(dev_info);
+ module_put(THIS_MODULE);
+}
+
+/*
+ * get a minor number. needs to be called with
+ * down_write(&dcssblk_devices_sem) and the
+ * device needs to be enqueued before the semaphore is
+ * freed.
+ */
+static int
+dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
+{
+ int minor, found;
+ struct dcssblk_dev_info *entry;
+
+ if (dev_info == NULL)
+ return -EINVAL;
+ for (minor = 0; minor < (1<<MINORBITS); minor++) {
+ found = 0;
+		/* test if minor available */
+		list_for_each_entry(entry, &dcssblk_devices, lh)
+			if (minor == entry->gd->first_minor)
+				found++;
+		if (!found)
+			break;	/* got unused minor */
+ }
+ if (found)
+ return -EBUSY;
+ dev_info->gd->first_minor = minor;
+ return 0;
+}
+
+/*
+ * get the struct dcssblk_dev_info from dcssblk_devices
+ * for the given name.
+ * down_read(&dcssblk_devices_sem) must be held.
+ */
+static struct dcssblk_dev_info *
+dcssblk_get_device_by_name(char *name)
+{
+ struct dcssblk_dev_info *entry;
+
+ list_for_each_entry(entry, &dcssblk_devices, lh) {
+ if (!strcmp(name, entry->segment_name)) {
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * get the struct segment_info from seg_list
+ * for the given name.
+ * down_read(&dcssblk_devices_sem) must be held.
+ */
+static struct segment_info *
+dcssblk_get_segment_by_name(char *name)
+{
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
+
+ list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (!strcmp(name, entry->segment_name))
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * get the highest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
+{
+ unsigned long highest_addr;
+ struct segment_info *entry;
+
+ highest_addr = 0;
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (highest_addr < entry->end)
+ highest_addr = entry->end;
+ }
+ return highest_addr;
+}
+
+/*
+ * get the lowest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
+{
+ int set_first;
+ unsigned long lowest_addr;
+ struct segment_info *entry;
+
+ set_first = 0;
+ lowest_addr = 0;
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (set_first == 0) {
+ lowest_addr = entry->start;
+ set_first = 1;
+ } else {
+ if (lowest_addr > entry->start)
+ lowest_addr = entry->start;
+ }
+ }
+ return lowest_addr;
+}
+
+/*
+ * Check continuity of segments.
+ */
+static int
+dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
+{
+ int i, j, rc;
+ struct segment_info *sort_list, *entry, temp;
+
+ if (dev_info->num_of_segments <= 1)
+ return 0;
+
+ sort_list = kcalloc(dev_info->num_of_segments,
+ sizeof(struct segment_info),
+ GFP_KERNEL);
+ if (sort_list == NULL)
+ return -ENOMEM;
+ i = 0;
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ memcpy(&sort_list[i], entry, sizeof(struct segment_info));
+ i++;
+ }
+
+ /* sort segments */
+ for (i = 0; i < dev_info->num_of_segments; i++)
+ for (j = 0; j < dev_info->num_of_segments; j++)
+ if (sort_list[j].start > sort_list[i].start) {
+ memcpy(&temp, &sort_list[i],
+ sizeof(struct segment_info));
+ memcpy(&sort_list[i], &sort_list[j],
+ sizeof(struct segment_info));
+ memcpy(&sort_list[j], &temp,
+ sizeof(struct segment_info));
+ }
+
+ /* check continuity */
+ for (i = 0; i < dev_info->num_of_segments - 1; i++) {
+ if ((sort_list[i].end + 1) != sort_list[i+1].start) {
+ pr_err("Adjacent DCSSs %s and %s are not "
+ "contiguous\n", sort_list[i].segment_name,
+ sort_list[i+1].segment_name);
+ rc = -EINVAL;
+ goto out;
+ }
+ /* EN and EW are allowed in a block device */
+ if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
+ if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
+ (sort_list[i].segment_type == SEG_TYPE_ER) ||
+ !(sort_list[i+1].segment_type &
+ SEGMENT_EXCLUSIVE) ||
+ (sort_list[i+1].segment_type == SEG_TYPE_ER)) {
+ pr_err("DCSS %s and DCSS %s have "
+ "incompatible types\n",
+ sort_list[i].segment_name,
+ sort_list[i+1].segment_name);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+ }
+ rc = 0;
+out:
+ kfree(sort_list);
+ return rc;
+}
+
+/*
+ * Load a segment
+ */
+static int
+dcssblk_load_segment(char *name, struct segment_info **seg_info)
+{
+ int rc;
+
+ /* already loaded? */
+ down_read(&dcssblk_devices_sem);
+ *seg_info = dcssblk_get_segment_by_name(name);
+ up_read(&dcssblk_devices_sem);
+ if (*seg_info != NULL)
+ return -EEXIST;
+
+ /* get a struct segment_info */
+ *seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
+ if (*seg_info == NULL)
+ return -ENOMEM;
+
+ strcpy((*seg_info)->segment_name, name);
+
+ /* load the segment */
+ rc = segment_load(name, SEGMENT_SHARED,
+ &(*seg_info)->start, &(*seg_info)->end);
+ if (rc < 0) {
+ segment_warning(rc, (*seg_info)->segment_name);
+ kfree(*seg_info);
+ } else {
+ INIT_LIST_HEAD(&(*seg_info)->lh);
+ (*seg_info)->segment_type = rc;
+ }
+ return rc;
+}
+
+/*
+ * device attribute for switching shared/nonshared (exclusive)
+ * operation (show + store)
+ */
+static ssize_t
+dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dcssblk_dev_info *dev_info;
+
+ dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+ return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
+}
+
+static ssize_t
+dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
+{
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry, *temp;
+ int rc;
+
+ if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
+ return -EINVAL;
+ down_write(&dcssblk_devices_sem);
+ dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+ if (atomic_read(&dev_info->use_count)) {
+ rc = -EBUSY;
+ goto out;
+ }
+ if (inbuf[0] == '1') {
+ /* reload segments in shared mode */
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ rc = segment_modify_shared(entry->segment_name,
+ SEGMENT_SHARED);
+ if (rc < 0) {
+ BUG_ON(rc == -EINVAL);
+ if (rc != -EAGAIN)
+ goto removeseg;
+ }
+ }
+ dev_info->is_shared = 1;
+ switch (dev_info->segment_type) {
+ case SEG_TYPE_SR:
+ case SEG_TYPE_ER:
+ case SEG_TYPE_SC:
+ set_disk_ro(dev_info->gd, 1);
+ }
+ } else if (inbuf[0] == '0') {
+ /* reload segments in exclusive mode */
+ if (dev_info->segment_type == SEG_TYPE_SC) {
+ pr_err("DCSS %s is of type SC and cannot be "
+ "loaded as exclusive-writable\n",
+ dev_info->segment_name);
+ rc = -EINVAL;
+ goto out;
+ }
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ rc = segment_modify_shared(entry->segment_name,
+ SEGMENT_EXCLUSIVE);
+ if (rc < 0) {
+ BUG_ON(rc == -EINVAL);
+ if (rc != -EAGAIN)
+ goto removeseg;
+ }
+ }
+ dev_info->is_shared = 0;
+ set_disk_ro(dev_info->gd, 0);
+ } else {
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = count;
+ goto out;
+
+removeseg:
+ pr_err("DCSS device %s is removed after a failed access mode "
+ "change\n", dev_info->segment_name);
+ temp = entry;
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (entry != temp)
+ segment_unload(entry->segment_name);
+ }
+ list_del(&dev_info->lh);
+ up_write(&dcssblk_devices_sem);
+
+ dax_remove_host(dev_info->gd);
+ kill_dax(dev_info->dax_dev);
+ put_dax(dev_info->dax_dev);
+ del_gendisk(dev_info->gd);
+ put_disk(dev_info->gd);
+
+ if (device_remove_file_self(dev, attr)) {
+ device_unregister(dev);
+ put_device(dev);
+ }
+ return rc;
+out:
+ up_write(&dcssblk_devices_sem);
+ return rc;
+}
+static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
+ dcssblk_shared_store);
+
+/*
+ * device attribute for save operation on current copy
+ * of the segment. If the segment is busy, saving will
+ * become pending until it gets released, which can be
+ * undone by storing a non-true value to this entry.
+ * (show + store)
+ */
+static ssize_t
+dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dcssblk_dev_info *dev_info;
+
+ dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+ return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
+}
+
+static ssize_t
+dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
+{
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
+
+ if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
+ return -EINVAL;
+ dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+
+ down_write(&dcssblk_devices_sem);
+ if (inbuf[0] == '1') {
+ if (atomic_read(&dev_info->use_count) == 0) {
+			/* device is idle => we save immediately */
+ pr_info("All DCSSs that map to device %s are "
+ "saved\n", dev_info->segment_name);
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (entry->segment_type == SEG_TYPE_EN ||
+ entry->segment_type == SEG_TYPE_SN)
+ pr_warn("DCSS %s is of type SN or EN"
+ " and cannot be saved\n",
+ entry->segment_name);
+ else
+ segment_save(entry->segment_name);
+ }
+ } else {
+			/*
+			 * device is busy => we save it when it becomes
+			 * idle in dcssblk_release
+			 */
+ pr_info("Device %s is in use, its DCSSs will be "
+ "saved when it becomes idle\n",
+ dev_info->segment_name);
+ dev_info->save_pending = 1;
+ }
+ } else if (inbuf[0] == '0') {
+ if (dev_info->save_pending) {
+			/*
+			 * device is busy and the user wants to undo the
+			 * save request
+			 */
+ dev_info->save_pending = 0;
+ pr_info("A pending save request for device %s "
+ "has been canceled\n",
+ dev_info->segment_name);
+ }
+ } else {
+ up_write(&dcssblk_devices_sem);
+ return -EINVAL;
+ }
+ up_write(&dcssblk_devices_sem);
+ return count;
+}
+static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
+ dcssblk_save_store);
+
+/*
+ * device attribute for showing all segments in a device
+ */
+static ssize_t
+dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
+
+ down_read(&dcssblk_devices_sem);
+ dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+ i = 0;
+ buf[0] = '\0';
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ strcpy(&buf[i], entry->segment_name);
+ i += strlen(entry->segment_name);
+ buf[i] = '\n';
+ i++;
+ }
+ up_read(&dcssblk_devices_sem);
+ return i;
+}
+static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
+
+static struct attribute *dcssblk_dev_attrs[] = {
+ &dev_attr_shared.attr,
+ &dev_attr_save.attr,
+ &dev_attr_seglist.attr,
+ NULL,
+};
+static struct attribute_group dcssblk_dev_attr_group = {
+ .attrs = dcssblk_dev_attrs,
+};
+static const struct attribute_group *dcssblk_dev_attr_groups[] = {
+ &dcssblk_dev_attr_group,
+ NULL,
+};
+
+/*
+ * device attribute for adding devices
+ */
+static ssize_t
+dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc, i, j, num_of_segments;
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *seg_info, *temp;
+ char *local_buf;
+ unsigned long seg_byte_size;
+
+ dev_info = NULL;
+ seg_info = NULL;
+ if (dev != dcssblk_root_dev) {
+ rc = -EINVAL;
+ goto out_nobuf;
+ }
+ if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
+ rc = -ENAMETOOLONG;
+ goto out_nobuf;
+ }
+
+ local_buf = kmalloc(count + 1, GFP_KERNEL);
+ if (local_buf == NULL) {
+ rc = -ENOMEM;
+ goto out_nobuf;
+ }
+
+ /*
+ * parse input
+ */
+ num_of_segments = 0;
+ for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
+ for (j = i; j < count &&
+ (buf[j] != ':') &&
+ (buf[j] != '\0') &&
+ (buf[j] != '\n'); j++) {
+ local_buf[j-i] = toupper(buf[j]);
+ }
+ local_buf[j-i] = '\0';
+ if (((j - i) == 0) || ((j - i) > 8)) {
+ rc = -ENAMETOOLONG;
+ goto seg_list_del;
+ }
+
+ rc = dcssblk_load_segment(local_buf, &seg_info);
+ if (rc < 0)
+ goto seg_list_del;
+ /*
+ * get a struct dcssblk_dev_info
+ */
+ if (num_of_segments == 0) {
+ dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
+ GFP_KERNEL);
+ if (dev_info == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ strcpy(dev_info->segment_name, local_buf);
+ dev_info->segment_type = seg_info->segment_type;
+ INIT_LIST_HEAD(&dev_info->seg_list);
+ }
+ list_add_tail(&seg_info->lh, &dev_info->seg_list);
+ num_of_segments++;
+ i = j;
+
+ if ((buf[j] == '\0') || (buf[j] == '\n'))
+ break;
+ }
+
+ /* no trailing colon at the end of the input */
+ if ((i > 0) && (buf[i-1] == ':')) {
+ rc = -ENAMETOOLONG;
+ goto seg_list_del;
+ }
+ strscpy(local_buf, buf, i + 1);
+ dev_info->num_of_segments = num_of_segments;
+ rc = dcssblk_is_continuous(dev_info);
+ if (rc < 0)
+ goto seg_list_del;
+
+ dev_info->start = dcssblk_find_lowest_addr(dev_info);
+ dev_info->end = dcssblk_find_highest_addr(dev_info);
+
+ dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
+ dev_info->dev.release = dcssblk_release_segment;
+ dev_info->dev.groups = dcssblk_dev_attr_groups;
+ INIT_LIST_HEAD(&dev_info->lh);
+ dev_info->gd = blk_alloc_disk(NUMA_NO_NODE);
+ if (dev_info->gd == NULL) {
+ rc = -ENOMEM;
+ goto seg_list_del;
+ }
+ dev_info->gd->major = dcssblk_major;
+ dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK;
+ dev_info->gd->fops = &dcssblk_devops;
+ dev_info->gd->private_data = dev_info;
+ dev_info->gd->flags |= GENHD_FL_NO_PART;
+ blk_queue_logical_block_size(dev_info->gd->queue, 4096);
+ blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
+
+ seg_byte_size = (dev_info->end - dev_info->start + 1);
+	set_capacity(dev_info->gd, seg_byte_size >> 9); /* size in sectors */
+ pr_info("Loaded %s with total size %lu bytes and capacity %lu "
+ "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
+
+ dev_info->save_pending = 0;
+ dev_info->is_shared = 1;
+ dev_info->dev.parent = dcssblk_root_dev;
+
+	/*
+	 * get minor, add to list
+	 */
+ down_write(&dcssblk_devices_sem);
+ if (dcssblk_get_segment_by_name(local_buf)) {
+ rc = -EEXIST;
+ goto release_gd;
+ }
+ rc = dcssblk_assign_free_minor(dev_info);
+ if (rc)
+ goto release_gd;
+ sprintf(dev_info->gd->disk_name, "dcssblk%d",
+ dev_info->gd->first_minor);
+ list_add_tail(&dev_info->lh, &dcssblk_devices);
+
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -ENODEV;
+ goto dev_list_del;
+ }
+ /*
+ * register the device
+ */
+ rc = device_register(&dev_info->dev);
+ if (rc)
+ goto put_dev;
+
+ dev_info->dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
+ if (IS_ERR(dev_info->dax_dev)) {
+ rc = PTR_ERR(dev_info->dax_dev);
+ dev_info->dax_dev = NULL;
+ goto put_dev;
+ }
+ set_dax_synchronous(dev_info->dax_dev);
+ rc = dax_add_host(dev_info->dax_dev, dev_info->gd);
+ if (rc)
+ goto out_dax;
+
+ get_device(&dev_info->dev);
+ rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
+ if (rc)
+ goto out_dax_host;
+
+ switch (dev_info->segment_type) {
+ case SEG_TYPE_SR:
+ case SEG_TYPE_ER:
+ case SEG_TYPE_SC:
+		set_disk_ro(dev_info->gd, 1);
+ break;
+ default:
+		set_disk_ro(dev_info->gd, 0);
+ break;
+ }
+ up_write(&dcssblk_devices_sem);
+ rc = count;
+ goto out;
+
+out_dax_host:
+ put_device(&dev_info->dev);
+ dax_remove_host(dev_info->gd);
+out_dax:
+ kill_dax(dev_info->dax_dev);
+ put_dax(dev_info->dax_dev);
+put_dev:
+ list_del(&dev_info->lh);
+ put_disk(dev_info->gd);
+ list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
+ segment_unload(seg_info->segment_name);
+ }
+ put_device(&dev_info->dev);
+ up_write(&dcssblk_devices_sem);
+ goto out;
+dev_list_del:
+ list_del(&dev_info->lh);
+release_gd:
+ put_disk(dev_info->gd);
+ up_write(&dcssblk_devices_sem);
+seg_list_del:
+ if (dev_info == NULL)
+ goto out;
+ list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
+ list_del(&seg_info->lh);
+ segment_unload(seg_info->segment_name);
+ kfree(seg_info);
+ }
+ kfree(dev_info);
+out:
+ kfree(local_buf);
+out_nobuf:
+ return rc;
+}
+
+/*
+ * device attribute for removing devices
+ */
+static ssize_t
+dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
+ int rc, i;
+ char *local_buf;
+
+ if (dev != dcssblk_root_dev) {
+ return -EINVAL;
+ }
+ local_buf = kmalloc(count + 1, GFP_KERNEL);
+ if (local_buf == NULL) {
+ return -ENOMEM;
+ }
+ /*
+ * parse input
+ */
+	for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
+ local_buf[i] = toupper(buf[i]);
+ }
+ local_buf[i] = '\0';
+ if ((i == 0) || (i > 8)) {
+ rc = -ENAMETOOLONG;
+ goto out_buf;
+ }
+
+ down_write(&dcssblk_devices_sem);
+ dev_info = dcssblk_get_device_by_name(local_buf);
+ if (dev_info == NULL) {
+ up_write(&dcssblk_devices_sem);
+ pr_warn("Device %s cannot be removed because it is not a known device\n",
+ local_buf);
+ rc = -ENODEV;
+ goto out_buf;
+ }
+ if (atomic_read(&dev_info->use_count) != 0) {
+ up_write(&dcssblk_devices_sem);
+ pr_warn("Device %s cannot be removed while it is in use\n",
+ local_buf);
+ rc = -EBUSY;
+ goto out_buf;
+ }
+
+ list_del(&dev_info->lh);
+ /* unload all related segments */
+ list_for_each_entry(entry, &dev_info->seg_list, lh)
+ segment_unload(entry->segment_name);
+ up_write(&dcssblk_devices_sem);
+
+ dax_remove_host(dev_info->gd);
+ kill_dax(dev_info->dax_dev);
+ put_dax(dev_info->dax_dev);
+ del_gendisk(dev_info->gd);
+ put_disk(dev_info->gd);
+
+ device_unregister(&dev_info->dev);
+ put_device(&dev_info->dev);
+
+ rc = count;
+out_buf:
+ kfree(local_buf);
+ return rc;
+}
+
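+/*
+ * Illustrative usage (not driver code): with the attributes defined
+ * above, a DCSS block device is managed through sysfs; "MYDCSS" is a
+ * hypothetical segment name:
+ *
+ *	echo MYDCSS > /sys/devices/dcssblk/add
+ *	echo 0 > /sys/devices/dcssblk/MYDCSS/shared	# exclusive-writable
+ *	echo 1 > /sys/devices/dcssblk/MYDCSS/save	# save when idle
+ *	echo MYDCSS > /sys/devices/dcssblk/remove
+ */
+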
+static int
+dcssblk_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct dcssblk_dev_info *dev_info = disk->private_data;
+ int rc;
+
+	if (!dev_info) {
+ rc = -ENODEV;
+ goto out;
+ }
+ atomic_inc(&dev_info->use_count);
+ rc = 0;
+out:
+ return rc;
+}
+
+static void
+dcssblk_release(struct gendisk *disk)
+{
+ struct dcssblk_dev_info *dev_info = disk->private_data;
+ struct segment_info *entry;
+
+ if (!dev_info) {
+ WARN_ON(1);
+ return;
+ }
+ down_write(&dcssblk_devices_sem);
+ if (atomic_dec_and_test(&dev_info->use_count)
+ && (dev_info->save_pending)) {
+ pr_info("Device %s has become idle and is being saved "
+ "now\n", dev_info->segment_name);
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (entry->segment_type == SEG_TYPE_EN ||
+ entry->segment_type == SEG_TYPE_SN)
+ pr_warn("DCSS %s is of type SN or EN and cannot"
+ " be saved\n", entry->segment_name);
+ else
+ segment_save(entry->segment_name);
+ }
+ dev_info->save_pending = 0;
+ }
+ up_write(&dcssblk_devices_sem);
+}
+
+static void
+dcssblk_submit_bio(struct bio *bio)
+{
+ struct dcssblk_dev_info *dev_info;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ unsigned long index;
+ void *page_addr;
+ unsigned long source_addr;
+ unsigned long bytes_done;
+
+ bytes_done = 0;
+ dev_info = bio->bi_bdev->bd_disk->private_data;
+ if (dev_info == NULL)
+ goto fail;
+ if (!IS_ALIGNED(bio->bi_iter.bi_sector, 8) ||
+ !IS_ALIGNED(bio->bi_iter.bi_size, PAGE_SIZE))
+ /* Request is not page-aligned. */
+ goto fail;
+ /* verify data transfer direction */
+ if (dev_info->is_shared) {
+ switch (dev_info->segment_type) {
+ case SEG_TYPE_SR:
+ case SEG_TYPE_ER:
+ case SEG_TYPE_SC:
+ /* cannot write to these segments */
+ if (bio_data_dir(bio) == WRITE) {
+ pr_warn("Writing to %s failed because it is a read-only device\n",
+ dev_name(&dev_info->dev));
+ goto fail;
+ }
+ }
+ }
+
+ index = (bio->bi_iter.bi_sector >> 3);
+ bio_for_each_segment(bvec, bio, iter) {
+ page_addr = bvec_virt(&bvec);
+ source_addr = dev_info->start + (index<<12) + bytes_done;
+ if (unlikely(!IS_ALIGNED((unsigned long)page_addr, PAGE_SIZE) ||
+ !IS_ALIGNED(bvec.bv_len, PAGE_SIZE)))
+			/* More paranoia. */
+ goto fail;
+ if (bio_data_dir(bio) == READ)
+ memcpy(page_addr, __va(source_addr), bvec.bv_len);
+ else
+ memcpy(__va(source_addr), page_addr, bvec.bv_len);
+ bytes_done += bvec.bv_len;
+ }
+ bio_endio(bio);
+ return;
+fail:
+ bio_io_error(bio);
+}
+
+static long
+__dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ resource_size_t offset = pgoff * PAGE_SIZE;
+ unsigned long dev_sz;
+
+ dev_sz = dev_info->end - dev_info->start + 1;
+ if (kaddr)
+ *kaddr = (void *) dev_info->start + offset;
+ if (pfn)
+ *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
+ PFN_DEV|PFN_SPECIAL);
+
+ return (dev_sz - offset) / PAGE_SIZE;
+}
+
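+/*
+ * Illustrative note (not driver code): a DCSS is a contiguous memory
+ * range, so __dcssblk_direct_access() above is plain arithmetic. With
+ * hypothetical numbers, a segment starting at 0x20000000 with
+ * dev_sz = 16M and a request for pgoff 2 yields
+ * *kaddr = 0x20002000 and (16M - 8K) / PAGE_SIZE remaining pages.
+ */
+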
+static long
+dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
+{
+ struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
+
+ return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
+}
+
+static void
+dcssblk_check_params(void)
+{
+ int rc, i, j, k;
+ char buf[DCSSBLK_PARM_LEN + 1];
+ struct dcssblk_dev_info *dev_info;
+
+ for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
+ i++) {
+		for (j = i; (j < DCSSBLK_PARM_LEN) &&
+			    (dcssblk_segments[j] != ',') &&
+			    (dcssblk_segments[j] != '\0') &&
+			    (dcssblk_segments[j] != '('); j++) {
+			buf[j-i] = dcssblk_segments[j];
+		}
+ buf[j-i] = '\0';
+ rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
+ if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
+ for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
+ buf[k] = toupper(buf[k]);
+ buf[k] = '\0';
+ if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
+ down_read(&dcssblk_devices_sem);
+ dev_info = dcssblk_get_device_by_name(buf);
+ up_read(&dcssblk_devices_sem);
+ if (dev_info)
+ dcssblk_shared_store(&dev_info->dev,
+ NULL, "0\n", 2);
+ }
+ }
+		while ((dcssblk_segments[j] != ',') &&
+		       (dcssblk_segments[j] != '\0'))
+			j++;
+ if (dcssblk_segments[j] == '\0')
+ break;
+ i = j;
+ }
+}
+
+/*
+ * The init/exit functions.
+ */
+static void __exit
+dcssblk_exit(void)
+{
+ root_device_unregister(dcssblk_root_dev);
+ unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
+}
+
+static int __init
+dcssblk_init(void)
+{
+ int rc;
+
+ dcssblk_root_dev = root_device_register("dcssblk");
+ if (IS_ERR(dcssblk_root_dev))
+ return PTR_ERR(dcssblk_root_dev);
+ rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
+ if (rc)
+ goto out_root;
+ rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
+ if (rc)
+ goto out_root;
+ rc = register_blkdev(0, DCSSBLK_NAME);
+ if (rc < 0)
+ goto out_root;
+ dcssblk_major = rc;
+ init_rwsem(&dcssblk_devices_sem);
+
+ dcssblk_check_params();
+ return 0;
+
+out_root:
+ root_device_unregister(dcssblk_root_dev);
+
+ return rc;
+}
+
+module_init(dcssblk_init);
+module_exit(dcssblk_exit);
+
+module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
+MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
+		 "comma-separated list of sets; the names within a set are "
+		 "separated by colons; each set contains the names of "
+		 "contiguous segments and each name max. 8 chars.\n"
+		 "Adding \"(local)\" to the end of a set equals echoing 0 "
+		 "to /sys/devices/dcssblk/<device name>/shared after loading "
+		 "the contiguous segments - \n"
+		 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
new file mode 100644
index 0000000000..ade95e91b3
--- /dev/null
+++ b/drivers/s390/block/scm_blk.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Block driver for s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "scm_block"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <asm/eadm.h>
+#include "scm_blk.h"
+
+debug_info_t *scm_debug;
+static int scm_major;
+static mempool_t *aidaw_pool;
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(inactive_requests);
+static unsigned int nr_requests = 64;
+static unsigned int nr_requests_per_io = 8;
+static atomic_t nr_devices = ATOMIC_INIT(0);
+module_param(nr_requests, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
+
+module_param(nr_requests_per_io, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
+
+MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("scm:scmdev*");
+
+static void __scm_free_rq(struct scm_request *scmrq)
+{
+ struct aob_rq_header *aobrq = to_aobrq(scmrq);
+
+ free_page((unsigned long) scmrq->aob);
+ kfree(scmrq->request);
+ kfree(aobrq);
+}
+
+static void scm_free_rqs(void)
+{
+ struct list_head *iter, *safe;
+ struct scm_request *scmrq;
+
+ spin_lock_irq(&list_lock);
+ list_for_each_safe(iter, safe, &inactive_requests) {
+ scmrq = list_entry(iter, struct scm_request, list);
+ list_del(&scmrq->list);
+ __scm_free_rq(scmrq);
+ }
+ spin_unlock_irq(&list_lock);
+
+ mempool_destroy(aidaw_pool);
+}
+
+static int __scm_alloc_rq(void)
+{
+ struct aob_rq_header *aobrq;
+ struct scm_request *scmrq;
+
+ aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
+ if (!aobrq)
+ return -ENOMEM;
+
+ scmrq = (void *) aobrq->data;
+ scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
+ if (!scmrq->aob)
+ goto free;
+
+ scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
+ GFP_KERNEL);
+ if (!scmrq->request)
+ goto free;
+
+ INIT_LIST_HEAD(&scmrq->list);
+ spin_lock_irq(&list_lock);
+ list_add(&scmrq->list, &inactive_requests);
+ spin_unlock_irq(&list_lock);
+
+ return 0;
+free:
+ __scm_free_rq(scmrq);
+ return -ENOMEM;
+}
+
+static int scm_alloc_rqs(unsigned int nrqs)
+{
+ int ret = 0;
+
+ aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
+ if (!aidaw_pool)
+ return -ENOMEM;
+
+ while (nrqs-- && !ret)
+ ret = __scm_alloc_rq();
+
+ return ret;
+}
+
+static struct scm_request *scm_request_fetch(void)
+{
+ struct scm_request *scmrq = NULL;
+
+ spin_lock_irq(&list_lock);
+ if (list_empty(&inactive_requests))
+ goto out;
+ scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
+ list_del(&scmrq->list);
+out:
+ spin_unlock_irq(&list_lock);
+ return scmrq;
+}
+
+static void scm_request_done(struct scm_request *scmrq)
+{
+ unsigned long flags;
+ struct msb *msb;
+ u64 aidaw;
+ int i;
+
+ for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+ msb = &scmrq->aob->msb[i];
+ aidaw = (u64)phys_to_virt(msb->data_addr);
+
+ if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+ IS_ALIGNED(aidaw, PAGE_SIZE))
+ mempool_free(virt_to_page((void *)aidaw), aidaw_pool);
+ }
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_add(&scmrq->list, &inactive_requests);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+{
+ return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+}
+
+static inline struct aidaw *scm_aidaw_alloc(void)
+{
+ struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
+
+ return page ? page_address(page) : NULL;
+}
+
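+/*
+ * Bytes of data still addressable through the aidaw entries between
+ * *aidaw and the end of its page: each remaining entry maps one
+ * PAGE_SIZE block. E.g. (hypothetical numbers) an aidaw sitting
+ * 128 bytes before the end of its page leaves 128 / sizeof(*aidaw)
+ * entries, i.e. that many more PAGE_SIZE chunks of data.
+ */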
+static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
+{
+ unsigned long _aidaw = (unsigned long) aidaw;
+ unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
+
+ return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
+}
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
+{
+ struct aidaw *aidaw;
+
+ if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
+ return scmrq->next_aidaw;
+
+ aidaw = scm_aidaw_alloc();
+ if (aidaw)
+ memset(aidaw, 0, PAGE_SIZE);
+ return aidaw;
+}
+
+static int scm_request_prepare(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ struct scm_device *scmdev = bdev->gendisk->private_data;
+ int pos = scmrq->aob->request.msb_count;
+ struct msb *msb = &scmrq->aob->msb[pos];
+ struct request *req = scmrq->request[pos];
+ struct req_iterator iter;
+ struct aidaw *aidaw;
+ struct bio_vec bv;
+
+ aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
+ if (!aidaw)
+ return -ENOMEM;
+
+ msb->bs = MSB_BS_4K;
+ scmrq->aob->request.msb_count++;
+ msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+ msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
+ msb->flags |= MSB_FLAG_IDA;
+ msb->data_addr = (u64)virt_to_phys(aidaw);
+
+ rq_for_each_segment(bv, req, iter) {
+ WARN_ON(bv.bv_offset);
+ msb->blk_count += bv.bv_len >> 12;
+ aidaw->data_addr = virt_to_phys(page_address(bv.bv_page));
+ aidaw++;
+ }
+
+ scmrq->next_aidaw = aidaw;
+ return 0;
+}
+
+static inline void scm_request_set(struct scm_request *scmrq,
+ struct request *req)
+{
+ scmrq->request[scmrq->aob->request.msb_count] = req;
+}
+
+static inline void scm_request_init(struct scm_blk_dev *bdev,
+ struct scm_request *scmrq)
+{
+ struct aob_rq_header *aobrq = to_aobrq(scmrq);
+ struct aob *aob = scmrq->aob;
+
+ memset(scmrq->request, 0,
+ nr_requests_per_io * sizeof(scmrq->request[0]));
+ memset(aob, 0, sizeof(*aob));
+ aobrq->scmdev = bdev->scmdev;
+ aob->request.cmd_code = ARQB_CMD_MOVE;
+ aob->request.data = (u64) aobrq;
+ scmrq->bdev = bdev;
+ scmrq->retries = 4;
+ scmrq->error = BLK_STS_OK;
+ /* We don't use all msbs - place aidaws at the end of the aob page. */
+ scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
+}
+
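+/* Hand all bundled requests back to the block layer for a later retry. */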
+static void scm_request_requeue(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ int i;
+
+ for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+ blk_mq_requeue_request(scmrq->request[i], false);
+
+ atomic_dec(&bdev->queued_reqs);
+ scm_request_done(scmrq);
+ blk_mq_kick_requeue_list(bdev->rq);
+}
+
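+/* Propagate the per-request status and complete all bundled requests. */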
+static void scm_request_finish(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ blk_status_t *error;
+ int i;
+
+ for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+ error = blk_mq_rq_to_pdu(scmrq->request[i]);
+ *error = scmrq->error;
+ if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
+ blk_mq_complete_request(scmrq->request[i]);
+ }
+
+ atomic_dec(&bdev->queued_reqs);
+ scm_request_done(scmrq);
+}
+
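+/*
+ * Kick off the AOB on the EADM subchannel; if none is available the
+ * bundled requests are put back on the requeue list.
+ */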
+static void scm_request_start(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+
+ atomic_inc(&bdev->queued_reqs);
+ if (eadm_start_aob(scmrq->aob)) {
+ SCM_LOG(5, "no subchannel");
+ scm_request_requeue(scmrq);
+ }
+}
+
+struct scm_queue {
+ struct scm_request *scmrq;
+ spinlock_t lock;
+};
+
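+/*
+ * Gather requests into a per hw queue scm_request until the queue is
+ * drained (qd->last) or all msbs of the AOB are used, then start the
+ * I/O. Out-of-resource conditions are reported as BLK_STS_RESOURCE so
+ * that the block layer retries later.
+ */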
+static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *qd)
+{
+ struct scm_device *scmdev = hctx->queue->queuedata;
+ struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+ struct scm_queue *sq = hctx->driver_data;
+ struct request *req = qd->rq;
+ struct scm_request *scmrq;
+
+ spin_lock(&sq->lock);
+ if (!scm_permit_request(bdev, req)) {
+ spin_unlock(&sq->lock);
+ return BLK_STS_RESOURCE;
+ }
+
+ scmrq = sq->scmrq;
+ if (!scmrq) {
+ scmrq = scm_request_fetch();
+ if (!scmrq) {
+ SCM_LOG(5, "no request");
+ spin_unlock(&sq->lock);
+ return BLK_STS_RESOURCE;
+ }
+ scm_request_init(bdev, scmrq);
+ sq->scmrq = scmrq;
+ }
+ scm_request_set(scmrq, req);
+
+ if (scm_request_prepare(scmrq)) {
+ SCM_LOG(5, "aidaw alloc failed");
+ scm_request_set(scmrq, NULL);
+
+ if (scmrq->aob->request.msb_count)
+ scm_request_start(scmrq);
+
+ sq->scmrq = NULL;
+ spin_unlock(&sq->lock);
+ return BLK_STS_RESOURCE;
+ }
+ blk_mq_start_request(req);
+
+ if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
+ scm_request_start(scmrq);
+ sq->scmrq = NULL;
+ }
+ spin_unlock(&sq->lock);
+ return BLK_STS_OK;
+}
+
+static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int idx)
+{
+ struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);
+
+ if (!qd)
+ return -ENOMEM;
+
+ spin_lock_init(&qd->lock);
+ hctx->driver_data = qd;
+
+ return 0;
+}
+
+static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
+{
+ struct scm_queue *qd = hctx->driver_data;
+
+ WARN_ON(qd->scmrq);
+	kfree(qd);
+ hctx->driver_data = NULL;
+}
+
+static void __scmrq_log_error(struct scm_request *scmrq)
+{
+ struct aob *aob = scmrq->aob;
+
+	if (scmrq->error == BLK_STS_TIMEOUT) {
+		SCM_LOG(1, "Request timeout");
+	} else {
+		SCM_LOG(1, "Request error");
+		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
+	}
+ if (scmrq->retries)
+ SCM_LOG(1, "Retry request");
+ else
+ pr_err("An I/O operation to SCM failed with rc=%d\n",
+ scmrq->error);
+}
+
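+/*
+ * Retry a failed request: a write-prohibit response suspends writes and
+ * requeues the request, anything else restarts the AOB (requeueing if
+ * that fails too).
+ */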
+static void scm_blk_handle_error(struct scm_request *scmrq)
+{
+ struct scm_blk_dev *bdev = scmrq->bdev;
+ unsigned long flags;
+
+ if (scmrq->error != BLK_STS_IOERR)
+ goto restart;
+
+ /* For -EIO the response block is valid. */
+ switch (scmrq->aob->response.eqc) {
+ case EQC_WR_PROHIBIT:
+ spin_lock_irqsave(&bdev->lock, flags);
+ if (bdev->state != SCM_WR_PROHIBIT)
+ pr_info("%lx: Write access to the SCM increment is suspended\n",
+ (unsigned long) bdev->scmdev->address);
+ bdev->state = SCM_WR_PROHIBIT;
+ spin_unlock_irqrestore(&bdev->lock, flags);
+ goto requeue;
+ default:
+ break;
+ }
+
+restart:
+ if (!eadm_start_aob(scmrq->aob))
+ return;
+
+requeue:
+ scm_request_requeue(scmrq);
+}
+
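+/* Interrupt handler, called by the EADM layer when an AOB completes. */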
+void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
+{
+ struct scm_request *scmrq = data;
+
+ scmrq->error = error;
+ if (error) {
+ __scmrq_log_error(scmrq);
+ if (scmrq->retries-- > 0) {
+ scm_blk_handle_error(scmrq);
+ return;
+ }
+ }
+
+ scm_request_finish(scmrq);
+}
+
+static void scm_blk_request_done(struct request *req)
+{
+ blk_status_t *error = blk_mq_rq_to_pdu(req);
+
+ blk_mq_end_request(req, *error);
+}
+
+static const struct block_device_operations scm_blk_devops = {
+ .owner = THIS_MODULE,
+};
+
+static const struct blk_mq_ops scm_mq_ops = {
+ .queue_rq = scm_blk_request,
+ .complete = scm_blk_request_done,
+ .init_hctx = scm_blk_init_hctx,
+ .exit_hctx = scm_blk_exit_hctx,
+};
+
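+/*
+ * Set up the blk-mq tag set, gendisk and request queue for one SCM
+ * increment. Device names follow scma..scmz, scmaa..scmzz, which caps
+ * the number of devices at 702.
+ */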
+int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
+{
+ unsigned int devindex, nr_max_blk;
+ struct request_queue *rq;
+ int len, ret;
+
+ devindex = atomic_inc_return(&nr_devices) - 1;
+ /* scma..scmz + scmaa..scmzz */
+ if (devindex > 701) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ bdev->scmdev = scmdev;
+ bdev->state = SCM_OPER;
+ spin_lock_init(&bdev->lock);
+ atomic_set(&bdev->queued_reqs, 0);
+
+ bdev->tag_set.ops = &scm_mq_ops;
+ bdev->tag_set.cmd_size = sizeof(blk_status_t);
+ bdev->tag_set.nr_hw_queues = nr_requests;
+ bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
+ bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ bdev->tag_set.numa_node = NUMA_NO_NODE;
+
+ ret = blk_mq_alloc_tag_set(&bdev->tag_set);
+ if (ret)
+ goto out;
+
+ bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev);
+ if (IS_ERR(bdev->gendisk)) {
+ ret = PTR_ERR(bdev->gendisk);
+ goto out_tag;
+ }
+ rq = bdev->rq = bdev->gendisk->queue;
+ nr_max_blk = min(scmdev->nr_max_block,
+ (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
+
+ blk_queue_logical_block_size(rq, 1 << 12);
+	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 byte sectors = 4K block size */
+ blk_queue_max_segments(rq, nr_max_blk);
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
+
+ bdev->gendisk->private_data = scmdev;
+ bdev->gendisk->fops = &scm_blk_devops;
+ bdev->gendisk->major = scm_major;
+ bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
+ bdev->gendisk->minors = SCM_NR_PARTS;
+
+ len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
+ if (devindex > 25) {
+ len += snprintf(bdev->gendisk->disk_name + len,
+ DISK_NAME_LEN - len, "%c",
+ 'a' + (devindex / 26) - 1);
+ devindex = devindex % 26;
+ }
+ snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
+ 'a' + devindex);
+
+ /* 512 byte sectors */
+ set_capacity(bdev->gendisk, scmdev->size >> 9);
+ ret = device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
+ if (ret)
+ goto out_cleanup_disk;
+
+ return 0;
+
+out_cleanup_disk:
+ put_disk(bdev->gendisk);
+out_tag:
+ blk_mq_free_tag_set(&bdev->tag_set);
+out:
+ atomic_dec(&nr_devices);
+ return ret;
+}
+
+void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
+{
+ del_gendisk(bdev->gendisk);
+ put_disk(bdev->gendisk);
+ blk_mq_free_tag_set(&bdev->tag_set);
+}
+
+void scm_blk_set_available(struct scm_blk_dev *bdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bdev->lock, flags);
+ if (bdev->state == SCM_WR_PROHIBIT)
+ pr_info("%lx: Write access to the SCM increment is restored\n",
+ (unsigned long) bdev->scmdev->address);
+ bdev->state = SCM_OPER;
+ spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
+static bool __init scm_blk_params_valid(void)
+{
+ if (!nr_requests_per_io || nr_requests_per_io > 64)
+ return false;
+
+ return true;
+}
+
+static int __init scm_blk_init(void)
+{
+ int ret = -EINVAL;
+
+ if (!scm_blk_params_valid())
+ goto out;
+
+ ret = register_blkdev(0, "scm");
+ if (ret < 0)
+ goto out;
+
+ scm_major = ret;
+ ret = scm_alloc_rqs(nr_requests);
+ if (ret)
+ goto out_free;
+
+ scm_debug = debug_register("scm_log", 16, 1, 16);
+ if (!scm_debug) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ debug_register_view(scm_debug, &debug_hex_ascii_view);
+ debug_set_level(scm_debug, 2);
+
+ ret = scm_drv_init();
+ if (ret)
+ goto out_dbf;
+
+ return ret;
+
+out_dbf:
+ debug_unregister(scm_debug);
+out_free:
+ scm_free_rqs();
+ unregister_blkdev(scm_major, "scm");
+out:
+ return ret;
+}
+module_init(scm_blk_init);
+
+static void __exit scm_blk_cleanup(void)
+{
+ scm_drv_cleanup();
+ debug_unregister(scm_debug);
+ scm_free_rqs();
+ unregister_blkdev(scm_major, "scm");
+}
+module_exit(scm_blk_cleanup);
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
new file mode 100644
index 0000000000..af82b32147
--- /dev/null
+++ b/drivers/s390/block/scm_blk.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef SCM_BLK_H
+#define SCM_BLK_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/list.h>
+
+#include <asm/debug.h>
+#include <asm/eadm.h>
+
+#define SCM_NR_PARTS 8
+#define SCM_QUEUE_DELAY 5
+
+struct scm_blk_dev {
+ struct request_queue *rq;
+ struct gendisk *gendisk;
+ struct blk_mq_tag_set tag_set;
+ struct scm_device *scmdev;
+ spinlock_t lock;
+ atomic_t queued_reqs;
+ enum {SCM_OPER, SCM_WR_PROHIBIT} state;
+ struct list_head finished_requests;
+};
+
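+/* An AOB and the blk-mq requests bundled into it, one per used msb. */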
+struct scm_request {
+ struct scm_blk_dev *bdev;
+ struct aidaw *next_aidaw;
+ struct request **request;
+ struct aob *aob;
+ struct list_head list;
+ u8 retries;
+ blk_status_t error;
+};
+
+#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
+
+int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
+void scm_blk_dev_cleanup(struct scm_blk_dev *);
+void scm_blk_set_available(struct scm_blk_dev *);
+void scm_blk_irq(struct scm_device *, void *, blk_status_t);
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
+
+int scm_drv_init(void);
+void scm_drv_cleanup(void);
+
+extern debug_info_t *scm_debug;
+
+#define SCM_LOG(imp, txt) do { \
+ debug_text_event(scm_debug, imp, txt); \
+ } while (0)
+
+static inline void SCM_LOG_HEX(int level, void *data, int length)
+{
+ debug_event(scm_debug, level, data, length);
+}
+
+static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
+{
+ struct {
+ u64 address;
+ u8 oper_state;
+ u8 rank;
+ } __packed data = {
+ .address = scmdev->address,
+ .oper_state = scmdev->attrs.oper_state,
+ .rank = scmdev->attrs.rank,
+ };
+
+ SCM_LOG_HEX(level, &data, sizeof(data));
+}
+
+#endif /* SCM_BLK_H */
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
new file mode 100644
index 0000000000..69a845eb8b
--- /dev/null
+++ b/drivers/s390/block/scm_drv.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device driver for s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "scm_block"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/eadm.h>
+#include "scm_blk.h"
+
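+/* Handle asynchronous state change notifications for an SCM increment. */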
+static void scm_notify(struct scm_device *scmdev, enum scm_event event)
+{
+ struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+
+ switch (event) {
+ case SCM_CHANGE:
+ pr_info("%lx: The capabilities of the SCM increment changed\n",
+ (unsigned long) scmdev->address);
+ SCM_LOG(2, "State changed");
+ SCM_LOG_STATE(2, scmdev);
+ break;
+ case SCM_AVAIL:
+ SCM_LOG(2, "Increment available");
+ SCM_LOG_STATE(2, scmdev);
+ scm_blk_set_available(bdev);
+ break;
+ }
+}
+
+static int scm_probe(struct scm_device *scmdev)
+{
+ struct scm_blk_dev *bdev;
+ int ret;
+
+ SCM_LOG(2, "probe");
+ SCM_LOG_STATE(2, scmdev);
+
+ if (scmdev->attrs.oper_state != OP_STATE_GOOD)
+ return -EINVAL;
+
+ bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&scmdev->dev, bdev);
+	ret = scm_blk_dev_setup(bdev, scmdev);
+	if (ret) {
+		dev_set_drvdata(&scmdev->dev, NULL);
+		kfree(bdev);
+	}
+
+	return ret;
+}
+
+static void scm_remove(struct scm_device *scmdev)
+{
+ struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+
+ scm_blk_dev_cleanup(bdev);
+ dev_set_drvdata(&scmdev->dev, NULL);
+ kfree(bdev);
+}
+
+static struct scm_driver scm_drv = {
+ .drv = {
+ .name = "scm_block",
+ .owner = THIS_MODULE,
+ },
+ .notify = scm_notify,
+ .probe = scm_probe,
+ .remove = scm_remove,
+ .handler = scm_blk_irq,
+};
+
+int __init scm_drv_init(void)
+{
+ return scm_driver_register(&scm_drv);
+}
+
+void scm_drv_cleanup(void)
+{
+ scm_driver_unregister(&scm_drv);
+}