From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- drivers/scsi/device_handler/Kconfig | 41 + drivers/scsi/device_handler/Makefile | 8 + drivers/scsi/device_handler/scsi_dh_alua.c | 1287 +++++++++++++++++++++++++++ drivers/scsi/device_handler/scsi_dh_emc.c | 545 ++++++++++++ drivers/scsi/device_handler/scsi_dh_hp_sw.c | 261 ++++++ drivers/scsi/device_handler/scsi_dh_rdac.c | 835 +++++++++++++++++ 6 files changed, 2977 insertions(+) create mode 100644 drivers/scsi/device_handler/Kconfig create mode 100644 drivers/scsi/device_handler/Makefile create mode 100644 drivers/scsi/device_handler/scsi_dh_alua.c create mode 100644 drivers/scsi/device_handler/scsi_dh_emc.c create mode 100644 drivers/scsi/device_handler/scsi_dh_hp_sw.c create mode 100644 drivers/scsi/device_handler/scsi_dh_rdac.c (limited to 'drivers/scsi/device_handler') diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig new file mode 100644 index 000000000..368eb94c2 --- /dev/null +++ b/drivers/scsi/device_handler/Kconfig @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# SCSI Device Handler configuration +# + +menuconfig SCSI_DH + bool "SCSI Device Handlers" + depends on SCSI + default n + help + SCSI Device Handlers provide device specific support for + devices utilized in multipath configurations. Say Y here to + select support for specific hardware. + +config SCSI_DH_RDAC + tristate "LSI RDAC Device Handler" + depends on SCSI_DH && SCSI + help + If you have a LSI RDAC select y. Otherwise, say N. + +config SCSI_DH_HP_SW + tristate "HP/COMPAQ MSA Device Handler" + depends on SCSI_DH && SCSI + help + If you have a HP/COMPAQ MSA device that requires START_STOP to + be sent to start it and cannot upgrade the firmware then select y. + Otherwise, say N. + +config SCSI_DH_EMC + tristate "EMC CLARiiON Device Handler" + depends on SCSI_DH && SCSI + help + If you have a EMC CLARiiON select y. Otherwise, say N. + +config SCSI_DH_ALUA + tristate "SPC-3 ALUA Device Handler" + depends on SCSI_DH && SCSI + help + SCSI Device handler for generic SPC-3 Asymmetric Logical Unit + Access (ALUA). + diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile new file mode 100644 index 000000000..0a603aefd --- /dev/null +++ b/drivers/scsi/device_handler/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# SCSI Device Handler +# +obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o +obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o +obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o +obj-$(CONFIG_SCSI_DH_ALUA) += scsi_dh_alua.o diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c new file mode 100644 index 000000000..0781f991e --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -0,0 +1,1287 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Generic SCSI-3 ALUA SCSI Device Handler + * + * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH. + * All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ALUA_DH_NAME "alua" +#define ALUA_DH_VER "2.0" + +#define TPGS_SUPPORT_NONE 0x00 +#define TPGS_SUPPORT_OPTIMIZED 0x01 +#define TPGS_SUPPORT_NONOPTIMIZED 0x02 +#define TPGS_SUPPORT_STANDBY 0x04 +#define TPGS_SUPPORT_UNAVAILABLE 0x08 +#define TPGS_SUPPORT_LBA_DEPENDENT 0x10 +#define TPGS_SUPPORT_OFFLINE 0x40 +#define TPGS_SUPPORT_TRANSITION 0x80 +#define TPGS_SUPPORT_ALL 0xdf + +#define RTPG_FMT_MASK 0x70 +#define RTPG_FMT_EXT_HDR 0x10 + +#define TPGS_MODE_UNINITIALIZED -1 +#define TPGS_MODE_NONE 0x0 +#define TPGS_MODE_IMPLICIT 0x1 +#define TPGS_MODE_EXPLICIT 0x2 + +#define ALUA_RTPG_SIZE 128 +#define ALUA_FAILOVER_TIMEOUT 60 +#define ALUA_FAILOVER_RETRIES 5 +#define ALUA_RTPG_DELAY_MSECS 5 +#define ALUA_RTPG_RETRY_DELAY 2 + +/* device handler flags */ +#define ALUA_OPTIMIZE_STPG 0x01 +#define ALUA_RTPG_EXT_HDR_UNSUPP 0x02 +/* State machine flags */ +#define ALUA_PG_RUN_RTPG 0x10 +#define ALUA_PG_RUN_STPG 0x20 +#define ALUA_PG_RUNNING 0x40 + +static uint optimize_stpg; +module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0."); + +static LIST_HEAD(port_group_list); +static DEFINE_SPINLOCK(port_group_lock); +static struct workqueue_struct *kaluad_wq; + +struct alua_port_group { + struct kref kref; + struct rcu_head rcu; + struct list_head node; + struct list_head dh_list; + unsigned char device_id_str[256]; + int device_id_len; + int group_id; + int tpgs; + int state; + int pref; + int valid_states; + unsigned flags; /* used for optimizing STPG */ + unsigned char transition_tmo; + unsigned long expiry; + unsigned long interval; + struct delayed_work rtpg_work; + spinlock_t lock; + struct list_head rtpg_list; + struct scsi_device *rtpg_sdev; +}; + +struct alua_dh_data { + struct list_head node; + struct alua_port_group __rcu *pg; + int group_id; + spinlock_t pg_lock; + struct scsi_device *sdev; + int init_error; + struct mutex init_mutex; + bool disabled; +}; + +struct alua_queue_data { + struct list_head entry; + activate_complete callback_fn; + void *callback_data; +}; + +#define ALUA_POLICY_SWITCH_CURRENT 0 +#define ALUA_POLICY_SWITCH_ALL 1 + +static void alua_rtpg_work(struct work_struct *work); +static bool alua_rtpg_queue(struct alua_port_group *pg, + struct scsi_device *sdev, + struct alua_queue_data *qdata, bool force); +static void alua_check(struct scsi_device *sdev, bool force); + +static void release_port_group(struct kref *kref) +{ + struct alua_port_group *pg; + + pg = container_of(kref, struct alua_port_group, kref); + if (pg->rtpg_sdev) + flush_delayed_work(&pg->rtpg_work); + spin_lock(&port_group_lock); + list_del(&pg->node); + spin_unlock(&port_group_lock); + kfree_rcu(pg, rcu); +} + +/* + * submit_rtpg - Issue a REPORT TARGET GROUP STATES command + * @sdev: sdev the command should be sent to + */ +static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff, + int bufflen, struct scsi_sense_hdr *sshdr, int flags) +{ + u8 cdb[MAX_COMMAND_SIZE]; + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; + + /* Prepare the command. 
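+ * The CDB assembled below is a MAINTENANCE IN command (opcode
+ * 0xa3) with the REPORT TARGET PORT GROUPS service action (0x0a)
+ * in byte 1; unless the target has already told us it cannot
+ * handle it, the extended header parameter data format is
+ * requested in the same byte.  The allocation length is placed
+ * big-endian into bytes 6-9.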
*/ + memset(cdb, 0x0, MAX_COMMAND_SIZE); + cdb[0] = MAINTENANCE_IN; + if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP)) + cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT; + else + cdb[1] = MI_REPORT_TARGET_PGS; + put_unaligned_be32(bufflen, &cdb[6]); + + return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, NULL, + sshdr, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, req_flags, 0, NULL); +} + +/* + * submit_stpg - Issue a SET TARGET PORT GROUP command + * + * Currently we're only setting the current target port group state + * to 'active/optimized' and let the array firmware figure out + * the states of the remaining groups. + */ +static int submit_stpg(struct scsi_device *sdev, int group_id, + struct scsi_sense_hdr *sshdr) +{ + u8 cdb[MAX_COMMAND_SIZE]; + unsigned char stpg_data[8]; + int stpg_len = 8; + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; + + /* Prepare the data buffer */ + memset(stpg_data, 0, stpg_len); + stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL; + put_unaligned_be16(group_id, &stpg_data[6]); + + /* Prepare the command. */ + memset(cdb, 0x0, MAX_COMMAND_SIZE); + cdb[0] = MAINTENANCE_OUT; + cdb[1] = MO_SET_TARGET_PGS; + put_unaligned_be32(stpg_len, &cdb[6]); + + return scsi_execute(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, NULL, + sshdr, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, req_flags, 0, NULL); +} + +static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, + int group_id) +{ + struct alua_port_group *pg; + + if (!id_str || !id_size || !strlen(id_str)) + return NULL; + + list_for_each_entry(pg, &port_group_list, node) { + if (pg->group_id != group_id) + continue; + if (!pg->device_id_len || pg->device_id_len != id_size) + continue; + if (strncmp(pg->device_id_str, id_str, id_size)) + continue; + if (!kref_get_unless_zero(&pg->kref)) + continue; + return pg; + } + + return NULL; +} + +/* + * alua_alloc_pg - Allocate a new port_group structure + * @sdev: scsi device + * @group_id: port group id + * @tpgs: target port group settings + * + * Allocate a new port_group structure for a given + * device. + */ +static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, + int group_id, int tpgs) +{ + struct alua_port_group *pg, *tmp_pg; + + pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL); + if (!pg) + return ERR_PTR(-ENOMEM); + + pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str, + sizeof(pg->device_id_str)); + if (pg->device_id_len <= 0) { + /* + * TPGS supported but no device identification found. + * Generate private device identification. 
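+ *
+ * An empty device_id_str never matches in alua_find_get_pg(),
+ * so a device without a usable designator ends up with a port
+ * group structure of its own instead of sharing one with its
+ * other paths.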
+ */ + sdev_printk(KERN_INFO, sdev, + "%s: No device descriptors found\n", + ALUA_DH_NAME); + pg->device_id_str[0] = '\0'; + pg->device_id_len = 0; + } + pg->group_id = group_id; + pg->tpgs = tpgs; + pg->state = SCSI_ACCESS_STATE_OPTIMAL; + pg->valid_states = TPGS_SUPPORT_ALL; + if (optimize_stpg) + pg->flags |= ALUA_OPTIMIZE_STPG; + kref_init(&pg->kref); + INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work); + INIT_LIST_HEAD(&pg->rtpg_list); + INIT_LIST_HEAD(&pg->node); + INIT_LIST_HEAD(&pg->dh_list); + spin_lock_init(&pg->lock); + + spin_lock(&port_group_lock); + tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, + group_id); + if (tmp_pg) { + spin_unlock(&port_group_lock); + kfree(pg); + return tmp_pg; + } + + list_add(&pg->node, &port_group_list); + spin_unlock(&port_group_lock); + + return pg; +} + +/* + * alua_check_tpgs - Evaluate TPGS setting + * @sdev: device to be checked + * + * Examine the TPGS setting of the sdev to find out if ALUA + * is supported. + */ +static int alua_check_tpgs(struct scsi_device *sdev) +{ + int tpgs = TPGS_MODE_NONE; + + /* + * ALUA support for non-disk devices is fraught with + * difficulties, so disable it for now. + */ + if (sdev->type != TYPE_DISK) { + sdev_printk(KERN_INFO, sdev, + "%s: disable for non-disk devices\n", + ALUA_DH_NAME); + return tpgs; + } + + tpgs = scsi_device_tpgs(sdev); + switch (tpgs) { + case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT: + sdev_printk(KERN_INFO, sdev, + "%s: supports implicit and explicit TPGS\n", + ALUA_DH_NAME); + break; + case TPGS_MODE_EXPLICIT: + sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n", + ALUA_DH_NAME); + break; + case TPGS_MODE_IMPLICIT: + sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n", + ALUA_DH_NAME); + break; + case TPGS_MODE_NONE: + sdev_printk(KERN_INFO, sdev, "%s: not supported\n", + ALUA_DH_NAME); + break; + default: + sdev_printk(KERN_INFO, sdev, + "%s: unsupported TPGS setting %d\n", + ALUA_DH_NAME, tpgs); + tpgs = TPGS_MODE_NONE; + break; + } + + return tpgs; +} + +/* + * alua_check_vpd - Evaluate INQUIRY vpd page 0x83 + * @sdev: device to be checked + * + * Extract the relative target port and the target port group + * descriptor from the list of identificators. + */ +static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, + int tpgs) +{ + int rel_port = -1, group_id; + struct alua_port_group *pg, *old_pg = NULL; + bool pg_updated = false; + unsigned long flags; + + group_id = scsi_vpd_tpg_id(sdev, &rel_port); + if (group_id < 0) { + /* + * Internal error; TPGS supported but required + * VPD identification descriptors not present. + * Disable ALUA support + */ + sdev_printk(KERN_INFO, sdev, + "%s: No target port descriptors found\n", + ALUA_DH_NAME); + return SCSI_DH_DEV_UNSUPP; + } + + pg = alua_alloc_pg(sdev, group_id, tpgs); + if (IS_ERR(pg)) { + if (PTR_ERR(pg) == -ENOMEM) + return SCSI_DH_NOMEM; + return SCSI_DH_DEV_UNSUPP; + } + if (pg->device_id_len) + sdev_printk(KERN_INFO, sdev, + "%s: device %s port group %x rel port %x\n", + ALUA_DH_NAME, pg->device_id_str, + group_id, rel_port); + else + sdev_printk(KERN_INFO, sdev, + "%s: port group %x rel port %x\n", + ALUA_DH_NAME, group_id, rel_port); + + /* Check for existing port group references */ + spin_lock(&h->pg_lock); + old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); + if (old_pg != pg) { + /* port group has changed. 
Update to new port group */ + if (h->pg) { + spin_lock_irqsave(&old_pg->lock, flags); + list_del_rcu(&h->node); + spin_unlock_irqrestore(&old_pg->lock, flags); + } + rcu_assign_pointer(h->pg, pg); + pg_updated = true; + } + + spin_lock_irqsave(&pg->lock, flags); + if (pg_updated) + list_add_rcu(&h->node, &pg->dh_list); + spin_unlock_irqrestore(&pg->lock, flags); + + alua_rtpg_queue(rcu_dereference_protected(h->pg, + lockdep_is_held(&h->pg_lock)), + sdev, NULL, true); + spin_unlock(&h->pg_lock); + + if (old_pg) + kref_put(&old_pg->kref, release_port_group); + + return SCSI_DH_OK; +} + +static char print_alua_state(unsigned char state) +{ + switch (state) { + case SCSI_ACCESS_STATE_OPTIMAL: + return 'A'; + case SCSI_ACCESS_STATE_ACTIVE: + return 'N'; + case SCSI_ACCESS_STATE_STANDBY: + return 'S'; + case SCSI_ACCESS_STATE_UNAVAILABLE: + return 'U'; + case SCSI_ACCESS_STATE_LBA: + return 'L'; + case SCSI_ACCESS_STATE_OFFLINE: + return 'O'; + case SCSI_ACCESS_STATE_TRANSITIONING: + return 'T'; + default: + return 'X'; + } +} + +static enum scsi_disposition alua_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg; + + switch (sense_hdr->sense_key) { + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { + /* + * LUN Not Accessible - ALUA state transition + */ + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (pg) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + rcu_read_unlock(); + alua_check(sdev, false); + return NEEDS_RETRY; + } + break; + case UNIT_ATTENTION: + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) { + /* + * Power On, Reset, or Bus Device Reset. + * Might have obscured a state transition, + * so schedule a recheck. + */ + alua_check(sdev, true); + return ADD_TO_MLQUEUE; + } + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) + /* + * Device internal reset + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) + /* + * Mode Parameters Changed + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) { + /* + * ALUA state changed + */ + alua_check(sdev, true); + return ADD_TO_MLQUEUE; + } + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) { + /* + * Implicit ALUA state transition failed + */ + alua_check(sdev, true); + return ADD_TO_MLQUEUE; + } + if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03) + /* + * Inquiry data has changed + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) + /* + * REPORTED_LUNS_DATA_HAS_CHANGED is reported + * when switching controllers on targets like + * Intel Multi-Flex. We can just retry. + */ + return ADD_TO_MLQUEUE; + break; + } + + return SCSI_RETURN_NOT_HANDLED; +} + +/* + * alua_tur - Send a TEST UNIT READY + * @sdev: device to which the TEST UNIT READY command should be send + * + * Send a TEST UNIT READY to @sdev to figure out the device state + * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING, + * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise. 
+ */ +static int alua_tur(struct scsi_device *sdev) +{ + struct scsi_sense_hdr sense_hdr; + int retval; + + retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ, + ALUA_FAILOVER_RETRIES, &sense_hdr); + if (sense_hdr.sense_key == NOT_READY && + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) + return SCSI_DH_RETRY; + else if (retval) + return SCSI_DH_IO; + else + return SCSI_DH_OK; +} + +/* + * alua_rtpg - Evaluate REPORT TARGET GROUP STATES + * @sdev: the device to be evaluated. + * + * Evaluate the Target Port Group State. + * Returns SCSI_DH_DEV_OFFLINED if the path is + * found to be unusable. + */ +static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) +{ + struct scsi_sense_hdr sense_hdr; + struct alua_port_group *tmp_pg; + int len, k, off, bufflen = ALUA_RTPG_SIZE; + int group_id_old, state_old, pref_old, valid_states_old; + unsigned char *desc, *buff; + unsigned err; + int retval; + unsigned int tpg_desc_tbl_off; + unsigned char orig_transition_tmo; + unsigned long flags; + bool transitioning_sense = false; + + group_id_old = pg->group_id; + state_old = pg->state; + pref_old = pg->pref; + valid_states_old = pg->valid_states; + + if (!pg->expiry) { + unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ; + + if (pg->transition_tmo) + transition_tmo = pg->transition_tmo * HZ; + + pg->expiry = round_jiffies_up(jiffies + transition_tmo); + } + + buff = kzalloc(bufflen, GFP_KERNEL); + if (!buff) + return SCSI_DH_DEV_TEMP_BUSY; + + retry: + err = 0; + retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags); + + if (retval) { + /* + * Some (broken) implementations have a habit of returning + * an error during things like firmware update etc. + * But if the target only supports active/optimized there's + * not much we can do; it's not that we can switch paths + * or anything. + * So ignore any errors to avoid spurious failures during + * path failover. + */ + if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) { + sdev_printk(KERN_INFO, sdev, + "%s: ignoring rtpg result %d\n", + ALUA_DH_NAME, retval); + kfree(buff); + return SCSI_DH_OK; + } + if (retval < 0 || !scsi_sense_valid(&sense_hdr)) { + sdev_printk(KERN_INFO, sdev, + "%s: rtpg failed, result %d\n", + ALUA_DH_NAME, retval); + kfree(buff); + if (retval < 0) + return SCSI_DH_DEV_TEMP_BUSY; + if (host_byte(retval) == DID_NO_CONNECT) + return SCSI_DH_RES_TEMP_UNAVAIL; + return SCSI_DH_IO; + } + + /* + * submit_rtpg() has failed on existing arrays + * when requesting extended header info, and + * the array doesn't support extended headers, + * even though it shouldn't according to T10. + * The retry without rtpg_ext_hdr_req set + * handles this. + * Note: some arrays return a sense key of ILLEGAL_REQUEST + * with ASC 00h if they don't support the extended header. + */ + if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) && + sense_hdr.sense_key == ILLEGAL_REQUEST) { + pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP; + goto retry; + } + /* + * If the array returns with 'ALUA state transition' + * sense code here it cannot return RTPG data during + * transition. So set the state to 'transitioning' directly. + */ + if (sense_hdr.sense_key == NOT_READY && + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) { + transitioning_sense = true; + goto skip_rtpg; + } + /* + * Retry on any other UNIT ATTENTION occurred. 
+ */ + if (sense_hdr.sense_key == UNIT_ATTENTION) + err = SCSI_DH_RETRY; + if (err == SCSI_DH_RETRY && + pg->expiry != 0 && time_before(jiffies, pg->expiry)) { + sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n", + ALUA_DH_NAME); + scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); + kfree(buff); + return err; + } + sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n", + ALUA_DH_NAME); + scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); + kfree(buff); + pg->expiry = 0; + return SCSI_DH_IO; + } + + len = get_unaligned_be32(&buff[0]) + 4; + + if (len > bufflen) { + /* Resubmit with the correct length */ + kfree(buff); + bufflen = len; + buff = kmalloc(bufflen, GFP_KERNEL); + if (!buff) { + sdev_printk(KERN_WARNING, sdev, + "%s: kmalloc buffer failed\n",__func__); + /* Temporary failure, bypass */ + pg->expiry = 0; + return SCSI_DH_DEV_TEMP_BUSY; + } + goto retry; + } + + orig_transition_tmo = pg->transition_tmo; + if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0) + pg->transition_tmo = buff[5]; + else + pg->transition_tmo = ALUA_FAILOVER_TIMEOUT; + + if (orig_transition_tmo != pg->transition_tmo) { + sdev_printk(KERN_INFO, sdev, + "%s: transition timeout set to %d seconds\n", + ALUA_DH_NAME, pg->transition_tmo); + pg->expiry = jiffies + pg->transition_tmo * HZ; + } + + if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR) + tpg_desc_tbl_off = 8; + else + tpg_desc_tbl_off = 4; + + for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off; + k < len; + k += off, desc += off) { + u16 group_id = get_unaligned_be16(&desc[2]); + + spin_lock_irqsave(&port_group_lock, flags); + tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, + group_id); + spin_unlock_irqrestore(&port_group_lock, flags); + if (tmp_pg) { + if (spin_trylock_irqsave(&tmp_pg->lock, flags)) { + if ((tmp_pg == pg) || + !(tmp_pg->flags & ALUA_PG_RUNNING)) { + struct alua_dh_data *h; + + tmp_pg->state = desc[0] & 0x0f; + tmp_pg->pref = desc[0] >> 7; + rcu_read_lock(); + list_for_each_entry_rcu(h, + &tmp_pg->dh_list, node) { + if (!h->sdev) + continue; + h->sdev->access_state = desc[0]; + } + rcu_read_unlock(); + } + if (tmp_pg == pg) + tmp_pg->valid_states = desc[1]; + spin_unlock_irqrestore(&tmp_pg->lock, flags); + } + kref_put(&tmp_pg->kref, release_port_group); + } + off = 8 + (desc[7] * 4); + } + + skip_rtpg: + spin_lock_irqsave(&pg->lock, flags); + if (transitioning_sense) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + + if (group_id_old != pg->group_id || state_old != pg->state || + pref_old != pg->pref || valid_states_old != pg->valid_states) + sdev_printk(KERN_INFO, sdev, + "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", + ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), + pg->pref ? 
"preferred" : "non-preferred", + pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', + pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', + pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', + pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', + pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s', + pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', + pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); + + switch (pg->state) { + case SCSI_ACCESS_STATE_TRANSITIONING: + if (time_before(jiffies, pg->expiry)) { + /* State transition, retry */ + pg->interval = ALUA_RTPG_RETRY_DELAY; + err = SCSI_DH_RETRY; + } else { + struct alua_dh_data *h; + + /* Transitioning time exceeded, set port to standby */ + err = SCSI_DH_IO; + pg->state = SCSI_ACCESS_STATE_STANDBY; + pg->expiry = 0; + rcu_read_lock(); + list_for_each_entry_rcu(h, &pg->dh_list, node) { + if (!h->sdev) + continue; + h->sdev->access_state = + (pg->state & SCSI_ACCESS_STATE_MASK); + if (pg->pref) + h->sdev->access_state |= + SCSI_ACCESS_STATE_PREFERRED; + } + rcu_read_unlock(); + } + break; + case SCSI_ACCESS_STATE_OFFLINE: + /* Path unusable */ + err = SCSI_DH_DEV_OFFLINED; + pg->expiry = 0; + break; + default: + /* Useable path if active */ + err = SCSI_DH_OK; + pg->expiry = 0; + break; + } + spin_unlock_irqrestore(&pg->lock, flags); + kfree(buff); + return err; +} + +/* + * alua_stpg - Issue a SET TARGET PORT GROUP command + * + * Issue a SET TARGET PORT GROUP command and evaluate the + * response. Returns SCSI_DH_RETRY per default to trigger + * a re-evaluation of the target group state or SCSI_DH_OK + * if no further action needs to be taken. + */ +static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg) +{ + int retval; + struct scsi_sense_hdr sense_hdr; + + if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) { + /* Only implicit ALUA supported, retry */ + return SCSI_DH_RETRY; + } + switch (pg->state) { + case SCSI_ACCESS_STATE_OPTIMAL: + return SCSI_DH_OK; + case SCSI_ACCESS_STATE_ACTIVE: + if ((pg->flags & ALUA_OPTIMIZE_STPG) && + !pg->pref && + (pg->tpgs & TPGS_MODE_IMPLICIT)) + return SCSI_DH_OK; + break; + case SCSI_ACCESS_STATE_STANDBY: + case SCSI_ACCESS_STATE_UNAVAILABLE: + break; + case SCSI_ACCESS_STATE_OFFLINE: + return SCSI_DH_IO; + case SCSI_ACCESS_STATE_TRANSITIONING: + break; + default: + sdev_printk(KERN_INFO, sdev, + "%s: stpg failed, unhandled TPGS state %d", + ALUA_DH_NAME, pg->state); + return SCSI_DH_NOSYS; + } + retval = submit_stpg(sdev, pg->group_id, &sense_hdr); + + if (retval) { + if (retval < 0 || !scsi_sense_valid(&sense_hdr)) { + sdev_printk(KERN_INFO, sdev, + "%s: stpg failed, result %d", + ALUA_DH_NAME, retval); + if (retval < 0) + return SCSI_DH_DEV_TEMP_BUSY; + } else { + sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n", + ALUA_DH_NAME); + scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); + } + } + /* Retry RTPG */ + return SCSI_DH_RETRY; +} + +static bool alua_rtpg_select_sdev(struct alua_port_group *pg) +{ + struct alua_dh_data *h; + struct scsi_device *sdev = NULL; + + lockdep_assert_held(&pg->lock); + if (WARN_ON(!pg->rtpg_sdev)) + return false; + + /* + * RCU protection isn't necessary for dh_list here + * as we hold pg->lock, but for access to h->pg. 
+ */ + rcu_read_lock(); + list_for_each_entry_rcu(h, &pg->dh_list, node) { + if (!h->sdev) + continue; + if (h->sdev == pg->rtpg_sdev) { + h->disabled = true; + continue; + } + if (rcu_dereference(h->pg) == pg && + !h->disabled && + !scsi_device_get(h->sdev)) { + sdev = h->sdev; + break; + } + } + rcu_read_unlock(); + + if (!sdev) { + pr_warn("%s: no device found for rtpg\n", + (pg->device_id_len ? + (char *)pg->device_id_str : "(nameless PG)")); + return false; + } + + sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n"); + + scsi_device_put(pg->rtpg_sdev); + pg->rtpg_sdev = sdev; + + return true; +} + +static void alua_rtpg_work(struct work_struct *work) +{ + struct alua_port_group *pg = + container_of(work, struct alua_port_group, rtpg_work.work); + struct scsi_device *sdev; + LIST_HEAD(qdata_list); + int err = SCSI_DH_OK; + struct alua_queue_data *qdata, *tmp; + struct alua_dh_data *h; + unsigned long flags; + + spin_lock_irqsave(&pg->lock, flags); + sdev = pg->rtpg_sdev; + if (!sdev) { + WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); + WARN_ON(pg->flags & ALUA_PG_RUN_STPG); + spin_unlock_irqrestore(&pg->lock, flags); + kref_put(&pg->kref, release_port_group); + return; + } + pg->flags |= ALUA_PG_RUNNING; + if (pg->flags & ALUA_PG_RUN_RTPG) { + int state = pg->state; + + pg->flags &= ~ALUA_PG_RUN_RTPG; + spin_unlock_irqrestore(&pg->lock, flags); + if (state == SCSI_ACCESS_STATE_TRANSITIONING) { + if (alua_tur(sdev) == SCSI_DH_RETRY) { + spin_lock_irqsave(&pg->lock, flags); + pg->flags &= ~ALUA_PG_RUNNING; + pg->flags |= ALUA_PG_RUN_RTPG; + if (!pg->interval) + pg->interval = ALUA_RTPG_RETRY_DELAY; + spin_unlock_irqrestore(&pg->lock, flags); + queue_delayed_work(kaluad_wq, &pg->rtpg_work, + pg->interval * HZ); + return; + } + /* Send RTPG on failure or if TUR indicates SUCCESS */ + } + err = alua_rtpg(sdev, pg); + spin_lock_irqsave(&pg->lock, flags); + + /* If RTPG failed on the current device, try using another */ + if (err == SCSI_DH_RES_TEMP_UNAVAIL && + alua_rtpg_select_sdev(pg)) + err = SCSI_DH_IMM_RETRY; + + if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY || + pg->flags & ALUA_PG_RUN_RTPG) { + pg->flags &= ~ALUA_PG_RUNNING; + if (err == SCSI_DH_IMM_RETRY) + pg->interval = 0; + else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG)) + pg->interval = ALUA_RTPG_RETRY_DELAY; + pg->flags |= ALUA_PG_RUN_RTPG; + spin_unlock_irqrestore(&pg->lock, flags); + queue_delayed_work(kaluad_wq, &pg->rtpg_work, + pg->interval * HZ); + return; + } + if (err != SCSI_DH_OK) + pg->flags &= ~ALUA_PG_RUN_STPG; + } + if (pg->flags & ALUA_PG_RUN_STPG) { + pg->flags &= ~ALUA_PG_RUN_STPG; + spin_unlock_irqrestore(&pg->lock, flags); + err = alua_stpg(sdev, pg); + spin_lock_irqsave(&pg->lock, flags); + if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { + pg->flags |= ALUA_PG_RUN_RTPG; + pg->interval = 0; + pg->flags &= ~ALUA_PG_RUNNING; + spin_unlock_irqrestore(&pg->lock, flags); + queue_delayed_work(kaluad_wq, &pg->rtpg_work, + pg->interval * HZ); + return; + } + } + + list_splice_init(&pg->rtpg_list, &qdata_list); + /* + * We went through an RTPG, for good or bad. + * Re-enable all devices for the next attempt. 
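+ *
+ * (A device gets marked disabled by alua_rtpg_select_sdev()
+ * once an RTPG on it has failed, so that it is not picked
+ * again while other paths remain to be tried.)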
+ */ + list_for_each_entry(h, &pg->dh_list, node) + h->disabled = false; + pg->rtpg_sdev = NULL; + spin_unlock_irqrestore(&pg->lock, flags); + + list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) { + list_del(&qdata->entry); + if (qdata->callback_fn) + qdata->callback_fn(qdata->callback_data, err); + kfree(qdata); + } + spin_lock_irqsave(&pg->lock, flags); + pg->flags &= ~ALUA_PG_RUNNING; + spin_unlock_irqrestore(&pg->lock, flags); + scsi_device_put(sdev); + kref_put(&pg->kref, release_port_group); +} + +/** + * alua_rtpg_queue() - cause RTPG to be submitted asynchronously + * @pg: ALUA port group associated with @sdev. + * @sdev: SCSI device for which to submit an RTPG. + * @qdata: Information about the callback to invoke after the RTPG. + * @force: Whether or not to submit an RTPG if a work item that will submit an + * RTPG already has been scheduled. + * + * Returns true if and only if alua_rtpg_work() will be called asynchronously. + * That function is responsible for calling @qdata->fn(). + */ +static bool alua_rtpg_queue(struct alua_port_group *pg, + struct scsi_device *sdev, + struct alua_queue_data *qdata, bool force) +{ + int start_queue = 0; + unsigned long flags; + if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) + return false; + + spin_lock_irqsave(&pg->lock, flags); + if (qdata) { + list_add_tail(&qdata->entry, &pg->rtpg_list); + pg->flags |= ALUA_PG_RUN_STPG; + force = true; + } + if (pg->rtpg_sdev == NULL) { + pg->interval = 0; + pg->flags |= ALUA_PG_RUN_RTPG; + kref_get(&pg->kref); + pg->rtpg_sdev = sdev; + start_queue = 1; + } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { + pg->flags |= ALUA_PG_RUN_RTPG; + /* Do not queue if the worker is already running */ + if (!(pg->flags & ALUA_PG_RUNNING)) { + kref_get(&pg->kref); + start_queue = 1; + } + } + + spin_unlock_irqrestore(&pg->lock, flags); + + if (start_queue) { + if (queue_delayed_work(kaluad_wq, &pg->rtpg_work, + msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) + sdev = NULL; + else + kref_put(&pg->kref, release_port_group); + } + if (sdev) + scsi_device_put(sdev); + + return true; +} + +/* + * alua_initialize - Initialize ALUA state + * @sdev: the device to be initialized + * + * For the prep_fn to work correctly we have + * to initialize the ALUA state for the device. 
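+ *
+ * Called from alua_bus_attach() when the handler is attached
+ * and again from alua_rescan() when the device is rescanned.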
+ */ +static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) +{ + int err = SCSI_DH_DEV_UNSUPP, tpgs; + + mutex_lock(&h->init_mutex); + h->disabled = false; + tpgs = alua_check_tpgs(sdev); + if (tpgs != TPGS_MODE_NONE) + err = alua_check_vpd(sdev, h, tpgs); + h->init_error = err; + mutex_unlock(&h->init_mutex); + return err; +} +/* + * alua_set_params - set/unset the optimize flag + * @sdev: device on the path to be activated + * params - parameters in the following format + * "no_of_params\0param1\0param2\0param3\0...\0" + * For example, to set the flag pass the following parameters + * from multipath.conf + * hardware_handler "2 alua 1" + */ +static int alua_set_params(struct scsi_device *sdev, const char *params) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg = NULL; + unsigned int optimize = 0, argc; + const char *p = params; + int result = SCSI_DH_OK; + unsigned long flags; + + if ((sscanf(params, "%u", &argc) != 1) || (argc != 1)) + return -EINVAL; + + while (*p++) + ; + if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1)) + return -EINVAL; + + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (!pg) { + rcu_read_unlock(); + return -ENXIO; + } + spin_lock_irqsave(&pg->lock, flags); + if (optimize) + pg->flags |= ALUA_OPTIMIZE_STPG; + else + pg->flags &= ~ALUA_OPTIMIZE_STPG; + spin_unlock_irqrestore(&pg->lock, flags); + rcu_read_unlock(); + + return result; +} + +/* + * alua_activate - activate a path + * @sdev: device on the path to be activated + * + * We're currently switching the port group to be activated only and + * let the array figure out the rest. + * There may be other arrays which require us to switch all port groups + * based on a certain policy. But until we actually encounter them it + * should be okay. + */ +static int alua_activate(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + struct alua_dh_data *h = sdev->handler_data; + int err = SCSI_DH_OK; + struct alua_queue_data *qdata; + struct alua_port_group *pg; + + qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); + if (!qdata) { + err = SCSI_DH_RES_TEMP_UNAVAIL; + goto out; + } + qdata->callback_fn = fn; + qdata->callback_data = data; + + mutex_lock(&h->init_mutex); + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (!pg || !kref_get_unless_zero(&pg->kref)) { + rcu_read_unlock(); + kfree(qdata); + err = h->init_error; + mutex_unlock(&h->init_mutex); + goto out; + } + rcu_read_unlock(); + mutex_unlock(&h->init_mutex); + + if (alua_rtpg_queue(pg, sdev, qdata, true)) { + fn = NULL; + } else { + kfree(qdata); + err = SCSI_DH_DEV_OFFLINED; + } + kref_put(&pg->kref, release_port_group); +out: + if (fn) + fn(data, err); + return 0; +} + +/* + * alua_check - check path status + * @sdev: device on the path to be checked + * + * Check the device status + */ +static void alua_check(struct scsi_device *sdev, bool force) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg; + + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (!pg || !kref_get_unless_zero(&pg->kref)) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + alua_rtpg_queue(pg, sdev, NULL, force); + kref_put(&pg->kref, release_port_group); +} + +/* + * alua_prep_fn - request callback + * + * Fail I/O to all paths not in state + * active/optimized or active/non-optimized. 
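+ *
+ * (Paths in the lba-dependent and transitioning states are
+ * passed through as well, as per the switch below.)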
+ */ +static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg; + unsigned char state = SCSI_ACCESS_STATE_OPTIMAL; + + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (pg) + state = pg->state; + rcu_read_unlock(); + + switch (state) { + case SCSI_ACCESS_STATE_OPTIMAL: + case SCSI_ACCESS_STATE_ACTIVE: + case SCSI_ACCESS_STATE_LBA: + case SCSI_ACCESS_STATE_TRANSITIONING: + return BLK_STS_OK; + default: + req->rq_flags |= RQF_QUIET; + return BLK_STS_IOERR; + } +} + +static void alua_rescan(struct scsi_device *sdev) +{ + struct alua_dh_data *h = sdev->handler_data; + + alua_initialize(sdev, h); +} + +/* + * alua_bus_attach - Attach device handler + * @sdev: device to be attached to + */ +static int alua_bus_attach(struct scsi_device *sdev) +{ + struct alua_dh_data *h; + int err; + + h = kzalloc(sizeof(*h) , GFP_KERNEL); + if (!h) + return SCSI_DH_NOMEM; + spin_lock_init(&h->pg_lock); + rcu_assign_pointer(h->pg, NULL); + h->init_error = SCSI_DH_OK; + h->sdev = sdev; + INIT_LIST_HEAD(&h->node); + + mutex_init(&h->init_mutex); + err = alua_initialize(sdev, h); + if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED) + goto failed; + + sdev->handler_data = h; + return SCSI_DH_OK; +failed: + kfree(h); + return err; +} + +/* + * alua_bus_detach - Detach device handler + * @sdev: device to be detached from + */ +static void alua_bus_detach(struct scsi_device *sdev) +{ + struct alua_dh_data *h = sdev->handler_data; + struct alua_port_group *pg; + + spin_lock(&h->pg_lock); + pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); + rcu_assign_pointer(h->pg, NULL); + spin_unlock(&h->pg_lock); + if (pg) { + spin_lock_irq(&pg->lock); + list_del_rcu(&h->node); + spin_unlock_irq(&pg->lock); + kref_put(&pg->kref, release_port_group); + } + sdev->handler_data = NULL; + synchronize_rcu(); + kfree(h); +} + +static struct scsi_device_handler alua_dh = { + .name = ALUA_DH_NAME, + .module = THIS_MODULE, + .attach = alua_bus_attach, + .detach = alua_bus_detach, + .prep_fn = alua_prep_fn, + .check_sense = alua_check_sense, + .activate = alua_activate, + .rescan = alua_rescan, + .set_params = alua_set_params, +}; + +static int __init alua_init(void) +{ + int r; + + kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0); + if (!kaluad_wq) + return -ENOMEM; + + r = scsi_register_device_handler(&alua_dh); + if (r != 0) { + printk(KERN_ERR "%s: Failed to register scsi device handler", + ALUA_DH_NAME); + destroy_workqueue(kaluad_wq); + } + return r; +} + +static void __exit alua_exit(void) +{ + scsi_unregister_device_handler(&alua_dh); + destroy_workqueue(kaluad_wq); +} + +module_init(alua_init); +module_exit(alua_exit); + +MODULE_DESCRIPTION("DM Multipath ALUA support"); +MODULE_AUTHOR("Hannes Reinecke "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ALUA_DH_VER); diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c new file mode 100644 index 000000000..2e21ab447 --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Target driver for EMC CLARiiON AX/CX-series hardware. + * Based on code from Lars Marowsky-Bree + * and Ed Goggin . + * + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 
+ * Copyright (C) 2006 Mike Christie + */ +#include +#include +#include +#include +#include +#include + +#define CLARIION_NAME "emc" + +#define CLARIION_TRESPASS_PAGE 0x22 +#define CLARIION_BUFFER_SIZE 0xFC +#define CLARIION_TIMEOUT (60 * HZ) +#define CLARIION_RETRIES 3 +#define CLARIION_UNBOUND_LU -1 +#define CLARIION_SP_A 0 +#define CLARIION_SP_B 1 + +/* Flags */ +#define CLARIION_SHORT_TRESPASS 1 +#define CLARIION_HONOR_RESERVATIONS 2 + +/* LUN states */ +#define CLARIION_LUN_UNINITIALIZED -1 +#define CLARIION_LUN_UNBOUND 0 +#define CLARIION_LUN_BOUND 1 +#define CLARIION_LUN_OWNED 2 + +static unsigned char long_trespass[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + CLARIION_TRESPASS_PAGE, /* Page code */ + 0x09, /* Page length - 2 */ + 0x01, /* Trespass code */ + 0xff, 0xff, /* Trespass target */ + 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */ +}; + +static unsigned char short_trespass[] = { + 0, 0, 0, 0, + CLARIION_TRESPASS_PAGE, /* Page code */ + 0x02, /* Page length - 2 */ + 0x01, /* Trespass code */ + 0xff, /* Trespass target */ +}; + +static const char * lun_state[] = +{ + "not bound", + "bound", + "owned", +}; + +struct clariion_dh_data { + /* + * Flags: + * CLARIION_SHORT_TRESPASS + * Use short trespass command (FC-series) or the long version + * (default for AX/CX CLARiiON arrays). + * + * CLARIION_HONOR_RESERVATIONS + * Whether or not (default) to honor SCSI reservations when + * initiating a switch-over. + */ + unsigned flags; + /* + * I/O buffer for both MODE_SELECT and INQUIRY commands. + */ + unsigned char buffer[CLARIION_BUFFER_SIZE]; + /* + * LUN state + */ + int lun_state; + /* + * SP Port number + */ + int port; + /* + * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this + * path's mapped LUN + */ + int default_sp; + /* + * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this + * path's mapped LUN + */ + int current_sp; +}; + +/* + * Parse MODE_SELECT cmd reply. + */ +static int trespass_endio(struct scsi_device *sdev, + struct scsi_sense_hdr *sshdr) +{ + int err = SCSI_DH_IO; + + sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, " + "0x%2x, 0x%2x while sending CLARiiON trespass " + "command.\n", CLARIION_NAME, sshdr->sense_key, + sshdr->asc, sshdr->ascq); + + if (sshdr->sense_key == 0x05 && sshdr->asc == 0x04 && + sshdr->ascq == 0x00) { + /* + * Array based copy in progress -- do not send + * mode_select or copy will be aborted mid-stream. + */ + sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in " + "progress while sending CLARiiON trespass " + "command.\n", CLARIION_NAME); + err = SCSI_DH_DEV_TEMP_BUSY; + } else if (sshdr->sense_key == 0x02 && sshdr->asc == 0x04 && + sshdr->ascq == 0x03) { + /* + * LUN Not Ready - Manual Intervention Required + * indicates in-progress ucode upgrade (NDU). 
+ */ + sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress " + "ucode upgrade NDU operation while sending " + "CLARiiON trespass command.\n", CLARIION_NAME); + err = SCSI_DH_DEV_TEMP_BUSY; + } else + err = SCSI_DH_DEV_FAILED; + return err; +} + +static int parse_sp_info_reply(struct scsi_device *sdev, + struct clariion_dh_data *csdev) +{ + int err = SCSI_DH_OK; + + /* check for in-progress ucode upgrade (NDU) */ + if (csdev->buffer[48] != 0) { + sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress " + "ucode upgrade NDU operation while finding " + "current active SP.", CLARIION_NAME); + err = SCSI_DH_DEV_TEMP_BUSY; + goto out; + } + if (csdev->buffer[4] > 2) { + /* Invalid buffer format */ + sdev_printk(KERN_NOTICE, sdev, + "%s: invalid VPD page 0xC0 format\n", + CLARIION_NAME); + err = SCSI_DH_NOSYS; + goto out; + } + switch (csdev->buffer[28] & 0x0f) { + case 6: + sdev_printk(KERN_NOTICE, sdev, + "%s: ALUA failover mode detected\n", + CLARIION_NAME); + break; + case 4: + /* Linux failover */ + break; + default: + sdev_printk(KERN_WARNING, sdev, + "%s: Invalid failover mode %d\n", + CLARIION_NAME, csdev->buffer[28] & 0x0f); + err = SCSI_DH_NOSYS; + goto out; + } + + csdev->default_sp = csdev->buffer[5]; + csdev->lun_state = csdev->buffer[4]; + csdev->current_sp = csdev->buffer[8]; + csdev->port = csdev->buffer[7]; + if (csdev->lun_state == CLARIION_LUN_OWNED) + sdev->access_state = SCSI_ACCESS_STATE_OPTIMAL; + else + sdev->access_state = SCSI_ACCESS_STATE_STANDBY; + if (csdev->default_sp == csdev->current_sp) + sdev->access_state |= SCSI_ACCESS_STATE_PREFERRED; +out: + return err; +} + +#define emc_default_str "FC (Legacy)" + +static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer) +{ + unsigned char len = buffer[4] + 5; + char *sp_model = NULL; + unsigned char sp_len, serial_len; + + if (len < 160) { + sdev_printk(KERN_WARNING, sdev, + "%s: Invalid information section length %d\n", + CLARIION_NAME, len); + /* Check for old FC arrays */ + if (!strncmp(buffer + 8, "DGC", 3)) { + /* Old FC array, not supporting extended information */ + sp_model = emc_default_str; + } + goto out; + } + + /* + * Parse extended information for SP model number + */ + serial_len = buffer[160]; + if (serial_len == 0 || serial_len + 161 > len) { + sdev_printk(KERN_WARNING, sdev, + "%s: Invalid array serial number length %d\n", + CLARIION_NAME, serial_len); + goto out; + } + sp_len = buffer[99]; + if (sp_len == 0 || serial_len + sp_len + 161 > len) { + sdev_printk(KERN_WARNING, sdev, + "%s: Invalid model number length %d\n", + CLARIION_NAME, sp_len); + goto out; + } + sp_model = &buffer[serial_len + 161]; + /* Strip whitespace at the end */ + while (sp_len > 1 && sp_model[sp_len - 1] == ' ') + sp_len--; + + sp_model[sp_len] = '\0'; + +out: + return sp_model; +} + +static int send_trespass_cmd(struct scsi_device *sdev, + struct clariion_dh_data *csdev) +{ + unsigned char *page22; + unsigned char cdb[MAX_COMMAND_SIZE]; + int err, res = SCSI_DH_OK, len; + struct scsi_sense_hdr sshdr; + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; + + if (csdev->flags & CLARIION_SHORT_TRESPASS) { + page22 = short_trespass; + if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS)) + /* Set Honor Reservations bit */ + page22[6] |= 0x80; + len = sizeof(short_trespass); + cdb[0] = MODE_SELECT; + cdb[1] = 0x10; + cdb[4] = len; + } else { + page22 = long_trespass; + if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS)) + /* Set Honor Reservations bit */ + page22[10] |= 0x80; + 
len = sizeof(long_trespass); + cdb[0] = MODE_SELECT_10; + cdb[8] = len; + } + BUG_ON((len > CLARIION_BUFFER_SIZE)); + memcpy(csdev->buffer, page22, len); + + err = scsi_execute(sdev, cdb, DMA_TO_DEVICE, csdev->buffer, len, NULL, + &sshdr, CLARIION_TIMEOUT * HZ, CLARIION_RETRIES, + req_flags, 0, NULL); + if (err) { + if (scsi_sense_valid(&sshdr)) + res = trespass_endio(sdev, &sshdr); + else { + sdev_printk(KERN_INFO, sdev, + "%s: failed to send MODE SELECT: %x\n", + CLARIION_NAME, err); + res = SCSI_DH_IO; + } + } + + return res; +} + +static enum scsi_disposition clariion_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + switch (sense_hdr->sense_key) { + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03) + /* + * LUN Not Ready - Manual Intervention Required + * indicates this is a passive path. + * + * FIXME: However, if this is seen and EVPD C0 + * indicates that this is due to a NDU in + * progress, we should set FAIL_PATH too. + * This indicates we might have to do a SCSI + * inquiry in the end_io path. Ugh. + * + * Can return FAILED only when we want the error + * recovery process to kick in. + */ + return SUCCESS; + break; + case ILLEGAL_REQUEST: + if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01) + /* + * An array based copy is in progress. Do not + * fail the path, do not bypass to another PG, + * do not retry. Fail the IO immediately. + * (Actually this is the same conclusion as in + * the default handler, but lets make sure.) + * + * Can return FAILED only when we want the error + * recovery process to kick in. + */ + return SUCCESS; + break; + case UNIT_ATTENTION: + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) + /* + * Unit Attention Code. This is the first IO + * to the new path, so just retry. 
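+ *
+ * (29h/00h is "power on, reset, or bus device reset
+ * occurred".)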
+ */ + return ADD_TO_MLQUEUE; + break; + } + + return SCSI_RETURN_NOT_HANDLED; +} + +static blk_status_t clariion_prep_fn(struct scsi_device *sdev, + struct request *req) +{ + struct clariion_dh_data *h = sdev->handler_data; + + if (h->lun_state != CLARIION_LUN_OWNED) { + req->rq_flags |= RQF_QUIET; + return BLK_STS_IOERR; + } + + return BLK_STS_OK; +} + +static int clariion_std_inquiry(struct scsi_device *sdev, + struct clariion_dh_data *csdev) +{ + int err = SCSI_DH_OK; + char *sp_model; + + sp_model = parse_sp_model(sdev, sdev->inquiry); + if (!sp_model) { + err = SCSI_DH_DEV_UNSUPP; + goto out; + } + + /* + * FC Series arrays do not support long trespass + */ + if (!strlen(sp_model) || !strncmp(sp_model, "FC",2)) + csdev->flags |= CLARIION_SHORT_TRESPASS; + + sdev_printk(KERN_INFO, sdev, + "%s: detected Clariion %s, flags %x\n", + CLARIION_NAME, sp_model, csdev->flags); +out: + return err; +} + +static int clariion_send_inquiry(struct scsi_device *sdev, + struct clariion_dh_data *csdev) +{ + int err = SCSI_DH_IO; + + if (!scsi_get_vpd_page(sdev, 0xC0, csdev->buffer, + CLARIION_BUFFER_SIZE)) + err = parse_sp_info_reply(sdev, csdev); + + return err; +} + +static int clariion_activate(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + struct clariion_dh_data *csdev = sdev->handler_data; + int result; + + result = clariion_send_inquiry(sdev, csdev); + if (result != SCSI_DH_OK) + goto done; + + if (csdev->lun_state == CLARIION_LUN_OWNED) + goto done; + + result = send_trespass_cmd(sdev, csdev); + if (result != SCSI_DH_OK) + goto done; + sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n", + CLARIION_NAME, + csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" ); + + /* Update status */ + result = clariion_send_inquiry(sdev, csdev); + if (result != SCSI_DH_OK) + goto done; + +done: + sdev_printk(KERN_INFO, sdev, + "%s: at SP %c Port %d (%s, default SP %c)\n", + CLARIION_NAME, csdev->current_sp + 'A', + csdev->port, lun_state[csdev->lun_state], + csdev->default_sp + 'A'); + + if (fn) + fn(data, result); + return 0; +} +/* + * params - parameters in the following format + * "no_of_params\0param1\0param2\0param3\0...\0" + * for example, string for 2 parameters with value 10 and 21 + * is specified as "2\010\021\0". + */ +static int clariion_set_params(struct scsi_device *sdev, const char *params) +{ + struct clariion_dh_data *csdev = sdev->handler_data; + unsigned int hr = 0, st = 0, argc; + const char *p = params; + int result = SCSI_DH_OK; + + if ((sscanf(params, "%u", &argc) != 1) || (argc != 2)) + return -EINVAL; + + while (*p++) + ; + if ((sscanf(p, "%u", &st) != 1) || (st > 1)) + return -EINVAL; + + while (*p++) + ; + if ((sscanf(p, "%u", &hr) != 1) || (hr > 1)) + return -EINVAL; + + if (st) + csdev->flags |= CLARIION_SHORT_TRESPASS; + else + csdev->flags &= ~CLARIION_SHORT_TRESPASS; + + if (hr) + csdev->flags |= CLARIION_HONOR_RESERVATIONS; + else + csdev->flags &= ~CLARIION_HONOR_RESERVATIONS; + + /* + * If this path is owned, we have to send a trespass command + * with the new parameters. If not, simply return. Next trespass + * command would use the parameters. 
+ */ + if (csdev->lun_state != CLARIION_LUN_OWNED) + goto done; + + csdev->lun_state = CLARIION_LUN_UNINITIALIZED; + result = send_trespass_cmd(sdev, csdev); + if (result != SCSI_DH_OK) + goto done; + + /* Update status */ + result = clariion_send_inquiry(sdev, csdev); + +done: + return result; +} + +static int clariion_bus_attach(struct scsi_device *sdev) +{ + struct clariion_dh_data *h; + int err; + + h = kzalloc(sizeof(*h) , GFP_KERNEL); + if (!h) + return SCSI_DH_NOMEM; + h->lun_state = CLARIION_LUN_UNINITIALIZED; + h->default_sp = CLARIION_UNBOUND_LU; + h->current_sp = CLARIION_UNBOUND_LU; + + err = clariion_std_inquiry(sdev, h); + if (err != SCSI_DH_OK) + goto failed; + + err = clariion_send_inquiry(sdev, h); + if (err != SCSI_DH_OK) + goto failed; + + sdev_printk(KERN_INFO, sdev, + "%s: connected to SP %c Port %d (%s, default SP %c)\n", + CLARIION_NAME, h->current_sp + 'A', + h->port, lun_state[h->lun_state], + h->default_sp + 'A'); + + sdev->handler_data = h; + return SCSI_DH_OK; + +failed: + kfree(h); + return err; +} + +static void clariion_bus_detach(struct scsi_device *sdev) +{ + kfree(sdev->handler_data); + sdev->handler_data = NULL; +} + +static struct scsi_device_handler clariion_dh = { + .name = CLARIION_NAME, + .module = THIS_MODULE, + .attach = clariion_bus_attach, + .detach = clariion_bus_detach, + .check_sense = clariion_check_sense, + .activate = clariion_activate, + .prep_fn = clariion_prep_fn, + .set_params = clariion_set_params, +}; + +static int __init clariion_init(void) +{ + int r; + + r = scsi_register_device_handler(&clariion_dh); + if (r != 0) + printk(KERN_ERR "%s: Failed to register scsi device handler.", + CLARIION_NAME); + return r; +} + +static void __exit clariion_exit(void) +{ + scsi_unregister_device_handler(&clariion_dh); +} + +module_init(clariion_init); +module_exit(clariion_exit); + +MODULE_DESCRIPTION("EMC CX/AX/FC-family driver"); +MODULE_AUTHOR("Mike Christie , Chandra Seetharaman "); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c new file mode 100644 index 000000000..0d2cfa60a --- /dev/null +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be + * upgraded. + * + * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 
+ * Copyright (C) 2006 Mike Christie + * Copyright (C) 2008 Hannes Reinecke + */ + +#include +#include +#include +#include +#include +#include + +#define HP_SW_NAME "hp_sw" + +#define HP_SW_TIMEOUT (60 * HZ) +#define HP_SW_RETRIES 3 + +#define HP_SW_PATH_UNINITIALIZED -1 +#define HP_SW_PATH_ACTIVE 0 +#define HP_SW_PATH_PASSIVE 1 + +struct hp_sw_dh_data { + int path_state; + int retries; + int retry_cnt; + struct scsi_device *sdev; +}; + +static int hp_sw_start_stop(struct hp_sw_dh_data *); + +/* + * tur_done - Handle TEST UNIT READY return status + * @sdev: sdev the command has been sent to + * @errors: blk error code + * + * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path + */ +static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h, + struct scsi_sense_hdr *sshdr) +{ + int ret = SCSI_DH_IO; + + switch (sshdr->sense_key) { + case UNIT_ATTENTION: + ret = SCSI_DH_IMM_RETRY; + break; + case NOT_READY: + if (sshdr->asc == 0x04 && sshdr->ascq == 2) { + /* + * LUN not ready - Initialization command required + * + * This is the passive path + */ + h->path_state = HP_SW_PATH_PASSIVE; + ret = SCSI_DH_OK; + break; + } + fallthrough; + default: + sdev_printk(KERN_WARNING, sdev, + "%s: sending tur failed, sense %x/%x/%x\n", + HP_SW_NAME, sshdr->sense_key, sshdr->asc, + sshdr->ascq); + break; + } + return ret; +} + +/* + * hp_sw_tur - Send TEST UNIT READY + * @sdev: sdev command should be sent to + * + * Use the TEST UNIT READY command to determine + * the path state. + */ +static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) +{ + unsigned char cmd[6] = { TEST_UNIT_READY }; + struct scsi_sense_hdr sshdr; + int ret = SCSI_DH_OK, res; + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; + +retry: + res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL); + if (res) { + if (scsi_sense_valid(&sshdr)) + ret = tur_done(sdev, h, &sshdr); + else { + sdev_printk(KERN_WARNING, sdev, + "%s: sending tur failed with %x\n", + HP_SW_NAME, res); + ret = SCSI_DH_IO; + } + } else { + h->path_state = HP_SW_PATH_ACTIVE; + ret = SCSI_DH_OK; + } + if (ret == SCSI_DH_IMM_RETRY) + goto retry; + + return ret; +} + +/* + * hp_sw_start_stop - Send START STOP UNIT command + * @sdev: sdev command should be sent to + * + * Sending START STOP UNIT activates the SP. + */ +static int hp_sw_start_stop(struct hp_sw_dh_data *h) +{ + unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 }; + struct scsi_sense_hdr sshdr; + struct scsi_device *sdev = h->sdev; + int res, rc = SCSI_DH_OK; + int retry_cnt = HP_SW_RETRIES; + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; + +retry: + res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, + HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL); + if (res) { + if (!scsi_sense_valid(&sshdr)) { + sdev_printk(KERN_WARNING, sdev, + "%s: sending start_stop_unit failed, " + "no sense available\n", HP_SW_NAME); + return SCSI_DH_IO; + } + switch (sshdr.sense_key) { + case NOT_READY: + if (sshdr.asc == 0x04 && sshdr.ascq == 3) { + /* + * LUN not ready - manual intervention required + * + * Switch-over in progress, retry. 
+ */ + if (--retry_cnt) + goto retry; + rc = SCSI_DH_RETRY; + break; + } + fallthrough; + default: + sdev_printk(KERN_WARNING, sdev, + "%s: sending start_stop_unit failed, " + "sense %x/%x/%x\n", HP_SW_NAME, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + rc = SCSI_DH_IO; + } + } + return rc; +} + +static blk_status_t hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) +{ + struct hp_sw_dh_data *h = sdev->handler_data; + + if (h->path_state != HP_SW_PATH_ACTIVE) { + req->rq_flags |= RQF_QUIET; + return BLK_STS_IOERR; + } + + return BLK_STS_OK; +} + +/* + * hp_sw_activate - Activate a path + * @sdev: sdev on the path to be activated + * + * The HP Active/Passive firmware is pretty simple; + * the passive path reports NOT READY with sense codes + * 0x04/0x02; a START STOP UNIT command will then + * activate the passive path (and deactivate the + * previously active one). + */ +static int hp_sw_activate(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + int ret = SCSI_DH_OK; + struct hp_sw_dh_data *h = sdev->handler_data; + + ret = hp_sw_tur(sdev, h); + + if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) + ret = hp_sw_start_stop(h); + + if (fn) + fn(data, ret); + return 0; +} + +static int hp_sw_bus_attach(struct scsi_device *sdev) +{ + struct hp_sw_dh_data *h; + int ret; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return SCSI_DH_NOMEM; + h->path_state = HP_SW_PATH_UNINITIALIZED; + h->retries = HP_SW_RETRIES; + h->sdev = sdev; + + ret = hp_sw_tur(sdev, h); + if (ret != SCSI_DH_OK) + goto failed; + if (h->path_state == HP_SW_PATH_UNINITIALIZED) { + ret = SCSI_DH_NOSYS; + goto failed; + } + + sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", + HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? + "active":"passive"); + + sdev->handler_data = h; + return SCSI_DH_OK; +failed: + kfree(h); + return ret; +} + +static void hp_sw_bus_detach( struct scsi_device *sdev ) +{ + kfree(sdev->handler_data); + sdev->handler_data = NULL; +} + +static struct scsi_device_handler hp_sw_dh = { + .name = HP_SW_NAME, + .module = THIS_MODULE, + .attach = hp_sw_bus_attach, + .detach = hp_sw_bus_detach, + .activate = hp_sw_activate, + .prep_fn = hp_sw_prep_fn, +}; + +static int __init hp_sw_init(void) +{ + return scsi_register_device_handler(&hp_sw_dh); +} + +static void __exit hp_sw_exit(void) +{ + scsi_unregister_device_handler(&hp_sw_dh); +} + +module_init(hp_sw_init); +module_exit(hp_sw_exit); + +MODULE_DESCRIPTION("HP Active/Passive driver"); +MODULE_AUTHOR("Mike Christie +#include +#include +#include +#include +#include + +#define RDAC_NAME "rdac" +#define RDAC_RETRY_COUNT 5 + +/* + * LSI mode page stuff + * + * These struct definitions and the forming of the + * mode page were taken from the LSI RDAC 2.4 GPL'd + * driver, and then converted to Linux conventions. 
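+ *
+ * Failover is performed by sending a MODE SELECT with the
+ * vendor-specific redundant controller page (0x2c), asking the
+ * controller that receives it to take over the LUNs flagged in
+ * the page's lun_table.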
+ */ +#define RDAC_QUIESCENCE_TIME 20 +/* + * Page Codes + */ +#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c + +/* + * Controller modes definitions + */ +#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02 + +/* + * RDAC Options field + */ +#define RDAC_FORCED_QUIESENCE 0x02 + +#define RDAC_TIMEOUT (60 * HZ) +#define RDAC_RETRIES 3 + +struct rdac_mode_6_hdr { + u8 data_len; + u8 medium_type; + u8 device_params; + u8 block_desc_len; +}; + +struct rdac_mode_10_hdr { + u16 data_len; + u8 medium_type; + u8 device_params; + u16 reserved; + u16 block_desc_len; +}; + +struct rdac_mode_common { + u8 controller_serial[16]; + u8 alt_controller_serial[16]; + u8 rdac_mode[2]; + u8 alt_rdac_mode[2]; + u8 quiescence_timeout; + u8 rdac_options; +}; + +struct rdac_pg_legacy { + struct rdac_mode_6_hdr hdr; + u8 page_code; + u8 page_len; + struct rdac_mode_common common; +#define MODE6_MAX_LUN 32 + u8 lun_table[MODE6_MAX_LUN]; + u8 reserved2[32]; + u8 reserved3; + u8 reserved4; +}; + +struct rdac_pg_expanded { + struct rdac_mode_10_hdr hdr; + u8 page_code; + u8 subpage_code; + u8 page_len[2]; + struct rdac_mode_common common; + u8 lun_table[256]; + u8 reserved3; + u8 reserved4; +}; + +struct c9_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC9 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "vace" */ + u8 avte_cvp; + u8 path_prio; + u8 reserved2[38]; +}; + +#define SUBSYS_ID_LEN 16 +#define SLOT_ID_LEN 2 +#define ARRAY_LABEL_LEN 31 + +struct c4_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC4 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "subs" */ + u8 subsys_id[SUBSYS_ID_LEN]; + u8 revision[4]; + u8 slot_id[SLOT_ID_LEN]; + u8 reserved[2]; +}; + +#define UNIQUE_ID_LEN 16 +struct c8_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC8 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "edid" */ + u8 reserved2[3]; + u8 vol_uniq_id_len; + u8 vol_uniq_id[16]; + u8 vol_user_label_len; + u8 vol_user_label[60]; + u8 array_uniq_id_len; + u8 array_unique_id[UNIQUE_ID_LEN]; + u8 array_user_label_len; + u8 array_user_label[60]; + u8 lun[8]; +}; + +struct rdac_controller { + u8 array_id[UNIQUE_ID_LEN]; + int use_ms10; + struct kref kref; + struct list_head node; /* list of all controllers */ + union { + struct rdac_pg_legacy legacy; + struct rdac_pg_expanded expanded; + } mode_select; + u8 index; + u8 array_name[ARRAY_LABEL_LEN]; + struct Scsi_Host *host; + spinlock_t ms_lock; + int ms_queued; + struct work_struct ms_work; + struct scsi_device *ms_sdev; + struct list_head ms_head; + struct list_head dh_list; +}; + +struct c2_inquiry { + u8 peripheral_info; + u8 page_code; /* 0xC2 */ + u8 reserved1; + u8 page_len; + u8 page_id[4]; /* "swr4" */ + u8 sw_version[3]; + u8 sw_date[3]; + u8 features_enabled; + u8 max_lun_supported; + u8 partitions[239]; /* Total allocation length should be 0xFF */ +}; + +struct rdac_dh_data { + struct list_head node; + struct rdac_controller *ctlr; + struct scsi_device *sdev; +#define UNINITIALIZED_LUN (1 << 8) + unsigned lun; + +#define RDAC_MODE 0 +#define RDAC_MODE_AVT 1 +#define RDAC_MODE_IOSHIP 2 + unsigned char mode; + +#define RDAC_STATE_ACTIVE 0 +#define RDAC_STATE_PASSIVE 1 + unsigned char state; + +#define RDAC_LUN_UNOWNED 0 +#define RDAC_LUN_OWNED 1 + char lun_state; + +#define RDAC_PREFERRED 0 +#define RDAC_NON_PREFERRED 1 + char preferred; + + union { + struct c2_inquiry c2; + struct c4_inquiry c4; + struct c8_inquiry c8; + struct c9_inquiry c9; + } inq; +}; + +static const char *mode[] = { + "RDAC", + "AVT", + "IOSHIP", +}; +static const char 
*lun_state[] = +{ + "unowned", + "owned", +}; + +struct rdac_queue_data { + struct list_head entry; + struct rdac_dh_data *h; + activate_complete callback_fn; + void *callback_data; +}; + +static LIST_HEAD(ctlr_list); +static DEFINE_SPINLOCK(list_lock); +static struct workqueue_struct *kmpath_rdacd; +static void send_mode_select(struct work_struct *work); + +/* + * module parameter to enable rdac debug logging. + * 2 bits for each type of logging, only two types defined for now + * Can be enhanced if required at later point + */ +static int rdac_logging = 1; +module_param(rdac_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, " + "Default is 1 - failover logging enabled, " + "set it to 0xF to enable all the logs"); + +#define RDAC_LOG_FAILOVER 0 +#define RDAC_LOG_SENSE 2 + +#define RDAC_LOG_BITS 2 + +#define RDAC_LOG_LEVEL(SHIFT) \ + ((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1)) + +#define RDAC_LOG(SHIFT, sdev, f, arg...) \ +do { \ + if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \ + sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \ +} while (0); + +static unsigned int rdac_failover_get(struct rdac_controller *ctlr, + struct list_head *list, + unsigned char *cdb) +{ + struct rdac_mode_common *common; + unsigned data_size; + struct rdac_queue_data *qdata; + u8 *lun_table; + + if (ctlr->use_ms10) { + struct rdac_pg_expanded *rdac_pg; + + data_size = sizeof(struct rdac_pg_expanded); + rdac_pg = &ctlr->mode_select.expanded; + memset(rdac_pg, 0, data_size); + common = &rdac_pg->common; + rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; + rdac_pg->subpage_code = 0x1; + rdac_pg->page_len[0] = 0x01; + rdac_pg->page_len[1] = 0x28; + lun_table = rdac_pg->lun_table; + } else { + struct rdac_pg_legacy *rdac_pg; + + data_size = sizeof(struct rdac_pg_legacy); + rdac_pg = &ctlr->mode_select.legacy; + memset(rdac_pg, 0, data_size); + common = &rdac_pg->common; + rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; + rdac_pg->page_len = 0x68; + lun_table = rdac_pg->lun_table; + } + common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; + common->quiescence_timeout = RDAC_QUIESCENCE_TIME; + common->rdac_options = RDAC_FORCED_QUIESENCE; + + list_for_each_entry(qdata, list, entry) { + lun_table[qdata->h->lun] = 0x81; + } + + /* Prepare the command. 
*/
+	if (ctlr->use_ms10) {
+		cdb[0] = MODE_SELECT_10;
+		cdb[7] = data_size >> 8;
+		cdb[8] = data_size & 0xff;
+	} else {
+		cdb[0] = MODE_SELECT;
+		cdb[4] = data_size;
+	}
+
+	return data_size;
+}
+
+static void release_controller(struct kref *kref)
+{
+	struct rdac_controller *ctlr;
+	ctlr = container_of(kref, struct rdac_controller, kref);
+
+	list_del(&ctlr->node);
+	kfree(ctlr);
+}
+
+static struct rdac_controller *get_controller(int index, char *array_name,
+			u8 *array_id, struct scsi_device *sdev)
+{
+	struct rdac_controller *ctlr, *tmp;
+
+	list_for_each_entry(tmp, &ctlr_list, node) {
+		if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
+			  (tmp->index == index) &&
+			  (tmp->host == sdev->host)) {
+			kref_get(&tmp->kref);
+			return tmp;
+		}
+	}
+	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
+	if (!ctlr)
+		return NULL;
+
+	/* initialize fields of controller */
+	memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
+	ctlr->index = index;
+	ctlr->host = sdev->host;
+	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
+
+	kref_init(&ctlr->kref);
+	ctlr->use_ms10 = -1;
+	ctlr->ms_queued = 0;
+	ctlr->ms_sdev = NULL;
+	spin_lock_init(&ctlr->ms_lock);
+	INIT_WORK(&ctlr->ms_work, send_mode_select);
+	INIT_LIST_HEAD(&ctlr->ms_head);
+	list_add(&ctlr->node, &ctlr_list);
+	INIT_LIST_HEAD(&ctlr->dh_list);
+
+	return ctlr;
+}
+
+static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
+			char *array_name, u8 *array_id)
+{
+	int err = SCSI_DH_IO, i;
+	struct c8_inquiry *inqp = &h->inq.c8;
+
+	if (!scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp,
+			       sizeof(struct c8_inquiry))) {
+		if (inqp->page_code != 0xc8)
+			return SCSI_DH_NOSYS;
+		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
+		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
+			return SCSI_DH_NOSYS;
+		h->lun = inqp->lun[7]; /* Uses only the last byte */
+
+		for(i=0; i<ARRAY_LABEL_LEN-1; ++i)
+			*(array_name+i) = inqp->array_user_label[(2*i)+1];
+
+		*(array_name+ARRAY_LABEL_LEN-1) = '\0';
+		memset(array_id, 0, UNIQUE_ID_LEN);
+		memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
+		err = SCSI_DH_OK;
+	}
+	return err;
+}
+
+static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
+{
+	int err = SCSI_DH_IO, access_state;
+	struct rdac_dh_data *tmp;
+	struct c9_inquiry *inqp = &h->inq.c9;
+
+	h->state = RDAC_STATE_ACTIVE;
+	if (!scsi_get_vpd_page(sdev, 0xC9, (unsigned char *)inqp,
+			       sizeof(struct c9_inquiry))) {
+		/* detect the operating mode */
+		if ((inqp->avte_cvp >> 5) & 0x1)
+			h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
+		else if (inqp->avte_cvp >> 7)
+			h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
+		else
+			h->mode = RDAC_MODE; /* LUN in RDAC mode */
+
+		/* Update ownership */
+		if (inqp->avte_cvp & 0x1) {
+			h->lun_state = RDAC_LUN_OWNED;
+			access_state = SCSI_ACCESS_STATE_OPTIMAL;
+		} else {
+			h->lun_state = RDAC_LUN_UNOWNED;
+			if (h->mode == RDAC_MODE) {
+				h->state = RDAC_STATE_PASSIVE;
+				access_state = SCSI_ACCESS_STATE_STANDBY;
+			} else
+				access_state = SCSI_ACCESS_STATE_ACTIVE;
+		}
+
+		/* Update path prio*/
+		if (inqp->path_prio & 0x1) {
+			h->preferred = RDAC_PREFERRED;
+			access_state |= SCSI_ACCESS_STATE_PREFERRED;
+		} else
+			h->preferred = RDAC_NON_PREFERRED;
+		rcu_read_lock();
+		list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) {
+			/* h->sdev should always be valid */
+			BUG_ON(!tmp->sdev);
+			tmp->sdev->access_state = access_state;
+		}
+		rcu_read_unlock();
+		err = SCSI_DH_OK;
+	}
+
+	return err;
+}
+
+static int initialize_controller(struct scsi_device *sdev,
+		struct rdac_dh_data *h, char *array_name, u8
*array_id) +{ + int err = SCSI_DH_IO, index; + struct c4_inquiry *inqp = &h->inq.c4; + + if (!scsi_get_vpd_page(sdev, 0xC4, (unsigned char *)inqp, + sizeof(struct c4_inquiry))) { + /* get the controller index */ + if (inqp->slot_id[1] == 0x31) + index = 0; + else + index = 1; + + spin_lock(&list_lock); + h->ctlr = get_controller(index, array_name, array_id, sdev); + if (!h->ctlr) + err = SCSI_DH_RES_TEMP_UNAVAIL; + else { + h->sdev = sdev; + list_add_rcu(&h->node, &h->ctlr->dh_list); + } + spin_unlock(&list_lock); + err = SCSI_DH_OK; + } + return err; +} + +static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) +{ + int err = SCSI_DH_IO; + struct c2_inquiry *inqp = &h->inq.c2; + + if (!scsi_get_vpd_page(sdev, 0xC2, (unsigned char *)inqp, + sizeof(struct c2_inquiry))) { + /* + * If more than MODE6_MAX_LUN luns are supported, use + * mode select 10 + */ + if (inqp->max_lun_supported >= MODE6_MAX_LUN) + h->ctlr->use_ms10 = 1; + else + h->ctlr->use_ms10 = 0; + err = SCSI_DH_OK; + } + return err; +} + +static int mode_select_handle_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + int err = SCSI_DH_IO; + struct rdac_dh_data *h = sdev->handler_data; + + if (!scsi_sense_valid(sense_hdr)) + goto done; + + switch (sense_hdr->sense_key) { + case NO_SENSE: + case ABORTED_COMMAND: + case UNIT_ATTENTION: + err = SCSI_DH_RETRY; + break; + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) + /* LUN Not Ready and is in the Process of Becoming + * Ready + */ + err = SCSI_DH_RETRY; + break; + case ILLEGAL_REQUEST: + if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36) + /* + * Command Lock contention + */ + err = SCSI_DH_IMM_RETRY; + break; + default: + break; + } + + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " + "MODE_SELECT returned with sense %02x/%02x/%02x", + (char *) h->ctlr->array_name, h->ctlr->index, + sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq); + +done: + return err; +} + +static void send_mode_select(struct work_struct *work) +{ + struct rdac_controller *ctlr = + container_of(work, struct rdac_controller, ms_work); + struct scsi_device *sdev = ctlr->ms_sdev; + struct rdac_dh_data *h = sdev->handler_data; + int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT; + struct rdac_queue_data *tmp, *qdata; + LIST_HEAD(list); + unsigned char cdb[MAX_COMMAND_SIZE]; + struct scsi_sense_hdr sshdr; + unsigned int data_size; + blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | + REQ_FAILFAST_DRIVER; + + spin_lock(&ctlr->ms_lock); + list_splice_init(&ctlr->ms_head, &list); + ctlr->ms_queued = 0; + ctlr->ms_sdev = NULL; + spin_unlock(&ctlr->ms_lock); + + retry: + memset(cdb, 0, sizeof(cdb)); + + data_size = rdac_failover_get(ctlr, &list, cdb); + + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " + "%s MODE_SELECT command", + (char *) h->ctlr->array_name, h->ctlr->index, + (retry_cnt == RDAC_RETRY_COUNT) ? 
"queueing" : "retrying"); + + if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select, + data_size, NULL, &sshdr, RDAC_TIMEOUT * HZ, + RDAC_RETRIES, req_flags, 0, NULL)) { + err = mode_select_handle_sense(sdev, &sshdr); + if (err == SCSI_DH_RETRY && retry_cnt--) + goto retry; + if (err == SCSI_DH_IMM_RETRY) + goto retry; + } + if (err == SCSI_DH_OK) { + h->state = RDAC_STATE_ACTIVE; + RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " + "MODE_SELECT completed", + (char *) h->ctlr->array_name, h->ctlr->index); + } + + list_for_each_entry_safe(qdata, tmp, &list, entry) { + list_del(&qdata->entry); + if (err == SCSI_DH_OK) + qdata->h->state = RDAC_STATE_ACTIVE; + if (qdata->callback_fn) + qdata->callback_fn(qdata->callback_data, err); + kfree(qdata); + } + return; +} + +static int queue_mode_select(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + struct rdac_queue_data *qdata; + struct rdac_controller *ctlr; + + qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); + if (!qdata) + return SCSI_DH_RETRY; + + qdata->h = sdev->handler_data; + qdata->callback_fn = fn; + qdata->callback_data = data; + + ctlr = qdata->h->ctlr; + spin_lock(&ctlr->ms_lock); + list_add_tail(&qdata->entry, &ctlr->ms_head); + if (!ctlr->ms_queued) { + ctlr->ms_queued = 1; + ctlr->ms_sdev = sdev; + queue_work(kmpath_rdacd, &ctlr->ms_work); + } + spin_unlock(&ctlr->ms_lock); + return SCSI_DH_OK; +} + +static int rdac_activate(struct scsi_device *sdev, + activate_complete fn, void *data) +{ + struct rdac_dh_data *h = sdev->handler_data; + int err = SCSI_DH_OK; + int act = 0; + + err = check_ownership(sdev, h); + if (err != SCSI_DH_OK) + goto done; + + switch (h->mode) { + case RDAC_MODE: + if (h->lun_state == RDAC_LUN_UNOWNED) + act = 1; + break; + case RDAC_MODE_IOSHIP: + if ((h->lun_state == RDAC_LUN_UNOWNED) && + (h->preferred == RDAC_PREFERRED)) + act = 1; + break; + default: + break; + } + + if (act) { + err = queue_mode_select(sdev, fn, data); + if (err == SCSI_DH_OK) + return 0; + } +done: + if (fn) + fn(data, err); + return 0; +} + +static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req) +{ + struct rdac_dh_data *h = sdev->handler_data; + + if (h->state != RDAC_STATE_ACTIVE) { + req->rq_flags |= RQF_QUIET; + return BLK_STS_IOERR; + } + + return BLK_STS_OK; +} + +static enum scsi_disposition rdac_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ + struct rdac_dh_data *h = sdev->handler_data; + + RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, " + "I/O returned with sense %02x/%02x/%02x", + (char *) h->ctlr->array_name, h->ctlr->index, + sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq); + + switch (sense_hdr->sense_key) { + case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) + /* LUN Not Ready - Logical Unit Not Ready and is in + * the process of becoming ready + * Just retry. + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) + /* LUN Not Ready - Storage firmware incompatible + * Manual code synchonisation required. + * + * Nothing we can do here. Try to bypass the path. + */ + return SUCCESS; + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1) + /* LUN Not Ready - Quiescense in progress + * + * Just retry and wait. + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02) + /* LUN Not Ready - Quiescense in progress + * or has been achieved + * Just retry. 
+ */ + return ADD_TO_MLQUEUE; + break; + case ILLEGAL_REQUEST: + if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) { + /* Invalid Request - Current Logical Unit Ownership. + * Controller is not the current owner of the LUN, + * Fail the path, so that the other path be used. + */ + h->state = RDAC_STATE_PASSIVE; + return SUCCESS; + } + break; + case UNIT_ATTENTION: + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) + /* + * Power On, Reset, or Bus Device Reset, just retry. + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02) + /* + * Quiescence in progress , just retry. + */ + return ADD_TO_MLQUEUE; + break; + } + /* success just means we do not care what scsi-ml does */ + return SCSI_RETURN_NOT_HANDLED; +} + +static int rdac_bus_attach(struct scsi_device *sdev) +{ + struct rdac_dh_data *h; + int err; + char array_name[ARRAY_LABEL_LEN]; + char array_id[UNIQUE_ID_LEN]; + + h = kzalloc(sizeof(*h) , GFP_KERNEL); + if (!h) + return SCSI_DH_NOMEM; + h->lun = UNINITIALIZED_LUN; + h->state = RDAC_STATE_ACTIVE; + + err = get_lun_info(sdev, h, array_name, array_id); + if (err != SCSI_DH_OK) + goto failed; + + err = initialize_controller(sdev, h, array_name, array_id); + if (err != SCSI_DH_OK) + goto failed; + + err = check_ownership(sdev, h); + if (err != SCSI_DH_OK) + goto clean_ctlr; + + err = set_mode_select(sdev, h); + if (err != SCSI_DH_OK) + goto clean_ctlr; + + sdev_printk(KERN_NOTICE, sdev, + "%s: LUN %d (%s) (%s)\n", + RDAC_NAME, h->lun, mode[(int)h->mode], + lun_state[(int)h->lun_state]); + + sdev->handler_data = h; + return SCSI_DH_OK; + +clean_ctlr: + spin_lock(&list_lock); + kref_put(&h->ctlr->kref, release_controller); + spin_unlock(&list_lock); + +failed: + kfree(h); + return err; +} + +static void rdac_bus_detach( struct scsi_device *sdev ) +{ + struct rdac_dh_data *h = sdev->handler_data; + + if (h->ctlr && h->ctlr->ms_queued) + flush_workqueue(kmpath_rdacd); + + spin_lock(&list_lock); + if (h->ctlr) { + list_del_rcu(&h->node); + kref_put(&h->ctlr->kref, release_controller); + } + spin_unlock(&list_lock); + sdev->handler_data = NULL; + synchronize_rcu(); + kfree(h); +} + +static struct scsi_device_handler rdac_dh = { + .name = RDAC_NAME, + .module = THIS_MODULE, + .prep_fn = rdac_prep_fn, + .check_sense = rdac_check_sense, + .attach = rdac_bus_attach, + .detach = rdac_bus_detach, + .activate = rdac_activate, +}; + +static int __init rdac_init(void) +{ + int r; + + r = scsi_register_device_handler(&rdac_dh); + if (r != 0) { + printk(KERN_ERR "Failed to register scsi device handler."); + goto done; + } + + /* + * Create workqueue to handle mode selects for rdac + */ + kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd"); + if (!kmpath_rdacd) { + scsi_unregister_device_handler(&rdac_dh); + printk(KERN_ERR "kmpath_rdacd creation failed.\n"); + + r = -EINVAL; + } +done: + return r; +} + +static void __exit rdac_exit(void) +{ + destroy_workqueue(kmpath_rdacd); + scsi_unregister_device_handler(&rdac_dh); +} + +module_init(rdac_init); +module_exit(rdac_exit); + +MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver"); +MODULE_AUTHOR("Mike Christie, Chandra Seetharaman"); +MODULE_VERSION("01.00.0000.0000"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3
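
As an illustrative aside (not part of the patch): the CDB handling in rdac_failover_get() above places the parameter-list length of the redundant-controller mode page into byte 4 of a MODE SELECT(6) CDB, or big-endian into bytes 7-8 of a MODE SELECT(10) CDB, depending on ctlr->use_ms10. A minimal user-space sketch of that logic follows; the opcode values are the standard SPC ones, while fill_mode_select_cdb() and the sizes passed to it are hypothetical stand-ins rather than kernel definitions.

#include <stdio.h>
#include <string.h>

#define MODE_SELECT	0x15	/* SPC MODE SELECT(6) opcode */
#define MODE_SELECT_10	0x55	/* SPC MODE SELECT(10) opcode */

/*
 * Hypothetical helper mirroring the CDB setup in rdac_failover_get():
 * byte 4 carries the parameter list length for MODE SELECT(6); bytes 7-8
 * carry it (most significant byte first) for MODE SELECT(10).
 */
static void fill_mode_select_cdb(unsigned char *cdb, int use_ms10,
				 unsigned int data_size)
{
	memset(cdb, 0, 16);
	if (use_ms10) {
		cdb[0] = MODE_SELECT_10;
		cdb[7] = data_size >> 8;
		cdb[8] = data_size & 0xff;
	} else {
		cdb[0] = MODE_SELECT;
		cdb[4] = data_size;
	}
}

static void dump_cdb(const char *tag, const unsigned char *cdb, int len)
{
	int i;

	printf("%s:", tag);
	for (i = 0; i < len; i++)
		printf(" %02x", cdb[i]);
	printf("\n");
}

int main(void)
{
	unsigned char cdb[16];

	/*
	 * The sizes below are illustrative stand-ins, not the kernel's
	 * sizeof(struct rdac_pg_legacy/expanded) values.
	 */
	fill_mode_select_cdb(cdb, 0, 0x74);
	dump_cdb("MODE SELECT(6) CDB", cdb, 6);

	fill_mode_select_cdb(cdb, 1, 0x130);
	dump_cdb("MODE SELECT(10) CDB", cdb, 10);

	return 0;
}

For context, set_mode_select() in the patch switches a controller to the mode-10 variant when the array reports support for MODE6_MAX_LUN (32) or more logical units, since the legacy mode-6 page carries only a 32-entry lun_table while the expanded page carries 256 entries.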